Publications
Search
Liu, Ruying; Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
Gender moderates the effects of ambient bergamot scent on stress restoration in offices Journal Article
In: Journal of Environmental Psychology, vol. 91, pp. 102135, 2023, ISSN: 0272-4944.
@article{liu_gender_2023,
  title     = {Gender moderates the effects of ambient bergamot scent on stress restoration in offices},
  author    = {Liu, Ruying and Awada, Mohamad and Becerik-Gerber, Burcin and Lucas, Gale M. and Roll, Shawn C.},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S0272494423001834},
  doi       = {10.1016/j.jenvp.2023.102135},
  issn      = {0272-4944},
  year      = {2023},
  date      = {2023-11-01},
  urldate   = {2023-09-20},
  journal   = {Journal of Environmental Psychology},
  volume    = {91},
  pages     = {102135},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Seyedrezaei, Mirmahdi; Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn
Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in North American Mediterranean climate Journal Article
In: Building and Environment, vol. 244, pp. 110743, 2023, ISSN: 0360-1323.
@article{seyedrezaei_interaction_2023,
  title     = {Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in {North American Mediterranean} climate},
  author    = {Seyedrezaei, Mirmahdi and Awada, Mohamad and Becerik-Gerber, Burcin and Lucas, Gale and Roll, Shawn},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323007709},
  doi       = {10.1016/j.buildenv.2023.110743},
  issn      = {0360-1323},
  year      = {2023},
  date      = {2023-10-01},
  urldate   = {2023-09-20},
  journal   = {Building and Environment},
  volume    = {244},
  pages     = {110743},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Mozgai, Sharon; Kaurloto, Cari; Winn, Jade; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno; Scherer, Stefan
Machine learning for semi-automated scoping reviews Journal Article
In: Intelligent Systems with Applications, vol. 19, pp. 200249, 2023, ISSN: 2667-3053.
@article{mozgai_machine_2023,
  title     = {Machine learning for semi-automated scoping reviews},
  author    = {Mozgai, Sharon and Kaurloto, Cari and Winn, Jade and Leeds, Andrew and Heylen, Dirk and Hartholt, Arno and Scherer, Stefan},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S2667305323000741},
  doi       = {10.1016/j.iswa.2023.200249},
  issn      = {2667-3053},
  year      = {2023},
  date      = {2023-09-01},
  urldate   = {2023-08-23},
  journal   = {Intelligent Systems with Applications},
  volume    = {19},
  pages     = {200249},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Tran, Minh; Yin, Yufeng; Soleymani, Mohammad
Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition Proceedings Article
In: INTERSPEECH 2023, pp. 636–640, ISCA, 2023.
@inproceedings{tran_personalized_2023,
  title     = {Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition},
  author    = {Tran, Minh and Yin, Yufeng and Soleymani, Mohammad},
  url       = {https://www.isca-speech.org/archive/interspeech_2023/tran23c_interspeech.html},
  doi       = {10.21437/Interspeech.2023-2170},
  year      = {2023},
  date      = {2023-08-01},
  urldate   = {2023-08-23},
  booktitle = {INTERSPEECH 2023},
  pages     = {636--640},
  publisher = {ISCA},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Liu, Ruying; Becerik-Gerber, Burcin; Lucas, Gale M.
Effectiveness of VR-based training on improving occupants’ response and preparedness for active shooter incidents Journal Article
In: Safety Science, vol. 164, pp. 106175, 2023, ISSN: 0925-7535.
@article{liu_effectiveness_2023,
  title     = {Effectiveness of {VR-based} training on improving occupants’ response and preparedness for active shooter incidents},
  author    = {Liu, Ruying and Becerik-Gerber, Burcin and Lucas, Gale M.},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S0925753523001170},
  doi       = {10.1016/j.ssci.2023.106175},
  issn      = {0925-7535},
  year      = {2023},
  date      = {2023-08-01},
  urldate   = {2023-08-22},
  journal   = {Safety Science},
  volume    = {164},
  pages     = {106175},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Tran, Minh; Soleymani, Mohammad
A Speech Representation Anonymization Framework via Selective Noise Perturbation Proceedings Article
In: ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1–5, IEEE, Rhodes Island, Greece, 2023, ISBN: 978-1-72816-327-7.
@inproceedings{tran_speech_2023,
  title     = {A Speech Representation Anonymization Framework via Selective Noise Perturbation},
  author    = {Tran, Minh and Soleymani, Mohammad},
  url       = {https://ieeexplore.ieee.org/document/10095173/},
  doi       = {10.1109/ICASSP49357.2023.10095173},
  isbn      = {978-1-72816-327-7},
  year      = {2023},
  date      = {2023-06-01},
  urldate   = {2023-08-23},
  booktitle = {ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages     = {1--5},
  publisher = {IEEE},
  address   = {Rhodes Island, Greece},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chadalapaka, Viswanath; Ustun, Volkan; Liu, Lixing
Leveraging Graph Networks to Model Environments in Reinforcement Learning Journal Article
In: FLAIRS, vol. 36, 2023, ISSN: 2334-0762.
@article{chadalapaka_leveraging_2023,
  author    = {Chadalapaka, Viswanath and Ustun, Volkan and Liu, Lixing},
  title     = {Leveraging Graph Networks to Model Environments in Reinforcement Learning},
  journal   = {FLAIRS},
  volume    = {36},
  year      = {2023},
  date      = {2023-05-01},
  urldate   = {2023-08-04},
  issn      = {2334-0762},
  doi       = {10.32473/flairs.36.133118},
  url       = {https://journals.flvc.org/FLAIRS/article/view/133118},
  abstract  = {This paper proposes leveraging graph neural networks (GNNs) to model an agent’s environment to construct superior policy networks in reinforcement learning (RL). To this end, we explore the effects of different combinations of GNNs and graph network pooling functions on policy performance. We also run experiments at different levels of problem complexity, which affect how easily we expect an agent to learn an optimal policy and therefore show whether or not graph networks are effective at various problem complexity levels. The efficacy of our approach is shown via experimentation in a partially-observable, non-stationary environment that parallels the highly-practical scenario of a military training exercise with human trainees, where the learning goal is to become the best sparring partner possible for human trainees. Our results present that our models can generate better-performing sparring partners by employing GNNs, as demonstrated by these experiments in the proof-of-concept environment. We also explore our model’s applicability in Multi-Agent RL scenarios. Our code is available online at https://github.com/Derposoft/GNNsAsEnvs.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Aris, Timothy; Ustun, Volkan; Kumar, Rajay
Learning to Take Cover with Navigation-Based Waypoints via Reinforcement Learning Journal Article
In: FLAIRS, vol. 36, 2023, ISSN: 2334-0762.
@article{aris_learning_2023,
  author    = {Aris, Timothy and Ustun, Volkan and Kumar, Rajay},
  title     = {Learning to Take Cover with Navigation-Based Waypoints via Reinforcement Learning},
  journal   = {FLAIRS},
  volume    = {36},
  year      = {2023},
  date      = {2023-05-01},
  urldate   = {2023-08-04},
  issn      = {2334-0762},
  doi       = {10.32473/flairs.36.133348},
  url       = {https://journals.flvc.org/FLAIRS/article/view/133348},
  abstract  = {This paper presents a reinforcement learning model designed to learn how to take cover on geo-specific terrains, an essential behavior component for military training simulations. Training of the models is performed on the Rapid Integration and Development Environment (RIDE) leveraging the Unity ML-Agents framework. This work expands on previous work on raycast-based agents by increasing the number of enemies from one to three. We demonstrate an automated way of generating training and testing data within geo-specific terrains. We show that replacing the action space with a more abstracted, navmesh-based waypoint movement system can increase the generality and success rate of the models while providing similar results to our previous paper's results regarding retraining across terrains. We also comprehensively evaluate the differences between these and the previous models. Finally, we show that incorporating pixels into the model's input can increase performance at the cost of longer training times.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Liu, Ruying; Zhu, Runhe; Becerik‐Gerber, Burcin; Lucas, Gale M.; Southers, Erroll G.
Be prepared: How training and emergency type affect evacuation behaviour Journal Article
In: Computer Assisted Learning, pp. jcal.12812, 2023, ISSN: 0266-4909, 1365-2729.
@article{liu_be_2023,
  title     = {Be prepared: How training and emergency type affect evacuation behaviour},
  author    = {Liu, Ruying and Zhu, Runhe and Becerik-Gerber, Burcin and Lucas, Gale M. and Southers, Erroll G.},
  url       = {https://onlinelibrary.wiley.com/doi/10.1111/jcal.12812},
  doi       = {10.1111/jcal.12812},
  issn      = {0266-4909, 1365-2729},
  year      = {2023},
  date      = {2023-04-01},
  urldate   = {2023-08-22},
  journal   = {Journal of Computer Assisted Learning},
  pages     = {jcal.12812},
  abstract  = {Abstract
Background
Video‐based training has been widely adopted by private organizations and public authorities to educate occupants on various types of building emergencies. However, the effectiveness of video‐based training for preparing occupants for building emergencies has not been rigorously studied nor has the impact of emergency type been investigated on training effectiveness.
Objectives
This study examines whether video‐based training is an effective method to prepare occupants for building emergencies and how the effectiveness differs in the context of different building emergencies.
Methods
We simulated fire and active shooter emergencies in a virtual office building and conducted evacuation experiments to examine participants' emergency responses using both objective and subjective metrics. A total of 108 participants were recruited and responded to the fire or active shooter incident with or without video‐based training.
Results and Conclusions
The results revealed that participants with video‐based training more often chose to follow other recommendations when responding to building emergencies instead of simply following others. Results from ANOVA showed that training increased participants' self‐efficacy significantly, especially for those in the active shooter group. Moreover, participants in the active shooter simulation had a higher level of response efficacy than those in the fire emergency simulation. Our results also demonstrated the influence of emergency type on participants' final decisions and considerations of the recommendations.
Implications
Our results suggested that video‐based training is effective in improving participants' emergency preparedness and changing their behaviour patterns to a certain extent such as reducing following behaviour and encouraging safe evacuations. Additionally, statistically significant interactions between video‐based training and emergency types suggested that training effectiveness should be considered in accordance with the emergency type.
,
Lay Description
What is already known about this topic
People can behave differently in different types of building emergencies. Understanding human behaviours in building emergencies is essential for developing emergency preparedness strategies.
Emergency training is important for building occupants and video is a widely used media for emergency training. However, its training effectiveness needs to be evaluated.
What this paper adds
We used virtual environments to investigate evacuation behaviour.
The effectiveness of video‐based training and human responses in building emergencies were studied on both subjective responses and objective measurements.
Video‐based training significantly reduced the occurrence of following behaviours.
The different natures of the fire emergency and active shooter incidents shape the effectiveness of video‐based training.
Implications of study findings for practitioners
Video‐based training can improve building occupants' emergency preparedness to a certain extent.
Emergency training media should be designed considering the influence of emergency type.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Background
Video‐based training has been widely adopted by private organizations and public authorities to educate occupants on various types of building emergencies. However, the effectiveness of video‐based training for preparing occupants for building emergencies has not been rigorously studied nor has the impact of emergency type been investigated on training effectiveness.
Objectives
This study examines whether video‐based training is an effective method to prepare occupants for building emergencies and how the effectiveness differs in the context of different building emergencies.
Methods
We simulated fire and active shooter emergencies in a virtual office building and conducted evacuation experiments to examine participants' emergency responses using both objective and subjective metrics. A total of 108 participants were recruited and responded to the fire or active shooter incident with or without video‐based training.
Results and Conclusions
The results revealed that participants with video‐based training more often chose to follow other recommendations when responding to building emergencies instead of simply following others. Results from ANOVA showed that training increased participants' self‐efficacy significantly, especially for those in the active shooter group. Moreover, participants in the active shooter simulation had a higher level of response efficacy than those in the fire emergency simulation. Our results also demonstrated the influence of emergency type on participants' final decisions and considerations of the recommendations.
Implications
Our results suggested that video‐based training is effective in improving participants' emergency preparedness and changing their behaviour patterns to a certain extent such as reducing following behaviour and encouraging safe evacuations. Additionally, statistically significant interactions between video‐based training and emergency types suggested that training effectiveness should be considered in accordance with the emergency type.
,
Lay Description
What is already known about this topic
People can behave differently in different types of building emergencies. Understanding human behaviours in building emergencies is essential for developing emergency preparedness strategies.
Emergency training is important for building occupants and video is a widely used media for emergency training. However, its training effectiveness needs to be evaluated.
What this paper adds
We used virtual environments to investigate evacuation behaviour.
The effectiveness of video‐based training and human responses in building emergencies were studied on both subjective responses and objective measurements.
Video‐based training significantly reduced the occurrence of following behaviours.
The different natures of the fire emergency and active shooter incidents shape the effectiveness of video‐based training.
Implications of study findings for practitioners
Video‐based training can improve building occupants' emergency preparedness to a certain extent.
Emergency training media should be designed considering the influence of emergency type.
Hsu, Wan-Yu; Anguera, Joaquin A.; Rizzo, Albert; Campusano, Richard; Chiaravalloti, Nancy D.; DeLuca, John; Gazzaley, Adam; Bove, Riley M.
A virtual reality program to assess cognitive function in multiple sclerosis: A pilot study Journal Article
In: Frontiers in Human Neuroscience, 2023, (Place: Lausanne, Switzerland Publisher: Frontiers Research Foundation Section: ORIGINAL RESEARCH article).
@article{hsu_virtual_2023,
  author    = {Hsu, Wan-Yu and Anguera, Joaquin A. and Rizzo, Albert and Campusano, Richard and Chiaravalloti, Nancy D. and DeLuca, John and Gazzaley, Adam and Bove, Riley M.},
  title     = {A virtual reality program to assess cognitive function in multiple sclerosis: A pilot study},
  journal   = {Frontiers in Human Neuroscience},
  year      = {2023},
  date      = {2023-03-01},
  urldate   = {2023-03-31},
  doi       = {10.3389/fnhum.2023.1139316},
  url       = {https://www.proquest.com/docview/2787027204/abstract/BEA88F7BB72B4623PQ/1},
  abstract  = {Introduction: Cognitive impairment is a debilitating symptom in people with multiple sclerosis (MS). Most of the neuropsychological tasks have little resemblance to everyday life. There is a need for ecologically valid tools for assessing cognition in real-life functional contexts in MS. One potential solution would involve the use of virtual reality (VR) to exert finer control over the task presentation environment; however, VR studies in the MS population are scarce. Objectives: To explore the utility and feasibility of a VR program for cognitive assessment in MS. Methods: A VR classroom embedded with a continuous performance task (CPT) was assessed in 10 non-MS adults and 10 people with MS with low cognitive functioning. Participants performed the CPT with distractors (ie. WD) and without distractors (ie. ND). The Symbol Digit Modalities Test (SDMT), California Verbal Learning Test – II (CVLT-II), and a feedback survey on the VR program were administered. Results: People with MS exhibited greater reaction time variability (RTV) compared to non-MS participants, and greater RTV in both WD and ND conditions was associated with lower SDMT. Conclusions: VR tools warrant further research to determine their value as an ecologically valid platform for assessing cognition and everyday functioning in people with MS.},
  note      = {Place: Lausanne, Switzerland Publisher: Frontiers Research Foundation Section: ORIGINAL RESEARCH article},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Lucas, Gale M.; Mell, Johnathan; Boberg, Jill; Zenone, Forrest; Visser, Ewart J.; Tossell, Chad; Seech, Todd
Customizing virtual interpersonal skills training applications may not improve trainee performance Journal Article
In: Sci Rep, vol. 13, no. 1, pp. 78, 2023, ISSN: 2045-2322, (Number: 1 Publisher: Nature Publishing Group).
@article{lucas_customizing_2023,
  title     = {Customizing virtual interpersonal skills training applications may not improve trainee performance},
  author    = {Lucas, Gale M. and Mell, Johnathan and Boberg, Jill and Zenone, Forrest and Visser, Ewart J. and Tossell, Chad and Seech, Todd},
  url       = {https://www.nature.com/articles/s41598-022-27154-2},
  doi       = {10.1038/s41598-022-27154-2},
  issn      = {2045-2322},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-03-31},
  journal   = {Scientific Reports},
  volume    = {13},
  number    = {1},
  pages     = {78},
  abstract  = {While some theoretical perspectives imply that the context of a virtual training should be customized to match the intended context where those skills would ultimately be applied, others suggest this might not be necessary for learning. It is important to determine whether manipulating context matters for performance in training applications because customized virtual training systems made for specific use cases are more costly than generic “off-the-shelf” ones designed for a broader set of users. Accordingly, we report a study where military cadets use a virtual platform to practice their negotiation skills, and are randomly assigned to one of two virtual context conditions: military versus civilian. Out of 28 measures capturing performance in the negotiation, there was only one significant result: cadets in the civilian condition politely ask the agent to make an offer significantly more than those in the military condition. These results imply that—for this interpersonal skills application, and perhaps ones like it—virtual context may matter very little for performance during social skills training, and that commercial systems may yield real benefits to military scenarios with little-to-no modification.},
  note      = {Number: 1 Publisher: Nature Publishing Group},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Yu, Zifan; Chen, Meida; Zhang, Zhikang; You, Suya; Ren, Fengbo
TransUPR: A Transformer-based Uncertain Point Refiner for LiDAR Point Cloud Semantic Segmentation Journal Article
In: 2023, (Publisher: arXiv Version Number: 2).
@article{yu_transupr_2023,
  title      = {{TransUPR}: A Transformer-based Uncertain Point Refiner for {LiDAR} Point Cloud Semantic Segmentation},
  author     = {Yu, Zifan and Chen, Meida and Zhang, Zhikang and You, Suya and Ren, Fengbo},
  url        = {https://arxiv.org/abs/2302.08594},
  doi        = {10.48550/ARXIV.2302.08594},
  eprint     = {2302.08594},
  eprinttype = {arXiv},
  year       = {2023},
  date       = {2023-01-01},
  urldate    = {2023-08-24},
  abstract   = {In this work, we target the problem of uncertain points refinement for image-based LiDAR point cloud semantic segmentation (LiDAR PCSS). This problem mainly results from the boundary-blurring problem of convolution neural networks (CNNs) and quantitation loss of spherical projection, which are often hard to avoid for common image-based LiDAR PCSS approaches. We propose a plug-and-play transformer-based uncertain point refiner (TransUPR) to address the problem. Through local feature aggregation, uncertain point localization, and self-attention-based transformer design, TransUPR, integrated into an existing range image-based LiDAR PCSS approach (e.g., CENet), achieves the state-of-the-art performance (68.2% mIoU) on Semantic-KITTI benchmark, which provides a performance improvement of 0.6% on the mIoU.},
  note       = {Publisher: arXiv Version Number: 2},
  keywords   = {},
  pubstate   = {published},
  tppubtype  = {article}
}
Pynadath, David V; Gurney, Nikolos; Kenny, Sarah; Kumar, Rajay; Marsella, Stacy C.; Matuszak, Haley; Mostafa, Hala; Ustun, Volkan; Wu, Peggy; Sequeira, Pedro
Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a Minecraft Search-and-Rescue Task Proceedings Article
In: AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems, pp. 2334–2336, 2023.
@inproceedings{pynadath_effectiveness_2023,
  title     = {Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a {Minecraft} Search-and-Rescue Task},
  author    = {Pynadath, David V and Gurney, Nikolos and Kenny, Sarah and Kumar, Rajay and Marsella, Stacy C. and Matuszak, Haley and Mostafa, Hala and Ustun, Volkan and Wu, Peggy and Sequeira, Pedro},
  url       = {https://dl.acm.org/doi/10.5555/3545946.3598925},
  year      = {2023},
  date      = {2023-01-01},
  booktitle = {AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems},
  pages     = {2334--2336},
  abstract  = {Autonomous agents offer the promise of improved human teamwork through automated assessment and assistance during task performance [15, 16, 18]. Studies of human teamwork have identified various processes that underlie joint task performance, while abstracting away the specifics of the task [7, 11, 13, 17].We present here an agent that focuses exclusively on teamwork-level variables in deciding what interventions to use in assisting a human team. Our agent does not directly observe or model the environment or the people in it, but instead relies on input from analytic components (ACs) (developed by other research teams) that process environmental information and output only teamwork-relevant measures. Our agent models these teamwork variables and updates its beliefs over them using a Bayesian Theory of Mind [1], applying Partially Observable Markov Decision Processes (POMDPs) [9] in a recursive manner to assess the state of the team it is currently observing and to choose interventions to best assist them.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Okado, Yuko; Nye, Benjamin D.; Aguirre, Angelica; Swartout, William
Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution Book Section
In: Wang, Ning; Rebolledo-Mendez, Genaro; Matsuda, Noboru; Santos, Olga C.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education, vol. 13916, pp. 189–201, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36271-2 978-3-031-36272-9, (Series Title: Lecture Notes in Computer Science).
@incollection{wang_can_2023,
  title     = {Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the {CareerFair.ai} Platform at an American Hispanic-Serving Institution},
  author    = {Okado, Yuko and Nye, Benjamin D. and Aguirre, Angelica and Swartout, William},
  editor    = {Wang, Ning and Rebolledo-Mendez, Genaro and Matsuda, Noboru and Santos, Olga C. and Dimitrova, Vania},
  url       = {https://link.springer.com/10.1007/978-3-031-36272-9_16},
  doi       = {10.1007/978-3-031-36272-9_16},
  isbn      = {978-3-031-36271-2 978-3-031-36272-9},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-08-23},
  booktitle = {Artificial Intelligence in Education},
  series    = {Lecture Notes in Computer Science},
  volume    = {13916},
  pages     = {189--201},
  publisher = {Springer Nature Switzerland},
  address   = {Cham},
  note      = {Series Title: Lecture Notes in Computer Science},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Georgila, Kallirroi
Considerations for Child Speech Synthesis for Dialogue Systems Proceedings Article
In: Los Angeles, CA, 2023.
@inproceedings{georgila_considerations_2023,
  author    = {Georgila, Kallirroi},
  title     = {Considerations for Child Speech Synthesis for Dialogue Systems},
  year      = {2023},
  date      = {2023-01-01},
  address   = {Los Angeles, CA},
  url       = {https://kgeorgila.github.io/publications/georgila_aiaic23.pdf},
  abstract  = {We present a number of important issues for consideration with regard to child speech synthesis for dialogue systems. We specifically discuss challenges in building child synthetic voices compared to adult synthetic voices, synthesizing expressive conversational speech, and evaluating speech synthesis quality.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Yang, Jing; Xiao, Hanyuan; Teng, Wenbin; Cai, Yunxuan; Zhao, Yajie
Light Sampling Field and BRDF Representation for Physically-based Neural Rendering Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
@article{yang_light_2023,
  title      = {Light Sampling Field and {BRDF} Representation for Physically-based Neural Rendering},
  author     = {Yang, Jing and Xiao, Hanyuan and Teng, Wenbin and Cai, Yunxuan and Zhao, Yajie},
  url        = {https://arxiv.org/abs/2304.05472},
  doi        = {10.48550/ARXIV.2304.05472},
  eprint     = {2304.05472},
  eprinttype = {arXiv},
  year       = {2023},
  date       = {2023-01-01},
  urldate    = {2023-08-22},
  abstract   = {Physically-based rendering (PBR) is key for immersive rendering effects used widely in the industry to showcase detailed realistic scenes from computer graphics assets. A well-known caveat is that producing the same is computationally heavy and relies on complex capture devices. Inspired by the success in quality and efficiency of recent volumetric neural rendering, we want to develop a physically-based neural shader to eliminate device dependency and significantly boost performance. However, no existing lighting and material models in the current neural rendering approaches can accurately represent the comprehensive lighting models and BRDFs properties required by the PBR process. Thus, this paper proposes a novel lighting representation that models direct and indirect light locally through a light sampling strategy in a learned light sampling field. We also propose BRDF models to separately represent surface/subsurface scattering details to enable complex objects such as translucent material (i.e., skin, jade). We then implement our proposed representations with an end-to-end physically-based neural face skin shader, which takes a standard face asset (i.e., geometry, albedo map, and normal map) and an HDRI for illumination as inputs and generates a photo-realistic rendering as output. Extensive experiments showcase the quality and efficiency of our PBR face skin shader, indicating the effectiveness of our proposed lighting and material representations.},
  note       = {Publisher: arXiv Version Number: 1},
  keywords   = {},
  pubstate   = {published},
  tppubtype  = {article}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation Journal Article
In: Proceedings of the Annual Meeting of the Cognitive Science Society, vol. 45, 2023.
@article{hale_risk_2023,
  title     = {Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation},
  author    = {Hale, James and Kim, Peter and Gratch, Jonathan},
  url       = {https://escholarship.org/uc/item/7n01v4f9#main},
  year      = {2023},
  date      = {2023-01-01},
  journal   = {Proceedings of the Annual Meeting of the Cognitive Science Society},
  volume    = {45},
  abstract  = {Women and minorities obtain lower salaries when negotiating their employment compensation. Some have suggested that automated negotiation and dispute-resolution technology might address such material inequities. These algorithms elicit the multi-criteria preferences of each side of a dispute and arrive at solutions that are efficient and "provably" fair. In a study that explores the potential benefit of these methods, we highlight cognitive factors that may allow inequities to persist despite these methods. Specifically, risk-averse individuals express lower preferences for salary and as risk-aversion is more common in women and minorities, this translates into a ``provably'' fair lower salary. While this may reflect actual underlying differences in preferences across groups, individuals may be confounding their preferences for salary with their risk preference (i.e., their fear of not reaching an agreement), such that these groups achieve worse outcomes than they should. We further highlight that methodological choices in how negotiation processes are often studied can obscure the magnitude of this effect.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Gurney, Nikolos; Pynadath, David V.; Wang, Ning
Comparing Psychometric and Behavioral Predictors of Compliance During Human-AI Interactions Book Section
In: vol. 13832, pp. 175–197, 2023, (arXiv:2302.01854 [cs]).
@incollection{gurney_comparing_2023,
  title     = {Comparing Psychometric and Behavioral Predictors of Compliance During Human-{AI} Interactions},
  author    = {Gurney, Nikolos and Pynadath, David V. and Wang, Ning},
  url       = {http://arxiv.org/abs/2302.01854},
  doi       = {10.1007/978-3-031-30933-5_12},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-08-15},
  volume    = {13832},
  pages     = {175--197},
  abstract  = {Optimization of human-AI teams hinges on the AI's ability to tailor its interaction to individual human teammates. A common hypothesis in adaptive AI research is that minor differences in people's predisposition to trust can significantly impact their likelihood of complying with recommendations from the AI. Predisposition to trust is often measured with self-report inventories that are administered before interactions. We benchmark a popular measure of this kind against behavioral predictors of compliance. We find that the inventory is a less effective predictor of compliance than the behavioral measures in datasets taken from three previous research projects. This suggests a general property that individual differences in initial behavior are more predictive than differences in self-reported trust attitudes. This result also shows a potential for easily accessible behavioral measures to provide an AI with more accurate models without the use of (often costly) survey instruments.},
  note      = {arXiv:2302.01854 [cs]},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Gurney, Nikolos; Pynadath, David; Wang, Ning
My Actions Speak Louder Than Your Words: When User Behavior Predicts Their Beliefs about Agents' Attributes Book Section
In: vol. 14051, pp. 232–248, 2023, (arXiv:2301.09011 [cs]).
@incollection{gurney_my_2023,
title = {My Actions Speak Louder Than Your Words: When User Behavior Predicts Their Beliefs about Agents' Attributes},
author = {Nikolos Gurney and David Pynadath and Ning Wang},
url = {http://arxiv.org/abs/2301.09011},
doi = {10.1007/978-3-031-35894-4_17},
eprint = {2301.09011},
eprinttype = {arXiv},
eprintclass = {cs},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-15},
volume = {14051},
pages = {232--248},
abstract = {An implicit expectation of asking users to rate agents, such as an AI decision-aid, is that they will use only relevant information – ask them about an agent's benevolence, and they should consider whether or not it was kind. Behavioral science, however, suggests that people sometimes use irrelevant information. We identify an instance of this phenomenon, where users who experience better outcomes in a human-agent interaction systematically rated the agent as having better abilities, being more benevolent, and exhibiting greater integrity in a post hoc assessment than users who experienced worse outcome – which were the result of their own behavior – with the same agent. Our analyses suggest the need for augmentation of models so that they account for such biased perceptions as well as mechanisms so that agents can detect and even actively work to correct this and similar biases of users.},
internal-note = {booktitle is missing for this @incollection; DOI 10.1007/978-3-031-35894-4_17 resolves to a Springer volume (vol. 14051, presumably LNCS) -- confirm and add booktitle/series},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Lu, Shuhong; Yoon, Youngwoo; Feng, Andrew
Co-Speech Gesture Synthesis using Discrete Gesture Token Learning Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
@article{lu_co-speech_2023,
title = {Co-Speech Gesture Synthesis using Discrete Gesture Token Learning},
author = {Shuhong Lu and Youngwoo Yoon and Andrew Feng},
url = {https://arxiv.org/abs/2303.12822},
doi = {10.48550/ARXIV.2303.12822},
eprint = {2303.12822},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-04},
abstract = {Synthesizing realistic co-speech gestures is an important and yet unsolved problem for creating believable motions that can drive a humanoid robot to interact and communicate with human users. Such capability will improve the impressions of the robots by human users and will find applications in education, training, and medical services. One challenge in learning the co-speech gesture model is that there may be multiple viable gesture motions for the same speech utterance. The deterministic regression methods can not resolve the conflicting samples and may produce over-smoothed or damped motions. We proposed a two-stage model to address this uncertainty issue in gesture synthesis by modeling the gesture segments as discrete latent codes. Our method utilizes RQ-VAE in the first stage to learn a discrete codebook consisting of gesture tokens from training data. In the second stage, a two-level autoregressive transformer model is used to learn the prior distribution of residual codes conditioned on input speech context. Since the inference is formulated as token sampling, multiple gesture sequences could be generated given the same speech input using top-k sampling. The quantitative results and the user study showed the proposed method outperforms the previous methods and is able to generate realistic and diverse gesture motions.},
note = {Version 1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Filter
2023
Liu, Ruying; Awada, Mohamad; Gerber, Burcin Becerik; Lucas, Gale M.; Roll, Shawn C.
Gender moderates the effects of ambient bergamot scent on stress restoration in offices Journal Article
In: Journal of Environmental Psychology, vol. 91, pp. 102135, 2023, ISSN: 02724944.
Links | BibTeX | Tags: UARC, Virtual Humans
@article{liu_gender_2023,
title = {Gender moderates the effects of ambient bergamot scent on stress restoration in offices},
author = {Ruying Liu and Mohamad Awada and Burcin Becerik Gerber and Gale M. Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0272494423001834},
doi = {10.1016/j.jenvp.2023.102135},
issn = {0272-4944},
year = {2023},
date = {2023-11-01},
urldate = {2023-09-20},
journal = {Journal of Environmental Psychology},
volume = {91},
pages = {102135},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Seyedrezaei, Mirmahdi; Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn
Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in North American Mediterranean climate Journal Article
In: Building and Environment, vol. 244, pp. 110743, 2023, ISSN: 03601323.
Links | BibTeX | Tags: UARC, Virtual Humans
@article{seyedrezaei_interaction_2023,
title = {Interaction effects of indoor environmental quality factors on cognitive performance and perceived comfort of young adults in open plan offices in {North American Mediterranean} climate},
author = {Mirmahdi Seyedrezaei and Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0360132323007709},
doi = {10.1016/j.buildenv.2023.110743},
issn = {0360-1323},
year = {2023},
date = {2023-10-01},
urldate = {2023-09-20},
journal = {Building and Environment},
volume = {244},
pages = {110743},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Kaurloto, Cari; Winn, Jade; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno; Scherer, Stefan
Machine learning for semi-automated scoping reviews Journal Article
In: Intelligent Systems with Applications, vol. 19, pp. 200249, 2023, ISSN: 26673053.
Links | BibTeX | Tags: UARC, Virtual Humans
@article{mozgai_machine_2023,
title = {Machine learning for semi-automated scoping reviews},
author = {Sharon Mozgai and Cari Kaurloto and Jade Winn and Andrew Leeds and Dirk Heylen and Arno Hartholt and Stefan Scherer},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2667305323000741},
doi = {10.1016/j.iswa.2023.200249},
issn = {2667-3053},
year = {2023},
date = {2023-09-01},
urldate = {2023-08-23},
journal = {Intelligent Systems with Applications},
volume = {19},
pages = {200249},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Tran, Minh; Yin, Yufeng; Soleymani, Mohammad
Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition Proceedings Article
In: INTERSPEECH 2023, pp. 636–640, ISCA, 2023.
Links | BibTeX | Tags: Emotions, UARC, Virtual Humans
@inproceedings{tran_personalized_2023,
title = {Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition},
author = {Minh Tran and Yufeng Yin and Mohammad Soleymani},
url = {https://www.isca-speech.org/archive/interspeech_2023/tran23c_interspeech.html},
doi = {10.21437/Interspeech.2023-2170},
year = {2023},
date = {2023-08-01},
urldate = {2023-08-23},
booktitle = {INTERSPEECH 2023},
pages = {636--640},
publisher = {ISCA},
keywords = {Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Ruying; Becerik-Gerber, Burcin; Lucas, Gale M.
Effectiveness of VR-based training on improving occupants’ response and preparedness for active shooter incidents Journal Article
In: Safety Science, vol. 164, pp. 106175, 2023, ISSN: 09257535.
Links | BibTeX | Tags: Simulation, UARC, virtual reality
@article{liu_effectiveness_2023,
title = {Effectiveness of {VR}-based training on improving occupants’ response and preparedness for active shooter incidents},
author = {Ruying Liu and Burcin Becerik-Gerber and Gale M. Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0925753523001170},
doi = {10.1016/j.ssci.2023.106175},
issn = {0925-7535},
year = {2023},
date = {2023-08-01},
urldate = {2023-08-22},
journal = {Safety Science},
volume = {164},
pages = {106175},
keywords = {Simulation, UARC, virtual reality},
pubstate = {published},
tppubtype = {article}
}
Tran, Minh; Soleymani, Mohammad
A Speech Representation Anonymization Framework via Selective Noise Perturbation Proceedings Article
In: ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1–5, IEEE, Rhodes Island, Greece, 2023, ISBN: 978-1-72816-327-7.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{tran_speech_2023,
title = {A Speech Representation Anonymization Framework via Selective Noise Perturbation},
author = {Minh Tran and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/10095173/},
doi = {10.1109/ICASSP49357.2023.10095173},
isbn = {978-1-72816-327-7},
year = {2023},
date = {2023-06-01},
urldate = {2023-08-23},
booktitle = {ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1--5},
publisher = {IEEE},
address = {Rhodes Island, Greece},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chadalapaka, Viswanath; Ustun, Volkan; Liu, Lixing
Leveraging Graph Networks to Model Environments in Reinforcement Learning Journal Article
In: FLAIRS, vol. 36, 2023, ISSN: 2334-0762.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, UARC
@article{chadalapaka_leveraging_2023,
author   = {Viswanath Chadalapaka and Volkan Ustun and Lixing Liu},
title    = {Leveraging Graph Networks to Model Environments in Reinforcement Learning},
journal  = {FLAIRS},
volume   = {36},
year     = {2023},
date     = {2023-05-01},
urldate  = {2023-08-04},
url      = {https://journals.flvc.org/FLAIRS/article/view/133118},
doi      = {10.32473/flairs.36.133118},
issn     = {2334-0762},
abstract = {This paper proposes leveraging graph neural networks (GNNs) to model an agent’s environment to construct superior policy networks in reinforcement learning (RL). To this end, we explore the effects of different combinations of GNNs and graph network pooling functions on policy performance. We also run experiments at different levels of problem complexity, which affect how easily we expect an agent to learn an optimal policy and therefore show whether or not graph networks are effective at various problem complexity levels. The efficacy of our approach is shown via experimentation in a partially-observable, non-stationary environment that parallels the highly-practical scenario of a military training exercise with human trainees, where the learning goal is to become the best sparring partner possible for human trainees. Our results present that our models can generate better-performing sparring partners by employing GNNs, as demonstrated by these experiments in the proof-of-concept environment. We also explore our model’s applicability in Multi-Agent RL scenarios. Our code is available online at https://github.com/Derposoft/GNNsAsEnvs.},
keywords = {CogArch, Cognitive Architecture, UARC},
pubstate = {published},
tppubtype = {article}
}
Aris, Timothy; Ustun, Volkan; Kumar, Rajay
Learning to Take Cover with Navigation-Based Waypoints via Reinforcement Learning Journal Article
In: FLAIRS, vol. 36, 2023, ISSN: 2334-0762.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, UARC, Virtual Humans
@article{aris_learning_2023,
author   = {Timothy Aris and Volkan Ustun and Rajay Kumar},
title    = {Learning to Take Cover with Navigation-Based Waypoints via Reinforcement Learning},
journal  = {FLAIRS},
volume   = {36},
year     = {2023},
date     = {2023-05-01},
urldate  = {2023-08-04},
url      = {https://journals.flvc.org/FLAIRS/article/view/133348},
doi      = {10.32473/flairs.36.133348},
issn     = {2334-0762},
abstract = {This paper presents a reinforcement learning model designed to learn how to take cover on geo-specific terrains, an essential behavior component for military training simulations. Training of the models is performed on the Rapid Integration and Development Environment (RIDE) leveraging the Unity ML-Agents framework. This work expands on previous work on raycast-based agents by increasing the number of enemies from one to three. We demonstrate an automated way of generating training and testing data within geo-specific terrains. We show that replacing the action space with a more abstracted, navmesh-based waypoint movement system can increase the generality and success rate of the models while providing similar results to our previous paper's results regarding retraining across terrains. We also comprehensively evaluate the differences between these and the previous models. Finally, we show that incorporating pixels into the model's input can increase performance at the cost of longer training times.},
keywords = {CogArch, Cognitive Architecture, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Liu, Ruying; Zhu, Runhe; Becerik‐Gerber, Burcin; Lucas, Gale M.; Southers, Erroll G.
Be prepared: How training and emergency type affect evacuation behaviour Journal Article
In: Computer Assisted Learning, pp. jcal.12812, 2023, ISSN: 0266-4909, 1365-2729.
Abstract | Links | BibTeX | Tags: Simulation, UARC
@article{liu_be_2023,
title = {Be prepared: How training and emergency type affect evacuation behaviour},
author = {Ruying Liu and Runhe Zhu and Burcin Becerik-Gerber and Gale M. Lucas and Erroll G. Southers},
url = {https://onlinelibrary.wiley.com/doi/10.1111/jcal.12812},
doi = {10.1111/jcal.12812},
issn = {0266-4909, 1365-2729},
year = {2023},
date = {2023-04-01},
urldate = {2023-08-22},
journal = {Journal of Computer Assisted Learning},
pages = {jcal.12812},
abstract = {Background
Video‐based training has been widely adopted by private organizations and public authorities to educate occupants on various types of building emergencies. However, the effectiveness of video‐based training for preparing occupants for building emergencies has not been rigorously studied nor has the impact of emergency type been investigated on training effectiveness.
Objectives
This study examines whether video‐based training is an effective method to prepare occupants for building emergencies and how the effectiveness differs in the context of different building emergencies.
Methods
We simulated fire and active shooter emergencies in a virtual office building and conducted evacuation experiments to examine participants' emergency responses using both objective and subjective metrics. A total of 108 participants were recruited and responded to the fire or active shooter incident with or without video‐based training.
Results and Conclusions
The results revealed that participants with video‐based training more often chose to follow other recommendations when responding to building emergencies instead of simply following others. Results from ANOVA showed that training increased participants' self‐efficacy significantly, especially for those in the active shooter group. Moreover, participants in the active shooter simulation had a higher level of response efficacy than those in the fire emergency simulation. Our results also demonstrated the influence of emergency type on participants' final decisions and considerations of the recommendations.
Implications
Our results suggested that video‐based training is effective in improving participants' emergency preparedness and changing their behaviour patterns to a certain extent such as reducing following behaviour and encouraging safe evacuations. Additionally, statistically significant interactions between video‐based training and emergency types suggested that training effectiveness should be considered in accordance with the emergency type.
Lay Description
What is already known about this topic
People can behave differently in different types of building emergencies. Understanding human behaviours in building emergencies is essential for developing emergency preparedness strategies.
Emergency training is important for building occupants and video is a widely used media for emergency training. However, its training effectiveness needs to be evaluated.
What this paper adds
We used virtual environments to investigate evacuation behaviour.
The effectiveness of video‐based training and human responses in building emergencies were studied on both subjective responses and objective measurements.
Video‐based training significantly reduced the occurrence of following behaviours.
The different natures of the fire emergency and active shooter incidents shape the effectiveness of video‐based training.
Implications of study findings for practitioners
Video‐based training can improve building occupants' emergency preparedness to a certain extent.
Emergency training media should be designed considering the influence of emergency type.},
keywords = {Simulation, UARC},
pubstate = {published},
tppubtype = {article}
}
Background
Video‐based training has been widely adopted by private organizations and public authorities to educate occupants on various types of building emergencies. However, the effectiveness of video‐based training for preparing occupants for building emergencies has not been rigorously studied nor has the impact of emergency type been investigated on training effectiveness.
Objectives
This study examines whether video‐based training is an effective method to prepare occupants for building emergencies and how the effectiveness differs in the context of different building emergencies.
Methods
We simulated fire and active shooter emergencies in a virtual office building and conducted evacuation experiments to examine participants' emergency responses using both objective and subjective metrics. A total of 108 participants were recruited and responded to the fire or active shooter incident with or without video‐based training.
Results and Conclusions
The results revealed that participants with video‐based training more often chose to follow other recommendations when responding to building emergencies instead of simply following others. Results from ANOVA showed that training increased participants' self‐efficacy significantly, especially for those in the active shooter group. Moreover, participants in the active shooter simulation had a higher level of response efficacy than those in the fire emergency simulation. Our results also demonstrated the influence of emergency type on participants' final decisions and considerations of the recommendations.
Implications
Our results suggested that video‐based training is effective in improving participants' emergency preparedness and changing their behaviour patterns to a certain extent such as reducing following behaviour and encouraging safe evacuations. Additionally, statistically significant interactions between video‐based training and emergency types suggested that training effectiveness should be considered in accordance with the emergency type.
,
Lay Description
What is already known about this topic
People can behave differently in different types of building emergencies. Understanding human behaviours in building emergencies is essential for developing emergency preparedness strategies.
Emergency training is important for building occupants and video is a widely used media for emergency training. However, its training effectiveness needs to be evaluated.
What this paper adds
We used virtual environments to investigate evacuation behaviour.
The effectiveness of video‐based training and human responses in building emergencies were studied on both subjective responses and objective measurements.
Video‐based training significantly reduced the occurrence of following behaviours.
The different natures of the fire emergency and active shooter incidents shape the effectiveness of video‐based training.
Implications of study findings for practitioners
Video‐based training can improve building occupants' emergency preparedness to a certain extent.
Emergency training media should be designed considering the influence of emergency type.
Hsu, Wan-Yu; Anguera, Joaquin A.; Rizzo, Albert; Campusano, Richard; Chiaravalloti, Nancy D.; DeLuca, John; Gazzaley, Adam; Bove, Riley M.
A virtual reality program to assess cognitive function in multiple sclerosis: A pilot study Journal Article
In: Frontiers in Human Neuroscience, 2023, (Place: Lausanne, Switzerland Publisher: Frontiers Research Foundation Section: ORIGINAL RESEARCH article).
Abstract | Links | BibTeX | Tags: UARC
@article{hsu_virtual_2023,
author   = {Wan-Yu Hsu and Joaquin A. Anguera and Albert Rizzo and Richard Campusano and Nancy D. Chiaravalloti and John DeLuca and Adam Gazzaley and Riley M. Bove},
title    = {A virtual reality program to assess cognitive function in multiple sclerosis: A pilot study},
journal  = {Frontiers in Human Neuroscience},
year     = {2023},
date     = {2023-03-01},
urldate  = {2023-03-31},
url      = {https://www.proquest.com/docview/2787027204/abstract/BEA88F7BB72B4623PQ/1},
doi      = {10.3389/fnhum.2023.1139316},
abstract = {Introduction: Cognitive impairment is a debilitating symptom in people with multiple sclerosis (MS). Most of the neuropsychological tasks have little resemblance to everyday life. There is a need for ecologically valid tools for assessing cognition in real-life functional contexts in MS. One potential solution would involve the use of virtual reality (VR) to exert finer control over the task presentation environment; however, VR studies in the MS population are scarce. Objectives: To explore the utility and feasibility of a VR program for cognitive assessment in MS. Methods: A VR classroom embedded with a continuous performance task (CPT) was assessed in 10 non-MS adults and 10 people with MS with low cognitive functioning. Participants performed the CPT with distractors (ie. WD) and without distractors (ie. ND). The Symbol Digit Modalities Test (SDMT), California Verbal Learning Test – II (CVLT-II), and a feedback survey on the VR program were administered. Results: People with MS exhibited greater reaction time variability (RTV) compared to non-MS participants, and greater RTV in both WD and ND conditions was associated with lower SDMT. Conclusions: VR tools warrant further research to determine their value as an ecologically valid platform for assessing cognition and everyday functioning in people with MS.},
note     = {Place: Lausanne, Switzerland
Publisher: Frontiers Research Foundation
Section: ORIGINAL RESEARCH article},
keywords = {UARC},
pubstate = {published},
tppubtype = {article}
}
Lucas, Gale M.; Mell, Johnathan; Boberg, Jill; Zenone, Forrest; Visser, Ewart J.; Tossell, Chad; Seech, Todd
Customizing virtual interpersonal skills training applications may not improve trainee performance Journal Article
In: Sci Rep, vol. 13, no. 1, pp. 78, 2023, ISSN: 2045-2322, (Number: 1 Publisher: Nature Publishing Group).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{lucas_customizing_2023,
title = {Customizing virtual interpersonal skills training applications may not improve trainee performance},
author = {Gale M. Lucas and Johnathan Mell and Jill Boberg and Forrest Zenone and Ewart J. Visser and Chad Tossell and Todd Seech},
url = {https://www.nature.com/articles/s41598-022-27154-2},
doi = {10.1038/s41598-022-27154-2},
issn = {2045-2322},
year = {2023},
date = {2023-01-01},
urldate = {2023-03-31},
journal = {Scientific Reports},
volume = {13},
number = {1},
pages = {78},
abstract = {While some theoretical perspectives imply that the context of a virtual training should be customized to match the intended context where those skills would ultimately be applied, others suggest this might not be necessary for learning. It is important to determine whether manipulating context matters for performance in training applications because customized virtual training systems made for specific use cases are more costly than generic “off-the-shelf” ones designed for a broader set of users. Accordingly, we report a study where military cadets use a virtual platform to practice their negotiation skills, and are randomly assigned to one of two virtual context conditions: military versus civilian. Out of 28 measures capturing performance in the negotiation, there was only one significant result: cadets in the civilian condition politely ask the agent to make an offer significantly more than those in the military condition. These results imply that—for this interpersonal skills application, and perhaps ones like it—virtual context may matter very little for performance during social skills training, and that commercial systems may yield real benefits to military scenarios with little-to-no modification.},
note = {Number: 1
Publisher: Nature Publishing Group},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Yu, Zifan; Chen, Meida; Zhang, Zhikang; You, Suya; Ren, Fengbo
TransUPR: A Transformer-based Uncertain Point Refiner for LiDAR Point Cloud Semantic Segmentation Journal Article
In: 2023, (Publisher: arXiv Version Number: 2).
Abstract | Links | BibTeX | Tags: STG, UARC
@article{yu_transupr_2023,
title = {{TransUPR}: A Transformer-based Uncertain Point Refiner for {LiDAR} Point Cloud Semantic Segmentation},
author = {Zifan Yu and Meida Chen and Zhikang Zhang and Suya You and Fengbo Ren},
url = {https://arxiv.org/abs/2302.08594},
doi = {10.48550/ARXIV.2302.08594},
eprint = {2302.08594},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-24},
abstract = {In this work, we target the problem of uncertain points refinement for image-based LiDAR point cloud semantic segmentation (LiDAR PCSS). This problem mainly results from the boundary-blurring problem of convolution neural networks (CNNs) and quantitation loss of spherical projection, which are often hard to avoid for common image-based LiDAR PCSS approaches. We propose a plug-and-play transformer-based uncertain point refiner (TransUPR) to address the problem. Through local feature aggregation, uncertain point localization, and self-attention-based transformer design, TransUPR, integrated into an existing range image-based LiDAR PCSS approach (e.g., CENet), achieves the state-of-the-art performance (68.2% mIoU) on Semantic-KITTI benchmark, which provides a performance improvement of 0.6% on the mIoU.},
note = {Version 2},
keywords = {STG, UARC},
pubstate = {published},
tppubtype = {article}
}
Pynadath, David V; Gurney, Nikolos; Kenny, Sarah; Kumar, Rajay; Marsella, Stacy C.; Matuszak, Haley; Mostafa, Hala; Ustun, Volkan; Wu, Peggy; Sequeira, Pedro
Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a Minecraft Search-and-Rescue Task Proceedings Article
In: AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems, pp. Pages 2334–2336, 2023.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@inproceedings{pynadath_effectiveness_2023,
title = {Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a {Minecraft} Search-and-Rescue Task},
author = {David V Pynadath and Nikolos Gurney and Sarah Kenny and Rajay Kumar and Stacy C. Marsella and Haley Matuszak and Hala Mostafa and Volkan Ustun and Peggy Wu and Pedro Sequeira},
url = {https://dl.acm.org/doi/10.5555/3545946.3598925},
doi = {10.5555/3545946.3598925},
year = {2023},
date = {2023-01-01},
booktitle = {AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems},
pages = {2334--2336},
abstract = {Autonomous agents offer the promise of improved human teamwork through automated assessment and assistance during task performance [15, 16, 18]. Studies of human teamwork have identified various processes that underlie joint task performance, while abstracting away the specifics of the task [7, 11, 13, 17].We present here an agent that focuses exclusively on teamwork-level variables in deciding what interventions to use in assisting a human team. Our agent does not directly observe or model the environment or the people in it, but instead relies on input from analytic components (ACs) (developed by other research teams) that process environmental information and output only teamwork-relevant measures. Our agent models these teamwork variables and updates its beliefs over them using a Bayesian Theory of Mind [1], applying Partially Observable Markov Decision Processes (POMDPs) [9] in a recursive manner to assess the state of the team it is currently observing and to choose interventions to best assist them.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Okado, Yuko; Nye, Benjamin D.; Aguirre, Angelica; Swartout, William
Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution Book Section
In: Wang, Ning; Rebolledo-Mendez, Genaro; Matsuda, Noboru; Santos, Olga C.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education, vol. 13916, pp. 189–201, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36271-2 978-3-031-36272-9, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: UARC, Virtual Humans
@incollection{wang_can_2023,
title = {Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the {CareerFair.ai} Platform at an {American Hispanic-Serving Institution}},
author = {Yuko Okado and Benjamin D. Nye and Angelica Aguirre and William Swartout},
editor = {Ning Wang and Genaro Rebolledo-Mendez and Noboru Matsuda and Olga C. Santos and Vania Dimitrova},
url = {https://link.springer.com/10.1007/978-3-031-36272-9_16},
doi = {10.1007/978-3-031-36272-9_16},
isbn = {978-3-031-36271-2, 978-3-031-36272-9},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-23},
booktitle = {Artificial Intelligence in Education},
series = {Lecture Notes in Computer Science},
volume = {13916},
pages = {189--201},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Georgila, Kallirroi
Considerations for Child Speech Synthesis for Dialogue Systems Proceedings Article
In: Los Angeles, CA, 2023.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{georgila_considerations_2023,
title = {Considerations for Child Speech Synthesis for Dialogue Systems},
author = {Kallirroi Georgila},
url = {https://kgeorgila.github.io/publications/georgila_aiaic23.pdf},
year = {2023},
date = {2023-01-01},
address = {Los Angeles, CA},
abstract = {We present a number of important issues for consideration with regard to child speech synthesis for dialogue systems. We specifically discuss challenges in building child synthetic voices compared to adult synthetic voices, synthesizing expressive conversational speech, and evaluating speech synthesis quality.},
internal-note = {booktitle is missing (required for @inproceedings); the PDF filename suggests an AIAIC 2023 venue -- confirm and add. Note also that address here appears to be the conference venue, not the publisher city.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Yang, Jing; Xiao, Hanyuan; Teng, Wenbin; Cai, Yunxuan; Zhao, Yajie
Light Sampling Field and BRDF Representation for Physically-based Neural Rendering Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
Abstract | Links | BibTeX | Tags: Computer Vision and Pattern Recognition (cs.CV), UARC
@article{yang_light_2023,
title = {Light Sampling Field and BRDF Representation for Physically-based Neural Rendering},
author = {Jing Yang and Hanyuan Xiao and Wenbin Teng and Yunxuan Cai and Yajie Zhao},
url = {https://arxiv.org/abs/2304.05472},
doi = {10.48550/ARXIV.2304.05472},
eprint = {2304.05472},
eprinttype = {arXiv},
eprintclass = {cs.CV},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-22},
abstract = {Physically-based rendering (PBR) is key for immersive rendering effects used widely in the industry to showcase detailed realistic scenes from computer graphics assets. A well-known caveat is that producing the same is computationally heavy and relies on complex capture devices. Inspired by the success in quality and efficiency of recent volumetric neural rendering, we want to develop a physically-based neural shader to eliminate device dependency and significantly boost performance. However, no existing lighting and material models in the current neural rendering approaches can accurately represent the comprehensive lighting models and BRDFs properties required by the PBR process. Thus, this paper proposes a novel lighting representation that models direct and indirect light locally through a light sampling strategy in a learned light sampling field. We also propose BRDF models to separately represent surface/subsurface scattering details to enable complex objects such as translucent material (i.e., skin, jade). We then implement our proposed representations with an end-to-end physically-based neural face skin shader, which takes a standard face asset (i.e., geometry, albedo map, and normal map) and an HDRI for illumination as inputs and generates a photo-realistic rendering as output. Extensive experiments showcase the quality and efficiency of our PBR face skin shader, indicating the effectiveness of our proposed lighting and material representations.},
note = {Publisher: arXiv
Version Number: 1},
keywords = {Computer Vision and Pattern Recognition (cs.CV), UARC},
pubstate = {published},
tppubtype = {article}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation Journal Article
In: Proceedings of the Annual Meeting of the Cognitive Science Society, vol. 45, 2023.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{hale_risk_2023,
title = {Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation},
author = {James Hale and Peter Kim and Jonathan Gratch},
url = {https://escholarship.org/uc/item/7n01v4f9#main},
year = {2023},
date = {2023-01-01},
journal = {Proceedings of the Annual Meeting of the Cognitive Science Society},
volume = {45},
abstract = {Women and minorities obtain lower salaries when negotiating their employment compensation. Some have suggested that automated negotiation and dispute-resolution technology might address such material inequities. These algorithms elicit the multi-criteria preferences of each side of a dispute and arrive at solutions that are efficient and "provably" fair. In a study that explores the potential benefit of these methods, we highlight cognitive factors that may allow inequities to persist despite these methods. Specifically, risk-averse individuals express lower preferences for salary and as risk-aversion is more common in women and minorities, this translates into a ``provably'' fair lower salary. While this may reflect actual underlying differences in preferences across groups, individuals may be confounding their preferences for salary with their risk preference (i.e., their fear of not reaching an agreement), such that these groups achieve worse outcomes than they should. We further highlight that methodological choices in how negotiation processes are often studied can obscure the magnitude of this effect.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gurney, Nikolos; Pynadath, David V.; Wang, Ning
Comparing Psychometric and Behavioral Predictors of Compliance During Human-AI Interactions Book Section
In: vol. 13832, pp. 175–197, 2023, (arXiv:2302.01854 [cs]).
Abstract | Links | BibTeX | Tags: AI, Social Simulation, UARC
@incollection{gurney_comparing_2023,
title = {Comparing Psychometric and Behavioral Predictors of Compliance During Human-AI Interactions},
author = {Nikolos Gurney and David V. Pynadath and Ning Wang},
url = {http://arxiv.org/abs/2302.01854},
doi = {10.1007/978-3-031-30933-5_12},
eprint = {2302.01854},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-15},
volume = {13832},
pages = {175--197},
abstract = {Optimization of human-AI teams hinges on the AI's ability to tailor its interaction to individual human teammates. A common hypothesis in adaptive AI research is that minor differences in people's predisposition to trust can significantly impact their likelihood of complying with recommendations from the AI. Predisposition to trust is often measured with self-report inventories that are administered before interactions. We benchmark a popular measure of this kind against behavioral predictors of compliance. We find that the inventory is a less effective predictor of compliance than the behavioral measures in datasets taken from three previous research projects. This suggests a general property that individual differences in initial behavior are more predictive than differences in self-reported trust attitudes. This result also shows a potential for easily accessible behavioral measures to provide an AI with more accurate models without the use of (often costly) survey instruments.},
note = {arXiv:2302.01854 [cs]},
keywords = {AI, Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Gurney, Nikolos; Pynadath, David; Wang, Ning
My Actions Speak Louder Than Your Words: When User Behavior Predicts Their Beliefs about Agents' Attributes Book Section
In: vol. 14051, pp. 232–248, 2023, (arXiv:2301.09011 [cs]).
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@incollection{gurney_my_2023,
title = {My Actions Speak Louder Than Your Words: When User Behavior Predicts Their Beliefs about Agents' Attributes},
author = {Nikolos Gurney and David Pynadath and Ning Wang},
url = {http://arxiv.org/abs/2301.09011},
doi = {10.1007/978-3-031-35894-4_17},
eprint = {2301.09011},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-15},
volume = {14051},
pages = {232--248},
abstract = {An implicit expectation of asking users to rate agents, such as an AI decision-aid, is that they will use only relevant information – ask them about an agent's benevolence, and they should consider whether or not it was kind. Behavioral science, however, suggests that people sometimes use irrelevant information. We identify an instance of this phenomenon, where users who experience better outcomes in a human-agent interaction systematically rated the agent as having better abilities, being more benevolent, and exhibiting greater integrity in a post hoc assessment than users who experienced worse outcome – which were the result of their own behavior – with the same agent. Our analyses suggest the need for augmentation of models so that they account for such biased perceptions as well as mechanisms so that agents can detect and even actively work to correct this and similar biases of users.},
note = {arXiv:2301.09011 [cs]},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Lu, Shuhong; Yoon, Youngwoo; Feng, Andrew
Co-Speech Gesture Synthesis using Discrete Gesture Token Learning Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{lu_co-speech_2023,
title = {Co-Speech Gesture Synthesis using Discrete Gesture Token Learning},
author = {Shuhong Lu and Youngwoo Yoon and Andrew Feng},
url = {https://arxiv.org/abs/2303.12822},
doi = {10.48550/ARXIV.2303.12822},
eprint = {2303.12822},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-04},
abstract = {Synthesizing realistic co-speech gestures is an important and yet unsolved problem for creating believable motions that can drive a humanoid robot to interact and communicate with human users. Such capability will improve the impressions of the robots by human users and will find applications in education, training, and medical services. One challenge in learning the co-speech gesture model is that there may be multiple viable gesture motions for the same speech utterance. The deterministic regression methods can not resolve the conflicting samples and may produce over-smoothed or damped motions. We proposed a two-stage model to address this uncertainty issue in gesture synthesis by modeling the gesture segments as discrete latent codes. Our method utilizes RQ-VAE in the first stage to learn a discrete codebook consisting of gesture tokens from training data. In the second stage, a two-level autoregressive transformer model is used to learn the prior distribution of residual codes conditioned on input speech context. Since the inference is formulated as token sampling, multiple gesture sequences could be generated given the same speech input using top-k sampling. The quantitative results and the user study showed the proposed method outperforms the previous methods and is able to generate realistic and diverse gesture motions.},
note = {Publisher: arXiv
Version Number: 1},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Melo, Celso M. De; Gratch, Jonathan; Marsella, Stacy; Pelachaud, Catherine
Social Functions of Machine Emotional Expressions Journal Article
In: Proc. IEEE, pp. 1–16, 2023, ISSN: 0018-9219, 1558-2256.
Links | BibTeX | Tags: UARC, Virtual Humans
@article{de_melo_social_2023,
title = {Social Functions of Machine Emotional Expressions},
author = {Celso M. {De Melo} and Jonathan Gratch and Stacy Marsella and Catherine Pelachaud},
url = {https://ieeexplore.ieee.org/document/10093227/},
doi = {10.1109/JPROC.2023.3261137},
issn = {0018-9219, 1558-2256},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-04},
journal = {Proc. IEEE},
pages = {1--16},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2022
Zhu, Runhe; Lucas, Gale M.; Becerik-Gerber, Burcin; Southers, Erroll G.; Landicho, Earl
The impact of security countermeasures on human behavior during active shooter incidents Journal Article
In: Sci Rep, vol. 12, no. 1, pp. 929, 2022, ISSN: 2045-2322.
Abstract | Links | BibTeX | Tags: DTIC, UARC
@article{zhu_impact_2022,
title = {The impact of security countermeasures on human behavior during active shooter incidents},
author = {Runhe Zhu and Gale M. Lucas and Burcin Becerik-Gerber and Erroll G. Southers and Earl Landicho},
url = {https://www.nature.com/articles/s41598-022-04922-8},
doi = {10.1038/s41598-022-04922-8},
issn = {2045-2322},
year = {2022},
date = {2022-12-01},
urldate = {2022-09-26},
journal = {Sci Rep},
volume = {12},
number = {1},
pages = {929},
abstract = {Active shooter incidents represent an increasing threat to American society, especially in commercial and educational buildings. In recent years, a wide variety of security countermeasures have been recommended by public and governmental agencies. Many of these countermeasures are aimed to increase building security, yet their impact on human behavior when an active shooter incident occurs remains underexplored. To fill this research gap, we conducted virtual experiments to evaluate the impact of countermeasures on human behavior during active shooter incidents. A total of 162 office workers and middle/high school teachers were recruited to respond to an active shooter incident in virtual office and school buildings with or without the implementation of multiple countermeasures. The experiment results showed countermeasures significantly influenced participants’ response time and decisions (e.g., run, hide, fight). Participants’ responses and perceptions of the active shooter incident were also contingent on their daily roles, as well as building and social contexts. Teachers had more concerns for occupants’ safety than office workers. Moreover, teachers had more positive perceptions of occupants in the school, whereas office workers had more positive perceptions of occupants in the office.},
keywords = {DTIC, UARC},
pubstate = {published},
tppubtype = {article}
}
Harvey, Philip D.; Depp, Colin A.; Rizzo, Albert A.; Strauss, Gregory P.; Spelber, David; Carpenter, Linda L.; Kalin, Ned H.; Krystal, John H.; McDonald, William M.; Nemeroff, Charles B.; Rodriguez, Carolyn I.; Widge, Alik S.; Torous, John
Technology and Mental Health: State of the Art for Assessment and Treatment Journal Article
In: AJP, vol. 179, no. 12, pp. 897–914, 2022, ISSN: 0002-953X, 1535-7228.
Links | BibTeX | Tags: MedVR, UARC
@article{harvey_technology_2022,
title = {Technology and Mental Health: State of the Art for Assessment and Treatment},
author = {Philip D. Harvey and Colin A. Depp and Albert A. Rizzo and Gregory P. Strauss and David Spelber and Linda L. Carpenter and Ned H. Kalin and John H. Krystal and William M. McDonald and Charles B. Nemeroff and Carolyn I. Rodriguez and Alik S. Widge and John Torous},
url = {http://ajp.psychiatryonline.org/doi/10.1176/appi.ajp.21121254},
doi = {10.1176/appi.ajp.21121254},
issn = {0002-953X, 1535-7228},
year = {2022},
date = {2022-12-01},
urldate = {2023-08-22},
journal = {AJP},
volume = {179},
number = {12},
pages = {897--914},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Lu, Shuhong; Feng, Andrew
The DeepMotion entry to the GENEA Challenge 2022 Proceedings Article
In: INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION, pp. 790–796, ACM, Bengaluru India, 2022, ISBN: 978-1-4503-9390-4.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lu_deepmotion_2022,
title = {The DeepMotion entry to the GENEA Challenge 2022},
author = {Shuhong Lu and Andrew Feng},
url = {https://dl.acm.org/doi/10.1145/3536221.3558059},
doi = {10.1145/3536221.3558059},
isbn = {978-1-4503-9390-4},
year = {2022},
date = {2022-11-01},
urldate = {2023-08-24},
booktitle = {International Conference on Multimodal Interaction},
pages = {790--796},
publisher = {ACM},
address = {Bengaluru India},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Feng, Andrew; Shin, Samuel; Yoon, Youngwoo
A Tool for Extracting 3D Avatar-Ready Gesture Animations from Monocular Videos Proceedings Article
In: Proceedings of the 15th ACM SIGGRAPH Conference on Motion, Interaction and Games, pp. 1–7, ACM, Guanajuato Mexico, 2022, ISBN: 978-1-4503-9888-6.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{feng_tool_2022,
title = {A Tool for Extracting 3D Avatar-Ready Gesture Animations from Monocular Videos},
author = {Andrew Feng and Samuel Shin and Youngwoo Yoon},
url = {https://dl.acm.org/doi/10.1145/3561975.3562953},
doi = {10.1145/3561975.3562953},
isbn = {978-1-4503-9888-6},
year = {2022},
date = {2022-11-01},
urldate = {2023-08-04},
booktitle = {Proceedings of the 15th ACM SIGGRAPH Conference on Motion, Interaction and Games},
pages = {1--7},
publisher = {ACM},
address = {Guanajuato Mexico},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Meida; Hu, Qingyong; Yu, Zifan; Thomas, Hugues; Feng, Andrew; Hou, Yu; McCullough, Kyle; Ren, Fengbo; Soibelman, Lucio
STPLS3D: A Large-Scale Synthetic and Real Aerial Photogrammetry 3D Point Cloud Dataset Miscellaneous
2022, (arXiv:2203.09065 [cs]).
Abstract | Links | BibTeX | Tags: UARC
@misc{chen_stpls3d_2022,
title = {STPLS3D: A Large-Scale Synthetic and Real Aerial Photogrammetry 3D Point Cloud Dataset},
author = {Meida Chen and Qingyong Hu and Zifan Yu and Hugues Thomas and Andrew Feng and Yu Hou and Kyle McCullough and Fengbo Ren and Lucio Soibelman},
url = {http://arxiv.org/abs/2203.09065},
eprint = {2203.09065},
eprinttype = {arXiv},
year = {2022},
date = {2022-10-01},
urldate = {2023-08-22},
publisher = {arXiv},
abstract = {Although various 3D datasets with different functions and scales have been proposed recently, it remains challenging for individuals to complete the whole pipeline of large-scale data collection, sanitization, and annotation. Moreover, the created datasets usually suffer from extremely imbalanced class distribution or partial low-quality data samples. Motivated by this, we explore the procedurally synthetic 3D data generation paradigm to equip individuals with the full capability of creating large-scale annotated photogrammetry point clouds. Specifically, we introduce a synthetic aerial photogrammetry point clouds generation pipeline that takes full advantage of open geospatial data sources and off-the-shelf commercial packages. Unlike generating synthetic data in virtual games, where the simulated data usually have limited gaming environments created by artists, the proposed pipeline simulates the reconstruction process of the real environment by following the same UAV flight pattern on different synthetic terrain shapes and building densities, which ensure similar quality, noise pattern, and diversity with real data. In addition, the precise semantic and instance annotations can be generated fully automatically, avoiding the expensive and time-consuming manual annotation. Based on the proposed pipeline, we present a richly-annotated synthetic 3D aerial photogrammetry point cloud dataset, termed STPLS3D, with more than 16 $km^2$ of landscapes and up to 18 fine-grained semantic categories. For verification purposes, we also provide a parallel dataset collected from four areas in the real environment. Extensive experiments conducted on our datasets demonstrate the effectiveness and quality of the proposed synthetic dataset.},
note = {arXiv:2203.09065 [cs]},
keywords = {UARC},
pubstate = {published},
tppubtype = {misc}
}
Hartholt, Arno; Fast, Ed; Li, Zongjian; Kim, Kevin; Leeds, Andrew; Mozgai, Sharon
Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, ACM, Faro Portugal, 2022, ISBN: 978-1-4503-9248-8.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{hartholt_re-architecting_2022,
title = {Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development},
author = {Arno Hartholt and Ed Fast and Zongjian Li and Kevin Kim and Andrew Leeds and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3514197.3549671},
doi = {10.1145/3514197.3549671},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-15},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1--8},
publisher = {ACM},
address = {Faro Portugal},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Eugene; McNulty, Zachary; Gentle, Alex; Pradhan, Prerak Tusharkumar; Gratch, Jonathan
Examining the impact of emotion and agency on negotiator behavior Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
Abstract | Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{lee_examining_2022,
title = {Examining the impact of emotion and agency on negotiator behavior},
author = {Eugene Lee and Zachary McNulty and Alex Gentle and Prerak Tusharkumar Pradhan and Jonathan Gratch},
url = {https://doi.org/10.1145/3514197.3549673},
doi = {10.1145/3514197.3549673},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-27},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1--3},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '22},
abstract = {Virtual human expressions can shape user behavior [1, 2, 3], yet in negotiation, findings have been underwhelming. For example, human negotiators can use anger to claim value (i.e., extract concessions) [4], but anger has no effect when exhibited by a virtual human [5]. Other psychological work suggests that emotions can create value (e.g., happy negotiators can better discover tradeoffs across issues that "grow the pie"), but little research has examined how virtual human expressions shape value creation. Here we present an agent architecture and pilot study that examines differences between how the emotional expressions of human and virtual-human opponents shape value claiming and value creation. We replicate the finding that virtual human anger fails to influence value claiming but discover counter-intuitive findings on value creation. We argue these findings highlight the potential for intelligent virtual humans to yield insight into human psychology.},
keywords = {DTIC, Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hale, James; Jalan, Harsh; Saini, Nidhi; Tan, Shao Ling; Woo, Junhyuck; Gratch, Jonathan
Negotiation game to introduce non-linear utility Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{hale_negotiation_2022,
title = {Negotiation game to introduce non-linear utility},
author = {James Hale and Harsh Jalan and Nidhi Saini and Shao Ling Tan and Junhyuck Woo and Jonathan Gratch},
url = {https://doi.org/10.1145/3514197.3549678},
doi = {10.1145/3514197.3549678},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-27},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1--3},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '22},
abstract = {Much prior work in automated negotiation makes the simplifying assumption of linear utility functions. As such, we propose a framework for multilateral repeated negotiations in a complex game setting—to introduce non-linearities—where negotiators can choose with whom they negotiate in subsequent games. This game setting not only creates non-linear utility functions, but also motivates the negotiation.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Preference interdependencies in a multi-issue salary negotiation Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{hale_preference_2022,
title = {Preference interdependencies in a multi-issue salary negotiation},
author = {James Hale and Peter Kim and Jonathan Gratch},
url = {https://doi.org/10.1145/3514197.3549681},
doi = {10.1145/3514197.3549681},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-27},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1--8},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '22},
abstract = {Negotiation is an important potential application domain for intelligent virtual agents but, unlike research on agent-agent negotiations, agents that negotiate with people often adopt unrealistic simplifying assumptions. These assumptions not only limit the generality of these agents, but call into question scientific findings about how people negotiate with agents. Here we relax two common assumptions: the use of assigned rather than elicited user preferences, and the use of linear utility functions. Using a simulated salary negotiation, we find that relaxing these assumptions helps reveal interesting individual differences in how people negotiate their salary and allows algorithms to find better win-win solutions.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn
Cognitive performance, creativity and stress levels of neurotypical young adults under different white noise levels Journal Article
In: Sci Rep, vol. 12, no. 1, pp. 14566, 2022, ISSN: 2045-2322, (Number: 1 Publisher: Nature Publishing Group).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{awada_cognitive_2022,
  author    = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn Roll},
  title     = {Cognitive performance, creativity and stress levels of neurotypical young adults under different white noise levels},
  journal   = {Sci Rep},
  volume    = {12},
  number    = {1},
  pages     = {14566},
  year      = {2022},
  date      = {2022-08-01},
  urldate   = {2023-03-31},
  url       = {https://www.nature.com/articles/s41598-022-18862-w},
  doi       = {10.1038/s41598-022-18862-w},
  issn      = {2045-2322},
  abstract  = {Noise is often considered a distractor; however recent studies suggest that sub-attentive individuals or individuals diagnosed with attention deficit hyperactivity disorder can benefit from white noise to enhance their cognitive performance. Research regarding the effect of white noise on neurotypical adults presents mixed results, thus the implications of white noise on the neurotypical population remain unclear. Thus, this study investigates the effect of 2 white noise conditions, white noise level at 45 dB and white noise level at 65 dB, on the cognitive performance, creativity, and stress levels of neurotypical young adults in a private office space. These conditions are compared to a baseline condition where participants are exposed to the office ambient noise. Our findings showed that the white noise level at 45 dB resulted in better cognitive performance in terms of sustained attention, accuracy, and speed of performance as well as enhanced creativity and lower stress levels. On the other hand, the 65 dB white noise condition led to improved working memory but higher stress levels, which leads to the conclusion that different tasks might require different noise levels for optimal performance. These results lay the foundation for the integration of white noise into office workspaces as a tool to enhance office workers’ performance.},
  note      = {Number: 1
Publisher: Nature Publishing Group},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Speggiorin, Alessandro; Dalton, Jeffrey; Leuski, Anton
TaskMAD: A Platform for Multimodal Task-Centric Knowledge-Grounded Conversational Experimentation Proceedings Article
In: Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 3240–3244, ACM, Madrid Spain, 2022, ISBN: 978-1-4503-8732-3.
Links | BibTeX | Tags: Dialogue, DTIC, UARC
@inproceedings{speggiorin_taskmad_2022,
title = {TaskMAD: A Platform for Multimodal Task-Centric Knowledge-Grounded Conversational Experimentation},
author = {Alessandro Speggiorin and Jeffrey Dalton and Anton Leuski},
url = {https://dl.acm.org/doi/10.1145/3477495.3531679},
doi = {10.1145/3477495.3531679},
isbn = {978-1-4503-8732-3},
year = {2022},
date = {2022-07-01},
urldate = {2022-09-22},
booktitle = {Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval},
pages = {3240--3244},
publisher = {ACM},
address = {Madrid Spain},
keywords = {Dialogue, DTIC, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Winn, Jade; Kaurloto, Cari; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno
Toward a Semi-Automated Scoping Review of Virtual Human Smiles Proceedings Article
In: Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop, 2022.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{mozgai_toward_2022,
  author    = {Sharon Mozgai and Jade Winn and Cari Kaurloto and Andrew Leeds and Dirk Heylen and Arno Hartholt},
  title     = {Toward a Semi-Automated Scoping Review of Virtual Human Smiles},
  booktitle = {Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop},
  year      = {2022},
  date      = {2022-06-01},
  url       = {http://www.lrec-conf.org/proceedings/lrec2022/workshops/SmiLa/index.html},
  keywords  = {DTIC, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Leeds, Andrew; Kim, Kevin; Gordon, Andrew; McCullough, Kyle; Ustun, Volkan; Mozgai, Sharon
Demonstrating the Rapid Integration & Development Environment (RIDE): Embodied Conversational Agent (ECA) and Multiagent Capabilities Proceedings Article
In: Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems, pp. 1902–1904, International Foundation for Autonomous Agents and Multiagent Systems, Richland, SC, 2022, ISBN: 978-1-4503-9213-6.
Abstract | BibTeX | Tags: AI, DTIC, Integration Technology, Machine Learning, UARC, Virtual Humans
@inproceedings{hartholt_demonstrating_2022,
title = {Demonstrating the Rapid Integration & Development Environment (RIDE): Embodied Conversational Agent (ECA) and Multiagent Capabilities},
author = {Arno Hartholt and Ed Fast and Andrew Leeds and Kevin Kim and Andrew Gordon and Kyle McCullough and Volkan Ustun and Sharon Mozgai},
isbn = {978-1-4503-9213-6},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-20},
booktitle = {Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems},
pages = {1902--1904},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Richland, SC},
series = {AAMAS '22},
abstract = {We demonstrate the Rapid Integration & Development Environment (RIDE), a research and development platform that enables rapid prototyping in support of multiagents and embodied conversational agents. RIDE is based on commodity game engines and includes a flexible architecture, system interoperability, and native support for artificial intelligence and machine learning frameworks.},
keywords = {AI, DTIC, Integration Technology, Machine Learning, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Adami, Pooya; Rodrigues, Patrick B.; Woods, Peter J.; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale
Impact of VR-Based Training on Human–Robot Interaction for Remote Operating Construction Robots Journal Article
In: J. Comput. Civ. Eng., vol. 36, no. 3, pp. 04022006, 2022, ISSN: 0887-3801, 1943-5487.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans, VR
@article{adami_impact_2022,
  author    = {Pooya Adami and Patrick B. Rodrigues and Peter J. Woods and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale Lucas},
  title     = {Impact of VR-Based Training on Human–Robot Interaction for Remote Operating Construction Robots},
  journal   = {J. Comput. Civ. Eng.},
  volume    = {36},
  number    = {3},
  pages     = {04022006},
  year      = {2022},
  date      = {2022-05-01},
  urldate   = {2022-09-23},
  url       = {https://ascelibrary.org/doi/10.1061/%28ASCE%29CP.1943-5487.0001016},
  doi       = {10.1061/(ASCE)CP.1943-5487.0001016},
  issn      = {0887-3801, 1943-5487},
  keywords  = {DTIC, UARC, Virtual Humans, VR},
  pubstate  = {published},
  tppubtype = {article}
}
Rodrigues, Patrick B.; Xiao, Yijing; Fukumura, Yoko E.; Awada, Mohamad; Aryal, Ashrant; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Ergonomic assessment of office worker postures using 3D automated joint angle assessment Journal Article
In: Advanced Engineering Informatics, vol. 52, pp. 101596, 2022, ISSN: 14740346.
Links | BibTeX | Tags: DTIC, Machine Learning, UARC
@article{rodrigues_ergonomic_2022,
title = {Ergonomic assessment of office worker postures using {3D} automated joint angle assessment},
author = {Patrick B. Rodrigues and Yijing Xiao and Yoko E. Fukumura and Mohamad Awada and Ashrant Aryal and Burcin Becerik-Gerber and Gale Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1474034622000672},
doi = {10.1016/j.aei.2022.101596},
issn = {1474-0346},
year = {2022},
date = {2022-04-01},
urldate = {2022-09-26},
journal = {Advanced Engineering Informatics},
volume = {52},
pages = {101596},
keywords = {DTIC, Machine Learning, UARC},
pubstate = {published},
tppubtype = {article}
}
Johnson, Emmanuel; Gratch, Jonathan
The Impact of Personalized Feedback on Negotiation Training Book Section
In: Design Recommendations for Intelligent Tutoring Systems, vol. 9, pp. 92–104, US Army Combat Capabilities Development Command–Soldier Center, 2022.
Abstract | Links | BibTeX | Tags: ARL, DoD, Social Simulation, UARC
@incollection{johnson_impact_2022,
title = {The Impact of Personalized Feedback on Negotiation Training},
author = {Emmanuel Johnson and Jonathan Gratch},
url = {https://adlnet.gov/assets/uploads/Vol%209_CompetencyBasedScenarioDesignBook_Complete_Final_021722v2.pdf#page=93},
year = {2022},
date = {2022-02-01},
urldate = {2022-02-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems},
volume = {9},
pages = {92--104},
publisher = {US Army Combat Capabilities Development Command–Soldier Center},
series = {Competency-Based Scenario Design},
abstract = {Intelligent tutoring systems (ITSs) have made great strides in teaching cognitive skills, including math (Koedinger et al., 1997; Koedinger & Corbett, 2005; Koedinger & Corbett, 2006), reading (Mills-Tettey, et al., 2009; Wijekumar et al., 2005;) and computer literacy (Guo, 2015; Olney et al., 2017;). Recent research has begun to extend these techniques to interpersonal skills such as public speaking (Chollet et al., 2014), medical interviews (Pataki, 2012; Stevens, 2006), collaborative problem solving (Graesser et al., 2018) and negotiation (Gratch et al., 2016; Kim et al., 2009). An extensive body of research has documented the benefits of ITSs for cognitive skill development, but relative to this, research on ITSs for interpersonal skills is still in its infancy. This chapter highlights our efforts in adapting ITS techniques to teaching negotiation.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Zhou, Jincheng; Ustun, Volkan
PySigma: Towards Enhanced Grand Unification for the Sigma Cognitive Architecture Book Section
In: Goertzel, Ben; Iklé, Matthew; Potapov, Alexey (Ed.): Artificial General Intelligence, vol. 13154, pp. 355–366, Springer International Publishing, Cham, 2022, ISBN: 978-3-030-93757-7 978-3-030-93758-4.
Links | BibTeX | Tags: CogArch, Cognitive Architecture, DTIC, UARC
@incollection{zhou_pysigma_2022,
title = {{PySigma}: Towards Enhanced Grand Unification for the {Sigma} Cognitive Architecture},
author = {Jincheng Zhou and Volkan Ustun},
editor = {Ben Goertzel and Matthew Iklé and Alexey Potapov},
url = {https://link.springer.com/10.1007/978-3-030-93758-4_36},
doi = {10.1007/978-3-030-93758-4_36},
isbn = {978-3-030-93757-7 978-3-030-93758-4},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-21},
booktitle = {Artificial General Intelligence},
series = {Lecture Notes in Computer Science},
volume = {13154},
pages = {355--366},
publisher = {Springer International Publishing},
address = {Cham},
keywords = {CogArch, Cognitive Architecture, DTIC, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Chawla, Kushal; Lucas, Gale; May, Jonathan; Gratch, Jonathan
Opponent Modeling in Negotiation Dialogues by Related Data Adaptation Proceedings Article
In: Findings of the Association for Computational Linguistics: NAACL 2022, pp. 661–674, Association for Computational Linguistics, Seattle, United States, 2022.
Links | BibTeX | Tags: DTIC, Social Simulation, UARC
@inproceedings{chawla_opponent_2022,
title = {Opponent Modeling in Negotiation Dialogues by Related Data Adaptation},
author = {Kushal Chawla and Gale Lucas and Jonathan May and Jonathan Gratch},
url = {https://aclanthology.org/2022.findings-naacl.50},
doi = {10.18653/v1/2022.findings-naacl.50},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-26},
booktitle = {Findings of the Association for Computational Linguistics: {NAACL} 2022},
pages = {661--674},
publisher = {Association for Computational Linguistics},
address = {Seattle, United States},
keywords = {DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Leitner, Maxyn; Greenwald, Eric; Montgomery, Ryan; Wang, Ning
Design and Evaluation of ARIN-561: An Educational Game for Youth Artificial Intelligence Education Proceedings Article
In: Proceedings of the 30th International Conference on Computers in Education, 2022.
Abstract | Links | BibTeX | Tags: AI, UARC
@inproceedings{leitner_design_2022,
title = {Design and Evaluation of {ARIN-561}: An Educational Game for Youth Artificial Intelligence Education},
author = {Maxyn Leitner and Eric Greenwald and Ryan Montgomery and Ning Wang},
url = {https://par.nsf.gov/servlets/purl/10440195},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the 30th International Conference on Computers in Education},
abstract = {Artificial Intelligence (AI) is increasingly vital to our everyday lives. Future generations will not only consume AI, but work with AI-driven tools and contribute to the development of AI. As such, students will need exposure to AI knowledge at a younger age. Despite this need, relatively little is currently known about how to most effectively provide AI education to K-12 (kindergarten through 12th grade) students. In this paper, we discuss the design of an educational game for high-school AI education called ARIN-561. The game centered around two agents – a player character and a companion robot, as the story and learning experience unfold through conversations between the two agents and explorations that bond the two agents A series of studies were carried out at high schools in the United States to evaluate the efficacy of the game. Results indicate the potential of ARIN-561 to build AI knowledge, especially when students spend more time in the game.},
keywords = {AI, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chawla, Kushal; Shi, Weiyan; Zhang, Jingwen; Lucas, Gale; Yu, Zhou; Gratch, Jonathan
Social Influence Dialogue Systems: A Survey of Datasets and Models For Social Influence Tasks Journal Article
In: 2022, (Publisher: arXiv Version Number: 2).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{chawla_social_2022,
title = {Social Influence Dialogue Systems: A Survey of Datasets and Models For Social Influence Tasks},
author = {Kushal Chawla and Weiyan Shi and Jingwen Zhang and Gale Lucas and Zhou Yu and Jonathan Gratch},
url = {https://arxiv.org/abs/2210.05664},
doi = {10.48550/ARXIV.2210.05664},
eprint = {2210.05664},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2023-08-22},
abstract = {Dialogue systems capable of social influence such as persuasion, negotiation, and therapy, are essential for extending the use of technology to numerous realistic scenarios. However, existing research primarily focuses on either task-oriented or open-domain scenarios, a categorization that has been inadequate for capturing influence skills systematically. There exists no formal definition or category for dialogue systems with these skills and data-driven efforts in this direction are highly limited. In this work, we formally define and introduce the category of social influence dialogue systems that influence users' cognitive and emotional responses, leading to changes in thoughts, opinions, and behaviors through natural conversations. We present a survey of various tasks, datasets, and methods, compiling the progress across seven diverse domains. We discuss the commonalities and differences between the examined systems, identify limitations, and recommend future directions. This study serves as a comprehensive reference for social influence dialogue systems to inspire more dedicated research and discussion in this emerging area.},
note = {Publisher: arXiv
Version Number: 2},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Herrick, Imogen; Sinatra, Gale; Kennedy, Alana; Nye, Benjamin; Swartout, William; Lindsey, Emily
Using Augmented Reality (AR) to Bring the Past to Life in Informal Science Learning Journal Article
In: NSF-PAR, 2022.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC
@article{herrick_using_2022,
title = {Using Augmented Reality ({AR}) to Bring the Past to Life in Informal Science Learning},
author = {Imogen Herrick and Gale Sinatra and Alana Kennedy and Benjamin Nye and William Swartout and Emily Lindsey},
url = {https://par.nsf.gov/biblio/10344989},
year = {2022},
date = {2022-01-01},
journal = {NSF-PAR},
abstract = {A key mission for museums is to engage a large and diverse public audience in science learning (Macdonald, 1997). To that end, science museums attempt to use immersive technologies in entertaining, socially oriented, and innovative ways. An example is the use of augmented reality (AR) to overlay virtual objects onto the real-world (Azuma, Baillot, Behringer, Feiner, Julier, & MacIntyre, 2001).We used a Design Based Research (DBR) approach to develop and test four features of an AR experience to promote place-based science learning in an museum setting. While quantitative differences were not found among conditions in knowledge gained, significant learning gains were seen from pre to post, illustrating the potential for place-based informal science learning. Incorporating AR technology into museum exhibits can update them with 21st tools to support visitor engagement in the learning experience. This research contributes to understanding of usability and logistical issues for different AR designs for a public, outdoor informal settings.},
keywords = {Learning Sciences, UARC},
pubstate = {published},
tppubtype = {article}
}
2021
Li, Jiaman; Villegas, Ruben; Ceylan, Duygu; Yang, Jimei; Kuang, Zhengfei; Li, Hao; Zhao, Yajie
Task-Generic Hierarchical Human Motion Prior using VAEs Proceedings Article
In: 2021 International Conference on 3D Vision (3DV), pp. 771–781, IEEE, London, United Kingdom, 2021, ISBN: 978-1-66542-688-6.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{li_task-generic_2021,
title = {Task-Generic Hierarchical Human Motion Prior using {VAEs}},
author = {Jiaman Li and Ruben Villegas and Duygu Ceylan and Jimei Yang and Zhengfei Kuang and Hao Li and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9665881/},
doi = {10.1109/3DV53792.2021.00086},
isbn = {978-1-66542-688-6},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-22},
booktitle = {2021 International Conference on 3D Vision (3DV)},
pages = {771--781},
publisher = {IEEE},
address = {London, United Kingdom},
keywords = {DTIC, UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Lixing; Gurney, Nikolos; McCullough, Kyle; Ustun, Volkan
Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations Proceedings Article
In: 2021 Winter Simulation Conference (WSC), pp. 1–12, IEEE, Phoenix, AZ, USA, 2021, ISBN: 978-1-66543-311-2.
Links | BibTeX | Tags: DTIC, Learning Sciences, UARC, Virtual Humans
@inproceedings{liu_graph_2021,
title = {Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations},
author = {Lixing Liu and Nikolos Gurney and Kyle McCullough and Volkan Ustun},
url = {https://ieeexplore.ieee.org/document/9715433/},
doi = {10.1109/WSC52266.2021.9715433},
isbn = {978-1-66543-311-2},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-21},
booktitle = {2021 Winter Simulation Conference (WSC)},
pages = {1--12},
publisher = {IEEE},
address = {Phoenix, AZ, USA},
keywords = {DTIC, Learning Sciences, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; McCullough, Kyle; Fast, Ed; Leeds, Andrew; Mozgai, Sharon; Aris, Tim; Ustun, Volkan; Gordon, Andrew; McGroarty, Christopher
Rapid Prototyping for Simulation and Training with the Rapid Integration & Development Environment (RIDE) Proceedings Article
In: 2021.
BibTeX | Tags: AI, DTIC, Integration Technology, Machine Learning, Simulation, UARC
@inproceedings{hartholt_rapid_2021,
title = {Rapid Prototyping for Simulation and Training with the {Rapid Integration \& Development Environment} ({RIDE})},
author = {Arno Hartholt and Kyle McCullough and Ed Fast and Andrew Leeds and Sharon Mozgai and Tim Aris and Volkan Ustun and Andrew Gordon and Christopher McGroarty},
year = {2021},
date = {2021-11-01},
keywords = {AI, DTIC, Integration Technology, Machine Learning, Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Associations Among Home Indoor Environmental Quality Factors and Worker Health While Working From Home During COVID-19 Pandemic Journal Article
In: ASME Journal of Engineering for Sustainable Buildings and Cities, vol. 2, no. 4, pp. 041001, 2021, ISSN: 2642-6641, 2642-6625.
Abstract | Links | BibTeX | Tags: DTIC, UARC
@article{awada_associations_2021,
title = {Associations Among Home Indoor Environmental Quality Factors and Worker Health While Working From Home During {COVID-19} Pandemic},
author = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn C. Roll},
url = {https://asmedigitalcollection.asme.org/sustainablebuildings/article/2/4/041001/1122847/Associations-Among-Home-Indoor-Environmental},
doi = {10.1115/1.4052822},
issn = {2642-6641, 2642-6625},
year = {2021},
date = {2021-11-01},
urldate = {2022-09-26},
journal = {ASME Journal of Engineering for Sustainable Buildings and Cities},
volume = {2},
number = {4},
pages = {041001},
abstract = {Abstract The outbreak of SARS-CoV-2 virus forced office workers to conduct their daily work activities from home over an extended period. Given this unique situation, an opportunity emerged to study the satisfaction of office workers with indoor environmental quality (IEQ) factors of their houses where work activities took place and associate these factors with mental and physical health. We designed and administered a questionnaire that was open for 45 days during the COVID-19 pandemic and received valid data from 988 respondents. The results show that low satisfaction with natural lighting, glare, and humidity predicted eye-related symptoms, while low satisfaction with noise was a strong predictor of fatigue or tiredness, headaches or migraines, anxiety, and depression or sadness. Nose- and throat-related symptoms and skin-related symptoms were only uniquely predicted by low satisfaction with humidity. Low satisfaction with glare uniquely predicted an increase in musculoskeletal discomfort. Symptoms related to mental stress, rumination, or worry were predicted by low satisfaction with air quality and noise. Finally, low satisfaction with noise and indoor temperature predicted the prevalence of symptoms related to trouble concentrating, maintaining attention, or focus. Workers with higher income were more satisfied with humidity, air quality, and indoor temperature and had better overall mental health. Older individuals had increased satisfaction with natural lighting, humidity, air quality, noise, and indoor temperature. Findings from this study can inform future design practices that focus on hybrid home-work environments by highlighting the impact of IEQ factors on occupant well-being.},
keywords = {DTIC, UARC},
pubstate = {published},
tppubtype = {article}
}
Li, Tianye; Liu, Shichen; Bolkart, Timo; Liu, Jiayi; Li, Hao; Zhao, Yajie
Topologically Consistent Multi-View Face Inference Using Volumetric Sampling Proceedings Article
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 3804–3814, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{li_topologically_2021,
title = {Topologically Consistent Multi-View Face Inference Using Volumetric Sampling},
author = {Tianye Li and Shichen Liu and Timo Bolkart and Jiayi Liu and Hao Li and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9711264/},
doi = {10.1109/ICCV48922.2021.00380},
isbn = {978-1-66542-812-5},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-22},
booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {3804--3814},
publisher = {IEEE},
address = {Montreal, QC, Canada},
keywords = {DTIC, UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Shichen; Zhou, Yichao; Zhao, Yajie
VaPiD: A Rapid Vanishing Point Detector via Learned Optimizers Proceedings Article
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 12839–12848, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{liu_vapid_2021,
title = {{VaPiD}: A Rapid Vanishing Point Detector via Learned Optimizers},
author = {Shichen Liu and Yichao Zhou and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9711313/},
doi = {10.1109/ICCV48922.2021.01262},
isbn = {978-1-66542-812-5},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-22},
booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {12839--12848},
publisher = {IEEE},
address = {Montreal, QC, Canada},
keywords = {DTIC, UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Toyoda, Yuushi; Lucas, Gale; Gratch, Jonathan
Predicting Worker Accuracy from Nonverbal Behaviour: Benefits and Potential for Algorithmic Bias Proceedings Article
In: Companion Publication of the 2021 International Conference on Multimodal Interaction, pp. 25–30, ACM, Montreal QC Canada, 2021, ISBN: 978-1-4503-8471-1.
Links | BibTeX | Tags: DTIC, UARC
@inproceedings{toyoda_predicting_2021,
title = {Predicting Worker Accuracy from Nonverbal Behaviour: Benefits and Potential for Algorithmic Bias},
author = {Yuushi Toyoda and Gale Lucas and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3461615.3485427},
doi = {10.1145/3461615.3485427},
isbn = {978-1-4503-8471-1},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-28},
booktitle = {Companion Publication of the 2021 International Conference on Multimodal Interaction},
pages = {25--30},
publisher = {ACM},
address = {Montreal QC Canada},
keywords = {DTIC, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Adami, Pooya; Rodrigues, Patrick B.; Woods, Peter J.; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale
Effectiveness of VR-based training on improving construction workers’ knowledge, skills, and safety behavior in robotic teleoperation Journal Article
In: Advanced Engineering Informatics, vol. 50, pp. 101431, 2021, ISSN: 14740346.
Links | BibTeX | Tags: DTIC, Learning Sciences, UARC, VR
@article{adami_effectiveness_2021,
title = {Effectiveness of {VR}-based training on improving construction workers’ knowledge, skills, and safety behavior in robotic teleoperation},
author = {Pooya Adami and Patrick B. Rodrigues and Peter J. Woods and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S147403462100183X},
doi = {10.1016/j.aei.2021.101431},
issn = {1474-0346},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-26},
journal = {Advanced Engineering Informatics},
volume = {50},
pages = {101431},
keywords = {DTIC, Learning Sciences, UARC, VR},
pubstate = {published},
tppubtype = {article}
}