Publications
Brixey, Jacqueline; Traum, David
Masheli: A Choctaw-English bilingual chatbot Incollection
In: Conversational Dialogue Systems for the Next Decade, pp. 41–50, Springer Singapore, Singapore, 2020.
@incollection{brixey_masheli_2020,
title = {Masheli: A Choctaw-English bilingual chatbot},
author = {Jacqueline Brixey and David Traum},
url = {https://link.springer.com/chapter/10.1007/978-981-15-8395-7_4},
year = {2020},
date = {2020-10-01},
booktitle = {Conversational Dialogue Systems for the Next Decade},
pages = {41--50},
publisher = {Springer Singapore},
address = {Singapore},
abstract = {We present the implementation of an autonomous Choctaw-English bilingual chatbot. Choctaw is an American indigenous language. The intended use of the chatbot is for Choctaw language learners to practice conversational skills. The system’s backend is NPCEditor, a response selection program that is trained on linked questions and answers. The chatbot’s answers are stories and conversational utterances in both languages. We experiment with the ability of NPCEditor to appropriately respond to language mixed utterances, and describe a pilot study with Choctaw-English speakers.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Yeh, Shih-Ching; Lin, Sheng-Yang; Wu, Eric Hsiao-Kuang; Zhang, Kai-Feng; Xiu, Xu; Rizzo, Albert; Chung, Chia-Ru
A Virtual-Reality System Integrated With Neuro-Behavior Sensing for Attention-Deficit/Hyperactivity Disorder Intelligent Assessment Journal Article
In: IEEE Transactions on Neural Systems and Rehabilitation Engineering, vol. 28, no. 9, pp. 1899–1907, 2020, ISSN: 1534-4320, 1558-0210.
@article{yeh_virtual-reality_2020,
title = {A Virtual-Reality System Integrated With Neuro-Behavior Sensing for Attention-Deficit/Hyperactivity Disorder Intelligent Assessment},
author = {Shih-Ching Yeh and Sheng-Yang Lin and Eric Hsiao-Kuang Wu and Kai-Feng Zhang and Xu Xiu and Albert Rizzo and Chia-Ru Chung},
url = {https://ieeexplore.ieee.org/document/9123917/},
doi = {10.1109/TNSRE.2020.3004545},
issn = {1534-4320, 1558-0210},
year = {2020},
date = {2020-09-01},
journal = {IEEE Transactions on Neural Systems and Rehabilitation Engineering},
volume = {28},
number = {9},
pages = {1899--1907},
abstract = {Attention-deficit/Hyperactivity disorder (ADHD) is a common neurodevelopmental disorder among children. Traditional assessment methods generally rely on behavioral rating scales (BRS) performed by clinicians, and sometimes parents or teachers. However, BRS assessment is time-consuming, and the subjective ratings may introduce bias into the evaluation. Therefore, the major purpose of this study was to develop a Virtual Reality (VR) classroom associated with an intelligent assessment model to assist clinicians in the diagnosis of ADHD. In this study, an immersive VR classroom embedded with sustained and selective attention tasks was developed, in which visual, audio, and visual-audio hybrid distractions were triggered while attention tasks were conducted. A clinical experiment with 37 ADHD and 31 healthy subjects was performed. Data from the BRS were compared with VR task performance and analyzed by rank-sum tests and Pearson correlation. Results showed that 23 of the 28 features distinguished the ADHD from the non-ADHD children. Several features of task performance and neuro-behavioral measurements were also correlated with features of the BRSs. Additionally, machine learning models incorporating task performance and neuro-behavior were used to classify ADHD and non-ADHD children. The mean accuracy for the repeated cross-validation reached 83.2%, demonstrating great potential for our system to help clinicians assess ADHD.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gordon, Andrew S.; Miller, Rob; Morgenstern, Leora; Turán, György
Preface Journal Article
In: Annals of Mathematics and Artificial Intelligence, 2020, ISSN: 1012-2443, 1573-7470.
@article{gordon_preface_2020,
title = {Preface},
author = {Andrew S. Gordon and Rob Miller and Leora Morgenstern and György Turán},
url = {http://link.springer.com/10.1007/s10472-020-09711-5},
doi = {10.1007/s10472-020-09711-5},
issn = {1012-2443, 1573-7470},
year = {2020},
date = {2020-09-01},
journal = {Annals of Mathematics and Artificial Intelligence},
abstract = {A few years after the 1956 Dartmouth Summer Workshop [1, 2], which first established artificial intelligence as a field of research, John McCarthy [3] discussed the importance of explicitly representing and reasoning with commonsense knowledge to the enterprise of creating artificially intelligent robots and agents. McCarthy proposed that commonsense knowledge was best represented using formal logic, which he viewed as a uniquely powerful lingua franca that could be used to express and reason with virtually any sort of information that humans might reason with when problem solving, a stance he further developed and propounded in [4, 5]. This approach, the formalist or logic-based approach to commonsense reasoning, was practiced by an increasing set of adherents over the next several decades [6, 7], and continues to be represented by the Commonsense Symposium Series, first held in 1991 [8] and held biennially, for the most part, after that.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhu, Runhe; Lucas, Gale M.; Becerik-Gerber, Burcin; Southers, Erroll G.
Building preparedness in response to active shooter incidents: Results of focus group interviews Journal Article
In: International Journal of Disaster Risk Reduction, vol. 48, pp. 101617, 2020, ISSN: 2212-4209.
@article{zhu_building_2020,
title = {Building preparedness in response to active shooter incidents: Results of focus group interviews},
author = {Runhe Zhu and Gale M. Lucas and Burcin Becerik-Gerber and Erroll G. Southers},
url = {https://linkinghub.elsevier.com/retrieve/pii/S221242091931427X},
doi = {10.1016/j.ijdrr.2020.101617},
issn = {2212-4209},
year = {2020},
date = {2020-09-01},
journal = {International Journal of Disaster Risk Reduction},
volume = {48},
pages = {101617},
abstract = {Active shooter incidents present an increasing threat to American society. Many of these incidents occur in building environments; therefore, it is important to consider design and security elements in buildings to decrease the risk of active shooter incidents. This study aims to assess current security countermeasures and identify varying considerations associated with implementing these countermeasures. Fifteen participants, with expertise and experience in a diverse collection of operational and organizational backgrounds, including security, engineering, law enforcement, emergency management and policy making, participated in three focus group interviews. The participants identified a list of countermeasures that have been used for active shooter incidents. Important determinants for the effectiveness of countermeasures include their influence on occupants’ behavior during active shooter incidents, and occupants’ and administrators’ awareness of how to use them effectively. The nature of incidents (e.g., internal vs. external threats), building type (e.g., office buildings vs. school buildings), and occupants (e.g., students of different ages) were also recognized to affect the selection of appropriate countermeasures. The nexus between emergency preparedness and normal operations, and the importance of tradeoffs such as the ones between cost, aesthetics, maintenance needs and the influence on occupants’ daily activities were also discussed. To ensure the effectiveness of countermeasures and improve safety, the participants highlighted the importance of both training and practice, for occupants and administrators (e.g., first responder teams). The interview results suggested that further study of the relationship between security countermeasures and occupants’ and administrators’ responses, as well as efficient training approaches, is needed.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Fast, Ed; Reilly, Adam; Whitcup, Wendy; Liewer, Matt; Mozgai, Sharon
Multi-Platform Expansion of the Virtual Human Toolkit: Ubiquitous Conversational Agents Journal Article
In: International Journal of Semantic Computing, vol. 14, no. 03, pp. 315–332, 2020, ISSN: 1793-351X, 1793-7108.
@article{hartholt_multi-platform_2020,
title = {Multi-Platform Expansion of the Virtual Human Toolkit: Ubiquitous Conversational Agents},
author = {Arno Hartholt and Ed Fast and Adam Reilly and Wendy Whitcup and Matt Liewer and Sharon Mozgai},
url = {https://www.worldscientific.com/doi/abs/10.1142/S1793351X20400127},
doi = {10.1142/S1793351X20400127},
issn = {1793-351X, 1793-7108},
year = {2020},
date = {2020-09-01},
journal = {International Journal of Semantic Computing},
volume = {14},
number = {03},
pages = {315--332},
abstract = {We present an extension of the Virtual Human Toolkit to include a range of computing platforms, including mobile, web, Virtual Reality (VR) and Augmented Reality (AR). The Toolkit uses a mix of in-house and commodity technologies to support audio-visual sensing, speech recognition, natural language processing, nonverbal behavior generation and realization, text-to-speech generation and rendering. It has been extended to support computing platforms beyond Windows by leveraging microservices. The resulting framework maintains the modularity of the underlying architecture, allows re-use of both logic and content through cloud services, and is extensible by porting lightweight clients. We present the current state of the framework, discuss how we model and animate our characters, and offer lessons learned through several use cases, including expressive character animation in seated VR, shared space and navigation in room-scale VR, autonomous AI in mobile AR, and real-time user performance feedback leveraging mobile sensors in headset AR.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gordon, Carla; Georgila, Kallirroi; Yanov, Volodymyr; Traum, David
Towards Personalization of Spoken Dialogue System Communication Strategies Incollection
In: Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 145–160, Springer Singapore, Singapore, 2020, ISBN: 978-981-15-8394-0 978-981-15-8395-7.
@incollection{gordon_towards_2020,
title = {Towards Personalization of Spoken Dialogue System Communication Strategies},
author = {Carla Gordon and Kallirroi Georgila and Volodymyr Yanov and David Traum},
url = {http://link.springer.com/10.1007/978-981-15-8395-7_11},
isbn = {978-981-15-8394-0 978-981-15-8395-7},
year = {2020},
date = {2020-09-01},
booktitle = {Conversational Dialogue Systems for the Next Decade},
volume = {704},
pages = {145--160},
publisher = {Springer Singapore},
address = {Singapore},
abstract = {This study examines the effects of 3 conversational traits – Register, Explicitness, and Misunderstandings – on user satisfaction and the perception of specific subjective features for Virtual Home Assistant spoken dialogue systems. Eight different system profiles were created, each representing a different combination of these 3 traits. We then utilized a novel Wizard of Oz data collection tool and recruited participants who interacted with the 8 different system profiles, and then rated the systems on 7 subjective features. Surprisingly, we found that systems which made errors were preferred overall, with the statistical analysis revealing error-prone systems were rated higher than systems which made no errors for all 7 of the subjective features rated. There were also some interesting interaction effects between the 3 conversational traits, such as implicit confirmations being preferred for systems employing a “conversational” Register, while explicit confirmations were preferred for systems employing a “formal” Register, even though there was no overall main effect for Explicitness. This experimental framework offers a fine-grained approach to the evaluation of user satisfaction which looks towards the personalization of communication strategies for spoken dialogue systems.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Luu, Cindy; Talbot, Thomas B.; Fung, Cha Chi; Ben-Isaac, Eyal; Espinoza, Juan; Fischer, Susan; Cho, Christine S.; Sargsyan, Mariam; Korand, Sridevi; Chang, Todd P.
Development and Performance Assessment of a Digital Serious Game to Assess Multi-Patient Care Skills in a Simulated Pediatric Emergency Department Journal Article
In: Simulation & Gaming, vol. 51, no. 4, pp. 550–570, 2020, ISSN: 1046-8781, 1552-826X.
@article{luu_development_2020,
title = {Development and Performance Assessment of a Digital Serious Game to Assess Multi-Patient Care Skills in a Simulated Pediatric Emergency Department},
author = {Cindy Luu and Thomas B. Talbot and Cha Chi Fung and Eyal Ben-Isaac and Juan Espinoza and Susan Fischer and Christine S. Cho and Mariam Sargsyan and Sridevi Korand and Todd P. Chang},
url = {http://journals.sagepub.com/doi/10.1177/1046878120904984},
doi = {10.1177/1046878120904984},
issn = {1046-8781, 1552-826X},
year = {2020},
date = {2020-08-01},
journal = {Simulation & Gaming},
volume = {51},
number = {4},
pages = {550--570},
abstract = {Objective. Multi-patient care is important among medical trainees in an emergency department (ED). While resident efficiency is a typically measured metric, multi-patient care involves both efficiency and diagnostic/treatment accuracy. Multi-patient care ability is difficult to assess, though simulation is a potential alternative. Our objective was to generate validity evidence for a serious game in assessing multi-patient care skills among a variety of learners.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rakofsky, Jeffrey J.; Talbot, Thomas B.; Dunlop, Boadie W.
A Virtual Standardized Patient–Based Assessment Tool to Evaluate Psychiatric Residents’ Psychopharmacology Proficiency Journal Article
In: Academic Psychiatry, 2020, ISSN: 1042-9670, 1545-7230.
@article{rakofsky_virtual_2020,
title = {A Virtual Standardized Patient–Based Assessment Tool to Evaluate Psychiatric Residents’ Psychopharmacology Proficiency},
author = {Jeffrey J. Rakofsky and Thomas B. Talbot and Boadie W. Dunlop},
url = {http://link.springer.com/10.1007/s40596-020-01286-x},
doi = {10.1007/s40596-020-01286-x},
issn = {1042-9670, 1545-7230},
year = {2020},
date = {2020-07-01},
journal = {Academic Psychiatry},
abstract = {Objectives: A virtual standardized patient-based assessment simulator was developed to address biases and practical limitations in existing methods for evaluating residents’ proficiency in psychopharmacological knowledge and practice. Methods: The simulator was designed to replicate an outpatient psychiatric clinic experience. The virtual patient reported symptoms of a treatment-resistant form of major depressive disorder (MDD), requiring the learner to use various antidepressants in order for the patient to fully remit. Test scores were based on the proportion of correct responses to questions asked by the virtual patient about possible side effects, dosing, and titration decisions, which depended upon the patient’s tolerability and response to the learner’s selected medications. The validation paradigm included a novice-expert performance comparison across 4th year medical students, psychiatric residents from all four post-graduate year classes, and psychiatry department faculty, and a correlational analysis of simulator performance with the PRITE Somatic Treatments subscale score. Post-test surveys evaluated the test takers’ subjective impressions of the simulator. Results: Forty-three subjects completed the online exam and survey. Total mean scores on the exam differed significantly across all the learner groups in a step-wise manner from students to faculty (F = 6.10},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Malta, Loretta S.; Giosan, Cezar; Szkodny, Lauren E.; Altemus, Margaret M.; Rizzo, Albert A.; Silbersweig, David A.; Difede, JoAnn
Development of a virtual reality laboratory stressor Journal Article
In: Virtual Reality, 2020, ISSN: 1359-4338, 1434-9957.
@article{malta_development_2020,
title = {Development of a virtual reality laboratory stressor},
author = {Loretta S. Malta and Cezar Giosan and Lauren E. Szkodny and Margaret M. Altemus and Albert A. Rizzo and David A. Silbersweig and JoAnn Difede},
url = {http://link.springer.com/10.1007/s10055-020-00455-5},
doi = {10.1007/s10055-020-00455-5},
issn = {1359-4338, 1434-9957},
year = {2020},
date = {2020-07-01},
journal = {Virtual Reality},
abstract = {This research report describes the development of a virtual reality (VR) laboratory stressor to study the effects of exposure to stressful events. The aim of the research was to develop a VR simulation that would evoke stressor responses at a level that was tolerable for participants. Veterans with and without warzone-related posttraumatic stress disorder (PTSD) were presented with VR simulations of combat stressors. There was one complaint of feeling hot during simulations but no incidents of simulator sickness. Participants denied experiencing the simulations as overly distressing, and there were no reports of any distress or problems related to study participation when they were contacted two weeks after the VR challenge. Simulations elicited moderate levels of anxiety and mild levels of dissociation that were significantly greater in Veterans with PTSD. Simulations were less successful in eliciting differential heart rate reactivity and stress hormone secretion, though history of civilian trauma exposure was associated with elevated heart rates during the second simulation. The study demonstrated that the VR paradigm was feasible and tolerable and that it holds promise as a new method with which to conduct controlled laboratory research on the effects of exposure to stressful events.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Nye, Benjamin D.; Davis, Dan M.; Rizvi, Sanad Z.; Carr, Kayla; Swartout, William; Thacker, Raj; Shaw, Kenneth
Feasibility and usability of MentorPal, a framework for rapid development of virtual mentors Journal Article
In: Journal of Research on Technology in Education, pp. 1–23, 2020, ISSN: 1539-1523, 1945-0818.
@article{nye_feasibility_2020,
title = {Feasibility and usability of MentorPal, a framework for rapid development of virtual mentors},
author = {Benjamin D. Nye and Dan M. Davis and Sanad Z. Rizvi and Kayla Carr and William Swartout and Raj Thacker and Kenneth Shaw},
url = {https://www.tandfonline.com/doi/full/10.1080/15391523.2020.1771640},
doi = {10.1080/15391523.2020.1771640},
issn = {1539-1523, 1945-0818},
year = {2020},
date = {2020-07-01},
journal = {Journal of Research on Technology in Education},
pages = {1--23},
abstract = {One-on-one mentoring is an effective method to help novices with career development. However, traditional mentoring scales poorly. To address this problem, MentorPal emulates conversations with a panel of virtual mentors based on recordings of real STEM professionals. Students freely ask questions as they might in a career fair, while machine learning algorithms attempt to provide the best answers. MentorPal has developed strategies for the rapid development of new virtual mentors, where training data will be sparse. In a usability study, 31 high school students self-reported a) increased career knowledge and confidence, b) positive ease-of-use, and that c) mentors were helpful (87%) but often did not cover their preferred career (29%). Results demonstrate the feasibility of scalable virtual mentoring, but efficacy studies are needed to evaluate the impact of virtual mentors, particularly for groups with limited STEM opportunities.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Talbot, Thomas Brett; Thiry, Katherine Elizabeth; Jenkins, Michael
Storyboarding the Virtuality: Methods and Best Practices to Depict Scenes and Interactive Stories in Virtual and Mixed Reality Incollection
In: Advances in Usability, User Experience, Wearable and Assistive Technology, vol. 1217, pp. 129–135, Springer International Publishing, Cham, Switzerland, 2020, ISBN: 978-3-030-51827-1 978-3-030-51828-8.
@incollection{talbot_storyboarding_2020,
title = {Storyboarding the Virtuality: Methods and Best Practices to Depict Scenes and Interactive Stories in Virtual and Mixed Reality},
author = {Thomas Brett Talbot and Katherine Elizabeth Thiry and Michael Jenkins},
url = {http://link.springer.com/10.1007/978-3-030-51828-8_17},
doi = {10.1007/978-3-030-51828-8_17},
isbn = {978-3-030-51827-1 978-3-030-51828-8},
year = {2020},
date = {2020-07-01},
booktitle = {Advances in Usability, User Experience, Wearable and Assistive Technology},
volume = {1217},
pages = {129--135},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Storyboarding is a cinematic prototyping technique to visualize settings, event sequences, dialogues, and character depictions. Interactive VR/MR experiences benefit from storyboarding as part of the creation process, yet free movement and immersive 3D introduce challenges. Techniques to visualize 3D settings are explored, and methods to conduct traditional storyboarding while requiring multiple viewpoints within a single timestep are elaborated; this is possible with perspective scene views. Even with 3D prototyping tools, it is important to maintain practices that optimize VR storyboarding and maintain spatial efficiency, allow storyboards to be hand drawn, and be intuitive to read. A powerful solution is to bind several perspectives together to represent a specific time while reverting to a traditional single viewpoint when not necessary, thereby balancing three-dimensionality, spatial efficiency, and ease of creation.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Mozgai, Sharon; Hartholt, Arno; Akinyemi, Dayo; Kubicek, Katarina; Rizzo, Albert (Skip); Kipke, Michele
Development and Initial Feasibility Testing of the Virtual Research Navigator (VRN): A Public-Facing Agent-Based Educational System for Clinical Research Participation Incollection
In: HCI International 2020 - Posters, vol. 1225, pp. 304–307, Springer International Publishing, Cham, Switzerland, 2020, ISBN: 978-3-030-50728-2 978-3-030-50729-9.
@incollection{mozgai_development_2020,
title = {Development and Initial Feasibility Testing of the Virtual Research Navigator (VRN): A Public-Facing Agent-Based Educational System for Clinical Research Participation},
author = {Sharon Mozgai and Arno Hartholt and Dayo Akinyemi and Katarina Kubicek and Albert (Skip) Rizzo and Michele Kipke},
url = {http://link.springer.com/10.1007/978-3-030-50729-9_43},
doi = {10.1007/978-3-030-50729-9_43},
isbn = {978-3-030-50728-2 978-3-030-50729-9},
year = {2020},
date = {2020-07-01},
booktitle = {HCI International 2020 - Posters},
volume = {1225},
pages = {304--307},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {The overall goal of VRN is to develop a novel technology solution at Children’s Hospital Los Angeles (CHLA) to overcome barriers that prevent the recruitment of diverse patient populations to clinical trials by providing both caregivers and children with an interactive educational experience. This system consists of 1) an intelligent agent called Zippy that users interact with by keyboard or voice input, 2) a series of videos covering topics including Privacy, Consent and Benefits, and 3) a UI that guides users through all available content. Pre- and post-questionnaires assessed willingness to participate in clinical research and found participants either increased or maintained their level of willingness to participate in research studies. Additionally, qualitative analysis of interview data revealed participants rated the overall interaction favorably and believed Zippy to be more fun, less judgmental, and less threatening than interacting with a human. Future iterations are in progress based on the user feedback.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Li, Ruilong; Xiu, Yuliang; Saito, Shunsuke; Huang, Zeng; Olszewski, Kyle; Li, Hao
Monocular Real-Time Volumetric Performance Capture Journal Article
In: ResearchGate, pp. 30, 2020.
@article{li_monocular_2020,
title = {Monocular Real-Time Volumetric Performance Capture},
author = {Ruilong Li and Yuliang Xiu and Shunsuke Saito and Zeng Huang and Kyle Olszewski and Hao Li},
url = {https://www.researchgate.net/publication/343279742_Monocular_Real-Time_Volumetric_Performance_Capture},
year = {2020},
date = {2020-07-01},
journal = {ResearchGate},
pages = {30},
abstract = {We present the first approach to volumetric performance capture and novel-view rendering at real-time speed from monocular video, eliminating the need for expensive multi-view systems or cumbersome pre-acquisition of a personalized template model. Our system reconstructs a fully textured 3D human from each frame by leveraging Pixel-Aligned Implicit Function (PIFu). While PIFu achieves high-resolution reconstruction in a memory-efficient manner, its computationally expensive inference prevents us from deploying such a system for real-time applications. To this end, we propose a novel hierarchical surface localization algorithm and a direct rendering method without explicitly extracting surface meshes. By culling unnecessary regions for evaluation in a coarse-to-fine manner, we successfully accelerate the reconstruction by two orders of magnitude from the baseline without compromising the quality. Furthermore, we introduce an Online Hard Example Mining (OHEM) technique that effectively suppresses failure modes due to the rare occurrence of challenging examples. We adaptively update the sampling probability of the training data based on the current reconstruction accuracy, which effectively alleviates reconstruction artifacts. Our experiments and evaluations demonstrate the robustness of our system to various challenging angles, illuminations, poses, and clothing styles. We also show that our approach compares favorably with the state-of-the-art monocular performance capture. Our proposed approach removes the need for multi-view studio settings and enables a consumer-accessible solution for volumetric capture.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Czyzewski, Adam; Dalton, Jeffrey; Leuski, Anton
Agent Dialogue: A Platform for Conversational Information Seeking Experimentation Inproceedings
In: Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 2121–2124, ACM, Virtual Event China, 2020, ISBN: 978-1-4503-8016-4.
@inproceedings{czyzewski_agent_2020,
title = {Agent Dialogue: A Platform for Conversational Information Seeking Experimentation},
author = {Adam Czyzewski and Jeffrey Dalton and Anton Leuski},
url = {https://dl.acm.org/doi/10.1145/3397271.3401397},
doi = {10.1145/3397271.3401397},
isbn = {978-1-4503-8016-4},
year = {2020},
date = {2020-07-01},
booktitle = {Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval},
pages = {2121--2124},
publisher = {ACM},
address = {Virtual Event China},
abstract = {Conversational Information Seeking (CIS) is an emerging area of Information Retrieval focused on interactive search systems. As a result there is a need for new benchmark datasets and tools to enable their creation. In this demo we present the Agent Dialogue (AD) platform, an open-source system developed for researchers to perform Wizard-of-Oz CIS experiments. AD is a scalable cloud-native platform developed with Docker and Kubernetes with a flexible and modular micro-service architecture built on production-grade state-of-the-art open-source tools (Kubernetes, gRPC streaming, React, and Firebase). It supports varied front-ends and has the ability to interface with multiple existing agent systems, including Google Assistant and open-source search libraries. It includes support for centralized structured logging as well as offline relevance annotation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Brixey, Jacqueline; Artstein, Ron
ChoCo: a multimodal corpus of the Choctaw language Journal Article
In: Language Resources and Evaluation, 2020, ISSN: 1574-020X, 1574-0218.
@article{brixey_choco_2020,
title = {ChoCo: a multimodal corpus of the Choctaw language},
author = {Jacqueline Brixey and Ron Artstein},
url = {http://link.springer.com/10.1007/s10579-020-09494-5},
doi = {10.1007/s10579-020-09494-5},
issn = {1574-020X, 1574-0218},
year = {2020},
date = {2020-07-01},
journal = {Language Resources and Evaluation},
abstract = {This article presents a general use corpus for Choctaw, an American indigenous language (ISO 639-2: cho, endonym: Chahta). The corpus contains audio, video, and text resources, with many texts also translated in English. The Oklahoma Choctaw and the Mississippi Choctaw variants of the language are represented in the corpus. The data set provides documentation support for this threatened language, and allows researchers and language teachers access to a diverse collection of resources.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hilty, Donald M.; Randhawa, Karan; Maheu, Marlene M.; McKean, Alastair J. S.; Pantera, Richard; Mishkind, Matthew C.; Rizzo, Albert “Skip”
A Review of Telepresence, Virtual Reality, and Augmented Reality Applied to Clinical Care Journal Article
In: Journal of Technology in Behavioral Science, vol. 5, no. 2, pp. 178–205, 2020, ISSN: 2366-5963.
@article{hilty_review_2020,
title = {A Review of Telepresence, Virtual Reality, and Augmented Reality Applied to Clinical Care},
author = {Donald M. Hilty and Karan Randhawa and Marlene M. Maheu and Alastair J. S. McKean and Richard Pantera and Matthew C. Mishkind and Albert “Skip” Rizzo},
url = {http://link.springer.com/10.1007/s41347-020-00126-x},
doi = {10.1007/s41347-020-00126-x},
issn = {2366-5963},
year = {2020},
date = {2020-06-01},
journal = {Journal of Technology in Behavioral Science},
volume = {5},
number = {2},
pages = {178--205},
abstract = {This scoping review article explores the application of telepresence (TPr), virtual reality (VR), and augmented reality (AR) to clinical care. A literature search of key words was conducted from January 1990 through May 2019 of the following databases: PubMed/Medline, American Psychological Association PsycNET, Cochrane, Embase, PsycINFO, Web of Science, Scopus, OTSeeker, ABI/INFORM, computer-mediated communication (CMC), technology-mediated communications, Arts & Humanities Citation Index, Project Muse, ProQuest Research Library Plus, Sociological abstracts, Computers and Applied Sciences Complete and IT Source. It focused on concept areas: (1) TPr related to technologies; (2) virtual, augmented, reality, environment; (3) technology or computer-mediated communication; (4) clinical therapeutic relationship (boundaries, care, communication, connect, engagement, empathy, intimacy, trust); (5) telebehavioral health; (6) psychotherapy via technology; and (7) medicine/health care. Inclusion criteria were concept area 1 in combination with 2–7 and 2 or 3 in combination with any of 4–7. From a total of 5214 potential references, the authors found 512 eligible for full-text review and found 85 papers directly relevant to the concepts. From papers’ references and a review of books and popular literature about TPr, virtual reality (VR), and augmented reality (AR), 13 other sources of information were found. The historical evolution of TPr, VR, and AR shows that definitions, foci of studies (e.g., social neuroscience to business), and applications vary; assessments of TPr also vary widely. Studies discuss VR, AR, and TPr in medicine (e.g., rehabilitation, robotics), experimental psychology (laboratory, field, mixed), and behavioral health. Virtual environment (VE) designs aid the study of interpersonal communication and behavior, using standardized social interaction partners, virtual standardized patients, and/or virtual humans—all contingent on the participants’ experience of presence and the ability to engage. Additional research is needed to standardize experimental and clinical interventions, while maintaining ecological validity. Technology can significantly improve quality of care, access to new treatments and training, if ethical and reimbursement issues are better explored.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Saxon, Leslie; DiPaula, Brooks; Fox, Glenn R; Ebert, Rebecca; Duhaime, Josiah; Nocera, Luciano; Tran, Luan; Sobhani, Mona
Continuous Measurement of Reconnaissance Marines in Training With Custom Smartphone App and Watch: Observational Cohort Study Journal Article
In: JMIR mHealth and uHealth, vol. 8, no. 6, pp. e14116, 2020, ISSN: 2291-5222.
@article{saxon_continuous_2020,
title = {Continuous Measurement of Reconnaissance Marines in Training With Custom Smartphone App and Watch: Observational Cohort Study},
author = {Leslie Saxon and Brooks DiPaula and Glenn R Fox and Rebecca Ebert and Josiah Duhaime and Luciano Nocera and Luan Tran and Mona Sobhani},
url = {https://mhealth.jmir.org/2020/6/e14116},
doi = {10.2196/14116},
issn = {2291-5222},
year = {2020},
date = {2020-06-01},
journal = {JMIR mHealth and uHealth},
volume = {8},
number = {6},
pages = {e14116},
abstract = {Background: Specialized training for elite US military units is associated with high attrition due to intense psychological and physical demands. The need to graduate more service members without degrading performance standards necessitates the identification of factors to predict success or failure in targeted training interventions. Objective: The aim of this study was to continuously quantify the mental and physical status of trainees of an elite military unit to identify novel predictors of success in training. Methods: A total of 3 consecutive classes of a specialized training course were provided with an Apple iPhone, Watch, and specially designed mobile app. Baseline personality assessments and continuous daily measures of mental status, physical pain, heart rate, activity, sleep, hydration, and nutrition were collected from the app and Watch data. Results: A total of 115 trainees enrolled and completed the study (100% male; age: mean 22 years, SD 4 years) and 64 (55.7%) successfully graduated. Most training withdrawals (27/115, 23.5%) occurred by day 7 (mean 5.5 days, SD 3.4 days; range 1-22 days). Extraversion, positive affect personality traits, and daily psychological profiles were associated with course completion; key psychological factors could predict withdrawals 1-2 days in advance (P=.009). Conclusions: Gathering accurate and continuous mental and physical status data during elite military training is possible with early predictors of withdrawal providing an opportunity for intervention.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Huang, Zeng; Xu, Yuanlu; Lassner, Christoph; Li, Hao; Tung, Tony
ARCH: Animatable Reconstruction of Clothed Humans Inproceedings
In: Proceedings of the 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3090–3099, IEEE, Seattle, WA, USA, 2020, ISBN: 978-1-72817-168-5.
@inproceedings{huang_arch_2020,
title = {ARCH: Animatable Reconstruction of Clothed Humans},
author = {Zeng Huang and Yuanlu Xu and Christoph Lassner and Hao Li and Tony Tung},
url = {https://ieeexplore.ieee.org/document/9157750/},
doi = {10.1109/CVPR42600.2020.00316},
isbn = {978-1-72817-168-5},
year = {2020},
date = {2020-06-01},
booktitle = {Proceedings of the 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
pages = {3090--3099},
publisher = {IEEE},
address = {Seattle, WA, USA},
abstract = {In this paper, we propose ARCH (Animatable Reconstruction of Clothed Humans), a novel end-to-end framework for accurate reconstruction of animation-ready 3D clothed humans from a monocular image. Existing approaches to digitize 3D humans struggle to handle pose variations and recover details. Also, they do not produce models that are animation ready. In contrast, ARCH is a learned pose-aware model that produces detailed 3D rigged full-body human avatars from a single unconstrained RGB image. A Semantic Space and a Semantic Deformation Field are created using a parametric 3D body estimator. They allow the transformation of 2D/3D clothed humans into a canonical space, reducing ambiguities in geometry caused by pose variations and occlusions in training data. Detailed surface geometry and appearance are learned using an implicit function representation with spatial local features. Furthermore, we propose additional per-pixel supervision on the 3D reconstruction using opacity-aware differentiable rendering. Our experiments indicate that ARCH increases the fidelity of the reconstructed humans. We obtain more than 50% lower reconstruction errors for standard metrics compared to state-of-the-art methods on public datasets. We also show numerous qualitative examples of animated, high-quality reconstructed avatars unseen in the literature so far.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Malta, Loretta S.; Giosan, Cezar; Szkodny, Lauren E.; Altemus, Margaret M.; Rizzo, Albert A.; Silbersweig, David A.; Difede, JoAnn
Predictors of involuntary and voluntary emotional episodic memories of virtual reality scenarios in Veterans with and without PTSD Journal Article
In: Memory, pp. 1–17, 2020, ISSN: 0965-8211, 1464-0686.
@article{malta_predictors_2020,
title = {Predictors of involuntary and voluntary emotional episodic memories of virtual reality scenarios in Veterans with and without PTSD},
author = {Loretta S. Malta and Cezar Giosan and Lauren E. Szkodny and Margaret M. Altemus and Albert A. Rizzo and David A. Silbersweig and JoAnn Difede},
url = {https://www.tandfonline.com/doi/full/10.1080/09658211.2020.1770289},
doi = {10.1080/09658211.2020.1770289},
issn = {0965-8211, 1464-0686},
year = {2020},
date = {2020-05-01},
journal = {Memory},
pages = {1--17},
abstract = {This study investigated predictors of involuntary and voluntary memories of stressful virtual reality scenarios. Thirty-two veterans of the two Persian Gulf Wars completed verbal memory tests and diagnostic assessments. They were randomly assigned to a Recounting (16) or a Suppression (16) condition. After immersion in the VR scenarios, the Recounting group described the scenarios and the Suppression group suppressed thoughts of the scenarios. One week later, participants completed surprise voluntary memory tests and another thought suppression task. The best predictors of voluntary memory were verbal memory ability, dissociation, and to a lesser extent, physiological arousal before and after scenarios. Dissociation and physiological stress responses selectively affected memory for neutral elements. Higher distress during scenarios impaired voluntary memory but increased the frequency of involuntary memories. Physiological stress responses promoted more frequent involuntary memories immediately after the scenarios. More frequent initial involuntary memories, tonic physiological arousal, and stronger emotional responses to dangerous events predicted difficulty inhibiting involuntary memories at follow-up. The effects of thought suppression were transient and weaker than those of other variables. The findings suggest that posttraumatic amnesia and involuntary memories of adverse events are more related to memory ability and emotional and physiological stress responses than to postexposure suppression.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rayatdoost, Soheil; Rudrauf, David; Soleymani, Mohammad
Expression-Guided EEG Representation Learning for Emotion Recognition Inproceedings
In: Proceedings of the ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3222–3226, IEEE, Barcelona, Spain, 2020, ISBN: 978-1-5090-6631-5.
@inproceedings{rayatdoost_expression-guided_2020,
title = {Expression-Guided EEG Representation Learning for Emotion Recognition},
author = {Soheil Rayatdoost and David Rudrauf and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9053004/},
doi = {10.1109/ICASSP40776.2020.9053004},
isbn = {978-1-5090-6631-5},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {3222--3226},
publisher = {IEEE},
address = {Barcelona, Spain},
abstract = {Learning a joint and coordinated representation between different modalities can improve multimodal emotion recognition. In this paper, we propose a deep representation learning approach for emotion recognition from electroencephalogram (EEG) signals guided by facial electromyogram (EMG) and electrooculogram (EOG) signals. We recorded EEG, EMG and EOG signals from 60 participants who watched 40 short videos and self-reported their emotions. A cross-modal encoder that jointly learns the features extracted from facial and ocular expressions and EEG responses was designed and evaluated on our recorded data and MAHNOB-HCI, a publicly available database. We demonstrate that the proposed representation is able to improve emotion recognition performance. We also show that the learned representation can be transferred to a different database without EMG and EOG and achieve superior performance. Methods that fuse behavioral and neural responses can be deployed in wearable emotion recognition solutions, practical in situations in which computer vision expression recognition is not feasible.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2020
Olszewski, Kyle; Ceylan, Duygu; Xing, Jun; Echevarria, Jose; Chen, Zhili; Chen, Weikai; Li, Hao
Intuitive, Interactive Beard and Hair Synthesis with Generative Models Inproceedings
In: Proceedings of the CVPR 2020, IEEE, Seattle, Washington, 2020.
@inproceedings{olszewski_intuitive_2020,
title = {Intuitive, Interactive Beard and Hair Synthesis with Generative Models},
author = {Kyle Olszewski and Duygu Ceylan and Jun Xing and Jose Echevarria and Zhili Chen and Weikai Chen and Hao Li},
url = {http://arxiv.org/abs/2004.06848},
doi = {10.1109/CVPR42600.2020.00747},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of the CVPR 2020},
publisher = {IEEE},
address = {Seattle, Washington},
abstract = {We present an interactive approach to synthesizing realistic variations in facial hair in images, ranging from subtle edits to existing hair to the addition of complex and challenging hair in images of clean-shaven subjects. To circumvent the tedious and computationally expensive tasks of modeling, rendering and compositing the 3D geometry of the target hairstyle using the traditional graphics pipeline, we employ a neural network pipeline that synthesizes realistic and detailed images of facial hair directly in the target image in under one second. The synthesis is controlled by simple and sparse guide strokes from the user defining the general structural and color properties of the target hairstyle. We qualitatively and quantitatively evaluate our chosen method compared to several alternative approaches. We show compelling interactive editing results with a prototype user interface that allows novice users to progressively refine the generated image to match their desired hairstyle, and demonstrate that our approach also allows for flexible and high-fidelity scalp hair synthesis.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Huang, Zeng; Xu, Yuanlu; Lassner, Christoph; Li, Hao; Tung, Tony
ARCH: Animatable Reconstruction of Clothed Humans Inproceedings
In: Proceedings of the CVPR 2020, IEEE, Seattle, Washington, 2020.
@inproceedings{huang_arch_2020-1,
title = {ARCH: Animatable Reconstruction of Clothed Humans},
author = {Zeng Huang and Yuanlu Xu and Christoph Lassner and Hao Li and Tony Tung},
url = {https://www.computer.org/csdl/proceedings-article/cvpr/2020/716800d090/1m3nz4mKHzG},
doi = {10.1109/CVPR42600.2020.00316},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of the CVPR 2020},
publisher = {IEEE},
address = {Seattle, Washington},
abstract = {In this paper, we propose ARCH (Animatable Reconstruction of Clothed Humans), a novel end-to-end framework for accurate reconstruction of animation-ready 3D clothed humans from a monocular image. Existing approaches to digitize 3D humans struggle to handle pose variations and recover details. Also, they do not produce models that are animation ready. In contrast, ARCH is a learned pose-aware model that produces detailed 3D rigged full-body human avatars from a single unconstrained RGB image. A Semantic Space and a Semantic Deformation Field are created using a parametric 3D body estimator. They allow the transformation of 2D/3D clothed humans into a canonical space, reducing ambiguities in geometry caused by pose variations and occlusions in training data. Detailed surface geometry and appearance are learned using an implicit function representation with spatial local features. Furthermore, we propose additional per-pixel supervision on the 3D reconstruction using opacity-aware differentiable rendering. Our experiments indicate that ARCH increases the fidelity of the reconstructed humans. We obtain more than 50% lower reconstruction errors for standard metrics compared to state-of-the-art methods on public datasets. We also show numerous qualitative examples of animated, high-quality reconstructed avatars unseen in the literature so far.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Davis, Dan M; Rosenberg, Milton; Davis, Mark C
Proactive Natural Language Processing: Addressing Terminology Disparity and Team Coalescence Journal Article
In: SISO Simulation Innovation Workshop, no. 2020_SIW_39, pp. 11, 2020.
@article{davis_proactive_2020,
title = {Proactive Natural Language Processing: Addressing Terminology Disparity and Team Coalescence},
author = {Dan M Davis and Milton Rosenberg and Mark C Davis},
url = {https://www.sisostds.org/Default.aspx?tabid=105&EntryId=51197},
year = {2020},
date = {2020-04-01},
journal = {SISO Simulation Innovation Workshop},
number = {2020_SIW_39},
pages = {11},
abstract = {There is a continuing need for battlefield simulations and virtual humans. Most recently, the authors have been focused on the creation of virtual conversation environments to leverage the mentoring skills of selected individuals by creating large libraries of short video clips of advice which are then presented to the user in response to their questions. In these endeavors two issues have arisen; the inconsistency of the definitions used and the need to ameliorate the impacts of short-tour intervals on team formation. This paper will address both of these issues, review existing research, document some early research into these impediments, and discuss the similarities of these issues to those faced by the standards community writ large. They will cite and review the work of Professor Bruce Tuckman: Forming, Storming, Norming, and Performing. The benefits of using virtual humans to enhance these processes are outlined. The need for and design of proactive Natural Language Processing-enabled virtual humans and computer agents is set forth and analyzed. The paper will lay out the research goals, identify the semantic differences, and report on the potential impacts of those differences. In its totality, this paper intends to demonstrate that, in addition to the need to evangelize about the necessity of standards, this community has a lot to contribute to researchers, developers, and implementers faced with destructive differences in terminology, understanding and practice. All of this data and analysis will be presented in a way that should make sure that the insights garnered therefrom are accessible by members of this and other communities and they can be implemented and modified, as is most effective. Future advances now in development are discussed, along with the utility of these new capabilities and approaches.},
keywords = {Learning Sciences, UARC},
pubstate = {published},
tppubtype = {article}
}
Bellas, Alexandria; Perrin, Stefawn; Malone, Brandon; Rogers, Kaytlin; Lucas, Gale; Phillips, Elizabeth; Tossell, Chad; de Visser, Ewart
Rapport Building with Social Robots as a Method for Improving Mission Debriefing in Human-Robot Teams Inproceedings
In: Proceedings of the 2020 Systems and Information Engineering Design Symposium (SIEDS), pp. 160–163, IEEE, Charlottesville, VA, USA, 2020, ISBN: 978-1-72817-145-6.
@inproceedings{bellas_rapport_2020,
title = {Rapport Building with Social Robots as a Method for Improving Mission Debriefing in Human-Robot Teams},
author = {Alexandria Bellas and Stefawn Perrin and Brandon Malone and Kaytlin Rogers and Gale Lucas and Elizabeth Phillips and Chad Tossell and Ewart de Visser},
url = {https://ieeexplore.ieee.org/document/9106643/},
doi = {10.1109/SIEDS49339.2020.9106643},
isbn = {978-1-72817-145-6},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of the 2020 Systems and Information Engineering Design Symposium (SIEDS)},
pages = {160--163},
publisher = {IEEE},
address = {Charlottesville, VA, USA},
abstract = {Conflicts may arise at any time during military debriefing meetings, especially in high intensity deployed settings. When such conflicts arise, it takes time to get everyone back into a receptive state of mind so that they engage in reflective discussion rather than unproductive arguing. It has been proposed by some that the use of social robots equipped with social abilities such as emotion regulation through rapport building may help to deescalate these situations to facilitate critical operational decisions. However, in military settings, the same AI agent used in the pre-brief of a mission may not be the same one used in the debrief. The purpose of this study was to determine whether a brief rapport-building session with a social robot could create a connection between a human and a robot agent, and whether consistency in the embodiment of the robot agent was necessary for maintaining this connection once formed. We report the results of a pilot study conducted at the United States Air Force Academy which simulated a military mission (i.e., Gravity and Strike). Participants’ connection with the agent, sense of trust, and overall likeability revealed that early rapport building can be beneficial for military missions.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert "Skip"
An Adaptive Agent-Based Interface for Personalized Health Interventions Inproceedings
In: Proceedings of the 25th International Conference on Intelligent User Interfaces Companion, pp. 118–119, ACM, Cagliari Italy, 2020, ISBN: 978-1-4503-7513-9.
@inproceedings{mozgai_adaptive_2020,
title = {An Adaptive Agent-Based Interface for Personalized Health Interventions},
author = {Sharon Mozgai and Arno Hartholt and Albert "Skip" Rizzo},
url = {https://dl.acm.org/doi/10.1145/3379336.3381467},
doi = {10.1145/3379336.3381467},
isbn = {978-1-4503-7513-9},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces Companion},
pages = {118--119},
publisher = {ACM},
address = {Cagliari Italy},
abstract = {This demo introduces a novel mHealth application with an agent-based interface designed to collect multimodal data with passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin) as well as through user self-report. This mHealth application delivers personalized and adaptive multimedia content via a smartphone application specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health via novel adaptive logic-based algorithms while employing behavior change techniques (e.g., goal-setting, barrier identification, etc.). A virtual human coach leads all interactions to improve adherence.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Saredakis, Dimitrios; Szpak, Ancret; Birckhead, Brandon; Keage, Hannah A. D.; Rizzo, Albert; Loetscher, Tobias
Factors Associated With Virtual Reality Sickness in Head-Mounted Displays: A Systematic Review and Meta-Analysis Journal Article
In: Frontiers in Human Neuroscience, vol. 14, pp. 96, 2020, ISSN: 1662-5161.
@article{saredakis_factors_2020,
title = {Factors Associated With Virtual Reality Sickness in Head-Mounted Displays: A Systematic Review and Meta-Analysis},
author = {Dimitrios Saredakis and Ancret Szpak and Brandon Birckhead and Hannah A. D. Keage and Albert Rizzo and Tobias Loetscher},
url = {https://www.frontiersin.org/article/10.3389/fnhum.2020.00096/full},
doi = {10.3389/fnhum.2020.00096},
issn = {1662-5161},
year = {2020},
date = {2020-03-01},
journal = {Frontiers in Human Neuroscience},
volume = {14},
pages = {96},
abstract = {The use of head-mounted displays (HMD) for virtual reality (VR) application-based purposes including therapy, rehabilitation, and training is increasing. Despite advancements in VR technologies, many users still experience sickness symptoms. VR sickness may be influenced by technological differences within HMDs such as resolution and refresh rate, however, VR content also plays a significant role. The primary objective of this systematic review and meta-analysis was to examine the literature on HMDs that report Simulator Sickness Questionnaire (SSQ) scores to determine the impact of content. User factors associated with VR sickness were also examined. A systematic search was conducted according to PRISMA guidelines. Fifty-five articles met inclusion criteria, representing 3,016 participants (mean age range 19.5–80; 41% female). Findings show gaming content recorded the highest total SSQ mean 34.26 (95%CI 29.57–38.95). VR sickness profiles were also influenced by visual stimulation, locomotion and exposure times. Older samples (mean age ≥35 years) scored significantly lower total SSQ means than younger samples, however, these findings are based on a small evidence base as a limited number of studies included older users. No sex differences were found. Across all types of content, the pooled total SSQ mean was relatively high 28.00 (95%CI 24.66–31.35) compared with recommended SSQ cut-off scores. These findings are of relevance for informing future research and the application of VR in different contexts.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert
The Passive Sensing Agent: A Multimodal Adaptive mHealth Application Inproceedings
In: Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops), pp. 1–3, IEEE, Austin, TX, USA, 2020, ISBN: 978-1-72814-716-1.
@inproceedings{mozgai_passive_2020,
title = {The Passive Sensing Agent: A Multimodal Adaptive mHealth Application},
author = {Sharon Mozgai and Arno Hartholt and Albert Rizzo},
url = {https://ieeexplore.ieee.org/document/9156177/},
doi = {10.1109/PerComWorkshops48775.2020.9156177},
isbn = {978-1-72814-716-1},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)},
pages = {1--3},
publisher = {IEEE},
address = {Austin, TX, USA},
abstract = {We are demoing the Passive Sensing Agent (PSA), an mHealth virtual human coach, that collects multimodal data through passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin). This virtual human interface delivers adaptive multimedia content via a smartphone application that is specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health. Initially developed for the military, the PSA delivers health interventions (e.g., educational exercises, physical challenges, and performance feedback) matched to the individual user via novel adaptive logic-based algorithms while employing various behavior change techniques (e.g., goal-setting, barrier identification, rewards, modeling, etc.). A virtual human coach leads all interactions including the first-time user experience and the brief daily sessions. All interactions were specifically designed to engage and motivate the user while continuously collecting data on their cognitive, emotional, and physical fitness. This multi-component application is integrated and deployed on an iPhone and Apple Watch prototype; a civilian version is currently in development.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Meida; Feng, Andrew; Prasad, Pratusha Bhuvana; McAlinden, Ryan; Soibelman, Lucio; Enloe, Mike
Fully Automated Photogrammetric Data Segmentation and Object Information Extraction Approach for Creating Simulation Terrain Inproceedings
In: Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC), pp. 13, ResearchGate, Orlando, FL, 2020.
Abstract | Links | BibTeX | Tags: Graphics, Narrative, STG, UARC
@inproceedings{chen_fully_2020,
title = {Fully Automated Photogrammetric Data Segmentation and Object Information Extraction Approach for Creating Simulation Terrain},
author = {Meida Chen and Andrew Feng and Pratusha Bhuvana Prasad and Ryan McAlinden and Lucio Soibelman and Mike Enloe},
url = {https://www.researchgate.net/publication/338557943_Fully_Automated_Photogrammetric_Data_Segmentation_and_Object_Information_Extraction_Approach_for_Creating_Simulation_Terrain},
year = {2020},
date = {2020-01-01},
booktitle = {Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC)},
pages = {13},
publisher = {ResearchGate},
address = {Orlando, FL},
abstract = {Our previous works have demonstrated that visually realistic 3D meshes can be automatically reconstructed with low-cost, off-the-shelf unmanned aerial systems (UAS) equipped with capable cameras and efficient photogrammetric software techniques (McAlinden, Suma, Grechkin, & Enloe, 2015; Spicer, McAlinden, Conover, & Adelphi, 2016). However, such generated data do not contain semantic information/features of objects (i.e., man-made objects, vegetation, ground, object materials, etc.) and thus cannot support sophisticated user-level and system-level interaction. Considering the use case of the data in creating realistic virtual environments for training and simulations (i.e., mission planning, rehearsal, threat detection, etc.), segmenting the data and extracting object information are essential tasks. Previous studies have focused on and made valuable contributions to segmenting Light Detection and Ranging (LIDAR) generated 3D point clouds and classifying ground materials from real-world images. However, only a few studies have focused on data created using the photogrammetric technique.},
keywords = {Graphics, Narrative, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Pilly, Praveen K.; Skorheim, Steven W.; Hubbard, Ryan J.; Ketz, Nicholas A.; Roach, Shane M.; Lerner, Itamar; Jones, Aaron P.; Robert, Bradley; Bryant, Natalie B.; Hartholt, Arno; Mullins, Teagan S.; Choe, Jaehoon; Clark, Vincent P.; Howard, Michael D.
One-Shot Tagging During Wake and Cueing During Sleep With Spatiotemporal Patterns of Transcranial Electrical Stimulation Can Boost Long-Term Metamemory of Individual Episodes in Humans Journal Article
In: Frontiers in Neuroscience, vol. 13, pp. 1416, 2020, ISSN: 1662-453X.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{pilly_one-shot_2020,
title = {One-Shot Tagging During Wake and Cueing During Sleep With Spatiotemporal Patterns of Transcranial Electrical Stimulation Can Boost Long-Term Metamemory of Individual Episodes in Humans},
author = {Praveen K. Pilly and Steven W. Skorheim and Ryan J. Hubbard and Nicholas A. Ketz and Shane M. Roach and Itamar Lerner and Aaron P. Jones and Bradley Robert and Natalie B. Bryant and Arno Hartholt and Teagan S. Mullins and Jaehoon Choe and Vincent P. Clark and Michael D. Howard},
url = {https://www.frontiersin.org/article/10.3389/fnins.2019.01416/full},
doi = {10.3389/fnins.2019.01416},
issn = {1662-453X},
year = {2020},
date = {2020-01-01},
journal = {Frontiers in Neuroscience},
volume = {13},
pages = {1416},
abstract = {Targeted memory reactivation (TMR) during slow-wave oscillations (SWOs) in sleep has been demonstrated with sensory cues to achieve about 5–12% improvement in post-nap memory performance on simple laboratory tasks. But prior work has not yet addressed the one-shot aspect of episodic memory acquisition, or dealt with the presence of interference from ambient environmental cues in real-world settings. Further, TMR with sensory cues may not be scalable to the multitude of experiences over one’s lifetime. We designed a novel non-invasive non-sensory paradigm that tags one-shot experiences of minute-long naturalistic episodes in immersive virtual reality (VR) with unique spatiotemporal amplitude-modulated patterns (STAMPs) of transcranial electrical stimulation (tES). In particular, we demonstrated that these STAMPs can be reapplied as brief pulses during SWOs in sleep to achieve about 10–20% improvement in the metamemory of targeted episodes compared to the control episodes at 48 hours after initial viewing. We found that STAMPs can not only facilitate but also impair metamemory for the targeted episodes based on an interaction between presleep metamemory and the number of STAMP applications during sleep. Overnight metamemory improvements were mediated by spectral power increases following the offset of STAMPs in the slow-spindle band (8–12 Hz) for left temporal areas in the scalp electroencephalography (EEG) during sleep. These results prescribe an optimal strategy to leverage STAMPs for boosting metamemory and suggest that real-world episodic memories can be modulated in a targeted manner even with coarser, non-invasive spatiotemporal stimulation.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Zhao, Sicheng; Wang, Shangfei; Soleymani, Mohammad; Joshi, Dhiraj; Ji, Qiang
Affective Computing for Large-scale Heterogeneous Multimedia Data: A Survey Journal Article
In: ACM Transactions on Multimedia Computing, Communications, and Applications, vol. 15, no. 3s, pp. 1–32, 2020, ISSN: 1551-6857, 1551-6865.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{zhao_affective_2020,
title = {Affective Computing for Large-scale Heterogeneous Multimedia Data: A Survey},
author = {Sicheng Zhao and Shangfei Wang and Mohammad Soleymani and Dhiraj Joshi and Qiang Ji},
url = {https://dl.acm.org/doi/10.1145/3363560},
doi = {10.1145/3363560},
issn = {1551-6857, 1551-6865},
year = {2020},
date = {2020-01-01},
journal = {ACM Transactions on Multimedia Computing, Communications, and Applications},
volume = {15},
number = {3s},
pages = {1--32},
abstract = {The wide popularity of digital photography and social networks has generated a rapidly growing volume of multimedia data (i.e., images, music, and videos), resulting in a great demand for managing, retrieving, and understanding these data. Affective computing (AC) of these data can help to understand human behaviors and enable wide applications. In this article, we survey the state-of-the-art AC technologies comprehensively for large-scale heterogeneous multimedia data. We begin this survey by introducing the typical emotion representation models from psychology that are widely employed in AC. We briefly describe the available datasets for evaluating AC algorithms. We then summarize and compare the representative methods on AC of different multimedia types, i.e., images, music, videos, and multimodal data, with the focus on both handcrafted feature-based methods and deep learning methods. Finally, we discuss some challenges and future directions for multimedia affective computing.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gennaro, Mauro de; Krumhuber, Eva G.; Lucas, Gale
Effectiveness of an Empathic Chatbot in Combating Adverse Effects of Social Exclusion on Mood Journal Article
In: Frontiers in Psychology, vol. 10, pp. 3061, 2020, ISSN: 1664-1078.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@article{de_gennaro_effectiveness_2020,
title = {Effectiveness of an Empathic Chatbot in Combating Adverse Effects of Social Exclusion on Mood},
author = {Mauro de Gennaro and Eva G. Krumhuber and Gale Lucas},
url = {https://www.frontiersin.org/article/10.3389/fpsyg.2019.03061/full},
doi = {10.3389/fpsyg.2019.03061},
issn = {1664-1078},
year = {2020},
date = {2020-01-01},
journal = {Frontiers in Psychology},
volume = {10},
pages = {3061},
abstract = {From past research it is well known that social exclusion has detrimental consequences for mental health. To deal with these adverse effects, socially excluded individuals frequently turn to other humans for emotional support. While chatbots can elicit social and emotional responses on the part of the human interlocutor, their effectiveness in the context of social exclusion has not been investigated. In the present study, we examined whether an empathic chatbot can serve as a buffer against the adverse effects of social ostracism. After experiencing exclusion on social media, participants were randomly assigned either to talk with an empathetic chatbot about it (e.g., “I’m sorry that this happened to you”) or to a control condition where their responses were merely acknowledged (e.g., “Thank you for your feedback”). Replicating previous research, results revealed that experiences of social exclusion dampened the mood of participants. Interacting with an empathetic chatbot, however, appeared to have a mitigating impact. In particular, participants in the chatbot intervention condition reported higher mood than those in the control condition. Theoretical, methodological, and practical implications, as well as directions for future research, are discussed.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2019
Rosenbloom, Paul S.; Joshi, Himanshu; Ustun, Volkan
(Sub)Symbolic × (a)symmetric × (non)combinatory: A map of AI approaches spanning symbolic/statistical to neural/ML Inproceedings
In: Proceedings of the 7th Annual Conference on Advances in Cognitive Systems, pp. 113–131, Cognitive Systems Foundation, Cambridge, MA, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{rosenbloom_subsymbolic_2019,
title = {(Sub)Symbolic × (a)symmetric × (non)combinatory: A map of AI approaches spanning symbolic/statistical to neural/ML},
author = {Paul S. Rosenbloom and Himanshu Joshi and Volkan Ustun},
url = {https://drive.google.com/file/d/1Ynp75A048Mfuh7e3kf_V7hs5kFD7uHsT/view},
year = {2019},
date = {2019-12-01},
booktitle = {Proceedings of the 7th Annual Conference on Advances in Cognitive Systems},
pages = {113--131},
publisher = {Cognitive Systems Foundation},
address = {Cambridge, MA},
abstract = {The traditional symbolic versus subsymbolic dichotomy can be decomposed into three more basic dichotomies, to yield a 3D (2×2×2) space in which symbolic/statistical and neural/ML approaches to intelligence appear in opposite corners. Filling in all eight resulting cells then yields a map that spans a number of standard AI approaches plus a few that may be less familiar. Based on this map, four hypotheses are articulated, explored, and evaluated concerning its relevance to both a deeper understanding of the field of AI as a whole and the general capabilities required in complete AI/cognitive systems.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Reilly, Adam; Whitcup, Wendy; Liewer, Matt; Mozgai, Sharon
Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied AI Agents in XR Inproceedings
In: Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR), pp. 308–3084, IEEE, San Diego, CA, USA, 2019, ISBN: 978-1-72815-604-0.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{hartholt_ubiquitous_2019,
title = {Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied AI Agents in XR},
author = {Arno Hartholt and Ed Fast and Adam Reilly and Wendy Whitcup and Matt Liewer and Sharon Mozgai},
url = {https://ieeexplore.ieee.org/document/8942321/},
doi = {10.1109/AIVR46125.2019.00072},
isbn = {978-1-72815-604-0},
year = {2019},
date = {2019-12-01},
booktitle = {Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)},
pages = {308--3084},
publisher = {IEEE},
address = {San Diego, CA, USA},
abstract = {We present an architecture and framework for the development of virtual humans for a range of computing platforms, including mobile, web, Virtual Reality (VR) and Augmented Reality (AR). The framework uses a mix of in-house and commodity technologies to support audio-visual sensing, speech recognition, natural language processing, nonverbal behavior generation and realization, text-to-speech generation, and rendering. This work builds on the Virtual Human Toolkit, which has been extended to support computing platforms beyond Windows. The resulting framework maintains the modularity of the underlying architecture, allows re-use of both logic and content through cloud services, and is extensible by porting lightweight clients. We present the current state of the framework, discuss how we model and animate our characters, and offer lessons learned through several use cases, including expressive character animation in seated VR, shared space and navigation in room-scale VR, autonomous AI in mobile AR, and real-time user performance feedback based on mobile sensors in headset AR.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Hao
Technical perspective: Photorealistic facial digitization and manipulation Journal Article
In: Communications of the ACM, vol. 62, no. 1, pp. 95–95, 2019, ISSN: 0001-0782.
Abstract | Links | BibTeX | Tags: Graphics
@article{li_technical_2019,
title = {Technical perspective: Photorealistic facial digitization and manipulation},
author = {Hao Li},
url = {http://dl.acm.org/citation.cfm?doid=3301004.3292037},
doi = {10.1145/3292037},
issn = {0001-0782},
year = {2019},
date = {2019-12-01},
journal = {Communications of the ACM},
volume = {62},
number = {1},
pages = {95--95},
abstract = {For more than a decade, computer graphics (CG) researchers and visual effects experts have been fascinated with bringing photorealistic digital actors to the screen. Crossing the well-known "uncanny valley" in CG humans has been one of the most difficult and crucial challenges, due to hypersensitivity to synthetic humans lacking even the slightest and most subtle features of genuine human faces. Given sufficient resources and time, photorealistic renderings of digital characters have been achieved in recent years. Some of the most memorable cases are seen in blockbuster movies, such as The Curious Case of Benjamin Button, Furious 7, and Rogue One: A Star Wars Story, in which large teams of highly skilled digital artists use cutting-edge digitization technologies. Despite the progress of 3D-scanning solutions, facial animation systems, and advanced rendering techniques, weeks of manual work are still needed to produce even just a few seconds of animation.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
Chen, Meida; Feng, Andrew; McAlinden, Ryan; Soibelman, Lucio
Photogrammetric Point Cloud Segmentation and Object Information Extraction for Creating Virtual Environments and Simulations Journal Article
In: Journal of Management in Engineering, vol. 36, no. 2, pp. 04019046, 2019, ISSN: 0742-597X, 1943-5479.
Abstract | Links | BibTeX | Tags: STG, UARC
@article{chen_photogrammetric_2019,
title = {Photogrammetric Point Cloud Segmentation and Object Information Extraction for Creating Virtual Environments and Simulations},
author = {Meida Chen and Andrew Feng and Ryan McAlinden and Lucio Soibelman},
url = {http://ascelibrary.org/doi/10.1061/%28ASCE%29ME.1943-5479.0000737},
doi = {10.1061/(ASCE)ME.1943-5479.0000737},
issn = {0742-597X, 1943-5479},
year = {2019},
date = {2019-11-01},
journal = {Journal of Management in Engineering},
volume = {36},
number = {2},
pages = {04019046},
abstract = {Photogrammetric techniques have dramatically improved over the last few years, enabling the creation of visually compelling three-dimensional (3D) meshes using unmanned aerial vehicle imagery. These high-quality 3D meshes have attracted notice from both academicians and industry practitioners in developing virtual environments and simulations. However, photogrammetric generated point clouds and meshes do not allow both user-level and system-level interaction because they do not contain the semantic information to distinguish between objects. Thus, segmenting generated point clouds and meshes and extracting the associated object information is a necessary step. A framework for point cloud and mesh classification and segmentation is presented in this paper. The proposed framework was designed considering photogrammetric data-quality issues and provides a novel way of extracting object information, including (1) individual tree locations and related features and (2) building footprints. Experiments were conducted to rank different point descriptors and evaluate supervised machine-learning algorithms for segmenting photogrammetric generated point clouds. The proposed framework was validated using data collected at the University of Southern California (USC) and the Muscatatuck Urban Training Center (MUTC).},
keywords = {STG, UARC},
pubstate = {published},
tppubtype = {article}
}
Feng, Andrew; Gordon, Andrew S.
Latent Terrain Representations for Trajectory Prediction Inproceedings
In: Proceedings of the 1st ACM SIGSPATIAL International Workshop on Computing with Multifaceted Movement Data - MOVE'19, pp. 1–4, ACM Press, Chicago, IL, USA, 2019, ISBN: 978-1-4503-6951-0.
Abstract | Links | BibTeX | Tags: Narrative, STG, UARC
@inproceedings{feng_latent_2019,
title = {Latent Terrain Representations for Trajectory Prediction},
author = {Andrew Feng and Andrew S. Gordon},
url = {http://dl.acm.org/citation.cfm?doid=3356392.3365218},
doi = {10.1145/3356392.3365218},
isbn = {978-1-4503-6951-0},
year = {2019},
date = {2019-11-01},
booktitle = {Proceedings of the 1st ACM SIGSPATIAL International Workshop on Computing with Multifaceted Movement Data - MOVE'19},
pages = {1--4},
publisher = {ACM Press},
address = {Chicago, IL, USA},
abstract = {In natural outdoor environments, the shape of the surface terrain is an important factor in selecting a traversal path, both when operating off-road vehicles and maneuvering on foot. With the increased availability of digital elevation models for outdoor terrain, new opportunities exist to exploit this contextual information to improve automated path prediction. In this paper, we investigate predictive neural network models for outdoor trajectories that traverse terrain with known surface topography. We describe a method of encoding digital surface models as vectors in latent space using Wasserstein Autoencoders, and their use in convolutional neural networks that predict future trajectory positions from past trajectory data. We observe gains in predictive performance across three experiments, using both synthetic and recorded trajectories on real-world terrain.},
keywords = {Narrative, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chawla, Kushal; Srinivasan, Balaji Vasan; Chhaya, Niyati
Generating Formality-Tuned Summaries Using Input-Dependent Rewards Inproceedings
In: Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL), pp. 833–842, Association for Computational Linguistics, Hong Kong, China, 2019.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@inproceedings{chawla_generating_2019,
title = {Generating Formality-Tuned Summaries Using Input-Dependent Rewards},
author = {Kushal Chawla and Balaji Vasan Srinivasan and Niyati Chhaya},
url = {https://www.aclweb.org/anthology/K19-1078},
doi = {10.18653/v1/K19-1078},
year = {2019},
date = {2019-11-01},
booktitle = {Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)},
pages = {833--842},
publisher = {Association for Computational Linguistics},
address = {Hong Kong, China},
abstract = {Abstractive text summarization aims at generating human-like summaries by understanding and paraphrasing the given input content. Recent efforts based on sequence-to-sequence networks only allow the generation of a single summary. However, it is often desirable to accommodate the psycho-linguistic preferences of the intended audience while generating the summaries. In this work, we present a reinforcement learning based approach to generate formality-tailored summaries for an input article. Our novel input-dependent reward function aids in training the model with stylistic feedback on sampled and ground-truth summaries together. Once trained, the same model can generate formal and informal summary variants. Our automated and qualitative evaluations show the viability of the proposed framework.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
An, Capt Eric; Nolty, Anne A T; Amano, Stacy S; Rizzo, Albert A; Buckwalter, J Galen; Rensberger, Jared
Heart Rate Variability as an Index of Resilience Journal Article
In: Military Medicine, 2019, ISSN: 0026-4075, 1930-613X.
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{an_heart_2019,
title = {Heart Rate Variability as an Index of Resilience},
author = {Capt Eric An and Anne A T Nolty and Stacy S Amano and Albert A Rizzo and J Galen Buckwalter and Jared Rensberger},
url = {https://academic.oup.com/milmed/advance-article/doi/10.1093/milmed/usz325/5586497},
doi = {10.1093/milmed/usz325},
issn = {0026-4075, 1930-613X},
year = {2019},
date = {2019-10-01},
journal = {Military Medicine},
abstract = {Introduction: Resilience is the ability to maintain or quickly return to a stable physical and psychological equilibrium despite experiencing stressful events. Flexibility of the autonomic nervous system is particularly important for adaptive stress responses and may contribute to individual differences in resilience. Power spectrum analysis of heart rate variability (HRV) allows measurement of sympathovagal balance, which helps to evaluate autonomic flexibility. The present study investigated HRV as a broad index of resilience. Materials and Methods: Twenty-four male participants from the Army National Guard Special Forces completed psychological measures known to relate to resilience and had HRV measured while undergoing stressful virtual environment scenarios. Pearson product-moment correlations were used to explore the relationships between HRV and resilience factors. All research was conducted with the oversight of the Human Subjects Review Committee of Fuller Theological Seminary. Results: Trends toward significance were reported in order to provide results that would reasonably be expected in a study of higher power. Trends between resilience factors and HRV were found only during specific stress-inducing simulations (see Table III). Conclusion: Greater resilience to stress was associated with HRV during nonstress periods. Higher levels of resilience to traumatic events were associated with HRV during circumstances that were more stressful and emotionally distressing. Post hoc analysis revealed that specific factors including flexibility, emotional control, and spirituality were driving the relationship between general resilience and HRV following emotionally laden stressors. Less stress vulnerability was associated with HRV following intermittent brief stressors. In sum, HRV appears to represent some aspects of an individual’s overall resilience profile. Although resilience remains a complex, multidimensional construct, HRV shows promise as a global psychophysiological index of resilience. This study also offers important perspectives concerning ways to optimize both physical and psychological health.},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Difede, JoAnn; Rothbaum, Barbara O.; Rizzo, Albert A.; Wyka, Katarzyna; Spielman, Lisa; Jovanovic, Tanja; Reist, Christopher; Roy, Michael J.; Norrholm, Seth D.; Glatt, Charles; Lee, Francis
Enhanced exposure therapy for combat-related Posttraumatic Stress Disorder (PTSD): Study protocol for a randomized controlled trial Journal Article
In: Contemporary Clinical Trials, pp. 105857, 2019, ISSN: 1551-7144.
Abstract | Links | BibTeX | Tags: MedVR
@article{difede_enhanced_2019,
title = {Enhanced exposure therapy for combat-related Posttraumatic Stress Disorder (PTSD): Study protocol for a randomized controlled trial},
author = {JoAnn Difede and Barbara O. Rothbaum and Albert A. Rizzo and Katarzyna Wyka and Lisa Spielman and Tanja Jovanovic and Christopher Reist and Michael J. Roy and Seth D. Norrholm and Charles Glatt and Francis Lee},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1551714419305725},
doi = {10.1016/j.cct.2019.105857},
issn = {1551-7144},
year = {2019},
date = {2019-10-01},
journal = {Contemporary Clinical Trials},
pages = {105857},
abstract = {Background: PTSD, which has been identified in up to 23% of post-9-11 veterans, often results in a chronic, pernicious course. Thus, effective treatments are imperative. The Institute of Medicine (IOM) concluded that the only intervention for PTSD with sufficient evidence to conclude efficacy is exposure therapy. This Phase III trial compares the efficacy of exposure therapy for combat-related PTSD delivered in two different formats- via virtual reality exposure therapy (VRE) or prolonged exposure therapy (PE)- combined with D-Cycloserine (DCS), a cognitive enhancer shown to facilitate the extinction of fear.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Soleymani, Mohammad; Stefanov, Kalin; Kang, Sin-Hwa; Ondras, Jan; Gratch, Jonathan
Multimodal Analysis and Estimation of Intimate Self-Disclosure Inproceedings
In: Proceedings of the 2019 International Conference on Multimodal Interaction on - ICMI '19, pp. 59–68, ACM Press, Suzhou, China, 2019, ISBN: 978-1-4503-6860-5.
Abstract | Links | BibTeX | Tags: MxR, UARC, Virtual Humans
@inproceedings{soleymani_multimodal_2019,
title = {Multimodal Analysis and Estimation of Intimate Self-Disclosure},
author = {Mohammad Soleymani and Kalin Stefanov and Sin-Hwa Kang and Jan Ondras and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3340555.3353737},
doi = {10.1145/3340555.3353737},
isbn = {978-1-4503-6860-5},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 International Conference on Multimodal Interaction on - ICMI '19},
pages = {59--68},
publisher = {ACM Press},
address = {Suzhou, China},
abstract = {Self-disclosure to others has a proven benefit for one’s mental health. It is shown that disclosure to computers can be similarly beneficial for emotional and psychological well-being. In this paper, we analyzed verbal and nonverbal behavior associated with self-disclosure in two datasets containing structured human-human and human-agent interviews from more than 200 participants. Correlation analysis of verbal and nonverbal behavior revealed that linguistic features such as affective and cognitive content in verbal behavior, and nonverbal behavior such as head gestures, are associated with intimate self-disclosure. A multimodal deep neural network was developed to automatically estimate the level of intimate self-disclosure from verbal and nonverbal behavior. Between modalities, verbal behavior was the best modality for estimating self-disclosure within-corpora, achieving r = 0.66. However, cross-corpus evaluation demonstrated that nonverbal behavior can outperform the language modality. Such automatic models can be deployed in interactive virtual agents or social robots to evaluate rapport and guide their conversational strategy.},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert “Skip”
Systematic Representative Design and Clinical Virtual Reality Journal Article
In: Psychological Inquiry, vol. 30, no. 4, pp. 231–245, 2019, ISSN: 1047-840X, 1532-7965.
Abstract | Links | BibTeX | Tags: MedVR, UARC, Virtual Humans
@article{mozgai_systematic_2019,
title = {Systematic Representative Design and Clinical Virtual Reality},
author = {Sharon Mozgai and Arno Hartholt and Albert “Skip” Rizzo},
url = {https://www.tandfonline.com/doi/full/10.1080/1047840X.2019.1693873},
doi = {10.1080/1047840X.2019.1693873},
issn = {1047-840X, 1532-7965},
year = {2019},
date = {2019-10-01},
journal = {Psychological Inquiry},
volume = {30},
number = {4},
pages = {231--245},
abstract = {The authors of the article, “Causal Inference in Generalizable Environments: Systematic Representative Design”, boldly announce their core point in the opening line of the abstract stating that, “Causal inference and generalizability both matter.” While a surface glance might suggest this to be a simple notion, a closer examination reveals the complexity of what they are proposing. This complexity is apparent when one considers that the bulk of human experimental research has always been challenged in its inability to concurrently deliver on both of these aims. This is no slight on the tens of thousands of human researchers and behavioral scientists who have devoted long careers to highly controlled human psychological and social science laboratory research. Rather, it reflects the sheer enormity of the challenges for conducting human studies designed to specify human function with physics-informed lab methods, while at the same time producing results that lead to enhanced understanding and prediction of how people will operate in the complex and ever-changing contexts that make up everyday life. At the core of this issue is a methodological and philosophical challenge that is relevant to all areas of human subjects’ research, beyond the social science focus of the Miller et al. (this issue) article. It is our aim to discuss the central topics in their article through the lens of our own work using Virtual/Augmented Reality and Virtual Human simulation technologies for clinical and training applications.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Haring, Kerstin S.; Tobias, Jessica; Waligora, Justin; Phillips, Elizabeth; Tenhundfeld, Nathan L; Lucas, Gale; Visser, Ewart J; Gratch, Jonathan; Tossell, Chad
Conflict Mediation in Human-Machine Teaming: Using a Virtual Agent to Support Mission Planning and Debriefing Inproceedings
In: Proceedings of the 2019 28th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), IEEE, New Delhi, India, 2019.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{haring_conflict_2019,
title = {Conflict Mediation in Human-Machine Teaming: Using a Virtual Agent to Support Mission Planning and Debriefing},
author = {Kerstin S. Haring and Jessica Tobias and Justin Waligora and Elizabeth Phillips and Nathan L Tenhundfeld and Gale Lucas and Ewart J Visser and Jonathan Gratch and Chad Tossell},
url = {https://ieeexplore.ieee.org/abstract/document/8956414},
doi = {10.1109/RO-MAN46459.2019.8956414},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 28th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
publisher = {IEEE},
address = {New Delhi, India},
abstract = {Socially intelligent artificial agents and robots are anticipated to become ubiquitous in home, work, and military environments. With the addition of such agents to human teams, it is crucial to evaluate their role in the planning, decision making, and conflict mediation processes. We conducted a study to evaluate the utility of a virtual agent that provided mission planning support in a three-person human team during a military strategic mission planning scenario. The team consisted of a human team lead who made the final decisions and three supporting roles: two humans and the artificial agent. The mission outcome was experimentally designed to fail and introduced a conflict between the human team members and the leader. This conflict was mediated by the artificial agent during the debriefing process through the “discuss or debate” and “open communication” strategies of conflict resolution [1]. Our results showed that our teams experienced conflict. The teams also responded socially to the virtual agent, although they did not find the agent beneficial to the mediation process. Finally, teams collaborated well together, and perceived task proficiency increased for team leaders. Socially intelligent agents show potential for conflict mediation, but need careful design and implementation to improve team processes and collaboration.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ringeval, Fabien; Messner, Eva-Maria; Song, Siyang; Liu, Shuo; Zhao, Ziping; Mallol-Ragolta, Adria; Ren, Zhao; Soleymani, Mohammad; Pantic, Maja; Schuller, Björn; Valstar, Michel; Cummins, Nicholas; Cowie, Roddy; Tavabi, Leili; Schmitt, Maximilian; Alisamir, Sina; Amiriparian, Shahin
AVEC 2019 Workshop and Challenge: State-of-Mind, Detecting Depression with AI, and Cross-Cultural Affect Recognition Inproceedings
In: Proceedings of the 9th International on Audio/Visual Emotion Challenge and Workshop - AVEC '19, pp. 3–12, ACM Press, Nice, France, 2019, ISBN: 978-1-4503-6913-8.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{ringeval_avec_2019,
title = {AVEC 2019 Workshop and Challenge: State-of-Mind, Detecting Depression with AI, and Cross-Cultural Affect Recognition},
author = {Fabien Ringeval and Eva-Maria Messner and Siyang Song and Shuo Liu and Ziping Zhao and Adria Mallol-Ragolta and Zhao Ren and Mohammad Soleymani and Maja Pantic and Björn Schuller and Michel Valstar and Nicholas Cummins and Roddy Cowie and Leili Tavabi and Maximilian Schmitt and Sina Alisamir and Shahin Amiriparian},
url = {http://dl.acm.org/citation.cfm?doid=3347320.3357688},
doi = {10.1145/3347320.3357688},
isbn = {978-1-4503-6913-8},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 9th International on Audio/Visual Emotion Challenge and Workshop - AVEC '19},
pages = {3--12},
publisher = {ACM Press},
address = {Nice, France},
abstract = {The Audio/Visual Emotion Challenge and Workshop (AVEC 2019) 'State-of-Mind, Detecting Depression with AI, and Cross-cultural Affect Recognition' is the ninth competition event aimed at the comparison of multimedia processing and machine learning methods for automatic audiovisual health and emotion analysis, with all participants competing strictly under the same conditions. The goal of the Challenge is to provide a common benchmark test set for multimodal information processing and to bring together the health and emotion recognition communities, as well as the audiovisual processing communities, to compare the relative merits of various approaches to health and emotion recognition from real-life data. This paper presents the major novelties introduced this year, the challenge guidelines, the data used, and the performance of the baseline systems on the three proposed tasks: state-of-mind recognition, depression assessment with AI, and cross-cultural affect sensing, respectively.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tavabi, Leili; Stefanov, Kalin; Gilani, Setareh Nasihati; Traum, David; Soleymani, Mohammad
Multimodal Learning for Identifying Opportunities for Empathetic Responses Inproceedings
In: Proceedings of the 2019 International Conference on Multimodal Interaction, pp. 95–104, ACM, Suzhou China, 2019, ISBN: 978-1-4503-6860-5.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{tavabi_multimodal_2019,
title = {Multimodal Learning for Identifying Opportunities for Empathetic Responses},
author = {Leili Tavabi and Kalin Stefanov and Setareh Nasihati Gilani and David Traum and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3340555.3353750},
doi = {10.1145/3340555.3353750},
isbn = {978-1-4503-6860-5},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 International Conference on Multimodal Interaction},
pages = {95--104},
publisher = {ACM},
address = {Suzhou China},
abstract = {Embodied interactive agents possessing emotional intelligence and empathy can create natural and engaging social interactions. Providing appropriate responses by interactive virtual agents requires the ability to perceive users’ emotional states. In this paper, we study and analyze behavioral cues that indicate an opportunity to provide an empathetic response. Emotional tone in language in addition to facial expressions are strong indicators of dramatic sentiment in conversation that warrant an empathetic response. To automatically recognize such instances, we develop a multimodal deep neural network for identifying opportunities when the agent should express positive or negative empathetic responses. We train and evaluate our model using audio, video and language from human-agent interactions in a wizard-of-Oz setting, using the wizard’s empathetic responses and annotations collected on Amazon Mechanical Turk as ground-truth labels. Our model outperforms a text-based baseline, achieving an F1-score of 0.71 on a three-class classification. We further investigate the results and evaluate the capability of such a model to be deployed for real-world human-agent interactions.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Khashe, Saba; Lucas, Gale; Becerik-Gerber, Burcin; Gratch, Jonathan
Establishing Social Dialog between Buildings and Their Users Journal Article
In: International Journal of Human–Computer Interaction, vol. 35, no. 17, pp. 1545–1556, 2019, ISSN: 1044-7318, 1532-7590.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{khashe_establishing_2019,
title = {Establishing Social Dialog between Buildings and Their Users},
author = {Saba Khashe and Gale Lucas and Burcin Becerik-Gerber and Jonathan Gratch},
url = {https://www.tandfonline.com/doi/full/10.1080/10447318.2018.1555346},
doi = {10.1080/10447318.2018.1555346},
issn = {1044-7318, 1532-7590},
year = {2019},
date = {2019-10-01},
journal = {International Journal of Human–Computer Interaction},
volume = {35},
number = {17},
pages = {1545--1556},
abstract = {Behavioral intervention strategies have yet to become successful in the development of initiatives to foster pro-environmental behaviors in buildings. In this paper, we explored the potential of increasing the effectiveness of requests aiming to promote pro-environmental behaviors by engaging users in a social dialog, given the effects of two possible personas that are more related to the buildings (i.e., building vs. building manager). We tested our hypotheses and evaluated our findings in virtual and physical environments and found similar effects in both environments. Our results showed that social dialog involvement persuaded respondents to perform more pro-environmental actions. However, these effects were significant when the requests were delivered by an agent representing the building. In addition, these strategies were not equally effective across all types of people, and their effects varied for people with different characteristics. Our findings provide useful design choices for persuasive technologies aiming to promote pro-environmental behaviors.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon; Fast, Ed; Liewer, Matt; Reilly, Adam; Whitcup, Wendy; Rizzo, Albert "Skip"
Virtual Humans in Augmented Reality: A First Step towards Real-World Embedded Virtual Roleplayers Inproceedings
In: Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19, pp. 205–207, ACM Press, Kyoto, Japan, 2019, ISBN: 978-1-4503-6922-0.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{hartholt_virtual_2019,
title = {Virtual Humans in Augmented Reality: A First Step towards Real-World Embedded Virtual Roleplayers},
author = {Arno Hartholt and Sharon Mozgai and Ed Fast and Matt Liewer and Adam Reilly and Wendy Whitcup and Albert "Skip" Rizzo},
url = {http://dl.acm.org/citation.cfm?doid=3349537.3352766},
doi = {10.1145/3349537.3352766},
isbn = {978-1-4503-6922-0},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19},
pages = {205--207},
publisher = {ACM Press},
address = {Kyoto, Japan},
abstract = {We present one of the first applications of virtual humans in Augmented Reality (AR), which allows young adults with Autism Spectrum Disorder (ASD) the opportunity to practice job interviews. It uses the Magic Leap’s AR hardware sensors to provide users with immediate feedback on six different metrics, including eye gaze, blink rate and head orientation. The system provides two characters, with three conversational modes each. Ported from an existing desktop application, the main development lessons learned were: 1) provide users with navigation instructions in the user interface, 2) avoid dark colors as they are rendered transparently, 3) use dynamic gaze so characters maintain eye contact with the user, 4) use hardware sensors like eye gaze to provide user feedback, and 5) use surface detection to place characters dynamically in the world.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Wang, Ning; Kamireddy, Sreekar
A Markovian Method for Predicting Trust Behavior in Human-Agent Interaction Inproceedings
In: Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19, pp. 171–178, ACM Press, Kyoto, Japan, 2019, ISBN: 978-1-4503-6922-0.
Abstract | Links | BibTeX | Tags: MedVR, Social Simulation, UARC
@inproceedings{pynadath_markovian_2019,
title = {A Markovian Method for Predicting Trust Behavior in Human-Agent Interaction},
author = {David V. Pynadath and Ning Wang and Sreekar Kamireddy},
url = {http://dl.acm.org/citation.cfm?doid=3349537.3351905},
doi = {10.1145/3349537.3351905},
isbn = {978-1-4503-6922-0},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19},
pages = {171--178},
publisher = {ACM Press},
address = {Kyoto, Japan},
abstract = {Trust calibration is critical to the success of human-agent interaction (HAI). However, individual differences are ubiquitous in people’s trust relationships with autonomous systems. To assist its heterogeneous human teammates calibrate their trust in it, an agent must first dynamically model them as individuals, rather than communicating with them all in the same manner. It can then generate expectations of its teammates’ behavior and optimize its own communication based on the current state of the trust relationship it has with them. In this work, we examine how an agent can generate accurate expectations given observations of only the teammate’s trust-related behaviors (e.g., did the person follow or ignore its advice?). In addition to this limited input, we also seek a specific output: accurately predicting its human teammate’s future trust behavior (e.g., will the person follow or ignore my next suggestion?). In this investigation, we construct a model capable of generating such expectations using data gathered in a human-subject study of behavior in a simulated human-robot interaction (HRI) scenario. We first analyze the ability of measures from a presurvey on trust-related traits to accurately predict subsequent trust behaviors. However, as the interaction progresses, this effect is dwarfed by the direct experience. We therefore analyze the ability of sequences of prior behavior by the teammate to accurately predict subsequent trust behaviors. Such behavioral sequences have been shown to be indicative of the subjective beliefs of other teammates, and we show here that they have predictive power as well.},
keywords = {MedVR, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Xing, Jun; Nagano, Koki; Chen, Weikai; Xu, Haotian; Wei, Li-yi; Zhao, Yajie; Lu, Jingwan; Kim, Byungmoon; Li, Hao
HairBrush for Immersive Data-Driven Hair Modeling Inproceedings
In: Proceedings of the 32nd Annual ACM Symposium on User Interface Software and Technology - UIST '19, pp. 263–279, ACM Press, New Orleans, LA, USA, 2019, ISBN: 978-1-4503-6816-2.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{xing_hairbrush_2019,
title = {HairBrush for Immersive Data-Driven Hair Modeling},
author = {Jun Xing and Koki Nagano and Weikai Chen and Haotian Xu and Li-yi Wei and Yajie Zhao and Jingwan Lu and Byungmoon Kim and Hao Li},
url = {http://dl.acm.org/citation.cfm?doid=3332165.3347876},
doi = {10.1145/3332165.3347876},
isbn = {978-1-4503-6816-2},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 32nd Annual ACM Symposium on User Interface Software and Technology - UIST '19},
pages = {263--279},
publisher = {ACM Press},
address = {New Orleans, LA, USA},
abstract = {While hair is an essential component of virtual humans, it is also one of the most challenging digital assets to create. Existing automatic techniques lack the generality and flexibility to create rich hair variations, while manual authoring interfaces often require considerable artistic skills and efforts, especially for intricate 3D hair structures that can be difficult to navigate. We propose an interactive hair modeling system that can help create complex hairstyles in minutes or hours that would otherwise take much longer with existing tools. Modelers, including novice users, can focus on the overall hairstyles and local hair deformations, as our system intelligently suggests the desired hair parts. Our method combines the flexibility of manual authoring and the convenience of data-driven automation. Since hair contains intricate 3D structures such as buns, knots, and strands, they are inherently challenging to create using traditional 2D interfaces. Our system provides a new 3D hair authoring interface for immersive interaction in virtual reality (VR). Users can draw high-level guide strips, from which our system predicts the most plausible hairstyles via a deep neural network trained from a professionally curated dataset. Each hairstyle in our dataset is composed of multiple variations, serving as blend-shapes to fit the user drawings via global blending and local deformation. The fitted hair models are visualized as interactive suggestions that the user can select, modify, or ignore. We conducted a user study to confirm that our system can significantly reduce manual labor while improving the output quality for modeling a variety of head and facial hairstyles that are challenging to create via existing techniques.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Carla; Yanov, Volodymyr; Traum, David; Georgila, Kallirroi
A Wizard of Oz Data Collection Framework for Internet of Things Dialogues Inproceedings
In: Proceedings of the 23rd Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts, pp. 3, SEMDIAL, London, UK, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{gordon_wizard_2019,
title = {A Wizard of Oz Data Collection Framework for Internet of Things Dialogues},
author = {Carla Gordon and Volodymyr Yanov and David Traum and Kallirroi Georgila},
url = {http://semdial.org/anthology/papers/Z/Z19/Z19-4024/},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 23rd Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts},
pages = {3},
publisher = {SEMDIAL},
address = {London, UK},
abstract = {We describe a novel Wizard of Oz dialogue data collection framework in the Internet of Things domain. Our tool is designed for collecting dialogues between a human user and 8 different system profiles, each with a different communication strategy. We then describe the data collection conducted with this tool, as well as the dialogue corpus that was generated.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hoegen, Rens; Gratch, Jonathan; Parkinson, Brian; Shore, Danielle
Signals of Emotion Regulation in a Social Dilemma: Detection from Face and Context Inproceedings
In: Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 7, IEEE, Cambridge, UK, 2019.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{hoegen_signals_2019,
title = {Signals of Emotion Regulation in a Social Dilemma: Detection from Face and Context},
author = {Rens Hoegen and Jonathan Gratch and Brian Parkinson and Danielle Shore},
url = {https://ieeexplore.ieee.org/xpl/conhome/8911251/proceeding},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII)},
pages = {7},
publisher = {IEEE},
address = {Cambridge, UK},
abstract = {In social decision-making tasks, facial expressions are informative signals that indicate motives and intentions. As people are aware that their expressions influence partner behavior, expressions may be strategically regulated in competitive environments to influence a social partner’s decisionmaking. In this work, we examine facial expressions and their strategic regulation within the context of an iterated prisoner’s dilemma. Utilizing video-cued rating procedures, we examine several key questions about the functionality of facial expressions in social decision-making. First, we assess the extent to which emotion and expression regulation are accurately detected from dynamic facial expressions in interpersonal interactions. Second, we explore which facial cues are utilized to evaluate emotion and regulation information. Finally, we investigate the role of context in participants’ emotion and regulation judgments. Results show that participants accurately perceive facial emotion and expression regulation, although they are better at recognizing emotions than regulation. Using automated expression analysis and stepwise regression, we constructed models that use action units from participant videos to predict their video-cued emotion and regulation ratings. We show that these models perform similarly and, in some cases, better than participants do. Moreover, these models demonstrate that game state information improves predictive accuracy, thus implying that context information is important in the evaluation of facial expressions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Gratch, Jonathan; Aydogan, Reyhan; Baarslag, Tim; Jonker, Catholijn M.
The Likeability-Success Tradeoff: Results of the 2nd Annual Human-Agent Automated Negotiating Agents Competition Inproceedings
In: Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII), IEEE, Cambridge, UK, 2019.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mell_likeability-success_2019,
title = {The Likeability-Success Tradeoff: Results of the 2nd Annual Human-Agent Automated Negotiating Agents Competition},
author = {Johnathan Mell and Jonathan Gratch and Reyhan Aydogan and Tim Baarslag and Catholijn M. Jonker},
url = {https://ieeexplore.ieee.org/xpl/conhome/8911251/proceeding},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII)},
publisher = {IEEE},
address = {Cambridge, UK},
abstract = {We present the results of the 2nd Annual Human-Agent League of the Automated Negotiating Agent Competition. Building on the success of the previous year’s results, a new challenge was issued that focused on exploring the likeability-success tradeoff in negotiations. By examining a series of repeated negotiations, we show how actions may affect the relationship between automated negotiating agents and their human competitors over time. The results presented herein support a more complex view of human-agent negotiation and the capture of integrative potential (win-win solutions). We show that, although likeability is generally seen as a tradeoff to winning, agents are able to remain well-liked while winning if integrative potential is not discovered in a given negotiation. The results indicate that the top-performing agent in this competition took advantage of this loophole by engaging in favor exchange across negotiations (cross-game logrolling). These exploratory results provide information about the effects of different submitted “black-box” agents in human-agent negotiation and provide a state-of-the-art benchmark for human-agent design.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lei, Su; Gratch, Jonathan
Smiles Signal Surprise in a Social Dilemma Inproceedings
In: Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII), IEEE, Cambridge, UK, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lei_smiles_2019,
title = {Smiles Signal Surprise in a Social Dilemma},
author = {Su Lei and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/xpl/conhome/8911251/proceeding},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII)},
publisher = {IEEE},
address = {Cambridge, UK},
abstract = {This study examines spontaneous facial expressions in an iterated prisoner’s dilemma with financial stakes. Our goal was to identify typical facial expressions associated with key events during the interaction (e.g., cooperation or exploitation) and contrast these reactions with alternative theories of the meaning of facial expressions. Specifically, we examined if expressions reflect individual self-interest (e.g., winning) or social motives (e.g., promoting fairness) and the extent to which surprise might moderate the intensity of facial displays. In contrast to predictions of scientific and folk theories of expression, smiles were the only expressions consistently elicited, regardless of the reward or fairness of outcomes. Further, these smiles serve as a reliable indicator of the surprisingness of the event, but not its pleasure (contradicting research on both the meaning of smiles and indicators of surprise). To our knowledge, this is the first study to indicate that smiles signal surprise.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M.; Rizzo, Albert; Gratch, Jonathan; Scherer, Stefan; Stratou, Giota; Boberg, Jill; Morency, Louis-Philippe
Reporting Mental Health Symptoms: Breaking Down Barriers to Care with Virtual Human Interviewers Incollection
In: The Impact of Virtual and Augmented Reality on Individuals and Society, pp. 256–264, Frontiers Media SA, 2019.
Abstract | Links | BibTeX | Tags: MedVR, UARC, Virtual Humans
@incollection{lucas_reporting_2019,
title = {Reporting Mental Health Symptoms: Breaking Down Barriers to Care with Virtual Human Interviewers},
author = {Gale M. Lucas and Albert Rizzo and Jonathan Gratch and Stefan Scherer and Giota Stratou and Jill Boberg and Louis-Philippe Morency},
url = {https://books.google.com/books?hl=en&lr=&id=N724DwAAQBAJ&oi=fnd&pg=PP1&dq=The+Impact+of+Virtual+and+Augmented+Reality+on+Individuals+and+Society&ots=ZMD1P9T-K5&sig=Qqh7iHZ4Xq2iRyYecrECHwNNE38#v=onepage&q=The%20Impact%20of%20Virtual%20and%20Augmented%20Reality%20on%20Individuals%20and%20Society&f=false},
year = {2019},
date = {2019-09-01},
booktitle = {The Impact of Virtual and Augmented Reality on Individuals and Society},
pages = {256--264},
publisher = {Frontiers Media SA},
abstract = {A common barrier to healthcare for psychiatric conditions is the stigma associated with these disorders. Perceived stigma prevents many from reporting their symptoms. Stigma is a particularly pervasive problem among military service members, preventing them from reporting symptoms of combat-related conditions like posttraumatic stress disorder (PTSD). However, research shows increased reporting by service members when anonymous assessments are used. For example, service members report more symptoms of PTSD when they anonymously answer the Post-Deployment Health Assessment (PDHA) symptom checklist compared to the official PDHA, which is identifiable and linked to their military records. To investigate the factors that influence reporting of psychological symptoms by service members, we used a transformative technology: automated virtual humans that interview people about their symptoms. Such virtual human interviewers allow simultaneous use of two techniques for eliciting disclosure that would otherwise be incompatible; they afford anonymity while also building rapport. We examined whether virtual human interviewers could increase disclosure of mental health symptoms among active-duty service members who had just returned from a year-long deployment in Afghanistan. Service members reported more symptoms during a conversation with a virtual human interviewer than on the official PDHA. They also reported more to a virtual human interviewer than on an anonymized PDHA. A second, larger sample of active-duty and former service members found a similar effect that approached statistical significance. Because respondents in both studies shared more with virtual human interviewers than an anonymized PDHA—even though both conditions control for stigma and ramifications for service members’ military records—virtual human interviewers that build rapport may provide a superior option to encourage reporting.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Bond, William F; Lynch, Theresa J.; Mischler, Matthew J.; Fish, Jessica L.; McGarvey, Jeremy S.; Taylor, Jason T.; Kumar, Dipen M.; Mou, Kyle M.; Ebert-Allen, Rebecca A.; Mahale, Dilip N.; Talbot, Thomas B.; Aiyer, Meenakshy
Virtual Standardized Patient Simulation Journal Article
In: Simulation in Healthcare, vol. 14, no. 4, pp. 10, 2019.
@article{bond_virtual_2019,
title = {Virtual Standardized Patient Simulation},
author = {William F Bond and Theresa J. Lynch and Matthew J. Mischler and Jessica L. Fish and Jeremy S. McGarvey and Jason T. Taylor and Dipen M. Kumar and Kyle M. Mou and Rebecca A. Ebert-Allen and Dilip N. Mahale and Thomas B. Talbot and Meenakshy Aiyer},
url = {https://journals.lww.com/simulationinhealthcare/Fulltext/2019/08000/Virtual_Standardized_Patient_Simulation__Case.6.aspx#pdf-link},
year = {2019},
date = {2019-08-01},
journal = {Simulation in Healthcare},
volume = {14},
number = {4},
pages = {10},
abstract = {Introduction: High-value care (HVC) suggests that good history taking and physical examination should lead to risk stratification that drives the use or withholding of diagnostic testing. This study describes the development of a series of virtual standardized patient (VSP) cases and provides preliminary evidence that supports their ability to provide experiential learning in HVC. Methods: This pilot study used VSPs, or natural language processing–based patient avatars, within the USC Standard Patient platform. Faculty consensus was used to develop the cases, including the optimal diagnostic testing strategies, treatment options, and scored content areas. First-year resident physician learners experienced two 90-minute didactic sessions before completing the cases in a computer laboratory, using typed text to interview the avatar for history taking, then completing physical examination, differential diagnosis, diagnostic testing, and treatment modules for each case. Learners chose a primary and 2 alternative “possible” diagnoses from a list of 6 to 7 choices, diagnostic testing options from an extensive list, and treatments from a brief list ranging from 6 to 9 choices. For the history-taking module, both faculty and the platform scored the learners, and faculty assessed the appropriateness of avatar responses. Four randomly selected learner-avatar interview transcripts for each case were double rated by faculty for interrater reliability calculations. Intraclass correlations were calculated for interrater reliability, and Spearman ρ was used to determine the correlation between the platform and faculty ranking of learners' history-taking scores. Results: Eight VSP cases were experienced by 14 learners. Investigators reviewed 112 transcripts (4646 learner query-avatar responses). Interrater reliability means were 0.87 for learner query scoring and 0.83 for avatar response. Mean learner success for history taking was scored by the faculty at 57% and by the platform at 51% (ρ correlation of learner rankings = 0.80).},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Lycan, Bethany; Artstein, Ron
Direct and Mediated Interaction with a Holocaust Survivor Inproceedings
In: Proceedings of the Advanced Social Interaction with Agents: 8th International Workshop on Spoken Dialog Systems, pp. 161–167, Springer, Cham, Switzerland, 2019.
@inproceedings{lycan_direct_2019,
title = {Direct and Mediated Interaction with a Holocaust Survivor},
author = {Bethany Lycan and Ron Artstein},
url = {https://doi.org/10.1007/978-3-319-92108-2_17},
doi = {10.1007/978-3-319-92108-2_17},
year = {2019},
date = {2019-08-01},
booktitle = {Proceedings of the Advanced Social Interaction with Agents: 8th International Workshop on Spoken Dialog Systems},
volume = {510},
pages = {161--167},
publisher = {Springer},
address = {Cham, Switzerland},
series = {Lecture Notes in Electrical Engineering},
abstract = {The New Dimensions in Testimony dialogue system was placed in two museums under two distinct conditions: docent-led group interaction, and free interaction with visitors. Analysis of the resulting conversations shows that docent-led interactions have a lower vocabulary and a higher proportion of user utterances that directly relate to the system’s subject matter, while free interaction is more personal in nature. Under docent-led interaction the system gives a higher proportion of direct appropriate responses, but overall correct system behavior is about the same in both conditions because the free interaction condition has more instances where the correct system behavior is to avoid a direct response.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Talbot, Thomas; Rizzo, Albert “Skip”
Virtual Human Standardized Patients for Clinical Training Incollection
In: Virtual Reality for Psychological and Neurocognitive Interventions, pp. 387–405, Springer New York, New York, NY, 2019, ISBN: 978-1-4939-9480-9 978-1-4939-9482-3.
@incollection{talbot_virtual_2019-1,
title = {Virtual Human Standardized Patients for Clinical Training},
author = {Thomas Talbot and Albert “Skip” Rizzo},
url = {http://link.springer.com/10.1007/978-1-4939-9482-3_17},
doi = {10.1007/978-1-4939-9482-3_17},
isbn = {978-1-4939-9480-9 978-1-4939-9482-3},
year = {2019},
date = {2019-08-01},
booktitle = {Virtual Reality for Psychological and Neurocognitive Interventions},
pages = {387--405},
publisher = {Springer New York},
address = {New York, NY},
abstract = {Since Dr. Howard Barrows (1964) introduced the human standardized patient in 1963, there have been attempts to game a computer-based simulacrum of a patient encounter; the first being a heart attack simulation using the online PLATO system (Bitzer M, Nursing Research 15:144–150, 1966). With the now ubiquitous use of computers in medicine, interest and effort have been expended in the area of Virtual Patients (VPs). There are excellent summaries in the literature (Talbot TB, International Journal of Gaming and Computer Mediated Simulations 4:1–19, 2012) that explain the different types of virtual patients along with their best-case applications, strengths and limitations.},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Bonial, Claire; Donatelli, Lucia; Lukin, Stephanie M.; Tratz, Stephen; Artstein, Ron; Traum, David; Voss, Clare R.
Augmenting Abstract Meaning Representation for Human-Robot Dialogue Inproceedings
In: Proceedings of the First International Workshop on Designing Meaning Representations (DMR), pp. 199–210, Association of Computational Linguistics, Florence, Italy, 2019.
@inproceedings{bonial_augmenting_2019,
title = {Augmenting Abstract Meaning Representation for Human-Robot Dialogue},
author = {Claire Bonial and Lucia Donatelli and Stephanie M. Lukin and Stephen Tratz and Ron Artstein and David Traum and Clare R. Voss},
url = {https://www.aclweb.org/anthology/W19-3322},
year = {2019},
date = {2019-08-01},
booktitle = {Proceedings of the First International Workshop on Designing Meaning Representations (DMR)},
pages = {199--210},
publisher = {Association of Computational Linguistics},
address = {Florence, Italy},
abstract = {We detail refinements made to Abstract Meaning Representation (AMR) that make the representation more suitable for supporting a situated dialogue system, where a human remotely controls a robot for purposes of search and rescue and reconnaissance. We propose 36 augmented AMRs that capture speech acts, tense and aspect, and spatial information. This linguistic information is vital for representing important distinctions, for example whether the robot has moved, is moving, or will move. We evaluate two existing AMR parsers for their performance on dialogue data. We also outline a model for graph-to-graph conversion, in which output from AMR parsers is converted into our refined AMRs. The design scheme presented here, though task-specific, is extendable for broad coverage of speech acts using AMR in future task-independent work.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lahav, Orly; Talis, Vadim; Cinamon, Rachel Gali; Rizzo, Albert
Virtual interactive consulting agent to support freshman students in transition to higher education Journal Article
In: Journal of Computing in Higher Education, pp. 1–35, 2019, ISSN: 1042-1726, 1867-1233.
@article{lahav_virtual_2019,
title = {Virtual interactive consulting agent to support freshman students in transition to higher education},
author = {Orly Lahav and Vadim Talis and Rachel Gali Cinamon and Albert Rizzo},
url = {http://link.springer.com/10.1007/s12528-019-09237-8},
doi = {10.1007/s12528-019-09237-8},
issn = {1042-1726, 1867-1233},
year = {2019},
date = {2019-08-01},
journal = {Journal of Computing in Higher Education},
pages = {1--35},
abstract = {The virtual interactive consulting agent system is an online virtual career center that supports freshman students in transition to higher education. This virtual counseling system, based on accumulative empirical knowledge for working students and knowledge about effective career intervention, aims to guide first-year university students in combining study and work effectively. Three main aspects of career interventions are supplied by this virtual interactive consulting agent system: personal assessment, information, and personal encouragement and relatedness. The virtual interactive consulting agent is based on the SimCoach system. The current research includes two studies that examine acceptability and satisfaction from two perspectives: that of the counselors (the experts) and of the consultees (the target consumers). Both studies included 87 participants divided into two research groups: 45 counselors and 42 counseled freshman students. The data were collected through four data collection tools: an acceptability and satisfaction questionnaire, an open-ended question, Google Docs, and screen recording applications. The participants’ answers were analyzed using quantitative software. The results show that the majority of the counselors were satisfied with the usability of the system but not with the process of counseling through the virtual agent, with some expressing concern about the impact on the profession. In contrast, most of the consultees were satisfied with the counseling process and some stated that the virtual agent helped them to determine how to integrate work and study more effectively.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Parsons, Thomas D.; Rizzo, Albert “Skip”
A Review of Virtual Classroom Environments for Neuropsychological Assessment Incollection
In: Virtual Reality for Psychological and Neurocognitive Interventions, pp. 247–265, Springer New York, New York, NY, 2019, ISBN: 978-1-4939-9480-9 978-1-4939-9482-3.
@incollection{parsons_review_2019,
title = {A Review of Virtual Classroom Environments for Neuropsychological Assessment},
author = {Thomas D. Parsons and Albert “Skip” Rizzo},
url = {http://link.springer.com/10.1007/978-1-4939-9482-3_11},
doi = {10.1007/978-1-4939-9482-3_11},
isbn = {978-1-4939-9480-9 978-1-4939-9482-3},
year = {2019},
date = {2019-08-01},
booktitle = {Virtual Reality for Psychological and Neurocognitive Interventions},
pages = {247--265},
publisher = {Springer New York},
address = {New York, NY},
abstract = {Differential diagnosis and treatment of neuropsychological disorders require assessments that can differentiate overlapping symptoms. Previous research has most often relied on paper-and-pencil as well as computerized psychometric tests of cognitive functions. Although these approaches provide highly systematic control and delivery of performance challenges, they have also been criticized as limited in the area of ecological validity. A possible answer to the problems of ecological validity in assessment of cognitive functioning in neurological populations is to immerse the participant in a virtual environment. This chapter reviews the potential of various virtual classroom environments that have been developed for neuropsychological assessment.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {incollection}
}
Rizzo, Albert "Skip"; Bouchard, Stéphane (Ed.)
Virtual Reality for Psychological and Neurocognitive Interventions Book
Springer New York, New York, NY, 2019, ISBN: 978-1-4939-9480-9 978-1-4939-9482-3.
@book{rizzo_virtual_2019,
title = {Virtual Reality for Psychological and Neurocognitive Interventions},
editor = {Albert "Skip" Rizzo and Stéphane Bouchard},
url = {http://link.springer.com/10.1007/978-1-4939-9482-3},
doi = {10.1007/978-1-4939-9482-3},
isbn = {978-1-4939-9480-9 978-1-4939-9482-3},
year = {2019},
date = {2019-08-01},
publisher = {Springer New York},
address = {New York, NY},
series = {Virtual Reality Technologies for Health and Clinical Applications},
abstract = {This exciting collection tours virtual reality in both its current therapeutic forms and its potential to transform a wide range of medical and mental health-related fields. Extensive findings track the contributions of VR devices, systems, and methods to accurate assessment, evidence-based and client-centered treatment methods, and—as described in a stimulating discussion of virtual patient technologies—innovative clinical training. Immersive digital technologies are shown enhancing opportunities for patients to react to situations, therapists to process patients’ physiological responses, and scientists to have greater control over test conditions and access to results. Expert coverage details leading-edge applications of VR across a broad spectrum of psychological and neurocognitive conditions, including: Treating anxiety disorders and PTSD. Treating developmental and learning disorders, including Autism Spectrum Disorder. Assessment of and rehabilitation from stroke and traumatic brain injuries. Assessment and treatment of substance abuse. Assessment of deviant sexual interests. Treating obsessive-compulsive and related disorders. Augmenting learning skills for blind persons. Readable and relevant, Virtual Reality for Psychological and Neurocognitive Interventions is an essential idea book for neuropsychologists, rehabilitation specialists (including physical, speech, vocational, and occupational therapists), and neurologists. Researchers across the behavioral and social sciences will find it a roadmap toward new and emerging areas of study.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {book}
}
Coleman, Benjamin; Marion, Sarah; Rizzo, Albert; Turnbull, Janiece; Nolty, Anne
Virtual Reality Assessment of Classroom – Related Attention: An Ecologically Relevant Approach to Evaluating the Effectiveness of Working Memory Training Journal Article
In: Frontiers in Psychology, vol. 10, 2019, ISSN: 1664-1078.
@article{coleman_virtual_2019,
title = {Virtual Reality Assessment of Classroom – Related Attention: An Ecologically Relevant Approach to Evaluating the Effectiveness of Working Memory Training},
author = {Benjamin Coleman and Sarah Marion and Albert Rizzo and Janiece Turnbull and Anne Nolty},
url = {https://www.frontiersin.org/article/10.3389/fpsyg.2019.01851/full},
doi = {10.3389/fpsyg.2019.01851},
issn = {1664-1078},
year = {2019},
date = {2019-08-01},
journal = {Frontiers in Psychology},
volume = {10},
abstract = {Computerized cognitive interventions to improve working memory also purport to improve ADHD-related inattention and off-task behavior. Such interventions have been shown to improve working memory, executive functioning, and fluid reasoning on standardized neuropsychological measures. However, debate continues as to whether such programs lead to improvement on ecologically relevant outcomes, such as classroom behavior. This study sought to propose a novel, ecologically relevant approach to evaluate the effectiveness of working memory training on real-world attention performance. Participants included 15 children, aged 6–15, identified as having attention problems, who were assessed via the virtual classroom continuous performance task (VCCPT) before and after completing 5 weeks of Cogmed working memory training. The VCCPT is a validated measure of sustained and selective attention set within a virtual reality (VR) environment. Several key areas of attention performance were observed to improve, including omission errors, reaction time, reaction time variability, and hit variability. Results suggest that working memory training led to substantial improvements in sustained attention in a real-life scenario of classroom learning. Moreover, the use of psychometrically validated VR measurement provides incremental validity beyond that of teacher or parent report of behavior. Observing such improvements on ecologically relevant measures of attention adds to the discussion around how to evaluate the effectiveness of working memory training as it pertains to real-life improvements and serves to inform consumer awareness of such products and their claims.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Bouchard, Stéphane; Rizzo, Albert “Skip”
Applications of Virtual Reality in Clinical Psychology and Clinical Cognitive Neuroscience–An Introduction Incollection
In: Virtual Reality for Psychological and Neurocognitive Interventions, pp. 1–13, Springer New York, New York, NY, 2019, ISBN: 978-1-4939-9480-9 978-1-4939-9482-3.
@incollection{bouchard_applications_2019,
title = {Applications of Virtual Reality in Clinical Psychology and Clinical Cognitive Neuroscience–An Introduction},
author = {Stéphane Bouchard and Albert “Skip” Rizzo},
url = {http://link.springer.com/10.1007/978-1-4939-9482-3_1},
doi = {10.1007/978-1-4939-9482-3_1},
isbn = {978-1-4939-9480-9 978-1-4939-9482-3},
year = {2019},
date = {2019-08-01},
booktitle = {Virtual Reality for Psychological and Neurocognitive Interventions},
pages = {1--13},
publisher = {Springer New York},
address = {New York, NY},
abstract = {Simulation technology has a long history of adding value in aviation, military training, automotive/aircraft design, and surgical planning. In clinical psychology, Norcross et al. (2013) surveyed 70 therapy experts regarding interventions they predicted to increase in the next decade and virtual reality (VR) was ranked 4th out of 45 options, with other computer-supported methods occupying 4 out of the top 5 positions. The increased popularity of VR in the news, social media, conferences, and from innovative start-ups may give the impression that VR is something new. However, it is important to look back in time and recognize that as early as the 1960s, Heilig proposed a multisensory immersive experience called the Sensorama, and Sutherland and Sproull had created a stereoscopic head mounted display (HMD) (Berryman 2012; Srivastava et al. 2014). The term VR was coined more than 30 years ago by Jaron Lanier, and commercial games were distributed to the public as early as 1989 by Mattel (in the US, and by PAX in Japan) for its PowerGlove™; Nintendo’s failed Virtual Boy™ was released in 1995. Clinical VR applications were proposed as early as the mid 1990s by Lamson, Pugnetti, Rothbaum, Riva, Rizzo, Weiss, and Wiederhold (named in alphabetical order), among others. Moreover, several scientific journals, conferences, and handbooks dedicated to the subject have been reporting scientific findings for decades.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {incollection}
}
Rosenbloom, Paul S.; Forbus, Kenneth D.
Expanding and Repositioning Cognitive Science Journal Article
In: Topics in Cognitive Science, 2019, ISSN: 1756-8757, 1756-8765.
@article{rosenbloom_expanding_2019,
title = {Expanding and Repositioning Cognitive Science},
author = {Paul S. Rosenbloom and Kenneth D. Forbus},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/tops.12468},
doi = {10.1111/tops.12468},
issn = {1756-8757, 1756-8765},
year = {2019},
date = {2019-08-01},
journal = {Topics in Cognitive Science},
abstract = {Cognitive science has converged in many ways with cognitive psychology, while also maintaining a distinctive interdisciplinary nature. Here we further characterize this existing state of the field before proposing how it might be reconceptualized toward a broader and more distinct, and thus more stable, position in the realm of sciences.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Olszewski, Kyle; Tulyakov, Sergey; Woodford, Oliver; Li, Hao; Luo, Linjie
Transformable Bottleneck Networks Journal Article
In: arXiv:1904.06458 [cs], 2019.
@article{olszewski_transformable_2019,
title = {Transformable Bottleneck Networks},
author = {Kyle Olszewski and Sergey Tulyakov and Oliver Woodford and Hao Li and Linjie Luo},
url = {http://arxiv.org/abs/1904.06458},
year = {2019},
date = {2019-08-01},
journal = {arXiv:1904.06458 [cs]},
abstract = {We propose a novel approach to performing fine-grained 3D manipulation of image content via a convolutional neural network, which we call the Transformable Bottleneck Network (TBN). It applies given spatial transformations directly to a volumetric bottleneck within our encoder-bottleneck-decoder architecture. Multi-view supervision encourages the network to learn to spatially disentangle the feature space within the bottleneck. The resulting spatial structure can be manipulated with arbitrary spatial transformations. We demonstrate the efficacy of TBNs for novel view synthesis, achieving state-of-the-art results on a challenging benchmark. We demonstrate that the bottlenecks produced by networks trained for this task contain meaningful spatial structure that allows us to intuitively perform a variety of image manipulations in 3D, well beyond the rigid transformations seen during training. These manipulations include non-uniform scaling, non-rigid warping, and combining content from different images. Finally, we extract explicit 3D structure from the bottleneck, performing impressive 3D reconstruction from a single input image.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
Gilani, Setareh Nasihati; Traum, David; Sortino, Rachel; Gallagher, Grady; Aaron-Lozano, Kailyn; Padilla, Cryss; Shapiro, Ari; Lamberton, Jason; Petitto, Laura-Ann
Can a Signing Virtual Human Engage a Baby's Attention? Inproceedings
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 162–169, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
@inproceedings{nasihati_gilani_can_2019,
title = {Can a Signing Virtual Human Engage a Baby's Attention?},
author = {Setareh Nasihati Gilani and David Traum and Rachel Sortino and Grady Gallagher and Kailyn Aaron-Lozano and Cryss Padilla and Ari Shapiro and Jason Lamberton and Laura-Ann Petitto},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329463},
doi = {10.1145/3308532.3329463},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {162--169},
publisher = {ACM Press},
address = {Paris, France},
abstract = {The child developmental period of ages 6-12 months marks a widely understood “critical period” for healthy language learning, during which, failure to receive exposure to language can place babies at risk for language and reading problems spanning life. Deaf babies constitute one vulnerable population as they can experience dramatically reduced or no access to usable linguistic input during this period. Technology has been used to augment linguistic input (e.g., auditory devices; language videotapes) but research finds limitations in learning. We evaluated an AI system that uses an Avatar (provides language and socially contingent interactions) and a robot (aids attention to the Avatar) to facilitate infants’ ability to learn aspects of American Sign Language (ASL), and asked three questions: (1) Can babies with little/no exposure to ASL distinguish among the Avatar’s different conversational modes (Linguistic Nursery Rhymes; Social Gestures; Idle/nonlinguistic postures; 3rd person observer)? (2) Can an Avatar stimulate babies’ production of socially contingent responses, and crucially, nascent language responses? (3) What is the impact of parents’ presence/absence of conversational participation? Surprisingly, babies (i) spontaneously distinguished among Avatar conversational modes, (ii) produced varied socially contingent responses to Avatar’s modes, and (iii) parents influenced an increase in babies’ response tokens to some Avatar modes, but the overall categories and pattern of babies’ behavioral responses remained proportionately similar irrespective of parental participation. Of note, babies produced the greatest percentage of linguistic responses to the Avatar’s Linguistic Nursery Rhymes versus other Avatar conversational modes. This work demonstrates the potential for Avatars to facilitate language learning in young babies.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Stocco, Andrea; Steine-Hanson, Zoe; Koh, Natalie; Laird, John E.; Lebiere, Christian J.; Rosenbloom, Paul
Analysis of the Human Connectome Data Supports the Notion of A “Common Model of Cognition” for Human and Human-Like Intelligence Technical Report
Neuroscience 2019.
@techreport{stocco_analysis_2019,
title = {Analysis of the Human Connectome Data Supports the Notion of A “Common Model of Cognition” for Human and Human-Like Intelligence},
author = {Andrea Stocco and Zoe Steine-Hanson and Natalie Koh and John E. Laird and Christian J. Lebiere and Paul Rosenbloom},
url = {http://biorxiv.org/lookup/doi/10.1101/703777},
doi = {10.1101/703777},
year = {2019},
date = {2019-07-01},
pages = {38},
institution = {Neuroscience},
abstract = {The Common Model of Cognition (CMC) is a recently proposed, consensus architecture intended to capture decades of progress in cognitive science on modeling human and human-like intelligence. Because of the broad agreement around it and preliminary mappings of its components to specific brain areas, we hypothesized that the CMC could be a candidate model of the large-scale functional architecture of the human brain. To test this hypothesis, we analyzed functional MRI data from 200 participants and seven different tasks that cover the broad range of cognitive domains. The CMC components were identified with functionally homologous brain regions through canonical fMRI analysis, and their communication pathways were translated into predicted patterns of effective connectivity between regions. The resulting dynamic linear model was implemented and fitted using Dynamic Causal Modeling, and compared against four alternative brain architectures that had been previously proposed in the field of neuroscience (two hierarchical architectures and two hub-and-spoke architectures) using a Bayesian approach. The results show that, in all cases, the CMC vastly outperforms all other architectures, both within each domain and across all tasks. The results suggest that a common, general architecture that could be used for artificial intelligence effectively underpins all aspects of human cognition, from the overall functional architecture of the human brain to higher level thought processes.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Hartholt, Arno; Mozgai, Sharon; Rizzo, Albert "Skip"
Virtual Job Interviewing Practice for High-Anxiety Populations Inproceedings
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 238–240, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
@inproceedings{hartholt_virtual_2019-1,
title = {Virtual Job Interviewing Practice for High-Anxiety Populations},
author = {Arno Hartholt and Sharon Mozgai and Albert "Skip" Rizzo},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329417},
doi = {10.1145/3308532.3329417},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {238--240},
publisher = {ACM Press},
address = {Paris, France},
abstract = {We present a versatile system for training job interviewing skills that focuses specifically on segments of the population facing increased challenges during the job application process. In particular, we target those with Autism Spectrum Disorder (ASD), veterans transitioning to civilian life, and former convicts integrating back into society. The system itself follows the SAIBA framework and contains several interviewer characters, who each represent a different type of vocational field (e.g., service industry, retail, office, etc.). Each interviewer can be set to one of three conversational modes, which not only affects what they say and how they say it, but also their supporting body language. This approach offers varying difficulties, allowing users to start practicing with interviewers who are more encouraging and accommodating before moving on to personalities that are more direct and indifferent. Finally, the user can place the interviewers in different environmental settings (e.g., conference room, restaurant, executive office, etc.), allowing for many different combinations in which to practice.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S; Ustun, Volkan
An Architectural Integration of Temporal Motivation Theory for Decision Making Inproceedings
In: Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling, pp. 6, Montreal, Canada, 2019.
@inproceedings{rosenbloom_architectural_2019,
title = {An Architectural Integration of Temporal Motivation Theory for Decision Making},
author = {Paul S Rosenbloom and Volkan Ustun},
url = {https://iccm-conference.neocities.org/2019/proceedings/papers/ICCM2019_paper_7.pdf},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling},
pages = {6},
address = {Montreal, Canada},
abstract = {Temporal Motivation Theory (TMT) is incorporated into the Sigma cognitive architecture to explore the ability of this combination to yield human-like decision making. In conjunction with Lazy Reinforcement Learning (LRL), which provides the inputs required for this form of decision making, experiments are run on a simple reinforcement learning task, a preference reversal task, and an uncertain two-choice task.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S
(A)symmetry × (Non)monotonicity: Towards a Deeper Understanding of Key Cognitive Di/Trichotomies and the Common Model of Cognition Inproceedings
In: Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling, pp. 6, Montreal, Canada, 2019.
@inproceedings{rosenbloom_symmetry_2019,
title = {(A)symmetry × (Non)monotonicity: Towards a Deeper Understanding of Key Cognitive Di/Trichotomies and the Common Model of Cognition},
author = {Paul S Rosenbloom},
url = {https://iccm-conference.neocities.org/2019/proceedings/papers/ICCM2019_paper_6.pdf},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling},
pages = {6},
address = {Montreal, Canada},
abstract = {A range of dichotomies from across the cognitive sciences are reduced to either (a)symmetry or (non)monotonicity. Taking the cross-product of these two elemental dichotomies then yields a deeper understanding of both two key trichotomies – based on control and content hierarchies – and the Common Model of Cognition, with results that bear on the structure of integrative cognitive architectures, models and systems, and on their commonalities, differences and gaps.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Beissinger, Markus; Gratch, Jonathan
An Expert-Model & Machine Learning Hybrid Approach to Predicting Human-Agent Negotiation Outcomes Inproceedings
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 212–214, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
@inproceedings{mell_expert-model_2019,
title = {An Expert-Model & Machine Learning Hybrid Approach to Predicting Human-Agent Negotiation Outcomes},
author = {Johnathan Mell and Markus Beissinger and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329433},
doi = {10.1145/3308532.3329433},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {212--214},
publisher = {ACM Press},
address = {Paris, France},
abstract = {We present the results of a machine-learning approach to the analysis of several human-agent negotiation studies. By combining expert knowledge of negotiating behavior compiled over a series of empirical studies with neural networks, we show that a hybrid approach to parameter selection yields promise for designing more effective and socially intelligent agents. Specifically, we show that a deep feedforward neural network using a theory-driven three-parameter model can be effective in predicting negotiation outcomes. Furthermore, it outperforms other expert-designed models that use more parameters, as well as those using other, more limited techniques (such as linear regression models or boosted decision trees). We anticipate these results will have impact for those seeking to combine extensive domain knowledge with more automated approaches in human-computer negotiation.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}