Publications
2024
Al Owayyed, Mohammed; Tielman, Myrthe; Hartholt, Arno; Specht, Marcus; Brinkman, Willem-Paul
Agent-based social skills training systems: the ARTES architecture, interaction characteristics, learning theories and future outlooks Journal Article
In: Behaviour & Information Technology, pp. 1–28, 2024, ISSN: 0144-929X, 1362-3001.
@article{al_owayyed_agent-based_2024,
title = {Agent-based social skills training systems: the ARTES architecture, interaction characteristics, learning theories and future outlooks},
author = {Mohammed Al Owayyed and Myrthe Tielman and Arno Hartholt and Marcus Specht and Willem-Paul Brinkman},
url = {https://www.tandfonline.com/doi/full/10.1080/0144929X.2024.2374891},
doi = {10.1080/0144929X.2024.2374891},
issn = {0144-929X, 1362-3001},
year = {2024},
date = {2024-07-01},
urldate = {2024-08-15},
journal = {Behaviour & Information Technology},
pages = {1–28},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon A; Kaurloto, Cari; Winn, Jade G; Leeds, Andrew; Beland, Sarah; Sookiassian, Arman; Hartholt, Arno
Accelerating Scoping Reviews: A Case Study in the User-Centered Design of an AI-Enabled Interdisciplinary Research Tool Proceedings Article
In: Extended Abstracts of the CHI Conference on Human Factors in Computing Systems, pp. 1–8, ACM, Honolulu, HI, USA, 2024, ISBN: 9798400703317.
@inproceedings{mozgai_accelerating_2024,
title = {Accelerating Scoping Reviews: A Case Study in the User-Centered Design of an AI-Enabled Interdisciplinary Research Tool},
author = {Sharon A Mozgai and Cari Kaurloto and Jade G Winn and Andrew Leeds and Sarah Beland and Arman Sookiassian and Arno Hartholt},
url = {https://dl.acm.org/doi/10.1145/3613905.3637110},
doi = {10.1145/3613905.3637110},
isbn = {9798400703317},
year = {2024},
date = {2024-05-01},
urldate = {2024-06-18},
booktitle = {Extended Abstracts of the CHI Conference on Human Factors in Computing Systems},
pages = {1–8},
publisher = {ACM},
address = {Honolulu, HI, USA},
keywords = {AI, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert Skip; Hartholt, Arno; Mozgai, Sharon
Settling the Score: Virtual Reality as a Tool to Enhance Trauma-Focused Therapy for PTSD Book Section
In: Rich, Grant J.; Kumar, V. K.; Farley, Frank H. (Ed.): Handbook of Media Psychology, pp. 187–213, Springer Nature Switzerland, Cham, 2024, ISBN: 978-3-031-56536-6, 978-3-031-56537-3.
@incollection{rich_settling_2024,
title = {Settling the Score: Virtual Reality as a Tool to Enhance Trauma-Focused Therapy for PTSD},
author = {Albert Skip Rizzo and Arno Hartholt and Sharon Mozgai},
editor = {Grant J. Rich and V. K. Kumar and Frank H. Farley},
url = {https://link.springer.com/10.1007/978-3-031-56537-3_14},
doi = {10.1007/978-3-031-56537-3_14},
isbn = {978-3-031-56536-6, 978-3-031-56537-3},
year = {2024},
date = {2024-04-01},
urldate = {2024-06-18},
booktitle = {Handbook of Media Psychology},
pages = {187–213},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {DTIC, MedVR, Simulation, VR},
pubstate = {published},
tppubtype = {incollection}
}
Hartholt, Arno; Leeds, Andrew; Fast, Ed; Sookiassian, Edwin; Kim, Kevin; Beland, Sarah; Kulkarni, Pranav; Mozgai, Sharon
Multidisciplinary Research & Development of Multi-Agents and Virtual Humans Leveraging Integrated Middleware Platforms Proceedings Article
In: 2024.
@inproceedings{hartholt_multidisciplinary_2024,
title = {Multidisciplinary Research & Development of Multi-Agents and Virtual Humans Leveraging Integrated Middleware Platforms},
author = {Arno Hartholt and Andrew Leeds and Ed Fast and Edwin Sookiassian and Kevin Kim and Sarah Beland and Pranav Kulkarni and Sharon Mozgai},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-95-7/article/978-1-958651-95-7_33},
doi = {10.54941/ahfe1004497},
year = {2024},
date = {2024-01-01},
urldate = {2024-04-16},
abstract = {The current pace of technological advancements has led to an ever-increasing availability of technologies to investigate and help address the challenges that contemporary society faces today. However, while this trend increases the potential for creating more relevant, effective, and efficient solutions, it also inherently increases the complexity of realizing that potential. Our work aims to manage this complexity through the creation and dissemination of integrated middleware platforms that enable researchers and developers to rapidly prototype novel solutions within the areas of modelling & simulation, virtual humans, and virtual worlds. In this paper, we discuss two related platforms: the Rapid Integration & Development Environment (RIDE) and the Virtual Human Toolkit (VHToolkit). Specifically, we explore two use cases: 1) the development of an authoring tool aimed at domain experts to rapidly create low-echelon military training scenarios, and 2) the development of a virtual human led mHealth wellness and suicide prevention app for veterans.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2023
Mozgai, Sharon; Kaurloto, Cari; Winn, Jade; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno; Scherer, Stefan
Machine learning for semi-automated scoping reviews Journal Article
In: Intelligent Systems with Applications, vol. 19, pp. 200249, 2023, ISSN: 2667-3053.
@article{mozgai_machine_2023,
title = {Machine learning for semi-automated scoping reviews},
author = {Sharon Mozgai and Cari Kaurloto and Jade Winn and Andrew Leeds and Dirk Heylen and Arno Hartholt and Stefan Scherer},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2667305323000741},
doi = {10.1016/j.iswa.2023.200249},
issn = {2667-3053},
year = {2023},
date = {2023-09-01},
urldate = {2023-08-23},
journal = {Intelligent Systems with Applications},
volume = {19},
pages = {200249},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon
Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment Proceedings Article
In: Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems, AHFE Open Access, 2023, ISBN: 978-1-958651-45-2, (ISSN: 2771-0718, Issue: 69).
@inproceedings{hartholt_creating_2023,
title = {Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment},
author = {Arno Hartholt and Sharon Mozgai},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-45-2/article/978-1-958651-45-2_41},
doi = {10.54941/ahfe1002856},
isbn = {978-1-958651-45-2},
year = {2023},
date = {2023-07-01},
urldate = {2023-03-31},
booktitle = {Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems},
volume = {69},
publisher = {AHFE Open Access},
abstract = {The research and development of virtual humans, and the virtual worlds they inhabit, is inherently complex, requiring interdisciplinary approaches that combine social sciences, computer science, design, art, production, and domain expertise. Our previous work in managing this complexity has resulted in the release of the Virtual Human Toolkit (VHToolkit), aimed at lowering the burden of creating embodied conversational agents. In our current efforts, we are integrating the VHToolkit with the Rapid Integration & Development Environment (RIDE), a rapid prototyping modeling and simulation middleware platform that leverages real-time game engines. This integration results in the ability to mix and match commercial AI services from AWS, Azure, and Google, as well as leverage novel 3D geospatial terrain creation pipelines. Combined with dedicated authoring tools that have been developed through human-centered design processes, the platform enables researchers, developers, and domain experts to rapidly create digital worlds with virtual humans for both military and civilian contexts. Our approach is highly interdisciplinary, including academia, government, and industry collaborators. The demonstration shows a user interacting with an embodied conversational agent embedded within real-world captured and virtualized terrain. Further research and development features of the platform are shown, including scripted agent behaviors, networked team play, and machine learning interfaces.},
note = {ISSN: 2771-0718, Issue: 69},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Rizzo, Albert; Hartholt, Arno
Human-Centered Design for a Virtual Human led mHealth Intervention for Suicide Prevention Proceedings Article
In: 2023.
@inproceedings{mozgai_human-centered_2023,
title = {Human-Centered Design for a Virtual Human led mHealth Intervention for Suicide Prevention},
author = {Sharon Mozgai and Albert Rizzo and Arno Hartholt},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-88-9/article/978-1-958651-88-9_11},
doi = {10.54941/ahfe1004118},
year = {2023},
date = {2023-07-01},
urldate = {2024-07-11},
abstract = {Addressing the significant mental and physical healthcare needs of Veterans requires innovative strategies to enhance access to evidence-based care. The integration of Virtual Human (VH) agents into Mobile Health (mHealth) applications presents a promising opportunity to overcome barriers associated with suicide prevention and connect with Veterans. The Battle Buddy (BB) project was conceived as a mobile wellness and suicide prevention application, empowering Veterans with an always-available resource concierged by an engaging and supportive conversational VH agent. Human-centered design is essential in the development of all interactions focused on the persuasive strategies of (1) personalization, (2) self-monitoring, (3) tunneling, (4) suggestion, and (5) expertise. Veterans can interact with the BB VH during daily check-ins, learn about mental health and wellness strategies, participate in interactive activities, increase self-awareness of their current status, and build and work safety plans in times of suicidal crisis. BB is designed to provide the Veteran with easy access to a suicide prevention ecosystem in which a wealth of evidence-based interventions will be delivered in a non-stigmatizing fashion by a computer-based dialogue system with virtual embodiment, utilizing various multi-modal language cues such as text, speech, animated facial expressions, and gestures to interact with users. This paper explores our human-centered design process for the BB feature set to target the negative effects of social isolation and loneliness, conditions that challenge Veteran healthcare and suicide prevention.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Beland, Sarah; Leeds, Andrew; Winn, Jade G.; Kaurloto, Cari; Heylen, Dirk; Hartholt, Arno
Toward a Scoping Review of Social Intelligence in Virtual Humans Proceedings Article
In: 2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1–6, 2023.
@inproceedings{mozgai_toward_2023,
title = {Toward a Scoping Review of Social Intelligence in Virtual Humans},
author = {Sharon Mozgai and Sarah Beland and Andrew Leeds and Jade G. Winn and Cari Kaurloto and Dirk Heylen and Arno Hartholt},
url = {https://ieeexplore.ieee.org/abstract/document/10042532},
doi = {10.1109/FG57933.2023.10042532},
year = {2023},
date = {2023-01-01},
booktitle = {2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)},
pages = {1–6},
abstract = {As the demand for socially intelligent Virtual Humans (VHs) increases, so follows the demand for effective and efficient cross-discipline collaboration that is required to bring these VHs “to life”. One avenue for increasing cross-discipline fluency is the aggregation and organization of seemingly disparate areas of research and development (e.g., graphics and emotion models) that are essential to the field of VH research. Our initial investigation (1) identifies and catalogues research streams concentrated in three multidisciplinary VH topic clusters within the domain of social intelligence, Emotion, Social Behavior, and The Face, (2) brings to the forefront key themes and prolific authors within each topic cluster, and (3) provides evidence that a full scoping review is warranted to further map the field, aggregate research findings, and identify gaps in the research. To enable collaboration, we provide full access to the refined VH cluster datasets, key word and author word clouds, as well as interactive evidence maps.},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2022
Hartholt, Arno; Mozgai, Sharon
Platforms and Tools for SIA Research and Development Book Section
In: The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application, vol. 48, pp. 261–304, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9896-1.
@incollection{hartholt_platforms_2022,
title = {Platforms and Tools for SIA Research and Development},
author = {Arno Hartholt and Sharon Mozgai},
url = {https://doi.org/10.1145/3563659.3563668},
isbn = {978-1-4503-9896-1},
year = {2022},
date = {2022-11-01},
urldate = {2023-03-31},
booktitle = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application},
volume = {48},
pages = {261–304},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
edition = {1},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Hartholt, Arno; Fast, Ed; Li, Zongjian; Kim, Kevin; Leeds, Andrew; Mozgai, Sharon
Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, ACM, Faro, Portugal, 2022, ISBN: 978-1-4503-9248-8.
@inproceedings{hartholt_re-architecting_2022,
title = {Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development},
author = {Arno Hartholt and Ed Fast and Zongjian Li and Kevin Kim and Andrew Leeds and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3514197.3549671},
doi = {10.1145/3514197.3549671},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-15},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1–8},
publisher = {ACM},
address = {Faro, Portugal},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Winn, Jade; Kaurloto, Cari; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno
Toward a Semi-Automated Scoping Review of Virtual Human Smiles Proceedings Article
In: Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop, 2022.
@inproceedings{mozgai_toward_2022,
title = {Toward a Semi-Automated Scoping Review of Virtual Human Smiles},
author = {Sharon Mozgai and Jade Winn and Cari Kaurloto and Andrew Leeds and Dirk Heylen and Arno Hartholt},
url = {http://www.lrec-conf.org/proceedings/lrec2022/workshops/SmiLa/index.html},
year = {2022},
date = {2022-06-01},
booktitle = {Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Leeds, Andrew; Kim, Kevin; Gordon, Andrew; McCullough, Kyle; Ustun, Volkan; Mozgai, Sharon
Demonstrating the Rapid Integration & Development Environment (RIDE): Embodied Conversational Agent (ECA) and Multiagent Capabilities Proceedings Article
In: Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems, pp. 1902–1904, International Foundation for Autonomous Agents and Multiagent Systems, Richland, SC, 2022, ISBN: 978-1-4503-9213-6.
@inproceedings{hartholt_demonstrating_2022,
title = {Demonstrating the Rapid Integration & Development Environment (RIDE): Embodied Conversational Agent (ECA) and Multiagent Capabilities},
author = {Arno Hartholt and Ed Fast and Andrew Leeds and Kevin Kim and Andrew Gordon and Kyle McCullough and Volkan Ustun and Sharon Mozgai},
isbn = {978-1-4503-9213-6},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-20},
booktitle = {Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems},
pages = {1902–1904},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Richland, SC},
series = {AAMAS '22},
abstract = {We demonstrate the Rapid Integration & Development Environment (RIDE), a research and development platform that enables rapid prototyping in support of multiagents and embodied conversational agents. RIDE is based on commodity game engines and includes a flexible architecture, system interoperability, and native support for artificial intelligence and machine learning frameworks.},
keywords = {AI, DTIC, Integration Technology, Machine Learning, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2021
Hartholt, Arno; McCullough, Kyle; Mozgai, Sharon; Ustun, Volkan; Gordon, Andrew S
Introducing RIDE: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration & Development Environment Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 11, Association for Computing Machinery, New York, NY, USA, 2021.
@inproceedings{hartholt_introducing_2021,
title = {Introducing RIDE: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration & Development Environment},
author = {Arno Hartholt and Kyle McCullough and Sharon Mozgai and Volkan Ustun and Andrew S Gordon},
url = {https://dl.acm.org/doi/10.1145/3472306.3478363},
doi = {10.1145/3472306.3478363},
year = {2021},
date = {2021-11-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {11},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '21},
abstract = {This paper describes the design, development, and philosophy of the Rapid Integration & Development Environment (RIDE). RIDE is a simulation platform that unites many Department of Defense (DoD) and Army simulation efforts to provide an accelerated development foundation and prototyping sandbox that provides direct benefit to the U.S. Army’s Synthetic Training Environment (STE) as well as the larger DoD and Army simulation communities. RIDE integrates a range of capabilities, including One World Terrain, Non-Player Character AI behaviors, xAPI logging, multiplayer networking, scenario creation, destructibility, machine learning approaches, and multi-platform support. The goal of RIDE is to create a simple, drag-and-drop development environment usable by people across all technical levels. RIDE leverages robust game engine technology while designed to be agnostic to any specific game or simulation engine. It provides decision makers with the tools needed to better define requirements and identify potential solutions in much less time and at much reduced costs. RIDE is available through Government Purpose Rights. We aim for RIDE to lower the barrier of entry to research and development efforts within the simulation community in order to reduce required time and effort for simulation and training prototyping. This paper provides an overview of our objective, overall approach, and next steps, in pursuit of these goals.},
keywords = {VHTL},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; McCullough, Kyle; Fast, Ed; Leeds, Andrew; Mozgai, Sharon; Aris, Tim; Ustun, Volkan; Gordon, Andrew; McGroarty, Christopher
Rapid Prototyping for Simulation and Training with the Rapid Integration & Development Environment (RIDE) Proceedings Article
In: 2021.
@inproceedings{hartholt_rapid_2021,
title = {Rapid Prototyping for Simulation and Training with the Rapid Integration & Development Environment (RIDE)},
author = {Arno Hartholt and Kyle McCullough and Ed Fast and Andrew Leeds and Sharon Mozgai and Tim Aris and Volkan Ustun and Andrew Gordon and Christopher McGroarty},
year = {2021},
date = {2021-11-01},
keywords = {AI, DTIC, Integration Technology, Machine Learning, Simulation, UARC, VHTL},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Leeds, Andrew; Kwok, David; Fast, Ed; Rizzo, Albert Skip; Hartholt, Arno
Building BRAVEMIND Vietnam: User-Centered Design for Virtual Reality Exposure Therapy Proceedings Article
In: 2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR), pp. 247–250, 2021.
@inproceedings{mozgai_building_2021,
title = {Building BRAVEMIND Vietnam: User-Centered Design for Virtual Reality Exposure Therapy},
author = {Sharon Mozgai and Andrew Leeds and David Kwok and Ed Fast and Albert Skip Rizzo and Arno Hartholt},
doi = {10.1109/AIVR52153.2021.00056},
year = {2021},
date = {2021-11-01},
booktitle = {2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)},
pages = {247–250},
abstract = {There has been a recent resurgence of Vietnam-era Veterans seeking Veteran Affairs (VA) services for post-traumatic stress disorder (PTSD). Multiple studies have reported positive outcomes using Virtual Reality Exposure Therapy (VRET) with Iraq/Afghanistan Veteran PTSD patients, but there have been fewer investigations into the acceptability and feasibility of VRET with older Veterans. We have extended an established VRET system, BRAVEMIND, to include Vietnam relevant content following a user-centered iterative design methodology. This paper will present the BRAVEMIND Vietnam VRET system, including setup, content, and technical architecture along with the findings of an initial focus group with Vietnam Veterans. These Veterans rated this system to be acceptable as a treatment tool for combat-related PTSD and provided valuable first-person accounts of their time in Vietnam to help guide the evolution of the VR content.},
keywords = {DTIC, MedVR, VHTL, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Leeds, Andrew; Mozgai, Sharon
Introducing VHMason: A Visual, Integrated, Multimodal Virtual Human Authoring Tool Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 109–111, Association for Computing Machinery, New York, NY, USA, 2021, ISBN: 978-1-4503-8619-7.
@inproceedings{hartholt_introducing_2021-1,
title = {Introducing VHMason: A Visual, Integrated, Multimodal Virtual Human Authoring Tool},
author = {Arno Hartholt and Ed Fast and Andrew Leeds and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3472306.3478363},
doi = {10.1145/3472306.3478363},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {109–111},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '21},
abstract = {A major impediment to the success of virtual agents is the inability of non-technical experts to easily author content. To address this barrier we present VHMason, a multimodal authoring tool designed to help creative authors build embodied conversational agents. We introduce the novel aspects of this authoring tool and explore a use case of the creation of an agent-led educational experience implemented at Children's Hospital Los Angeles (CHLA).},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert “Skip”; Hartholt, Arno; Mozgai, Sharon
From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality Journal Article
In: Journal of Technology in Human Services, vol. 39, no. 3, pp. 314–347, 2021, ISSN: 1522-8835, (Publisher: Routledge).
@article{rizzo_combat_2021,
title = {From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality},
author = {Albert “Skip” Rizzo and Arno Hartholt and Sharon Mozgai},
url = {https://doi.org/10.1080/15228835.2021.1915931},
doi = {10.1080/15228835.2021.1915931},
issn = {1522-8835},
year = {2021},
date = {2021-07-01},
urldate = {2023-03-31},
journal = {Journal of Technology in Human Services},
volume = {39},
number = {3},
pages = {314–347},
abstract = {Research has documented the efficacy of clinical applications that leverage Virtual Reality (VR) for assessment and treatment purposes across a wide range of domains, including pain, phobias, and posttraumatic stress disorder (PTSD). As the field of Clinical VR matures, it is important to review its origins and examine how these initial explorations have progressed, what gaps remain, and what opportunities the community can pursue. We do this by reflecting on our personal scientific journey against the backdrop of the field in general. In particular, this paper discusses how a clinical research program that was initially designed to deliver trauma-focused VR exposure therapy (VRET) for combat-related PTSD has been evolved to expand its impact and address a wider range of trauma sources. Such trauma sources include sexual trauma and the needs of first responders and healthcare professionals serving on the frontlines of the COVID-19 pandemic. We provide an overview of the field and its general trends, discuss the genesis of our research agenda and its current status, and summarize upcoming opportunities, together with common challenges and lessons learned.},
note = {Publisher: Routledge},
keywords = {MedVR, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Femminella, Brian; Hartholt, Arno; Rizzo, Skip
User-Centered Design Model for Mobile Health (mHealth) Applications: A Military Case Study in Rapid Assessment Process (RAP) Journal Article
In: pp. 10, 2021.
@article{mozgai_user-centered_2021,
title = {User-Centered Design Model for Mobile Health (mHealth) Applications: A Military Case Study in Rapid Assessment Process (RAP)},
author = {Sharon Mozgai and Brian Femminella and Arno Hartholt and Skip Rizzo},
url = {https://uploads-ssl.webflow.com/5f11f7e80d5a3b6dfdeeb614/5f9b3284d3d73e1da6a8f848_CHI_2021_Battle%20Buddy.pdf},
year = {2021},
date = {2021-01-01},
pages = {10},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2020
Hartholt, Arno; Reilly, Adam; Fast, Ed; Mozgai, Sharon
Introducing Canvas: Combining Nonverbal Behavior Generation with User-Generated Content to Rapidly Create Educational Videos Proceedings Article
In: Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents, pp. 1–3, ACM, Virtual Event, Scotland, UK, 2020, ISBN: 978-1-4503-7586-3.
@inproceedings{hartholt_introducing_2020,
title = {Introducing Canvas: Combining Nonverbal Behavior Generation with User-Generated Content to Rapidly Create Educational Videos},
author = {Arno Hartholt and Adam Reilly and Ed Fast and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3383652.3423880},
doi = {10.1145/3383652.3423880},
isbn = {978-1-4503-7586-3},
year = {2020},
date = {2020-10-01},
booktitle = {Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents},
pages = {1–3},
publisher = {ACM},
address = {Virtual Event, Scotland, UK},
abstract = {Rapidly creating educational content that is effective, engaging, and low-cost is a challenge. We present Canvas, a tool for educators that addresses this challenge by enabling the generation of educational video, led by an intelligent virtual agent, that combines rapid nonverbal behavior generation techniques with end-user facing authoring tools. With Canvas, educators can easily produce compelling educational videos with a minimum of investment by leveraging existing content provided by the tool (e.g., characters and environments) while incorporating their own custom content (e.g., images and video clips). Canvas has been delivered to the Smithsonian Science Education Center and is currently being evaluated internally before wider release. We discuss the system, feature set, design process, and lessons learned.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Reilly, Adam; Whitcup, Wendy; Liewer, Matt; Mozgai, Sharon
Multi-Platform Expansion of the Virtual Human Toolkit: Ubiquitous Conversational Agents Journal Article
In: International Journal of Semantic Computing, vol. 14, no. 03, pp. 315–332, 2020, ISSN: 1793-351X, 1793-7108.
@article{hartholt_multi-platform_2020,
title = {Multi-Platform Expansion of the Virtual Human Toolkit: Ubiquitous Conversational Agents},
author = {Arno Hartholt and Ed Fast and Adam Reilly and Wendy Whitcup and Matt Liewer and Sharon Mozgai},
url = {https://www.worldscientific.com/doi/abs/10.1142/S1793351X20400127},
doi = {10.1142/S1793351X20400127},
issn = {1793-351X, 1793-7108},
year = {2020},
date = {2020-09-01},
journal = {International Journal of Semantic Computing},
volume = {14},
number = {03},
pages = {315–332},
abstract = {We present an extension of the Virtual Human Toolkit to include a range of computing platforms, including mobile, web, Virtual Reality (VR) and Augmented Reality (AR). The Toolkit uses a mix of in-house and commodity technologies to support audio-visual sensing, speech recognition, natural language processing, nonverbal behavior generation and realization, text-to-speech generation and rendering. It has been extended to support computing platforms beyond Windows by leveraging microservices. The resulting framework maintains the modularity of the underlying architecture, allows re-use of both logic and content through cloud services, and is extensible by porting lightweight clients. We present the current state of the framework, discuss how we model and animate our characters, and offer lessons learned through several use cases, including expressive character animation in seated VR, shared space and navigation in room-scale VR, autonomous AI in mobile AR, and real-time user performance feedback leveraging mobile sensors in headset AR.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Filter
2024
Owayyed, Mohammed Al; Tielman, Myrthe; Hartholt, Arno; Specht, Marcus; Brinkman, Willem-Paul
Agent-based social skills training systems: the ARTES architecture, interaction characteristics, learning theories and future outlooks Journal Article
In: Behaviour & Information Technology, pp. 1–28, 2024, ISSN: 0144-929X, 1362-3001.
@article{al_owayyed_agent-based_2024,
title = {Agent-based social skills training systems: the ARTES architecture, interaction characteristics, learning theories and future outlooks},
author = {Mohammed Al Owayyed and Myrthe Tielman and Arno Hartholt and Marcus Specht and Willem-Paul Brinkman},
url = {https://www.tandfonline.com/doi/full/10.1080/0144929X.2024.2374891},
doi = {10.1080/0144929X.2024.2374891},
issn = {0144-929X, 1362-3001},
year = {2024},
date = {2024-07-01},
urldate = {2024-08-15},
journal = {Behaviour & Information Technology},
pages = {1–28},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon A; Kaurloto, Cari; Winn, Jade G; Leeds, Andrew; Beland, Sarah; Sookiassian, Arman; Hartholt, Arno
Accelerating Scoping Reviews: A Case Study in the User-Centered Design of an AI-Enabled Interdisciplinary Research Tool Proceedings Article
In: Extended Abstracts of the CHI Conference on Human Factors in Computing Systems, pp. 1–8, ACM, Honolulu HI USA, 2024, ISBN: 9798400703317.
Links | BibTeX | Tags: AI, DTIC, UARC, Virtual Humans
@inproceedings{mozgai_accelerating_2024,
title = {Accelerating Scoping Reviews: A Case Study in the User-Centered Design of an AI-Enabled Interdisciplinary Research Tool},
author = {Sharon A Mozgai and Cari Kaurloto and Jade G Winn and Andrew Leeds and Sarah Beland and Arman Sookiassian and Arno Hartholt},
url = {https://dl.acm.org/doi/10.1145/3613905.3637110},
doi = {10.1145/3613905.3637110},
isbn = {9798400703317},
year = {2024},
date = {2024-05-01},
urldate = {2024-06-18},
booktitle = {Extended Abstracts of the CHI Conference on Human Factors in Computing Systems},
pages = {1–8},
publisher = {ACM},
address = {Honolulu HI USA},
keywords = {AI, DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert Skip; Hartholt, Arno; Mozgai, Sharon
Settling the Score: Virtual Reality as a Tool to Enhance Trauma-Focused Therapy for PTSD Book Section
In: Rich, Grant J.; Kumar, V. K.; Farley, Frank H. (Ed.): Handbook of Media Psychology, pp. 187–213, Springer Nature Switzerland, Cham, 2024, ISBN: 978-3-031-56536-6 978-3-031-56537-3.
Links | BibTeX | Tags: DTIC, MedVR, Simulation, VR
@incollection{rich_settling_2024,
title = {Settling the Score: Virtual Reality as a Tool to Enhance Trauma-Focused Therapy for PTSD},
author = {Albert Skip Rizzo and Arno Hartholt and Sharon Mozgai},
editor = {Grant J. Rich and V. K. Kumar and Frank H. Farley},
url = {https://link.springer.com/10.1007/978-3-031-56537-3_14},
doi = {10.1007/978-3-031-56537-3_14},
isbn = {978-3-031-56536-6 978-3-031-56537-3},
year = {2024},
date = {2024-04-01},
urldate = {2024-06-18},
booktitle = {Handbook of Media Psychology},
pages = {187–213},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {DTIC, MedVR, Simulation, VR},
pubstate = {published},
tppubtype = {incollection}
}
Hartholt, Arno; Leeds, Andrew; Fast, Ed; Sookiassian, Edwin; Kim, Kevin; Beland, Sarah; Kulkarni, Pranav; Mozgai, Sharon
Multidisciplinary Research & Development of Multi-Agents and Virtual Humans Leveraging Integrated Middleware Platforms Proceedings Article
In: 2024.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{hartholt_multidisciplinary_2024,
title = {Multidisciplinary Research & Development of Multi-Agents and Virtual Humans Leveraging Integrated Middleware Platforms},
author = {Arno Hartholt and Andrew Leeds and Ed Fast and Edwin Sookiassian and Kevin Kim and Sarah Beland and Pranav Kulkarni and Sharon Mozgai},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-95-7/article/978-1-958651-95-7_33},
doi = {10.54941/ahfe1004497},
year = {2024},
date = {2024-01-01},
urldate = {2024-04-16},
abstract = {The current pace of technological advancements has led to an ever-increasing availability of technologies to investigate and help address the challenges that contemporary society faces today. However, while this trend increases the potential for creating more relevant, effective, and efficient solutions, it also inherently increases the complexity of realizing that potential. Our work aims to manage this complexity through the creation and dissemination of integrated middleware platforms that enable researchers and developers to rapidly prototype novel solutions within the areas of modelling & simulation, virtual humans, and virtual worlds. In this paper, we discuss two related platforms: the Rapid Integration & Development Environment (RIDE) and the Virtual Human Toolkit (VHToolkit). Specifically, we explore two use cases: 1) the development of an authoring tool aimed at domain experts to rapidly create low-echelon military training scenarios, and 2) the development of a virtual human led mHealth wellness and suicide prevention app for veterans.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2023
Mozgai, Sharon; Kaurloto, Cari; Winn, Jade; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno; Scherer, Stefan
Machine learning for semi-automated scoping reviews Journal Article
In: Intelligent Systems with Applications, vol. 19, pp. 200249, 2023, ISSN: 26673053.
Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@article{mozgai_machine_2023,
title = {Machine learning for semi-automated scoping reviews},
author = {Sharon Mozgai and Cari Kaurloto and Jade Winn and Andrew Leeds and Dirk Heylen and Arno Hartholt and Stefan Scherer},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2667305323000741},
doi = {10.1016/j.iswa.2023.200249},
issn = {26673053},
year = {2023},
date = {2023-09-01},
urldate = {2023-08-23},
journal = {Intelligent Systems with Applications},
volume = {19},
pages = {200249},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon
Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment Proceedings Article
In: Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems, AHFE Open Acces, 2023, ISBN: 978-1-958651-45-2, (ISSN: 27710718 Issue: 69).
Abstract | Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@inproceedings{hartholt_creating_2023,
title = {Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment},
author = {Arno Hartholt and Sharon Mozgai},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-45-2/article/978-1-958651-45-2_41},
doi = {10.54941/ahfe1002856},
isbn = {978-1-958651-45-2},
year = {2023},
date = {2023-07-01},
urldate = {2023-03-31},
booktitle = {Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems},
volume = {69},
publisher = {AHFE Open Acces},
abstract = {The research and development of virtual humans, and the virtual worlds they inhabit, is inherently complex, requiring interdisciplinary approaches that combine social sciences, computer science, design, art, production, and domain expertise. Our previous work in managing this complexity has resulted in the release of the Virtual Human Toolkit (VHToolkit), aimed at lowering the burden of creating embodied conversational agents. In our current efforts, we are integrating the VHToolkit with the Rapid Integration & Development Environment (RIDE), a rapid prototyping modeling and simulation middleware platform that leverages real-time game engines. This integration results in the ability to mix and match commercial AI services from AWS, Azure, and Google, as well as leverage novel 3D geospatial terrain creation pipelines. Combined with dedicated authoring tools that have been developed through human-centered design processes, the platform enables researchers, developers, and domain experts to rapidly create digital worlds with virtual humans for both military and civilian contexts. Our approach is highly interdisciplinary, including academia, government, and industry collaborators. The demonstration shows a user interacting with an embodied conversational agent embedded within real-world captured and virtualized terrain. Further research and development features of the platform are shown, including scripted agent behaviors, networked team play, and machine learning interfaces.},
note = {ISSN: 27710718
Issue: 69},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Rizzo, Albert; Hartholt, Arno
Human-Centered Design for a Virtual Human led mHealth Intervention for Suicide Prevention Proceedings Article
In: 2023.
Abstract | Links | BibTeX | Tags: MedVR
@inproceedings{mozgai_human-centered_2023,
title = {Human-Centered Design for a Virtual Human led mHealth Intervention for Suicide Prevention},
author = {Sharon Mozgai and Albert Rizzo and Arno Hartholt},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-88-9/article/978-1-958651-88-9_11},
doi = {10.54941/ahfe1004118},
year = {2023},
date = {2023-07-01},
urldate = {2024-07-11},
abstract = {Addressing the significant mental and physical healthcare needs of
Veterans requires innovative strategies to enhance access to evidence-based
care. The integration of Virtual Human (VH) agents into Mobile Health
(mHealth) applications presents a promising opportunity to overcome barriers
associated with suicide prevention and connect with Veterans. The Battle
Buddy (BB) project was conceived as a mobile wellness and suicide prevention
application, empowering Veterans with an always-available resource
concierged by an engaging and supportive conversational VH agent.
Human-centered design is essential in the development of all interactions
focused on the persuasive strategies of (1) personalization, (2)
self-monitoring, (3) tunneling, (4) suggestion, and (5) expertise. Veterans
can interact with the BB VH during daily check-ins, learn about mental
health and wellness strategies, participate in interactive activities,
increase self-awareness of their current status, and build and work safety
plans in times of suicidal crisis. BB is designed to provide the Veteran
with easy access to a suicide prevention ecosystem in which a wealth of
evidenced-based interventions will be delivered in a non-stigmatizing
fashion by a computer-based dialogue system with virtual embodiment,
utilizing various multi-modal language cues such as text, speech, animated
facial expressions, and gestures to interact with users. This paper explores
our human-centered design process for the BB feature set to target the
negative effects of social isolation and loneliness, conditions that
challenge Veteran healthcare and suicide prevention.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {inproceedings}
}
Veterans requires innovative strategies to enhance access to evidence-based
care. The integration of Virtual Human (VH) agents into Mobile Health
(mHealth) applications presents a promising opportunity to overcome barriers
associated with suicide prevention and connect with Veterans. The Battle
Buddy (BB) project was conceived as a mobile wellness and suicide prevention
application, empowering Veterans with an always-available resource
concierged by an engaging and supportive conversational VH agent.
Human-centered design is essential in the development of all interactions
focused on the persuasive strategies of (1) personalization, (2)
self-monitoring, (3) tunneling, (4) suggestion, and (5) expertise. Veterans
can interact with the BB VH during daily check-ins, learn about mental
health and wellness strategies, participate in interactive activities,
increase self-awareness of their current status, and build and work safety
plans in times of suicidal crisis. BB is designed to provide the Veteran
with easy access to a suicide prevention ecosystem in which a wealth of
evidenced-based interventions will be delivered in a non-stigmatizing
fashion by a computer-based dialogue system with virtual embodiment,
utilizing various multi-modal language cues such as text, speech, animated
facial expressions, and gestures to interact with users. This paper explores
our human-centered design process for the BB feature set to target the
negative effects of social isolation and loneliness, conditions that
challenge Veteran healthcare and suicide prevention.
Mozgai, Sharon; Beland, Sarah; Leeds, Andrew; Winn, Jade G.; Kaurloto, Cari; Heylen, Dirk; Hartholt, Arno
Toward a Scoping Review of Social Intelligence in Virtual Humans Proceedings Article
In: 2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1–6, 2023.
Abstract | Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@inproceedings{mozgai_toward_2023,
title = {Toward a Scoping Review of Social Intelligence in Virtual Humans},
author = {Sharon Mozgai and Sarah Beland and Andrew Leeds and Jade G. Winn and Cari Kaurloto and Dirk Heylen and Arno Hartholt},
url = {https://ieeexplore.ieee.org/abstract/document/10042532},
doi = {10.1109/FG57933.2023.10042532},
year = {2023},
date = {2023-01-01},
booktitle = {2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)},
pages = {1–6},
abstract = {As the demand for socially intelligent Virtual Humans (VHs) increases, so follows the demand for effective and efficient cross-discipline collaboration that is required to bring these VHs “to life”. One avenue for increasing cross-discipline fluency is the aggregation and organization of seemingly disparate areas of research and development (e.g., graphics and emotion models) that are essential to the field of VH research. Our initial investigation (1) identifies and catalogues research streams concentrated in three multidisciplinary VH topic clusters within the domain of social intelligence, Emotion, Social Behavior, and The Face, (2) brings to the forefront key themes and prolific authors within each topic cluster, and (3) provides evidence that a full scoping review is warranted to further map the field, aggregate research findings, and identify gaps in the research. To enable collaboration, we provide full access to the refined VH cluster datasets, key word and author word clouds, as well as interactive evidence maps.},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2022
Hartholt, Arno; Mozgai, Sharon
Platforms and Tools for SIA Research and Development Book Section
In: The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application, vol. 48, pp. 261–304, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9896-1.
Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@incollection{hartholt_platforms_2022,
title = {Platforms and Tools for SIA Research and Development},
author = {Arno Hartholt and Sharon Mozgai},
url = {https://doi.org/10.1145/3563659.3563668},
isbn = {978-1-4503-9896-1},
year = {2022},
date = {2022-11-01},
urldate = {2023-03-31},
booktitle = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application},
volume = {48},
pages = {261–304},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
edition = {1},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Hartholt, Arno; Fast, Ed; Li, Zongjian; Kim, Kevin; Leeds, Andrew; Mozgai, Sharon
Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, ACM, Faro Portugal, 2022, ISBN: 978-1-4503-9248-8.
Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@inproceedings{hartholt_re-architecting_2022,
title = {Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development},
author = {Arno Hartholt and Ed Fast and Zongjian Li and Kevin Kim and Andrew Leeds and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3514197.3549671},
doi = {10.1145/3514197.3549671},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-15},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1–8},
publisher = {ACM},
address = {Faro Portugal},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Winn, Jade; Kaurloto, Cari; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno
Toward a Semi-Automated Scoping Review of Virtual Human Smiles Proceedings Article
In: Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop, 2022.
Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@inproceedings{mozgai_toward_2022,
title = {Toward a Semi-Automated Scoping Review of Virtual Human Smiles},
author = {Sharon Mozgai and Jade Winn and Cari Kaurloto and Andrew Leeds and Dirk Heylen and Arno Hartholt},
url = {http://www.lrec-conf.org/proceedings/lrec2022/workshops/SmiLa/index.html},
year = {2022},
date = {2022-06-01},
booktitle = {Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop},
keywords = {DTIC, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Leeds, Andrew; Kim, Kevin; Gordon, Andrew; McCullough, Kyle; Ustun, Volkan; Mozgai, Sharon
Demonstrating the Rapid Integration & Development Environment (RIDE): Embodied Conversational Agent (ECA) and Multiagent Capabilities Proceedings Article
In: Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems, pp. 1902–1904, International Foundation for Autonomous Agents and Multiagent Systems, Richland, SC, 2022, ISBN: 978-1-4503-9213-6.
Abstract | BibTeX | Tags: AI, DTIC, Integration Technology, Machine Learning, UARC, VHTL, Virtual Humans
@inproceedings{hartholt_demonstrating_2022,
title = {Demonstrating the Rapid Integration & Development Environment (RIDE): Embodied Conversational Agent (ECA) and Multiagent Capabilities},
author = {Arno Hartholt and Ed Fast and Andrew Leeds and Kevin Kim and Andrew Gordon and Kyle McCullough and Volkan Ustun and Sharon Mozgai},
isbn = {978-1-4503-9213-6},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-20},
booktitle = {Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems},
pages = {1902–1904},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Richland, SC},
series = {AAMAS '22},
abstract = {We demonstrate the Rapid Integration & Development Environment (RIDE), a research and development platform that enables rapid prototyping in support of multiagents and embodied conversational agents. RIDE is based on commodity game engines and includes a flexible architecture, system interoperability, and native support for artificial intelligence and machine learning frameworks.},
keywords = {AI, DTIC, Integration Technology, Machine Learning, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2021
Hartholt, Arno; McCullough, Kyle; Mozgai, Sharon; Ustun, Volkan; Gordon, Andrew S
Introducing RIDE: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration & Development Environment Journal Article
In: pp. 11, 2021.
Abstract | Links | BibTeX | Tags: VHTL
@article{hartholt_introducing_2021,
title = {Introducing RIDE: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration & Development Environment},
author = {Arno Hartholt and Kyle McCullough and Sharon Mozgai and Volkan Ustun and Andrew S Gordon},
url = {https://dl.acm.org/doi/10.1145/3472306.3478363},
doi = {10.1145/3472306.3478363},
year = {2021},
date = {2021-11-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {11},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '21},
abstract = {This paper describes the design, development, and philosophy of the Rapid Integration & Development Environment (RIDE). RIDE is a simulation platform that unites many Department of Defense (DoD) and Army simulation efforts to provide an accelerated development foundation and prototyping sandbox that provides direct benefit to the U.S. Army’s Synthetic Training Environment (STE) as well as the larger DoD and Army simulation communities. RIDE integrates a range of capabilities, including One World Terrain, Non-Player Character AI behaviors, xAPI logging, multiplayer networking, scenario creation, destructibility, machine learning approaches, and multi-platform support. The goal of RIDE is to create a simple, drag-and-drop development environment usable by people across all technical levels. RIDE leverages robust game engine technology while designed to be agnostic to any specific game or simulation engine. It provides decision makers with the tools needed to better define requirements and identify potential solutions in much less time and at much reduced costs. RIDE is available through Government Purpose Rights. We aim for RIDE to lower the barrier of entry to research and development efforts within the simulation community in order to reduce required time and effort for simulation and training prototyping. This paper provides an overview of our objective, overall approach, and next steps, in pursuit of these goals.},
keywords = {VHTL},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; McCullough, Kyle; Fast, Ed; Leeds, Andrew; Mozgai, Sharon; Aris, Tim; Ustun, Volkan; Gordon, Andrew; McGroarty, Christopher
Rapid Prototyping for Simulation and Training with the Rapid Integration & Development Environment (RIDE) Proceedings Article
In: 2021.
BibTeX | Tags: AI, DTIC, Integration Technology, Machine Learning, Simulation, UARC, VHTL
@inproceedings{hartholt_rapid_2021,
title = {Rapid Prototyping for Simulation and Training with the Rapid Integration & Development Environment (RIDE)},
author = {Arno Hartholt and Kyle McCullough and Ed Fast and Andrew Leeds and Sharon Mozgai and Tim Aris and Volkan Ustun and Andrew Gordon and Christopher McGroarty},
year = {2021},
date = {2021-11-01},
keywords = {AI, DTIC, Integration Technology, Machine Learning, Simulation, UARC, VHTL},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Leeds, Andrew; Kwok, David; Fast, Ed; Rizzo, Albert Skip; Hartholt, Arno
Building BRAVEMIND Vietnam: User-Centered Design for Virtual Reality Exposure Therapy Proceedings Article
In: 2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR), pp. 247–250, 2021.
Abstract | Links | BibTeX | Tags: DTIC, MedVR, VHTL, VR
@inproceedings{mozgai_building_2021,
title = {Building BRAVEMIND Vietnam: User-Centered Design for Virtual Reality Exposure Therapy},
author = {Sharon Mozgai and Andrew Leeds and David Kwok and Ed Fast and Albert Skip Rizzo and Arno Hartholt},
doi = {10.1109/AIVR52153.2021.00056},
year = {2021},
date = {2021-11-01},
booktitle = {2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)},
pages = {247–250},
abstract = {There has been a recent resurgence of Vietnam-era Veterans seeking Veteran Affairs (VA) services for post-traumatic stress disorder (PTSD). Multiple studies have reported positive outcomes using Virtual Reality Exposure Therapy (VRET) with Iraq/Afghanistan Veteran PTSD patients, but there have been fewer investigations into the acceptability and feasibility of VRET with older Veterans. We have extended an established VRET system, BRAVEMIND, to include Vietnam relevant content following a user-centered iterative design methodology. This paper will present the BRAVEMIND Vietnam VRET system, including setup, content, and technical architecture along with the findings of an initial focus group with Vietnam Veterans. These Veterans rated this system to be acceptable as a treatment tool for combat-related PTSD and provided valuable first-person accounts of their time in Vietnam to help guide the evolution of the VR content.},
keywords = {DTIC, MedVR, VHTL, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Leeds, Andrew; Mozgai, Sharon
Introducing VHMason: A Visual, Integrated, Multimodal Virtual Human Authoring Tool Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 109–111, Association for Computing Machinery, New York, NY, USA, 2021, ISBN: 978-1-4503-8619-7.
Abstract | Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@inproceedings{hartholt_introducing_2021-1,
title = {Introducing VHMason: A Visual, Integrated, Multimodal Virtual Human Authoring Tool},
author = {Arno Hartholt and Ed Fast and Andrew Leeds and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3472306.3478363},
doi = {10.1145/3472306.3478363},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {109–111},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '21},
abstract = {A major impediment to the success of virtual agents is the inability of non-technical experts to easily author content. To address this barrier we present VHMason, a multimodal authoring tool designed to help creative authors build embodied conversational agents. We introduce the novel aspects of this authoring tool and explore a use case of the creation of an agent-led educational experience implemented at Children's Hospital Los Angeles (CHLA).},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert “Skip”; Hartholt, Arno; Mozgai, Sharon
From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality Journal Article
In: Journal of Technology in Human Services, vol. 39, no. 3, pp. 314–347, 2021, ISSN: 1522-8835, (Publisher: Routledge).
Abstract | Links | BibTeX | Tags: MedVR, UARC, VHTL, Virtual Humans
@article{rizzo_combat_2021,
title = {From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality},
author = {Albert “Skip” Rizzo and Arno Hartholt and Sharon Mozgai},
url = {https://doi.org/10.1080/15228835.2021.1915931},
doi = {10.1080/15228835.2021.1915931},
issn = {1522-8835},
year = {2021},
date = {2021-07-01},
urldate = {2023-03-31},
journal = {Journal of Technology in Human Services},
volume = {39},
number = {3},
pages = {314–347},
abstract = {Research has documented the efficacy of clinical applications that leverage Virtual Reality (VR) for assessment and treatment purposes across a wide range of domains, including pain, phobias, and posttraumatic stress disorder (PTSD). As the field of Clinical VR matures, it is important to review its origins and examine how these initial explorations have progressed, what gaps remain, and what opportunities the community can pursue. We do this by reflecting on our personal scientific journey against the backdrop of the field in general. In particular, this paper discusses how a clinical research program that was initially designed to deliver trauma-focused VR exposure therapy (VRET) for combat-related PTSD has been evolved to expand its impact and address a wider range of trauma sources. Such trauma sources include sexual trauma and the needs of first responders and healthcare professionals serving on the frontlines of the COVID-19 pandemic. We provide an overview of the field and its general trends, discuss the genesis of our research agenda and its current status, and summarize upcoming opportunities, together with common challenges and lessons learned.},
note = {Publisher: Routledge},
keywords = {MedVR, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Femminella, Brian; Hartholt, Arno; Rizzo, Skip
User-Centered Design Model for Mobile Health (mHealth) Applications: A Military Case Study in Rapid Assessment Process (RAP) Journal Article
In: pp. 10, 2021.
Links | BibTeX | Tags: ARL, MedVR, VHTL
@article{mozgai_user-centered_2021,
title = {User-Centered Design Model for Mobile Health (mHealth) Applications: A Military Case Study in Rapid Assessment Process (RAP)},
author = {Sharon Mozgai and Brian Femminella and Arno Hartholt and Skip Rizzo},
url = {https://uploads-ssl.webflow.com/5f11f7e80d5a3b6dfdeeb614/5f9b3284d3d73e1da6a8f848_CHI_2021_Battle%20Buddy.pdf},
year = {2021},
date = {2021-01-01},
pages = {10},
keywords = {ARL, MedVR, VHTL},
pubstate = {published},
tppubtype = {article}
}
2020
Hartholt, Arno; Reilly, Adam; Fast, Ed; Mozgai, Sharon
Introducing Canvas: Combining Nonverbal Behavior Generation with User-Generated Content to Rapidly Create Educational Videos Proceedings Article
In: Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents, pp. 1–3, ACM, Virtual Event Scotland UK, 2020, ISBN: 978-1-4503-7586-3.
Abstract | Links | BibTeX | Tags: VHTL, Virtual Humans
@inproceedings{hartholt_introducing_2020,
title = {Introducing Canvas: Combining Nonverbal Behavior Generation with User-Generated Content to Rapidly Create Educational Videos},
author = {Arno Hartholt and Adam Reilly and Ed Fast and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3383652.3423880},
doi = {10.1145/3383652.3423880},
isbn = {978-1-4503-7586-3},
year = {2020},
date = {2020-10-01},
booktitle = {Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents},
pages = {1–3},
publisher = {ACM},
address = {Virtual Event Scotland UK},
abstract = {Rapidly creating educational content that is effective, engaging, and low-cost is a challenge. We present Canvas, a tool for educators that addresses this challenge by enabling the generation of educational video, led by an intelligent virtual agent, that combines rapid nonverbal behavior generation techniques with end-user facing authoring tools. With Canvas, educators can easily produce compelling educational videos with a minimum of investment by leveraging existing content provided by the tool (e.g., characters and environments) while incorporating their own custom content (e.g., images and video clips). Canvas has been delivered to the Smithsonian Science Education Center and is currently being evaluated internally before wider release. We discuss the system, feature set, design process, and lessons learned.},
keywords = {VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Reilly, Adam; Whitcup, Wendy; Liewer, Matt; Mozgai, Sharon
Multi-Platform Expansion of the Virtual Human Toolkit: Ubiquitous Conversational Agents Journal Article
In: International Journal of Semantic Computing, vol. 14, no. 03, pp. 315–332, 2020, ISSN: 1793-351X, 1793-7108.
Abstract | Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@article{hartholt_multi-platform_2020,
title = {Multi-Platform Expansion of the Virtual Human Toolkit: Ubiquitous Conversational Agents},
author = {Arno Hartholt and Ed Fast and Adam Reilly and Wendy Whitcup and Matt Liewer and Sharon Mozgai},
url = {https://www.worldscientific.com/doi/abs/10.1142/S1793351X20400127},
doi = {10.1142/S1793351X20400127},
issn = {1793-351X, 1793-7108},
year = {2020},
date = {2020-09-01},
journal = {International Journal of Semantic Computing},
volume = {14},
number = {03},
pages = {315–332},
abstract = {We present an extension of the Virtual Human Toolkit to include a range of computing platforms, including mobile, web, Virtual Reality (VR) and Augmented Reality (AR). The Toolkit uses a mix of in-house and commodity technologies to support audio-visual sensing, speech recognition, natural language processing, nonverbal behavior generation and realization, text-to-speech generation and rendering. It has been extended to support computing platforms beyond Windows by leveraging microservices. The resulting framework maintains the modularity of the underlying architecture, allows re-use of both logic and content through cloud services, and is extensible by porting lightweight clients. We present the current state of the framework, discuss how we model and animate our characters, and offer lessons learned through several use cases, including expressive character animation in seated VR, shared space and navigation in room-scale VR, autonomous AI in mobile AR, and real-time user performance feedback leveraging mobile sensors in headset AR.},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Hartholt, Arno; Akinyemi, Dayo; Kubicek, Katarina; Rizzo, Albert (Skip); Kipke, Michele
Development and Initial Feasibility Testing of the Virtual Research Navigator (VRN): A Public-Facing Agent-Based Educational System for Clinical Research Participation Book Section
In: HCI International 2020 - Posters, vol. 1225, pp. 304–307, Springer International Publishing, Cham, Switzerland, 2020, ISBN: 978-3-030-50728-2 978-3-030-50729-9.
Abstract | Links | BibTeX | Tags: MedVR, VHTL, Virtual Humans
@incollection{mozgai_development_2020,
title = {Development and Initial Feasibility Testing of the Virtual Research Navigator (VRN): A Public-Facing Agent-Based Educational System for Clinical Research Participation},
author = {Sharon Mozgai and Arno Hartholt and Dayo Akinyemi and Katarina Kubicek and Albert (Skip) Rizzo and Michele Kipke},
url = {http://link.springer.com/10.1007/978-3-030-50729-9_43},
doi = {10.1007/978-3-030-50729-9_43},
isbn = {978-3-030-50728-2 978-3-030-50729-9},
year = {2020},
date = {2020-07-01},
booktitle = {HCI International 2020 - Posters},
volume = {1225},
pages = {304–307},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {The overall goal of VRN is to develop a novel technology solution at Children’s Hospital Los Angeles (CHLA) to overcome barriers that prevent the recruitment of diverse patient populations to clinical trials by providing both caregivers and children with an interactive educational experience. This system consists of 1) an intelligent agent called Zippy that users interact with by keyboard or voice input, 2) a series of videos covering topics including Privacy, Consent, and Benefits, and 3) a UI that guides users through all available content. Pre- and post-questionnaires assessed willingness to participate in clinical research and found participants either increased or maintained their level of willingness to participate in research studies. Additionally, qualitative analysis of interview data revealed participants rated the overall interaction favorably and believed Zippy to be more fun, less judgmental, and less threatening than interacting with a human. Future iterations are in progress based on this user feedback.},
keywords = {MedVR, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Mozgai, Sharon; Hartholt, Arno; Leeds, Andrew; Rizzo, Albert ‘Skip’
Iterative Participatory Design for VRET Domain Transfer: From Combat Exposure to Military Sexual Trauma Proceedings Article
In: Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems, pp. 8, ACM, Honolulu, HI, 2020.
Abstract | Links | BibTeX | Tags: MedVR, VHTL, Virtual Humans
@inproceedings{mozgai_iterative_2020,
title = {Iterative Participatory Design for VRET Domain Transfer: From Combat Exposure to Military Sexual Trauma},
author = {Sharon Mozgai and Arno Hartholt and Andrew Leeds and Albert ‘Skip’ Rizzo},
url = {https://dl.acm.org/doi/abs/10.1145/3334480.3375219},
doi = {10.1145/3334480.3375219},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems},
pages = {8},
publisher = {ACM},
address = {Honolulu, HI},
abstract = {This case study describes the expansion of the BRAVEMIND virtual reality exposure therapy (VRET) system from the domain of combat-related posttraumatic stress disorder (PTSD) to the domain of PTSD due to Military Sexual Trauma (MST). As VRET continues to demonstrate efficacy in treating PTSD across multiple trauma types and anxiety disorders, adapting existing systems and content to new domains while simultaneously maintaining clinical integrity is becoming a high priority. To develop BRAVEMIND-MST, we engaged in an iterative participatory design process with psychologists, engineers, and artists. This first-person account of our collaborative development process focuses on three key areas: (1) VR Environment, (2) User-Avatar State, and (3) Events, while detailing the challenges we encountered and lessons learned. This process culminated in eight design guidelines as a first step in defining a VRET domain transfer methodology.},
keywords = {MedVR, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert
The Passive Sensing Agent: A Multimodal Adaptive mHealth Application Proceedings Article
In: Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops), pp. 1–3, IEEE, Austin, TX, USA, 2020, ISBN: 978-1-72814-716-1.
Abstract | Links | BibTeX | Tags: MedVR, UARC, VHTL, Virtual Humans
@inproceedings{mozgai_passive_2020,
title = {The Passive Sensing Agent: A Multimodal Adaptive mHealth Application},
author = {Sharon Mozgai and Arno Hartholt and Albert Rizzo},
url = {https://ieeexplore.ieee.org/document/9156177/},
doi = {10.1109/PerComWorkshops48775.2020.9156177},
isbn = {978-1-72814-716-1},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)},
pages = {1–3},
publisher = {IEEE},
address = {Austin, TX, USA},
abstract = {We are demoing the Passive Sensing Agent (PSA), an mHealth virtual human coach that collects multimodal data through passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin). This virtual human interface delivers adaptive multimedia content via a smartphone application that is specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health. Initially developed for the military, the PSA delivers health interventions (e.g., educational exercises, physical challenges, and performance feedback) matched to the individual user via novel adaptive logic-based algorithms while employing various behavior change techniques (e.g., goal-setting, barrier identification, rewards, modeling, etc.). A virtual human coach leads all interactions, including the first-time user experience and the brief daily sessions. All interactions were specifically designed to engage and motivate the user while continuously collecting data on their cognitive, emotional, and physical fitness. This multi-component application is integrated and deployed on an iPhone and Apple Watch prototype; a civilian version is currently in development.},
keywords = {MedVR, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert "Skip"
An Adaptive Agent-Based Interface for Personalized Health Interventions Proceedings Article
In: Proceedings of the 25th International Conference on Intelligent User Interfaces Companion, pp. 118–119, ACM, Cagliari Italy, 2020, ISBN: 978-1-4503-7513-9.
Abstract | Links | BibTeX | Tags: MedVR, UARC, VHTL, Virtual Humans
@inproceedings{mozgai_adaptive_2020,
title = {An Adaptive Agent-Based Interface for Personalized Health Interventions},
author = {Sharon Mozgai and Arno Hartholt and Albert "Skip" Rizzo},
url = {https://dl.acm.org/doi/10.1145/3379336.3381467},
doi = {10.1145/3379336.3381467},
isbn = {978-1-4503-7513-9},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces Companion},
pages = {118–119},
publisher = {ACM},
address = {Cagliari Italy},
abstract = {This demo introduces a novel mHealth application with an agent-based interface designed to collect multimodal data with passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin) as well as through user self-report. This mHealth application delivers personalized and adaptive multimedia content via a smartphone application specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health via novel adaptive logic-based algorithms while employing behavior change techniques (e.g., goal-setting, barrier identification, etc.). A virtual human coach leads all interactions to improve adherence.},
keywords = {MedVR, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pilly, Praveen K.; Skorheim, Steven W.; Hubbard, Ryan J.; Ketz, Nicholas A.; Roach, Shane M.; Lerner, Itamar; Jones, Aaron P.; Robert, Bradley; Bryant, Natalie B.; Hartholt, Arno; Mullins, Teagan S.; Choe, Jaehoon; Clark, Vincent P.; Howard, Michael D.
One-Shot Tagging During Wake and Cueing During Sleep With Spatiotemporal Patterns of Transcranial Electrical Stimulation Can Boost Long-Term Metamemory of Individual Episodes in Humans Journal Article
In: Frontiers in Neuroscience, vol. 13, pp. 1416, 2020, ISSN: 1662-453X.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{pilly_one-shot_2020,
title = {One-Shot Tagging During Wake and Cueing During Sleep With Spatiotemporal Patterns of Transcranial Electrical Stimulation Can Boost Long-Term Metamemory of Individual Episodes in Humans},
author = {Praveen K. Pilly and Steven W. Skorheim and Ryan J. Hubbard and Nicholas A. Ketz and Shane M. Roach and Itamar Lerner and Aaron P. Jones and Bradley Robert and Natalie B. Bryant and Arno Hartholt and Teagan S. Mullins and Jaehoon Choe and Vincent P. Clark and Michael D. Howard},
url = {https://www.frontiersin.org/article/10.3389/fnins.2019.01416/full},
doi = {10.3389/fnins.2019.01416},
issn = {1662-453X},
year = {2020},
date = {2020-01-01},
journal = {Frontiers in Neuroscience},
volume = {13},
pages = {1416},
abstract = {Targeted memory reactivation (TMR) during slow-wave oscillations (SWOs) in sleep has been demonstrated with sensory cues to achieve about 5–12% improvement in post-nap memory performance on simple laboratory tasks. But prior work has not yet addressed the one-shot aspect of episodic memory acquisition, or dealt with the presence of interference from ambient environmental cues in real-world settings. Further, TMR with sensory cues may not be scalable to the multitude of experiences over one’s lifetime. We designed a novel non-invasive non-sensory paradigm that tags one-shot experiences of minute-long naturalistic episodes in immersive virtual reality (VR) with unique spatiotemporal amplitude-modulated patterns (STAMPs) of transcranial electrical stimulation (tES). In particular, we demonstrated that these STAMPs can be reapplied as brief pulses during SWOs in sleep to achieve about 10–20% improvement in the metamemory of targeted episodes compared to the control episodes at 48 hours after initial viewing. We found that STAMPs can not only facilitate but also impair metamemory for the targeted episodes based on an interaction between presleep metamemory and the number of STAMP applications during sleep. Overnight metamemory improvements were mediated by spectral power increases following the offset of STAMPs in the slow-spindle band (8–12 Hz) for left temporal areas in the scalp electroencephalography (EEG) during sleep. These results prescribe an optimal strategy to leverage STAMPs for boosting metamemory and suggest that real-world episodic memories can be modulated in a targeted manner even with coarser, non-invasive spatiotemporal stimulation.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2019
Hartholt, Arno; Fast, Ed; Reilly, Adam; Whitcup, Wendy; Liewer, Matt; Mozgai, Sharon
Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied AI Agents in XR Proceedings Article
In: Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR), pp. 308–3084, IEEE, San Diego, CA, USA, 2019, ISBN: 978-1-72815-604-0.
Abstract | Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@inproceedings{hartholt_ubiquitous_2019,
title = {Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied AI Agents in XR},
author = {Arno Hartholt and Ed Fast and Adam Reilly and Wendy Whitcup and Matt Liewer and Sharon Mozgai},
url = {https://ieeexplore.ieee.org/document/8942321/},
doi = {10.1109/AIVR46125.2019.00072},
isbn = {978-1-72815-604-0},
year = {2019},
date = {2019-12-01},
booktitle = {Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)},
pages = {308–3084},
publisher = {IEEE},
address = {San Diego, CA, USA},
abstract = {We present an architecture and framework for the development of virtual humans for a range of computing platforms, including mobile, web, Virtual Reality (VR) and Augmented Reality (AR). The framework uses a mix of in-house and commodity technologies to support audio-visual sensing, speech recognition, natural language processing, nonverbal behavior generation and realization, text-to-speech generation, and rendering. This work builds on the Virtual Human Toolkit, which has been extended to support computing platforms beyond Windows. The resulting framework maintains the modularity of the underlying architecture, allows re-use of both logic and content through cloud services, and is extensible by porting lightweight clients. We present the current state of the framework, discuss how we model and animate our characters, and offer lessons learned through several use cases, including expressive character animation in seated VR, shared space and navigation in roomscale VR, autonomous AI in mobile AR, and real-time user performance feedback based on mobile sensors in headset AR.},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert “Skip”
Systematic Representative Design and Clinical Virtual Reality Journal Article
In: Psychological Inquiry, vol. 30, no. 4, pp. 231–245, 2019, ISSN: 1047-840X, 1532-7965.
Abstract | Links | BibTeX | Tags: MedVR, UARC, VHTL, Virtual Humans
@article{mozgai_systematic_2019,
title = {Systematic Representative Design and Clinical Virtual Reality},
author = {Sharon Mozgai and Arno Hartholt and Albert “Skip” Rizzo},
url = {https://www.tandfonline.com/doi/full/10.1080/1047840X.2019.1693873},
doi = {10.1080/1047840X.2019.1693873},
issn = {1047-840X, 1532-7965},
year = {2019},
date = {2019-10-01},
journal = {Psychological Inquiry},
volume = {30},
number = {4},
pages = {231–245},
abstract = {The authors of the article, “Causal Inference in Generalizable Environments: Systematic Representative Design”, boldly announce their core point in the opening line of the abstract stating that, “Causal inference and generalizability both matter.” While a surface glance might suggest this to be a simple notion, a closer examination reveals the complexity of what they are proposing. This complexity is apparent when one considers that the bulk of human experimental research has always been challenged in its inability to concurrently deliver on both of these aims. This is no slight on the tens of thousands of human researchers and behavioral scientists who have devoted long careers to highly controlled human psychological and social science laboratory research. Rather, it reflects the sheer enormity of the challenges for conducting human studies designed to specify human function with physics-informed lab methods, while at the same time producing results that lead to enhanced understanding and prediction of how people will operate in the complex and ever-changing contexts that make up everyday life. At the core of this issue is a methodological and philosophical challenge that is relevant to all areas of human subjects research, beyond the social science focus of the Miller et al. (this issue) article. It is our aim to discuss the central topics in their article through the lens of our own work using Virtual/Augmented Reality and Virtual Human simulation technologies for clinical and training applications.},
keywords = {MedVR, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon; Fast, Ed; Liewer, Matt; Reilly, Adam; Whitcup, Wendy; Rizzo, Albert "Skip"
Virtual Humans in Augmented Reality: A First Step towards Real-World Embedded Virtual Roleplayers Proceedings Article
In: Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19, pp. 205–207, ACM Press, Kyoto, Japan, 2019, ISBN: 978-1-4503-6922-0.
Abstract | Links | BibTeX | Tags: MedVR, VHTL, Virtual Humans
@inproceedings{hartholt_virtual_2019,
title = {Virtual Humans in Augmented Reality: A First Step towards Real-World Embedded Virtual Roleplayers},
author = {Arno Hartholt and Sharon Mozgai and Ed Fast and Matt Liewer and Adam Reilly and Wendy Whitcup and Albert "Skip" Rizzo},
url = {http://dl.acm.org/citation.cfm?doid=3349537.3352766},
doi = {10.1145/3349537.3352766},
isbn = {978-1-4503-6922-0},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19},
pages = {205–207},
publisher = {ACM Press},
address = {Kyoto, Japan},
abstract = {We present one of the first applications of virtual humans in Augmented Reality (AR), which allows young adults with Autism Spectrum Disorder (ASD) the opportunity to practice job interviews. It uses Magic Leap’s AR hardware sensors to provide users with immediate feedback on six different metrics, including eye gaze, blink rate, and head orientation. The system provides two characters, with three conversational modes each. The system was ported from an existing desktop application; the main development lessons learned were: 1) provide users with navigation instructions in the user interface, 2) avoid dark colors as they are rendered transparently, 3) use dynamic gaze so characters maintain eye contact with the user, 4) use hardware sensors like eye gaze to provide user feedback, and 5) use surface detection to place characters dynamically in the world.},
keywords = {MedVR, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Mozgai, Sharon; Rizzo, Albert "Skip"
Virtual Job Interviewing Practice for High-Anxiety Populations Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 238–240, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
Abstract | Links | BibTeX | Tags: MedVR, VHTL, Virtual Humans
@inproceedings{hartholt_virtual_2019-1,
title = {Virtual Job Interviewing Practice for High-Anxiety Populations},
author = {Arno Hartholt and Sharon Mozgai and Albert "Skip" Rizzo},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329417},
doi = {10.1145/3308532.3329417},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {238–240},
publisher = {ACM Press},
address = {Paris, France},
abstract = {We present a versatile system for training job interviewing skills that focuses specifically on segments of the population facing increased challenges during the job application process. In particular, we target those with Autism Spectrum Disorder (ASD), veterans transitioning to civilian life, and former convicts reintegrating into society. The system itself follows the SAIBA framework and contains several interviewer characters, who each represent a different vocational field (e.g., service industry, retail, office). Each interviewer can be set to one of three conversational modes, which not only affects what they say and how they say it, but also their supporting body language. This approach offers varying levels of difficulty, allowing users to start practicing with interviewers who are more encouraging and accommodating before moving on to personalities that are more direct and indifferent. Finally, the user can place the interviewers in different environmental settings (e.g., conference room, restaurant, executive office), allowing for many different combinations in which to practice.},
keywords = {MedVR, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pilly, Praveen K.; Skorheim, Steven W.; Hubbard, Ryan J.; Ketz, Nicholas A.; Roach, Shane M.; Jones, Aaron P.; Robert, Bradley; Bryant, Natalie B.; Lerner, Itamar; Hartholt, Arno; Mullins, Teagan S.; Choe, Jaehoon; Clark, Vincent P.; Howard, Michael D.
Spatiotemporal patterns of transcranial electrical stimulation can strengthen the metamemory of individual episodic memories in humans Journal Article
In: bioRxiv, pp. 110, 2019.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{pilly_spatiotemporal_2019,
title = {Spatiotemporal patterns of transcranial electrical stimulation can strengthen the metamemory of individual episodic memories in humans},
author = {Praveen K. Pilly and Steven W. Skorheim and Ryan J. Hubbard and Nicholas A. Ketz and Shane M. Roach and Aaron P. Jones and Bradley Robert and Natalie B. Bryant and Itamar Lerner and Arno Hartholt and Teagan S. Mullins and Jaehoon Choe and Vincent P. Clark and Michael D. Howard},
url = {https://www.biorxiv.org/content/10.1101/672378v1.abstract},
doi = {10.1101/672378},
year = {2019},
date = {2019-06-01},
journal = {bioRxiv},
pages = {110},
abstract = {Long-term retention of memories critically depends on consolidation processes, which occur during slow-wave oscillations (SWOs) in non-rapid eye movement (NREM) sleep. We designed a non-invasive system that can tag one-shot experiences of naturalistic episodes within immersive virtual reality (VR) with unique spatiotemporal amplitude-modulated patterns (STAMPs) of transcranial electrical stimulation (tES). We demonstrate that these STAMPs can be re-applied during UP states of SWOs on two consecutive nights to achieve a 19.43% improvement in the metamemory of targeted episodes at 48 hours after the one-shot viewing, compared to the control episodes. Further, we found an interaction between pre-sleep metamemory of targeted episodes and the number of STAMP applications for those episodes during sleep, and that STAMPs elicit increases in left temporal slow-spindle (9–12 Hz) power that are predictive of overnight metamemory improvements. These results prescribe an optimal strategy to leverage STAMPs for boosting metamemory based on pre-sleep performance and tracking the STAMP-induced biomarker during sleep, and suggest that real-world episodic memories can be modulated in a targeted manner even with coarser, non-invasive spatiotemporal stimulation.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gordon, Carla; Leuski, Anton; Benn, Grace; Klassen, Eric; Fast, Edward; Liewer, Matt; Hartholt, Arno; Traum, David
PRIMER: An Emotionally Aware Virtual Agent Proceedings Article
In: Proceedings of the 24th International Conference on Intelligent User Interfaces, pp. 10, ACM, Los Angeles, CA, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{gordon_primer_2019,
title = {PRIMER: An Emotionally Aware Virtual Agent},
author = {Carla Gordon and Anton Leuski and Grace Benn and Eric Klassen and Edward Fast and Matt Liewer and Arno Hartholt and David Traum},
url = {https://www.research.ibm.com/haifa/Workshops/user2agent2019/},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 24th International Conference on Intelligent User Interfaces},
pages = {10},
publisher = {ACM},
address = {Los Angeles, CA},
abstract = {PRIMER is a proof-of-concept system designed to show the potential of immersive dialogue agents and virtual environments that adapt and respond to both direct verbal input and indirect emotional input. The system has two novel interfaces: (1) for the user, an immersive VR environment and an animated virtual agent both of which adapt and react to the user’s direct input as well as the user’s perceived emotional state, and (2) for an observer, an interface that helps track the perceived emotional state of the user, with visualizations to provide insight into the system’s decision making process. While the basic system architecture can be adapted for many potential real world applications, the initial version of this system was designed to assist clinical social workers in helping children cope with bullying. The virtual agent produces verbal and non-verbal behaviors guided by a plan for the counseling session, based on in-depth discussions with experienced counselors, but is also reactive to both initiatives that the user takes, e.g. asking their own questions, and the user’s perceived emotional state.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lerner, Itamar; Ketz, Nicholas A.; Jones, Aaron P.; Bryant, Natalie B.; Robert, Bradley; Skorheim, Steven W.; Hartholt, Arno; Rizzo, Albert S.; Gluck, Mark A.; Clark, Vincent P.; Pilly, Praveen K.
Transcranial Current Stimulation During Sleep Facilitates Insight into Temporal Rules, but does not Consolidate Memories of Individual Sequential Experiences Journal Article
In: Scientific Reports, vol. 9, no. 1, 2019, ISSN: 2045-2322.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@article{lerner_transcranial_2019,
title = {Transcranial Current Stimulation During Sleep Facilitates Insight into Temporal Rules, but does not Consolidate Memories of Individual Sequential Experiences},
author = {Itamar Lerner and Nicholas A. Ketz and Aaron P. Jones and Natalie B. Bryant and Bradley Robert and Steven W. Skorheim and Arno Hartholt and Albert S. Rizzo and Mark A. Gluck and Vincent P. Clark and Praveen K. Pilly},
url = {http://www.nature.com/articles/s41598-018-36107-7},
doi = {10.1038/s41598-018-36107-7},
issn = {2045-2322},
year = {2019},
date = {2019-02-01},
journal = {Scientific Reports},
volume = {9},
number = {1},
abstract = {Slow-wave sleep (SWS) is known to contribute to memory consolidation, likely through the reactivation of previously encoded waking experiences. Contemporary studies demonstrate that when auditory or olfactory stimulation is administered during memory encoding and then reapplied during SWS, memory consolidation can be enhanced, an effect that is believed to rely on targeted memory reactivation (TMR) induced by the sensory stimulation. Here, we show that transcranial current stimulations (tCS) during sleep can also be used to induce TMR, resulting in the facilitation of high-level cognitive processes. Participants were exposed to repeating sequences in a realistic 3D immersive environment while being stimulated with particular tCS patterns. A subset of these tCS patterns was then reapplied during sleep stages N2 and SWS coupled to slow oscillations in a closed-loop manner. We found that in contrast to our initial hypothesis, performance for the sequences corresponding to the reapplied tCS patterns was no better than for other sequences that received stimulations only during wake or not at all. In contrast, we found that the more stimulations participants received overnight, the more likely they were to detect temporal regularities governing the learned sequences the following morning, with tCS-induced beta power modulations during sleep mediating this effect.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2017
Rizzo, Albert; Roy, Michael J.; Hartholt, Arno; Costanzo, Michelle; Highland, Krista Beth; Jovanovic, Tanja; Norrholm, Seth D.; Reist, Chris; Rothbaum, Barbara; Difede, JoAnn
Virtual Reality Applications for the Assessment and Treatment of PTSD Book Section
In: Handbook of Military Psychology, pp. 453–471, Springer International Publishing, Cham, Switzerland, 2017, ISBN: 978-3-319-66190-2 978-3-319-66192-6.
Abstract | Links | BibTeX | Tags: MedVR, UARC, Virtual Humans
@incollection{rizzo_virtual_2017,
title = {Virtual Reality Applications for the Assessment and Treatment of PTSD},
author = {Albert Rizzo and Michael J. Roy and Arno Hartholt and Michelle Costanzo and Krista Beth Highland and Tanja Jovanovic and Seth D. Norrholm and Chris Reist and Barbara Rothbaum and JoAnn Difede},
url = {http://link.springer.com/10.1007/978-3-319-66192-6_27},
doi = {10.1007/978-3-319-66192-6_27},
isbn = {978-3-319-66190-2 978-3-319-66192-6},
year = {2017},
date = {2017-12-01},
booktitle = {Handbook of Military Psychology},
pages = {453–471},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {War is one of the most challenging situations that a human being can encounter. The physical, emotional, cognitive, and psychological demands of a combat environment place tremendous stress on even the most well-prepared military people. It is no surprise that the stressful experiences, characteristic of operations in Iraq and Afghanistan, have produced significant numbers of service members (SMs) and veterans at risk for posttraumatic stress disorder (PTSD), as well as other psychosocial/behavioral health conditions. For example, as of June 2015, the Defense Medical Surveillance System reported 138,197 active duty SMs had been diagnosed with PTSD (Fischer, 2015). In a meta-analysis of studies published since 2001, 13.2% of infantry service members met the criteria for PTSD, with incidence rising dramatically to 25–30% in units with high levels of direct combat exposure (Kok, Herrell, Thomas, & Hoge, 2012). Moreover, as of early 2013, the prevalence of PTSD among discharged veterans receiving treatment at Veterans Affairs (VA) clinics was reported to be 29% (Fischer, 2013). These findings make a compelling case for a continued focus on developing and enhancing the availability of diverse evidence-based treatment options to address this military behavioral healthcare challenge. One emerging area of research and clinical focus is the use of Virtual Reality (VR) simulation technology as a tool for delivering evidence-based approaches for the assessment and treatment of PTSD. Although in recent times the popular media has lavishly reported on VR’s potential impact on all elements of our evolving digital culture, and has created the impression that VR is a novel technology, the reality is that VR is not a new concept, and many of its developmental roots are traceable to the 1980s and 1990s (Schnipper et al., 2015). Moreover, a large scientific literature has emerged over the last 20 years demonstrating the unique and added value that is accrued with the use of VR to address a wide range of clinical health conditions (Rizzo 1994; Rizzo et al., 1997; 2002; 2010; 2014; Rizzo, Cukor et al., 2015). Within that context, the present chapter will summarize the ways that researchers and clinicians have employed VR to create relevant simulations that can be applied to the assessment and treatment of PTSD.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Nye, Benjamin D.; Auerbach, Daniel; Mehta, Tirth R.; Hartholt, Arno
Building a Backbone for Multi-Agent Tutoring in GIFT (Work in Progress) Proceedings Article
In: Proceedings of the GIFTSym5, pp. 23–35, ARL, Orlando, Florida, 2017.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC, Virtual Humans
@inproceedings{nye_building_2017,
title = {Building a Backbone for Multi-Agent Tutoring in GIFT (Work in Progress)},
author = {Benjamin D. Nye and Daniel Auerbach and Tirth R. Mehta and Arno Hartholt},
url = {https://books.google.com/books?id=PwMtDwAAQBAJ&printsec=copyright&source=gbs_pub_info_r#v=onepage&q&f=false},
year = {2017},
date = {2017-05-01},
booktitle = {Proceedings of the GIFTSym5},
pages = {23–35},
publisher = {ARL},
address = {Orlando, Florida},
abstract = {As intelligent tutoring systems (ITS) increasingly need to interoperate and co-exist, emerging systems have transitioned toward service-oriented designs to enable modularity and composability of tutoring components made and/or maintained by different research and development groups. However, as a research community, we have still not reached a point where it is trivial for a new service to be added into a system like the Generalized Intelligent Framework for Tutoring (GIFT; Sottilare, Goldberg, Brawner, & Holden, 2012). In an early paper considering this issue with respect to the GIFT architecture (Nye & Morrison, 2013), we proposed addressing this issue by building toward a lightweight multi-agent architecture where certain services act as autonomous agents: “a system situated within and a part of an environment that senses that environment and acts on it, over time, in pursuit of its own agenda and so as to affect what it senses in the future” (Franklin & Graesser, 1997; p. 25). In our work in progress described here, we discuss how we are approaching the opportunity to build such capabilities into GIFT. The high level goals of our work are targeting two core goals for GIFT: A) to be a lightweight framework that will expand access to and use of ITS and B) to help GIFT to increase the intelligence and effectiveness of its services based on data over time. We are currently targeting the first goal, which will underpin the second goal. However, what does it mean to be a lightweight framework? In this context, a “lightweight framework” is framed as minimizing the following criteria: (1) hardware requirements, (2) software expertise to design services, (3) software expertise to use existing services, (4) software expertise to stand up the message-passing layer between agents, and (5) a minimal working message ontology (Nye & Morrison, 2013). Since our original paper four years ago, GIFT has made significant strides in reducing barriers related to hardware by building a cloud-based version and software expertise to use GIFT services through authoring tools. It has also developed a growing ontology of messages (e.g., https://gifttutoring.org/projects/gift/wiki/Interface_Control_Document_2016-1). With that said, despite now-extensive documentation, designing new services for GIFT is still not trivial and strong expertise is required to pass messages between GIFT modules and agents (either internal or external). To address these issues, the Building a Backbone project is working toward agent-oriented designs that build on GIFT's existing service-oriented framework. By moving from services toward agents, modules will be able to act more autonomously, enabling capabilities such as plug-and-play, hot-swapping, and selecting between multiple services providing the same capabilities. These new capabilities are intended to reduce barriers to building new GIFT-compatible services and also to integrating GIFT with other service-oriented ecosystems. The first steps toward these capabilities are an ontology mapping service and an initial integration that combines GIFT, the Virtual Human Toolkit core framework for agents, and the SuperGLU framework for adding agent-oriented capabilities for coordinating services. This paper reports on work to date, with an emphasis on target capabilities, design decisions, challenges, and open research questions for this work.},
keywords = {Learning Sciences, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2016
Rizzo, Albert; Scherer, Stefan; DeVault, David; Gratch, Jonathan; Artstein, Ronald; Hartholt, Arno; Lucas, Gale; Marsella, Stacy; Morbini, Fabrizio; Nazarian, Angela; Stratou, Giota; Traum, David; Wood, Rachel; Boberg, Jill; Morency, Louis Philippe
Detection and computational analysis of psychological signals using a virtual human interviewing agent Journal Article
In: Journal of Pain Management, pp. 311–321, 2016, ISSN: 1939-5914.
Abstract | Links | BibTeX | Tags: MedVR, UARC, Virtual Humans
@article{rizzo_detection_2016,
title = {Detection and computational analysis of psychological signals using a virtual human interviewing agent},
author = {Albert Rizzo and Stefan Scherer and David DeVault and Jonathan Gratch and Ronald Artstein and Arno Hartholt and Gale Lucas and Stacy Marsella and Fabrizio Morbini and Angela Nazarian and Giota Stratou and David Traum and Rachel Wood and Jill Boberg and Louis Philippe Morency},
url = {http://www.icdvrat.org/2014/papers/ICDVRAT2014_S03N3_Rizzo_etal.pdf},
issn = {1939-5914},
year = {2016},
date = {2016-11-01},
journal = {Journal of Pain Management},
pages = {311–321},
abstract = {It has long been recognized that facial expressions, body posture/gestures and vocal parameters play an important role in human communication and the implicit signalling of emotion. Recent advances in low cost computer vision and behavioral sensing technologies can now be applied to the process of making meaningful inferences as to user state when a person interacts with a computational device. Effective use of this additive information could serve to promote human interaction with virtual human (VH) agents that may enhance diagnostic assessment. This paper will focus on our current research in these areas within the DARPA-funded "Detection and Computational Analysis of Psychological Signals" project, with specific attention to the SimSensei application use case. SimSensei is a virtual human interaction platform that is able to sense and interpret real-time audiovisual behavioral signals from users interacting with the system. It is specifically designed for health care support and leverages years of virtual human research and development at USC-ICT. The platform enables an engaging face-to-face interaction where the virtual human automatically reacts to the state and inferred intent of the user through analysis of behavioral signals gleaned from facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human to human interaction and communication, SimSensei aims to capture and infer from user non-verbal communication to improve engagement between a VH and a user. The system can also quantify and interpret sensed behavioral signals longitudinally that can be used to inform diagnostic assessment within a clinical context.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Swartout, William; Nye, Benjamin D.; Hartholt, Arno; Reilly, Adam; Graesser, Arthur C.; VanLehn, Kurt; Wetzel, Jon; Liewer, Matt; Morbini, Fabrizio; Morgan, Brent; Wang, Lijia; Benn, Grace; Rosenberg, Milton
Designing a Personal Assistant for Life-Long Learning (PAL3) Proceedings Article
In: Proceedings of The Twenty-Ninth International Flairs Conference, pp. 491–496, AAAI Press, Key Largo, FL, 2016, ISBN: 978-1-57735-756-8.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC, Virtual Humans
@inproceedings{swartout_designing_2016,
title = {Designing a Personal Assistant for Life-Long Learning (PAL3)},
author = {William Swartout and Benjamin D. Nye and Arno Hartholt and Adam Reilly and Arthur C. Graesser and Kurt VanLehn and Jon Wetzel and Matt Liewer and Fabrizio Morbini and Brent Morgan and Lijia Wang and Grace Benn and Milton Rosenberg},
url = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS16/paper/view/12793},
isbn = {978-1-57735-756-8},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of The Twenty-Ninth International Flairs Conference},
pages = {491–496},
publisher = {AAAI Press},
address = {Key Largo, FL},
abstract = {Learners’ skills decay during gaps in instruction, since they lack the structure and motivation to continue studying. To meet this challenge, the PAL3 system was designed to accompany a learner throughout their career and mentor them to build and maintain skills through: 1) the use of an embodied pedagogical agent (Pal), 2) a persistent learning record that drives a student model which estimates forgetting, 3) an adaptive recommendation engine linking to both intelligent tutors and traditional learning resources, and 4) game-like mechanisms to promote engagement (e.g., leaderboards, effort-based point rewards, unlocking customizations). The design process for PAL3 is discussed, from the perspective of insights and revisions based on a series of formative feedback and evaluation sessions.},
keywords = {Learning Sciences, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2015
Stratou, Giota; Morency, Louis-Philippe; DeVault, David; Hartholt, Arno; Fast, Edward; Lhommet, Margaux; Lucas, Gale; Morbini, Fabrizio; Georgila, Kallirroi; Scherer, Stefan; Gratch, Jonathan; Marsella, Stacy; Traum, David; Rizzo, Albert
A Demonstration of the Perception System in SimSensei, a Virtual Human Application for Healthcare Interviews Proceedings Article
In: Affective Computing and Intelligent Interaction (ACII), 2015 International Conference on, pp. 787–789, IEEE, Xi'an, China, 2015.
Abstract | Links | BibTeX | Tags: MedVR, Social Simulation, Virtual Humans
@inproceedings{stratou_demonstration_2015,
title = {A Demonstration of the Perception System in SimSensei, a Virtual Human Application for Healthcare Interviews},
author = {Giota Stratou and Louis-Philippe Morency and David DeVault and Arno Hartholt and Edward Fast and Margaux Lhommet and Gale Lucas and Fabrizio Morbini and Kallirroi Georgila and Stefan Scherer and Jonathan Gratch and Stacy Marsella and David Traum and Albert Rizzo},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7344661},
doi = {10.1109/ACII.2015.7344661},
year = {2015},
date = {2015-09-01},
booktitle = {Affective Computing and Intelligent Interaction (ACII), 2015 International Conference on},
pages = {787–789},
publisher = {IEEE},
address = {Xi'an, China},
abstract = {We present the SimSensei system, a fully automatic virtual agent that conducts interviews to assess indicators of psychological distress. With this demo, we focus our attention on the perception part of the system, a multimodal framework which captures and analyzes user state behavior for both behavioral understanding and interactional purposes. We will demonstrate real-time user state sensing as a part of the SimSensei architecture and discuss how this technology enabled automatic analysis of behaviors related to psychological distress.},
keywords = {MedVR, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Bruijnes, Merijn; Akker, Rieks; Hartholt, Arno; Heylen, Dirk
Virtual Suspect William Proceedings Article
In: Intelligent Virtual Agents, pp. 67–76, Springer, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{bruijnes_virtual_2015,
title = {Virtual Suspect William},
author = {Merijn Bruijnes and Rieks Akker and Arno Hartholt and Dirk Heylen},
url = {http://ict.usc.edu/pubs/Virtual%20Suspect%20William.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {Intelligent Virtual Agents},
pages = {67–76},
publisher = {Springer},
abstract = {We evaluate an algorithm which computes the responses of an agent that plays the role of a suspect in simulations of police interrogations. The algorithm is based on a cognitive model - the response model - that is centred around keeping track of interpersonal relations. The model is parametrized in such a way that different personalities of the virtual suspect can be defined. In the evaluation we defined three different personalities and had participants guess the personality based on the responses the model provided in an interaction with the participant. We investigate what factors contributed to the ability of a virtual agent to show behaviour that was recognized by participants as belonging to a persona.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Morency, Louis-Philippe; Stratou, Giota; DeVault, David; Hartholt, Arno; Lhommet, Margaux; Lucas, Gale; Morbini, Fabrizio; Georgila, Kallirroi; Scherer, Stefan; Gratch, Jonathan; Marsella, Stacy; Traum, David; Rizzo, Albert "Skip"
SimSensei Demonstration: A Perceptive Virtual Human Interviewer for Healthcare Applications Proceedings Article
In: Proceedings of the 29th AAAI Conference on Artificial Intelligence (AAAI), Austin, Texas, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{morency_simsensei_2015,
title = {SimSensei Demonstration: A Perceptive Virtual Human Interviewer for Healthcare Applications},
author = {Louis-Philippe Morency and Giota Stratou and David DeVault and Arno Hartholt and Margaux Lhommet and Gale Lucas and Fabrizio Morbini and Kallirroi Georgila and Stefan Scherer and Jonathan Gratch and Stacy Marsella and David Traum and Albert "Skip" Rizzo},
url = {http://ict.usc.edu/pubs/SimSensei%20Demonstration%20A%20Perceptive%20Virtual%20Human%20Interviewer%20for%20Healthcare%20Applications.pdf},
year = {2015},
date = {2015-01-01},
booktitle = {Proceedings of the 29th AAAI Conference on Artificial Intelligence (AAAI)},
address = {Austin, Texas},
abstract = {We present the SimSensei system, a fully automatic virtual agent that conducts interviews to assess indicators of psychological distress. We emphasize the perception part of the system, a multimodal framework which captures and analyzes user state for both behavioral understanding and interactional purposes.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2014
Rizzo, Albert; Scherer, Stefan; DeVault, David; Gratch, Jonathan; Artstein, Ron; Hartholt, Arno; Lucas, Gale; Marsella, Stacy; Morbini, Fabrizio; Nazarian, Angela; Stratou, Giota; Traum, David; Wood, Rachel; Boberg, Jill; Morency, Louis-Philippe
Detection and Computational Analysis of Psychological Signals Using a Virtual Human Interviewing Agent Proceedings Article
In: Proceedings of ICDVRAT 2014, International Journal of Disability and Human Development, Gothenburg, Sweden, 2014.
Abstract | Links | BibTeX | Tags: MedVR, Social Simulation, UARC, Virtual Humans
@inproceedings{rizzo_detection_2014,
title = {Detection and Computational Analysis of Psychological Signals Using a Virtual Human Interviewing Agent},
author = {Albert Rizzo and Stefan Scherer and David DeVault and Jonathan Gratch and Ron Artstein and Arno Hartholt and Gale Lucas and Stacy Marsella and Fabrizio Morbini and Angela Nazarian and Giota Stratou and David Traum and Rachel Wood and Jill Boberg and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Detection%20and%20Computational%20Analysis%20of%20Psychological%20Signals%20Using%20a%20Virtual%20Human%20Interviewing%20Agent.pdf},
year = {2014},
date = {2014-12-01},
booktitle = {Proceedings of ICDVRAT 2014},
publisher = {International Journal of Disability and Human Development},
address = {Gothenburg, Sweden},
abstract = {It has long been recognized that facial expressions, body posture/gestures and vocal parameters play an important role in human communication and the implicit signalling of emotion. Recent advances in low cost computer vision and behavioral sensing technologies can now be applied to the process of making meaningful inferences as to user state when a person interacts with a computational device. Effective use of this additive information could serve to promote human interaction with virtual human (VH) agents that may enhance diagnostic assessment. This paper will focus on our current research in these areas within the DARPA-funded “Detection and Computational Analysis of Psychological Signals” project, with specific attention to the SimSensei application use case. SimSensei is a virtual human interaction platform that is able to sense and interpret real-time audiovisual behavioral signals from users interacting with the system. It is specifically designed for health care support and leverages years of virtual human research and development at USC-ICT. The platform enables an engaging face-to-face interaction where the virtual human automatically reacts to the state and inferred intent of the user through analysis of behavioral signals gleaned from facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human to human interaction and communication, SimSensei aims to capture and infer from user non-verbal communication to improve engagement between a VH and a user. The system can also quantify and interpret sensed behavioral signals.},
keywords = {MedVR, Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Traum, David; Marsella, Stacy; Morency, Louis-Philippe; Shapiro, Ari; Gratch, Jonathan
A Shared, Modular Architecture for Developing Virtual Humans Proceedings Article
In: Proceedings of the Workshop on Architectures and Standards for Intelligent Virtual Agents at IVA 2014, pp. 4–7, Boston, MA, 2014.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{hartholt_shared_2014,
title = {A Shared, Modular Architecture for Developing Virtual Humans},
author = {Arno Hartholt and David Traum and Stacy Marsella and Louis-Philippe Morency and Ari Shapiro and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/A%20Shared%20Modular%20Architecture%20for%20Developing%20Virtual%20Humans.pdf},
doi = {10.2390/biecoll-wasiva2014-02},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of the Workshop on Architectures and Standards for Intelligent Virtual Agents at IVA 2014},
pages = {4–7},
address = {Boston, MA},
abstract = {Realizing the full potential of intelligent virtual agents requires compelling characters that can engage users in meaningful and realistic social interactions, and an ability to develop these characters effectively and efficiently. Advances are needed in individual capabilities, but perhaps more importantly, fundamental questions remain as to how best to integrate these capabilities into a single framework that allows us to efficiently create characters that can engage users in meaningful and realistic social interactions. This integration requires in-depth, inter-disciplinary understanding few individuals, or even teams of individuals, possess.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Hartholt, Arno; Grimani, Mario; Leeds, Andrew; Liewer, Matt
Virtual Reality Exposure Therapy for Combat-Related Posttraumatic Stress Disorder Journal Article
In: Computer, vol. 47, no. 7, pp. 31–37, 2014.
Abstract | Links | BibTeX | Tags: MedVR, UARC, Virtual Humans
@article{rizzo_virtual_2014,
title = {Virtual Reality Exposure Therapy for Combat-Related Posttraumatic Stress Disorder},
author = {Albert Rizzo and Arno Hartholt and Mario Grimani and Andrew Leeds and Matt Liewer},
url = {http://ict.usc.edu/pubs/Virtual%20Reality%20Exposure%20Therapy%20for%20Treating%20Combat-Related%20PTSD.pdf},
year = {2014},
date = {2014-07-01},
journal = {Computer},
volume = {47},
number = {7},
pages = {31–37},
abstract = {Virtual reality (VR) technology is rapidly evolving to support prolonged exposure (PE) therapy, a proven treatment for combat-related posttraumatic stress disorder. Building on the successful 2007 Virtual Iraq/Afghanistan VRET system, a team of behavioral scientists, software engineers, and virtual artists has created Bravemind, a flexible VR system that offers significantly enhanced PE treatment possibilities. The first Web extra at http://youtu.be/EiYg-kMNMtQ is a video demonstration of an original early virtual reality exposure therapy (VRET) prototype that shows a small section of an Iraqi city with a landing helicopter (2004). The second Web extra at http://youtu.be/_cS-ynWZmeQ is a video demonstration of virtual reality exposure therapy (VRET) that simulates driving a Humvee in a rural part of Iraq, showcasing several encounters, including IED and road-side attacks (2007). The third Web extra at http://youtu.be/78QXX_F4mc8 is a video demonstration of virtual reality exposure therapy (VRET) that simulates an overview of several Iraqi city areas (2007). The fourth Web extra at http://youtu.be/_AnixslkVLU is a video demonstration of virtual reality exposure therapy (VRET) that simulates a patrol entering interior buildings in Iraq (2007). The fifth Web extra at http://youtu.be/S22aQ-DqKKU is a video demonstration of an original virtual reality exposure therapy (VRET) tablet interface that allows the clinician to change virtual reality settings and trigger encounters (2007). The sixth Web extra at http://youtu.be/C-fspuLo4vw is a video demonstration of the Bravemind virtual reality exposure therapy (VRET) prototype showing a variety of driving and dismounted scenarios with encounters in Iraq and Afghanistan (2013). The seventh Web extra at http://youtu.be/HSPDomDAigg is a video collection of Iraqi and Afghanistan virtual reality exposure therapy (VRET) scenarios within the Bravemind prototype (2013).},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
DeVault, David; Artstein, Ron; Benn, Grace; Dey, Teresa; Fast, Edward; Gainer, Alesia; Georgila, Kallirroi; Gratch, Jonathan; Hartholt, Arno; Lhommet, Margaux; Lucas, Gale; Marsella, Stacy C.; Morbini, Fabrizio; Nazarian, Angela; Scherer, Stefan; Stratou, Giota; Suri, Apar; Traum, David; Wood, Rachel; Xu, Yuyu; Rizzo, Albert; Morency, Louis-Philippe
SimSensei Kiosk: A Virtual Human Interviewer for Healthcare Decision Support Proceedings Article
In: Proceedings of the 13th International Conference on Autonomous Agents and Multiagent Systems (AAMAS 2014), pp. 1061–1068, International Foundation for Autonomous Agents and Multiagent Systems, Paris, France, 2014.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{devault_simsensei_2014,
title = {SimSensei Kiosk: A Virtual Human Interviewer for Healthcare Decision Support},
author = {David DeVault and Ron Artstein and Grace Benn and Teresa Dey and Edward Fast and Alesia Gainer and Kallirroi Georgila and Jonathan Gratch and Arno Hartholt and Margaux Lhommet and Gale Lucas and Stacy C. Marsella and Fabrizio Morbini and Angela Nazarian and Stefan Scherer and Giota Stratou and Apar Suri and David Traum and Rachel Wood and Yuyu Xu and Albert Rizzo and Louis-Philippe Morency},
url = {https://dl.acm.org/citation.cfm?id=2617415},
year = {2014},
date = {2014-05-01},
booktitle = {Proceedings of the 13th International Conference on Autonomous Agents and Multiagent Systems (AAMAS 2014)},
pages = {1061–1068},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Paris, France},
abstract = {We present SimSensei Kiosk, an implemented virtual human interviewer designed to create an engaging face-to-face interaction where the user feels comfortable talking and sharing information. SimSensei Kiosk is also designed to create interactional situations favorable to the automatic assessment of distress indicators, defined as verbal and nonverbal behaviors correlated with depression, anxiety or post-traumatic stress disorder (PTSD). In this paper, we summarize the design methodology, performed over the past two years, which is based on three main development cycles: (1) analysis of face-to-face human interactions to identify potential distress indicators, dialogue policies and virtual human gestures, (2) development and analysis of a Wizard-of-Oz prototype system where two human operators were deciding the spoken and gestural responses, and (3) development of a fully automatic virtual interviewer able to engage users in 15-25 minute interactions. We show the potential of our fully automatic virtual human interviewer in a user study, and situate its performance in relation to the Wizard-of-Oz prototype.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2013
Gratch, Jonathan; Hartholt, Arno; Dehghani, Morteza; Marsella, Stacy C.
Virtual Humans: A New Toolkit for Cognitive Science Research Proceedings Article
In: Cognitive Science, Berlin, Germany, 2013.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{gratch_virtual_2013,
title = {Virtual Humans: A New Toolkit for Cognitive Science Research},
author = {Jonathan Gratch and Arno Hartholt and Morteza Dehghani and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Virtual%20Humans-%20A%20New%20Toolkit%20for%20Cognitive%20Science%20Research.pdf},
year = {2013},
date = {2013-08-01},
booktitle = {Cognitive Science},
address = {Berlin, Germany},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Traum, David; Marsella, Stacy C.; Shapiro, Ari; Stratou, Giota; Leuski, Anton; Morency, Louis-Philippe; Gratch, Jonathan
All Together Now: Introducing the Virtual Human Toolkit Proceedings Article
In: 13th International Conference on Intelligent Virtual Agents, Edinburgh, UK, 2013.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{hartholt_all_2013,
title = {All Together Now: Introducing the Virtual Human Toolkit},
author = {Arno Hartholt and David Traum and Stacy C. Marsella and Ari Shapiro and Giota Stratou and Anton Leuski and Louis-Philippe Morency and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/All%20Together%20Now.pdf},
year = {2013},
date = {2013-08-01},
booktitle = {13th International Conference on Intelligent Virtual Agents},
address = {Edinburgh, UK},
abstract = {While virtual humans are proven tools for training, education and research, they are far from realizing their full potential. Advances are needed in individual capabilities, such as character animation and speech synthesis, but perhaps more importantly, fundamental questions remain as to how best to integrate these capabilities into a single framework that allows us to efficiently create characters that can engage users in meaningful and realistic social interactions. This integration requires in-depth, inter-disciplinary understanding few individuals, or even teams of individuals, possess. We help address this challenge by introducing the ICT Virtual Human Toolkit, which offers a flexible framework for exploring a variety of different types of virtual human systems, from virtual listeners and question-answering characters to virtual role-players. We show that due to its modularity, the Toolkit allows researchers to mix and match provided capabilities with their own, lowering the barrier of entry to this multi-disciplinary research challenge.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; John, Bruce Sheffield; Newman, Brad; Williams, Josh; Hartholt, Arno; Lethin, Clarke; Buckwalter, John Galen
Virtual Reality as a Tool for Delivering PTSD Exposure Therapy and Stress Resilience Training Journal Article
In: Military Behavioral Health, vol. 1, pp. 48–54, 2013.
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{rizzo_virtual_2013,
title = {Virtual Reality as a Tool for Delivering PTSD Exposure Therapy and Stress Resilience Training},
author = {Albert Rizzo and Bruce Sheffield John and Brad Newman and Josh Williams and Arno Hartholt and Clarke Lethin and John Galen Buckwalter},
url = {http://ict.usc.edu/pubs/Virtual%20Reality%20as%20a%20Tool%20for%20Delivering%20PTSD%20Exposure%20Therapy%20and%20Stress%20Resilience%20Training.pdf},
doi = {10.1080/21635781.2012.721064},
year = {2013},
date = {2013-01-01},
journal = {Military Behavioral Health},
volume = {1},
pages = {48–54},
abstract = {The incidence of post-traumatic stress disorder (PTSD) in returning Operation Enduring Freedom and Operation Iraqi Freedom military personnel has created a significant behavioral health care challenge. One emerging form of treatment for combat-related PTSD that has shown promise involves the delivery of exposure therapy using immersive virtual reality (VR). Initial outcomes from open clinical trials have been positive, and fully randomized controlled trials are currently in progress. Inspired by the initial success of our research using VR to emotionally engage and successfully treat persons undergoing exposure therapy for PTSD, we have developed a similar VR-based approach to deliver resilience training prior to an initial deployment. The STress Resilience In Virtual Environments (STRIVE) project aims to create a set of combat simulations (derived from our existing virtual Iraq/Afghanistan PTSD exposure therapy system) that are part of a multiepisode interactive narrative experience. Users can be immersed within challenging virtual combat contexts and interact with virtual characters as part of an experiential approach for learning psychoeducational material, stress management techniques, and emotional coping strategies believed to enhance stress resilience. This article describes the development and evaluation of the virtual Iraq/Afghanistan exposure therapy system and then details its current transition into the STRIVE tool for predeployment stress resilience training.},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
2011
Wilson, Cyrus A.; Alexander, Oleg; Tunwattanapong, Borom; Peers, Pieter; Ghosh, Abhijeet; Busch, Jay; Hartholt, Arno; Debevec, Paul
Facial Cartography: Interactive Scan Correspondence Proceedings Article
In: ACM/Eurographics Symposium on Computer Animation, 2011.
Abstract | Links | BibTeX | Tags: Graphics, Virtual Humans
@inproceedings{wilson_facial_2011,
title = {Facial Cartography: Interactive Scan Correspondence},
author = {Cyrus A. Wilson and Oleg Alexander and Borom Tunwattanapong and Pieter Peers and Abhijeet Ghosh and Jay Busch and Arno Hartholt and Paul Debevec},
url = {http://ict.usc.edu/pubs/Facial%20Cartography-%20Interactive%20Scan%20Correspondence.pdf},
year = {2011},
date = {2011-08-01},
booktitle = {ACM/Eurographics Symposium on Computer Animation},
abstract = {We present a semi-automatic technique for computing surface correspondences between 3D facial scans in different expressions, such that scan data can be mapped into a common domain for facial animation. The technique can accurately correspond high-resolution scans of widely differing expressions – without requiring intermediate pose sequences – such that they can be used, together with reflectance maps, to create high-quality blendshape-based facial animation. We optimize correspondences through a combination of Image, Shape, and Internal forces, as well as Directable forces to allow a user to interactively guide and refine the solution. Key to our method is a novel representation, called an Active Visage, that balances the advantages of both deformable templates and correspondence computation in a 2D canonical domain. We show that our semi-automatic technique achieves more robust results than automated correspondence alone, and is more precise than is practical with unaided manual input.},
keywords = {Graphics, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Campbell, Julia; Core, Mark; Artstein, Ron; Armstrong, Lindsay; Hartholt, Arno; Wilson, Cyrus A.; Georgila, Kallirroi; Morbini, Fabrizio; Haynes, Kip; Gomboc, Dave; Birch, Mike; Bobrow, Jonathan; Lane, H. Chad; Gerten, Jillian; Leuski, Anton; Traum, David; Trimmer, Matthew; DiNinni, Rich; Bosack, Matthew; Jones, Timothy; Clark, Richard E.; Yates, Kenneth A.
Developing INOTS to Support Interpersonal Skills Practice Proceedings Article
In: IEEE Aerospace Conference, 2011.
Abstract | Links | BibTeX | Tags: Learning Sciences, Virtual Humans
@inproceedings{campbell_developing_2011,
title = {Developing INOTS to Support Interpersonal Skills Practice},
author = {Julia Campbell and Mark Core and Ron Artstein and Lindsay Armstrong and Arno Hartholt and Cyrus A. Wilson and Kallirroi Georgila and Fabrizio Morbini and Kip Haynes and Dave Gomboc and Mike Birch and Jonathan Bobrow and H. Chad Lane and Jillian Gerten and Anton Leuski and David Traum and Matthew Trimmer and Rich DiNinni and Matthew Bosack and Timothy Jones and Richard E. Clark and Kenneth A. Yates},
url = {http://ict.usc.edu/pubs/Developing%20INOTS%20to%20Support%20Interpersonal%20Skills%20Practice.pdf},
year = {2011},
date = {2011-03-01},
booktitle = {IEEE Aerospace Conference},
abstract = {The Immersive Naval Officer Training System (INOTS) is a blended learning environment that merges traditional classroom instruction with a mixed reality training setting. INOTS supports the instruction, practice and assessment of interpersonal communication skills. The goal of INOTS is to provide a consistent training experience to supplement interpersonal skills instruction for Naval officer candidates without sacrificing trainee throughput and instructor control. We developed an instructional design from cognitive task analysis interviews with experts to serve as a framework for system development. We also leveraged commercial student response technology and research technologies including natural language recognition, virtual humans, realistic graphics, intelligent tutoring and automated instructor support tools. In this paper, we describe our methodologies for developing a blended learning environment, and our challenges adding mixed reality and virtual human technologies to a traditional classroom to support interpersonal skills training.},
keywords = {Learning Sciences, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2009
Hartholt, Arno; Gratch, Jonathan; Leuski, Anton; Morency, Louis-Philippe; Marsella, Stacy C.; Liewer, Matt; Doraiswamy, Prathibha; Weiss, Lori; LeMasters, Kim; Fast, Edward; Sadek, Ramy; Marshall, Andrew; Lee, Jina; Thiebaux, Marcus; Tsiartas, Andreas
At the Virtual Frontier: Introducing Gunslinger, a Multi-Character, Mixed-Reality, Story-Driven Experience Proceedings Article
In: Proceedings of the 9th International Conference on Intelligent Virtual Agents (IVA), Amsterdam, The Netherlands, 2009.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{hartholt_at_2009,
title = {At the Virtual Frontier: Introducing Gunslinger, a Multi-Character, Mixed-Reality, Story-Driven Experience},
author = {Arno Hartholt and Jonathan Gratch and Anton Leuski and Louis-Philippe Morency and Stacy C. Marsella and Matt Liewer and Prathibha Doraiswamy and Lori Weiss and Kim LeMasters and Edward Fast and Ramy Sadek and Andrew Marshall and Jina Lee and Marcus Thiebaux and Andreas Tsiartas},
url = {http://ict.usc.edu/pubs/At%20the%20Virtual%20Frontier-%20Introducing%20Gunslinger%20a%20Multi-%20Character%20Mixed-Reality%20Story-Driven%20Experience.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the 9th International Conference on Intelligent Virtual Agents (IVA)},
address = {Amsterdam, The Netherlands},
abstract = {We describe an application of intelligent virtual agents to the domain of mixed-reality interactive entertainment. Gunslinger allows users to interact with life-sized virtual humans within the context of a wild west story world. The application incorporates a novel integration of capabilities including gesture and spoken language recognition, story and dialogue reasoning, and multi-character, multi-modal behavior generation and synthesis. The article describes our design process, technological innovations, and initial feedback from user interactions with the system.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2008
Traum, David; Gratch, Jonathan; Hartholt, Arno; Marsella, Stacy C.; Lee, Jina
Multi-party, Multi-issue, Multi-strategy Negotiation for Multi-modal Virtual Agents Proceedings Article
In: Proceedings of the 8th International Conference on Intelligent Virtual Agents, pp. 117–130, Tokyo, Japan, 2008.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_multi-party_2008,
title = {Multi-party, Multi-issue, Multi-strategy Negotiation for Multi-modal Virtual Agents},
author = {David Traum and Jonathan Gratch and Arno Hartholt and Stacy C. Marsella and Jina Lee},
url = {http://ict.usc.edu/pubs/Multi-party,%20Multi-issue,%20Multi-strategy%20Negotiation.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {Proceedings of the 8th International Conference on Intelligent Virtual Agents},
pages = {117–130},
address = {Tokyo, Japan},
abstract = {We present a model of negotiation for virtual agents that extends previous work to be more human-like and applicable to a broader range of situations, including more than two negotiators with different goals, and negotiating over multiple options. The agents can dynamically change their negotiating strategies based on the current values of several parameters and factors that can be updated in the course of the negotiation. We have implemented this model and done preliminary evaluation within a prototype training system and a three-party negotiation with two virtual humans and one human.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}