Publications
Search
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn; Liu, Ruying
A New Perspective on Stress Detection: An Automated Approach for Detecting Eustress and Distress Journal Article
In: IEEE Trans. Affective Comput., pp. 1–15, 2023, ISSN: 1949-3045, 2371-9850.
@article{awada_new_2023,
  title = {A New Perspective on Stress Detection: An Automated Approach for Detecting Eustress and Distress},
  author = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn Roll and Ruying Liu},
  url = {https://ieeexplore.ieee.org/document/10286408/},
  doi = {10.1109/TAFFC.2023.3324910},
  issn = {1949-3045, 2371-9850},
  year = {2023},
  date = {2023-01-01},
  urldate = {2023-12-07},
  journal = {IEEE Transactions on Affective Computing},
  pages = {1--15},
  pubstate = {published},
  tppubtype = {article}
}
Nye, Benjamin D; Mee, Dillon; Core, Mark G
Generative Large Language Models for Dialog-Based Tutoring: An Early Consideration of Opportunities and Concerns Proceedings Article
In: 2023.
@inproceedings{nye_generative_2023,
  title = {Generative Large Language Models for Dialog-Based Tutoring: An Early Consideration of Opportunities and Concerns},
  author = {Benjamin D Nye and Dillon Mee and Mark G Core},
  url = {https://ceur-ws.org/Vol-3487/paper4.pdf},
  booktitle = {{AIED} 2023 Workshops ({CEUR} Workshop Proceedings, Vol-3487)},
  internal-note = {booktitle inferred from the CEUR-WS Vol-3487 URL -- verify exact workshop proceedings title},
  year = {2023},
  date = {2023-01-01},
  abstract = {After many years of relatively limited capabilities for generative language models, recent large language models (LLM’s) have demonstrated qualitatively better capabilities for understanding, synthesis, and inference on text. Due to the prominence of ChatGPT’s chat system, both the media and many educational developers have suggested using generative AI to directly tutor students. However, despite surface-level similarity between ChatGPT interactions and tutoring dialogs, generative AI has other strengths which may be substantially more relevant for intelligent tutoring (e.g., detecting misconceptions, improved language translation, content generation) and weaknesses that make it problematic for on-the-fly tutoring (e.g., hallucinations, lack of pedagogical training data). In this paper, we discuss how we are approaching generative LLM’s for tutoring dialogs, for problems such as multi- concept short answer grading and semi-supervised interactive content generation. This work shows interesting opportunities for prompt engineering approaches for short-answer classification, despite sometimes quirky behavior. The time savings for high-quality content generation for tutoring is not yet clear and further research is needed. The paper concludes with a consideration of longer-term equity and access in a world where essential capabilities require low-latency real-time connections to large, pay-peruse models. Risks and mitigating technologies for this kind of “AI digital divide” are discussed, including optimized / edge-computing LLM’s and using generative AI models as simulated students to train specialized tutoring models.},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Wang, Timothy S.; Gordon, Andrew S.
Playing Story Creation Games with Large Language Models: Experiments with GPT-3.5 Book Section
In: Holloway-Attaway, Lissa; Murray, John T. (Ed.): Interactive Storytelling, vol. 14384, pp. 297–305, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-47657-0 978-3-031-47658-7, (Series Title: Lecture Notes in Computer Science).
@incollection{holloway-attaway_playing_2023,
  title = {Playing Story Creation Games with Large Language Models: Experiments with {GPT-3.5}},
  author = {Timothy S. Wang and Andrew S. Gordon},
  editor = {Lissa Holloway-Attaway and John T. Murray},
  url = {https://link.springer.com/10.1007/978-3-031-47658-7_28},
  doi = {10.1007/978-3-031-47658-7_28},
  isbn = {978-3-031-47657-0 978-3-031-47658-7},
  year = {2023},
  date = {2023-01-01},
  urldate = {2023-12-07},
  booktitle = {Interactive Storytelling},
  series = {Lecture Notes in Computer Science},
  volume = {14384},
  pages = {297--305},
  publisher = {Springer Nature Switzerland},
  address = {Cham},
  pubstate = {published},
  tppubtype = {incollection}
}
Goel, Rahul; Tse, Teresa; Smith, Lia J.; Floren, Andrew; Naylor, Bruce; Williams, M. Wright; Salas, Ramiro; Rizzo, Albert S.; Ress, David
Framework for Accurate Classification of Self-Reported Stress From Multisession Functional MRI Data of Veterans With Posttraumatic Stress Journal Article
In: Chronic Stress, vol. 7, pp. 24705470231203655, 2023, ISSN: 2470-5470, 2470-5470.
@article{goel_framework_2023,
  title = {Framework for Accurate Classification of Self-Reported Stress From Multisession Functional {MRI} Data of Veterans With Posttraumatic Stress},
  author = {Rahul Goel and Teresa Tse and Lia J. Smith and Andrew Floren and Bruce Naylor and M. Wright Williams and Ramiro Salas and Albert S. Rizzo and David Ress},
  url = {http://journals.sagepub.com/doi/10.1177/24705470231203655},
  doi = {10.1177/24705470231203655},
  issn = {2470-5470},
  year = {2023},
  date = {2023-01-01},
  urldate = {2023-12-07},
  journal = {Chronic Stress},
  volume = {7},
  pages = {24705470231203655},
  abstract = {Background: Posttraumatic stress disorder (PTSD) is a significant burden among combat Veterans returning from the wars in Iraq and Afghanistan. While empirically supported treatments have demonstrated reductions in PTSD symptomatology, there remains a need to improve treatment effectiveness. Functional magnetic resonance imaging (fMRI) neurofeedback has emerged as a possible treatment to ameliorate PTSD symptom severity. Virtual reality (VR) approaches have also shown promise in increasing treatment compliance and outcomes. To facilitate fMRI neurofeedback-associated therapies, it would be advantageous to accurately classify internal brain stress levels while Veterans are exposed to trauma-associated VR imagery. Methods: Across 2 sessions, we used fMRI to collect neural responses to trauma-associated VR-like stimuli among male combat Veterans with PTSD symptoms (N = 8). Veterans reported their self-perceived stress level on a scale from 1 to 8 every 15 s throughout the fMRI sessions. In our proposed framework, we precisely sample the fMRI data on cortical gray matter, blurring the data along the gray-matter manifold to reduce noise and dimensionality while preserving maximum neural information. Then, we independently applied 3 machine learning (ML) algorithms to this fMRI data collected across 2 sessions, separately for each Veteran, to build individualized ML models that predicted their internal brain states (self-reported stress responses). Results: We accurately classified the 8-class self-reported stress responses with a mean (± standard error) root mean square error of 0.6 (± 0.1) across all Veterans using the best ML approach. Conclusions: The findings demonstrate the predictive ability of ML algorithms applied to whole-brain cortical fMRI data collected during individual Veteran sessions. 
The framework we have developed to preprocess whole-brain cortical fMRI data and train ML models across sessions would provide a valuable tool to enable individualized real-time fMRI neurofeedback during VR-like exposure therapy for PTSD.},
  pubstate = {published},
  tppubtype = {article}
}
Zhu, Runhe; Lucas, Gale M.; Becerik-Gerber, Burcin; Southers, Erroll G.; Landicho, Earl
The impact of security countermeasures on human behavior during active shooter incidents Journal Article
In: Sci Rep, vol. 12, no. 1, pp. 929, 2022, ISSN: 2045-2322.
@article{zhu_impact_2022,
  title = {The impact of security countermeasures on human behavior during active shooter incidents},
  author = {Runhe Zhu and Gale M. Lucas and Burcin Becerik-Gerber and Erroll G. Southers and Earl Landicho},
  url = {https://www.nature.com/articles/s41598-022-04922-8},
  doi = {10.1038/s41598-022-04922-8},
  issn = {2045-2322},
  year = {2022},
  date = {2022-12-01},
  urldate = {2022-09-26},
  journal = {Scientific Reports},
  volume = {12},
  number = {1},
  pages = {929},
  abstract = {Abstract Active shooter incidents represent an increasing threat to American society, especially in commercial and educational buildings. In recent years, a wide variety of security countermeasures have been recommended by public and governmental agencies. Many of these countermeasures are aimed to increase building security, yet their impact on human behavior when an active shooter incident occurs remains underexplored. To fill this research gap, we conducted virtual experiments to evaluate the impact of countermeasures on human behavior during active shooter incidents. A total of 162 office workers and middle/high school teachers were recruited to respond to an active shooter incident in virtual office and school buildings with or without the implementation of multiple countermeasures. The experiment results showed countermeasures significantly influenced participants’ response time and decisions (e.g., run, hide, fight). Participants’ responses and perceptions of the active shooter incident were also contingent on their daily roles, as well as building and social contexts. Teachers had more concerns for occupants’ safety than office workers. Moreover, teachers had more positive perceptions of occupants in the school, whereas office workers had more positive perceptions of occupants in the office.},
  pubstate = {published},
  tppubtype = {article}
}
Becerik-Gerber, Burçin; Lucas, Gale; Aryal, Ashrant; Awada, Mohamad; Bergés, Mario; Billington, Sarah L; Boric-Lubecke, Olga; Ghahramani, Ali; Heydarian, Arsalan; Jazizadeh, Farrokh; Liu, Ruying; Zhu, Runhe; Marks, Frederick; Roll, Shawn; Seyedrezaei, Mirmahdi; Taylor, John E.; Höelscher, Christoph; Khan, Azam; Langevin, Jared; Mauriello, Matthew Louis; Murnane, Elizabeth; Noh, Haeyoung; Pritoni, Marco; Schaumann, Davide; Zhao, Jie
Ten questions concerning human-building interaction research for improving the quality of life Journal Article
In: Building and Environment, vol. 226, pp. 109681, 2022, ISSN: 0360-1323.
@article{becerik-gerber_ten_2022,
  title = {Ten questions concerning human-building interaction research for improving the quality of life},
  author = {Burçin Becerik-Gerber and Gale Lucas and Ashrant Aryal and Mohamad Awada and Mario Bergés and Sarah L Billington and Olga Boric-Lubecke and Ali Ghahramani and Arsalan Heydarian and Farrokh Jazizadeh and Ruying Liu and Runhe Zhu and Frederick Marks and Shawn Roll and Mirmahdi Seyedrezaei and John E. Taylor and Christoph Höelscher and Azam Khan and Jared Langevin and Matthew Louis Mauriello and Elizabeth Murnane and Haeyoung Noh and Marco Pritoni and Davide Schaumann and Jie Zhao},
  url = {https://www.sciencedirect.com/science/article/pii/S0360132322009118},
  doi = {10.1016/j.buildenv.2022.109681},
  issn = {0360-1323},
  year = {2022},
  date = {2022-12-01},
  urldate = {2023-03-31},
  journal = {Building and Environment},
  volume = {226},
  pages = {109681},
  abstract = {This paper seeks to address ten questions that explore the burgeoning field of Human-Building Interaction (HBI), an interdisciplinary field that represents the next frontier in convergent research and innovation to enable the dynamic interplay of human and building interactional intelligence. The field of HBI builds on several existing efforts in historically separate research fields/communities and aims to understand how buildings affect human outcomes and experiences, as well as how humans interact with, adapt to, and affect the built environment and its systems, to support buildings that can learn, enable adaptation, and evolve at different scales to improve the quality-of-life of its users while optimizing resource usage and service availability. Questions were developed by a diverse group of researchers with backgrounds in design, engineering, computer science, social science, and health science. Answers to these questions draw conclusions from what has been achieved to date as reported in the available literature and establish a foundation for future HBI research. This paper aims to encourage interdisciplinary collaborations in HBI research to change the way people interact with and perceive technology within the context of buildings and inform the design, construction, and operation of next-generation, intelligent built environments. In doing so, HBI research can realize a myriad of benefits for human users, including improved productivity, health, cognition, convenience, and comfort, all of which are essential to societal well-being.},
  pubstate = {published},
  tppubtype = {article}
}
Becerik-Gerber, Burcin; Lucas, Gale; Aryal, Ashrant; Awada, Mohamad; Bergés, Mario; Billington, Sarah; Boric-Lubecke, Olga; Ghahramani, Ali; Heydarian, Arsalan; Höelscher, Christoph; Jazizadeh, Farrokh; Khan, Azam; Langevin, Jared; Liu, Ruying; Marks, Frederick; Mauriello, Matthew Louis; Murnane, Elizabeth; Noh, Haeyoung; Pritoni, Marco; Roll, Shawn; Schaumann, Davide; Seyedrezaei, Mirmahdi; Taylor, John E.; Zhao, Jie; Zhu, Runhe
The field of human building interaction for convergent research and innovation for intelligent built environments Journal Article
In: Sci Rep, vol. 12, no. 1, pp. 22092, 2022, ISSN: 2045-2322, (Number: 1 Publisher: Nature Publishing Group).
@article{becerik-gerber_field_2022,
  title = {The field of human building interaction for convergent research and innovation for intelligent built environments},
  author = {Burcin Becerik-Gerber and Gale Lucas and Ashrant Aryal and Mohamad Awada and Mario Bergés and Sarah Billington and Olga Boric-Lubecke and Ali Ghahramani and Arsalan Heydarian and Christoph Höelscher and Farrokh Jazizadeh and Azam Khan and Jared Langevin and Ruying Liu and Frederick Marks and Matthew Louis Mauriello and Elizabeth Murnane and Haeyoung Noh and Marco Pritoni and Shawn Roll and Davide Schaumann and Mirmahdi Seyedrezaei and John E. Taylor and Jie Zhao and Runhe Zhu},
  url = {https://www.nature.com/articles/s41598-022-25047-y},
  doi = {10.1038/s41598-022-25047-y},
  issn = {2045-2322},
  year = {2022},
  date = {2022-12-01},
  urldate = {2023-03-31},
  journal = {Scientific Reports},
  volume = {12},
  number = {1},
  pages = {22092},
  abstract = {Human-Building Interaction (HBI) is a convergent field that represents the growing complexities of the dynamic interplay between human experience and intelligence within built environments. This paper provides core definitions, research dimensions, and an overall vision for the future of HBI as developed through consensus among 25 interdisciplinary experts in a series of facilitated workshops. Three primary areas contribute to and require attention in HBI research: humans (human experiences, performance, and well-being), buildings (building design and operations), and technologies (sensing, inference, and awareness). Three critical interdisciplinary research domains intersect these areas: control systems and decision making, trust and collaboration, and modeling and simulation. Finally, at the core, it is vital for HBI research to center on and support equity, privacy, and sustainability. Compelling research questions are posed for each primary area, research domain, and core principle. State-of-the-art methods used in HBI studies are discussed, and examples of original research are offered to illustrate opportunities for the advancement of HBI research.},
  note = {Number: 1
Publisher: Nature Publishing Group},
  pubstate = {published},
  tppubtype = {article}
}
Maihofer, Adam X.; Engchuan, Worrawat; Huguet, Guillaume; Klein, Marieke; MacDonald, Jeffrey R.; Shanta, Omar; Thiruvahindrapuram, Bhooma; Jean-louis, Martineau; Saci, Zohra; Jacquemont, Sebastien; Scherer, Stephen W.; Ketema, Elizabeth; Aiello, Allison E.; Amstadter, Ananda B.; Avdibegović, Esmina; Babic, Dragan; Baker, Dewleen G.; Bisson, Jonathan I.; Boks, Marco P.; Bolger, Elizabeth A.; Bryant, Richard A.; Bustamante, Angela C.; Caldas-de-Almeida, Jose Miguel; Cardoso, Graça; Deckert, Jurgen; Delahanty, Douglas L.; Domschke, Katharina; Dunlop, Boadie W.; Dzubur-Kulenovic, Alma; Evans, Alexandra; Feeny, Norah C.; Franz, Carol E.; Gautam, Aarti; Geuze, Elbert; Goci, Aferdita; Hammamieh, Rasha; Jakovljevic, Miro; Jett, Marti; Jones, Ian; Kaufman, Milissa L.; Kessler, Ronald C.; King, Anthony P.; Kremen, William S.; Lawford, Bruce R.; Lebois, Lauren A. M.; Lewis, Catrin; Liberzon, Israel; Linnstaedt, Sarah D.; Lugonja, Bozo; Luykx, Jurjen J.; Lyons, Michael J.; Mavissakalian, Matig R.; McLaughlin, Katie A.; McLean, Samuel A.; Mehta, Divya; Mellor, Rebecca; Morris, Charles Phillip; Muhie, Seid; Orcutt, Holly K.; Peverill, Matthew; Ratanatharathorn, Andrew; Risbrough, Victoria B.; Rizzo, Albert; Roberts, Andrea L.; Rothbaum, Alex O.; Rothbaum, Barbara O.; Roy-Byrne, Peter; Ruggiero, Kenneth J.; Rutten, Bart P. F.; Schijven, Dick; Seng, Julia S.; Sheerin, Christina M.; Sorenson, Michael A.; Teicher, Martin H.; Uddin, Monica; Ursano, Robert J.; Vinkers, Christiaan H.; Voisey, Joanne; Weber, Heike; Winternitz, Sherry; Xavier, Miguel; Yang, Ruoting; Young, Ross McD; Zoellner, Lori A.; Salem, Rany M.; Shaffer, Richard A.; Wu, Tianying; Ressler, Kerry J.; Stein, Murray B.; Koenen, Karestan C.; Sebat, Jonathan; Nievergelt, Caroline M.
Rare copy number variation in posttraumatic stress disorder Journal Article
In: Mol Psychiatry, vol. 27, no. 12, pp. 5062–5069, 2022, ISSN: 1476-5578, (Number: 12 Publisher: Nature Publishing Group).
@article{maihofer_rare_2022,
  title = {Rare copy number variation in posttraumatic stress disorder},
  author = {Adam X. Maihofer and Worrawat Engchuan and Guillaume Huguet and Marieke Klein and Jeffrey R. MacDonald and Omar Shanta and Bhooma Thiruvahindrapuram and Martineau Jean-louis and Zohra Saci and Sebastien Jacquemont and Stephen W. Scherer and Elizabeth Ketema and Allison E. Aiello and Ananda B. Amstadter and Esmina Avdibegović and Dragan Babic and Dewleen G. Baker and Jonathan I. Bisson and Marco P. Boks and Elizabeth A. Bolger and Richard A. Bryant and Angela C. Bustamante and Jose Miguel Caldas-de-Almeida and Graça Cardoso and Jurgen Deckert and Douglas L. Delahanty and Katharina Domschke and Boadie W. Dunlop and Alma Dzubur-Kulenovic and Alexandra Evans and Norah C. Feeny and Carol E. Franz and Aarti Gautam and Elbert Geuze and Aferdita Goci and Rasha Hammamieh and Miro Jakovljevic and Marti Jett and Ian Jones and Milissa L. Kaufman and Ronald C. Kessler and Anthony P. King and William S. Kremen and Bruce R. Lawford and Lauren A. M. Lebois and Catrin Lewis and Israel Liberzon and Sarah D. Linnstaedt and Bozo Lugonja and Jurjen J. Luykx and Michael J. Lyons and Matig R. Mavissakalian and Katie A. McLaughlin and Samuel A. McLean and Divya Mehta and Rebecca Mellor and Charles Phillip Morris and Seid Muhie and Holly K. Orcutt and Matthew Peverill and Andrew Ratanatharathorn and Victoria B. Risbrough and Albert Rizzo and Andrea L. Roberts and Alex O. Rothbaum and Barbara O. Rothbaum and Peter Roy-Byrne and Kenneth J. Ruggiero and Bart P. F. Rutten and Dick Schijven and Julia S. Seng and Christina M. Sheerin and Michael A. Sorenson and Martin H. Teicher and Monica Uddin and Robert J. Ursano and Christiaan H. Vinkers and Joanne Voisey and Heike Weber and Sherry Winternitz and Miguel Xavier and Ruoting Yang and Ross McD Young and Lori A. Zoellner and Rany M. Salem and Richard A. Shaffer and Tianying Wu and Kerry J. Ressler and Murray B. Stein and Karestan C. Koenen and Jonathan Sebat and Caroline M. Nievergelt},
  url = {https://www.nature.com/articles/s41380-022-01776-4},
  doi = {10.1038/s41380-022-01776-4},
  issn = {1476-5578},
  year = {2022},
  date = {2022-12-01},
  urldate = {2023-03-31},
  journal = {Molecular Psychiatry},
  volume = {27},
  number = {12},
  pages = {5062--5069},
  abstract = {Posttraumatic stress disorder (PTSD) is a heritable (h2 = 24–71%) psychiatric illness. Copy number variation (CNV) is a form of rare genetic variation that has been implicated in the etiology of psychiatric disorders, but no large-scale investigation of CNV in PTSD has been performed. We present an association study of CNV burden and PTSD symptoms in a sample of 114,383 participants (13,036 cases and 101,347 controls) of European ancestry. CNVs were called using two calling algorithms and intersected to a consensus set. Quality control was performed to remove strong outlier samples. CNVs were examined for association with PTSD within each cohort using linear or logistic regression analysis adjusted for population structure and CNV quality metrics, then inverse variance weighted meta-analyzed across cohorts. We examined the genome-wide total span of CNVs, enrichment of CNVs within specified gene-sets, and CNVs overlapping individual genes and implicated neurodevelopmental regions. The total distance covered by deletions crossing over known neurodevelopmental CNV regions was significant (beta = 0.029},
  internal-note = {abstract truncated in the source export -- verify against the publisher record},
  note = {Number: 12
Publisher: Nature Publishing Group},
  pubstate = {published},
  tppubtype = {article}
}
Harvey, Philip D.; Depp, Colin A.; Rizzo, Albert A.; Strauss, Gregory P.; Spelber, David; Carpenter, Linda L.; Kalin, Ned H.; Krystal, John H.; McDonald, William M.; Nemeroff, Charles B.; Rodriguez, Carolyn I.; Widge, Alik S.; Torous, John
Technology and Mental Health: State of the Art for Assessment and Treatment Journal Article
In: AJP, vol. 179, no. 12, pp. 897–914, 2022, ISSN: 0002-953X, 1535-7228.
@article{harvey_technology_2022,
  title = {Technology and Mental Health: State of the Art for Assessment and Treatment},
  author = {Philip D. Harvey and Colin A. Depp and Albert A. Rizzo and Gregory P. Strauss and David Spelber and Linda L. Carpenter and Ned H. Kalin and John H. Krystal and William M. McDonald and Charles B. Nemeroff and Carolyn I. Rodriguez and Alik S. Widge and John Torous},
  url = {http://ajp.psychiatryonline.org/doi/10.1176/appi.ajp.21121254},
  doi = {10.1176/appi.ajp.21121254},
  issn = {0002-953X, 1535-7228},
  year = {2022},
  date = {2022-12-01},
  urldate = {2023-08-22},
  journal = {American Journal of Psychiatry},
  volume = {179},
  number = {12},
  pages = {897--914},
  pubstate = {published},
  tppubtype = {article}
}
Lugrin, Birgit; Pelachaud, Catherine; André, Elisabeth; Aylett, Ruth; Bickmore, Timothy; Breazeal, Cynthia; Broekens, Joost; Dautenhahn, Kerstin; Gratch, Jonathan; Kopp, Stefan; Nadel, Jacqueline; Paiva, Ana; Wykowska, Agnieszka
Challenge Discussion on Socially Interactive Agents: Considerations on Social Interaction, Computational Architectures, Evaluation, and Ethics Book Section
In: The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application, vol. 48, pp. 561–626, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9896-1.
@incollection{lugrin_challenge_2022,
  title = {Challenge Discussion on Socially Interactive Agents: Considerations on Social Interaction, Computational Architectures, Evaluation, and Ethics},
  author = {Birgit Lugrin and Catherine Pelachaud and Elisabeth André and Ruth Aylett and Timothy Bickmore and Cynthia Breazeal and Joost Broekens and Kerstin Dautenhahn and Jonathan Gratch and Stefan Kopp and Jacqueline Nadel and Ana Paiva and Agnieszka Wykowska},
  url = {https://doi.org/10.1145/3563659.3563677},
  isbn = {978-1-4503-9896-1},
  year = {2022},
  date = {2022-11-01},
  urldate = {2023-03-31},
  booktitle = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application},
  volume = {48},
  pages = {561--626},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  edition = {1},
  pubstate = {published},
  tppubtype = {incollection}
}
Traum, David
Socially Interactive Agent Dialogue Book Section
In: The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application, vol. 48, pp. 45–76, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9896-1.
@incollection{traum_socially_2022,
  title = {Socially Interactive Agent Dialogue},
  author = {David Traum},
  url = {https://doi.org/10.1145/3563659.3563663},
  isbn = {978-1-4503-9896-1},
  year = {2022},
  date = {2022-11-01},
  urldate = {2023-03-31},
  booktitle = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application},
  volume = {48},
  pages = {45--76},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  edition = {1},
  pubstate = {published},
  tppubtype = {incollection}
}
Hartholt, Arno; Mozgai, Sharon
Platforms and Tools for SIA Research and Development Book Section
In: The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application, vol. 48, pp. 261–304, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9896-1.
@incollection{hartholt_platforms_2022,
  title = {Platforms and Tools for {SIA} Research and Development},
  author = {Arno Hartholt and Sharon Mozgai},
  url = {https://doi.org/10.1145/3563659.3563668},
  isbn = {978-1-4503-9896-1},
  year = {2022},
  date = {2022-11-01},
  urldate = {2023-03-31},
  booktitle = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application},
  volume = {48},
  pages = {261--304},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  edition = {1},
  pubstate = {published},
  tppubtype = {incollection}
}
Lu, Shuhong; Feng, Andrew
The DeepMotion entry to the GENEA Challenge 2022 Proceedings Article
In: INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION, pp. 790–796, ACM, Bengaluru India, 2022, ISBN: 978-1-4503-9390-4.
@inproceedings{lu_deepmotion_2022,
  title = {The {DeepMotion} entry to the {GENEA} Challenge 2022},
  author = {Shuhong Lu and Andrew Feng},
  url = {https://dl.acm.org/doi/10.1145/3536221.3558059},
  doi = {10.1145/3536221.3558059},
  isbn = {978-1-4503-9390-4},
  year = {2022},
  date = {2022-11-01},
  urldate = {2023-08-24},
  booktitle = {Proceedings of the 2022 International Conference on Multimodal Interaction},
  pages = {790--796},
  publisher = {ACM},
  address = {Bengaluru India},
  pubstate = {published},
  tppubtype = {inproceedings}
}
}
Feng, Andrew; Shin, Samuel; Yoon, Youngwoo
A Tool for Extracting 3D Avatar-Ready Gesture Animations from Monocular Videos Proceedings Article
In: Proceedings of the 15th ACM SIGGRAPH Conference on Motion, Interaction and Games, pp. 1–7, ACM, Guanajuato Mexico, 2022, ISBN: 978-1-4503-9888-6.
@inproceedings{feng_tool_2022,
  title = {A Tool for Extracting {3D} Avatar-Ready Gesture Animations from Monocular Videos},
  author = {Andrew Feng and Samuel Shin and Youngwoo Yoon},
  url = {https://dl.acm.org/doi/10.1145/3561975.3562953},
  doi = {10.1145/3561975.3562953},
  isbn = {978-1-4503-9888-6},
  year = {2022},
  date = {2022-11-01},
  urldate = {2023-08-04},
  booktitle = {Proceedings of the 15th ACM SIGGRAPH Conference on Motion, Interaction and Games},
  pages = {1--7},
  publisher = {ACM},
  address = {Guanajuato Mexico},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Zhang, Larry; Kolacz, Jacek; Rizzo, Albert; Scherer, Stefan; Soleymani, Mohammad
Speech Behavioral Markers Align on Symptom Factors in Psychological Distress Proceedings Article
In: 2022 10th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, 2022, (ISSN: 2156-8111).
@inproceedings{zhang_speech_2022,
  title = {Speech Behavioral Markers Align on Symptom Factors in Psychological Distress},
  author = {Larry Zhang and Jacek Kolacz and Albert Rizzo and Stefan Scherer and Mohammad Soleymani},
  url = {https://ieeexplore.ieee.org/abstract/document/9953849},
  doi = {10.1109/ACII55700.2022.9953849},
  year = {2022},
  date = {2022-10-01},
  booktitle = {2022 10th International Conference on Affective Computing and Intelligent Interaction (ACII)},
  pages = {1--8},
  abstract = {Automatic detection of psychological disorders has gained significant attention in recent years due to the rise in their prevalence. However, the majority of studies have overlooked the complexity of disorders in favor of a “present/not present” dichotomy in representing disorders. Recent psychological research challenges favors transdiagnostic approaches, moving beyond general disorder classifications to symptom level analysis, as symptoms are often not exclusive to individual disorder classes. In our study, we investigated the link between speech signals and psychological distress symptoms in a corpus of 333 screening interviews from the Distress Analysis Interview Corpus (DAIC). Given the semi-structured organization of interviews, we aggregated speech utterances from responses to shared questions across interviews. We employed deterministic sample selection in classification to rank salient questions for eliciting symptom-specific behaviors in order to predict symptom presence. Some questions include “Do you find therapy helpful?” and “When was the last time you felt happy?”. The prediction results align closely to the factor structure of psychological distress symptoms, linking speech behaviors primarily to somatic and affective alterations in both depression and PTSD. This lends support for the transdiagnostic validity of speech markers for detecting such symptoms. Surprisingly, we did not find a strong link between speech markers and cognitive or psychomotor alterations. This is surprising, given the complexity of motor and cognitive actions required in speech production. The results of our analysis highlight the importance of aligning affective computing research with psychological research to investigate the use of automatic behavioral sensing to assess psychiatric risk.},
  note = {ISSN: 2156-8111},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Chen, Meida; Hu, Qingyong; Yu, Zifan; Thomas, Hugues; Feng, Andrew; Hou, Yu; McCullough, Kyle; Ren, Fengbo; Soibelman, Lucio
STPLS3D: A Large-Scale Synthetic and Real Aerial Photogrammetry 3D Point Cloud Dataset Miscellaneous
2022, (arXiv:2203.09065 [cs]).
@misc{chen_stpls3d_2022,
  title = {{STPLS3D}: A Large-Scale Synthetic and Real Aerial Photogrammetry {3D} Point Cloud Dataset},
  author = {Meida Chen and Qingyong Hu and Zifan Yu and Hugues Thomas and Andrew Feng and Yu Hou and Kyle McCullough and Fengbo Ren and Lucio Soibelman},
  url = {http://arxiv.org/abs/2203.09065},
  eprint = {2203.09065},
  eprinttype = {arXiv},
  eprintclass = {cs},
  year = {2022},
  date = {2022-10-01},
  urldate = {2023-08-22},
  publisher = {arXiv},
  abstract = {Although various 3D datasets with different functions and scales have been proposed recently, it remains challenging for individuals to complete the whole pipeline of large-scale data collection, sanitization, and annotation. Moreover, the created datasets usually suffer from extremely imbalanced class distribution or partial low-quality data samples. Motivated by this, we explore the procedurally synthetic 3D data generation paradigm to equip individuals with the full capability of creating large-scale annotated photogrammetry point clouds. Specifically, we introduce a synthetic aerial photogrammetry point clouds generation pipeline that takes full advantage of open geospatial data sources and off-the-shelf commercial packages. Unlike generating synthetic data in virtual games, where the simulated data usually have limited gaming environments created by artists, the proposed pipeline simulates the reconstruction process of the real environment by following the same UAV flight pattern on different synthetic terrain shapes and building densities, which ensure similar quality, noise pattern, and diversity with real data. In addition, the precise semantic and instance annotations can be generated fully automatically, avoiding the expensive and time-consuming manual annotation. Based on the proposed pipeline, we present a richly-annotated synthetic 3D aerial photogrammetry point cloud dataset, termed STPLS3D, with more than 16 $km^2$ of landscapes and up to 18 fine-grained semantic categories. For verification purposes, we also provide a parallel dataset collected from four areas in the real environment. Extensive experiments conducted on our datasets demonstrate the effectiveness and quality of the proposed synthetic dataset.},
  pubstate = {published},
  tppubtype = {misc}
}
Hartholt, Arno; Fast, Ed; Li, Zongjian; Kim, Kevin; Leeds, Andrew; Mozgai, Sharon
Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, ACM, Faro Portugal, 2022, ISBN: 978-1-4503-9248-8.
@inproceedings{hartholt_re-architecting_2022,
title = {Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development},
author = {Arno Hartholt and Ed Fast and Zongjian Li and Kevin Kim and Andrew Leeds and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3514197.3549671},
doi = {10.1145/3514197.3549671},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-15},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1--8},
publisher = {ACM},
address = {Faro, Portugal},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Eugene; McNulty, Zachary; Gentle, Alex; Pradhan, Prerak Tusharkumar; Gratch, Jonathan
Examining the impact of emotion and agency on negotiator behavior Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
@inproceedings{lee_examining_2022,
title = {Examining the impact of emotion and agency on negotiator behavior},
author = {Eugene Lee and Zachary McNulty and Alex Gentle and Prerak Tusharkumar Pradhan and Jonathan Gratch},
url = {https://doi.org/10.1145/3514197.3549673},
doi = {10.1145/3514197.3549673},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-27},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1--3},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '22},
abstract = {Virtual human expressions can shape user behavior [1, 2, 3], yet in negotiation, findings have been underwhelming. For example, human negotiators can use anger to claim value (i.e., extract concessions) [4], but anger has no effect when exhibited by a virtual human [5]. Other psychological work suggests that emotions can create value (e.g., happy negotiators can better discover tradeoffs across issues that "grow the pie"), but little research has examined how virtual human expressions shape value creation. Here we present an agent architecture and pilot study that examines differences between how the emotional expressions of human and virtual-human opponents shape value claiming and value creation. We replicate the finding that virtual human anger fails to influence value claiming but discover counter-intuitive findings on value creation. We argue these findings highlight the potential for intelligent virtual humans to yield insight into human psychology.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hale, James; Jalan, Harsh; Saini, Nidhi; Tan, Shao Ling; Woo, Junhyuck; Gratch, Jonathan
Negotiation game to introduce non-linear utility Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
@inproceedings{hale_negotiation_2022,
title = {Negotiation game to introduce non-linear utility},
author = {James Hale and Harsh Jalan and Nidhi Saini and Shao Ling Tan and Junhyuck Woo and Jonathan Gratch},
url = {https://doi.org/10.1145/3514197.3549678},
doi = {10.1145/3514197.3549678},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-27},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1--3},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '22},
abstract = {Much prior work in automated negotiation makes the simplifying assumption of linear utility functions. As such, we propose a framework for multilateral repeated negotiations in a complex game setting—to introduce non-linearities—where negotiators can choose with whom they negotiate in subsequent games. This game setting not only creates non-linear utility functions, but also motivates the negotiation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Preference interdependencies in a multi-issue salary negotiation Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
@inproceedings{hale_preference_2022,
title = {Preference interdependencies in a multi-issue salary negotiation},
author = {James Hale and Peter Kim and Jonathan Gratch},
url = {https://doi.org/10.1145/3514197.3549681},
doi = {10.1145/3514197.3549681},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-27},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1--8},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '22},
abstract = {Negotiation is an important potential application domain for intelligent virtual agents but, unlike research on agent-agent negotiations, agents that negotiate with people often adopt unrealistic simplifying assumptions. These assumptions not only limit the generality of these agents, but call into question scientific findings about how people negotiate with agents. Here we relax two common assumptions: the use of assigned rather than elicited user preferences, and the use of linear utility functions. Using a simulated salary negotiation, we find that relaxing these assumptions helps reveal interesting individual differences in how people negotiate their salary and allows algorithms to find better win-win solutions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2021
Brixey, Jacqueline; Traum, David
Masheli: A Choctaw-English Bilingual Chatbot Book Section
In: D'Haro, Luis Fernando; Callejas, Zoraida; Nakamura, Satoshi (Ed.): Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 41–50, Springer Singapore, Singapore, 2021, ISBN: 9789811583940 9789811583957, (Series Title: Lecture Notes in Electrical Engineering).
Abstract | Links | BibTeX | Tags: Natural Language, UARC, Virtual Humans
@incollection{brixey_masheli_2021,
title = {Masheli: A Choctaw-English Bilingual Chatbot},
author = {Jacqueline Brixey and David Traum},
editor = {Luis Fernando D'Haro and Zoraida Callejas and Satoshi Nakamura},
url = {http://link.springer.com/10.1007/978-981-15-8395-7_4},
doi = {10.1007/978-981-15-8395-7_4},
isbn = {9789811583940, 9789811583957},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-15},
booktitle = {Conversational Dialogue Systems for the Next Decade},
volume = {704},
pages = {41--50},
publisher = {Springer Singapore},
address = {Singapore},
abstract = {We present the implementation of an autonomous Choctaw-English bilingual chatbot. Choctaw is an American indigenous language. The intended use of the chatbot is for Choctaw language learners to practice. The system’s backend is NPCEditor, a response selection program that is trained on linked questions and answers. The chatbot’s answers are stories and conversational utterances in both languages. We experiment with the ability of NPCEditor to appropriately respond to language mixed utterances, and describe a pilot study with Choctaw-English speakers.},
note = {Series Title: Lecture Notes in Electrical Engineering},
keywords = {Natural Language, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Gordon, Andrew S.; Wang, Timothy S.
Narrative Text Generation from Abductive Interpretations Using Axiom-Specific Templates Book Section
In: Mitchell, Alex; Vosmeer, Mirjam (Ed.): Interactive Storytelling, vol. 13138, pp. 71–79, Springer International Publishing, Cham, 2021, ISBN: 978-3-030-92299-3 978-3-030-92300-6.
Links | BibTeX | Tags: DTIC, Narrative, UARC
@incollection{gordon_narrative_2021,
title = {Narrative Text Generation from Abductive Interpretations Using Axiom-Specific Templates},
author = {Andrew S. Gordon and Timothy S. Wang},
editor = {Alex Mitchell and Mirjam Vosmeer},
url = {https://link.springer.com/10.1007/978-3-030-92300-6_7},
doi = {10.1007/978-3-030-92300-6_7},
isbn = {978-3-030-92299-3, 978-3-030-92300-6},
year = {2021},
date = {2021-01-01},
urldate = {2022-09-22},
booktitle = {Interactive Storytelling},
volume = {13138},
pages = {71--79},
publisher = {Springer International Publishing},
address = {Cham},
keywords = {DTIC, Narrative, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Chen, Meida; Feng, Andrew; Hou, Yu; McCullough, Kyle; Prasad, Pratusha Bhuvana; Soibelman, Lucio
Ground material classification for UAV-based photogrammetric 3D data: A 2D-3D Hybrid Approach Journal Article
In: 2021.
Abstract | Links | BibTeX | Tags: DTIC, Simulation, UARC
@article{chen_ground_2021,
title = {Ground material classification for UAV-based photogrammetric 3D data: A 2D-3D Hybrid Approach},
author = {Meida Chen and Andrew Feng and Yu Hou and Kyle McCullough and Pratusha Bhuvana Prasad and Lucio Soibelman},
url = {https://arxiv.org/abs/2109.12221},
doi = {10.48550/ARXIV.2109.12221},
year = {2021},
date = {2021-01-01},
urldate = {2022-09-27},
abstract = {In recent years, photogrammetry has been widely used in many areas to create photorealistic 3D virtual data representing the physical environment. The innovation of small unmanned aerial vehicles (sUAVs) has provided additional high-resolution imaging capabilities with low cost for mapping a relatively large area of interest. These cutting-edge technologies have caught the US Army and Navy's attention for the purpose of rapid 3D battlefield reconstruction, virtual training, and simulations. Our previous works have demonstrated the importance of information extraction from the derived photogrammetric data to create semantic-rich virtual environments (Chen et al., 2019). For example, an increase of simulation realism and fidelity was achieved by segmenting and replacing photogrammetric trees with game-ready tree models. In this work, we further investigated the semantic information extraction problem and focused on the ground material segmentation and object detection tasks. The main innovation of this work was that we leveraged both the original 2D images and the derived 3D photogrammetric data to overcome the challenges faced when using each individual data source. For ground material segmentation, we utilized an existing convolutional neural network architecture (i.e., 3DMV) which was originally designed for segmenting RGB-D sensed indoor data. We improved its performance for outdoor photogrammetric data by introducing a depth pooling layer in the architecture to take into consideration the distance between the source images and the reconstructed terrain model. To test the performance of our improved 3DMV, a ground truth ground material database was created using data from the One World Terrain (OWT) data repository. Finally, a workflow for importing the segmented ground materials into a virtual simulation scene was introduced, and visual results are reported in this paper.},
keywords = {DTIC, Simulation, UARC},
pubstate = {published},
tppubtype = {article}
}
He, Zihao; Tavabi, Leili; Lerman, Kristina; Soleymani, Mohammad
Speaker Turn Modeling for Dialogue Act Classification Proceedings Article
In: Findings of the Association for Computational Linguistics: EMNLP 2021, pp. 2150–2157, Association for Computational Linguistics, Punta Cana, Dominican Republic, 2021.
Links | BibTeX | Tags: Dialogue, DTIC, UARC
@inproceedings{he_speaker_2021,
title = {Speaker Turn Modeling for Dialogue Act Classification},
author = {Zihao He and Leili Tavabi and Kristina Lerman and Mohammad Soleymani},
url = {https://aclanthology.org/2021.findings-emnlp.185},
doi = {10.18653/v1/2021.findings-emnlp.185},
year = {2021},
date = {2021-01-01},
urldate = {2022-09-23},
booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2021},
pages = {2150--2157},
publisher = {Association for Computational Linguistics},
address = {Punta Cana, Dominican Republic},
keywords = {Dialogue, DTIC, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Cheng, Junyan; Fostiropoulos, Iordanis; Boehm, Barry; Soleymani, Mohammad
Multimodal Phased Transformer for Sentiment Analysis Proceedings Article
In: Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 2447–2458, Association for Computational Linguistics, Online and Punta Cana, Dominican Republic, 2021.
Links | BibTeX | Tags: DTIC, UARC
@inproceedings{cheng_multimodal_2021,
title = {Multimodal Phased Transformer for Sentiment Analysis},
author = {Junyan Cheng and Iordanis Fostiropoulos and Barry Boehm and Mohammad Soleymani},
url = {https://aclanthology.org/2021.emnlp-main.189},
doi = {10.18653/v1/2021.emnlp-main.189},
year = {2021},
date = {2021-01-01},
urldate = {2022-09-23},
booktitle = {Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing},
pages = {2447--2458},
publisher = {Association for Computational Linguistics},
address = {Online and Punta Cana, Dominican Republic},
keywords = {DTIC, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Bell, Benjamin; Bennett, Winston “Wink”; Kelsey, Elaine; Nye, Benjamin
Attention and Engagement in Virtual Environments: Measuring the Unobservable Proceedings Article
In: 2021.
Links | BibTeX | Tags: AR, DTIC, Machine Learning, UARC, VR
@inproceedings{bell_attention_2021,
title = {Attention and Engagement in Virtual Environments: Measuring the Unobservable},
author = {Benjamin Bell and Winston “Wink” Bennett and Elaine Kelsey and Benjamin Nye},
url = {https://www.xcdsystem.com/iitsec/proceedings/index.cfm?Year=2021&AbID=95758&CID=862#View},
year = {2021},
date = {2021-01-01},
keywords = {AR, DTIC, Machine Learning, UARC, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Minha; Lucas, Gale; Gratch, Jonathan
Comparing mind perception in strategic exchanges: human-agent negotiation, dictator and ultimatum games Journal Article
In: J Multimodal User Interfaces, 2021, ISSN: 1783-7677, 1783-8738.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{lee_comparing_2021,
title = {Comparing mind perception in strategic exchanges: human-agent negotiation, dictator and ultimatum games},
author = {Minha Lee and Gale Lucas and Jonathan Gratch},
url = {http://link.springer.com/10.1007/s12193-020-00356-6},
doi = {10.1007/s12193-020-00356-6},
issn = {1783-7677, 1783-8738},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-15},
journal = {J Multimodal User Interfaces},
abstract = {Recent research shows that how we respond to other social actors depends on what sort of mind we ascribe to them. In a comparative manner, we observed how perceived minds of agents shape people’s behavior in the dictator game, ultimatum game, and negotiation against artificial agents. To do so, we varied agents’ minds on two dimensions of the mind perception theory: agency (cognitive aptitude) and patiency (affective aptitude) via descriptions and dialogs. In our first study, agents with emotional capacity garnered more allocations in the dictator game, but in the ultimatum game, agents’ described agency and affective capacity, both led to greater offers. In the second study on negotiation, agents ascribed with low-agency traits earned more points than those with high-agency traits, though the negotiation tactic was the same for all agents. Although patiency did not impact game points, participants sent more happy and surprise emojis and emotionally valenced messages to agents that demonstrated emotional capacity during negotiations. Further, our exploratory analyses indicate that people related only to agents with perceived affective aptitude across all games. Both perceived agency and affective capacity contributed to moral standing after dictator and ultimatum games. But after negotiations, only agents with perceived affective capacity were granted moral standing. Manipulating mind dimensions of machines has differing effects on how people react to them in dictator and ultimatum games, compared to a more complex economic exchange like negotiation. We discuss these results, which show that agents are perceived not only as social actors, but as intentional actors through negotiations, in contrast with simple economic games.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
de Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
Risk of Injury in Moral Dilemmas With Autonomous Vehicles Journal Article
In: Front. Robot. AI, vol. 7, pp. 572529, 2021, ISSN: 2296-9144.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{de_melo_risk_2021,
title = {Risk of Injury in Moral Dilemmas With Autonomous Vehicles},
author = {Celso M. de Melo and Stacy Marsella and Jonathan Gratch},
url = {https://www.frontiersin.org/articles/10.3389/frobt.2020.572529/full},
doi = {10.3389/frobt.2020.572529},
issn = {2296-9144},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-14},
journal = {Front. Robot. AI},
volume = {7},
pages = {572529},
abstract = {As autonomous machines, such as automated vehicles (AVs) and robots, become pervasive in society, they will inevitably face moral dilemmas where they must make decisions that risk injuring humans. However, prior research has framed these dilemmas in starkly simple terms, i.e., framing decisions as life and death and neglecting the influence of risk of injury to the involved parties on the outcome. Here, we focus on this gap and present experimental work that systematically studies the effect of risk of injury on the decisions people make in these dilemmas. In four experiments, participants were asked to program their AVs to either save five pedestrians, which we refer to as the utilitarian choice, or save the driver, which we refer to as the nonutilitarian choice. The results indicate that most participants made the utilitarian choice but that this choice was moderated in important ways by perceived risk to the driver and risk to the pedestrians. As a second contribution, we demonstrate the value of formulating AV moral dilemmas in a game-theoretic framework that considers the possible influence of others’ behavior. In the fourth experiment, we show that participants were more (less) likely to make the utilitarian choice, the more utilitarian (nonutilitarian) other drivers behaved; furthermore, unlike the game-theoretic prediction that decision-makers inevitably converge to nonutilitarianism, we found significant evidence of utilitarianism. We discuss theoretical implications for our understanding of human decision-making in moral dilemmas and practical guidelines for the design of autonomous machines that solve these dilemmas while, at the same time, being likely to be adopted in practice.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Jajodia, Aditya; Karpurapu, Abhilash; Merchant, Chirag
Charisma and Learning: Designing Charismatic Behaviors for Virtual Human Tutors Proceedings Article
In: Roll, Ido; McNamara, Danielle; Sosnovsky, Sergey; Luckin, Rose; Dimitrova, Vania (Ed.): Artificial Intelligence in Education, pp. 372–377, Springer International Publishing, Cham, 2021, ISBN: 978-3-030-78270-2.
Abstract | Links | BibTeX | Tags: AI, Social Simulation, UARC
@inproceedings{wang_charisma_2021,
title = {Charisma and Learning: Designing Charismatic Behaviors for Virtual Human Tutors},
author = {Ning Wang and Aditya Jajodia and Abhilash Karpurapu and Chirag Merchant},
editor = {Ido Roll and Danielle McNamara and Sergey Sosnovsky and Rose Luckin and Vania Dimitrova},
url = {https://link.springer.com/chapter/10.1007/978-3-030-78270-2_66},
doi = {10.1007/978-3-030-78270-2_66},
isbn = {978-3-030-78270-2},
year = {2021},
date = {2021-01-01},
booktitle = {Artificial Intelligence in Education},
pages = {372--377},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Charisma is a powerful device of communication. Research on charisma on a specific type of leader in a specific type of organization – teachers in the classroom - has indicated the positive influence of a teacher’s charismatic behaviors, often referred to as immediacy behaviors, on student learning. How do we realize such behaviors in a virtual tutor? How do such behaviors impact student learning? In this paper, we discuss the design of a charismatic virtual human tutor. We developed verbal and nonverbal (with the focus on voice) charismatic strategies and realized such strategies through scripted tutorial dialogues and pre-recorded voices. A study with the virtual human tutor has shown an intriguing impact of charismatic behaviors on student learning.},
keywords = {AI, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Haiwei; Liu, Shichen; Chen, Weikai; Li, Hao; Hill, Randall
Equivariant Point Network for 3D Point Cloud Analysis Proceedings Article
In: pp. 14514–14523, 2021.
Links | BibTeX | Tags: UARC, VGL
@inproceedings{chen_equivariant_2021,
title = {Equivariant Point Network for 3D Point Cloud Analysis},
author = {Haiwei Chen and Shichen Liu and Weikai Chen and Hao Li and Randall Hill},
url = {https://openaccess.thecvf.com/content/CVPR2021/html/Chen_Equivariant_Point_Network_for_3D_Point_Cloud_Analysis_CVPR_2021_paper.html},
year = {2021},
date = {2021-01-01},
urldate = {2023-03-31},
pages = {14514--14523},
keywords = {UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Pallavicini, Federica; Giglioli, Irene Alice Chicchi; Kim, Gerard Jounghyun; Alcañiz, Mariano; Rizzo, Albert
Editorial: Virtual Reality, Augmented Reality and Video Games for Addressing the Impact of COVID-19 on Mental Health Journal Article
In: Frontiers in Virtual Reality, vol. 2, 2021, ISSN: 2673-4192.
Links | BibTeX | Tags: MedVR, UARC
@article{pallavicini_editorial_2021,
title = {Editorial: Virtual Reality, Augmented Reality and Video Games for Addressing the Impact of COVID-19 on Mental Health},
author = {Federica Pallavicini and Irene Alice Chicchi Giglioli and Gerard Jounghyun Kim and Mariano Alcañiz and Albert Rizzo},
url = {https://www.frontiersin.org/articles/10.3389/frvir.2021.719358},
issn = {2673-4192},
year = {2021},
date = {2021-01-01},
urldate = {2023-03-31},
journal = {Frontiers in Virtual Reality},
volume = {2},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Nye, Benjamin D.; Core, Mark G.; Jaiswal, Shikhar; Ghosal, Aviroop; Auerbach, Daniel
Acting Engaged: Leveraging Play Persona Archetypes for Semi-Supervised Classification of Engagement Technical Report
International Educational Data Mining Society 2021, (Publication Title: International Educational Data Mining Society ERIC Number: ED615498).
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC
@techreport{nye_acting_2021,
title = {Acting Engaged: Leveraging Play Persona Archetypes for Semi-Supervised Classification of Engagement},
author = {Benjamin D. Nye and Mark G. Core and Shikhar Jaiswal and Aviroop Ghosal and Daniel Auerbach},
url = {https://eric.ed.gov/?id=ED615498},
year = {2021},
date = {2021-01-01},
urldate = {2023-03-31},
institution = {International Educational Data Mining Society},
abstract = {Engaged and disengaged behaviors have been studied across a variety of educational contexts. However, tools to analyze engagement typically require custom-coding and calibration for a system. This limits engagement detection to systems where experts are available to study patterns and build detectors. This work studies a new approach to classify engagement patterns without expert input, by using a play persona methodology where labeled archetype data is generated by novice testers acting out different engagement patterns in a system. Domain-agnostic task features (e.g., response time to an activity, scores/correctness, task difficulty) are extracted from standardized data logs for both archetype and authentic user sessions. A semi-supervised methodology was used to label engagement; bottom-up clusters were combined with archetype data to build a classifier. This approach was analyzed with a focus on cold-start performance on small samples, using two metrics: consistency with larger full-sample cluster assignments and stability of points staying in the same cluster once assigned. These were compared against a baseline of clustering without an incrementally trained classifier. Findings on a data set from a branching multiple-choice scenario-based tutoring system indicated that approximately 52 unlabeled samples and 51 play-test labeled samples were sufficient to classify holdout sessions at 85% consistency with a full set of 145 unsupervised samples. Additionally, alignment to play persona samples for the full set matched expert labels for clusters. Use-cases and limitations of this approach are discussed. [For the full proceedings, see ED615472.]},
note = {Publication Title: International Educational Data Mining Society
ERIC Number: ED615498},
keywords = {Learning Sciences, UARC},
pubstate = {published},
tppubtype = {techreport}
}
Nye, Benjamin D.; Core, Mark G.; Ghosal, Aviroop; Walker, Peter B.
Metrics for Engagement in Games and Simulations for Learning Book Section
In: Using Cognitive and Affective Metrics in Educational Simulations and Games, Routledge, 2021, (Num Pages: 24).
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC
@incollection{nye_metrics_2021,
title = {Metrics for Engagement in Games and Simulations for Learning},
author = {Benjamin D. Nye and Mark G. Core and Aviroop Ghosal and Peter B. Walker},
url = {https://www.taylorfrancis.com/chapters/edit/10.4324/9780429282201-5/metrics-engagement-games-simulations-learning-benjamin-nye-mark-core-aviroop-ghosal-peter-walker},
year = {2021},
date = {2021-01-01},
booktitle = {Using Cognitive and Affective Metrics in Educational Simulations and Games},
publisher = {Routledge},
abstract = {Games and simulations can be more engaging than other educational tools (e.g., textbooks, videos, problem sets), and this engagement can lead to improved short- and long-term learning. However, engagement in game-based learning is not automatic, and instead requires iterative design. In this work, we explore and compare metrics from research on learning sciences and from game design, considering different time scales of human action, ranging from biological engagement (e.g., eye gaze) up to lasting social ties (e.g., community building). Certain game-design approaches used for commercial games may be useful for game-based learning, such as establishing bottom-line metrics aligned to why the game was built or analyzing engagement in terms of facets or archetypes rather than on a unidirectional scale. Further research is required to study the interaction between engagement at different time scales, particularly for cases where higher long-term engagement is indicated by lower short-term engagement (e.g., skipping easy content).},
note = {Num Pages: 24},
keywords = {Learning Sciences, UARC},
pubstate = {published},
tppubtype = {incollection}
}
2020
Chen, Meida; Feng, Andrew; McCullough, Kyle; Prasad, Pratusha Bhuvana; McAlinden, Ryan; Soibelman, Lucio
3D Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework Journal Article
In: Journal of Computing in Civil Engineering, vol. 34, no. 6, 2020, ISSN: 0887-3801, 1943-5487.
Abstract | Links | BibTeX | Tags: Narrative, STG, UARC
@article{chen_3d_2020,
title = {3D Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework},
author = {Meida Chen and Andrew Feng and Kyle McCullough and Pratusha Bhuvana Prasad and Ryan McAlinden and Lucio Soibelman},
url = {http://ascelibrary.org/doi/10.1061/%28ASCE%29CP.1943-5487.0000929},
doi = {10.1061/(ASCE)CP.1943-5487.0000929},
issn = {0887-3801, 1943-5487},
year = {2020},
date = {2020-11-01},
journal = {Journal of Computing in Civil Engineering},
volume = {34},
number = {6},
abstract = {The US Army is paying increased attention to the development of rapid three-dimensional (3D) reconstruction using photogrammetry and unmanned aerial vehicle (UAV) technologies for creating virtual environments and simulations in areas of interest. The ability of the intelligence community, mission commanders, and front-line soldiers to understand their deployed physical environment in advance is critical in the planning and rehearsal phases of any military operation. In order to achieve various simulation capabilities such as destruction operations, route planning, and explosive-standoff distances computation among others, reconstructed 3D data needs to be properly attributed. In this paper, we introduce a model ensembling framework for segmenting a 3D photogrammetry point cloud into top-level terrain elements (i.e., ground, human-made objects, and vegetation). Preprocessing and postprocessing methods were designed to overcome the data segmentation challenges posed by photogrammetric data-quality issues. A large UAV-based photogrammetric database was created for validation purposes. The designed model ensembling framework was compared with existing point cloud segmentation algorithms, and it outperformed other algorithms and achieved the best F1-score. Because the ultimate goal of segmenting a photogrammetric-generated point cloud is to create realistic virtual environments for simulation. Qualitative results for creating virtual environments using the segmented data are also discussed in this paper. DOI: 10.1061/(ASCE)CP.1943-5487.0000929. © 2020 American Society of Civil Engineers.},
keywords = {Narrative, STG, UARC},
pubstate = {published},
tppubtype = {article}
}
Skoll, Devin; Miller, Jennifer C.; Saxon, Leslie A.
COVID-19 testing and infection surveillance: Is a combined digital contact-tracing and mass-testing solution feasible in the United States? Journal Article
In: Cardiovascular Digital Health Journal, vol. 1, no. 3, pp. 149–159, 2020, ISSN: 2666-6936.
Abstract | Links | BibTeX | Tags: CBC, UARC
@article{skoll_covid-19_2020,
title = {COVID-19 testing and infection surveillance: Is a combined digital contact-tracing and mass-testing solution feasible in the United States?},
author = {Devin Skoll and Jennifer C. Miller and Leslie A. Saxon},
url = {https://www.sciencedirect.com/science/article/pii/S2666693620300360},
doi = {10.1016/j.cvdhj.2020.09.004},
issn = {2666-6936},
year = {2020},
date = {2020-11-01},
urldate = {2023-03-31},
journal = {Cardiovascular Digital Health Journal},
volume = {1},
number = {3},
pages = {149--159},
abstract = {Background
In December 2019, the novel COVID-19 virus spread from a cluster of pneumonia cases in Wuhan, China, to every corner of the globe, creating a worldwide pandemic pushing hospital systems past capacity and bringing economies worldwide to a halt. The COVID-19 pandemic is unique in comparison to prior coronavirus epidemics in its superior ability to be spread by asymptomatic and presymptomatic patients, allowing the virus to silently evade traditional symptoms-based screening approaches. Countries have implemented cutting-edge digital solutions to enhance traditional contact-tracing methodologies in combination with novel testing strategies to combat the virus, with variable levels of success. Despite having one of the most advanced and expensive health care systems in the world, the United States (U.S.) response is arguably one of the world’s largest failures, as it leads the globe in case number as well as deaths. Until a successful vaccine can be broadly distributed, it is imperative that the U.S. curb the viral spread by rapidly developing a framework implementing both enhanced tracing and testing strategies balancing the needs of public health while respecting individual liberties. This review will explore the role of technology-augmented contact-based surveillance in tracking the outbreak in select countries in comparison to the current U.S. approach. It will evaluate barriers in the U.S. to implementing similar technologies, focusing on privacy concerns and a lack of unified testing and tracing strategy. Finally, it will explore strategies for rapidly scaling testing in a cost-effective manner.},
keywords = {CBC, UARC},
pubstate = {published},
tppubtype = {article}
}
In December 2019, the novel COVID-19 virus spread from a cluster of pneumonia cases in Wuhan, China, to every corner of the globe, creating a worldwide pandemic pushing hospital systems past capacity and bringing economies worldwide to a halt. The COVID-19 pandemic is unique in comparison to prior coronavirus epidemics in its superior ability to be spread by asymptomatic and presymptomatic patients, allowing the virus to silently evade traditional symptoms-based screening approaches. Countries have implemented cutting-edge digital solutions to enhance traditional contact-tracing methodologies in combination with novel testing strategies to combat the virus, with variable levels of success. Despite having one of the most advanced and expensive health care systems in the world, the United States (U.S.) response is arguably one of the world’s largest failures, as it leads the globe in case number as well as deaths. Until a successful vaccine can be broadly distributed, it is imperative that the U.S. curb the viral spread by rapidly developing a framework implementing both enhanced tracing and testing strategies balancing the needs of public health while respecting individual liberties. This review will explore the role of technology-augmented contact-based surveillance in tracking the outbreak in select countries in comparison to the current U.S. approach. It will evaluate barriers in the U.S. to implementing similar technologies, focusing on privacy concerns and a lack of unified testing and tracing strategy. Finally, it will explore strategies for rapidly scaling testing in a cost-effective manner.
Miller, Jennifer C; Barrett, Trevor; Patel, Neil; Souza, Andrew; Wood, John; Saxon, Leslie A
In: Circulation, vol. 142, no. Suppl_3, pp. A15845–A15845, 2020, (Publisher: American Heart Association).
Abstract | Links | BibTeX | Tags: CBC, UARC
@article{miller_heart_2020,
title = {{Heart Heroes}: A Gamified {Mhealth} Platform to Measure Continuous Cardiac Health Data in the Outpatient Setting of Adolescent Patients With Known and Suspected Heart Disease},
author = {Jennifer C Miller and Trevor Barrett and Neil Patel and Andrew Souza and John Wood and Leslie A Saxon},
url = {https://www.ahajournals.org/doi/abs/10.1161/circ.142.suppl_3.15845},
doi = {10.1161/circ.142.suppl_3.15845},
year = {2020},
date = {2020-11-01},
urldate = {2023-03-31},
journal = {Circulation},
volume = {142},
number = {Suppl_3},
pages = {A15845--A15845},
abstract = {Introduction: Adolescents with heart disease report difficulty in communication about their health as a major inhibiting factor in their care. MHealth technologies collect health data in daily life and enable health data sharing between the provider and patient. The adolescent population has a high level of engagement with mobile devices and a willingness to use them for health-related activities.
Hypothesis: We hypothesized that our novel gamified mHealth platform Heart Hero can engage adolescent patients in the collection of cardiac health data in their daily life.
Methods: We designed the research app using ResearchKit to collect continuous physiological data from the Apple Watch and daily survey data on well-being, stress, medical adherence, and cardiac symptoms. Patients were provided the app, iPhone, and Apple Watch and enrolled for 27 days. A final in-app survey was provided to assess feedback. We enrolled 28 patients total who were scheduled for outpatient cardiopulmonary exercise testing.
Results: Mean age was 14.3 years old (SD +/-3.08) with a 1:1 M:F ratio. 61% of patients were ≥ 15 years of age. 94% of patients completed the final survey. Subjects on average completed 64% (SEM +/-5) of the daily quizzes with an average daily adherence of over 50% wearing the watch. 100% reported they liked using the watch and app, and 89% would like to continue wearing the Apple Watch. 53% reported the study encouraged them to exercise more while 21% reported encouragement to walk more. Fig 1 demonstrates A) Apple Watch and b) daily survey data collected from a patient throughout the study.
Conclusions: In conclusion, Heart Hero is a mHealth platform which can successfully be used to collect continuous health data from the adolescent population with high engagement characterized by adherence and positive patient feedback. Adherence to the app was notably superior to the initial 5 ResearchKit applications enrolling adult patients.
Download figure},
note = {Publisher: American Heart Association},
keywords = {CBC, UARC},
pubstate = {published},
tppubtype = {article}
}
Hypothesis: We hypothesized that our novel gamified mHealth platform Heart Hero can engage adolescent patients in the collection of cardiac health data in their daily life.
Methods: We designed the research app using ResearchKit to collect continuous physiological data from the Apple Watch and daily survey data on well-being, stress, medical adherence, and cardiac symptoms. Patients were provided the app, iPhone, and Apple Watch and enrolled for 27 days. A final in-app survey was provided to assess feedback. We enrolled 28 patients total who were scheduled for outpatient cardiopulmonary exercise testing.
Results: Mean age was 14.3 years old (SD +/-3.08) with a 1:1 M:F ratio. 61% of patients were ≥ 15 years of age. 94% of patients completed the final survey. Subjects on average completed 64% (SEM +/-5) of the daily quizzes with an average daily adherence of over 50% wearing the watch. 100% reported they liked using the watch and app, and 89% would like to continue wearing the Apple Watch. 53% reported the study encouraged them to exercise more while 21% reported encouragement to walk more. Fig 1 demonstrates A) Apple Watch and b) daily survey data collected from a patient throughout the study.
Conclusions: In conclusion, Heart Hero is a mHealth platform which can successfully be used to collect continuous health data from the adolescent population with high engagement characterized by adherence and positive patient feedback. Adherence to the app was notably superior to the initial 5 ResearchKit applications enrolling adult patients.
Download figure
Miller, Jennifer C.; Skoll, Devin; Saxon, Leslie A.
Home Monitoring of Cardiac Devices in the Era of COVID-19 Journal Article
In: Curr Cardiol Rep, vol. 23, no. 1, pp. 1, 2020, ISSN: 1534-3170.
Abstract | Links | BibTeX | Tags: CBC, UARC
@article{miller_home_2020,
title = {Home Monitoring of Cardiac Devices in the Era of {COVID-19}},
author = {Jennifer C. Miller and Devin Skoll and Leslie A. Saxon},
url = {https://doi.org/10.1007/s11886-020-01431-w},
doi = {10.1007/s11886-020-01431-w},
issn = {1534-3170},
year = {2020},
date = {2020-11-01},
urldate = {2023-03-31},
journal = {Curr Cardiol Rep},
volume = {23},
number = {1},
pages = {1},
abstract = {Despite the promise of remote patient monitoring (RPM), this technology remained underutilized secondary to a lack of data transparency and systems issues until the COVID-19 pandemic ushered in a new era of telehealth and virtual solutions out of necessity. This review will explore the data supporting the use of RPM via both implantable and wearable devices in the field of cardiology and the role of home monitoring using RPM in the era of COVID-19.},
keywords = {CBC, UARC},
pubstate = {published},
tppubtype = {article}
}
Brixey, Jacqueline; Traum, David
Masheli: A Choctaw-English bilingual chatbot Book Section
In: Conversational Dialogue Systems for the Next Decade, pp. 41–50, Springer, Switzerland, 2020.
Abstract | Links | BibTeX | Tags: ARO-Coop, Natural Language, UARC, Virtual Humans
@incollection{brixey_masheli_2020,
title = {{Masheli}: A {Choctaw-English} bilingual chatbot},
author = {Jacqueline Brixey and David Traum},
url = {https://link.springer.com/chapter/10.1007/978-981-15-8395-7_4},
year = {2020},
date = {2020-10-01},
booktitle = {Conversational Dialogue Systems for the Next Decade},
pages = {41--50},
publisher = {Springer},
address = {Switzerland},
abstract = {We present the implementation of an autonomous Choctaw-English bilingual chatbot. Choctaw is an American indigenous language. The intended use of the chatbot is for Choctaw language learners to practice conversational skills. The system’s backend is NPCEditor, a response selection program that is trained on linked questions and answers. The chatbot’s answers are stories and conversational utterances in both languages. We experiment with the ability of NPCEditor to appropriately respond to language mixed utterances, and describe a pilot study with Choctaw-English speakers.},
keywords = {ARO-Coop, Natural Language, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Wang, Ning; Pacheco, Luz; Merchant, Chirag; Skistad, Kristian; Jethwani, Aayushi
The Design of Charismatic Behaviors for Virtual Humans Proceedings Article
In: Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents, pp. 1–8, Association for Computing Machinery, New York, NY, USA, 2020, ISBN: 978-1-4503-7586-3.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{wang_design_2020,
title = {The Design of Charismatic Behaviors for Virtual Humans},
author = {Ning Wang and Luz Pacheco and Chirag Merchant and Kristian Skistad and Aayushi Jethwani},
url = {https://doi.org/10.1145/3383652.3423867},
doi = {10.1145/3383652.3423867},
isbn = {978-1-4503-7586-3},
year = {2020},
date = {2020-10-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents},
pages = {1--8},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '20},
abstract = {Charisma is a powerful device of communication and persuasion. Researchers have pinpointed specific behaviors that contribute to the perception of charisma. How can we realize such behaviors in a virtual character? In this paper, we discuss our work in the design of charismatic behavior for a virtual human. We developed a series of verbal and nonverbal (with the focus on voice) charismatic strategies based on the analysis of behaviors of charismatic leaders. We developed scripted speech dialogues with the verbal strategies and recorded the speeches with actors using the nonverbal strategies. The dialogue is further implemented in a virtual human, embedded in a virtual classroom, to give a lecture on the human circulatory system. We conducted a study with the virtual human to assess the impact of charismatic verbal and nonverbal behaviors on perceived charisma. The results show the positive impact of the use of verbal strategies and how the use of voice can influence such impact. The results shed light on the next steps needed to automatically generate charismatic speech, voices, and gestures for virtual characters.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Reilly, Adam; Whitcup, Wendy; Liewer, Matt; Mozgai, Sharon
Multi-Platform Expansion of the Virtual Human Toolkit: Ubiquitous Conversational Agents Journal Article
In: International Journal of Semantic Computing, vol. 14, no. 03, pp. 315–332, 2020, ISSN: 1793-351X, 1793-7108.
Abstract | Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@article{hartholt_multi-platform_2020,
title = {Multi-Platform Expansion of the {Virtual Human Toolkit}: Ubiquitous Conversational Agents},
author = {Arno Hartholt and Ed Fast and Adam Reilly and Wendy Whitcup and Matt Liewer and Sharon Mozgai},
url = {https://www.worldscientific.com/doi/abs/10.1142/S1793351X20400127},
doi = {10.1142/S1793351X20400127},
issn = {1793-351X, 1793-7108},
year = {2020},
date = {2020-09-01},
journal = {International Journal of Semantic Computing},
volume = {14},
number = {03},
pages = {315--332},
abstract = {We present an extension of the Virtual Human Toolkit to include a range of computing platforms, including mobile, web, Virtual Reality (VR) and Augmented Reality (AR). The Toolkit uses a mix of in-house and commodity technologies to support audio-visual sensing, speech recognition, natural language processing, nonverbal behavior generation and realization, text-to-speech generation and rendering. It has been extended to support computing platforms beyond Windows by leveraging microservices. The resulting framework maintains the modularity of the underlying architecture, allows re-use of both logic and content through cloud services, and is extensible by porting lightweight clients. We present the current state of the framework, discuss how we model and animate our characters, and offer lessons learned through several use cases, including expressive character animation in seated VR, shared space and navigation in room-scale VR, autonomous AI in mobile AR, and real-time user performance feedback leveraging mobile sensors in headset AR.},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gordon, Carla; Georgila, Kallirroi; Yanov, Volodymyr; Traum, David
Towards Personalization of Spoken Dialogue System Communication Strategies Book Section
In: Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 145–160, Springer Singapore, Singapore, 2020, ISBN: 9789811583940 9789811583957.
Abstract | Links | BibTeX | Tags: ARO-Coop, Dialogue, Natural Language, UARC, Virtual Humans
@incollection{gordon_towards_2020,
title = {Towards Personalization of Spoken Dialogue System Communication Strategies},
author = {Carla Gordon and Kallirroi Georgila and Volodymyr Yanov and David Traum},
url = {http://link.springer.com/10.1007/978-981-15-8395-7_11},
isbn = {9789811583940 9789811583957},
year = {2020},
date = {2020-09-01},
booktitle = {Conversational Dialogue Systems for the Next Decade},
volume = {704},
pages = {145--160},
publisher = {Springer Singapore},
address = {Singapore},
abstract = {This study examines the effects of 3 conversational traits – Register, Explicitness, and Misunderstandings – on user satisfaction and the perception of specific subjective features for Virtual Home Assistant spoken dialogue systems. Eight different system profiles were created, each representing a different combination of these 3 traits. We then utilized a novel Wizard of Oz data collection tool and recruited participants who interacted with the 8 different system profiles, and then rated the systems on 7 subjective features. Surprisingly, we found that systems which made errors were preferred overall, with the statistical analysis revealing error-prone systems were rated higher than systems which made no errors for all 7 of the subjective features rated. There were also some interesting interaction effects between the 3 conversational traits, such as implicit confirmations being preferred for systems employing a “conversational” Register, while explicit confirmations were preferred for systems employing a “formal” Register, even though there was no overall main effect for Explicitness. This experimental framework offers a fine-grained approach to the evaluation of user satisfaction which looks towards the personalization of communication strategies for spoken dialogue systems.},
keywords = {ARO-Coop, Dialogue, Natural Language, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Feng, Andrew; Gordon, Andrew S.
Recognizing Multiplayer Behaviors Using Synthetic Training Data Proceedings Article
In: 2020 IEEE Conference on Games (CoG), pp. 463–470, 2020, (ISSN: 2325-4289).
Abstract | Links | BibTeX | Tags: Narrative, UARC
@inproceedings{feng_recognizing_2020,
title = {Recognizing Multiplayer Behaviors Using Synthetic Training Data},
author = {Andrew Feng and Andrew S. Gordon},
doi = {10.1109/CoG47356.2020.9231742},
year = {2020},
date = {2020-08-01},
booktitle = {2020 IEEE Conference on Games (CoG)},
pages = {463--470},
abstract = {Accurate recognition of group behaviors is essential to the design of engaging networked multiplayer games. However, contemporary data-driven machine learning solutions are difficult to apply during the game development process, given that no authentic gameplay data is yet available for use as training data. In this paper, we investigate the use of synthetic training data, i.e., gameplay data that is generated by AI-controlled agent teams programmed to perform each of the behaviors to be recognized in groups of human players. The particular task we focus on is to recognize group movement formations in player-controlled avatars in a realistic virtual world. We choose five typical military team movement patterns for the formation recognition task and train machine learning models using procedurally generated unit trajectories as training data. The experiments were conducted using ResNet and EfficientNet, which are two popular convolutional neural network architectures for image classifications. The synthetic data is augmented by creating variations in image rotation, unit spacing, team size, and positional perturbations to bridge the gap between synthetic and human gameplay data. We demonstrate that high-accuracy behavior recognition can be achieved using deep neural networks by applying the aforementioned data augmentation methods to simulated gameplay data.},
note = {ISSN: 2325-4289},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Saxon, Leslie A.; Varma, Niraj; Epstein, Laurence M.; Ganz, Leonard I.; Epstein, Andrew E.
Rates of Adoption and Outcomes After Firmware Updates for Food and Drug Administration Cybersecurity Safety Advisories Journal Article
In: Circulation: Arrhythmia and Electrophysiology, vol. 13, no. 8, pp. e008364, 2020, (Publisher: American Heart Association).
Links | BibTeX | Tags: CBC, UARC
@article{saxon_rates_2020,
title = {Rates of Adoption and Outcomes After Firmware Updates for {Food and Drug Administration} Cybersecurity Safety Advisories},
author = {Leslie A. Saxon and Niraj Varma and Laurence M. Epstein and Leonard I. Ganz and Andrew E. Epstein},
url = {https://www.ahajournals.org/doi/full/10.1161/CIRCEP.120.008364},
doi = {10.1161/CIRCEP.120.008364},
year = {2020},
date = {2020-08-01},
urldate = {2023-03-31},
journal = {Circulation: Arrhythmia and Electrophysiology},
volume = {13},
number = {8},
pages = {e008364},
note = {Publisher: American Heart Association},
keywords = {CBC, UARC},
pubstate = {published},
tppubtype = {article}
}
Mell, Johnathan; Lucas, Gale; Mozgai, Sharon; Gratch, Jonathan
The Effects of Experience on Deception in Human-Agent Negotiation Journal Article
In: Journal of Artificial Intelligence Research, vol. 68, pp. 633–660, 2020, ISSN: 1076-9757.
Abstract | Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@article{mell_effects_2020,
title = {The Effects of Experience on Deception in Human-Agent Negotiation},
author = {Johnathan Mell and Gale Lucas and Sharon Mozgai and Jonathan Gratch},
url = {https://www.jair.org/index.php/jair/article/view/11924},
doi = {10.1613/jair.1.11924},
issn = {1076-9757},
year = {2020},
date = {2020-08-01},
urldate = {2023-03-31},
journal = {Journal of Artificial Intelligence Research},
volume = {68},
pages = {633--660},
abstract = {Negotiation is the complex social process by which multiple parties come to mutual agreement over a series of issues. As such, it has proven to be a key challenge problem for designing adequately social AIs that can effectively navigate this space. Artificial AI agents that are capable of negotiating must be capable of realizing policies and strategies that govern offer acceptances, offer generation, preference elicitation, and more. But the next generation of agents must also adapt to reflect their users’ experiences.
The best human negotiators tend to have honed their craft through hours of practice and experience. But, not all negotiators agree on which strategic tactics to use, and endorsement of deceptive tactics in particular is a controversial topic for many negotiators. We examine the ways in which deceptive tactics are used and endorsed in non-repeated human negotiation and show that prior experience plays a key role in governing what tactics are seen as acceptable or useful in negotiation. Previous work has indicated that people that negotiate through artificial agent representatives may be more inclined to fairness than those people that negotiate directly. We present a series of three user studies that challenge this initial assumption and expand on this picture by examining the role of past experience.
This work constructs a new scale for measuring endorsement of manipulative negotiation tactics and introduces its use to artificial intelligence research. It continues by presenting the results of a series of three studies that examine how negotiating experience can change what negotiation tactics and strategies human endorse. Study #1 looks at human endorsement of deceptive techniques based on prior negotiating experience as well as representative effects. Study #2 further characterizes the negativity of prior experience in relation to endorsement of deceptive techniques. Finally, in Study #3, we show that the lessons learned from the empirical observations in Study #1 and #2 can in fact be induced—by designing agents that provide a specific type of negative experience, human endorsement of deception can be predictably manipulated.},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
The best human negotiators tend to have honed their craft through hours of practice and experience. But, not all negotiators agree on which strategic tactics to use, and endorsement of deceptive tactics in particular is a controversial topic for many negotiators. We examine the ways in which deceptive tactics are used and endorsed in non-repeated human negotiation and show that prior experience plays a key role in governing what tactics are seen as acceptable or useful in negotiation. Previous work has indicated that people that negotiate through artificial agent representatives may be more inclined to fairness than those people that negotiate directly. We present a series of three user studies that challenge this initial assumption and expand on this picture by examining the role of past experience.
This work constructs a new scale for measuring endorsement of manipulative negotiation tactics and introduces its use to artificial intelligence research. It continues by presenting the results of a series of three studies that examine how negotiating experience can change what negotiation tactics and strategies human endorse. Study #1 looks at human endorsement of deceptive techniques based on prior negotiating experience as well as representative effects. Study #2 further characterizes the negativity of prior experience in relation to endorsement of deceptive techniques. Finally, in Study #3, we show that the lessons learned from the empirical observations in Study #1 and #2 can in fact be induced—by designing agents that provide a specific type of negative experience, human endorsement of deception can be predictably manipulated.
Rakofsky, Jeffrey J.; Talbot, Thomas B.; Dunlop, Boadie W.
A Virtual Standardized Patient–Based Assessment Tool to Evaluate Psychiatric Residents’ Psychopharmacology Proficiency Journal Article
In: Academic Psychiatry, 2020, ISSN: 1042-9670, 1545-7230.
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{rakofsky_virtual_2020,
title = {A Virtual Standardized Patient–Based Assessment Tool to Evaluate Psychiatric Residents’ Psychopharmacology Proficiency},
author = {Jeffrey J. Rakofsky and Thomas B. Talbot and Boadie W. Dunlop},
url = {http://link.springer.com/10.1007/s40596-020-01286-x},
doi = {10.1007/s40596-020-01286-x},
issn = {1042-9670, 1545-7230},
year = {2020},
date = {2020-07-01},
journal = {Academic Psychiatry},
abstract = {Objectives A virtual standardized patient-based assessment simulator was developed to address biases and practical limitations in existing methods for evaluating residents’ proficiency in psychopharmacological knowledge and practice. Methods The simulator was designed to replicate an outpatient psychiatric clinic experience. The virtual patient reported symptoms of a treatment-resistant form of major depressive disorder (MDD), requiring the learner to use various antidepressants in order for the patient to fully remit. Test scores were based on the proportion of correct responses to questions asked by the virtual patient about possible side effects, dosing, and titration decisions, which depended upon the patient’s tolerability and response to the learner’s selected medications. The validation paradigm included a novice-expert performance comparison across 4th year medical students, psychiatric residents from all four post-graduate year classes, and psychiatry department faculty, and a correlational analysis of simulator performance with the PRITE Somatic Treatments subscale score. Post-test surveys evaluated the test takers’ subjective impressions of the simulator. Results Forty-three subjects completed the online exam and survey. Total mean scores on the exam differed significantly across all the learner groups in a step-wise manner from students to faculty (F = 6.10},
internal-note = {NOTE(review): abstract appears truncated mid-sentence at "F = 6.10" in the source export; restore the full abstract from the publisher page if needed},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Li, Ruilong; Xiu, Yuliang; Saito, Shunsuke; Huang, Zeng; Olszewski, Kyle; Li, Hao
Monocular Real-Time Volumetric Performance Capture Journal Article
In: ResearchGate, pp. 30, 2020.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@article{li_monocular_2020,
title = {Monocular Real-Time Volumetric Performance Capture},
author = {Ruilong Li and Yuliang Xiu and Shunsuke Saito and Zeng Huang and Kyle Olszewski and Hao Li},
url = {https://www.researchgate.net/publication/343279742_Monocular_Real-Time_Volumetric_Performance_Capture},
year = {2020},
date = {2020-07-01},
journal = {ResearchGate},
pages = {30},
internal-note = {NOTE(review): "ResearchGate" is a hosting platform, not a journal, and pages={30} looks like a page count rather than a page range — presumably this is a preprint; verify the actual venue and update the entry type/fields accordingly},
abstract = {We present the first approach to volumetric performance capture and novel-view rendering at real-time speed from monocular video, eliminating the need for expensive multi-view systems or cumbersome pre-acquisition of a personalized template model. Our system reconstructs a fully textured 3D human from each frame by leveraging Pixel-Aligned Implicit Function (PIFu). While PIFu achieves high-resolution reconstruction in a memory-efficient manner, its computationally expensive inference prevents us from deploying such a system for real-time applications. To this end, we propose a novel hierarchical surface localization algorithm and a direct rendering method without explicitly extracting surface meshes. By culling unnecessary regions for evaluation in a coarse-to-fine manner, we successfully accelerate the reconstruction by two orders of magnitude from the baseline without compromising the quality. Furthermore, we introduce an Online Hard Example Mining (OHEM) technique that effectively suppresses failure modes due to the rare occurrence of challenging examples. We adaptively update the sampling probability of the training data based on the current reconstruction accuracy, which effectively alleviates reconstruction artifacts. Our experiments and evaluations demonstrate the robustness of our system to various challenging angles, illuminations, poses, and clothing styles. We also show that our approach compares favorably with the state-of-the-art monocular performance capture. Our proposed approach removes the need for multi-view studio settings and enables a consumer-accessible solution for volumetric capture.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
Brixey, Jacqueline; Artstein, Ron
ChoCo: a multimodal corpus of the Choctaw language Journal Article
In: Language Resources and Evaluation, 2020, ISSN: 1574-020X, 1574-0218.
Abstract | Links | BibTeX | Tags: ARO-Coop, UARC, Virtual Humans
@article{brixey_choco_2020,
title = {{ChoCo}: a multimodal corpus of the {Choctaw} language},
author = {Jacqueline Brixey and Ron Artstein},
url = {http://link.springer.com/10.1007/s10579-020-09494-5},
doi = {10.1007/s10579-020-09494-5},
issn = {1574-020X, 1574-0218},
year = {2020},
date = {2020-07-01},
journal = {Language Resources and Evaluation},
abstract = {This article presents a general use corpus for Choctaw, an American indigenous language (ISO 639-2: cho, endonym: Chahta). The corpus contains audio, video, and text resources, with many texts also translated in English. The Oklahoma Choctaw and the Mississippi Choctaw variants of the language are represented in the corpus. The data set provides documentation support for this threatened language, and allows researchers and language teachers access to a diverse collection of resources.},
keywords = {ARO-Coop, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Rayatdoost, Soheil; Rudrauf, David; Soleymani, Mohammad
Expression-Guided EEG Representation Learning for Emotion Recognition Proceedings Article
In: Proceedings of the ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3222–3226, IEEE, Barcelona, Spain, 2020, ISBN: 978-1-5090-6631-5.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{rayatdoost_expression-guided_2020,
title = {Expression-Guided {EEG} Representation Learning for Emotion Recognition},
author = {Soheil Rayatdoost and David Rudrauf and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9053004/},
doi = {10.1109/ICASSP40776.2020.9053004},
isbn = {978-1-5090-6631-5},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {3222--3226},
publisher = {IEEE},
address = {Barcelona, Spain},
abstract = {Learning a joint and coordinated representation between different modalities can improve multimodal emotion recognition. In this paper, we propose a deep representation learning approach for emotion recognition from electroencephalogram (EEG) signals guided by facial electromyogram (EMG) and electrooculogram (EOG) signals. We recorded EEG, EMG and EOG signals from 60 participants who watched 40 short videos and self-reported their emotions. A cross-modal encoder that jointly learns the features extracted from facial and ocular expressions and EEG responses was designed and evaluated on our recorded data and MAHOB-HCI, a publicly available database. We demonstrate that the proposed representation is able to improve emotion recognition performance. We also show that the learned representation can be transferred to a different database without EMG and EOG and achieve superior performance. Methods that fuse behavioral and neural responses can be deployed in wearable emotion recognition solutions, practical in situations in which computer vision expression recognition is not feasible.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Bonial, Claire; Donatelli, Lucia; Abrams, Mitchell; Lukin, Stephanie M; Tratz, Stephen; Marge, Matthew; Artstein, Ron; Traum, David; Voss, Clare R
Dialogue-AMR: Abstract Meaning Representation for Dialogue Proceedings Article
In: Proceedings of the 12th Language Resources and Evaluation Conference, pp. 12, European Language Resources Association, Marseille, France, 2020.
Abstract | Links | BibTeX | Tags: ARL, ARO-Coop, DoD, UARC, Virtual Humans
@inproceedings{bonial_dialogue-amr_2020,
title = {{Dialogue-AMR}: Abstract Meaning Representation for Dialogue},
author = {Claire Bonial and Lucia Donatelli and Mitchell Abrams and Stephanie M Lukin and Stephen Tratz and Matthew Marge and Ron Artstein and David Traum and Clare R Voss},
url = {https://www.aclweb.org/anthology/2020.lrec-1.86/},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 12th Language Resources and Evaluation Conference},
pages = {12},
internal-note = {NOTE(review): pages={12} looks like a page count, not a page range; verify against the ACL Anthology record and replace with the actual range},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {This paper describes a schema that enriches Abstract Meaning Representation (AMR) in order to provide a semantic representation for facilitating Natural Language Understanding (NLU) in dialogue systems. AMR offers a valuable level of abstraction of the propositional content of an utterance; however, it does not capture the illocutionary force or speaker’s intended contribution in the broader dialogue context (e.g., make a request or ask a question), nor does it capture tense or aspect. We explore dialogue in the domain of human-robot interaction, where a conversational robot is engaged in search and navigation tasks with a human partner. To address the limitations of standard AMR, we develop an inventory of speech acts suitable for our domain, and present “Dialogue-AMR”, an enhanced AMR that represents not only the content of an utterance, but the illocutionary force behind it, as well as tense and aspect. To showcase the coverage of the schema, we use both manual and automatic methods to construct the “DialAMR” corpus—a corpus of human-robot dialogue annotated with standard AMR and our enriched Dialogue-AMR schema. Our automated methods can be used to incorporate AMR into a larger NLU pipeline supporting human-robot dialogue.},
keywords = {ARL, ARO-Coop, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Gordon, Carla; Yanov, Volodymyr; Traum, David
Predicting Ratings of Real Dialogue Participants from Artificial Data and Ratings of Human Dialogue Observers Proceedings Article
In: Proceedings of the Twelfth Language Resources and Evaluation Conference, pp. 726–734, European Language Resources Association, Marseille, France, 2020, ISBN: 979-10-95546-34-4.
Abstract | Links | BibTeX | Tags: Natural Language, UARC
@inproceedings{georgila_predicting_2020,
title = {Predicting Ratings of Real Dialogue Participants from Artificial Data and Ratings of Human Dialogue Observers},
author = {Kallirroi Georgila and Carla Gordon and Volodymyr Yanov and David Traum},
url = {https://aclanthology.org/2020.lrec-1.91},
isbn = {979-10-95546-34-4},
year = {2020},
date = {2020-05-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the Twelfth Language Resources and Evaluation Conference},
pages = {726--734},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {We collected a corpus of dialogues in a Wizard of Oz (WOz) setting in the Internet of Things (IoT) domain. We asked users participating in these dialogues to rate the system on a number of aspects, namely, intelligence, naturalness, personality, friendliness, their enjoyment, overall quality, and whether they would recommend the system to others. Then we asked dialogue observers, i.e., Amazon Mechanical Turkers (MTurkers), to rate these dialogues on the same aspects. We also generated simulated dialogues between dialogue policies and simulated users and asked MTurkers to rate them again on the same aspects. Using linear regression, we developed dialogue evaluation functions based on features from the simulated dialogues and the MTurkers' ratings, the WOz dialogues and the MTurkers' ratings, and the WOz dialogues and the WOz participants' ratings. We applied all these dialogue evaluation functions to a held-out portion of our WOz dialogues, and we report results on the predictive power of these different types of dialogue evaluation functions. Our results suggest that for three conversational aspects (intelligence, naturalness, overall quality) just training evaluation functions on simulated data could be sufficient.},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Katz, Andrea C.; Norr, Aaron M.; Buck, Benjamin; Fantelli, Emily; Edwards-Stewart, Amanda; Koenen-Woods, Patricia; Zetocha, Kimberlee; Smolenski, Derek J.; Holloway, Kevin; Rothbaum, Barbara O.; Difede, JoAnn; Rizzo, Albert; Skopp, Nancy; Mishkind, Matt; Gahm, Gregory; Reger, Greg M.; Andrasik, Frank
Changes in physiological reactivity in response to the trauma memory during prolonged exposure and virtual reality exposure therapy for posttraumatic stress disorder Journal Article
In: Psychological Trauma: Theory, Research, Practice, and Policy, 2020, ISSN: 1942-969X, 1942-9681.
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{katz_changes_2020,
title = {Changes in physiological reactivity in response to the trauma memory during prolonged exposure and virtual reality exposure therapy for posttraumatic stress disorder},
author = {Andrea C. Katz and Aaron M. Norr and Benjamin Buck and Emily Fantelli and Amanda Edwards-Stewart and Patricia Koenen-Woods and Kimberlee Zetocha and Derek J. Smolenski and Kevin Holloway and Barbara O. Rothbaum and JoAnn Difede and Albert Rizzo and Nancy Skopp and Matt Mishkind and Gregory Gahm and Greg M. Reger and Frank Andrasik},
url = {http://doi.apa.org/getdoi.cfm?doi=10.1037/tra0000567},
doi = {10.1037/tra0000567},
issn = {1942-969X, 1942-9681},
year = {2020},
date = {2020-04-01},
journal = {Psychological Trauma: Theory, Research, Practice, and Policy},
abstract = {This study is among the first to examine how physiological processes change throughout PTSD treatment and the first to compare standard exposure therapy to therapy augmented with virtual reality (VR) in active-duty soldiers with PTSD. Results showed that soldiers in VR therapy had smaller physical reactions to trauma memories compared to those who did not receive treatment, whereas those who got standard treatment did not. These findings provide insight into possible mechanisms of PTSD treatment, point to potential objective indicators of early treatment response in active-duty soldiers, and suggest that VR treatment might lead to earlier symptom reduction.},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Leuski, Anton; Yanov, Volodymyr; Traum, David
Human swarm interaction using plays, audibles, and a virtual spokesperson Proceedings Article
In: Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II, pp. 40, SPIE, Online Only, United States, 2020, ISBN: 978-1-5106-3603-3 978-1-5106-3604-0.
Abstract | Links | BibTeX | Tags: ARL, DoD, MxR, UARC, Virtual Humans
@inproceedings{chaffey_human_2020,
title = {Human swarm interaction using plays, audibles, and a virtual spokesperson},
author = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and Anton Leuski and Volodymyr Yanov and David Traum},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/11413/2557573/Human-swarm-interaction-using-plays-audibles-and-a-virtual-spokesperson/10.1117/12.2557573.full},
doi = {10.1117/12.2557573},
isbn = {978-1-5106-3603-3, 978-1-5106-3604-0},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II},
pages = {40},
publisher = {SPIE},
address = {Online Only, United States},
abstract = {This study explores two hypotheses about human-agent teaming: 1. Real-time coordination among a large set of autonomous robots can be achieved using predefined “plays” which define how to execute a task, and “audibles” which modify the play on the fly; 2. A spokesperson agent can serve as a representative for a group of robots, relaying information between the robots and human teammates. These hypotheses are tested in a simulated game environment: a human participant leads a search-and-rescue operation to evacuate a town threatened by an approaching wildfire, with the object of saving as many lives as possible. The participant communicates verbally with a virtual agent controlling a team of ten aerial robots and one ground vehicle, while observing a live map display with real-time location of the fire and identified survivors. Since full automation is not currently possible, two human controllers control the agent’s speech and actions, and input parameters to the robots, which then operate autonomously until the parameters are changed. Designated plays include monitoring the spread of fire, searching for survivors, broadcasting warnings, guiding residents to safety, and sending the rescue vehicle. A successful evacuation of all the residents requires personal intervention in some cases (e.g., stubborn residents) while delegating other responsibilities to the spokesperson agent and robots, all in a rapidly changing scene. The study records the participants’ verbal and nonverbal behavior in order to identify strategies people use when communicating with robotic swarms, and to collect data for eventual automation.},
keywords = {ARL, DoD, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Ruilong; Bladin, Karl; Zhao, Yajie; Chinara, Chinmay; Ingraham, Owen; Xiang, Pengda; Ren, Xinglei; Prasad, Pratusha; Kishore, Bipin; Xing, Jun; Li, Hao
Learning Formation of Physically-Based Face Attributes Proceedings Article
In: Proceedings of the CVPR 2020, IEEE, Seattle, Washington, 2020.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{li_learning_2020,
title = {Learning Formation of Physically-Based Face Attributes},
author = {Ruilong Li and Karl Bladin and Yajie Zhao and Chinmay Chinara and Owen Ingraham and Pengda Xiang and Xinglei Ren and Pratusha Prasad and Bipin Kishore and Jun Xing and Hao Li},
url = {https://www.computer.org/csdl/proceedings-article/cvpr/2020/716800d407/1m3oiaP9ouQ},
doi = {10.1109/CVPR42600.2020.00347},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of the {CVPR} 2020},
publisher = {IEEE},
address = {Seattle, Washington},
abstract = {Based on a combined data set of 4000 high resolution facial scans, we introduce a non-linear morphable face model, capable of producing multifarious face geometry of pore-level resolution, coupled with material attributes for use in physically-based rendering. We aim to maximize the variety of face identities, while increasing the robustness of correspondence between unique components, including middle-frequency geometry, albedo maps, specular intensity maps and high-frequency displacement details. Our deep learning based generative model learns to correlate albedo and geometry, which ensures the anatomical correctness of the generated assets. We demonstrate potential use of our generative model for novel identity generation, model fitting, interpolation, animation, high fidelity data visualization, and low-to-high resolution data domain transferring. We hope the release of this generative model will encourage further cooperation between all graphics, vision, and data focused professionals, while demonstrating the cumulative value of every individual’s complete biometric profile.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Olszewski, Kyle; Ceylan, Duygu; Xing, Jun; Echevarria, Jose; Chen, Zhili; Chen, Weikai; Li, Hao
Intuitive, Interactive Beard and Hair Synthesis with Generative Models Proceedings Article
In: Proceedings of the CVPR 2020, IEEE, Seattle, Washington, 2020.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{olszewski_intuitive_2020,
title = {Intuitive, Interactive Beard and Hair Synthesis with Generative Models},
author = {Kyle Olszewski and Duygu Ceylan and Jun Xing and Jose Echevarria and Zhili Chen and Weikai Chen and Hao Li},
url = {http://arxiv.org/abs/2004.06848},
doi = {10.1109/CVPR42600.2020.00747},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of the {CVPR} 2020},
publisher = {IEEE},
address = {Seattle, Washington},
abstract = {We present an interactive approach to synthesizing realistic variations in facial hair in images, ranging from subtle edits to existing hair to the addition of complex and challenging hair in images of clean-shaven subjects. To circumvent the tedious and computationally expensive tasks of modeling, rendering and compositing the 3D geometry of the target hairstyle using the traditional graphics pipeline, we employ a neural network pipeline that synthesizes realistic and detailed images of facial hair directly in the target image in under one second. The synthesis is controlled by simple and sparse guide strokes from the user defining the general structural and color properties of the target hairstyle. We qualitatively and quantitatively evaluate our chosen method compared to several alternative approaches. We show compelling interactive editing results with a prototype user interface that allows novice users to progressively refine the generated image to match their desired hairstyle, and demonstrate that our approach also allows for flexible and high-fidelity scalp hair synthesis.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Davis, Dan M; Guizani, Skander; Jaksha, Evan
Establishing Metrics and Creating Standards: Quantifying Efficacy of Battlefield Simulations Journal Article
In: SISO Simulation Innovation Workshop, no. 2020_SIW_52, pp. 11, 2020.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC
@article{davis_establishing_2020,
title = {Establishing Metrics and Creating Standards: Quantifying Efficacy of Battlefield Simulations},
author = {Dan M Davis and Skander Guizani and Evan Jaksha},
url = {https://www.sisostds.org/Default.aspx?tabid=105&EntryId=51197},
year = {2020},
date = {2020-04-01},
journal = {SISO Simulation Innovation Workshop},
number = {2020\_SIW\_52},
pages = {11},
abstract = {This paper asserts that quantification and verification of Battlefield simulations is necessary to assess, verify, and guide the researchers, military commanders, and users in both the simulations’ development and their implementation. The authors present their observations on previous development activities that were hampered by lack of effective metrics and present their arguments that much of this was driven by a lack of standards. Tracing back using commonly accepted System Engineering practices, they show how lack of such standards makes even to the development of effective metrics problematic. The paper documents the experiences and enumerates the potential pitfalls of these shortcomings. Both the authors' experiences in military service and the technical literature supporting their theses are adduced to support their analysis of the current technical research and development environment. Then the paper evaluates several System Engineering tools to further investigate and establish the ultimate goals of these formalized processes. Using their current project in establishing virtual on-line mentors as an exemplar of the way such tools would be effective, the authors make a case for the needs for metrics standards that both are accepted by consensus and are ultimately directed at providing the warfighter with all of the training possible before putting that warfighters in harm's way and imperiling the missions for which they are putting themselves at risk. Examples of the nature and reaction to simulator training, virtual human interaction, computer agent interfaces and implementation issues are given to further illuminate for the reader the possible extensions of these approaches into the reader's own research as well as calling for a more community-wide recognition of the needs for standards both for implementation and for metrics to assess Battlefield Simulation utility to the warfighter. 
Future investigations, analysis and action are considered and evaluated},
keywords = {Learning Sciences, UARC},
pubstate = {published},
tppubtype = {article}
}
Davis, Dan M; Rosenberg, Milton; Davis, Mark C
Proactive Natural Language Processing: Addressing Terminology Disparity and Team Coalescence Journal Article
In: SISO Simulation Innovation Workshop, no. 2020_SIW_39, pp. 11, 2020.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC
@article{davis_proactive_2020,
title = {Proactive Natural Language Processing: Addressing Terminology Disparity and Team Coalescence},
author = {Dan M Davis and Milton Rosenberg and Mark C Davis},
url = {https://www.sisostds.org/Default.aspx?tabid=105&EntryId=51197},
year = {2020},
date = {2020-04-01},
journal = {SISO Simulation Innovation Workshop},
number = {2020\_SIW\_39},
pages = {11},
abstract = {There is a continuing need for battlefield simulations and virtual humans. Most recently, the authors have been focused on the creation of virtual conversation environments to leverage the mentoring skills of selected individuals by creating large libraries of short video clips of advice which are then presented to the user in response to their questions. In these endeavors two issues have arisen; the inconsistency of the definitions used and the need to ameliorate the impacts of short-tour intervals on team formation. This paper will address both of these issues, review existing research, document some early research into these impediments, and discuss the similarities of these issues to those faced by the standards community writ large. They will cite and review the work of Professor Bruce Tuckman: Forming, Storming, Norming, and Performing. The benefits of using virtual humans to enhance these processes are outlined. The need for and design of proactive Natural Language Processing-enabled virtual humans and computer agents is set forth and analyzed. The paper will lay out the research goals, identify the semantic differences, and report on the potential impacts of those differences. In its totality, this paper intends to demonstrate that, in addition to the need to evangelize about the necessity of standards, this community has a lot to contribute to researchers, developers, and implementers faced with destructive differences in terminology, understanding and practice. All of this data and analysis will be presented in a way that should make sure that the insights garnered therefrom are accessible by members of this and other communities and they can be implemented and modified, as is most effective. Future advances now in development are discussed, along with the utility of these new capabilities and approaches.},
keywords = {Learning Sciences, UARC},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert "Skip"
An Adaptive Agent-Based Interface for Personalized Health Interventions Proceedings Article
In: Proceedings of the 25th International Conference on Intelligent User Interfaces Companion, pp. 118–119, ACM, Cagliari Italy, 2020, ISBN: 978-1-4503-7513-9.
Abstract | Links | BibTeX | Tags: MedVR, UARC, VHTL, Virtual Humans
@inproceedings{mozgai_adaptive_2020,
title = {An Adaptive Agent-Based Interface for Personalized Health Interventions},
author = {Sharon Mozgai and Arno Hartholt and Albert "Skip" Rizzo},
url = {https://dl.acm.org/doi/10.1145/3379336.3381467},
doi = {10.1145/3379336.3381467},
isbn = {978-1-4503-7513-9},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces Companion},
pages = {118--119},
publisher = {ACM},
address = {Cagliari Italy},
abstract = {This demo introduces a novel mHealth application with an agent-based interface designed to collect multimodal data with passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin) as well as through user self-report. This mHealth application delivers personalized and adaptive multimedia content via smartphone application specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health via novel adaptive logic-based algorithms while employing behavior change techniques (e.g., goal-setting, barrier identification, etc.). A virtual human coach leads all interactions to improve adherence.},
keywords = {MedVR, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert
The Passive Sensing Agent: A Multimodal Adaptive mHealth Application Proceedings Article
In: Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops), pp. 1–3, IEEE, Austin, TX, USA, 2020, ISBN: 978-1-72814-716-1.
Abstract | Links | BibTeX | Tags: MedVR, UARC, VHTL, Virtual Humans
@inproceedings{mozgai_passive_2020,
title = {The Passive Sensing Agent: A Multimodal Adaptive mHealth Application},
author = {Sharon Mozgai and Arno Hartholt and Albert Rizzo},
url = {https://ieeexplore.ieee.org/document/9156177/},
doi = {10.1109/PerComWorkshops48775.2020.9156177},
isbn = {978-1-72814-716-1},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)},
pages = {1--3},
publisher = {IEEE},
address = {Austin, TX, USA},
abstract = {We are demoing the Passive Sensing Agent (PSA), an mHealth virtual human coach, that collects multimodal data through passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin). This virtual human interface delivers adaptive multi-media content via smartphone application that is specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health. Initially developed for the military, the PSA delivers health interventions (e.g., educational exercises, physical challenges, and performance feedback) matched to the individual user via novel adaptive logic-based algorithms while employing various behavior change techniques (e.g., goal-setting, barrier identification, rewards, modeling, etc.). A virtual human coach leads all interactions including the first-time user experience and the brief daily sessions. All interactions were specifically designed to engage and motivate the user while continuously collecting data on their cognitive, emotional, and physical fitness. This multi-component application is integrated and deployed on an iPhone and Apple Watch prototype; a civilian version is currently in-development.},
keywords = {MedVR, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Shmueli-Scheuer, Michal; Artstein, Ron; Khazaeni, Yasaman; Fang, Hao; Liao, Q. Vera
user2agent: 2nd Workshop on User-Aware Conversational Agents Proceedings Article
In: Proceedings of the 25th International Conference on Intelligent User Interfaces Companion, pp. 9–10, Association for Computing Machinery, New York, NY, USA, 2020, ISBN: 978-1-4503-7513-9.
Abstract | Links | BibTeX | Tags: Natural Language, UARC
@inproceedings{shmueli-scheuer_user2agent_2020,
title = {{user2agent}: 2nd Workshop on User-Aware Conversational Agents},
author = {Michal Shmueli-Scheuer and Ron Artstein and Yasaman Khazaeni and Hao Fang and Q. Vera Liao},
url = {https://doi.org/10.1145/3379336.3379356},
doi = {10.1145/3379336.3379356},
isbn = {978-1-4503-7513-9},
year = {2020},
date = {2020-03-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces Companion},
pages = {9--10},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IUI '20},
abstract = {Conversational agents are becoming increasingly popular. These systems present an extremely rich and challenging research space for addressing many aspects of user awareness and adaptation, such as user profiles, contexts, personalities, emotions, social dynamics, conversational styles, etc. Adaptive interfaces are of long-standing interest for the HCI community. Meanwhile, new machine learning approaches are introduced in the current generation of conversational agents, such as deep learning, reinforcement learning, and active learning. It is imperative to consider how various aspects of user-awareness should be handled by these new techniques. The goal of this workshop is to bring together researchers in HCI, user modeling, and the AI and NLP communities from both industry and academia, who are interested in advancing the state-of-the-art on the topic of user-aware conversational agents. Through a focused and open exchange of ideas and discussions, we will work to identify central research topics in user-aware conversational agents and develop a strong interdisciplinary foundation to address them.},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Meida; Feng, Andrew; Prasad, Pratusha Bhuvana; McAlinden, Ryan; Soibelman, Lucio; Enloe, Mike
Fully Automated Photogrammetric Data Segmentation and Object Information Extraction Approach for Creating Simulation Terrain Proceedings Article
In: Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC), pp. 13, ResearchGate, Orlando, FL, 2020.
Abstract | Links | BibTeX | Tags: Graphics, Narrative, STG, UARC
@inproceedings{chen_fully_2020,
title = {Fully Automated Photogrammetric Data Segmentation and Object Information Extraction Approach for Creating Simulation Terrain},
author = {Meida Chen and Andrew Feng and Pratusha Bhuvana Prasad and Ryan McAlinden and Lucio Soibelman and Mike Enloe},
url = {https://www.researchgate.net/publication/338557943_Fully_Automated_Photogrammetric_Data_Segmentation_and_Object_Information_Extraction_Approach_for_Creating_Simulation_Terrain},
year = {2020},
date = {2020-01-01},
booktitle = {Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC)},
pages = {13},
publisher = {ResearchGate},
address = {Orlando, FL},
abstract = {Our previous works have demonstrated that visually realistic 3D meshes can be automatically reconstructed with lowcost, off-the-shelf unmanned aerial systems (UAS) equipped with capable cameras, and efficient photogrammetric software techniques (McAlinden, Suma, Grechkin, & Enloe, 2015; Spicer, McAlinden, Conover, & Adelphi, 2016). However, such generated data do not contain semantic information/features of objects (i.e., man-made objects, vegetation, ground, object materials, etc.) and cannot allow the sophisticated user-level and system-level interaction. Considering the use case of the data in creating realistic virtual environments for training and simulations (i.e., mission planning, rehearsal, threat detection, etc.), segmenting the data and extracting object information are essential tasks. Previous studies have focused on and made valuable contributions to segment Light Detection and Ranging (LIDAR) generated 3D point clouds and classifying ground materials from real-world images. However, only a few studies have focused on the data created using the photogrammetric technique.},
keywords = {Graphics, Narrative, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhao, Sicheng; Wang, Shangfei; Soleymani, Mohammad; Joshi, Dhiraj; Ji, Qiang
Affective Computing for Large-scale Heterogeneous Multimedia Data: A Survey Journal Article
In: ACM Transactions on Multimedia Computing, Communications, and Applications, vol. 15, no. 3s, pp. 1–32, 2020, ISSN: 1551-6857, 1551-6865.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{zhao_affective_2020,
title = {Affective Computing for Large-scale Heterogeneous Multimedia Data: A Survey},
author = {Sicheng Zhao and Shangfei Wang and Mohammad Soleymani and Dhiraj Joshi and Qiang Ji},
url = {https://dl.acm.org/doi/10.1145/3363560},
doi = {10.1145/3363560},
issn = {1551-6857, 1551-6865},
year = {2020},
date = {2020-01-01},
journal = {ACM Transactions on Multimedia Computing, Communications, and Applications},
volume = {15},
number = {3s},
pages = {1--32},
abstract = {The wide popularity of digital photography and social networks has generated a rapidly growing volume of multimedia data (i.e., images, music, and videos), resulting in a great demand for managing, retrieving, and understanding these data. Affective computing (AC) of these data can help to understand human behaviors and enable wide applications. In this article, we survey the state-of-the-art AC technologies comprehensively for large-scale heterogeneous multimedia data. We begin this survey by introducing the typical emotion representation models from psychology that are widely employed in AC. We briefly describe the available datasets for evaluating AC algorithms. We then summarize and compare the representative methods on AC of different multimedia types, i.e., images, music, videos, and multimodal data, with the focus on both handcrafted features-based methods and deep learning methods. Finally, we discuss some challenges and future directions for multimedia affective computing.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Shinagawa, Seitaro; Yoshino, Koichiro; Alavi, Seyed Hossein; Georgila, Kallirroi; Traum, David; Sakti, Sakriani; Nakamura, Satoshi
An Interactive Image Editing System Using an Uncertainty-Based Confirmation Strategy Journal Article
In: IEEE Access, vol. 8, pp. 98471–98480, 2020, ISSN: 2169-3536, (Conference Name: IEEE Access).
Abstract | Links | BibTeX | Tags: Natural Language, UARC
@article{shinagawa_interactive_2020,
title = {An Interactive Image Editing System Using an Uncertainty-Based Confirmation Strategy},
author = {Seitaro Shinagawa and Koichiro Yoshino and Seyed Hossein Alavi and Kallirroi Georgila and David Traum and Sakriani Sakti and Satoshi Nakamura},
url = {https://ieeexplore.ieee.org/abstract/document/9099288},
doi = {10.1109/ACCESS.2020.2997012},
issn = {2169-3536},
year = {2020},
date = {2020-01-01},
journal = {IEEE Access},
volume = {8},
pages = {98471--98480},
abstract = {We propose an interactive image editing system that has a confirmation dialogue strategy using an entropy-based uncertainty calculation on its generated images with Deep Convolutional Generative Adversarial Networks (DCGAN). DCGAN is an image generative model that learns an image manifold of a given dataset and enables continuous change of an image. Our proposed image editing system combines DCGAN with a natural language interface that accepts image editing requests in natural language. Although such a system is helpful for human users, it often faces uncertain requests to generate acceptable images. A promising approach to solve this problem is introducing a dialogue process that shows multiple candidates and confirms the user's intention. However, confirming every editing request creates redundant dialogues. To achieve more efficient dialogues, we propose an entropy-based dialogue strategy that decides when the system should confirm, and enables effective image editing through a dialogue that reduces redundant confirmations. We conducted image editing dialogue experiments using an avatar face illustration dataset for editing by natural language requests. Through quantitative and qualitative analysis, our results show that our entropy-based confirmation strategy achieved an effective dialogue by generating images desired by users.},
note = {Conference Name: IEEE Access},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {article}
}
Uryupina, Olga; Artstein, Ron; Bristot, Antonella; Cavicchio, Federica; Delogu, Francesca; Rodriguez, Kepa J.; Poesio, Massimo
Annotating a broad range of anaphoric phenomena, in a variety of genres: the ARRAU Corpus Journal Article
In: Natural Language Engineering, vol. 26, no. 1, pp. 95–128, 2020, ISSN: 1351-3249, 1469-8110, (Publisher: Cambridge University Press).
Abstract | Links | BibTeX | Tags: Natural Language, UARC
@article{uryupina_annotating_2020,
title = {Annotating a broad range of anaphoric phenomena, in a variety of genres: the {ARRAU} Corpus},
author = {Olga Uryupina and Ron Artstein and Antonella Bristot and Federica Cavicchio and Francesca Delogu and Kepa J. Rodriguez and Massimo Poesio},
url = {https://www.cambridge.org/core/journals/natural-language-engineering/article/abs/annotating-a-broad-range-of-anaphoric-phenomena-in-a-variety-of-genres-the-arrau-corpus/17E7FA2CB2E36C213E2649479593B6B0},
doi = {10.1017/S1351324919000056},
issn = {1351-3249, 1469-8110},
year = {2020},
date = {2020-01-01},
urldate = {2023-03-31},
journal = {Natural Language Engineering},
volume = {26},
number = {1},
pages = {95--128},
abstract = {This paper presents the second release of arrau, a multigenre corpus of anaphoric information created over 10 years to provide data for the next generation of coreference/anaphora resolution systems combining different types of linguistic and world knowledge with advanced discourse modeling supporting rich linguistic annotations. The distinguishing features of arrau include the following: treating all NPs as markables, including non-referring NPs, and annotating their (non-) referentiality status; distinguishing between several categories of non-referentiality and annotating non-anaphoric mentions; thorough annotation of markable boundaries (minimal/maximal spans, discontinuous markables); annotating a variety of mention attributes, ranging from morphosyntactic parameters to semantic category; annotating the genericity status of mentions; annotating a wide range of anaphoric relations, including bridging relations and discourse deixis; and, finally, annotating anaphoric ambiguity. The current version of the dataset contains 350K tokens and is publicly available from LDC. In this paper, we discuss in detail all the distinguishing features of the corpus, so far only partially presented in a number of conference and workshop papers, and we also discuss the development between the first release of arrau in 2008 and this second one.},
note = {Publisher: Cambridge University Press},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {article}
}
Bell, Benjamin; Kelsey, Elaine; Nye, Benjamin; Bennett, Winston (“Wink”)
Adapting Instruction by Measuring Engagement with Machine Learning in Virtual Reality Training Proceedings Article
In: Sottilare, Robert A.; Schwarz, Jessica (Ed.): Adaptive Instructional Systems, pp. 271–282, Springer International Publishing, Cham, 2020, ISBN: 978-3-030-50788-6.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC
@inproceedings{bell_adapting_2020,
title = {Adapting Instruction by Measuring Engagement with Machine Learning in Virtual Reality Training},
author = {Bell, Benjamin and Kelsey, Elaine and Nye, Benjamin and Bennett, Winston (“Wink”)},
editor = {Sottilare, Robert A. and Schwarz, Jessica},
url = {https://link.springer.com/chapter/10.1007/978-3-030-50788-6_20},
doi = {10.1007/978-3-030-50788-6_20},
isbn = {978-3-030-50788-6},
year = {2020},
date = {2020-01-01},
booktitle = {Adaptive Instructional Systems},
pages = {271–282},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {The USAF has established a new approach to Specialized Undergraduate Pilot Training (SUPT) called Pilot Training Next (PTN) that integrates traditional flying sorties with VR-enabled ground-based training devices and data-driven proficiency tracking to achieve training efficiencies, improve readiness, and increase throughput. Eduworks and USC’s Institute for Creative Technologies are developing machine learning (ML) models that can measure user engagement during any computer-mediated training (simulation, courseware) and offer recommendations for restoring lapses in engagement. We are currently developing and testing this approach, called the Observational Motivation and Engagement Generalized Appliance (OMEGA) in a PTN context. Two factors motivate this work. First, one goal of PTN is for an instructor pilot (IP) to simultaneously monitor multiple simulator rides. Being alerted to distraction, attention and engagement can help an IP manage multiple students at the same time, with recommendations for restoring engagement providing further instructional support. Second, the virtual environment provides a rich source of raw data that machine learning models can use to associate user activity with user engagement. We have created a testbed for data capture in order to construct the ML models, based on theoretical foundations we developed previously. We are running pilots through multiple PTN scenarios and collecting formative data from instructors to evaluate the utility of the recommendations OMEGA generates regarding how lapsed engagement can be restored. We anticipate findings that validate the use of ML models for learning to detect engagement from the rich data sources characteristic of virtual environments. These findings will be applicable across a broad range of conventional and VR training applications.},
keywords = {Learning Sciences, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2019
Rosenbloom, Paul S.; Joshi, Himanshu; Ustun, Volkan
(Sub)Symbolic × (a)symmetric × (non)combinatory: A map of AI approaches spanning symbolic/statistical to neural/ML Proceedings Article
In: Proceedings of the 7th Annual Conference on Advances in Cognitive Systems, pp. 113–131, Cognitive Systems Foundation, Cambridge, MA, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{rosenbloom_subsymbolic_2019,
title = {(Sub)Symbolic × (a)symmetric × (non)combinatory: A map of {AI} approaches spanning symbolic/statistical to neural/{ML}},
author = {Rosenbloom, Paul S. and Joshi, Himanshu and Ustun, Volkan},
url = {https://drive.google.com/file/d/1Ynp75A048Mfuh7e3kf_V7hs5kFD7uHsT/view},
year = {2019},
date = {2019-12-01},
booktitle = {Proceedings of the 7th Annual Conference on Advances in Cognitive Systems},
pages = {113–131},
publisher = {Cognitive Systems Foundation},
address = {Cambridge, MA},
abstract = {The traditional symbolic versus subsymbolic dichotomy can be decomposed into three more basic dichotomies, to yield a 3D (2×2×2) space in which symbolic/statistical and neural/ML approaches to intelligence appear in opposite corners. Filling in all eight resulting cells then yields a map that spans a number of standard AI approaches plus a few that may be less familiar. Based on this map, four hypotheses are articulated, explored, and evaluated concerning its relevance to both a deeper understanding of the field of AI as a whole and the general capabilities required in complete AI/cognitive systems.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Reilly, Adam; Whitcup, Wendy; Liewer, Matt; Mozgai, Sharon
Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied AI Agents in XR Proceedings Article
In: Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR), pp. 308–3084, IEEE, San Diego, CA, USA, 2019, ISBN: 978-1-72815-604-0.
Abstract | Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@inproceedings{hartholt_ubiquitous_2019,
title = {Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied {AI} Agents in {XR}},
author = {Hartholt, Arno and Fast, Ed and Reilly, Adam and Whitcup, Wendy and Liewer, Matt and Mozgai, Sharon},
url = {https://ieeexplore.ieee.org/document/8942321/},
doi = {10.1109/AIVR46125.2019.00072},
isbn = {978-1-72815-604-0},
year = {2019},
date = {2019-12-01},
booktitle = {Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)},
pages = {308–3084},
internal-note = {pages value 308–3084 looks like an IEEE Xplore export artifact; verify against the published proceedings},
publisher = {IEEE},
address = {San Diego, CA, USA},
abstract = {We present an architecture and framework for the development of virtual humans for a range of computing platforms, including mobile, web, Virtual Reality (VR) and Augmented Reality (AR). The framework uses a mix of in-house and commodity technologies to support audio-visual sensing, speech recognition, natural language processing, nonverbal behavior generation and realization, text-to-speech generation, and rendering. This work builds on the Virtual Human Toolkit, which has been extended to support computing platforms beyond Windows. The resulting framework maintains the modularity of the underlying architecture, allows re-use of both logic and content through cloud services, and is extensible by porting lightweight clients. We present the current state of the framework, discuss how we model and animate our characters, and offer lessons learned through several use cases, including expressive character animation in seated VR, shared space and navigation in roomscale VR, autonomous AI in mobile AR, and real-time user performance feedback based on mobile sensors in headset AR.},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Meida; Feng, Andrew; McAlinden, Ryan; Soibelman, Lucio
Photogrammetric Point Cloud Segmentation and Object Information Extraction for Creating Virtual Environments and Simulations Journal Article
In: Journal of Management in Engineering, vol. 36, no. 2, pp. 04019046, 2019, ISSN: 0742-597X, 1943-5479.
Abstract | Links | BibTeX | Tags: STG, UARC
@article{chen_photogrammetric_2019,
title = {Photogrammetric Point Cloud Segmentation and Object Information Extraction for Creating Virtual Environments and Simulations},
author = {Chen, Meida and Feng, Andrew and McAlinden, Ryan and Soibelman, Lucio},
url = {http://ascelibrary.org/doi/10.1061/%28ASCE%29ME.1943-5479.0000737},
doi = {10.1061/(ASCE)ME.1943-5479.0000737},
issn = {0742-597X, 1943-5479},
year = {2019},
date = {2019-11-01},
journal = {Journal of Management in Engineering},
volume = {36},
number = {2},
pages = {04019046},
abstract = {Photogrammetric techniques have dramatically improved over the last few years, enabling the creation of visually compelling three-dimensional (3D) meshes using unmanned aerial vehicle imagery. These high-quality 3D meshes have attracted notice from both academicians and industry practitioners in developing virtual environments and simulations. However, photogrammetric generated point clouds and meshes do not allow both user-level and system-level interaction because they do not contain the semantic information to distinguish between objects. Thus, segmenting generated point clouds and meshes and extracting the associated object information is a necessary step. A framework for point cloud and mesh classification and segmentation is presented in this paper. The proposed framework was designed considering photogrammetric data-quality issues and provides a novel way of extracting object information, including (1) individual tree locations and related features and (2) building footprints. Experiments were conducted to rank different point descriptors and evaluate supervised machine-learning algorithms for segmenting photogrammetric generated point clouds. The proposed framework was validated using data collected at the University of Southern California (USC) and the Muscatatuck Urban Training Center (MUTC). DOI: 10.1061/(ASCE) ME.1943-5479.0000737. © 2019 American Society of Civil Engineers.},
keywords = {STG, UARC},
pubstate = {published},
tppubtype = {article}
}
Feng, Andrew; Gordon, Andrew S.
Latent Terrain Representations for Trajectory Prediction Proceedings Article
In: Proceedings of the 1st ACM SIGSPATIAL International Workshop on Computing with Multifaceted Movement Data - MOVE'19, pp. 1–4, ACM Press, Chicago, IL, USA, 2019, ISBN: 978-1-4503-6951-0.
Abstract | Links | BibTeX | Tags: Narrative, STG, UARC
@inproceedings{feng_latent_2019,
title = {Latent Terrain Representations for Trajectory Prediction},
author = {Feng, Andrew and Gordon, Andrew S.},
url = {http://dl.acm.org/citation.cfm?doid=3356392.3365218},
doi = {10.1145/3356392.3365218},
isbn = {978-1-4503-6951-0},
year = {2019},
date = {2019-11-01},
booktitle = {Proceedings of the 1st ACM SIGSPATIAL International Workshop on Computing with Multifaceted Movement Data - MOVE'19},
pages = {1–4},
publisher = {ACM Press},
address = {Chicago, IL, USA},
abstract = {In natural outdoor environments, the shape of the surface terrain is an important factor in selecting a traversal path, both when operating off-road vehicles and maneuvering on foot. With the increased availability of digital elevation models for outdoor terrain, new opportunities exist to exploit this contextual information to improve automated path prediction. In this paper, we investigate predictive neural network models for outdoor trajectories that traverse terrain with known surface topography. We describe a method of encoding digital surface models as vectors in latent space using Wasserstein Autoencoders, and their use in convolutional neural networks that predict future trajectory positions from past trajectory data. We observe gains in predictive performance across three experiments, using both synthetic and recorded trajectories on real-world terrain.},
keywords = {Narrative, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
An, Capt Eric; Nolty, Anne A T; Amano, Stacy S; Rizzo, Albert A; Buckwalter, J Galen; Rensberger, Jared
Heart Rate Variability as an Index of Resilience Journal Article
In: Military Medicine, 2019, ISSN: 0026-4075, 1930-613X.
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{an_heart_2019,
title = {Heart Rate Variability as an Index of Resilience},
author = {An, Capt Eric and Nolty, Anne A T and Amano, Stacy S and Rizzo, Albert A and Buckwalter, J Galen and Rensberger, Jared},
url = {https://academic.oup.com/milmed/advance-article/doi/10.1093/milmed/usz325/5586497},
doi = {10.1093/milmed/usz325},
issn = {0026-4075, 1930-613X},
year = {2019},
date = {2019-10-01},
journal = {Military Medicine},
abstract = {Introduction: Resilience is the ability to maintain or quickly return to a stable physical and psychological equilibrium despite experiencing stressful events. Flexibility of the autonomic nervous system is particularly important for adaptive stress responses and may contribute to individual differences in resilience. Power spectrum analysis of heart rate variability (HRV) allows measurement of sympathovagal balance, which helps to evaluate autonomic flexibility. The present study investigated HRV as a broad index of resilience. Materials and Methods: Twenty-four male participants from the Army National Guard Special Forces completed psychological measures known to relate to resilience and had HRV measured while undergoing stressful virtual environment scenarios. Pearson product-moment correlations were used to explore the relationships between HRV and resilience factors. All research was conducted with the oversight of the Human Subjects Review Committee of Fuller Theological Seminary. Results: Trends toward significance were reported in order to provide results that would reasonably be expected in a study of higher power. Trends between resilience factors and HRV were found only during specific stress-inducing simulations (see Tables III). Conclusion: Greater resilience to stress was associated with HRV during nonstress periods. Higher levels of resilience to traumatic events were associated with HRV during circumstances that were more stressful and emotionally distressing. Post hoc analysis revealed that specific factors including flexibility, emotional control, and spirituality were driving the relationship between general resilience and HRV following emotionally laden stressors. Less stress vulnerability was associated with HRV following intermittent brief stressors. In sum, HRV appears to represent some aspects of an individual’s overall resilience profile. 
Although resilience remains a complex, multidimensional construct, HRV shows promise as a global psychophysiological index of resilience. This study also offers important perspectives concerning ways to optimize both physical and psychological health.},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Soleymani, Mohammad; Stefanov, Kalin; Kang, Sin-Hwa; Ondras, Jan; Gratch, Jonathan
Multimodal Analysis and Estimation of Intimate Self-Disclosure Proceedings Article
In: Proceedings of the 2019 International Conference on Multimodal Interaction on - ICMI '19, pp. 59–68, ACM Press, Suzhou, China, 2019, ISBN: 978-1-4503-6860-5.
Abstract | Links | BibTeX | Tags: MxR, UARC, Virtual Humans
@inproceedings{soleymani_multimodal_2019,
title = {Multimodal Analysis and Estimation of Intimate Self-Disclosure},
author = {Soleymani, Mohammad and Stefanov, Kalin and Kang, Sin-Hwa and Ondras, Jan and Gratch, Jonathan},
url = {http://dl.acm.org/citation.cfm?doid=3340555.3353737},
doi = {10.1145/3340555.3353737},
isbn = {978-1-4503-6860-5},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 International Conference on Multimodal Interaction on - ICMI '19},
pages = {59–68},
publisher = {ACM Press},
address = {Suzhou, China},
abstract = {Self-disclosure to others has a proven benefit for one’s mental health. It is shown that disclosure to computers can be similarly beneficial for emotional and psychological well-being. In this paper, we analyzed verbal and nonverbal behavior associated with self-disclosure in two datasets containing structured human-human and human-agent interviews from more than 200 participants. Correlation analysis of verbal and nonverbal behavior revealed that linguistic features such as affective and cognitive content in verbal behavior, and nonverbal behavior such as head gestures are associated with intimate self-disclosure. A multimodal deep neural network was developed to automatically estimate the level of intimate self-disclosure from verbal and nonverbal behavior. Between modalities, verbal behavior was the best modality for estimating self-disclosure within-corpora achieving r = 0.66. However, the cross-corpus evaluation demonstrated that nonverbal behavior can outperform language modality in cross-corpus evaluation. Such automatic models can be deployed in interactive virtual agents or social robots to evaluate rapport and guide their conversational strategy.},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}