Publications
2022
Leitner, Maxyn; Greenwald, Eric; Montgomery, Ryan; Wang, Ning
Design and Evaluation of ARIN-561: An Educational Game for Youth Artificial Intelligence Education Proceedings Article
In: Proceedings of the 30th International Conference on Computers in Education, 2022.
@inproceedings{leitner_design_2022,
title = {Design and Evaluation of ARIN-561: An Educational Game for Youth Artificial Intelligence Education},
author = {Maxyn Leitner and Eric Greenwald and Ryan Montgomery and Ning Wang},
url = {https://par.nsf.gov/servlets/purl/10440195},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the 30th International Conference on Computers in Education},
abstract = {Artificial Intelligence (AI) is increasingly vital to our everyday lives. Future generations will not only consume AI, but work with AI-driven tools and contribute to the development of AI. As such, students will need exposure to AI knowledge at a younger age. Despite this need, relatively little is currently known about how to most effectively provide AI education to K-12 (kindergarten through 12th grade) students. In this paper, we discuss the design of an educational game for high-school AI education called ARIN-561. The game centers around two agents – a player character and a companion robot – as the story and learning experience unfold through conversations between the two agents and explorations that bond them. A series of studies were carried out at high schools in the United States to evaluate the efficacy of the game. Results indicate the potential of ARIN-561 to build AI knowledge, especially when students spend more time in the game.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Talbot, Thomas Brett; Chinara, Chinmay
Open Medical Gesture: An Open-Source Experiment in Naturalistic Physical Interactions for Mixed and Virtual Reality Simulations Proceedings Article
In: Human Factors in Virtual Environments and Game Design, AHFE Open Access, 2022, ISBN: 978-1-958651-26-1, (ISSN: 27710718 Issue: 50).
@inproceedings{talbot_open_2022,
title = {Open Medical Gesture: An Open-Source Experiment in Naturalistic Physical Interactions for Mixed and Virtual Reality Simulations},
author = {Thomas Brett Talbot and Chinmay Chinara},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-26-1/article/978-1-958651-26-1_0},
doi = {10.54941/ahfe1002054},
isbn = {978-1-958651-26-1},
year = {2022},
date = {2022-01-01},
urldate = {2023-04-03},
booktitle = {Human Factors in Virtual Environments and Game Design},
volume = {50},
publisher = {AHFE Open Access},
abstract = {Mixed Reality (MR) and Virtual Reality (VR) simulations are hampered by requirements for hand controllers or by attempts to perseverate in the use of two-dimensional computer interface paradigms from the 1980s. From our efforts to produce more naturalistic interactions for combat medic training for the military, we have developed an open-source toolkit that enables direct, hand-controlled, responsive interactions; it is sensor independent and can function with depth-sensing cameras, webcams, or sensory gloves. From this research and a review of the current literature, we have discerned several best approaches for hand-based human-computer interactions which provide intuitive, responsive, useful, and low-frustration experiences for VR users. The center of an effective gesture system is a universal hand model that can map to inputs from several different kinds of sensors rather than depending on a specific commercial product. Parts of the hand are effectors in simulation space with a physics-based model. Therefore, translational and rotational forces from the hands will impact physical objects in VR, which varies based on the mass of the virtual objects. We incorporate computer code with objects, calling them “Smart Objects”, which allows such objects to have movement properties and collision detection for expected manipulation. Examples of smart objects include scissors, a ball, a turning knob, a moving lever, or a human figure with moving limbs. Articulation points contain collision detectors and code to assist in expected hand actions. We include a library of more than 40 Smart Objects in the toolkit. Thus, it is possible to throw a ball, hit that ball with a bat, cut a bandage, turn on a ventilator, or lift and inspect a human arm. We mediate the interaction of the hands with virtual objects. Hands often violate the rules of a virtual world simply by passing through objects, so one must interpret user intent. This can be achieved by introducing stickiness of the hands to objects. If the human’s hands overshoot an object, we place the hand onto that object’s surface unless the hand passes the object by a significant distance. We also make hands and fingers contact an object according to the object’s contours and do not allow fingers to sink into the interior of an object. Haptics, or a sense of physical resistance and tactile sensation from contacting physical objects, is a supremely difficult technical challenge and an expensive pursuit. Our approach ignores true haptics, but we have experimented with an alternative approach, called audio tactile synesthesia, where we substitute the sensation of touch for that of sound. The idea is to associate parts of each hand with a tone of a specific frequency upon contacting objects. The attack rate of the sound envelope varies with the velocity of contact and the hardness of the object being ‘touched’. Such sounds can feel softer or harder depending on the nature of the ‘touch’ being experienced. This substitution technique can provide tactile feedback through indirect, yet still naturalistic, means. The artificial intelligence (AI) technique used to determine discrete hand gestures and motions within the physical space is a special form of AI called Long Short-Term Memory (LSTM). LSTM allows much faster and more flexible recognition than other machine learning approaches; it is particularly effective with points in motion, and latency of recognition is very low. In addition to LSTM, we apply other synthetic vision and object recognition AI to the discrimination of real-world objects. This enables new methods of conducting virtual simulations. For example, it is possible to pick up a virtual syringe and inject a medication into a virtual patient through hand motions. We track the hand points to contact with the virtual syringe. We also detect when the hand is compressing the syringe plunger. We could also use virtual medications and instruments on human actors or manikins, not just on virtual objects. With object recognition AI, we can place a syringe on a tray in the physical world; the human user can pick up the syringe and use it on a virtual patient. Thus, we are able to blend physical and virtual simulation together seamlessly in a highly intuitive and naturalistic manner. The techniques and technologies explained here represent a baseline capability whereby interacting in mixed and virtual reality can now be much more natural and intuitive than it has ever been. We have now passed a threshold where we can do away with game controllers and magnetic trackers for VR. This advancement will contribute to greater adoption of VR solutions. To foster this, our team has committed to freely sharing these technologies for all purposes and at no cost as an open-source tool. We encourage the scientific, research, educational, and medical communities to adopt these resources, determine their effectiveness, and utilize these tools and practices to grow the body of useful VR applications.},
note = {ISSN: 27710718
Issue: 50},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
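The abstract above describes two mechanisms concretely enough to sketch: the “stickiness” mediation that snaps an overshooting hand back onto an object’s surface, and the audio tactile synesthesia mapping from contact events to tones. The Python below is a minimal illustration of those two ideas only; the function names, threshold, and envelope ranges are invented here and are not the toolkit’s actual API.

import numpy as np

def mediate_hand_position(hand_pos, surface_point, surface_normal,
                          release_distance=0.08):
    """Snap a tracked hand onto an object's surface ("stickiness").
    A small overshoot past the surface is treated as intent to touch;
    beyond `release_distance` (meters, an invented threshold), let go."""
    hand_pos = np.asarray(hand_pos, dtype=float)
    surface_point = np.asarray(surface_point, dtype=float)
    surface_normal = np.asarray(surface_normal, dtype=float)
    # Signed distance from the surface plane; negative = penetration.
    signed_dist = np.dot(hand_pos - surface_point, surface_normal)
    if -release_distance < signed_dist < 0.0:
        # Small overshoot: project the hand back onto the surface.
        return hand_pos - signed_dist * surface_normal
    return hand_pos  # no contact, or overshoot too large to be intent

def synesthesia_tone(base_freq_hz, contact_velocity, object_hardness):
    """Audio tactile synesthesia: each hand part owns a tone frequency;
    the envelope's attack shortens with contact velocity and object
    hardness, so hard/fast contacts sound sharper. Inputs are assumed
    normalized to [0, 1]; the ranges are invented for this sketch."""
    sharpness = min(1.0, max(0.0, contact_velocity * object_hardness))
    attack_seconds = 0.005 + 0.15 * (1.0 - sharpness)
    return base_freq_hz, attack_seconds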
Gurney, Nikolos; Pynadath, David V.; Wang, Ning
Measuring and Predicting Human Trust in Recommendations from an AI Teammate Proceedings Article
In: Degen, Helmut; Ntoa, Stavroula (Ed.): Artificial Intelligence in HCI, pp. 22–34, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-05643-7.
@inproceedings{gurney_measuring_2022,
title = {Measuring and Predicting Human Trust in Recommendations from an AI Teammate},
author = {Nikolos Gurney and David V. Pynadath and Ning Wang},
editor = {Helmut Degen and Stavroula Ntoa},
url = {https://link.springer.com/chapter/10.1007/978-3-031-05643-7_2},
doi = {10.1007/978-3-031-05643-7_2},
isbn = {978-3-031-05643-7},
year = {2022},
date = {2022-01-01},
booktitle = {Artificial Intelligence in HCI},
pages = {22–34},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Predicting compliance with AI recommendations and knowing when to intervene are critical facets of human-AI teaming. AIs are typically deployed in settings where their abilities to evaluate decision variables far exceed the abilities of their human counterparts. However, even though AIs excel at weighing multiple issues and computing near optimal solutions with speed and accuracy beyond that of any human, they still make mistakes. Thus, perfect compliance may be undesirable. This means, just as individuals must know when to follow the advice of other people, it is critical for them to know when to adopt the recommendations from their AI. Well-calibrated trust is thought to be a fundamental aspect of this type of knowledge. We compare the ability of a common trust inventory and the ability of a behavioral measure of trust to predict compliance and success in a reconnaissance mission. We interpret the experimental results to suggest that the behavioral measure is a better predictor of overall mission compliance and success. We discuss how this measure could possibly be used in compliance interventions and related open questions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Greenwald, Eric; Montgomery, Ryan; Leitner, Maxyn
ARIN-561: An Educational Game for Learning Artificial Intelligence for High-School Students Proceedings Article
In: Rodrigo, Maria Mercedes; Matsuda, Noburu; Cristea, Alexandra I.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium, pp. 528–531, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-11647-6.
@inproceedings{wang_arin-561_2022,
title = {ARIN-561: An Educational Game for Learning Artificial Intelligence for High-School Students},
author = {Ning Wang and Eric Greenwald and Ryan Montgomery and Maxyn Leitner},
editor = {Maria Mercedes Rodrigo and Noburu Matsuda and Alexandra I. Cristea and Vania Dimitrova},
url = {https://link.springer.com/chapter/10.1007/978-3-031-11647-6_108},
doi = {10.1007/978-3-031-11647-6_108},
isbn = {978-3-031-11647-6},
year = {2022},
date = {2022-01-01},
booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium},
pages = {528–531},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Artificial Intelligence (AI) is increasingly vital to our future generations, who will join a workforce that utilizes AI-driven tools and contributes to the advancement of AI. Today’s students will need exposure to AI knowledge at a younger age. Relatively little is currently known about how to most effectively provide AI education to K-12 students. In this paper, we discuss the design and evaluation of an educational game for high-school AI education called ARIN-561. Results from pilot studies indicate the potential of ARIN-561 to build AI knowledge, especially when students spend more time in the game.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Karpurapu, Abhilash; Jajodia, Aditya; Merchant, Chirag
Toward Charismatic Virtual Agents: How to Animate Your Speech and Be Charismatic Proceedings Article
In: Kurosu, Masaaki (Ed.): Human-Computer Interaction. User Experience and Behavior, pp. 580–590, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-05412-9.
@inproceedings{wang_toward_2022,
title = {Toward Charismatic Virtual Agents: How to Animate Your Speech and Be Charismatic},
author = {Ning Wang and Abhilash Karpurapu and Aditya Jajodia and Chirag Merchant},
editor = {Masaaki Kurosu},
url = {https://link.springer.com/chapter/10.1007/978-3-031-05412-9_39},
doi = {10.1007/978-3-031-05412-9_39},
isbn = {978-3-031-05412-9},
year = {2022},
date = {2022-01-01},
booktitle = {Human-Computer Interaction. User Experience and Behavior},
pages = {580–590},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Charisma is a powerful device of communication and persuasion. Researchers have pinpointed specific behaviors that contribute to the perception of charisma. How can we realize such behaviors in a virtual character? In this paper, we discuss our work in the design of charismatic behavior for a virtual human. We developed a series of verbal charismatic strategies based on the research on charismatic leaders, which was then used to re-write an existing tutorial on the human circulatory system to express charisma. We then collected voice recordings of the tutorial in both charismatic and non-charismatic voices using actors from a crowd-sourcing platform. In this paper, we present the analysis of the charismatic and non-charismatic voice recordings, and discuss what nonverbal behaviors in speeches contribute to perceived charisma. Results can shed light on the synthesis of charismatic speeches for virtual characters.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
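As a rough illustration of the kind of acoustic analysis this abstract describes (comparing charismatic and non-charismatic voice recordings), the sketch below summarizes a few prosodic cues commonly linked to perceived charisma. It assumes the librosa library; the specific feature set is a guess, not the one used in the paper.

import numpy as np
import librosa  # assumed available; any prosody toolkit would do

def prosodic_profile(wav_path):
    """Summarize prosodic cues often associated with charisma:
    pitch level/variability, loudness variability, and a crude
    speech-rate proxy from onset density."""
    y, sr = librosa.load(wav_path, sr=None)
    f0 = librosa.yin(y, fmin=60, fmax=400, sr=sr)   # frame-wise pitch (Hz)
    rms = librosa.feature.rms(y=y)[0]               # frame-wise energy
    onsets = librosa.onset.onset_detect(y=y, sr=sr)
    return {
        "pitch_mean_hz": float(np.mean(f0)),
        "pitch_std_hz": float(np.std(f0)),      # wider pitch variation
        "energy_std": float(np.std(rms)),       # more dynamic loudness
        "onset_rate_per_s": float(len(onsets) / (len(y) / sr)),
    }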
Gurney, Nikolos; King, Tyler; Miller, John H.
An Experimental Method for Studying Complex Choices Proceedings Article
In: Stephanidis, Constantine; Antona, Margherita; Ntoa, Stavroula; Salvendy, Gavriel (Ed.): HCI International 2022 – Late Breaking Posters, pp. 39–45, Springer Nature Switzerland, Cham, 2022, ISBN: 978-3-031-19679-9.
@inproceedings{gurney_experimental_2022,
title = {An Experimental Method for Studying Complex Choices},
author = {Nikolos Gurney and Tyler King and John H. Miller},
editor = {Constantine Stephanidis and Margherita Antona and Stavroula Ntoa and Gavriel Salvendy},
url = {https://link.springer.com/chapter/10.1007/978-3-031-19679-9_6},
doi = {10.1007/978-3-031-19679-9_6},
isbn = {978-3-031-19679-9},
year = {2022},
date = {2022-01-01},
booktitle = {HCI International 2022 – Late Breaking Posters},
pages = {39–45},
publisher = {Springer Nature Switzerland},
address = {Cham},
series = {Communications in Computer and Information Science},
abstract = {The promise of computational decision aids, from review sites to emerging augmented cognition technology, is the potential for better choice outcomes. This promise is grounded in the notion that we understand human decision processes well enough to design useful interventions. Although researchers have made considerable advances in the understanding of human judgment and decision making, these efforts are mostly based on the analysis of simple, often linear choices. Cumulative Prospect Theory (CPT), a famous explanation for decision making under uncertainty, was developed and validated using binary choice experiments in which options varied on a single dimension. Behavioral science has largely followed this simplified methodology. Here, we introduce an experimental paradigm specifically for studying humans making complex choices that incorporate multiple variables with nonlinear interactions. The task involves tuning dials, each of which controls a different dimension of a nonlinear problem. Initial results show that in such an environment participants demonstrate classic cognitive artifacts, such as anchoring and adjusting, along with falling into exploitive traps that prevent adequate exploration of these complex decisions. Preventing such errors suggests a potentially valuable role for deploying algorithmic decision aids to enhance decision making in complex choices.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
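The experimental paradigm is concrete enough to mock up: several dials jointly control a nonlinear payoff with hidden pairwise interactions, and a one-dial-at-a-time “anchor and adjust” search can stall in exactly the exploitive traps the abstract reports. The toy below invents the functional form purely for illustration; it is not the paper’s task.

import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(scale=0.5, size=(4, 4))  # hidden pairwise couplings

def payoff(dials):
    """Each of 4 dials matters on its own, but nonlinear pairwise
    interactions couple them, so adjusting one dial at a time can
    stall in a local optimum. (Functional form invented.)"""
    d = np.asarray(dials, dtype=float)
    return -np.sum((d - 0.6) ** 2) + d @ W @ d

def tune_one_dial_at_a_time(steps=200, lr=0.05):
    """Anchor-and-adjust style search: perturb a single dial per
    step, keeping the change only if the payoff improves."""
    d = np.full(4, 0.5)
    for t in range(steps):
        i = t % 4
        trial = d.copy()
        trial[i] += lr * rng.choice([-1.0, 1.0])
        if payoff(trial) > payoff(d):
            d = trial
    return d, payoff(d)

print(tune_one_dial_at_a_time())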
Chawla, Kushal; Lucas, Gale; May, Jonathan; Gratch, Jonathan
Opponent Modeling in Negotiation Dialogues by Related Data Adaptation Proceedings Article
In: Findings of the Association for Computational Linguistics: NAACL 2022, pp. 661–674, Association for Computational Linguistics, Seattle, United States, 2022.
@inproceedings{chawla_opponent_2022,
title = {Opponent Modeling in Negotiation Dialogues by Related Data Adaptation},
author = {Kushal Chawla and Gale Lucas and Jonathan May and Jonathan Gratch},
url = {https://aclanthology.org/2022.findings-naacl.50},
doi = {10.18653/v1/2022.findings-naacl.50},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-26},
booktitle = {Findings of the Association for Computational Linguistics: NAACL 2022},
pages = {661–674},
publisher = {Association for Computational Linguistics},
address = {Seattle, United States},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
DiNinni, Richard; Rizzo, Albert
Sensing Human Signals of Motivation Processes During STEM Tasks Proceedings Article
In: Rodrigo, Maria Mercedes; Matsuda, Noburu; Cristea, Alexandra I.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium, pp. 163–167, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-11647-6.
@inproceedings{dininni_sensing_2022,
title = {Sensing Human Signals of Motivation Processes During STEM Tasks},
author = {Richard DiNinni and Albert Rizzo},
editor = {Maria Mercedes Rodrigo and Noburu Matsuda and Alexandra I. Cristea and Vania Dimitrova},
doi = {10.1007/978-3-031-11647-6_28},
isbn = {978-3-031-11647-6},
year = {2022},
date = {2022-01-01},
booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium},
pages = {163–167},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {This paper outlines the linking of a multi-modal sensing platform with an Intelligent Tutoring System to perceive the motivational state of the learner during STEM tasks. Motivation is a critical element to learning but receives little attention in comparison to strategies related to cognitive processes. The EMPOWER project has developed a novel platform that offers researchers an opportunity to capture a learner’s multi-modal behavioral signals to develop models of motivation problems that can be used to develop best practice strategies for instructional systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2021
Tran, Minh; Bradley, Ellen; Matvey, Michelle; Woolley, Joshua; Soleymani, Mohammad
Modeling Dynamics of Facial Behavior for Mental Health Assessment Proceedings Article
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–5, IEEE, Jodhpur, India, 2021, ISBN: 978-1-66543-176-7.
@inproceedings{tran_modeling_2021,
title = {Modeling Dynamics of Facial Behavior for Mental Health Assessment},
author = {Minh Tran and Ellen Bradley and Michelle Matvey and Joshua Woolley and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9666955/},
doi = {10.1109/FG52635.2021.9666955},
isbn = {978-1-66543-176-7},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-23},
booktitle = {2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)},
pages = {1–5},
publisher = {IEEE},
address = {Jodhpur, India},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Yin, Yufeng; Lu, Liupei; Wu, Yizhen; Soleymani, Mohammad
Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection Proceedings Article
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–8, IEEE, Jodhpur, India, 2021, ISBN: 978-1-66543-176-7.
@inproceedings{yin_self-supervised_2021,
title = {Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection},
author = {Yufeng Yin and Liupei Lu and Yizhen Wu and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9667048/},
doi = {10.1109/FG52635.2021.9667048},
isbn = {978-1-66543-176-7},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-23},
booktitle = {2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)},
pages = {1–8},
publisher = {IEEE},
address = {Jodhpur, India},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Lixing; Gurney, Nikolos; McCullough, Kyle; Ustun, Volkan
Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations Proceedings Article
In: 2021 Winter Simulation Conference (WSC), pp. 1–12, IEEE, Phoenix, AZ, USA, 2021, ISBN: 978-1-66543-311-2.
@inproceedings{liu_graph_2021,
title = {Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations},
author = {Lixing Liu and Nikolos Gurney and Kyle McCullough and Volkan Ustun},
url = {https://ieeexplore.ieee.org/document/9715433/},
doi = {10.1109/WSC52266.2021.9715433},
isbn = {978-1-66543-311-2},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-21},
booktitle = {2021 Winter Simulation Conference (WSC)},
pages = {1–12},
publisher = {IEEE},
address = {Phoenix, AZ, USA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Jiaman; Villegas, Ruben; Ceylan, Duygu; Yang, Jimei; Kuang, Zhengfei; Li, Hao; Zhao, Yajie
Task-Generic Hierarchical Human Motion Prior using VAEs Proceedings Article
In: 2021 International Conference on 3D Vision (3DV), pp. 771–781, IEEE, London, United Kingdom, 2021, ISBN: 978-1-66542-688-6.
@inproceedings{li_task-generic_2021,
title = {Task-Generic Hierarchical Human Motion Prior using VAEs},
author = {Jiaman Li and Ruben Villegas and Duygu Ceylan and Jimei Yang and Zhengfei Kuang and Hao Li and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9665881/},
doi = {10.1109/3DV53792.2021.00086},
isbn = {978-1-66542-688-6},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-22},
booktitle = {2021 International Conference on 3D Vision (3DV)},
pages = {771–781},
publisher = {IEEE},
address = {London, United Kingdom},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Leeds, Andrew; Kwok, David; Fast, Ed; Rizzo, Albert Skip; Hartholt, Arno
Building BRAVEMIND Vietnam: User-Centered Design for Virtual Reality Exposure Therapy Proceedings Article
In: 2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR), pp. 247–250, 2021.
@inproceedings{mozgai_building_2021,
title = {Building BRAVEMIND Vietnam: User-Centered Design for Virtual Reality Exposure Therapy},
author = {Sharon Mozgai and Andrew Leeds and David Kwok and Ed Fast and Albert Skip Rizzo and Arno Hartholt},
doi = {10.1109/AIVR52153.2021.00056},
year = {2021},
date = {2021-11-01},
booktitle = {2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)},
pages = {247–250},
abstract = {There has been a recent resurgence of Vietnam-era Veterans seeking Veterans Affairs (VA) services for post-traumatic stress disorder (PTSD). Multiple studies have reported positive outcomes using Virtual Reality Exposure Therapy (VRET) with Iraq/Afghanistan Veteran PTSD patients, but there have been fewer investigations into the acceptability and feasibility of VRET with older Veterans. We have extended an established VRET system, BRAVEMIND, to include Vietnam-relevant content following a user-centered iterative design methodology. This paper will present the BRAVEMIND Vietnam VRET system, including setup, content, and technical architecture, along with the findings of an initial focus group with Vietnam Veterans. These Veterans rated this system to be acceptable as a treatment tool for combat-related PTSD and provided valuable first-person accounts of their time in Vietnam to help guide the evolution of the VR content.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; McCullough, Kyle; Fast, Ed; Leeds, Andrew; Mozgai, Sharon; Aris, Tim; Ustun, Volkan; Gordon, Andrew; McGroarty, Christopher
Rapid Prototyping for Simulation and Training with the Rapid Integration & Development Environment (RIDE) Proceedings Article
In: 2021.
@inproceedings{hartholt_rapid_2021,
title = {Rapid Prototyping for Simulation and Training with the Rapid Integration & Development Environment (RIDE)},
author = {Arno Hartholt and Kyle McCullough and Ed Fast and Andrew Leeds and Sharon Mozgai and Tim Aris and Volkan Ustun and Andrew Gordon and Christopher McGroarty},
year = {2021},
date = {2021-11-01},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Davis, Matt
Augmented Reality in Natural History Museums: Impact on Visitor Engagement and Science Learning Proceedings Article
In: GSA, 2021.
@inproceedings{davis_augment_2021,
title = {Augmented Reality in Natural History Museums: Impact on Visitor Engagement and Science Learning},
author = {Matt Davis},
url = {https://gsa.confex.com/gsa/2021AM/webprogram/Paper371425.html},
year = {2021},
date = {2021-10-01},
urldate = {2023-03-31},
publisher = {GSA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Xiang, Sitao; Gu, Yuming; Xiang, Pengda; Chai, Menglei; Li, Hao; Zhao, Yajie; He, Mingming
DisUnknown: Distilling Unknown Factors for Disentanglement Learning Proceedings Article
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 14790–14799, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
@inproceedings{xiang_disunknown_2021,
title = {DisUnknown: Distilling Unknown Factors for Disentanglement Learning},
author = {Sitao Xiang and Yuming Gu and Pengda Xiang and Menglei Chai and Hao Li and Yajie Zhao and Mingming He},
url = {https://ieeexplore.ieee.org/document/9709965/},
doi = {10.1109/ICCV48922.2021.01454},
isbn = {978-1-66542-812-5},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-23},
booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {14790–14799},
publisher = {IEEE},
address = {Montreal, QC, Canada},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kontogiorgos, Dimosthenis; Tran, Minh; Gustafson, Joakim; Soleymani, Mohammad
A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures Proceedings Article
In: Proceedings of the 2021 International Conference on Multimodal Interaction, pp. 112–120, ACM, Montréal QC Canada, 2021, ISBN: 978-1-4503-8481-0.
@inproceedings{kontogiorgos_systematic_2021,
title = {A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures},
author = {Dimosthenis Kontogiorgos and Minh Tran and Joakim Gustafson and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3462244.3479887},
doi = {10.1145/3462244.3479887},
isbn = {978-1-4503-8481-0},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-23},
booktitle = {Proceedings of the 2021 International Conference on Multimodal Interaction},
pages = {112–120},
publisher = {ACM},
address = {Montréal QC Canada},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Toyoda, Yuushi; Lucas, Gale; Gratch, Jonathan
Predicting Worker Accuracy from Nonverbal Behaviour: Benefits and Potential for Algorithmic Bias Proceedings Article
In: Companion Publication of the 2021 International Conference on Multimodal Interaction, pp. 25–30, ACM, Montreal QC Canada, 2021, ISBN: 978-1-4503-8471-1.
@inproceedings{toyoda_predicting_2021,
title = {Predicting Worker Accuracy from Nonverbal Behaviour: Benefits and Potential for Algorithmic Bias},
author = {Yuushi Toyoda and Gale Lucas and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3461615.3485427},
doi = {10.1145/3461615.3485427},
isbn = {978-1-4503-8471-1},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-28},
booktitle = {Companion Publication of the 2021 International Conference on Multimodal Interaction},
pages = {25–30},
publisher = {ACM},
address = {Montreal QC Canada},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Shichen; Zhou, Yichao; Zhao, Yajie
VaPiD: A Rapid Vanishing Point Detector via Learned Optimizers Proceedings Article
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 12839–12848, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
@inproceedings{liu_vapid_2021,
title = {VaPiD: A Rapid Vanishing Point Detector via Learned Optimizers},
author = {Shichen Liu and Yichao Zhou and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9711313/},
doi = {10.1109/ICCV48922.2021.01262},
isbn = {978-1-66542-812-5},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-22},
booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {12839–12848},
publisher = {IEEE},
address = {Montreal, QC, Canada},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2020
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert "Skip"
An Adaptive Agent-Based Interface for Personalized Health Interventions Proceedings Article
In: Proceedings of the 25th International Conference on Intelligent User Interfaces Companion, pp. 118–119, ACM, Cagliari Italy, 2020, ISBN: 978-1-4503-7513-9.
@inproceedings{mozgai_adaptive_2020,
title = {An Adaptive Agent-Based Interface for Personalized Health Interventions},
author = {Sharon Mozgai and Arno Hartholt and Albert "Skip" Rizzo},
url = {https://dl.acm.org/doi/10.1145/3379336.3381467},
doi = {10.1145/3379336.3381467},
isbn = {978-1-4503-7513-9},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces Companion},
pages = {118–119},
publisher = {ACM},
address = {Cagliari Italy},
abstract = {This demo introduces a novel mHealth application with an agent-based interface designed to collect multimodal data with passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin) as well as through user self-report. This mHealth application delivers personalized and adaptive multimedia content via a smartphone application specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health via novel adaptive logic-based algorithms while employing behavior change techniques (e.g., goal-setting, barrier identification, etc.). A virtual human coach leads all interactions to improve adherence.},
keywords = {MedVR, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
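The adaptive, logic-based routing this abstract describes can be pictured as a small rule table over passively sensed summaries. The sketch below is a hypothetical stand-in: the thresholds and module names are invented, and only the three health domains and behavior change techniques named in the abstract are taken from the source.

def pick_daily_intervention(steps, sleep_hours, mood_score):
    """Route the user to a physical, emotional, or cognitive module
    from passive-sensor summaries. Thresholds are illustrative only;
    mood_score is assumed normalized to [0, 1] from self-report."""
    if steps < 4000:
        return "physical: short walking challenge + goal-setting prompt"
    if sleep_hours < 6:
        return "emotional: wind-down exercise + barrier identification"
    if mood_score < 0.4:
        return "emotional: check-in dialogue with the virtual coach"
    return "cognitive: brief training exercise + performance feedback"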
Shmueli-Scheuer, Michal; Artstein, Ron; Khazaeni, Yasaman; Fang, Hao; Liao, Q. Vera
user2agent: 2nd Workshop on User-Aware Conversational Agents Proceedings Article
In: Proceedings of the 25th International Conference on Intelligent User Interfaces Companion, pp. 9–10, Association for Computing Machinery, New York, NY, USA, 2020, ISBN: 978-1-4503-7513-9.
@inproceedings{shmueli-scheuer_user2agent_2020,
title = {user2agent: 2nd Workshop on User-Aware Conversational Agents},
author = {Michal Shmueli-Scheuer and Ron Artstein and Yasaman Khazaeni and Hao Fang and Q. Vera Liao},
url = {https://doi.org/10.1145/3379336.3379356},
doi = {10.1145/3379336.3379356},
isbn = {978-1-4503-7513-9},
year = {2020},
date = {2020-03-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces Companion},
pages = {9–10},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IUI '20},
abstract = {Conversational agents are becoming increasingly popular. These systems present an extremely rich and challenging research space for addressing many aspects of user awareness and adaptation, such as user profiles, contexts, personalities, emotions, social dynamics, conversational styles, etc. Adaptive interfaces are of long-standing interest for the HCI community. Meanwhile, new machine learning approaches are introduced in the current generation of conversational agents, such as deep learning, reinforcement learning, and active learning. It is imperative to consider how various aspects of user-awareness should be handled by these new techniques. The goal of this workshop is to bring together researchers in HCI, user modeling, and the AI and NLP communities from both industry and academia, who are interested in advancing the state-of-the-art on the topic of user-aware conversational agents. Through a focused and open exchange of ideas and discussions, we will work to identify central research topics in user-aware conversational agents and develop a strong interdisciplinary foundation to address them.},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert
The Passive Sensing Agent: A Multimodal Adaptive mHealth Application Proceedings Article
In: Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops), pp. 1–3, IEEE, Austin, TX, USA, 2020, ISBN: 978-1-72814-716-1.
@inproceedings{mozgai_passive_2020,
title = {The Passive Sensing Agent: A Multimodal Adaptive mHealth Application},
author = {Sharon Mozgai and Arno Hartholt and Albert Rizzo},
url = {https://ieeexplore.ieee.org/document/9156177/},
doi = {10.1109/PerComWorkshops48775.2020.9156177},
isbn = {978-1-72814-716-1},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)},
pages = {1–3},
publisher = {IEEE},
address = {Austin, TX, USA},
abstract = {We are demoing the Passive Sensing Agent (PSA), an mHealth virtual human coach, that collects multimodal data through passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin). This virtual human interface delivers adaptive multimedia content via a smartphone application that is specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health. Initially developed for the military, the PSA delivers health interventions (e.g., educational exercises, physical challenges, and performance feedback) matched to the individual user via novel adaptive logic-based algorithms while employing various behavior change techniques (e.g., goal-setting, barrier identification, rewards, modeling, etc.). A virtual human coach leads all interactions, including the first-time user experience and the brief daily sessions. All interactions were specifically designed to engage and motivate the user while continuously collecting data on their cognitive, emotional, and physical fitness. This multi-component application is integrated and deployed on an iPhone and Apple Watch prototype; a civilian version is currently in development.},
keywords = {MedVR, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Meida; Feng, Andrew; Prasad, Pratusha Bhuvana; McAlinden, Ryan; Soibelman, Lucio; Enloe, Mike
Fully Automated Photogrammetric Data Segmentation and Object Information Extraction Approach for Creating Simulation Terrain Proceedings Article
In: Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC), pp. 13, ResearchGate, Orlando, FL, 2020.
@inproceedings{chen_fully_2020,
title = {Fully Automated Photogrammetric Data Segmentation and Object Information Extraction Approach for Creating Simulation Terrain},
author = {Meida Chen and Andrew Feng and Pratusha Bhuvana Prasad and Ryan McAlinden and Lucio Soibelman and Mike Enloe},
url = {https://www.researchgate.net/publication/338557943_Fully_Automated_Photogrammetric_Data_Segmentation_and_Object_Information_Extraction_Approach_for_Creating_Simulation_Terrain},
year = {2020},
date = {2020-01-01},
booktitle = {Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC)},
pages = {13},
publisher = {ResearchGate},
address = {Orlando, FL},
abstract = {Our previous works have demonstrated that visually realistic 3D meshes can be automatically reconstructed with low-cost, off-the-shelf unmanned aerial systems (UAS) equipped with capable cameras and efficient photogrammetric software techniques (McAlinden, Suma, Grechkin, & Enloe, 2015; Spicer, McAlinden, Conover, & Adelphi, 2016). However, such generated data do not contain semantic information/features of objects (i.e., man-made objects, vegetation, ground, object materials, etc.) and cannot support sophisticated user-level and system-level interaction. Considering the use case of the data in creating realistic virtual environments for training and simulations (i.e., mission planning, rehearsal, threat detection, etc.), segmenting the data and extracting object information are essential tasks. Previous studies have focused on and made valuable contributions to segmenting Light Detection and Ranging (LIDAR) generated 3D point clouds and classifying ground materials from real-world images. However, only a few studies have focused on the data created using the photogrammetric technique.},
keywords = {Graphics, Narrative, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Bell, Benjamin; Kelsey, Elaine; Nye, Benjamin; Bennett, Winston (“Wink”)
Adapting Instruction by Measuring Engagement with Machine Learning in Virtual Reality Training Proceedings Article
In: Sottilare, Robert A.; Schwarz, Jessica (Ed.): Adaptive Instructional Systems, pp. 271–282, Springer International Publishing, Cham, 2020, ISBN: 978-3-030-50788-6.
@inproceedings{bell_adapting_2020,
title = {Adapting Instruction by Measuring Engagement with Machine Learning in Virtual Reality Training},
author = {Benjamin Bell and Elaine Kelsey and Benjamin Nye and Winston (“Wink”) Bennett},
editor = {Robert A. Sottilare and Jessica Schwarz},
url = {https://link.springer.com/chapter/10.1007/978-3-030-50788-6_20},
doi = {10.1007/978-3-030-50788-6_20},
isbn = {978-3-030-50788-6},
year = {2020},
date = {2020-01-01},
booktitle = {Adaptive Instructional Systems},
pages = {271–282},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {The USAF has established a new approach to Specialized Undergraduate Pilot Training (SUPT) called Pilot Training Next (PTN) that integrates traditional flying sorties with VR-enabled ground-based training devices and data-driven proficiency tracking to achieve training efficiencies, improve readiness, and increase throughput. Eduworks and USC’s Institute for Creative Technologies are developing machine learning (ML) models that can measure user engagement during any computer-mediated training (simulation, courseware) and offer recommendations for restoring lapses in engagement. We are currently developing and testing this approach, called the Observational Motivation and Engagement Generalized Appliance (OMEGA) in a PTN context. Two factors motivate this work. First, one goal of PTN is for an instructor pilot (IP) to simultaneously monitor multiple simulator rides. Being alerted to distraction, attention and engagement can help an IP manage multiple students at the same time, with recommendations for restoring engagement providing further instructional support. Second, the virtual environment provides a rich source of raw data that machine learning models can use to associate user activity with user engagement. We have created a testbed for data capture in order to construct the ML models, based on theoretical foundations we developed previously. We are running pilots through multiple PTN scenarios and collecting formative data from instructors to evaluate the utility of the recommendations OMEGA generates regarding how lapsed engagement can be restored. We anticipate findings that validate the use of ML models for learning to detect engagement from the rich data sources characteristic of virtual environments. These findings will be applicable across a broad range of conventional and VR training applications.},
keywords = {Learning Sciences, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
2019
Rosenbloom, Paul S.; Joshi, Himanshu; Ustun, Volkan
(Sub)Symbolic × (a)symmetric × (non)combinatory: A map of AI approaches spanning symbolic/statistical to neural/ML Proceedings Article
In: Proceedings of the 7th Annual Conference on Advances in Cognitive Systems, pp. 113–131, Cognitive Systems Foundation, Cambridge, MA, 2019.
@inproceedings{rosenbloom_subsymbolic_2019,
title = {(Sub)Symbolic × (a)symmetric × (non)combinatory: A map of AI approaches spanning symbolic/statistical to neural/ML},
author = {Paul S. Rosenbloom and Himanshu Joshi and Volkan Ustun},
url = {https://drive.google.com/file/d/1Ynp75A048Mfuh7e3kf_V7hs5kFD7uHsT/view},
year = {2019},
date = {2019-12-01},
booktitle = {Proceedings of the 7th Annual Conference on Advances in Cognitive Systems},
pages = {113–131},
publisher = {Cognitive Systems Foundation},
address = {Cambridge, MA},
abstract = {The traditional symbolic versus subsymbolic dichotomy can be decomposed into three more basic dichotomies, to yield a 3D (2×2×2) space in which symbolic/statistical and neural/ML approaches to intelligence appear in opposite corners. Filling in all eight resulting cells then yields a map that spans a number of standard AI approaches plus a few that may be less familiar. Based on this map, four hypotheses are articulated, explored, and evaluated concerning its relevance to both a deeper understanding of the field of AI as a whole and the general capabilities required in complete AI/cognitive systems.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
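The paper’s 3D (2×2×2) map follows directly from the three dichotomies named in the title, so the eight cells can be enumerated mechanically; which AI approach fills which cell is the paper’s contribution and is not reproduced here.

from itertools import product

# The three dichotomies from the title span a 2 x 2 x 2 space.
DICHOTOMIES = [("symbolic", "subsymbolic"),
               ("symmetric", "asymmetric"),
               ("combinatory", "noncombinatory")]

for cell in product(*DICHOTOMIES):
    print(" / ".join(cell))  # eight cells; per the abstract,
                             # symbolic/statistical and neural/ML
                             # approaches land in opposite corners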
Hartholt, Arno; Fast, Ed; Reilly, Adam; Whitcup, Wendy; Liewer, Matt; Mozgai, Sharon
Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied AI Agents in XR Proceedings Article
In: Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR), pp. 308–3084, IEEE, San Diego, CA, USA, 2019, ISBN: 978-1-72815-604-0.
@inproceedings{hartholt_ubiquitous_2019,
title = {Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied AI Agents in XR},
author = {Arno Hartholt and Ed Fast and Adam Reilly and Wendy Whitcup and Matt Liewer and Sharon Mozgai},
url = {https://ieeexplore.ieee.org/document/8942321/},
doi = {10.1109/AIVR46125.2019.00072},
isbn = {978-1-72815-604-0},
year = {2019},
date = {2019-12-01},
booktitle = {Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)},
pages = {308–3084},
publisher = {IEEE},
address = {San Diego, CA, USA},
abstract = {We present an architecture and framework for the development of virtual humans for a range of computing platforms, including mobile, web, Virtual Reality (VR) and Augmented Reality (AR). The framework uses a mix of in-house and commodity technologies to support audio-visual sensing, speech recognition, natural language processing, nonverbal behavior generation and realization, text-to-speech generation, and rendering. This work builds on the Virtual Human Toolkit, which has been extended to support computing platforms beyond Windows. The resulting framework maintains the modularity of the underlying architecture, allows re-use of both logic and content through cloud services, and is extensible by porting lightweight clients. We present the current state of the framework, discuss how we model and animate our characters, and offer lessons learned through several use cases, including expressive character animation in seated VR, shared space and navigation in roomscale VR, autonomous AI in mobile AR, and real-time user performance feedback based on mobile sensors in headset AR.},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Feng, Andrew; Gordon, Andrew S.
Latent Terrain Representations for Trajectory Prediction Proceedings Article
In: Proceedings of the 1st ACM SIGSPATIAL International Workshop on Computing with Multifaceted Movement Data - MOVE'19, pp. 1–4, ACM Press, Chicago, IL, USA, 2019, ISBN: 978-1-4503-6951-0.
@inproceedings{feng_latent_2019,
title = {Latent Terrain Representations for Trajectory Prediction},
author = {Andrew Feng and Andrew S. Gordon},
url = {http://dl.acm.org/citation.cfm?doid=3356392.3365218},
doi = {10.1145/3356392.3365218},
isbn = {978-1-4503-6951-0},
year = {2019},
date = {2019-11-01},
booktitle = {Proceedings of the 1st ACM SIGSPATIAL International Workshop on Computing with Multifaceted Movement Data - MOVE'19},
pages = {1–4},
publisher = {ACM Press},
address = {Chicago, IL, USA},
abstract = {In natural outdoor environments, the shape of the surface terrain is an important factor in selecting a traversal path, both when operating off-road vehicles and maneuvering on foot. With the increased availability of digital elevation models for outdoor terrain, new opportunities exist to exploit this contextual information to improve automated path prediction. In this paper, we investigate predictive neural network models for outdoor trajectories that traverse terrain with known surface topography. We describe a method of encoding digital surface models as vectors in latent space using Wasserstein Autoencoders, and their use in convolutional neural networks that predict future trajectory positions from past trajectory data. We observe gains in predictive performance across three experiments, using both synthetic and recorded trajectories on real-world terrain.},
keywords = {Narrative, STG, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
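A compact sketch of the pipeline this abstract outlines: a local terrain patch is encoded to a latent vector, which then conditions a network mapping past trajectory positions to future ones. This uses PyTorch with a plain convolutional encoder standing in for the paper’s Wasserstein Autoencoder; all dimensions and the prediction horizon are placeholders, not the paper’s settings.

import torch
import torch.nn as nn

class TerrainEncoder(nn.Module):
    """Encode a digital-surface-model patch to a latent vector (the
    role played by the WAE encoder in the paper)."""
    def __init__(self, latent_dim=32):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(1, 16, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(16, 32, 3, stride=2, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
            nn.Linear(32, latent_dim))
    def forward(self, dsm_patch):          # (B, 1, H, W) elevations
        return self.net(dsm_patch)         # (B, latent_dim)

class TrajectoryPredictor(nn.Module):
    """Predict future positions from past positions plus the terrain
    latent; horizon and sizes are illustrative."""
    def __init__(self, past=8, future=4, latent_dim=32):
        super().__init__()
        self.future = future
        self.head = nn.Sequential(
            nn.Linear(past * 2 + latent_dim, 128), nn.ReLU(),
            nn.Linear(128, future * 2))
    def forward(self, past_xy, terrain_z):  # (B, past, 2), (B, latent)
        x = torch.cat([past_xy.flatten(1), terrain_z], dim=1)
        return self.head(x).view(-1, self.future, 2)

# Smoke test with random data:
enc, pred = TerrainEncoder(), TrajectoryPredictor()
z = enc(torch.randn(2, 1, 64, 64))
print(pred(torch.randn(2, 8, 2), z).shape)  # torch.Size([2, 4, 2])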
Chawla, Kushal; Srinivasan, Balaji Vasan; Chhaya, Niyati
Generating Formality-Tuned Summaries Using Input-Dependent Rewards Proceedings Article
In: Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL), pp. 833–842, Association for Computational Linguistics, Hong Kong, China, 2019.
@inproceedings{chawla_generating_2019,
title = {Generating Formality-Tuned Summaries Using Input-Dependent Rewards},
author = {Kushal Chawla and Balaji Vasan Srinivasan and Niyati Chhaya},
url = {https://www.aclweb.org/anthology/K19-1078},
doi = {10.18653/v1/K19-1078},
year = {2019},
date = {2019-11-01},
booktitle = {Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)},
pages = {833–842},
publisher = {Association for Computational Linguistics},
address = {Hong Kong, China},
abstract = {Abstractive text summarization aims at generating human-like summaries by understanding and paraphrasing the given input content. Recent efforts based on sequence-to-sequence networks only allow the generation of a single summary. However, it is often desirable to accommodate the psycho-linguistic preferences of the intended audience while generating the summaries. In this work, we present a reinforcement learning based approach to generate formality-tailored summaries for an input article. Our novel input-dependent reward function aids in training the model with stylistic feedback on sampled and ground-truth summaries together. Once trained, the same model can generate formal and informal summary variants. Our automated and qualitative evaluations show the viability of the proposed framework.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
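The core idea here, an input-dependent stylistic reward, can be sketched in a few lines: reward a sampled summary for how much of the needed formality shift it achieves relative to the input, rather than for its absolute formality. The function below is one interpretation of that idea, not the paper’s exact reward; scores are assumed to come from some external formality classifier.

def formality_reward(summary_formality, source_formality, target=1.0):
    """Input-dependent reward: easy inputs (already near the target
    formality) are not over-rewarded. All scores assumed in [0, 1]."""
    desired_shift = target - source_formality
    achieved_shift = summary_formality - source_formality
    if desired_shift == 0:
        return 1.0 - abs(achieved_shift)   # just stay in place
    # Fraction of the needed shift actually achieved, clipped to [0, 1].
    return max(0.0, min(1.0, achieved_shift / desired_shift))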
Haring, Kerstin S.; Tobias, Jessica; Waligora, Justin; Phillips, Elizabeth; Tenhundfeld, Nathan L.; Lucas, Gale; Visser, Ewart J.; Gratch, Jonathan; Tossell, Chad
Conflict Mediation in Human-Machine Teaming: Using a Virtual Agent to Support Mission Planning and Debriefing Proceedings Article
In: Proceedings of the 2019 28th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), IEEE, New Delhi, India, 2019.
@inproceedings{haring_conflict_2019,
title = {Conflict Mediation in Human-Machine Teaming: Using a Virtual Agent to Support Mission Planning and Debriefing},
author = {Kerstin S. Haring and Jessica Tobias and Justin Waligora and Elizabeth Phillips and Nathan L. Tenhundfeld and Gale Lucas and Ewart J. Visser and Jonathan Gratch and Chad Tossell},
url = {https://ieeexplore.ieee.org/abstract/document/8956414},
doi = {10.1109/RO-MAN46459.2019.8956414},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 28th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
publisher = {IEEE},
address = {New Delhi, India},
abstract = {Socially intelligent artificial agents and robots are anticipated to become ubiquitous in home, work, and military environments. With the addition of such agents to human teams it is crucial to evaluate their role in the planning, decision making, and conflict mediation processes. We conducted a study to evaluate the utility of a virtual agent that provided mission planning support in a three-person human team during a military strategic mission planning scenario. The team consisted of a human team lead who made the final decisions and three supporting roles, two humans and the artificial agent. The mission outcome was experimentally designed to fail and introduced a conflict between the human team members and the leader. This conflict was mediated by the artificial agent during the debriefing process through the “discuss or debate” and “open communication” strategies of conflict resolution [1]. Our results showed that our teams experienced conflict. The teams also responded socially to the virtual agent, although they did not find the agent beneficial to the mediation process. Finally, teams collaborated well together and perceived task proficiency increased for team leaders. Socially intelligent agents show potential for conflict mediation, but need careful design and implementation to improve team processes and collaboration.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Soleymani, Mohammad; Stefanov, Kalin; Kang, Sin-Hwa; Ondras, Jan; Gratch, Jonathan
Multimodal Analysis and Estimation of Intimate Self-Disclosure Proceedings Article
In: Proceedings of the 2019 International Conference on Multimodal Interaction - ICMI '19, pp. 59–68, ACM Press, Suzhou, China, 2019, ISBN: 978-1-4503-6860-5.
@inproceedings{soleymani_multimodal_2019,
title = {Multimodal Analysis and Estimation of Intimate Self-Disclosure},
author = {Mohammad Soleymani and Kalin Stefanov and Sin-Hwa Kang and Jan Ondras and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3340555.3353737},
doi = {10.1145/3340555.3353737},
isbn = {978-1-4503-6860-5},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 International Conference on Multimodal Interaction - ICMI '19},
pages = {59–68},
publisher = {ACM Press},
address = {Suzhou, China},
abstract = {Self-disclosure to others has a proven benefit for one’s mental health. It is shown that disclosure to computers can be similarly beneficial for emotional and psychological well-being. In this paper, we analyzed verbal and nonverbal behavior associated with self-disclosure in two datasets containing structured human-human and human-agent interviews from more than 200 participants. Correlation analysis of verbal and nonverbal behavior revealed that linguistic features such as affective and cognitive content in verbal behavior, and nonverbal behavior such as head gestures are associated with intimate self-disclosure. A multimodal deep neural network was developed to automatically estimate the level of intimate self-disclosure from verbal and nonverbal behavior. Among the modalities, verbal behavior was the best for estimating self-disclosure within corpora, achieving r = 0.66. However, nonverbal behavior outperformed the language modality in cross-corpus evaluation. Such automatic models can be deployed in interactive virtual agents or social robots to evaluate rapport and guide their conversational strategy.},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
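As a rough illustration of the multimodal estimation idea in the entry above, here is a minimal two-branch late-fusion regressor in PyTorch. The feature dimensions, layer sizes, and fusion scheme are illustrative assumptions; the paper's actual network may differ.

```python
import torch
import torch.nn as nn

class LateFusionRegressor(nn.Module):
    """Toy two-branch network in the spirit of the multimodal model:
    one branch per modality, fused to predict a disclosure intimacy score."""
    def __init__(self, verbal_dim=300, nonverbal_dim=64, hidden=128):
        super().__init__()
        self.verbal = nn.Sequential(nn.Linear(verbal_dim, hidden), nn.ReLU())
        self.nonverbal = nn.Sequential(nn.Linear(nonverbal_dim, hidden), nn.ReLU())
        self.head = nn.Linear(2 * hidden, 1)  # scalar intimacy level (regression)

    def forward(self, verbal_feats, nonverbal_feats):
        fused = torch.cat([self.verbal(verbal_feats),
                           self.nonverbal(nonverbal_feats)], dim=-1)
        return self.head(fused).squeeze(-1)

model = LateFusionRegressor()
scores = model(torch.randn(8, 300), torch.randn(8, 64))  # batch of 8 -> shape (8,)
```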
Xing, Jun; Nagano, Koki; Chen, Weikai; Xu, Haotian; Wei, Li-yi; Zhao, Yajie; Lu, Jingwan; Kim, Byungmoon; Li, Hao
HairBrush for Immersive Data-Driven Hair Modeling Proceedings Article
In: Proceedings of the 32nd Annual ACM Symposium on User Interface Software and Technology - UIST '19, pp. 263–279, ACM Press, New Orleans, LA, USA, 2019, ISBN: 978-1-4503-6816-2.
@inproceedings{xing_hairbrush_2019,
title = {HairBrush for Immersive Data-Driven Hair Modeling},
author = {Jun Xing and Koki Nagano and Weikai Chen and Haotian Xu and Li-yi Wei and Yajie Zhao and Jingwan Lu and Byungmoon Kim and Hao Li},
url = {http://dl.acm.org/citation.cfm?doid=3332165.3347876},
doi = {10.1145/3332165.3347876},
isbn = {978-1-4503-6816-2},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 32nd Annual ACM Symposium on User Interface Software and Technology - UIST '19},
pages = {263–279},
publisher = {ACM Press},
address = {New Orleans, LA, USA},
abstract = {While hair is an essential component of virtual humans, it is also one of the most challenging digital assets to create. Existing automatic techniques lack the generality and flexibility to create rich hair variations, while manual authoring interfaces often require considerable artistic skills and efforts, especially for intricate 3D hair structures that can be difficult to navigate. We propose an interactive hair modeling system that can help create complex hairstyles in minutes or hours that would otherwise take much longer with existing tools. Modelers, including novice users, can focus on the overall hairstyles and local hair deformations, as our system intelligently suggests the desired hair parts. Our method combines the flexibility of manual authoring and the convenience of data-driven automation. Since hair contains intricate 3D structures such as buns, knots, and strands, it is inherently challenging to create using traditional 2D interfaces. Our system provides a new 3D hair authoring interface for immersive interaction in virtual reality (VR). Users can draw high-level guide strips, from which our system predicts the most plausible hairstyles via a deep neural network trained from a professionally curated dataset. Each hairstyle in our dataset is composed of multiple variations, serving as blend-shapes to fit the user drawings via global blending and local deformation. The fitted hair models are visualized as interactive suggestions that the user can select, modify, or ignore. We conducted a user study to confirm that our system can significantly reduce manual labor while improving the output quality for modeling a variety of head and facial hairstyles that are challenging to create via existing techniques.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Wang, Ning; Kamireddy, Sreekar
A Markovian Method for Predicting Trust Behavior in Human-Agent Interaction Proceedings Article
In: Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19, pp. 171–178, ACM Press, Kyoto, Japan, 2019, ISBN: 978-1-4503-6922-0.
@inproceedings{pynadath_markovian_2019,
title = {A Markovian Method for Predicting Trust Behavior in Human-Agent Interaction},
author = {David V. Pynadath and Ning Wang and Sreekar Kamireddy},
url = {http://dl.acm.org/citation.cfm?doid=3349537.3351905},
doi = {10.1145/3349537.3351905},
isbn = {978-1-4503-6922-0},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19},
pages = {171–178},
publisher = {ACM Press},
address = {Kyoto, Japan},
abstract = {Trust calibration is critical to the success of human-agent interaction (HAI). However, individual differences are ubiquitous in people’s trust relationships with autonomous systems. To help its heterogeneous human teammates calibrate their trust in it, an agent must first dynamically model them as individuals, rather than communicating with them all in the same manner. It can then generate expectations of its teammates’ behavior and optimize its own communication based on the current state of the trust relationship it has with them. In this work, we examine how an agent can generate accurate expectations given observations of only the teammate’s trust-related behaviors (e.g., did the person follow or ignore its advice?). In addition to this limited input, we also seek a specific output: accurately predicting its human teammate’s future trust behavior (e.g., will the person follow or ignore my next suggestion?). In this investigation, we construct a model capable of generating such expectations using data gathered in a human-subject study of behavior in a simulated human-robot interaction (HRI) scenario. We first analyze the ability of measures from a presurvey on trust-related traits to accurately predict subsequent trust behaviors. However, as the interaction progresses, this effect is dwarfed by the direct experience. We therefore analyze the ability of sequences of prior behavior by the teammate to accurately predict subsequent trust behaviors. Such behavioral sequences have been shown to be indicative of the subjective beliefs of other teammates, and we show here that they have predictive power as well.},
keywords = {MedVR, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
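A first-order Markov sketch of the behavioral-sequence idea above: estimate transition probabilities between observed trust behaviors (follow/ignore) and predict the most likely next one. This is a toy illustration of the approach, not the authors' exact model.

```python
from collections import Counter

def fit_transitions(history):
    """Estimate P(next | current) from a sequence of trust behaviors,
    e.g. ['follow', 'follow', 'ignore', ...]."""
    counts = {}
    for cur, nxt in zip(history, history[1:]):
        counts.setdefault(cur, Counter())[nxt] += 1
    return {cur: {nxt: c / sum(ctr.values()) for nxt, c in ctr.items()}
            for cur, ctr in counts.items()}

def predict_next(history, transitions):
    """Predict the most likely next behavior given the last observed one."""
    dist = transitions.get(history[-1], {})
    return max(dist, key=dist.get) if dist else None

obs = ['follow', 'follow', 'ignore', 'follow', 'follow', 'follow', 'ignore']
model = fit_transitions(obs)
print(predict_next(obs, model))  # -> 'follow'
```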
Hartholt, Arno; Mozgai, Sharon; Fast, Ed; Liewer, Matt; Reilly, Adam; Whitcup, Wendy; Rizzo, Albert "Skip"
Virtual Humans in Augmented Reality: A First Step towards Real-World Embedded Virtual Roleplayers Proceedings Article
In: Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19, pp. 205–207, ACM Press, Kyoto, Japan, 2019, ISBN: 978-1-4503-6922-0.
@inproceedings{hartholt_virtual_2019-1,
title = {Virtual Humans in Augmented Reality: A First Step towards Real-World Embedded Virtual Roleplayers},
author = {Arno Hartholt and Sharon Mozgai and Ed Fast and Matt Liewer and Adam Reilly and Wendy Whitcup and Albert "Skip" Rizzo},
url = {http://dl.acm.org/citation.cfm?doid=3349537.3352766},
doi = {10.1145/3349537.3352766},
isbn = {978-1-4503-6922-0},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19},
pages = {205–207},
publisher = {ACM Press},
address = {Kyoto, Japan},
abstract = {We present one of the first applications of virtual humans in Augmented Reality (AR), which allows young adults with Autism Spectrum Disorder (ASD) the opportunity to practice job interviews. It uses the Magic Leap’s AR hardware sensors to provide users with immediate feedback on six different metrics, including eye gaze, blink rate and head orientation. The system provides two characters, with three conversational modes each. The system was ported from an existing desktop application; the main development lessons learned were: 1) provide users with navigation instructions in the user interface, 2) avoid dark colors as they are rendered transparently, 3) use dynamic gaze so characters maintain eye contact with the user, 4) use hardware sensors like eye gaze to provide user feedback, and 5) use surface detection to place characters dynamically in the world.},
keywords = {MedVR, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tavabi, Leili; Stefanov, Kalin; Gilani, Setareh Nasihati; Traum, David; Soleymani, Mohammad
Multimodal Learning for Identifying Opportunities for Empathetic Responses Proceedings Article
In: Proceedings of the 2019 International Conference on Multimodal Interaction, pp. 95–104, ACM, Suzhou China, 2019, ISBN: 978-1-4503-6860-5.
@inproceedings{tavabi_multimodal_2019,
title = {Multimodal Learning for Identifying Opportunities for Empathetic Responses},
author = {Leili Tavabi and Kalin Stefanov and Setareh Nasihati Gilani and David Traum and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3340555.3353750},
doi = {10.1145/3340555.3353750},
isbn = {978-1-4503-6860-5},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 International Conference on Multimodal Interaction},
pages = {95–104},
publisher = {ACM},
address = {Suzhou China},
abstract = {Embodied interactive agents possessing emotional intelligence and empathy can create natural and engaging social interactions. Providing appropriate responses by interactive virtual agents requires the ability to perceive users’ emotional states. In this paper, we study and analyze behavioral cues that indicate an opportunity to provide an empathetic response. Emotional tone in language in addition to facial expressions are strong indicators of dramatic sentiment in conversation that warrant an empathetic response. To automatically recognize such instances, we develop a multimodal deep neural network for identifying opportunities when the agent should express positive or negative empathetic responses. We train and evaluate our model using audio, video and language from human-agent interactions in a wizard-of-Oz setting, using the wizard’s empathetic responses and annotations collected on Amazon Mechanical Turk as ground-truth labels. Our model outperforms a text-based baseline, achieving an F1-score of 0.71 on a three-class classification task. We further investigate the results and evaluate the capability of such a model to be deployed for real-world human-agent interactions.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ringeval, Fabien; Messner, Eva-Maria; Song, Siyang; Liu, Shuo; Zhao, Ziping; Mallol-Ragolta, Adria; Ren, Zhao; Soleymani, Mohammad; Pantic, Maja; Schuller, Björn; Valstar, Michel; Cummins, Nicholas; Cowie, Roddy; Tavabi, Leili; Schmitt, Maximilian; Alisamir, Sina; Amiriparian, Shahin
AVEC 2019 Workshop and Challenge: State-of-Mind, Detecting Depression with AI, and Cross-Cultural Affect Recognition Proceedings Article
In: Proceedings of the 9th International on Audio/Visual Emotion Challenge and Workshop - AVEC '19, pp. 3–12, ACM Press, Nice, France, 2019, ISBN: 978-1-4503-6913-8.
@inproceedings{ringeval_avec_2019,
title = {AVEC 2019 Workshop and Challenge: State-of-Mind, Detecting Depression with AI, and Cross-Cultural Affect Recognition},
author = {Fabien Ringeval and Eva-Maria Messner and Siyang Song and Shuo Liu and Ziping Zhao and Adria Mallol-Ragolta and Zhao Ren and Mohammad Soleymani and Maja Pantic and Björn Schuller and Michel Valstar and Nicholas Cummins and Roddy Cowie and Leili Tavabi and Maximilian Schmitt and Sina Alisamir and Shahin Amiriparian},
url = {http://dl.acm.org/citation.cfm?doid=3347320.3357688},
doi = {10.1145/3347320.3357688},
isbn = {978-1-4503-6913-8},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 9th International on Audio/Visual Emotion Challenge and Workshop - AVEC '19},
pages = {3–12},
publisher = {ACM Press},
address = {Nice, France},
abstract = {The Audio/Visual Emotion Challenge and Workshop (AVEC 2019) 'State-of-Mind, Detecting Depression with AI, and Cross-cultural Affect Recognition' is the ninth competition event aimed at the comparison of multimedia processing and machine learning methods for automatic audiovisual health and emotion analysis, with all participants competing strictly under the same conditions. The goal of the Challenge is to provide a common benchmark test set for multimodal information processing and to bring together the health and emotion recognition communities, as well as the audiovisual processing communities, to compare the relative merits of various approaches to health and emotion recognition from real-life data. This paper presents the major novelties introduced this year, the challenge guidelines, the data used, and the performance of the baseline systems on the three proposed tasks: state-of-mind recognition, depression assessment with AI, and cross-cultural affect sensing, respectively.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lei, Su; Gratch, Jonathan
Smiles Signal Surprise in a Social Dilemma Proceedings Article
In: Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII), IEEE, Cambridge, UK, 2019.
@inproceedings{lei_smiles_2019,
title = {Smiles Signal Surprise in a Social Dilemma},
author = {Su Lei and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/xpl/conhome/8911251/proceeding},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII)},
publisher = {IEEE},
address = {Cambridge, UK},
abstract = {This study examines spontaneous facial expressions in an iterated prisoner’s dilemma with financial stakes. Our goal was to identify typical facial expressions associated with key events during the interaction (e.g., cooperation or exploitation) and contrast these reactions with alternative theories of the meaning of facial expressions. Specifically, we examined if expressions reflect individual self-interest (e.g., winning) or social motives (e.g., promoting fairness) and the extent to which surprise might moderate the intensity of facial displays. In contrast to predictions of scientific and folk theories of expression, smiles were the only expressions consistently elicited, regardless of the reward or fairness of outcomes. Further, these smiles serve as a reliable indicator of the surprisingness of the event, but not its pleasure (contradicting research on both the meaning of smiles and indicators of surprise). To our knowledge, this is the first study to indicate that smiles signal surprise.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Gratch, Jonathan; Aydogan, Reyhan; Baarslag, Tim; Jonker, Catholijn M.
The Likeability-Success Tradeoff: Results of the 2nd Annual Human-Agent Automated Negotiating Agents Competition Proceedings Article
In: Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII), IEEE, Cambridge, UK, 2019.
@inproceedings{mell_likeability-success_2019,
title = {The Likeability-Success Tradeoff: Results of the 2nd Annual Human-Agent Automated Negotiating Agents Competition},
author = {Johnathan Mell and Jonathan Gratch and Reyhan Aydogan and Tim Baarslag and Catholijn M. Jonker},
url = {https://ieeexplore.ieee.org/xpl/conhome/8911251/proceeding},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII)},
publisher = {IEEE},
address = {Cambridge, UK},
abstract = {We present the results of the 2nd Annual Human-Agent League of the Automated Negotiating Agent Competition. Building on the success of the previous year’s results, a new challenge was issued that focused on exploring the likeability-success tradeoff in negotiations. The challenge examined a series of repeated negotiations, in which actions may affect the relationship between automated negotiating agents and their human competitors over time. The results presented herein support a more complex view of human-agent negotiation and capture of integrative potential (win-win solutions). We show that, although likeability is generally seen as a tradeoff to winning, agents are able to remain well-liked while winning if integrative potential is not discovered in a given negotiation. The results indicate that the top-performing agent in this competition took advantage of this loophole by engaging in favor exchange across negotiations (cross-game logrolling). These exploratory results provide information about the effects of different submitted “black-box” agents in human-agent negotiation and provide a state-of-the-art benchmark for human-agent design.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hoegen, Rens; Gratch, Jonathan; Parkinson, Brian; Shore, Danielle
Signals of Emotion Regulation in a Social Dilemma: Detection from Face and Context Proceedings Article
In: Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 7, IEEE, Cambridge, UK, 2019.
@inproceedings{hoegen_signals_2019,
title = {Signals of Emotion Regulation in a Social Dilemma: Detection from Face and Context},
author = {Rens Hoegen and Jonathan Gratch and Brian Parkinson and Danielle Shore},
url = {https://ieeexplore.ieee.org/xpl/conhome/8911251/proceeding},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII)},
pages = {7},
publisher = {IEEE},
address = {Cambridge, UK},
abstract = {In social decision-making tasks, facial expressions are informative signals that indicate motives and intentions. As people are aware that their expressions influence partner behavior, expressions may be strategically regulated in competitive environments to influence a social partner’s decisionmaking. In this work, we examine facial expressions and their strategic regulation within the context of an iterated prisoner’s dilemma. Utilizing video-cued rating procedures, we examine several key questions about the functionality of facial expressions in social decision-making. First, we assess the extent to which emotion and expression regulation are accurately detected from dynamic facial expressions in interpersonal interactions. Second, we explore which facial cues are utilized to evaluate emotion and regulation information. Finally, we investigate the role of context in participants’ emotion and regulation judgments. Results show that participants accurately perceive facial emotion and expression regulation, although they are better at recognizing emotions than regulation. Using automated expression analysis and stepwise regression, we constructed models that use action units from participant videos to predict their video-cued emotion and regulation ratings. We show that these models perform similarly and, in some cases, better than participants do. Moreover, these models demonstrate that game state information improves predictive accuracy, thus implying that context information is important in the evaluation of facial expressions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
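The finding above, that game-state context improves prediction of video-cued ratings, can be illustrated with a toy regression over synthetic data: fit the target from action-unit intensities alone versus AUs plus a context feature. All data, feature names, and coefficients below are fabricated for illustration.

```python
import numpy as np

rng = np.random.default_rng(0)
n = 200
aus = rng.random((n, 5))         # stand-ins for AU intensities (e.g. AU4, AU6, AU12, ...)
game_state = rng.random((n, 1))  # stand-in context feature (e.g. payoff difference)
# Synthetic rating that depends on both facial cues and context:
y = 2.0 * aus[:, 2] - 1.0 * aus[:, 0] + 0.8 * game_state[:, 0] \
    + 0.1 * rng.standard_normal(n)

def r_squared(X, y):
    """Ordinary least squares fit with intercept; return R^2."""
    X1 = np.column_stack([X, np.ones(len(X))])
    beta, *_ = np.linalg.lstsq(X1, y, rcond=None)
    resid = y - X1 @ beta
    return 1 - resid.var() / y.var()

print(f"AUs only:      R^2 = {r_squared(aus, y):.3f}")
print(f"AUs + context: R^2 = {r_squared(np.column_stack([aus, game_state]), y):.3f}")
```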
Gordon, Carla; Yanov, Volodymyr; Traum, David; Georgila, Kallirroi
A Wizard of Oz Data Collection Framework for Internet of Things Dialogues Proceedings Article
In: Proceedings of the 23rd Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts, pp. 3, SEMDIAL, London, UK, 2019.
@inproceedings{gordon_wizard_2019,
title = {A Wizard of Oz Data Collection Framework for Internet of Things Dialogues},
author = {Carla Gordon and Volodymyr Yanov and David Traum and Kallirroi Georgila},
url = {http://semdial.org/anthology/papers/Z/Z19/Z19-4024/},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 23rd Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts},
pages = {3},
publisher = {SEMDIAL},
address = {London, UK},
abstract = {We describe a novel Wizard of Oz dialogue data collection framework in the Internet of Things domain. Our tool is designed for collecting dialogues between a human user, and 8 different system profiles, each with a different communication strategy. We then describe the data collection conducted with this tool, as well as the dialogue corpus that was generated.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Bonial, Claire; Donatelli, Lucia; Lukin, Stephanie M.; Tratz, Stephen; Artstein, Ron; Traum, David; Voss, Clare R.
Augmenting Abstract Meaning Representation for Human-Robot Dialogue Proceedings Article
In: Proceedings of the First International Workshop on Designing Meaning Representations (DMR), pp. 199–210, Association for Computational Linguistics, Florence, Italy, 2019.
@inproceedings{bonial_augmenting_2019,
title = {Augmenting Abstract Meaning Representation for Human-Robot Dialogue},
author = {Claire Bonial and Lucia Donatelli and Stephanie M. Lukin and Stephen Tratz and Ron Artstein and David Traum and Clare R. Voss},
url = {https://www.aclweb.org/anthology/W19-3322},
year = {2019},
date = {2019-08-01},
booktitle = {Proceedings of the First International Workshop on Designing Meaning Representations (DMR)},
pages = {199–210},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
abstract = {We detail refinements made to Abstract Meaning Representation (AMR) that make the representation more suitable for supporting a situated dialogue system, where a human remotely controls a robot for purposes of search and rescue and reconnaissance. We propose 36 augmented AMRs that capture speech acts, tense and aspect, and spatial information. This linguistic information is vital for representing important distinctions, for example whether the robot has moved, is moving, or will move. We evaluate two existing AMR parsers for their performance on dialogue data. We also outline a model for graph-to-graph conversion, in which output from AMR parsers is converted into our refined AMRs. The design scheme presented here, though task-specific, is extendable for broad coverage of speech acts using AMR in future task-independent work.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lycan, Bethany; Artstein, Ron
Direct and Mediated Interaction with a Holocaust Survivor Proceedings Article
In: Proceedings of the Advanced Social Interaction with Agents: 8th International Workshop on Spoken Dialog Systems, pp. 161–167, Springer, Cham, Switzerland, 2019.
@inproceedings{lycan_direct_2019,
title = {Direct and Mediated Interaction with a Holocaust Survivor},
author = {Bethany Lycan and Ron Artstein},
url = {https://doi.org/10.1007/978-3-319-92108-2_17},
doi = {10.1007/978-3-319-92108-2_17},
year = {2019},
date = {2019-08-01},
booktitle = {Proceedings of the Advanced Social Interaction with Agents: 8th International Workshop on Spoken Dialog Systems},
volume = {510},
pages = {161–167},
publisher = {Springer},
address = {Cham, Switzerland},
series = {Lecture Notes in Electrical Engineering},
abstract = {The New Dimensions in Testimony dialogue system was placed in two museums under two distinct conditions: docent-led group interaction, and free interaction with visitors. Analysis of the resulting conversations shows that docent-led interactions have a lower vocabulary and a higher proportion of user utterances that directly relate to the system’s subject matter, while free interaction is more personal in nature. Under docent-led interaction the system gives a higher proportion of direct appropriate responses, but overall correct system behavior is about the same in both conditions because the free interaction condition has more instances where the correct system behavior is to avoid a direct response.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gilani, Setareh Nasihati; Traum, David; Sortino, Rachel; Gallagher, Grady; Aaron-Lozano, Kailyn; Padilla, Cryss; Shapiro, Ari; Lamberton, Jason; Petitto, Laura-Ann
Can a Signing Virtual Human Engage a Baby's Attention? Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 162–169, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
@inproceedings{nasihati_gilani_can_2019,
title = {Can a Signing Virtual Human Engage a Baby's Attention?},
author = {Setareh Nasihati Gilani and David Traum and Rachel Sortino and Grady Gallagher and Kailyn Aaron-Lozano and Cryss Padilla and Ari Shapiro and Jason Lamberton and Laura-Ann Petitto},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329463},
doi = {10.1145/3308532.3329463},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {162–169},
publisher = {ACM Press},
address = {Paris, France},
abstract = {The child developmental period of ages 6-12 months marks a widely understood “critical period” for healthy language learning, during which, failure to receive exposure to language can place babies at risk for language and reading problems spanning life. Deaf babies constitute one vulnerable population as they can experience dramatically reduced or no access to usable linguistic input during this period. Technology has been used to augment linguistic input (e.g., auditory devices; language videotapes) but research finds limitations in learning. We evaluated an AI system that uses an Avatar (provides language and socially contingent interactions) and a robot (aids attention to the Avatar) to facilitate infants’ ability to learn aspects of American Sign Language (ASL), and asked three questions: (1) Can babies with little/no exposure to ASL distinguish among the Avatar’s different conversational modes (Linguistic Nursery Rhymes; Social Gestures; Idle/nonlinguistic postures; 3rd person observer)? (2) Can an Avatar stimulate babies’ production of socially contingent responses, and crucially, nascent language responses? (3) What is the impact of parents’ presence/absence of conversational participation? Surprisingly, babies (i) spontaneously distinguished among Avatar conversational modes, (ii) produced varied socially contingent responses to Avatar’s modes, and (iii) parents influenced an increase in babies’ response tokens to some Avatar modes, but the overall categories and pattern of babies’ behavioral responses remained proportionately similar irrespective of parental participation. Of note, babies produced the greatest percentage of linguistic responses to the Avatar’s Linguistic Nursery Rhymes versus other Avatar conversational modes. This work demonstrates the potential for Avatars to facilitate language learning in young babies.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Mozgai, Sharon; Rizzo, Albert "Skip"
Virtual Job Interviewing Practice for High-Anxiety Populations Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 238–240, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
@inproceedings{hartholt_virtual_2019,
title = {Virtual Job Interviewing Practice for High-Anxiety Populations},
author = {Arno Hartholt and Sharon Mozgai and Albert "Skip" Rizzo},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329417},
doi = {10.1145/3308532.3329417},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {238–240},
publisher = {ACM Press},
address = {Paris, France},
abstract = {We present a versatile system for training job interviewing skills that focuses specifically on segments of the population facing increased challenges during the job application process. In particular, we target those with Autism Spectrum Disorder (ASD), veterans transitioning to civilian life, and former convicts integrating back into society. The system itself follows the SAIBA framework and contains several interviewer characters, who each represent a different type of vocational field (e.g., service industry, retail, office, etc.). Each interviewer can be set to one of three conversational modes, which not only affects what they say and how they say it, but also their supporting body language. This approach offers varying difficulties, allowing users to start practicing with interviewers who are more encouraging and accommodating before moving on to personalities that are more direct and indifferent. Finally, the user can place the interviewers in different environmental settings (e.g. conference room, restaurant, executive office, etc.), allowing for many different combinations in which to practice.},
keywords = {MedVR, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M.; Lehr, Janina; Krämer, Nicole; Gratch, Jonathan
The Effectiveness of Social Influence Tactics when Used by a Virtual Agent Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 22–29, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
@inproceedings{lucas_effectiveness_2019,
title = {The Effectiveness of Social Influence Tactics when Used by a Virtual Agent},
author = {Gale M. Lucas and Janina Lehr and Nicole Krämer and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329464},
doi = {10.1145/3308532.3329464},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {22–29},
publisher = {ACM Press},
address = {Paris, France},
abstract = {Research in social science distinguishes between two types of social influence: informational and normative. Informational social influence is driven by the desire to evaluate ambiguous situations correctly, whereas normative social influence is driven by the desire to be liked and gain social acceptance from another person. Although we know from research that humans can effectively use either of these techniques to persuade other humans, scholars have yet to examine the relative effectiveness of informational versus normative social influence when used by virtual agents. We report a study in which users interact with a system that persuades them either using informational or normative social influence. Furthermore, to compare agents to human interlocutors, users are told that the system is either teleoperated by a human (avatar) or fully-automated (agent). Using this design, we are able to compare the effectiveness of virtual agents (vs humans) in employing informational versus normative social influence. Participants interacted with the system, which employed a Wizard-of-Oz operated virtual agent that tried to persuade the user to agree with its rankings on a “survival task.” Controlling for initial divergence in rankings between user and the agent, there was a significant main effect such that informational social influence resulted in greater influence than normative influence. However, this was qualified by an interaction that approached significance; users were, if anything, more persuaded by informational influence when they believe the agent was AI (compared to a human), whereas there was no difference between the agent and avatar in the normative influence condition.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Roediger, Sarah; Lucas, Gale; Gratch, Jonathan
Assessing Common Errors Students Make When Negotiating Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 30–37, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
@inproceedings{johnson_assessing_2019,
title = {Assessing Common Errors Students Make When Negotiating},
author = {Emmanuel Johnson and Sarah Roediger and Gale Lucas and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329470},
doi = {10.1145/3308532.3329470},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {30–37},
publisher = {ACM Press},
address = {Paris, France},
abstract = {Research has shown that virtual agents can be effective tools for teaching negotiation. Virtual agents provide an opportunity for students to practice their negotiation skills which leads to better outcomes. However, these negotiation training agents often lack the ability to understand the errors students make when negotiating, thus limiting their effectiveness as training tools. In this article, we argue that automated opponent-modeling techniques serve as effective methods for diagnosing important negotiation mistakes. To demonstrate this, we analyze a large number of participant traces generated while negotiating with a set of automated opponents. We show that negotiators’ performance is closely tied to their understanding of an opponent’s preferences. We further show that opponent modeling techniques can diagnose specific errors including: failure to elicit diagnostic information from an opponent, failure to utilize the information that was elicited, and failure to understand the transparency of an opponent. These results show that opponent modeling techniques can be effective methods for diagnosing and potentially correcting crucial negotiation errors.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Minha; Lucas, Gale; Mell, Johnathan; Johnson, Emmanuel; Gratch, Jonathan
What's on Your Virtual Mind?: Mind Perception in Human-Agent Negotiations Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 38–45, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
@inproceedings{lee_whats_2019,
title = {What's on Your Virtual Mind?: Mind Perception in Human-Agent Negotiations},
author = {Minha Lee and Gale Lucas and Johnathan Mell and Emmanuel Johnson and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329465},
doi = {10.1145/3308532.3329465},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {38–45},
publisher = {ACM Press},
address = {Paris, France},
abstract = {Recent research shows that how we respond to other social actors depends on what sort of mind we ascribe to them. In this article we examine how perceptions of a virtual agent’s mind shape behavior in human-agent negotiations. We varied descriptions and communicative behavior of virtual agents on two dimensions according to the mind perception theory: agency (cognitive aptitude) and patiency (affective aptitude). Participants then engaged in negotiations with the different agents. People scored more points and engaged in shorter negotiations with agents described to be cognitively intelligent, and got lower points and had longer negotiations with agents that were described to be cognitively unintelligent. Accordingly, agents described as having low-agency ended up earning more points than those with high-agency. Within the negotiations themselves, participants sent more happy and surprise emojis and emotionally valenced messages to agents described to be emotional. This high degree of described patiency also affected perceptions of the agent’s moral standing and relatability. In short, manipulating the perceived mind of agents affects how people negotiate with them. We discuss these results, which show that agents are perceived not only as social actors, but as intentional actors through negotiations.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Beissinger, Markus; Gratch, Jonathan
An Expert-Model & Machine Learning Hybrid Approach to Predicting Human-Agent Negotiation Outcomes Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 212–214, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
@inproceedings{mell_expert-model_2019,
title = {An Expert-Model & Machine Learning Hybrid Approach to Predicting Human-Agent Negotiation Outcomes},
author = {Johnathan Mell and Markus Beissinger and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329433},
doi = {10.1145/3308532.3329433},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {212–214},
publisher = {ACM Press},
address = {Paris, France},
abstract = {We present the results of a machine-learning approach to the analysis of several human-agent negotiation studies. By combining expert knowledge of negotiating behavior compiled over a series of empirical studies with neural networks, we show that a hybrid approach to parameter selection yields promise for designing more effective and socially intelligent agents. Specifically, we show that a deep feedforward neural network using a theory-driven three-parameter model can be effective in predicting negotiation outcomes. Furthermore, it outperforms other expert-designed models that use more parameters, as well as those using other, more limited techniques (such as linear regression models or boosted decision trees). We anticipate these results will have impact for those seeking to combine extensive domain knowledge with more automated approaches in human-computer negotiation.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
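A sketch of the hybrid idea described above: a small deep feedforward network over a three-parameter, theory-driven representation of an agent's negotiating behavior. The parameter names, layer sizes, and PyTorch framing are invented for illustration; the study's actual features and architecture are in the paper.

```python
import torch
import torch.nn as nn

# Deep feedforward network over a three-parameter expert model.
# The three inputs stand in for theory-driven features; names are invented.
model = nn.Sequential(
    nn.Linear(3, 32), nn.ReLU(),
    nn.Linear(32, 32), nn.ReLU(),
    nn.Linear(32, 1),          # predicted negotiation outcome (e.g. points)
)
features = torch.tensor([[0.4, 0.7, 0.1]])  # e.g. [toughness, warmth, deceit]
print(model(features))
```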
Rosenbloom, Paul S
(A)symmetry × (Non)monotonicity: Towards a Deeper Understanding of Key Cognitive Di/Trichotomies and the Common Model of Cognition Proceedings Article
In: Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling, pp. 6, Montreal, Canada, 2019.
@inproceedings{rosenbloom_symmetry_2019,
title = {(A)symmetry × (Non)monotonicity: Towards a Deeper Understanding of Key Cognitive Di/Trichotomies and the Common Model of Cognition},
author = {Paul S Rosenbloom},
url = {https://iccm-conference.neocities.org/2019/proceedings/papers/ICCM2019_paper_6.pdf},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling},
pages = {6},
address = {Montreal, Canada},
abstract = {A range of dichotomies from across the cognitive sciences are reduced to either (a)symmetry or (non)monotonicity. Taking the cross-product of these two elemental dichotomies then yields a deeper understanding of both two key trichotomies –based on control and content hierarchies – and the Common Model of Cognition, with results that bear on the structure of integrative cognitive architectures, models and systems, and on their commonalities, differences and gaps.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S; Ustun, Volkan
An Architectural Integration of Temporal Motivation Theory for Decision Making Proceedings Article
In: Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling, pp. 6, Montreal, Canada, 2019.
@inproceedings{rosenbloom_architectural_2019,
title = {An Architectural Integration of Temporal Motivation Theory for Decision Making},
author = {Paul S Rosenbloom and Volkan Ustun},
url = {https://iccm-conference.neocities.org/2019/proceedings/papers/ICCM2019_paper_7.pdf},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling},
pages = {6},
address = {Montreal, Canada},
abstract = {Temporal Motivation Theory (TMT) is incorporated into the Sigma cognitive architecture to explore the ability of this combination to yield human-like decision making. In conjunction with Lazy Reinforcement Learning (LRL), which provides the inputs required for this form of decision making, experiments are run on a simple reinforcement learning task, a preference reversal task, and an uncertain two-choice task.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
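For context, Temporal Motivation Theory is commonly written as utility = (expectancy × value) / (1 + impulsiveness × delay). Here is a minimal sketch of using that formulation to pick among options; the Sigma integration may parameterize it differently.

```python
def tmt_utility(expectancy, value, impulsiveness, delay):
    """One common formulation of Temporal Motivation Theory:
    utility = (expectancy * value) / (1 + impulsiveness * delay)."""
    return (expectancy * value) / (1.0 + impulsiveness * delay)

# Pick the option with the highest time-discounted utility; note how
# delay discounting can produce preference reversals.
options = {
    "small reward now":   tmt_utility(0.9, 10, 1.0, delay=0),
    "large reward later": tmt_utility(0.9, 100, 1.0, delay=30),
}
print(max(options, key=options.get))  # -> 'small reward now' (9.0 vs. ~2.9)
```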
Sohail, Usman; Traum, David
A Blissymbolics Translation System Proceedings Article
In: Proceedings of the Eighth Workshop on Speech and Language Processing for Assistive Technologies, pp. 32–36, Association for Computational Linguistics, Minneapolis, Minnesota, 2019.
@inproceedings{sohail_blissymbolics_2019,
title = {A Blissymbolics Translation System},
author = {Usman Sohail and David Traum},
url = {http://aclweb.org/anthology/W19-1705},
doi = {10.18653/v1/W19-1705},
year = {2019},
date = {2019-06-01},
booktitle = {Proceedings of the Eighth Workshop on Speech and Language Processing for Assistive Technologies},
pages = {32–36},
publisher = {Association for Computational Linguistics},
address = {Minneapolis, Minnesota},
abstract = {Blissymbolics (Bliss) is a pictographic writing system that is used by people with communication disorders. Bliss attempts to create a writing system that makes words easier to distinguish by using pictographic symbols that encapsulate meaning rather than sound, as the English alphabet does for example. Users of Bliss rely on human interpreters to use Bliss. We created a translation system from Bliss to natural English with the hopes of decreasing the reliance on human interpreters by the Bliss community. We first discuss the basic rules of Blissymbolics. Then we point out some of the challenges associated with developing computer assisted tools for Blissymbolics. Next we talk about our ongoing work in developing a translation system, including current limitations, and future work. We conclude with a set of examples showing the current capabilities of our translation system.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Natsume, Ryota; Saito, Shunsuke; Huang, Zeng; Chen, Weikai; Ma, Chongyang; Li, Hao; Morishima, Shigeo
SiCloPe: Silhouette-Based Clothed People Proceedings Article
In: Proceedings of CVPR, pp. 11, IEEE, Long Beach, CA, 2019.
@inproceedings{natsume_siclope_2019,
title = {SiCloPe: Silhouette-Based Clothed People},
author = {Ryota Natsume and Shunsuke Saito and Zeng Huang and Weikai Chen and Chongyang Ma and Hao Li and Shigeo Morishima},
url = {http://openaccess.thecvf.com/content_CVPR_2019/html/Natsume_SiCloPe_Silhouette-Based_Clothed_People_CVPR_2019_paper.html},
year = {2019},
date = {2019-06-01},
booktitle = {Proceedings of CVPR},
pages = {11},
publisher = {IEEE},
address = {Long Beach, CA},
abstract = {We introduce a new silhouette-based representation for modeling clothed human bodies using deep generative models. Our method can reconstruct a complete and textured 3D model of a person wearing clothes from a single input picture. Inspired by the visual hull algorithm, our implicit representation uses 2D silhouettes and 3D joints of a body pose to describe the immense shape complexity and variations of clothed people. Given a segmented 2D silhouette of a person and its inferred 3D joints from the input picture, we first synthesize consistent silhouettes from novel viewpoints around the subject. The synthesized silhouettes which are the most consistent with the input segmentation are fed into a deep visual hull algorithm for robust 3D shape prediction. We then infer the texture of the subject’s back view using the frontal image and segmentation mask as input to a conditional generative adversarial network. Our experiments demonstrate that our silhouette-based model is an effective representation and the appearance of the back view can be predicted reliably using an image-to-image translation network. While classic methods based on parametric models often fail for single-view images of subjects with challenging clothing, our approach can still produce successful results, which are comparable to those obtained from multi-view input.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhou, Yi; Barnes, Connelly; Lu, Jingwan; Yang, Jimei; Li, Hao
On the Continuity of Rotation Representations in Neural Networks Proceedings Article
In: Proceedings of CVPR, pp. 9, IEEE, Long Beach, CA, 2019.
@inproceedings{zhou_continuity_2019,
title = {On the Continuity of Rotation Representations in Neural Networks},
author = {Yi Zhou and Connelly Barnes and Jingwan Lu and Jimei Yang and Hao Li},
url = {http://openaccess.thecvf.com/content_CVPR_2019/html/Zhou_On_the_Continuity_of_Rotation_Representations_in_Neural_Networks_CVPR_2019_paper.html},
year = {2019},
date = {2019-06-01},
booktitle = {Proceedings of CVPR},
pages = {9},
publisher = {IEEE},
address = {Long Beach, CA},
abstract = {In neural networks, it is often desirable to work with various representations of the same space. For example, 3D rotations can be represented with quaternions or Euler angles. In this paper, we advance a definition of a continuous representation, which can be helpful for training deep neural networks. We relate this to topological concepts such as homeomorphism and embedding. We then investigate what are continuous and discontinuous representations for 2D, 3D, and n-dimensional rotations. We demonstrate that for 3D rotations, all representations are discontinuous in the real Euclidean spaces of four or fewer dimensions. Thus, widely used representations such as quaternions and Euler angles are discontinuous and difficult for neural networks to learn. We show that the 3D rotations have continuous representations in 5D and 6D, which are more suitable for learning. We also present continuous representations for the general case of the n dimensional rotation group SO(n). While our main focus is on rotations, we also show that our constructions apply to other groups such as the orthogonal group and similarity transforms. We finally present empirical results, which show that our continuous rotation representations outperform discontinuous ones for several practical problems in graphics and vision, including a simple autoencoder sanity test, a rotation estimator for 3D point clouds, and an inverse kinematics solver for 3D human poses.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
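The continuous 6D rotation representation described in the abstract above maps two 3-vectors to a rotation matrix via Gram-Schmidt orthonormalization; a numpy sketch follows (column-vector convention assumed).

```python
import numpy as np

def rotation_from_6d(x):
    """Map a 6D representation (two stacked 3-vectors) to a 3x3 rotation
    matrix via Gram-Schmidt, as in the paper's continuous construction."""
    a1, a2 = x[:3], x[3:]
    b1 = a1 / np.linalg.norm(a1)              # first orthonormal axis
    a2_orth = a2 - np.dot(b1, a2) * b1        # remove component along b1
    b2 = a2_orth / np.linalg.norm(a2_orth)    # second orthonormal axis
    b3 = np.cross(b1, b2)                     # third axis, right-handed
    return np.stack([b1, b2, b3], axis=-1)    # columns form the frame

R = rotation_from_6d(np.array([1.0, 0.1, 0.0, 0.0, 1.0, 0.2]))
print(np.allclose(R.T @ R, np.eye(3)))  # -> True (orthonormal, det +1)
```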
Shree, Jaya; Liu, Emily; Gordon, Andrew; Hobbs, Jerry
Deep Natural Language Understanding of News Text Proceedings Article
In: Proceedings of the First Workshop on Narrative Understanding, pp. 19–27, Association for Computational Linguistics, Minneapolis, Minnesota, 2019.
@inproceedings{shree_deep_2019,
title = {Deep Natural Language Understanding of News Text},
author = {Jaya Shree and Emily Liu and Andrew Gordon and Jerry Hobbs},
url = {https://www.aclweb.org/anthology/papers/W/W19/W19-2403/},
doi = {10.18653/v1/W19-2403},
year = {2019},
date = {2019-06-01},
booktitle = {Proceedings of the First Workshop on Narrative Understanding},
pages = {19–27},
publisher = {Association for Computational Linguistics},
address = {Minneapolis, Minnesota},
abstract = {Early proposals for the deep understanding of natural language text advocated an approach of “interpretation as abduction,” where the meaning of a text was derived as an explanation that logically entailed the input words, given a knowledge base of lexical and commonsense axioms. While most subsequent NLP research has instead pursued statistical and data-driven methods, the approach of interpretation as abduction has seen steady advancements in both theory and software implementations. In this paper, we summarize advances in deriving the logical form of the text, encoding commonsense knowledge, and technologies for scalable abductive reasoning. We then explore the application of these advancements to the deep understanding of a paragraph of news text, where the subtle meaning of words and phrases are resolved by backward chaining on a knowledge base of 80 hand-authored axioms.},
keywords = {Narrative},
pubstate = {published},
tppubtype = {inproceedings}
}
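To make “interpretation as abduction” concrete: prove the observed literals from a knowledge base of rules, assume any unprovable literal at a cost, and prefer the cheapest overall explanation. Below is a drastically simplified propositional sketch; real systems add unification, factoring, and weighted axioms, and the example rules and costs are invented.

```python
# Minimal propositional sketch of "interpretation as abduction".
# Assumes an acyclic rule base; literals and costs are illustrative.

RULES = [  # (head, body): head holds if all body literals hold
    ("arrested(x)", ["suspect(x)", "police_act"]),
]
ASSUMPTION_COST = {"suspect(x)": 1.0, "police_act": 2.0}

def explain(goal, rules, cost):
    """Return (assumptions, total_cost) for the cheapest proof of goal."""
    candidates = []
    for head, body in rules:
        if head == goal:  # backward-chain through this rule
            assumed, total = set(), 0.0
            for lit in body:
                sub_assumed, sub_cost = explain(lit, rules, cost)
                assumed |= sub_assumed
                total += sub_cost
            candidates.append((assumed, total))
    # Fallback: simply assume the goal itself, at its assumption cost.
    candidates.append(({goal}, cost.get(goal, float("inf"))))
    return min(candidates, key=lambda c: c[1])

# Cheapest explanation: assume suspect(x) and police_act (cost 3.0).
print(explain("arrested(x)", RULES, ASSUMPTION_COST))
```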
Song, Yale; Soleymani, Mohammad
Polysemous Visual-Semantic Embedding for Cross-Modal Retrieval Proceedings Article
In: Proceedings of the 2019 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10, IEEE, Long Beach, CA, 2019.
@inproceedings{song_polysemous_2019,
title = {Polysemous Visual-Semantic Embedding for Cross-Modal Retrieval},
author = {Yale Song and Mohammad Soleymani},
url = {https://arxiv.org/abs/1906.04402},
year = {2019},
date = {2019-06-01},
booktitle = {Proceedings of the 2019 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
pages = {10},
publisher = {IEEE},
address = {Long Beach, CA},
abstract = {Visual-semantic embedding aims to find a shared latent space where related visual and textual instances are close to each other. Most current methods learn injective embedding functions that map an instance to a single point in the shared space. Unfortunately, injective embedding cannot effectively handle polysemous instances with multiple possible meanings; at best, it would find an average representation of different meanings. This hinders its use in real-world scenarios where individual instances and their cross-modal associations are often ambiguous. In this work, we introduce Polysemous Instance Embedding Networks (PIE-Nets) that compute multiple and diverse representations of an instance by combining global context with locally-guided features via multi-head self-attention and residual learning. To learn visual-semantic embedding, we tie-up two PIE-Nets and optimize them jointly in the multiple instance learning framework. Most existing work on cross-modal retrieval focuses on image-text pairs of data. Here, we also tackle a more challenging case of video-text retrieval. To facilitate further research in video-text retrieval, we release a new dataset of 50K video-sentence pairs collected from social media, dubbed MRW (my reaction when). We demonstrate our approach on both image-text and video-text retrieval scenarios using MS-COCO, TGIF, and our new MRW dataset.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
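A loose PyTorch sketch of the mechanism named in the abstract above: combining global context with locally guided features via multi-head self-attention and a residual connection, yielding K diverse embeddings per instance. All dimensions and details are illustrative assumptions, not the published architecture.

```python
import torch
import torch.nn as nn

class PolysemousPooling(nn.Module):
    """Toy version of the PIE-Net idea: K learned queries attend over
    local features; each attended vector is added residually to the
    global feature, producing K diverse embeddings of one instance."""
    def __init__(self, dim=256, k=4, heads=4):
        super().__init__()
        self.queries = nn.Parameter(torch.randn(k, dim))  # one query per "sense"
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.proj = nn.Linear(dim, dim)

    def forward(self, local_feats, global_feat):
        # local_feats: (B, N, dim) region/word features; global_feat: (B, dim)
        q = self.queries.unsqueeze(0).expand(local_feats.size(0), -1, -1)
        attended, _ = self.attn(q, local_feats, local_feats)
        return global_feat.unsqueeze(1) + self.proj(attended)  # (B, K, dim)

pool = PolysemousPooling()
emb = pool(torch.randn(2, 36, 256), torch.randn(2, 256))
print(emb.shape)  # torch.Size([2, 4, 256])
```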
Agarwal, Shruti; Farid, Hany; Gu, Yuming; He, Mingming; Nagano, Koki; Li, Hao
Protecting World Leaders Against Deep Fakes Proceedings Article
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pp. 8, IEEE, Long Beach, CA, 2019.
@inproceedings{agarwal_protecting_2019,
title = {Protecting World Leaders Against Deep Fakes},
author = {Shruti Agarwal and Hany Farid and Yuming Gu and Mingming He and Koki Nagano and Hao Li},
url = {http://openaccess.thecvf.com/content_CVPRW_2019/papers/Media%20Forensics/Agarwal_Protecting_World_Leaders_Against_Deep_Fakes_CVPRW_2019_paper.pdf},
year = {2019},
date = {2019-06-01},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops},
pages = {8},
publisher = {IEEE},
address = {Long Beach, CA},
abstract = {The creation of sophisticated fake videos has been largely relegated to Hollywood studios or state actors. Recent advances in deep learning, however, have made it significantly easier to create sophisticated and compelling fake videos. With relatively modest amounts of data and computing power, the average person can, for example, create a video of a world leader confessing to illegal activity leading to a constitutional crisis, a military leader saying something racially insensitive leading to civil unrest in an area of military activity, or a corporate titan claiming that their profits are weak leading to global stock manipulation. These so-called deep fakes pose a significant threat to our democracy, national security, and society. To contend with this growing threat, we describe a forensic technique that models facial expressions and movements that typify an individual’s speaking pattern. Although not visually apparent, these correlations are often violated by the nature of how deep-fake videos are created and can, therefore, be used for authentication.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
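In the spirit of the forensic technique described above, one can summarize a clip of per-frame facial features by their pairwise correlations and model authentic footage with a one-class classifier, flagging clips whose correlation signature is anomalous. The feature set, clip length, and hyperparameters below are placeholders, and the random arrays stand in for real facial measurements.

```python
import numpy as np
from itertools import combinations
from sklearn.svm import OneClassSVM

def correlation_signature(clip):
    """clip: (T, F) per-frame facial features (e.g. action units, head pose).
    Returns the vector of pairwise Pearson correlations across the clip,
    a soft biometric of the speaker's idiosyncratic speaking style."""
    corr = np.corrcoef(clip.T)                      # (F, F) correlation matrix
    pairs = combinations(range(clip.shape[1]), 2)
    return np.array([corr[i, j] for i, j in pairs])

rng = np.random.default_rng(1)
real_clips = [rng.standard_normal((300, 20)) for _ in range(50)]  # "authentic" footage
X = np.stack([correlation_signature(c) for c in real_clips])

detector = OneClassSVM(nu=0.1).fit(X)               # model the real distribution only
suspect = correlation_signature(rng.standard_normal((300, 20)))
print(detector.predict([suspect]))                  # +1 = consistent, -1 = anomalous
```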
Gilani, Setareh Nasihati; Traum, David; Sortino, Rachel; Gallagher, Grady; Aaron-lozano, Kailyn; Padilla, Cryss; Shapiro, Ari; Lamberton, Jason; Petitto, Laura-ann
Can a Virtual Human Facilitate Language Learning in a Young Baby? Proceedings Article
In: Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems, ACM, Montreal, Canada, 2019, ISBN: 978-1-4503-6309-9.
@inproceedings{gilani_can_2019,
title = {Can a Virtual Human Facilitate Language Learning in a Young Baby?},
author = {Setareh Nasihati Gilani and David Traum and Rachel Sortino and Grady Gallagher and Kailyn Aaron-lozano and Cryss Padilla and Ari Shapiro and Jason Lamberton and Laura-ann Petitto},
url = {https://dl.acm.org/citation.cfm?id=3332035},
isbn = {978-1-4503-6309-9},
year = {2019},
date = {2019-05-01},
booktitle = {Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems},
publisher = {ACM},
address = {Montreal, Canada},
abstract = {There is a significant paucity of work on language learning systems for young infants [2, 5, 19] despite the widely understood critical importance that this developmental period has for healthy language and cognitive growth, and related reading and academic success [6, 14]. Deaf babies constitute one vulnerable population as they can experience dramatically reduced or no access to usable linguistic input during this period [18]. This causes potentially devastating impact on children's linguistic, cognitive, and social skills [9, 10, 15, 16, 20]. We introduced an AI system, called RAVE (Robot, AVatar, thermal Enhanced language learning tool), designed specifically for babies within the age range of 6-12 months [8, 17]. RAVE consists of two agents: a virtual human (provides language and socially contingent interactions) and an embodied robot (provides socially engaging physical cues to babies and directs babies' attention to the virtual human). Detailed description of the system's constituent components and dialogue algorithms are presented in [17] and [8].},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Traum, David
Developing a Virtual Reality Wildfire Simulation to Analyze Human Communication and Interaction with a Robotic Swarm During Emergencies Proceedings Article
In: Proceedings of the 9th Language and Technology Conference, LTC, Poznań, Poland, 2019.
@inproceedings{chaffey_developing_2019,
title = {Developing a Virtual Reality Wildfire Simulation to Analyze Human Communication and Interaction with a Robotic Swarm During Emergencies},
author = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and David Traum},
url = {http://www-scf.usc.edu/~nasihati/publications/HLTCEM_2019.pdf},
year = {2019},
date = {2019-05-01},
booktitle = {Proceedings of the 9th Language and Technology Conference},
publisher = {LTC},
address = {Poznań, Poland},
abstract = {Search and rescue missions involving robots face multiple challenges. The ratio of operators to robots is frequently one to one or higher, operators tasked with robots must contend with cognitive overload for long periods, and the robots themselves may be discomfiting to located survivors. To improve on the current state, we propose a swarm of robots equipped with natural language abilities and guided by a central virtual “spokesperson” able to access “plays”. The spokesperson may assist the operator with tasking the robots in their exploration of a zone, which allows the operator to maintain a safe distance. The use of multiple robots enables rescue personnel to cover a larger swath of ground, and the natural language component allows the robots to communicate with survivors located on site. This capability frees the operator to handle situations requiring personal attention, and overall can accelerate the location and assistance of survivors. To develop this system, we are creating a virtual reality simulation in which to conduct a study and analysis of how humans communicate with these swarms of robots. The data collected from this experiment will inform how to best design emergency response swarm robots that are effectively able to communicate with the humans around them.},
keywords = {ARL, DoD, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Core, Mark G; Nye, Benjamin D; Karumbaiah, Shamya; Auerbach, Daniel; Ram, Maya
Using Reinforcement Learning to Optimize the Policies of an Intelligent Tutoring System for Interpersonal Skills Training Proceedings Article
In: Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems, pp. 9, IFAAMAS, Montreal, Canada, 2019.
@inproceedings{georgila_using_2019,
title = {Using Reinforcement Learning to Optimize the Policies of an Intelligent Tutoring System for Interpersonal Skills Training},
author = {Kallirroi Georgila and Mark G Core and Benjamin D Nye and Shamya Karumbaiah and Daniel Auerbach and Maya Ram},
url = {http://www.ifaamas.org/Proceedings/aamas2019/pdfs/p737.pdf},
year = {2019},
date = {2019-05-01},
booktitle = {Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {9},
publisher = {IFAAMAS},
address = {Montreal, Canada},
abstract = {Reinforcement Learning (RL) has been applied successfully to Intelligent Tutoring Systems (ITSs) in a limited set of well-defined domains such as mathematics and physics. This work is unique in using a large state space and in applying RL to tutoring interpersonal skills. Interpersonal skills are increasingly recognized as critical to both social and economic development. In particular, this work enhances an ITS designed to teach basic counseling skills that can be applied to challenging issues such as sexual harassment and workplace conflict. An initial data collection was used to train RL policies for the ITS, and an evaluation with human participants compared a hand-crafted ITS which had been used for years with students (control) versus the new ITS guided by RL policies. The RL condition differed from the control condition most notably in the strikingly large quantity of guidance it provided to learners. Both systems were effective and there was an overall significant increase from pre- to post-test scores. Although learning gains did not differ significantly between conditions, learners had a significantly higher self-rating of confidence in the RL condition. Confidence and learning gains were both part of the reward function used to train the RL policies, and it could be the case that there was the most room for improvement in confidence, an important learner emotion. Thus, RL was successful in improving an ITS for teaching interpersonal skills without the need to prune the state space (as previously done).},
keywords = {Learning Sciences, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
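The abstract above notes that the reward function combined learning gains with learner confidence. As a rough illustration of that setup, the Python sketch below trains a tabular Q-learning tutoring policy whose reward mixes the two signals; the state and action names, the weighting, and the tabular formulation are assumptions for illustration, not the paper's actual system.

import random
from collections import defaultdict

ACTIONS = ["hint", "prompt", "feedback", "move_on"]   # illustrative tutor actions
Q = defaultdict(float)                                # Q[(state, action)] -> value
ALPHA, GAMMA, EPSILON = 0.1, 0.9, 0.1

def choose(state):
    # Epsilon-greedy action selection over the learned values.
    if random.random() < EPSILON:
        return random.choice(ACTIONS)
    return max(ACTIONS, key=lambda a: Q[(state, a)])

def update(state, action, next_state, learning_gain, confidence, w=0.5):
    # Reward mixes learning gains and learner confidence, per the abstract.
    reward = w * learning_gain + (1 - w) * confidence
    best_next = max(Q[(next_state, a)] for a in ACTIONS)
    Q[(state, action)] += ALPHA * (reward + GAMMA * best_next - Q[(state, action)])

update("confused", "hint", "progressing", learning_gain=0.3, confidence=0.7)
print(choose("confused"))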
Koeman, Vincent J; Hindriks, Koen V; Gratch, Jonathan; Jonker, Catholijn M
Recognising and Explaining Bidding Strategies in Negotiation Support Systems Proceedings Article
In: Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems, pp. 3, ACM, Montreal, Canada, 2019, ISBN: 978-1-4503-6309-9.
@inproceedings{koeman_recognising_2019,
title = {Recognising and Explaining Bidding Strategies in Negotiation Support Systems},
author = {Vincent J Koeman and Koen V Hindriks and Jonathan Gratch and Catholijn M Jonker},
url = {https://dl.acm.org/citation.cfm?id=3332011},
isbn = {978-1-4503-6309-9},
year = {2019},
date = {2019-05-01},
booktitle = {Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems},
pages = {3},
publisher = {ACM},
address = {Montreal, Canada},
abstract = {To improve a negotiator's ability to recognise bidding strategies, we pro-actively provide explanations that are based on the opponent's bids and the negotiator's guesses about the opponent's strategy. We introduce an aberration detection mechanism for recognising strategies and the notion of an explanation matrix. The aberration detection mechanism identifies when a bid falls outside the range of expected behaviour for a specific strategy. The explanation matrix is used to decide when to provide what explanations. We evaluated our work experimentally in a task in which participants are asked to identify their opponent's strategy in the environment of a negotiation support system, namely the Pocket Negotiator (PN). We implemented our explanation mechanism in the PN and experimented with different explanation matrices. Since the number of correct guesses increases with explanations, these experiments also indirectly show the effectiveness of our aberration detection mechanism. Our experiments with over 100 participants show that suggesting consistent strategies is more effective than explaining why observed behaviour is inconsistent.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
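To make the abstract's two devices concrete, the sketch below pairs a toy aberration test (is a bid's utility outside the band a hypothesized strategy predicts at this point in the negotiation?) with a lookup table standing in for the explanation matrix. The strategy models, tolerance, and messages are invented for illustration and are not the Pocket Negotiator's implementation.

def expected_band(strategy, t):
    # Predicted utility range of the opponent's bid at normalized time t in [0, 1].
    center = 1.0 - (0.8 if strategy == "conceder" else 0.2) * t
    return center - 0.1, center + 0.1   # illustrative tolerance band

def is_aberration(strategy, t, bid_utility):
    lo, hi = expected_band(strategy, t)
    return not (lo <= bid_utility <= hi)

# Explanation matrix: (guessed strategy, aberration detected?) -> message to the user.
EXPLANATIONS = {
    ("conceder", True): "This bid concedes less than a conceder would; consider a hardheaded strategy.",
    ("hardheaded", True): "This bid concedes more than a hardheaded opponent would; consider a conceder strategy.",
}

guess, t, bid = "conceder", 0.5, 0.95
key = (guess, is_aberration(guess, t, bid))
print(EXPLANATIONS.get(key, "Observed bidding is consistent with your guess."))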
Rizzo, Albert S.
Clinical virtual reality in mental health and rehabilitation: a brief review of the future! Proceedings Article
In: Fulop, Gabor F.; Hanson, Charles M.; Andresen, Bjørn F. (Ed.): Infrared Technology and Applications XLV, pp. 51–74, SPIE, Baltimore, United States, 2019, ISBN: 978-1-5106-2669-0 978-1-5106-2670-6.
@inproceedings{rizzo_clinical_2019,
title = {Clinical virtual reality in mental health and rehabilitation: a brief review of the future!},
author = {Albert S. Rizzo},
editor = {Gabor F. Fulop and Charles M. Hanson and Bjørn F. Andresen},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/11002/2524302/Clinical-virtual-reality-in-mental-health-and-rehabilitation--a/10.1117/12.2524302.full},
doi = {10.1117/12.2524302},
isbn = {978-1-5106-2669-0 978-1-5106-2670-6},
year = {2019},
date = {2019-05-01},
booktitle = {Infrared Technology and Applications XLV},
volume = {37},
number = {1},
pages = {51--74},
publisher = {SPIE},
address = {Baltimore, United States},
abstract = {The paper details some of the history of Clinical Virtual Reality (VR) as it has evolved over the last 25 years and provides a brief overview of the key scientific findings for making a judgment regarding its value in the areas of mental health and rehabilitation. This write-up is designed to be a companion piece to my SPIE keynote on the topic of “Is Clinical Virtual Reality Ready for Primetime?” As such, the paper is packed with citations to key scientific research in this area that should provide readers who are interested in this topic with a roadmap for further exploration of the literature. After presenting a brief history of the area, a discussion follows as to the theory, research, and pragmatic issues that support the view that this VR use case is theoretically informed, has a large and convincing scientific literature to support its clinical application, and that recent technology advances and concomitant cost reductions have made clinical implementation feasible and pragmatically supported. The paper concludes with the perspective that Clinical VR applications will soon become indispensable tools in the toolbox of psychological researchers and practitioners and will only grow in relevance and popularity in the future.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {inproceedings}
}
Gervits, Felix; Leuski, Anton; Bonial, Claire; Gordon, Carla; Traum, David
A Classification-Based Approach to Automating Human-Robot Dialogue Proceedings Article
In: Proceedings of IWSDS 2019, pp. 12, Siracusa, Italy, 2019.
@inproceedings{gervits_classication-based_2019,
title = {A Classification-Based Approach to Automating Human-Robot Dialogue},
author = {Felix Gervits and Anton Leuski and Claire Bonial and Carla Gordon and David Traum},
url = {https://www.semanticscholar.org/paper/A-Classification-Based-Approach-to-Automating-Gervits-Leuski/262cf9e3a14e370d46a5e65f7872b32482d9ea69?tab=abstract&citingPapersSort=is-influential&citingPapersLimit=10&citingPapersOffset=0&year%5B0%5D=&year%5B1%5D=&citedPapersSort=is-influential&citedPapersLimit=10&citedPapersOffset=10},
year = {2019},
date = {2019-04-01},
booktitle = {Proceedings of IWSDS 2019},
pages = {12},
address = {Siracusa, Italy},
abstract = {We present a dialogue system based on statistical classification which was used to automate human-robot dialogue in a collaborative navigation domain. The classifier was trained on a small corpus of multi-floor Wizard-of-Oz dialogue including two wizards: one standing in for dialogue capabilities and another for navigation. Below, we describe the implementation details of the classifier and show how it was used to automate the dialogue wizard. We evaluate our system on several sets of source data from the corpus and find that response accuracy is generally high, even with very limited training data. Another contribution of this work is the novel demonstration of a dialogue manager that uses the classifier to engage in multi-floor dialogue with two different human roles. Overall, this approach is useful for enabling spoken dialogue systems to produce robust and accurate responses to natural language input, and for robots that need to interact with humans in a team setting.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
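As a toy illustration of the classification approach described above, the following sketch maps transcribed operator instructions to a fixed set of robot responses with a TF-IDF plus logistic-regression text classifier; the miniature corpus and response labels are stand-ins for the multi-floor Wizard-of-Oz data, and the feature choice is an assumption rather than the authors' implementation.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

# Tiny stand-in corpus for the Wizard-of-Oz navigation transcripts.
utterances = ["go down the hallway", "stop", "take a photo", "turn around and stop"]
responses = ["move", "halt", "photo", "halt"]

# Train a classifier that selects a response class for each input utterance.
model = make_pipeline(TfidfVectorizer(ngram_range=(1, 2)), LogisticRegression())
model.fit(utterances, responses)

print(model.predict(["please stop moving"]))   # expected: ['halt']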
Panlener, William; Krum, David M; Jones, J Adam
Effects of Horizontal Field of View Extension on Spatial Judgments in Virtual Reality Proceedings Article
In: Proceedings of the IEEE Southeast Conference 2019, pp. 7, IEEE, Huntsville, AL, 2019.
@inproceedings{panlener_effects_2019,
title = {Effects of Horizontal Field of View Extension on Spatial Judgments in Virtual Reality},
author = {William Panlener and David M Krum and J Adam Jones},
url = {https://www.researchgate.net/publication/332448571_Effects_of_Horizontal_Field_of_View_Extension_on_Spatial_Judgments_in_Virtual_Reality},
year = {2019},
date = {2019-04-01},
booktitle = {Proceedings of the IEEE Southeast Conference 2019},
pages = {7},
publisher = {IEEE},
address = {Huntsville, AL},
abstract = {It is known that observers tend to misperceive distances during spatial judgment tasks in virtual reality. Virtual environments restrict field of view as compared to real environments. We explore whether horizontal field of view restriction affects real or perceived ocular convergence. We also explore effects that the size and symmetry of field of view may have. We find that convergence is not impacted by altering field of view, but the subjective median plane is affected. We also find that distance is better estimated in wider fields of view, and that lateral bias in estimation is correlated to the symmetry of the field.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Davis, Dan M; Phelps, Christi L; Stassi, Frederica J
Pedagogical Tools to Enhance Analytic Skills: Interactive Virtual Tutorial Environments Proceedings Article
In: Proceedings of MODSIM World 2019, pp. 12, Norfolk, VA, 2019.
@inproceedings{davis_pedagogical_2019,
title = {Pedagogical Tools to Enhance Analytic Skills: Interactive Virtual Tutorial Environments},
author = {Dan M Davis and Christi L Phelps and Frederica J Stassi},
url = {http://www.modsimworld.org/conference-papers/2019},
year = {2019},
date = {2019-04-01},
booktitle = {Proceedings of MODSIM World 2019},
pages = {12},
address = {Norfolk, VA},
abstract = {This paper examines the use of literature studies to enhance communication and critical thinking skills in technical students through the application of emerging Virtual Reality (VR) technologies to enable that pedagogical approach. The current state of analytic skills among students in Science, Technology, Engineering and Mathematics (STEM) tracks is outlined, focusing on the critical years in secondary schools. Their prospective needs as they advance into tertiary education and the needs of the technical community for improvement are presented. The requirements flowing from that analysis will be discussed in the light of programs implemented at the Sato Academy, with reports of both successes and missteps. In some detail, the use of the study of literature is described and discussed. The authors present their case for constructivist and Socratic approaches to fully engage and effectively inculcate communication proficiency, including conformance with standards, e.g. Next Generation Science Standards (NGSS). These results are then compared to the demands of college and professional leaders who are currently being burdened with having to provide disruptive remedial efforts. The methods found to be successful are considered, both in terms of their application and their extensibility to other fields. Also highlighted will be areas in which time and personnel constraints hindered achievement. A number of possible responses to these impediments will be presented, evaluating the feasibility of each. The paper will then focus on the advances in virtual humans and conversational avatars. Recent research into using large libraries of video-clips to create engaging on-line virtual tutorial conversations will be presented. Data as to the receptivity of students to conversing with computer-generated interlocutors is presented, along with a discussion as to how this technology is applicable to teaching the analysis of literature. The benefits of and the barriers to virtual tutorial environments are outlined and analyzed.},
keywords = {Learning Sciences},
pubstate = {published},
tppubtype = {inproceedings}
}
Davis, Dan M; Young, Nancy L H; Davis, Mark C
Enhancements for Homeschooling and ADL: Virtual Humans, Technologies and Insights Proceedings Article
In: Proceedings of MODSIM World 2019, pp. 12, Norfolk, VA, 2019.
@inproceedings{davis_enhancements_2019,
title = {Enhancements for Homeschooling and ADL: Virtual Humans, Technologies and Insights},
author = {Dan M Davis and Nancy L H Young and Mark C Davis},
url = {http://www.modsimworld.org/conference-papers/2019},
year = {2019},
date = {2019-04-01},
booktitle = {Proceedings of MODSIM World 2019},
pages = {12},
address = {Norfolk, VA},
abstract = {Homeschooling and DoD Advanced Distributed Learning (ADL) have many goals in common, so increasing the collaborative research and collegial information exchange between their respective communities would be mutually advantageous. The emerging capabilities of virtual humans provide a useful prototype of how both homeschooling and ADL can benefit from emerging technological advances. This paper begins with an examination of the home schooling movement in the United States, including a review of its foundations, demographics, results and trends. In examining the goals of homeschooling parents, the four major reasons cited by at least half of those parents are considered and explicated: desire to find environment most compatible to users, provision of ethics foundations, inclusion of accountability instruction and dissatisfaction with other pedagogical approaches. Also meriting review are the hurdles faced by homeschool teachers and students, followed by an item-by-item comparison with analogous challenges for ADL provisioners and learners. A short analysis of the constraints on the two communities focuses on similarities and differences between family limitations and defense organization restrictions. The authors then present data on the current scope, instantiations, and achievements of the two efforts. Many of the technologies currently in use are reviewed and discussed, concentrating on computer-aided education and distributed learning. Emerging technologies based on artificial intelligence, natural language processing, and virtual humans are described and considered. Their uses in various contexts provide sufficient data to quantify the impact on subjects and the authors adduce findings from research to support their thesis that increased use of these technologies would be beneficial both to homeschooled students and to DoD Learners. The paper closes with an evaluation of the arc of current research, the recognition of prenascent capabilities (e.g. quantum computing), the burgeoning needs of both communities, and the need to nurture a synergistic exchange between homeschool advocates and ADL architects.},
keywords = {Learning Sciences},
pubstate = {published},
tppubtype = {inproceedings}
}
Dennison, Mark S.; Krum, David M.
Unifying Research to Address Motion Sickness Proceedings Article
In: Proceedings of the 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR), pp. 1858–1859, IEEE, Osaka, Japan, 2019, ISBN: 978-1-72811-377-7.
@inproceedings{dennison_unifying_2019,
title = {Unifying Research to Address Motion Sickness},
author = {Mark S. Dennison and David M. Krum},
url = {https://ieeexplore.ieee.org/document/8798297/},
doi = {10.1109/VR.2019.8798297},
isbn = {978-1-72811-377-7},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)},
pages = {1858--1859},
publisher = {IEEE},
address = {Osaka, Japan},
abstract = {Be it discussed as cybersickness, immersive sickness, simulator sickness, or virtual reality sickness, the ill effects of visuo-vestibular mismatch in immersive environments are of great concern for the wider adoption of virtual reality and related technologies. In this position paper, we discuss a unified research approach that may address motion sickness and identify critical research topics.},
keywords = {ARL, DoD, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Gordon, Carla; Sohail, Usman; Merchant, Chirag; Jones, Andrew; Campbell, Julia; Trimmer, Matthew; Bevington, Jeffrey; Engen, COL Christopher; Traum, David
Digital Survivor of Sexual Assault Proceedings Article
In: Proceedings of the 24th International Conference on Intelligent User Interfaces, pp. 417–425, ACM, Marina del Rey, California, 2019, ISBN: 978-1-4503-6272-6.
@inproceedings{artstein_digital_2019,
title = {Digital Survivor of Sexual Assault},
author = {Ron Artstein and Carla Gordon and Usman Sohail and Chirag Merchant and Andrew Jones and Julia Campbell and Matthew Trimmer and Jeffrey Bevington and COL Christopher Engen and David Traum},
url = {https://doi.org/10.1145/3301275.3302303},
doi = {10.1145/3301275.3302303},
isbn = {978-1-4503-6272-6},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 24th International Conference on Intelligent User Interfaces},
pages = {417--425},
publisher = {ACM},
address = {Marina del Rey, California},
abstract = {The Digital Survivor of Sexual Assault (DS2A) is an interface that allows a user to have a conversational experience with a survivor of sexual assault, using Artificial Intelligence technology and recorded videos. The application uses a statistical classifier to retrieve contextually appropriate pre-recorded video utterances by the survivor, together with dialogue management policies which enable users to conduct simulated conversations with the survivor about the sexual assault, its aftermath, and other pertinent topics. The content in the application has been specifically elicited to support the needs for the training of U.S. Army professionals in the Sexual Harassment/Assault Response and Prevention (SHARP) Program, and the application comes with an instructional support package. The system has been tested with approximately 200 users, and is presently being used in the SHARP Academy's capstone course.},
keywords = {DoD, Graphics, MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
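One simple way to realize the classifier-based retrieval the abstract describes is nearest-neighbor matching between the user's question and the questions that elicited each recorded clip; the sketch below uses TF-IDF cosine similarity for that matching. The index contents and the similarity choice are hypothetical, not the deployed DS2A system.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Hypothetical index: elicitation question -> pre-recorded video answer.
clips = {
    "how did you report the assault": "report.mp4",
    "what happened afterwards": "aftermath.mp4",
    "what would you tell other soldiers": "advice.mp4",
}
questions = list(clips)
vectorizer = TfidfVectorizer().fit(questions)
index = vectorizer.transform(questions)

def respond(user_question):
    # Return the clip whose elicitation question is most similar to the input.
    sims = cosine_similarity(vectorizer.transform([user_question]), index)[0]
    return clips[questions[sims.argmax()]]

print(respond("who did you report it to"))   # expected: report.mp4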
Gordon, Carla; Leuski, Anton; Benn, Grace; Klassen, Eric; Fast, Edward; Liewer, Matt; Hartholt, Arno; Traum, David
PRIMER: An Emotionally Aware Virtual Agent Proceedings Article
In: Proceedings of the 24th International Conference on Intelligent User Interfaces, pp. 10, ACM, Los Angeles, CA, 2019.
@inproceedings{gordon_primer_2019,
title = {PRIMER: An Emotionally Aware Virtual Agent},
author = {Carla Gordon and Anton Leuski and Grace Benn and Eric Klassen and Edward Fast and Matt Liewer and Arno Hartholt and David Traum},
url = {https://www.research.ibm.com/haifa/Workshops/user2agent2019/},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 24th International Conference on Intelligent User Interfaces},
pages = {10},
publisher = {ACM},
address = {Los Angeles, CA},
abstract = {PRIMER is a proof-of-concept system designed to show the potential of immersive dialogue agents and virtual environments that adapt and respond to both direct verbal input and indirect emotional input. The system has two novel interfaces: (1) for the user, an immersive VR environment and an animated virtual agent both of which adapt and react to the user’s direct input as well as the user’s perceived emotional state, and (2) for an observer, an interface that helps track the perceived emotional state of the user, with visualizations to provide insight into the system’s decision making process. While the basic system architecture can be adapted for many potential real world applications, the initial version of this system was designed to assist clinical social workers in helping children cope with bullying. The virtual agent produces verbal and non-verbal behaviors guided by a plan for the counseling session, based on in-depth discussions with experienced counselors, but is also reactive to both initiatives that the user takes, e.g. asking their own questions, and the user’s perceived emotional state.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sinhwa; Chanenson, Jake; Cowal, Peter; Weaver, Madeleine
Advancing Ethical Decision Making in Virtual Reality Proceedings Article
In: Proceedings of the 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR), pp. 2, IEEE, Osaka, Japan, 2019.
@inproceedings{kang_advancing_2019,
title = {Advancing Ethical Decision Making in Virtual Reality},
author = {Sinhwa Kang and Jake Chanenson and Peter Cowal and Madeleine Weaver},
url = {https://ieeexplore.ieee.org/document/8798151},
doi = {10.1109/VR.2019.8798151},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)},
pages = {2},
publisher = {IEEE},
address = {Osaka, Japan},
abstract = {Virtual reality (VR) has been widely utilized for training and education purposes because of pedagogical, safety, and economic benefits. The investigation of moral judgment is a particularly interesting VR application, related to training. For this study, we designed a within-subject experiment manipulating the role of study participants in a Trolley Dilemma scenario: either victim or driver. We conducted a pilot study with four participants and describe preliminary results and implications in this poster.},
keywords = {MedVR, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Bernardet, Ulysses; Kang, Sin-Hwa; Feng, Andrew; DiPaola, Steve; Shapiro, Ari
Speech Breathing in Virtual Humans: An Interactive Model and Empirical Study Proceedings Article
In: 2019 IEEE Virtual Humans and Crowds for Immersive Environments (VHCIE), pp. 1–9, IEEE, Osaka, Japan, 2019, ISBN: 978-1-72813-219-8.
@inproceedings{bernardet_speech_2019,
title = {Speech Breathing in Virtual Humans: An Interactive Model and Empirical Study},
author = {Ulysses Bernardet and Sin-Hwa Kang and Andrew Feng and Steve DiPaola and Ari Shapiro},
url = {https://ieeexplore.ieee.org/document/8714737/},
doi = {10.1109/VHCIE.2019.8714737},
isbn = {978-1-72813-219-8},
year = {2019},
date = {2019-03-01},
booktitle = {2019 IEEE Virtual Humans and Crowds for Immersive Environments (VHCIE)},
pages = {1--9},
publisher = {IEEE},
address = {Osaka, Japan},
abstract = {Human speech production requires the dynamic regulation of air through the vocal system. While virtual character systems commonly are capable of speech output, they rarely take breathing during speaking – speech breathing – into account. We believe that integrating dynamic speech breathing systems in virtual characters can significantly contribute to augmenting their realism. Here, we present a novel control architecture aimed at generating speech breathing in virtual characters. This architecture is informed by behavioral, linguistic and anatomical knowledge of human speech breathing. Based on textual input and controlled by a set of low- and high-level parameters, the system produces dynamic signals in real-time that control the virtual character’s anatomy (thorax, abdomen, head, nostrils, and mouth) and sound production (speech and breathing).},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
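The control architecture described above maps text, via low- and high-level parameters, to real-time signals driving the character's anatomy. A highly simplified sketch of one such mapping appears below: it splits input text into breath groups at punctuation and schedules an inhalation before each group, with durations set by two assumed rate parameters. This illustrates the general idea only, not the paper's model.

import re

SYLLABLE_SEC = 0.2   # assumed speaking rate per syllable
INHALE_SEC = 0.5     # assumed inhalation duration

def breath_schedule(text):
    # Yield (event, duration_s) pairs: an inhalation before each breath group,
    # then an exhalation lasting as long as the group takes to speak.
    for phrase in re.split(r"[,.;!?]+", text):
        words = phrase.split()
        if not words:
            continue
        syllables = sum(max(1, len(re.findall(r"[aeiouy]+", w.lower()))) for w in words)
        yield "inhale", INHALE_SEC
        yield "exhale", syllables * SYLLABLE_SEC

for event, duration in breath_schedule("Hello there. How are you today?"):
    print(f"{event:7s} {duration:.2f}s")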