Publications
Search
de Visser, Ewart J.; Topoglu, Yigit; Joshi, Shawn; Krueger, Frank; Phillips, Elizabeth; Gratch, Jonathan; Tossell, Chad C.; Ayaz, Hasan
Designing Man’s New Best Friend: Enhancing Human-Robot Dog Interaction through Dog-Like Framing and Appearance Journal Article
In: Sensors, vol. 22, no. 3, pp. 1287, 2022, ISSN: 1424-8220.
@article{de_visser_designing_2022,
  title     = {Designing Man’s New Best Friend: Enhancing Human-Robot Dog Interaction through Dog-Like Framing and Appearance},
  author    = {de Visser, Ewart J. and Topoglu, Yigit and Joshi, Shawn and Krueger, Frank and Phillips, Elizabeth and Gratch, Jonathan and Tossell, Chad C. and Ayaz, Hasan},
  url       = {https://www.mdpi.com/1424-8220/22/3/1287},
  doi       = {10.3390/s22031287},
  issn      = {1424-8220},
  year      = {2022},
  date      = {2022-02-01},
  urldate   = {2022-09-28},
  journal   = {Sensors},
  volume    = {22},
  number    = {3},
  pages     = {1287},
  abstract  = {To understand how to improve interactions with dog-like robots, we evaluated the importance of “dog-like” framing and physical appearance on interaction, hypothesizing multiple interactive benefits of each. We assessed whether framing Aibo as a puppy (i.e., in need of development) versus simply a robot would result in more positive responses and interactions. We also predicted that adding fur to Aibo would make it appear more dog-like, likable, and interactive. Twenty-nine participants engaged with Aibo in a 2 × 2 (framing × appearance) design by issuing commands to the robot. Aibo and participant behaviors were monitored per second, and evaluated via an analysis of commands issued, an analysis of command blocks (i.e., chains of commands), and using a T-pattern analysis of participant behavior. Participants were more likely to issue the “Come Here” command than other types of commands. When framed as a puppy, participants used Aibo’s dog name more often, praised it more, and exhibited more unique, interactive, and complex behavior with Aibo. Participants exhibited the most smiling and laughing behaviors with Aibo framed as a puppy without fur. Across conditions, after interacting with Aibo, participants felt Aibo was more trustworthy, intelligent, warm, and connected than at their initial meeting. This study shows the benefits of introducing a socially robotic agent with a particular frame and importance on realism (i.e., introducing the robot dog as a puppy) for more interactive engagement.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Talbot, Thomas Brett; Chinara, Chinmay
Open Medical Gesture: An Open-Source Experiment in Naturalistic Physical Interactions for Mixed and Virtual Reality Simulations Proceedings Article
In: 2022.
@inproceedings{brett_talbot_open_2022,
  title     = {Open Medical Gesture: An Open-Source Experiment in Naturalistic Physical Interactions for Mixed and Virtual Reality Simulations},
  author    = {Talbot, Thomas Brett and Chinara, Chinmay},
  url       = {https://openaccess.cms-conferences.org/#/publications/book/978-1-958651-26-1/article/978-1-958651-26-1_0},
  doi       = {10.54941/ahfe1002054},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-09-13},
  abstract  = {Mixed (MR) and Virtual Reality (VR) simulations are hampered by requirements for hand controllers or attempts to perseverate in use of two-dimensional computer interface paradigms from the 1980s. From our efforts to produce more naturalistic interactions for combat medic training for the military, we have developed an open-source toolkit that enables direct hand controlled responsive interactions that is sensor independent and can function with depth sensing cameras, webcams or sensory gloves. From this research and review of current literature, we have discerned several best approaches for hand-based human computer interactions which provide intuitive, responsive, useful, and low frustration experiences for VR users. The center of an effective gesture system is a universal hand model that can map to inputs from several different kinds of sensors rather than depending on a specific commercial product. Parts of the hand are effectors in simulation space with a physics-based model. Therefore, translational and rotational forces from the hands will impact physical objects in VR which varies based on the mass of the virtual objects. We incorporate computer code w/ objects, calling them “Smart Objects”, which allows such objects to have movement properties and collision detection for expected manipulation. Examples of smart objects include scissors, a ball, a turning knob, a moving lever, or a human figure with moving limbs. Articulation points contain collision detectors and code to assist in expected hand actions. We include a library of more than 40 Smart Objects in the toolkit. Thus, is it possible to throw a ball, hit that ball with a bat, cut a bandage, turn on a ventilator or to lift and inspect a human arm.We mediate the interaction of the hands with virtual objects. Hands often violate the rules of a virtual world simply by passing through objects. One must interpret user intent. 
This can be achieved by introducing stickiness of the hands to objects. If the human’s hands overshoot an object, we place the hand onto that object’s surface unless the hand passes the object by a significant distance. We also make hands and fingers contact an object according to the object’s contours and do not allow fingers to sink into the interior of an object. Haptics, or a sense of physical resistance and tactile sensation from contacting physical objects is a supremely difficult technical challenge and is an expensive pursuit. Our approach ignores true haptics, but we have experimented with an alternative approach, called audio tactile synesthesia where we substitute the sensation of touch for that of sound. The idea is to associate parts of each hand with a tone of a specific frequency upon contacting objects. The attack rate of the sound envelope varies with the velocity of contact and hardness of the object being ‘touched’. Such sounds can feel softer or harder depending on the nature of ‘touch’ being experienced. This substitution technique can provide tactile feedback through indirect, yet still naturalistic means. The artificial intelligence (AI) technique to determine discrete hand gestures and motions within the physical space is a special form of AI called Long Short Term Memory (LSTM). LSTM allows much faster and flexible recognition than other machine learning approaches. LSTM is particularly effective with points in motion. Latency of recognition is very low. In addition to LSTM, we employ other synthetic vision & object recognition AI to the discrimination of real-world objects. This allows for methods to conduct virtual simulations. For example, it is possible to pick up a virtual syringe and inject a medication into a virtual patient through hand motions. We track the hand points to contact with the virtual syringe. We also detect when the hand is compressing the syringe plunger. 
We could also use virtual medications & instruments on human actors or manikins, not just on virtual objects. With object recognition AI, we can place a syringe on a tray in the physical world. The human user can pick up the syringe and use it on a virtual patient. Thus, we are able to blend physical and virtual simulation together seamlessly in a highly intuitive and naturalistic manner.The techniques and technologies explained here represent a baseline capability whereby interacting in mixed and virtual reality can now be much more natural and intuitive than it has ever been. We have now passed a threshold where we can do away with game controllers and magnetic trackers for VR. This advancement will contribute to greater adoption of VR solutions. To foster this, our team has committed to freely sharing these technologies for all purposes and at no cost as an open-source tool. We encourage the scientific, research, educational and medical communities to adopt these resources and determine their effectiveness and utilize these tools and practices to grow the body of useful VR applications.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Zhou, Jincheng; Ustun, Volkan
PySigma: Towards Enhanced Grand Unification for the Sigma Cognitive Architecture Book Section
In: Goertzel, Ben; Iklé, Matthew; Potapov, Alexey (Ed.): Artificial General Intelligence, vol. 13154, pp. 355–366, Springer International Publishing, Cham, 2022, ISBN: 978-3-030-93757-7 978-3-030-93758-4.
@incollection{zhou_pysigma_2022,
  title     = {{PySigma}: Towards Enhanced Grand Unification for the {Sigma} Cognitive Architecture},
  author    = {Zhou, Jincheng and Ustun, Volkan},
  editor    = {Goertzel, Ben and Iklé, Matthew and Potapov, Alexey},
  url       = {https://link.springer.com/10.1007/978-3-030-93758-4_36},
  doi       = {10.1007/978-3-030-93758-4_36},
  isbn      = {978-3-030-93757-7 978-3-030-93758-4},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-09-21},
  booktitle = {Artificial General Intelligence},
  volume    = {13154},
  pages     = {355--366},
  publisher = {Springer International Publishing},
  address   = {Cham},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
DiNinni, Richard; Rizzo, Albert
Sensing Human Signals of Motivation Processes During STEM Tasks Proceedings Article
In: Rodrigo, Maria Mercedes; Matsuda, Noburu; Cristea, Alexandra I.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium, pp. 163–167, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-11647-6.
@inproceedings{dininni_sensing_2022,
  title     = {Sensing Human Signals of Motivation Processes During {STEM} Tasks},
  author    = {DiNinni, Richard and Rizzo, Albert},
  editor    = {Rodrigo, Maria Mercedes and Matsuda, Noburu and Cristea, Alexandra I. and Dimitrova, Vania},
  doi       = {10.1007/978-3-031-11647-6_28},
  isbn      = {978-3-031-11647-6},
  year      = {2022},
  date      = {2022-01-01},
  booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium},
  pages     = {163--167},
  publisher = {Springer International Publishing},
  address   = {Cham},
  series    = {Lecture Notes in Computer Science},
  abstract  = {This paper outlines the linking of a multi-modal sensing platform with an Intelligent Tutoring System to perceive the motivational state of the learner during STEM tasks. Motivation is a critical element to learning but receives little attention in comparison to strategies related to cognitive processes. The EMPOWER project has developed a novel platform that offers researchers an opportunity to capture a learner’s multi-modal behavioral signals to develop models of motivation problems that can be used to develop best practice strategies for instructional systems.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chawla, Kushal; Lucas, Gale; May, Jonathan; Gratch, Jonathan
Opponent Modeling in Negotiation Dialogues by Related Data Adaptation Proceedings Article
In: Findings of the Association for Computational Linguistics: NAACL 2022, pp. 661–674, Association for Computational Linguistics, Seattle, United States, 2022.
@inproceedings{chawla_opponent_2022,
  title     = {Opponent Modeling in Negotiation Dialogues by Related Data Adaptation},
  author    = {Chawla, Kushal and Lucas, Gale and May, Jonathan and Gratch, Jonathan},
  url       = {https://aclanthology.org/2022.findings-naacl.50},
  doi       = {10.18653/v1/2022.findings-naacl.50},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-09-26},
  booktitle = {Findings of the Association for Computational Linguistics: {NAACL} 2022},
  pages     = {661--674},
  publisher = {Association for Computational Linguistics},
  address   = {Seattle, United States},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Li, Jiaman; Villegas, Ruben; Ceylan, Duygu; Yang, Jimei; Kuang, Zhengfei; Li, Hao; Zhao, Yajie
Task-Generic Hierarchical Human Motion Prior using VAEs Proceedings Article
In: 2021 International Conference on 3D Vision (3DV), pp. 771–781, IEEE, London, United Kingdom, 2021, ISBN: 978-1-66542-688-6.
@inproceedings{li_task-generic_2021,
  title     = {Task-Generic Hierarchical Human Motion Prior using {VAEs}},
  author    = {Li, Jiaman and Villegas, Ruben and Ceylan, Duygu and Yang, Jimei and Kuang, Zhengfei and Li, Hao and Zhao, Yajie},
  url       = {https://ieeexplore.ieee.org/document/9665881/},
  doi       = {10.1109/3DV53792.2021.00086},
  isbn      = {978-1-66542-688-6},
  year      = {2021},
  date      = {2021-12-01},
  urldate   = {2022-09-22},
  booktitle = {2021 International Conference on 3D Vision (3DV)},
  pages     = {771--781},
  publisher = {IEEE},
  address   = {London, United Kingdom},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Liu, Lixing; Gurney, Nikolos; McCullough, Kyle; Ustun, Volkan
Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations Proceedings Article
In: 2021 Winter Simulation Conference (WSC), pp. 1–12, IEEE, Phoenix, AZ, USA, 2021, ISBN: 978-1-66543-311-2.
@inproceedings{liu_graph_2021,
  title     = {Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations},
  author    = {Liu, Lixing and Gurney, Nikolos and McCullough, Kyle and Ustun, Volkan},
  url       = {https://ieeexplore.ieee.org/document/9715433/},
  doi       = {10.1109/WSC52266.2021.9715433},
  isbn      = {978-1-66543-311-2},
  year      = {2021},
  date      = {2021-12-01},
  urldate   = {2022-09-21},
  booktitle = {2021 Winter Simulation Conference (WSC)},
  pages     = {1--12},
  publisher = {IEEE},
  address   = {Phoenix, AZ, USA},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Yin, Yufeng; Lu, Liupei; Wu, Yizhen; Soleymani, Mohammad
Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection Proceedings Article
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–8, IEEE, Jodhpur, India, 2021, ISBN: 978-1-66543-176-7.
@inproceedings{yin_self-supervised_2021,
  title     = {Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection},
  author    = {Yin, Yufeng and Lu, Liupei and Wu, Yizhen and Soleymani, Mohammad},
  url       = {https://ieeexplore.ieee.org/document/9667048/},
  doi       = {10.1109/FG52635.2021.9667048},
  isbn      = {978-1-66543-176-7},
  year      = {2021},
  date      = {2021-12-01},
  urldate   = {2022-09-23},
  booktitle = {2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)},
  pages     = {1--8},
  publisher = {IEEE},
  address   = {Jodhpur, India},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Tran, Minh; Bradley, Ellen; Matvey, Michelle; Woolley, Joshua; Soleymani, Mohammad
Modeling Dynamics of Facial Behavior for Mental Health Assessment Proceedings Article
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–5, IEEE, Jodhpur, India, 2021, ISBN: 978-1-66543-176-7.
@inproceedings{tran_modeling_2021,
  title     = {Modeling Dynamics of Facial Behavior for Mental Health Assessment},
  author    = {Tran, Minh and Bradley, Ellen and Matvey, Michelle and Woolley, Joshua and Soleymani, Mohammad},
  url       = {https://ieeexplore.ieee.org/document/9666955/},
  doi       = {10.1109/FG52635.2021.9666955},
  isbn      = {978-1-66543-176-7},
  year      = {2021},
  date      = {2021-12-01},
  urldate   = {2022-09-23},
  booktitle = {2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)},
  pages     = {1--5},
  publisher = {IEEE},
  address   = {Jodhpur, India},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hartholt, Arno; McCullough, Kyle; Fast, Ed; Leeds, Andrew; Mozgai, Sharon; Aris, Tim; Ustun, Volkan; Gordon, Andrew; McGroarty, Christopher
Rapid Prototyping for Simulation and Training with the Rapid Integration & Development Environment (RIDE) Proceedings Article
In: 2021.
@inproceedings{hartholt_rapid_2021,
  title     = {Rapid Prototyping for Simulation and Training with the Rapid Integration \& Development Environment ({RIDE})},
  author    = {Hartholt, Arno and McCullough, Kyle and Fast, Ed and Leeds, Andrew and Mozgai, Sharon and Aris, Tim and Ustun, Volkan and Gordon, Andrew and McGroarty, Christopher},
  year      = {2021},
  date      = {2021-11-01},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mozgai, Sharon; Leeds, Andrew; Kwok, David; Fast, Ed; Rizzo, Albert Skip; Hartholt, Arno
Building BRAVEMIND Vietnam: User-Centered Design for Virtual Reality Exposure Therapy Proceedings Article
In: 2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR), pp. 247–250, 2021.
@inproceedings{mozgai_building_2021,
  title     = {Building {BRAVEMIND} {Vietnam}: User-Centered Design for Virtual Reality Exposure Therapy},
  author    = {Mozgai, Sharon and Leeds, Andrew and Kwok, David and Fast, Ed and Rizzo, Albert Skip and Hartholt, Arno},
  doi       = {10.1109/AIVR52153.2021.00056},
  year      = {2021},
  date      = {2021-11-01},
  booktitle = {2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)},
  pages     = {247--250},
  abstract  = {There has been a recent resurgence of Vietnam-era Veterans seeking Veteran Affairs (VA) services for post-traumatic stress disorder (PTSD). Multiple studies have reported positive outcomes using Virtual Reality Exposure Therapy (VRET) with Iraq/Afghanistan Veteran PTSD patients, but there have been fewer investigations into the acceptability and feasibility of VRET with older Veterans. We have extended an established VRET system, BRAVEMIND, to include Vietnam relevant content following a user-centered iterative design methodology. This paper will present the BRAVEMIND Vietnam VRET system, including setup, content, and technical architecture along with the findings of an initial focus group with Vietnam Veterans. These Veterans rated this system to be acceptable as a treatment tool for combat-related PTSD and provided valuable first-person accounts of their time in Vietnam to help guide the evolution of the VR content.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Associations Among Home Indoor Environmental Quality Factors and Worker Health While Working From Home During COVID-19 Pandemic Journal Article
In: ASME Journal of Engineering for Sustainable Buildings and Cities, vol. 2, no. 4, pp. 041001, 2021, ISSN: 2642-6641, 2642-6625.
@article{awada_associations_2021,
  title     = {Associations Among Home Indoor Environmental Quality Factors and Worker Health While Working From Home During {COVID-19} Pandemic},
  author    = {Awada, Mohamad and Becerik-Gerber, Burcin and Lucas, Gale and Roll, Shawn C.},
  url       = {https://asmedigitalcollection.asme.org/sustainablebuildings/article/2/4/041001/1122847/Associations-Among-Home-Indoor-Environmental},
  doi       = {10.1115/1.4052822},
  issn      = {2642-6641, 2642-6625},
  year      = {2021},
  date      = {2021-11-01},
  urldate   = {2022-09-26},
  journal   = {ASME Journal of Engineering for Sustainable Buildings and Cities},
  volume    = {2},
  number    = {4},
  pages     = {041001},
  abstract  = {Abstract The outbreak of SARS-CoV-2 virus forced office workers to conduct their daily work activities from home over an extended period. Given this unique situation, an opportunity emerged to study the satisfaction of office workers with indoor environmental quality (IEQ) factors of their houses where work activities took place and associate these factors with mental and physical health. We designed and administered a questionnaire that was open for 45 days during the COVID-19 pandemic and received valid data from 988 respondents. The results show that low satisfaction with natural lighting, glare, and humidity predicted eye-related symptoms, while low satisfaction with noise was a strong predictor of fatigue or tiredness, headaches or migraines, anxiety, and depression or sadness. Nose- and throat-related symptoms and skin-related symptoms were only uniquely predicted by low satisfaction with humidity. Low satisfaction with glare uniquely predicted an increase in musculoskeletal discomfort. Symptoms related to mental stress, rumination, or worry were predicted by low satisfaction with air quality and noise. Finally, low satisfaction with noise and indoor temperature predicted the prevalence of symptoms related to trouble concentrating, maintaining attention, or focus. Workers with higher income were more satisfied with humidity, air quality, and indoor temperature and had better overall mental health. Older individuals had increased satisfaction with natural lighting, humidity, air quality, noise, and indoor temperature. Findings from this study can inform future design practices that focus on hybrid home-work environments by highlighting the impact of IEQ factors on occupant well-being.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Li, Tianye; Liu, Shichen; Bolkart, Timo; Liu, Jiayi; Li, Hao; Zhao, Yajie
Topologically Consistent Multi-View Face Inference Using Volumetric Sampling Proceedings Article
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 3804–3814, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
@inproceedings{li_topologically_2021,
  title     = {Topologically Consistent Multi-View Face Inference Using Volumetric Sampling},
  author    = {Li, Tianye and Liu, Shichen and Bolkart, Timo and Liu, Jiayi and Li, Hao and Zhao, Yajie},
  url       = {https://ieeexplore.ieee.org/document/9711264/},
  doi       = {10.1109/ICCV48922.2021.00380},
  isbn      = {978-1-66542-812-5},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2022-09-22},
  booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
  pages     = {3804--3814},
  publisher = {IEEE},
  address   = {Montreal, QC, Canada},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Liu, Shichen; Zhou, Yichao; Zhao, Yajie
VaPiD: A Rapid Vanishing Point Detector via Learned Optimizers Proceedings Article
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 12839–12848, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
@inproceedings{liu_vapid_2021,
  title     = {{VaPiD}: A Rapid Vanishing Point Detector via Learned Optimizers},
  author    = {Liu, Shichen and Zhou, Yichao and Zhao, Yajie},
  url       = {https://ieeexplore.ieee.org/document/9711313/},
  doi       = {10.1109/ICCV48922.2021.01262},
  isbn      = {978-1-66542-812-5},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2022-09-22},
  booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
  pages     = {12839--12848},
  publisher = {IEEE},
  address   = {Montreal, QC, Canada},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Toyoda, Yuushi; Lucas, Gale; Gratch, Jonathan
Predicting Worker Accuracy from Nonverbal Behaviour: Benefits and Potential for Algorithmic Bias Proceedings Article
In: Companion Publication of the 2021 International Conference on Multimodal Interaction, pp. 25–30, ACM, Montreal QC Canada, 2021, ISBN: 978-1-4503-8471-1.
@inproceedings{toyoda_predicting_2021,
  title     = {Predicting Worker Accuracy from Nonverbal Behaviour: Benefits and Potential for Algorithmic Bias},
  author    = {Toyoda, Yuushi and Lucas, Gale and Gratch, Jonathan},
  url       = {https://dl.acm.org/doi/10.1145/3461615.3485427},
  doi       = {10.1145/3461615.3485427},
  isbn      = {978-1-4503-8471-1},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2022-09-28},
  booktitle = {Companion Publication of the 2021 International Conference on Multimodal Interaction},
  pages     = {25--30},
  publisher = {ACM},
  address   = {Montreal QC Canada},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Adami, Pooya; Rodrigues, Patrick B.; Woods, Peter J.; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale
Effectiveness of VR-based training on improving construction workers’ knowledge, skills, and safety behavior in robotic teleoperation Journal Article
In: Advanced Engineering Informatics, vol. 50, pp. 101431, 2021, ISSN: 14740346.
@article{adami_effectiveness_2021,
  title     = {Effectiveness of {VR}-based training on improving construction workers’ knowledge, skills, and safety behavior in robotic teleoperation},
  author    = {Adami, Pooya and Rodrigues, Patrick B. and Woods, Peter J. and Becerik-Gerber, Burcin and Soibelman, Lucio and Copur-Gencturk, Yasemin and Lucas, Gale},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S147403462100183X},
  doi       = {10.1016/j.aei.2021.101431},
  issn      = {1474-0346},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2022-09-26},
  journal   = {Advanced Engineering Informatics},
  volume    = {50},
  pages     = {101431},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Kontogiorgos, Dimosthenis; Tran, Minh; Gustafson, Joakim; Soleymani, Mohammad
A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures Proceedings Article
In: Proceedings of the 2021 International Conference on Multimodal Interaction, pp. 112–120, ACM, Montréal QC Canada, 2021, ISBN: 978-1-4503-8481-0.
@inproceedings{kontogiorgos_systematic_2021,
  title     = {A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures},
  author    = {Kontogiorgos, Dimosthenis and Tran, Minh and Gustafson, Joakim and Soleymani, Mohammad},
  url       = {https://dl.acm.org/doi/10.1145/3462244.3479887},
  doi       = {10.1145/3462244.3479887},
  isbn      = {978-1-4503-8481-0},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2022-09-23},
  booktitle = {Proceedings of the 2021 International Conference on Multimodal Interaction},
  pages     = {112--120},
  publisher = {ACM},
  address   = {Montréal QC Canada},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Xiang, Sitao; Gu, Yuming; Xiang, Pengda; Chai, Menglei; Li, Hao; Zhao, Yajie; He, Mingming
DisUnknown: Distilling Unknown Factors for Disentanglement Learning Proceedings Article
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 14790–14799, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
@inproceedings{xiang_disunknown_2021,
  title     = {{DisUnknown}: Distilling Unknown Factors for Disentanglement Learning},
  author    = {Xiang, Sitao and Gu, Yuming and Xiang, Pengda and Chai, Menglei and Li, Hao and Zhao, Yajie and He, Mingming},
  url       = {https://ieeexplore.ieee.org/document/9709965/},
  doi       = {10.1109/ICCV48922.2021.01454},
  isbn      = {978-1-66542-812-5},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2022-09-23},
  booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
  pages     = {14790--14799},
  publisher = {IEEE},
  address   = {Montreal, QC, Canada},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert “Skip”; Goodwin, Grace J.; De Vito, Alyssa N.; Bell, Joshua D.
Recent advances in virtual reality and psychology: Introduction to the special issue. Journal Article
In: Translational Issues in Psychological Science, vol. 7, no. 3, pp. 213–217, 2021, ISSN: 2332-2179, 2332-2136.
@article{rizzo_recent_2021,
  title     = {Recent advances in virtual reality and psychology: Introduction to the special issue.},
  author    = {Rizzo, Albert “Skip” and Goodwin, Grace J. and De Vito, Alyssa N. and Bell, Joshua D.},
  url       = {http://doi.apa.org/getdoi.cfm?doi=10.1037/tps0000316},
  doi       = {10.1037/tps0000316},
  issn      = {2332-2179, 2332-2136},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-13},
  journal   = {Translational Issues in Psychological Science},
  volume    = {7},
  number    = {3},
  pages     = {213--217},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Johnson, Emmanuel; Gratch, Jonathan
Comparing The Accuracy of Frequentist and Bayesian Models in Human-Agent Negotiation Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 139–144, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
@inproceedings{johnson_comparing_2021,
  title     = {Comparing The Accuracy of Frequentist and {Bayesian} Models in Human-Agent Negotiation},
  author    = {Johnson, Emmanuel and Gratch, Jonathan},
  url       = {https://dl.acm.org/doi/10.1145/3472306.3478354},
  doi       = {10.1145/3472306.3478354},
  isbn      = {978-1-4503-8619-7},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-28},
  booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
  pages     = {139--144},
  publisher = {ACM},
  address   = {Virtual Event Japan},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Filter
2021
Mell, Johnathan; Beissinger, Markus; Gratch, Jonathan
An expert-model and machine learning hybrid approach to predicting human-agent negotiation outcomes in varied data Journal Article
In: J Multimodal User Interfaces, 2021, ISSN: 1783-7677, 1783-8738.
Abstract | Links | BibTeX | Tags: DTIC, Machine Learning, UARC, Virtual Humans
@article{mell_expert-model_2021,
  title     = {An expert-model and machine learning hybrid approach to predicting human-agent negotiation outcomes in varied data},
  author    = {Mell, Johnathan and Beissinger, Markus and Gratch, Jonathan},
  url       = {http://link.springer.com/10.1007/s12193-021-00368-w},
  doi       = {10.1007/s12193-021-00368-w},
  issn      = {1783-7677, 1783-8738},
  year      = {2021},
  date      = {2021-03-01},
  urldate   = {2021-04-15},
  journal   = {Journal on Multimodal User Interfaces},
  abstract  = {We present the results of a machine-learning approach to the analysis of several human-agent negotiation studies. By combining expert knowledge of negotiating behavior compiled over a series of empirical studies with neural networks, we show that a hybrid approach to parameter selection yields promise for designing more effective and socially intelligent agents. Specifically, we show that a deep feedforward neural network using a theory-driven three-parameter model can be effective in predicting negotiation outcomes. Furthermore, it outperforms other expert-designed models that use more parameters, as well as those using other techniques (such as linear regression models or boosted decision trees). In a follow-up study, we show that the most successful models change as the dataset size increases and the prediction targets change, and show that boosted decision trees may not be suitable for the negotiation domain. We anticipate these results will have impact for those seeking to combine extensive domain knowledge with more automated approaches in human-computer negotiation. Further, we show that this approach can be a stepping stone from purely exploratory research to targeted human-behavioral experimentation. Through our approach, areas of social artificial intelligence that have historically benefited from expert knowledge and traditional AI approaches can be combined with more recent proven-effective machine learning algorithms.},
  keywords  = {DTIC, Machine Learning, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Gordon, Andrew S.; Wang, Timothy S.
Narrative Text Generation from Abductive Interpretations Using Axiom-Specific Templates Book Section
In: Mitchell, Alex; Vosmeer, Mirjam (Ed.): Interactive Storytelling, vol. 13138, pp. 71–79, Springer International Publishing, Cham, 2021, ISBN: 978-3-030-92299-3 978-3-030-92300-6.
Links | BibTeX | Tags: DTIC, Narrative, UARC
@incollection{gordon_narrative_2021,
  title     = {Narrative Text Generation from Abductive Interpretations Using Axiom-Specific Templates},
  author    = {Gordon, Andrew S. and Wang, Timothy S.},
  editor    = {Mitchell, Alex and Vosmeer, Mirjam},
  url       = {https://link.springer.com/10.1007/978-3-030-92300-6_7},
  doi       = {10.1007/978-3-030-92300-6_7},
  isbn      = {978-3-030-92299-3 978-3-030-92300-6},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2022-09-22},
  booktitle = {Interactive Storytelling},
  volume    = {13138},
  pages     = {71--79},
  publisher = {Springer International Publishing},
  address   = {Cham},
  keywords  = {DTIC, Narrative, UARC},
  pubstate  = {published},
  tppubtype = {incollection}
}
Chen, Meida; Feng, Andrew; Hou, Yu; McCullough, Kyle; Prasad, Pratusha Bhuvana; Soibelman, Lucio
Ground material classification and for UAV-based photogrammetric 3D data A 2D-3D Hybrid Approach Journal Article
In: 2021.
Abstract | Links | BibTeX | Tags: DTIC, Simulation, UARC
@article{chen_ground_2021,
title = {Ground material classification and for UAV-based photogrammetric 3D data A 2D-3D Hybrid Approach},
author = {Meida Chen and Andrew Feng and Yu Hou and Kyle McCullough and Pratusha Bhuvana Prasad and Lucio Soibelman},
url = {https://arxiv.org/abs/2109.12221},
doi = {10.48550/ARXIV.2109.12221},
eprint = {2109.12221},
eprinttype = {arXiv},
year = {2021},
date = {2021-01-01},
urldate = {2022-09-27},
abstract = {In recent years, photogrammetry has been widely used in many areas to create photorealistic 3D virtual data representing the physical environment. The innovation of small unmanned aerial vehicles (sUAVs) has provided additional high-resolution imaging capabilities with low cost for mapping a relatively large area of interest. These cutting-edge technologies have caught the US Army and Navy's attention for the purpose of rapid 3D battlefield reconstruction, virtual training, and simulations. Our previous works have demonstrated the importance of information extraction from the derived photogrammetric data to create semantic-rich virtual environments (Chen et al., 2019). For example, an increase of simulation realism and fidelity was achieved by segmenting and replacing photogrammetric trees with game-ready tree models. In this work, we further investigated the semantic information extraction problem and focused on the ground material segmentation and object detection tasks. The main innovation of this work was that we leveraged both the original 2D images and the derived 3D photogrammetric data to overcome the challenges faced when using each individual data source. For ground material segmentation, we utilized an existing convolutional neural network architecture (i.e., 3DMV) which was originally designed for segmenting RGB-D sensed indoor data. We improved its performance for outdoor photogrammetric data by introducing a depth pooling layer in the architecture to take into consideration the distance between the source images and the reconstructed terrain model. To test the performance of our improved 3DMV, a ground truth ground material database was created using data from the One World Terrain (OWT) data repository. Finally, a workflow for importing the segmented ground materials into a virtual simulation scene was introduced, and visual results are reported in this paper.},
internal-note = {NOTE(review): title reads "classification and for" -- looks garbled; verify the exact wording against arXiv:2109.12221},
keywords = {DTIC, Simulation, UARC},
pubstate = {published},
tppubtype = {article}
}
Gervits, Felix; Leuski, Anton; Bonial, Claire; Gordon, Carla; Traum, David
A Classification-Based Approach to Automating Human-Robot Dialogue Book Section
In: Marchi, Erik; Siniscalchi, Sabato Marco; Cumani, Sandro; Salerno, Valerio Mario; Li, Haizhou (Ed.): Increasing Naturalness and Flexibility in Spoken Dialogue Interaction: 10th International Workshop on Spoken Dialogue Systems, pp. 115–127, Springer, Singapore, 2021, ISBN: 9789811593239.
Abstract | Links | BibTeX | Tags: Dialogue, DTIC
@incollection{gervits_classification-based_2021,
title = {A Classification-Based Approach to Automating Human-Robot Dialogue},
author = {Felix Gervits and Anton Leuski and Claire Bonial and Carla Gordon and David Traum},
editor = {Erik Marchi and Sabato Marco Siniscalchi and Sandro Cumani and Valerio Mario Salerno and Haizhou Li},
url = {https://doi.org/10.1007/978-981-15-9323-9_10},
doi = {10.1007/978-981-15-9323-9_10},
isbn = {9789811593239},
year = {2021},
date = {2021-01-01},
urldate = {2022-09-23},
booktitle = {Increasing Naturalness and Flexibility in Spoken Dialogue Interaction: 10th International Workshop on Spoken Dialogue Systems},
pages = {115--127},
publisher = {Springer},
address = {Singapore},
series = {Lecture Notes in Electrical Engineering},
abstract = {We present a dialogue system based on statistical classification which was used to automate human-robot dialogue in a collaborative navigation domain. The classifier was trained on a small corpus of multi-floor Wizard-of-Oz dialogue including two wizards: one standing in for dialogue capabilities and another for navigation. Below, we describe the implementation details of the classifier and show how it was used to automate the dialogue wizard. We evaluate our system on several sets of source data from the corpus and find that response accuracy is generally high, even with very limited training data. Another contribution of this work is the novel demonstration of a dialogue manager that uses the classifier to engage in multi-floor dialogue with two different human roles. Overall, this approach is useful for enabling spoken dialogue systems to produce robust and accurate responses to natural language input, and for robots that need to interact with humans in a team setting.},
keywords = {Dialogue, DTIC},
pubstate = {published},
tppubtype = {incollection}
}
He, Zihao; Tavabi, Leili; Lerman, Kristina; Soleymani, Mohammad
Speaker Turn Modeling for Dialogue Act Classification Proceedings Article
In: Findings of the Association for Computational Linguistics: EMNLP 2021, pp. 2150–2157, Association for Computational Linguistics, Punta Cana, Dominican Republic, 2021.
Links | BibTeX | Tags: Dialogue, DTIC, UARC
@inproceedings{he_speaker_2021,
title = {Speaker Turn Modeling for Dialogue Act Classification},
author = {Zihao He and Leili Tavabi and Kristina Lerman and Mohammad Soleymani},
url = {https://aclanthology.org/2021.findings-emnlp.185},
doi = {10.18653/v1/2021.findings-emnlp.185},
year = {2021},
date = {2021-01-01},
urldate = {2022-09-23},
booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2021},
pages = {2150--2157},
publisher = {Association for Computational Linguistics},
address = {Punta Cana, Dominican Republic},
keywords = {Dialogue, DTIC, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Cheng, Junyan; Fostiropoulos, Iordanis; Boehm, Barry; Soleymani, Mohammad
Multimodal Phased Transformer for Sentiment Analysis Proceedings Article
In: Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 2447–2458, Association for Computational Linguistics, Online and Punta Cana, Dominican Republic, 2021.
Links | BibTeX | Tags: DTIC, UARC
@inproceedings{cheng_multimodal_2021,
title = {Multimodal Phased Transformer for Sentiment Analysis},
author = {Junyan Cheng and Iordanis Fostiropoulos and Barry Boehm and Mohammad Soleymani},
url = {https://aclanthology.org/2021.emnlp-main.189},
doi = {10.18653/v1/2021.emnlp-main.189},
year = {2021},
date = {2021-01-01},
urldate = {2022-09-23},
booktitle = {Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing},
pages = {2447--2458},
publisher = {Association for Computational Linguistics},
address = {Online and Punta Cana, Dominican Republic},
keywords = {DTIC, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Bell, Benjamin; Bennett, Winston “Wink”; Kelsey, Elaine; Nye, Benjamin
Attention and Engagement in Virtual Environments: Measuring the Unobservable Proceedings Article
In: 2021.
Links | BibTeX | Tags: AR, DTIC, Machine Learning, UARC, VR
@inproceedings{bell_attention_2021,
title = {Attention and Engagement in Virtual Environments: Measuring the Unobservable},
author = {Benjamin Bell and Winston “Wink” Bennett and Elaine Kelsey and Benjamin Nye},
url = {https://www.xcdsystem.com/iitsec/proceedings/index.cfm?Year=2021&AbID=95758&CID=862#View},
year = {2021},
date = {2021-01-01},
booktitle = {Proceedings of the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC)},
keywords = {AR, DTIC, Machine Learning, UARC, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
Kawano, Seiya; Yoshino, Koichiro; Traum, David; Nakamura, Satoshi
Dialogue Structure Parsing on Multi-Floor Dialogue Based on Multi-Task Learning Proceedings Article
In: 1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction, pp. 21–29, ISCA, 2021.
Abstract | Links | BibTeX | Tags: ARL, Dialogue, DTIC, Natural Language, Virtual Humans
@inproceedings{kawano_dialogue_2021,
title = {Dialogue Structure Parsing on Multi-Floor Dialogue Based on Multi-Task Learning},
author = {Seiya Kawano and Koichiro Yoshino and David Traum and Satoshi Nakamura},
url = {http://www.isca-speech.org/archive/RobotDial_2021/abstracts/4.html},
doi = {10.21437/RobotDial.2021-4},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-15},
booktitle = {1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction},
pages = {21--29},
publisher = {ISCA},
abstract = {A multi-floor dialogue consists of multiple sets of dialogue participants, each conversing within their own floor, but also at least one multicommunicating member who is a participant of multiple floors and coordinating each to achieve a shared dialogue goal. The structure of such dialogues can be complex, involving intentional structure and relations that are within or across floors. In this study, we propose a neural dialogue structure parser based on multi-task learning and an attention mechanism on multi-floor dialogues in a collaborative robot navigation domain. Our experimental results show that our proposed model improved the dialogue structure parsing performance more than those of single models, which are trained on each dialogue structure parsing task in multi-floor dialogues.},
keywords = {ARL, Dialogue, DTIC, Natural Language, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Awada, Mohamad; Zhu, Runhe; Becerik-Gerber, Burcin; Lucas, Gale; Southers, Erroll
An integrated emotional and physiological assessment for VR-based active shooter incident experiments Journal Article
In: Advanced Engineering Informatics, vol. 47, pp. 101227, 2021, ISSN: 14740346.
Links | BibTeX | Tags: DTIC, VR
@article{awada_integrated_2021,
title = {An integrated emotional and physiological assessment for VR-based active shooter incident experiments},
author = {Mohamad Awada and Runhe Zhu and Burcin Becerik-Gerber and Gale Lucas and Erroll Southers},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1474034620301968},
doi = {10.1016/j.aei.2020.101227},
issn = {1474-0346},
year = {2021},
date = {2021-01-01},
urldate = {2022-10-24},
journal = {Advanced Engineering Informatics},
volume = {47},
pages = {101227},
keywords = {DTIC, VR},
pubstate = {published},
tppubtype = {article}
}
0000
Hartholt, Arno; Mozgai, Sharon
From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality Journal Article
In: pp. 35, 0000.
Abstract | BibTeX | Tags: DTIC, MedVR, Virtual Humans, VR
@article{hartholt_combat_nodate,
title = {From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality},
author = {Arno Hartholt and Sharon Mozgai},
pages = {35},
abstract = {Research has documented the efficacy of clinical applications that leverage Virtual Reality (VR) for assessment and treatment purposes across a wide range of domains, including pain, phobias, and posttraumatic stress disorder (PTSD). As the field of Clinical VR matures, it is important to review its origins and examine how these initial explorations have progressed, what gaps remain, and what opportunities the community can pursue. We do this by reflecting on our personal scientific journey against the backdrop of the field in general. In particular, this paper discusses how a clinical research program that was initially designed to deliver trauma-focused VR exposure therapy (VRET) for combat-related PTSD has been evolved to expand its impact and address a wider range of trauma sources. Such trauma sources include sexual trauma and the needs of first responders and healthcare professionals serving on the frontlines of the COVID-19 pandemic. We provide an overview of the field and its general trends, discuss the genesis of our research agenda and its current status, and summarize upcoming opportunities, together with common challenges and lessons learned.},
internal-note = {NOTE(review): required @article fields journal and year are missing -- confirm the publication venue and date for this item},
keywords = {DTIC, MedVR, Virtual Humans, VR},
pubstate = {published},
tppubtype = {article}
}