Publications
Search
Hartholt, Arno; Fast, Ed; Leeds, Andrew; Kim, Kevin; Gordon, Andrew; McCullough, Kyle; Ustun, Volkan; Mozgai, Sharon
Demonstrating the Rapid Integration & Development Environment (RIDE): Embodied Conversational Agent (ECA) and Multiagent Capabilities Inproceedings
In: Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems, pp. 1902–1904, International Foundation for Autonomous Agents and Multiagent Systems, Richland, SC, 2022, ISBN: 978-1-4503-9213-6.
@inproceedings{hartholt_demonstrating_2022,
title = {Demonstrating the Rapid Integration \& Development Environment ({RIDE}): Embodied Conversational Agent ({ECA}) and Multiagent Capabilities},
author = {Arno Hartholt and Ed Fast and Andrew Leeds and Kevin Kim and Andrew Gordon and Kyle McCullough and Volkan Ustun and Sharon Mozgai},
isbn = {978-1-4503-9213-6},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-20},
booktitle = {Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems},
pages = {1902--1904},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Richland, SC},
series = {AAMAS '22},
abstract = {We demonstrate the Rapid Integration \& Development Environment (RIDE), a research and development platform that enables rapid prototyping in support of multiagents and embodied conversational agents. RIDE is based on commodity game engines and includes a flexible architecture, system interoperability, and native support for artificial intelligence and machine learning frameworks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rodrigues, Patrick B.; Xiao, Yijing; Fukumura, Yoko E.; Awada, Mohamad; Aryal, Ashrant; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Ergonomic assessment of office worker postures using 3D automated joint angle assessment Journal Article
In: Advanced Engineering Informatics, vol. 52, pp. 101596, 2022, ISSN: 14740346.
@article{rodrigues_ergonomic_2022,
title = {Ergonomic assessment of office worker postures using {3D} automated joint angle assessment},
author = {Patrick B. Rodrigues and Yijing Xiao and Yoko E. Fukumura and Mohamad Awada and Ashrant Aryal and Burcin Becerik-Gerber and Gale Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1474034622000672},
doi = {10.1016/j.aei.2022.101596},
issn = {1474-0346},
year = {2022},
date = {2022-04-01},
urldate = {2022-09-26},
journal = {Advanced Engineering Informatics},
volume = {52},
pages = {101596},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Fujiwara, Ken; Hoegen, Rens; Gratch, Jonathan; Dunbar, Norah E.
Synchrony facilitates altruistic decision making for non-human avatars Journal Article
In: Computers in Human Behavior, vol. 128, pp. 107079, 2022, ISSN: 07475632.
@article{fujiwara_synchrony_2022,
title = {Synchrony facilitates altruistic decision making for non-human avatars},
author = {Ken Fujiwara and Rens Hoegen and Jonathan Gratch and Norah E. Dunbar},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0747563221004027},
doi = {10.1016/j.chb.2021.107079},
issn = {0747-5632},
year = {2022},
date = {2022-03-01},
urldate = {2022-09-28},
journal = {Computers in Human Behavior},
volume = {128},
pages = {107079},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
de Visser, Ewart J.; Topoglu, Yigit; Joshi, Shawn; Krueger, Frank; Phillips, Elizabeth; Gratch, Jonathan; Tossell, Chad C.; Ayaz, Hasan
Designing Man’s New Best Friend: Enhancing Human-Robot Dog Interaction through Dog-Like Framing and Appearance Journal Article
In: Sensors, vol. 22, no. 3, pp. 1287, 2022, ISSN: 1424-8220.
@article{de_visser_designing_2022,
title = {Designing Man’s New Best Friend: Enhancing Human-Robot Dog Interaction through Dog-Like Framing and Appearance},
author = {Ewart J. de Visser and Yigit Topoglu and Shawn Joshi and Frank Krueger and Elizabeth Phillips and Jonathan Gratch and Chad C. Tossell and Hasan Ayaz},
url = {https://www.mdpi.com/1424-8220/22/3/1287},
doi = {10.3390/s22031287},
issn = {1424-8220},
year = {2022},
date = {2022-02-01},
urldate = {2022-09-28},
journal = {Sensors},
volume = {22},
number = {3},
pages = {1287},
abstract = {To understand how to improve interactions with dog-like robots, we evaluated the importance of “dog-like” framing and physical appearance on interaction, hypothesizing multiple interactive benefits of each. We assessed whether framing Aibo as a puppy (i.e., in need of development) versus simply a robot would result in more positive responses and interactions. We also predicted that adding fur to Aibo would make it appear more dog-like, likable, and interactive. Twenty-nine participants engaged with Aibo in a 2 × 2 (framing × appearance) design by issuing commands to the robot. Aibo and participant behaviors were monitored per second, and evaluated via an analysis of commands issued, an analysis of command blocks (i.e., chains of commands), and using a T-pattern analysis of participant behavior. Participants were more likely to issue the “Come Here” command than other types of commands. When framed as a puppy, participants used Aibo’s dog name more often, praised it more, and exhibited more unique, interactive, and complex behavior with Aibo. Participants exhibited the most smiling and laughing behaviors with Aibo framed as a puppy without fur. Across conditions, after interacting with Aibo, participants felt Aibo was more trustworthy, intelligent, warm, and connected than at their initial meeting. This study shows the benefits of introducing a socially robotic agent with a particular frame and importance on realism (i.e., introducing the robot dog as a puppy) for more interactive engagement.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Marge, Matthew; Espy-Wilson, Carol; Ward, Nigel G.; Alwan, Abeer; Artzi, Yoav; Bansal, Mohit; Blankenship, Gil; Chai, Joyce; Daumé, Hal; Dey, Debadeepta; Harper, Mary; Howard, Thomas; Kennington, Casey; Kruijff-Korbayová, Ivana; Manocha, Dinesh; Matuszek, Cynthia; Mead, Ross; Mooney, Raymond; Moore, Roger K.; Ostendorf, Mari; Pon-Barry, Heather; Rudnicky, Alexander I.; Scheutz, Matthias; Amant, Robert St.; Sun, Tong; Tellex, Stefanie; Traum, David; Yu, Zhou
Spoken language interaction with robots: Recommendations for future research Journal Article
In: Computer Speech & Language, vol. 71, pp. 101255, 2022, ISSN: 08852308.
@article{marge_spoken_2022,
title = {Spoken language interaction with robots: Recommendations for future research},
author = {Matthew Marge and Carol Espy-Wilson and Nigel G. Ward and Abeer Alwan and Yoav Artzi and Mohit Bansal and Gil Blankenship and Joyce Chai and Hal Daumé and Debadeepta Dey and Mary Harper and Thomas Howard and Casey Kennington and Ivana Kruijff-Korbayová and Dinesh Manocha and Cynthia Matuszek and Ross Mead and Raymond Mooney and Roger K. Moore and Mari Ostendorf and Heather Pon-Barry and Alexander I. Rudnicky and Matthias Scheutz and Robert St. Amant and Tong Sun and Stefanie Tellex and David Traum and Zhou Yu},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0885230821000620},
doi = {10.1016/j.csl.2021.101255},
issn = {0885-2308},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-23},
journal = {Computer Speech \& Language},
volume = {71},
pages = {101255},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chawla, Kushal; Lucas, Gale; May, Jonathan; Gratch, Jonathan
Opponent Modeling in Negotiation Dialogues by Related Data Adaptation Inproceedings
In: Findings of the Association for Computational Linguistics: NAACL 2022, pp. 661–674, Association for Computational Linguistics, Seattle, United States, 2022.
@inproceedings{chawla_opponent_2022,
  author    = {Kushal Chawla and Gale Lucas and Jonathan May and Jonathan Gratch},
  title     = {Opponent Modeling in Negotiation Dialogues by Related Data Adaptation},
  booktitle = {Findings of the Association for Computational Linguistics: NAACL 2022},
  pages     = {661--674},
  publisher = {Association for Computational Linguistics},
  address   = {Seattle, United States},
  doi       = {10.18653/v1/2022.findings-naacl.50},
  url       = {https://aclanthology.org/2022.findings-naacl.50},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-09-26},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Baarslag, Tim; Kaisers, Michael; Gerding, Enrico H.; Jonker, Catholijn M.; Gratch, Jonathan
In: Karagözoğlu, Emin; Hyndman, Kyle B. (Ed.): Bargaining, pp. 387–406, Springer International Publishing, Cham, 2022, ISBN: 978-3-030-76665-8 978-3-030-76666-5.
@incollection{baarslag_self-sufficient_2022,
  author    = {Tim Baarslag and Michael Kaisers and Enrico H. Gerding and Catholijn M. Jonker and Jonathan Gratch},
  title     = {Self-sufficient, Self-directed, and Interdependent Negotiation Systems: A Roadmap Toward Autonomous Negotiation Agents},
  editor    = {Emin Karagözoğlu and Kyle B. Hyndman},
  booktitle = {Bargaining},
  pages     = {387--406},
  publisher = {Springer International Publishing},
  address   = {Cham},
  isbn      = {978-3-030-76665-8 978-3-030-76666-5},
  doi       = {10.1007/978-3-030-76666-5_18},
  url       = {https://link.springer.com/10.1007/978-3-030-76666-5_18},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-09-27},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Hou, Yu; Chen, Meida; Volk, Rebekka; Soibelman, Lucio
In: Journal of Building Engineering, vol. 45, pp. 103380, 2022, ISSN: 23527102.
@article{hou_investigation_2022,
title = {Investigation on performance of {RGB} point cloud and thermal information data fusion for {3D} building thermal map modeling using aerial images under different experimental conditions},
author = {Yu Hou and Meida Chen and Rebekka Volk and Lucio Soibelman},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2352710221012389},
doi = {10.1016/j.jobe.2021.103380},
issn = {2352-7102},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-28},
journal = {Journal of Building Engineering},
volume = {45},
pages = {103380},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Paun, Silviu; Artstein, Ron; Poesio, Massimo
Statistical Methods for Annotation Analysis Book
Springer International Publishing, Cham, 2022, ISBN: 978-3-031-03753-5 978-3-031-03763-4.
@book{paun_statistical_2022,
  author    = {Silviu Paun and Ron Artstein and Massimo Poesio},
  title     = {Statistical Methods for Annotation Analysis},
  publisher = {Springer International Publishing},
  address   = {Cham},
  isbn      = {978-3-031-03753-5 978-3-031-03763-4},
  doi       = {10.1007/978-3-031-03763-4},
  url       = {https://link.springer.com/10.1007/978-3-031-03763-4},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-09-28},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {book}
}
Aster, Hans-Christoph; Romanos, Marcel; Walitza, Susanne; Gerlach, Manfred; Mühlberger, Andreas; Rizzo, Albert; Andreatta, Marta; Hasenauer, Natalie; Hartrampf, Philipp E.; Nerlich, Kai; Reiners, Christoph; Lorenz, Reinhard; Buck, Andreas K.; Deserno, Lorenz
In: Frontiers in Psychiatry, vol. 13, 2022, ISSN: 1664-0640.
@article{aster_responsivity_2022,
title = {Responsivity of the Striatal Dopamine System to Methylphenidate—A Within-Subject I-123-β-CIT-SPECT Study in Male Children and Adolescents With Attention-Deficit/Hyperactivity Disorder},
author = {Hans-Christoph Aster and Marcel Romanos and Susanne Walitza and Manfred Gerlach and Andreas Mühlberger and Albert Rizzo and Marta Andreatta and Natalie Hasenauer and Philipp E. Hartrampf and Kai Nerlich and Christoph Reiners and Reinhard Lorenz and Andreas K. Buck and Lorenz Deserno},
url = {https://www.frontiersin.org/articles/10.3389/fpsyt.2022.804730},
doi = {10.3389/fpsyt.2022.804730},
issn = {1664-0640},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-13},
journal = {Frontiers in Psychiatry},
volume = {13},
abstract = {Background: Methylphenidate (MPH) is the first-line pharmacological treatment of attention-deficit/hyperactivity disorder (ADHD). MPH binds to the dopamine (DA) transporter (DAT), which has high density in the striatum. Assessments of the striatal dopamine transporter by single positron emission computed tomography (SPECT) in childhood and adolescent patients are rare but can provide insight on how the effects of MPH affect DAT availability. The aim of our within-subject study was to investigate the effect of MPH on DAT availability and how responsivity to MPH in DAT availability is linked to clinical symptoms and cognitive functioning. Methods: Thirteen adolescent male patients (9–16 years) with a diagnosis of ADHD according to the DSM-IV and long-term stimulant medication (for at least 6 months) with MPH were assessed twice within 7 days using SPECT after application of I-123-β-CIT to examine DAT binding potential (DAT BP). SPECT measures took place in an on- and off-MPH status balanced for order across participants. A virtual reality continuous performance test was performed at each time point. Further clinical symptoms were assessed for baseline off-MPH. Results: On-MPH status was associated with a highly significant change (−29.9\%) of striatal DAT BP as compared to off-MPH (t = −4.12},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
DiNinni, Richard; Rizzo, Albert
Sensing Human Signals of Motivation Processes During STEM Tasks Inproceedings
In: Rodrigo, Maria Mercedes; Matsuda, Noburu; Cristea, Alexandra I.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium, pp. 163–167, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-11647-6.
@inproceedings{dininni_sensing_2022,
  author    = {Richard DiNinni and Albert Rizzo},
  title     = {Sensing Human Signals of Motivation Processes During STEM Tasks},
  editor    = {Maria Mercedes Rodrigo and Noburu Matsuda and Alexandra I. Cristea and Vania Dimitrova},
  booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium},
  series    = {Lecture Notes in Computer Science},
  pages     = {163--167},
  publisher = {Springer International Publishing},
  address   = {Cham},
  isbn      = {978-3-031-11647-6},
  doi       = {10.1007/978-3-031-11647-6_28},
  year      = {2022},
  date      = {2022-01-01},
  abstract  = {This paper outlines the linking of a multi-modal sensing platform with an Intelligent Tutoring System to perceive the motivational state of the learner during STEM tasks. Motivation is a critical element to learning but receives little attention in comparison to strategies related to cognitive processes. The EMPOWER project has developed a novel platform that offers researchers an opportunity to capture a learner’s multi-modal behavioral signals to develop models of motivation problems that can be used to develop best practice strategies for instructional systems.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Stokes, Jared D.; Rizzo, Albert; Geng, Joy J.; Schweitzer, Julie B.
Measuring Attentional Distraction in Children With ADHD Using Virtual Reality Technology With Eye-Tracking Journal Article
In: Frontiers in Virtual Reality, vol. 3, 2022, ISSN: 2673-4192.
@article{stokes_measuring_2022,
title = {Measuring Attentional Distraction in Children With ADHD Using Virtual Reality Technology With Eye-Tracking},
author = {Jared D. Stokes and Albert Rizzo and Joy J. Geng and Julie B. Schweitzer},
url = {https://www.frontiersin.org/articles/10.3389/frvir.2022.855895},
doi = {10.3389/frvir.2022.855895},
issn = {2673-4192},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-13},
journal = {Frontiers in Virtual Reality},
volume = {3},
abstract = {Objective: Distractions inordinately impair attention in children with Attention-Deficit Hyperactivity Disorder (ADHD) but examining this behavior under real-life conditions poses a challenge for researchers and clinicians. Virtual reality (VR) technologies may mitigate the limitations of traditional laboratory methods by providing a more ecologically relevant experience. The use of eye-tracking measures to assess attentional functioning in a VR context in ADHD is novel. In this proof of principle project, we evaluate the temporal dynamics of distraction via eye-tracking measures in a VR classroom setting with 20 children diagnosed with ADHD between 8 and 12 years of age.Method: We recorded continuous eye movements while participants performed math, Stroop, and continuous performance test (CPT) tasks with a series of “real-world” classroom distractors presented. We analyzed the impact of the distractors on rates of on-task performance and on-task, eye-gaze (i.e., looking at a classroom whiteboard) versus off-task eye-gaze (i.e., looking away from the whiteboard).Results: We found that while children did not always look at distractors themselves for long periods of time, the presence of a distractor disrupted on-task gaze at task-relevant whiteboard stimuli and lowered rates of task performance. This suggests that children with attention deficits may have a hard time returning to tasks once those tasks are interrupted, even if the distractor itself does not hold attention. Eye-tracking measures within the VR context can reveal rich information about attentional disruption.Conclusions: Leveraging virtual reality technology in combination with eye-tracking measures is well-suited to advance the understanding of mechanisms underlying attentional impairment in naturalistic settings. 
Assessment within these immersive and well-controlled simulated environments provides new options for increasing our understanding of distractibility and its potential impact on the development of interventions for children with ADHD.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Talbot, Thomas Brett; Chinara, Chinmay
In: 2022.
@inproceedings{brett_talbot_open_2022,
title = {Open Medical Gesture: An Open-Source Experiment in Naturalistic Physical Interactions for Mixed and Virtual Reality Simulations},
author = {Thomas Brett Talbot and Chinmay Chinara},
url = {https://openaccess.cms-conferences.org/#/publications/book/978-1-958651-26-1/article/978-1-958651-26-1_0},
doi = {10.54941/ahfe1002054},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-13},
internal-note = {NOTE(review): required booktitle field is missing — this is an AHFE 2022 open-access proceedings paper (see url/doi); confirm and add the proceedings title},
abstract = {Mixed (MR) and Virtual Reality (VR) simulations are hampered by requirements for hand controllers or attempts to perseverate in use of two-dimensional computer interface paradigms from the 1980s. From our efforts to produce more naturalistic interactions for combat medic training for the military, we have developed an open-source toolkit that enables direct hand controlled responsive interactions that is sensor independent and can function with depth sensing cameras, webcams or sensory gloves. From this research and review of current literature, we have discerned several best approaches for hand-based human computer interactions which provide intuitive, responsive, useful, and low frustration experiences for VR users. The center of an effective gesture system is a universal hand model that can map to inputs from several different kinds of sensors rather than depending on a specific commercial product. Parts of the hand are effectors in simulation space with a physics-based model. Therefore, translational and rotational forces from the hands will impact physical objects in VR which varies based on the mass of the virtual objects. We incorporate computer code w/ objects, calling them “Smart Objects”, which allows such objects to have movement properties and collision detection for expected manipulation. Examples of smart objects include scissors, a ball, a turning knob, a moving lever, or a human figure with moving limbs. Articulation points contain collision detectors and code to assist in expected hand actions. We include a library of more than 40 Smart Objects in the toolkit. Thus, is it possible to throw a ball, hit that ball with a bat, cut a bandage, turn on a ventilator or to lift and inspect a human arm.We mediate the interaction of the hands with virtual objects. Hands often violate the rules of a virtual world simply by passing through objects. One must interpret user intent. 
This can be achieved by introducing stickiness of the hands to objects. If the human’s hands overshoot an object, we place the hand onto that object’s surface unless the hand passes the object by a significant distance. We also make hands and fingers contact an object according to the object’s contours and do not allow fingers to sink into the interior of an object. Haptics, or a sense of physical resistance and tactile sensation from contacting physical objects is a supremely difficult technical challenge and is an expensive pursuit. Our approach ignores true haptics, but we have experimented with an alternative approach, called audio tactile synesthesia where we substitute the sensation of touch for that of sound. The idea is to associate parts of each hand with a tone of a specific frequency upon contacting objects. The attack rate of the sound envelope varies with the velocity of contact and hardness of the object being ‘touched’. Such sounds can feel softer or harder depending on the nature of ‘touch’ being experienced. This substitution technique can provide tactile feedback through indirect, yet still naturalistic means. The artificial intelligence (AI) technique to determine discrete hand gestures and motions within the physical space is a special form of AI called Long Short Term Memory (LSTM). LSTM allows much faster and flexible recognition than other machine learning approaches. LSTM is particularly effective with points in motion. Latency of recognition is very low. In addition to LSTM, we employ other synthetic vision & object recognition AI to the discrimination of real-world objects. This allows for methods to conduct virtual simulations. For example, it is possible to pick up a virtual syringe and inject a medication into a virtual patient through hand motions. We track the hand points to contact with the virtual syringe. We also detect when the hand is compressing the syringe plunger. 
We could also use virtual medications & instruments on human actors or manikins, not just on virtual objects. With object recognition AI, we can place a syringe on a tray in the physical world. The human user can pick up the syringe and use it on a virtual patient. Thus, we are able to blend physical and virtual simulation together seamlessly in a highly intuitive and naturalistic manner.The techniques and technologies explained here represent a baseline capability whereby interacting in mixed and virtual reality can now be much more natural and intuitive than it has ever been. We have now passed a threshold where we can do away with game controllers and magnetic trackers for VR. This advancement will contribute to greater adoption of VR solutions. To foster this, our team has committed to freely sharing these technologies for all purposes and at no cost as an open-source tool. We encourage the scientific, research, educational and medical communities to adopt these resources and determine their effectiveness and utilize these tools and practices to grow the body of useful VR applications.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Davis, Matt; Nye, Benjamin; Sinatra, Gale; Swartout, William; Sjöberg, Molly; Porter, Molly; Nelson, David; Kennedy, Alana; Herrick, Imogen; Weeks, Danaan DeNeve; Lindsey, Emily
Designing scientifically-grounded paleoart for augmented reality at La Brea Tar Pits Journal Article
In: Palaeontol Electron, 2022, ISSN: 19353952, 10948074.
@article{davis_designing_2022,
title = {Designing scientifically-grounded paleoart for augmented reality at {La Brea Tar Pits}},
author = {Matt Davis and Benjamin Nye and Gale Sinatra and William Swartout and Molly Sjöberg and Molly Porter and David Nelson and Alana Kennedy and Imogen Herrick and Danaan DeNeve Weeks and Emily Lindsey},
url = {https://palaeo-electronica.org/content/2022/3524-la-brea-tar-pits-paleoart},
doi = {10.26879/1191},
issn = {1935-3952, 1094-8074},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-15},
journal = {Palaeontologia Electronica},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Zhou, Jincheng; Ustun, Volkan
PySigma: Towards Enhanced Grand Unification for the Sigma Cognitive Architecture Incollection
In: Goertzel, Ben; Iklé, Matthew; Potapov, Alexey (Ed.): Artificial General Intelligence, vol. 13154, pp. 355–366, Springer International Publishing, Cham, 2022, ISBN: 978-3-030-93757-7 978-3-030-93758-4.
@incollection{zhou_pysigma_2022,
title = {{PySigma}: Towards Enhanced Grand Unification for the {Sigma} Cognitive Architecture},
author = {Jincheng Zhou and Volkan Ustun},
editor = {Ben Goertzel and Matthew Iklé and Alexey Potapov},
url = {https://link.springer.com/10.1007/978-3-030-93758-4_36},
doi = {10.1007/978-3-030-93758-4_36},
isbn = {978-3-030-93757-7 978-3-030-93758-4},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-21},
booktitle = {Artificial General Intelligence},
volume = {13154},
pages = {355--366},
publisher = {Springer International Publishing},
address = {Cham},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Tran, Minh; Bradley, Ellen; Matvey, Michelle; Woolley, Joshua; Soleymani, Mohammad
Modeling Dynamics of Facial Behavior for Mental Health Assessment Inproceedings
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–5, IEEE, Jodhpur, India, 2021, ISBN: 978-1-66543-176-7.
@inproceedings{tran_modeling_2021,
  author    = {Minh Tran and Ellen Bradley and Michelle Matvey and Joshua Woolley and Mohammad Soleymani},
  title     = {Modeling Dynamics of Facial Behavior for Mental Health Assessment},
  booktitle = {2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)},
  pages     = {1--5},
  publisher = {IEEE},
  address   = {Jodhpur, India},
  isbn      = {978-1-66543-176-7},
  doi       = {10.1109/FG52635.2021.9666955},
  url       = {https://ieeexplore.ieee.org/document/9666955/},
  year      = {2021},
  date      = {2021-12-01},
  urldate   = {2022-09-23},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Yin, Yufeng; Lu, Liupei; Wu, Yizhen; Soleymani, Mohammad
Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection Inproceedings
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–8, IEEE, Jodhpur, India, 2021, ISBN: 978-1-66543-176-7.
@inproceedings{yin_self-supervised_2021,
  author    = {Yufeng Yin and Liupei Lu and Yizhen Wu and Mohammad Soleymani},
  title     = {Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection},
  booktitle = {2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)},
  pages     = {1--8},
  publisher = {IEEE},
  address   = {Jodhpur, India},
  isbn      = {978-1-66543-176-7},
  doi       = {10.1109/FG52635.2021.9667048},
  url       = {https://ieeexplore.ieee.org/document/9667048/},
  year      = {2021},
  date      = {2021-12-01},
  urldate   = {2022-09-23},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ionescu, Alina; Van Daele, Tom; Rizzo, Albert; Blair, Carolyn; Best, Paul
360° Videos for Immersive Mental Health Interventions: a Systematic Review Journal Article
In: J. technol. behav. sci., vol. 6, no. 4, pp. 631–651, 2021, ISSN: 2366-5963.
@article{ionescu_360_2021,
title = {360° Videos for Immersive Mental Health Interventions: a Systematic Review},
author = {Alina Ionescu and Tom Van Daele and Albert Rizzo and Carolyn Blair and Paul Best},
url = {https://doi.org/10.1007/s41347-021-00221-7},
doi = {10.1007/s41347-021-00221-7},
issn = {2366-5963},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-13},
journal = {Journal of Technology in Behavioral Science},
volume = {6},
number = {4},
pages = {631--651},
abstract = {Virtual reality is increasingly recognized as a powerful method for clinical interventions in the mental health field, but has yet to achieve mainstream adoption in routine mental healthcare settings. A similar, yet slightly different technology, immersive 360° videos might have the potential to cover this gap, by requiring both lower costs and less technical skills to construct and operate such virtual environments. This systematic review therefore aims to identify, evaluate, and summarize mental health interventions using immersive 360° videos to support an understanding of their implementation in daily clinical practice. The quality of the 14 selected studies was evaluated using a critical appraisal tool, addressing populations with clinical levels of psychopathological symptoms, somatic conditions associated with psychological implications, and other at-risk groups. Immersive 360° videos successfully increased users’ feelings of presence, given their realistic features, and therefore yielded positive outcomes in clinical interventions where presence is considered as an essential precondition. Because the technical skills required to create immersive 360° video footage are fairly limited, most of the interventions using this approach have been created by mental health researchers or clinicians themselves. Immersive 360° videos are still in an early phase of implementation as a tool for clinical interventions for mental health, resulting in high heterogeneity in focus, procedures, and research designs. An important next step for making use of this technology may therefore involve the creation of standardized procedures, as a means to increase the quality of research and evidence-based interventions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Liu, Lixing; Gurney, Nikolos; McCullough, Kyle; Ustun, Volkan
Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations Inproceedings
In: 2021 Winter Simulation Conference (WSC), pp. 1–12, IEEE, Phoenix, AZ, USA, 2021, ISBN: 978-1-66543-311-2.
@inproceedings{liu_graph_2021,
  author    = {Lixing Liu and Nikolos Gurney and Kyle McCullough and Volkan Ustun},
  title     = {Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations},
  booktitle = {2021 Winter Simulation Conference (WSC)},
  pages     = {1--12},
  publisher = {IEEE},
  address   = {Phoenix, AZ, USA},
  isbn      = {978-1-66543-311-2},
  doi       = {10.1109/WSC52266.2021.9715433},
  url       = {https://ieeexplore.ieee.org/document/9715433/},
  year      = {2021},
  date      = {2021-12-01},
  urldate   = {2022-09-21},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Li, Jiaman; Villegas, Ruben; Ceylan, Duygu; Yang, Jimei; Kuang, Zhengfei; Li, Hao; Zhao, Yajie
Task-Generic Hierarchical Human Motion Prior using VAEs Inproceedings
In: 2021 International Conference on 3D Vision (3DV), pp. 771–781, IEEE, London, United Kingdom, 2021, ISBN: 978-1-66542-688-6.
@inproceedings{li_task-generic_2021,
title = {Task-Generic Hierarchical Human Motion Prior using {VAEs}},
author = {Jiaman Li and Ruben Villegas and Duygu Ceylan and Jimei Yang and Zhengfei Kuang and Hao Li and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9665881/},
doi = {10.1109/3DV53792.2021.00086},
isbn = {978-1-66542-688-6},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-22},
booktitle = {2021 International Conference on 3D Vision (3DV)},
pages = {771--781},
publisher = {IEEE},
address = {London, United Kingdom},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2021
Toyoda, Yuushi; Lucas, Gale; Gratch, Jonathan
Predicting Worker Accuracy from Nonverbal Behaviour: Benefits and Potential for Algorithmic Bias Inproceedings
In: Companion Publication of the 2021 International Conference on Multimodal Interaction, pp. 25–30, ACM, Montreal QC Canada, 2021, ISBN: 978-1-4503-8471-1.
Links | BibTeX | Tags: DTIC, UARC
@inproceedings{toyoda_predicting_2021,
  title     = {Predicting Worker Accuracy from Nonverbal Behaviour: Benefits and Potential for Algorithmic Bias},
  author    = {Yuushi Toyoda and Gale Lucas and Jonathan Gratch},
  url       = {https://dl.acm.org/doi/10.1145/3461615.3485427},
  doi       = {10.1145/3461615.3485427},
  isbn      = {978-1-4503-8471-1},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2022-09-28},
  booktitle = {Companion Publication of the 2021 International Conference on Multimodal Interaction},
  pages     = {25--30},
  publisher = {ACM},
  address   = {Montreal QC Canada},
  keywords  = {DTIC, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Liu, Shichen; Zhou, Yichao; Zhao, Yajie
VaPiD: A Rapid Vanishing Point Detector via Learned Optimizers Inproceedings
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 12839–12848, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{liu_vapid_2021,
  title     = {{VaPiD}: A Rapid Vanishing Point Detector via Learned Optimizers},
  author    = {Shichen Liu and Yichao Zhou and Yajie Zhao},
  url       = {https://ieeexplore.ieee.org/document/9711313/},
  doi       = {10.1109/ICCV48922.2021.01262},
  isbn      = {978-1-66542-812-5},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2022-09-22},
  booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
  pages     = {12839--12848},
  publisher = {IEEE},
  address   = {Montreal, QC, Canada},
  keywords  = {DTIC, UARC, VGL},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Li, Tianye; Liu, Shichen; Bolkart, Timo; Liu, Jiayi; Li, Hao; Zhao, Yajie
Topologically Consistent Multi-View Face Inference Using Volumetric Sampling Inproceedings
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 3804–3814, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{li_topologically_2021,
  title     = {Topologically Consistent Multi-View Face Inference Using Volumetric Sampling},
  author    = {Tianye Li and Shichen Liu and Timo Bolkart and Jiayi Liu and Hao Li and Yajie Zhao},
  url       = {https://ieeexplore.ieee.org/document/9711264/},
  doi       = {10.1109/ICCV48922.2021.00380},
  isbn      = {978-1-66542-812-5},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2022-09-22},
  booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
  pages     = {3804--3814},
  publisher = {IEEE},
  address   = {Montreal, QC, Canada},
  keywords  = {DTIC, UARC, VGL},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Yin, Yufeng; Lu, Liupei; Xiao, Yao; Xu, Zhi; Cai, Kaijie; Jiang, Haonan; Gratch, Jonathan; Soleymani, Mohammad
Contrastive Learning for Domain Transfer in Cross-Corpus Emotion Recognition Inproceedings
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-019-0.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{yin_contrastive_2021,
  title     = {Contrastive Learning for Domain Transfer in Cross-Corpus Emotion Recognition},
  author    = {Yufeng Yin and Liupei Lu and Yao Xiao and Zhi Xu and Kaijie Cai and Haonan Jiang and Jonathan Gratch and Mohammad Soleymani},
  url       = {https://ieeexplore.ieee.org/document/9597453/},
  doi       = {10.1109/ACII52823.2021.9597453},
  isbn      = {978-1-66540-019-0},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-23},
  booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)},
  pages     = {1--8},
  publisher = {IEEE},
  address   = {Nara, Japan},
  keywords  = {DTIC, Emotions, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lugrin, Birgit; Pelachaud, Catherine; Traum, David (Ed.)
The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 1: Methods, Behavior, Cognition Book
1, ACM, New York, NY, USA, 2021, ISBN: 978-1-4503-8720-0.
Links | BibTeX | Tags: Dialogue, Virtual Humans
@book{lugrin_handbook_2021,
  title     = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 1: Methods, Behavior, Cognition},
  editor    = {Birgit Lugrin and Catherine Pelachaud and David Traum},
  url       = {https://dl.acm.org/doi/book/10.1145/3477322},
  doi       = {10.1145/3477322},
  isbn      = {978-1-4503-8720-0},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-23},
  publisher = {ACM},
  address   = {New York, NY, USA},
  edition   = {1},
  keywords  = {Dialogue, Virtual Humans},
  pubstate  = {published},
  tppubtype = {book}
}
Chaffey, Patricia; Traum, David
Identity models for role-play dialogue characters Inproceedings
In: 2021.
Links | BibTeX | Tags: Dialogue, DTIC, UARC
@inproceedings{chaffey_identity_2021,
  title     = {Identity models for role-play dialogue characters},
  author    = {Patricia Chaffey and David Traum},
  url       = {http://semdial.org/anthology/papers/Z/Z21/Z21-4022/},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-23},
  booktitle = {Proceedings of the 25th Workshop on the Semantics and Pragmatics of Dialogue (SemDial 2021)},
  internal-note = {booktitle inferred from the SemDial Z21 anthology URL -- verify against the published proceedings},
  keywords  = {Dialogue, DTIC, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Bonial, Claire; Abrams, Mitchell; Baker, Anthony L.; Hudson, Taylor; Lukin, Stephanie; Traum, David; Voss, Clare
Context is key: Annotating situated dialogue relations in multi-floor dialogue Inproceedings
In: 2021.
Links | BibTeX | Tags: Dialogue, DTIC
@inproceedings{bonial_context_2021,
  title     = {Context is key: Annotating situated dialogue relations in multi-floor dialogue},
  author    = {Claire Bonial and Mitchell Abrams and Anthony L. Baker and Taylor Hudson and Stephanie Lukin and David Traum and Clare Voss},
  url       = {http://semdial.org/anthology/papers/Z/Z21/Z21-3006/},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-23},
  booktitle = {Proceedings of the 25th Workshop on the Semantics and Pragmatics of Dialogue (SemDial 2021)},
  internal-note = {booktitle inferred from the SemDial Z21 anthology URL -- verify against the published proceedings},
  keywords  = {Dialogue, DTIC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale M.; Gratch, Jonathan
Pandemic Panic: The Effect of Disaster-Related Stress on Negotiation Outcomes Inproceedings
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 148–155, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, Social Simulation, UARC
@inproceedings{mell_pandemic_2021,
  title     = {Pandemic Panic: The Effect of Disaster-Related Stress on Negotiation Outcomes},
  author    = {Johnathan Mell and Gale M. Lucas and Jonathan Gratch},
  url       = {https://dl.acm.org/doi/10.1145/3472306.3478353},
  doi       = {10.1145/3472306.3478353},
  isbn      = {978-1-4503-8619-7},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-26},
  booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
  pages     = {148--155},
  publisher = {ACM},
  address   = {Virtual Event Japan},
  keywords  = {DTIC, Social Simulation, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chawla, Kushal; Clever, Rene; Ramirez, Jaysa; Lucas, Gale; Gratch, Jonathan
Towards Emotion-Aware Agents For Negotiation Dialogues Inproceedings
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-019-0.
Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{chawla_towards_2021,
  title     = {Towards Emotion-Aware Agents For Negotiation Dialogues},
  author    = {Kushal Chawla and Rene Clever and Jaysa Ramirez and Gale Lucas and Jonathan Gratch},
  url       = {https://ieeexplore.ieee.org/document/9597427/},
  doi       = {10.1109/ACII52823.2021.9597427},
  isbn      = {978-1-66540-019-0},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-27},
  booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)},
  pages     = {1--8},
  publisher = {IEEE},
  address   = {Nara, Japan},
  keywords  = {DTIC, Emotions, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ghasem, Wesley; Valenzuela, Joshua; Saxon, Leslie A.
Player Tracking Technology and Data for Injury Prevention in the National Football League Journal Article
In: Curr Sports Med Rep, vol. 20, no. 9, pp. 436–439, 2021, ISSN: 1537-8918.
@article{ghasem_player_2021,
  title     = {Player Tracking Technology and Data for Injury Prevention in the National Football League},
  author    = {Wesley Ghasem and Joshua Valenzuela and Leslie A. Saxon},
  url       = {https://journals.lww.com/10.1249/JSR.0000000000000873},
  doi       = {10.1249/JSR.0000000000000873},
  issn      = {1537-8918},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-28},
  journal   = {Current Sports Medicine Reports},
  volume    = {20},
  number    = {9},
  pages     = {436--439},
  keywords  = {CBC},
  pubstate  = {published},
  tppubtype = {article}
}
Gratch, Jonathan; Lucas, Gale
Rapport Between Humans and Socially Interactive Agents Incollection
In: Lugrin, Birgit; Pelachaud, Catherine; Traum, David (Ed.): The Handbook on Socially Interactive Agents, pp. 433–462, ACM, New York, NY, USA, 2021, ISBN: 978-1-4503-8720-0.
Links | BibTeX | Tags: Virtual Humans
@incollection{gratch_rapport_2021,
  title     = {Rapport Between Humans and Socially Interactive Agents},
  author    = {Jonathan Gratch and Gale Lucas},
  editor    = {Birgit Lugrin and Catherine Pelachaud and David Traum},
  url       = {https://dl.acm.org/doi/10.1145/3477322.3477335},
  doi       = {10.1145/3477322.3477335},
  isbn      = {978-1-4503-8720-0},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-28},
  booktitle = {The Handbook on Socially Interactive Agents},
  pages     = {433--462},
  publisher = {ACM},
  address   = {New York, NY, USA},
  edition   = {1},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Terada, Kazunori; Okazoe, Mitsuki; Gratch, Jonathan
Effect of politeness strategies in dialogue on negotiation outcomes Inproceedings
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 195–202, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{terada_effect_2021,
  title     = {Effect of politeness strategies in dialogue on negotiation outcomes},
  author    = {Kazunori Terada and Mitsuki Okazoe and Jonathan Gratch},
  url       = {https://dl.acm.org/doi/10.1145/3472306.3478336},
  doi       = {10.1145/3472306.3478336},
  isbn      = {978-1-4503-8619-7},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-28},
  booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
  pages     = {195--202},
  publisher = {ACM},
  address   = {Virtual Event Japan},
  keywords  = {DTIC, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan; Boberg, Jill; DeVault, David; Kim, Peter; Lucas, Gale
Using Intelligent Agents to Examine Gender in Negotiations Inproceedings
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 90–97, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{johnson_using_2021,
  title     = {Using Intelligent Agents to Examine Gender in Negotiations},
  author    = {Emmanuel Johnson and Jonathan Gratch and Jill Boberg and David DeVault and Peter Kim and Gale Lucas},
  url       = {https://dl.acm.org/doi/10.1145/3472306.3478348},
  doi       = {10.1145/3472306.3478348},
  isbn      = {978-1-4503-8619-7},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-28},
  booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
  pages     = {90--97},
  publisher = {ACM},
  address   = {Virtual Event Japan},
  keywords  = {DTIC, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan
Comparing The Accuracy of Frequentist and Bayesian Models in Human-Agent Negotiation Inproceedings
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 139–144, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{johnson_comparing_2021,
  title     = {Comparing The Accuracy of Frequentist and {Bayesian} Models in Human-Agent Negotiation},
  author    = {Emmanuel Johnson and Jonathan Gratch},
  url       = {https://dl.acm.org/doi/10.1145/3472306.3478354},
  doi       = {10.1145/3472306.3478354},
  isbn      = {978-1-4503-8619-7},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-28},
  booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
  pages     = {139--144},
  publisher = {ACM},
  address   = {Virtual Event Japan},
  keywords  = {DTIC, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Sato, Motoaki; Terada, Kazunori; Gratch, Jonathan
Visualization of social emotional appraisal process of an agent Inproceedings
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW), pp. 1–2, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-021-3.
Links | BibTeX | Tags: Emotions, Virtual Humans
@inproceedings{sato_visualization_2021,
  title     = {Visualization of social emotional appraisal process of an agent},
  author    = {Motoaki Sato and Kazunori Terada and Jonathan Gratch},
  url       = {https://ieeexplore.ieee.org/document/9666329/},
  doi       = {10.1109/ACIIW52867.2021.9666329},
  isbn      = {978-1-66540-021-3},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-28},
  booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)},
  pages     = {1--2},
  publisher = {IEEE},
  address   = {Nara, Japan},
  keywords  = {Emotions, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Schuster, Bianca A.; Fraser, Dagmar S.; Bosch, Jasper J. F.; Sowden, Sophie; Gordon, Andrew S.; Huh, Dongsung; Cook, Jennifer L.
Kinematics and observer-animator kinematic similarity predict mental state attribution from Heider–Simmel style animations Journal Article
In: Sci Rep, vol. 11, no. 1, pp. 18266, 2021, ISSN: 2045-2322.
Abstract | Links | BibTeX | Tags:
@article{schuster_kinematics_2021,
  title     = {Kinematics and observer-animator kinematic similarity predict mental state attribution from {Heider--Simmel} style animations},
  author    = {Bianca A. Schuster and Dagmar S. Fraser and Jasper J. F. Bosch and Sophie Sowden and Andrew S. Gordon and Dongsung Huh and Jennifer L. Cook},
  url       = {https://www.nature.com/articles/s41598-021-97660-2},
  doi       = {10.1038/s41598-021-97660-2},
  issn      = {2045-2322},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-29},
  journal   = {Scientific Reports},
  volume    = {11},
  number    = {1},
  pages     = {18266},
  abstract  = {The ability to ascribe mental states, such as beliefs or desires to oneself and other individuals forms an integral part of everyday social interaction. Animations tasks, in which observers watch videos of interacting triangles, have been extensively used to test mental state attribution in a variety of clinical populations. Compared to control participants, individuals with clinical conditions such as autism typically offer less appropriate mental state descriptions of such videos. Recent research suggests that stimulus kinematics and movement similarity (between the video and the observer) may contribute to mental state attribution difficulties. Here we present a novel adaptation of the animations task, suitable to track and compare animation generator and -observer kinematics. Using this task and a population-derived stimulus database, we confirmed the hypotheses that an animation’s jerk and jerk similarity between observer and animator significantly contribute to the correct identification of an animation. By employing random forest analysis to explore other stimulus characteristics, we reveal that other indices of movement similarity, including acceleration- and rotation-based similarity, also predict performance. Our results highlight the importance of movement similarity between observer and animator and raise new questions about reasons why some clinical populations exhibit difficulties with this task.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Rizzo, Albert “Skip”; Goodwin, Grace J.; De Vito, Alyssa N.; Bell, Joshua D.
Recent advances in virtual reality and psychology: Introduction to the special issue. Journal Article
In: Translational Issues in Psychological Science, vol. 7, no. 3, pp. 213–217, 2021, ISSN: 2332-2179, 2332-2136.
Links | BibTeX | Tags: DTIC, MedVR, VR
@article{rizzo_recent_2021,
  title     = {Recent advances in virtual reality and psychology: Introduction to the special issue},
  author    = {Rizzo, Albert “Skip” and Goodwin, Grace J. and De Vito, Alyssa N. and Bell, Joshua D.},
  url       = {http://doi.apa.org/getdoi.cfm?doi=10.1037/tps0000316},
  doi       = {10.1037/tps0000316},
  issn      = {2332-2179, 2332-2136},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-13},
  journal   = {Translational Issues in Psychological Science},
  volume    = {7},
  number    = {3},
  pages     = {213--217},
  keywords  = {DTIC, MedVR, VR},
  pubstate  = {published},
  tppubtype = {article}
}
Rizzo, Albert, et al.
Normative Data for a Next Generation Virtual Classroom for Attention Assessment in Children with ADHD and Beyond! Inproceedings
In: Proceedings of the 13th International Conference on Disability, Virtual Reality and Associated Technologies (ICDVRAT 2021), Serpa, Portugal, 2021.
Links | BibTeX | Tags: MedVR, Virtual Humans, VR
@inproceedings{a_rizzo_et_al_normative_2021,
  title     = {Normative Data for a Next Generation Virtual Classroom for Attention Assessment in Children with {ADHD} and Beyond!},
  author    = {Rizzo, Albert and others},
  url       = {http://studio.hei-lab.ulusofona.pt/archive/},
  year      = {2021},
  date      = {2021-09-01},
  booktitle = {Proceedings of the 13th International Conference on Disability, Virtual Reality and Associated Technologies (ICDVRAT 2021)},
  address   = {Serpa, Portugal},
  keywords  = {MedVR, Virtual Humans, VR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Awada, Mohamad; Lucas, Gale; Becerik-Gerber, Burcin; Roll, Shawn
Working from home during the COVID-19 pandemic: Impact on office worker productivity and work experience Journal Article
In: WOR, vol. 69, no. 4, pp. 1171–1189, 2021, ISSN: 10519815, 18759270.
Abstract | Links | BibTeX | Tags: DTIC, UARC
@article{awada_working_2021,
  title     = {Working from home during the {COVID-19} pandemic: Impact on office worker productivity and work experience},
  author    = {Mohamad Awada and Gale Lucas and Burcin Becerik-Gerber and Shawn Roll},
  url       = {https://www.medra.org/servlet/aliasResolver?alias=iospress&doi=10.3233/WOR-210301},
  doi       = {10.3233/WOR-210301},
  issn      = {10519815, 18759270},
  year      = {2021},
  date      = {2021-08-01},
  urldate   = {2022-09-26},
  journal   = {Work},
  volume    = {69},
  number    = {4},
  pages     = {1171--1189},
  abstract  = {BACKGROUND: With the COVID-19 pandemic, organizations embraced Work From Home (WFH). An important component of transitioning to WFH is the effect on workers, particularly related to their productivity and work experience. OBJECTIVES: The objective of this study is to examine how worker-, workspace-, and work-related factors affected productivity and time spent at a workstation on a typical WFH day during the pandemic. METHODS: An online questionnaire was designed and administered to collect the necessary information. Data from 988 respondents were included in the analyses. RESULTS: Overall perception of productivity level among workers did not change relative to their in-office productivity before the pandemic. Female, older, and high-income workers were likely to report increased productivity. Productivity was positively influenced by better mental and physical health statuses, having a teenager, increased communication with coworkers and having a dedicated room for work. Number of hours spent at a workstation increased by approximately 1.5 hours during a typical WFH day. Longer hours were reported by individuals who had school age children, owned an office desk or an adjustable chair, and had adjusted their work hours. CONCLUSION: The findings highlight key factors for employers and employees to consider for improving the WFH experience.},
  keywords  = {DTIC, UARC},
  pubstate  = {published},
  tppubtype = {article}
}
Dukes, Daniel; Abrams, Kathryn; Adolphs, Ralph; Ahmed, Mohammed E.; Beatty, Andrew; Berridge, Kent C.; Broomhall, Susan; Brosch, Tobias; Campos, Joseph J.; Clay, Zanna; Clément, Fabrice; Cunningham, William A.; Damasio, Antonio; Damasio, Hanna; D’Arms, Justin; Davidson, Jane W.; Gelder, Beatrice; Deonna, Julien; Sousa, Ronnie; Ekman, Paul; Ellsworth, Phoebe C.; Fehr, Ernst; Fischer, Agneta; Foolen, Ad; Frevert, Ute; Grandjean, Didier; Gratch, Jonathan; Greenberg, Leslie; Greenspan, Patricia; Gross, James J.; Halperin, Eran; Kappas, Arvid; Keltner, Dacher; Knutson, Brian; Konstan, David; Kret, Mariska E.; LeDoux, Joseph E.; Lerner, Jennifer S.; Levenson, Robert W.; Loewenstein, George; Manstead, Antony S. R.; Maroney, Terry A.; Moors, Agnes; Niedenthal, Paula; Parkinson, Brian; Pavlidis, Ioannis; Pelachaud, Catherine; Pollak, Seth D.; Pourtois, Gilles; Roettger-Roessler, Birgitt; Russell, James A.; Sauter, Disa; Scarantino, Andrea; Scherer, Klaus R.; Stearns, Peter; Stets, Jan E.; Tappolet, Christine; Teroni, Fabrice; Tsai, Jeanne; Turner, Jonathan; Reekum, Carien Van; Vuilleumier, Patrik; Wharton, Tim; Sander, David
The rise of affectivism Journal Article
In: Nat Hum Behav, vol. 5, no. 7, pp. 816–820, 2021, ISSN: 2397-3374.
Links | BibTeX | Tags: Emotions
@article{dukes_rise_2021,
  title     = {The rise of affectivism},
  author    = {Daniel Dukes and Kathryn Abrams and Ralph Adolphs and Mohammed E. Ahmed and Andrew Beatty and Kent C. Berridge and Susan Broomhall and Tobias Brosch and Joseph J. Campos and Zanna Clay and Fabrice Clément and William A. Cunningham and Antonio Damasio and Hanna Damasio and Justin D’Arms and Jane W. Davidson and Beatrice de Gelder and Julien Deonna and Ronnie de Sousa and Paul Ekman and Phoebe C. Ellsworth and Ernst Fehr and Agneta Fischer and Ad Foolen and Ute Frevert and Didier Grandjean and Jonathan Gratch and Leslie Greenberg and Patricia Greenspan and James J. Gross and Eran Halperin and Arvid Kappas and Dacher Keltner and Brian Knutson and David Konstan and Mariska E. Kret and Joseph E. LeDoux and Jennifer S. Lerner and Robert W. Levenson and George Loewenstein and Antony S. R. Manstead and Terry A. Maroney and Agnes Moors and Paula Niedenthal and Brian Parkinson and Ioannis Pavlidis and Catherine Pelachaud and Seth D. Pollak and Gilles Pourtois and Birgitt Roettger-Roessler and James A. Russell and Disa Sauter and Andrea Scarantino and Klaus R. Scherer and Peter Stearns and Jan E. Stets and Christine Tappolet and Fabrice Teroni and Jeanne Tsai and Jonathan Turner and Carien van Reekum and Patrik Vuilleumier and Tim Wharton and David Sander},
  url       = {http://www.nature.com/articles/s41562-021-01130-8},
  doi       = {10.1038/s41562-021-01130-8},
  issn      = {2397-3374},
  year      = {2021},
  date      = {2021-07-01},
  urldate   = {2022-09-28},
  journal   = {Nature Human Behaviour},
  volume    = {5},
  number    = {7},
  pages     = {816--820},
  keywords  = {Emotions},
  pubstate  = {published},
  tppubtype = {article}
}
Stocco, Andrea; Sibert, Catherine; Steine-Hanson, Zoe; Koh, Natalie; Laird, John E.; Lebiere, Christian J.; Rosenbloom, Paul
Analysis of the human connectome data supports the notion of a “Common Model of Cognition” for human and human-like intelligence across domains Journal Article
In: NeuroImage, vol. 235, pp. 118035, 2021, ISSN: 10538119.
@article{stocco_analysis_2021-1,
  title     = {Analysis of the human connectome data supports the notion of a {“Common Model of Cognition”} for human and human-like intelligence across domains},
  author    = {Andrea Stocco and Catherine Sibert and Zoe Steine-Hanson and Natalie Koh and John E. Laird and Christian J. Lebiere and Paul Rosenbloom},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S1053811921003128},
  doi       = {10.1016/j.neuroimage.2021.118035},
  issn      = {10538119},
  year      = {2021},
  date      = {2021-07-01},
  urldate   = {2021-04-30},
  journal   = {NeuroImage},
  volume    = {235},
  pages     = {118035},
  internal-note = {Duplicate of stocco_analysis_2021 (same work, different urldate); key kept so existing citations still resolve -- consolidate when safe},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Stocco, Andrea; Sibert, Catherine; Steine-Hanson, Zoe; Koh, Natalie; Laird, John E.; Lebiere, Christian J.; Rosenbloom, Paul
Analysis of the human connectome data supports the notion of a “Common Model of Cognition” for human and human-like intelligence across domains Journal Article
In: NeuroImage, vol. 235, pp. 118035, 2021, ISSN: 10538119.
@article{stocco_analysis_2021,
  title     = {Analysis of the human connectome data supports the notion of a {“Common Model of Cognition”} for human and human-like intelligence across domains},
  author    = {Andrea Stocco and Catherine Sibert and Zoe Steine-Hanson and Natalie Koh and John E. Laird and Christian J. Lebiere and Paul Rosenbloom},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S1053811921003128},
  doi       = {10.1016/j.neuroimage.2021.118035},
  issn      = {10538119},
  year      = {2021},
  date      = {2021-07-01},
  urldate   = {2021-05-06},
  journal   = {NeuroImage},
  volume    = {235},
  pages     = {118035},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Bonial, Claire; Abrams, Mitchell; Traum, David; Voss, Clare
Builder, we have done it: Evaluating & Extending Dialogue-AMR NLU Pipeline for Two Collaborative Domains Inproceedings
In: Proceedings of the 14th International Conference on Computational Semantics (IWCS), pp. 173–183, Association for Computational Linguistics, Groningen, The Netherlands (online), 2021.
Abstract | Links | BibTeX | Tags: Dialogue, DTIC
@inproceedings{bonial_builder_2021,
  title     = {Builder, we have done it: Evaluating \& Extending Dialogue-{AMR} {NLU} Pipeline for Two Collaborative Domains},
  author    = {Claire Bonial and Mitchell Abrams and David Traum and Clare Voss},
  url       = {https://aclanthology.org/2021.iwcs-1.17},
  year      = {2021},
  date      = {2021-06-01},
  urldate   = {2022-09-23},
  booktitle = {Proceedings of the 14th International Conference on Computational Semantics (IWCS)},
  pages     = {173--183},
  publisher = {Association for Computational Linguistics},
  address   = {Groningen, The Netherlands (online)},
  abstract  = {We adopt, evaluate, and improve upon a two-step natural language understanding (NLU) pipeline that incrementally tames the variation of unconstrained natural language input and maps to executable robot behaviors. The pipeline first leverages Abstract Meaning Representation (AMR) parsing to capture the propositional content of the utterance, and second converts this into “Dialogue-AMR,” which augments standard AMR with information on tense, aspect, and speech acts. Several alternative approaches and training datasets are evaluated for both steps and corresponding components of the pipeline, some of which outperform the original. We extend the Dialogue-AMR annotation schema to cover a different collaborative instruction domain and evaluate on both domains. With very little training data, we achieve promising performance in the new domain, demonstrating the scalability of this approach.},
  keywords  = {Dialogue, DTIC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Horstmann, Aike C.; Gratch, Jonathan; Krämer, Nicole C.
I Just Wanna Blame Somebody, Not Something! Reactions to a Computer Agent Giving Negative Feedback Based on the Instructions of a Person Journal Article
In: International Journal of Human-Computer Studies, pp. 102683, 2021, ISSN: 10715819.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{horstmann_i_2021,
  title     = {I Just Wanna Blame Somebody, Not Something! Reactions to a Computer Agent Giving Negative Feedback Based on the Instructions of a Person},
  author    = {Aike C. Horstmann and Jonathan Gratch and Nicole C. Krämer},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S1071581921001014},
  doi       = {10.1016/j.ijhcs.2021.102683},
  issn      = {10715819},
  year      = {2021},
  date      = {2021-06-01},
  urldate   = {2021-06-18},
  journal   = {International Journal of Human-Computer Studies},
  pages     = {102683},
  abstract  = {Previous research focused on differences between interacting with a person-controlled avatar and a computer-controlled virtual agent. This study however examines an aspiring form of technology called agent representative which constitutes a mix of the former two interaction partner types since it is a computer agent which was previously instructed by a person to take over a task on the person’s behalf. In an experimental lab study with a 2 x 3 between-subjects-design (N = 195), people believed to study together either with an agent representative, avatar, or virtual agent. The interaction partner was described to either possess high or low expertise, while always giving negative feedback regarding the participant’s performance. Results show small but interesting differences regarding the type of agency. People attributed the most agency and blame to the person(s) behind the software and reported the most negative affect when interacting with an avatar, which was less the case for a person’s agent representative and the least for a virtual agent. Level of expertise had no significant effect and other evaluation measures were not affected.},
  keywords  = {DTIC, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Aryal, Ashrant; Becerik-Gerber, Burcin; Lucas, Gale M.; Roll, Shawn C.
Intelligent Agents to Improve Thermal Satisfaction by Controlling Personal Comfort Systems Under Different Levels of Automation Journal Article
In: IEEE Internet Things J., vol. 8, no. 8, pp. 7089–7100, 2021, ISSN: 2327-4662, 2372-2541.
@article{aryal_intelligent_2021,
  title     = {Intelligent Agents to Improve Thermal Satisfaction by Controlling Personal Comfort Systems Under Different Levels of Automation},
  author    = {Ashrant Aryal and Burcin Becerik-Gerber and Gale M. Lucas and Shawn C. Roll},
  url       = {https://ieeexplore.ieee.org/document/9260148/},
  doi       = {10.1109/JIOT.2020.3038378},
  issn      = {2327-4662, 2372-2541},
  year      = {2021},
  date      = {2021-04-01},
  urldate   = {2022-10-24},
  journal   = {IEEE Internet of Things Journal},
  volume    = {8},
  number    = {8},
  pages     = {7089--7100},
  keywords  = {AI},
  pubstate  = {published},
  tppubtype = {article}
}
de Melo, Celso M.; Gratch, Jonathan; Krueger, Frank
Heuristic thinking and altruism toward machines in people impacted by COVID-19 Journal Article
In: iScience, vol. 24, no. 3, pp. 102228, 2021, ISSN: 25890042.
Abstract | Links | BibTeX | Tags:
@article{de_melo_heuristic_2021-1,
  title     = {Heuristic thinking and altruism toward machines in people impacted by {COVID-19}},
  author    = {de Melo, Celso M. and Gratch, Jonathan and Krueger, Frank},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S2589004221001966},
  doi       = {10.1016/j.isci.2021.102228},
  issn      = {25890042},
  year      = {2021},
  date      = {2021-03-01},
  urldate   = {2021-04-14},
  journal   = {iScience},
  volume    = {24},
  number    = {3},
  pages     = {102228},
  abstract  = {Autonomous machines are poised to become pervasive, but most treat machines differently: we are willing to violate social norms and less likely to display altruism toward machines. Here, we report an unexpected effect that those impacted by COVID-19—as measured by a post-traumatic stress disorder scale—show a sharp reduction in this difference. Participants engaged in the dictator game with humans and machines and, consistent with prior research on disasters, those impacted by COVID-19 displayed more altruism to other humans. Unexpectedly, participants impacted by COVID-19 displayed equal altruism toward human and machine partners. A mediation analysis suggests that altruism toward machines was explained by an increase in heuristic thinking—reinforcing prior theory that heuristic thinking encourages people to treat machines like people—and faith in technology—perhaps reflecting long-term consequences on how we act with machines. These findings give insight, but also raise concerns, for the design of technology.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Mell, Johnathan; Beissinger, Markus; Gratch, Jonathan
An expert-model and machine learning hybrid approach to predicting human-agent negotiation outcomes in varied data Journal Article
In: J Multimodal User Interfaces, 2021, ISSN: 1783-7677, 1783-8738.
Abstract | Links | BibTeX | Tags: DTIC, Machine Learning, UARC, Virtual Humans
@article{mell_expert-model_2021,
  author        = {Mell, Johnathan and Beissinger, Markus and Gratch, Jonathan},
  title         = {An expert-model and machine learning hybrid approach to predicting human-agent negotiation outcomes in varied data},
  journal       = {Journal on Multimodal User Interfaces},
  year          = {2021},
  date          = {2021-03-01},
  urldate       = {2021-04-15},
  url           = {http://link.springer.com/10.1007/s12193-021-00368-w},
  doi           = {10.1007/s12193-021-00368-w},
  issn          = {1783-7677, 1783-8738},
  abstract      = {We present the results of a machine-learning approach to the analysis of several human-agent negotiation studies. By combining expert knowledge of negotiating behavior compiled over a series of empirical studies with neural networks, we show that a hybrid approach to parameter selection yields promise for designing more effective and socially intelligent agents. Specifically, we show that a deep feedforward neural network using a theory-driven three-parameter model can be effective in predicting negotiation outcomes. Furthermore, it outperforms other expert-designed models that use more parameters, as well as those using other techniques (such as linear regression models or boosted decision trees). In a follow-up study, we show that the most successful models change as the dataset size increases and the prediction targets change, and show that boosted decision trees may not be suitable for the negotiation domain. We anticipate these results will have impact for those seeking to combine extensive domain knowledge with more automated approaches in human-computer negotiation. Further, we show that this approach can be a stepping stone from purely exploratory research to targeted human-behavioral experimentation. Through our approach, areas of social artificial intelligence that have historically benefited from expert knowledge and traditional AI approaches can be combined with more recent proven-effective machine learning algorithms.},
  keywords      = {DTIC, Machine Learning, UARC, Virtual Humans},
  pubstate      = {published},
  tppubtype     = {article},
  internal-note = {NOTE(review): journal expanded from the abbreviation "J Multimodal User Interfaces" to the full Springer title (ISSN 1783-7677); store full names and let the style abbreviate.},
}
Gramlich, Michael A.; Smolenski, Derek J.; Norr, Aaron M.; Rothbaum, Barbara O.; Rizzo, Albert A.; Andrasik, Frank; Fantelli, Emily; Reger, Greg M.
Psychophysiology during exposure to trauma memories: Comparative effects of virtual reality and imaginal exposure for posttraumatic stress disorder Journal Article
In: Depression and Anxiety, pp. da.23141, 2021, ISSN: 1091-4269, 1520-6394.
Abstract | Links | BibTeX | Tags: MedVR
@article{gramlich_psychophysiology_2021,
  author    = {Michael A. Gramlich and Derek J. Smolenski and Aaron M. Norr and Barbara O. Rothbaum and Albert A. Rizzo and Frank Andrasik and Emily Fantelli and Greg M. Reger},
  title     = {Psychophysiology during exposure to trauma memories: Comparative effects of virtual reality and imaginal exposure for posttraumatic stress disorder},
  journal   = {Depression and Anxiety},
  pages     = {da.23141},
  year      = {2021},
  date      = {2021-03-01},
  urldate   = {2021-04-14},
  url       = {https://onlinelibrary.wiley.com/doi/10.1002/da.23141},
  doi       = {10.1002/da.23141},
  issn      = {1091-4269, 1520-6394},
  abstract  = {Background: This investigation involved an in‐depth examination of psychophysiological responses during exposure to the trauma memory across 10 sessions among active duty soldiers with combat‐related posttraumatic stress disorder (PTSD) treated by Prolonged Exposure (PE) or Virtual Reality Exposure (VRE). We compared psychophysiological changes, session‐by‐session, between VRE and traditional imaginal exposure. Methods: Heart rate (HR), galvanic skin response (GSR), and peripheral skin temperature were collected every 5 min during exposure sessions with 61 combat veterans of Iraq/Afghanistan and compared to the PTSD Checklist (PCL‐C) and Clinician‐Administered PTSD Scale (CAPS) outcomes using multilevel modeling. Results: Over the course of treatment, participants in the PE group had higher HR arousal compared to participants in the VRE group. With reference to GSR, in earlier sessions, participants demonstrated a within‐session increase, whereas, in later sessions, participants showed a within‐session habituation response. A significant interaction was found for GSR and treatment assignment for within‐session change, withinperson effect, predicting CAPS (d = 0.70) and PCL‐C (d = 0.66) outcomes. Conclusion: Overall, these findings suggest that exposure to traumatic memories activates arousal across sessions, with GSR being most associated with reductions in PTSD symptoms for participants in the PE group.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article},
}
Melo, Celso M.; Gratch, Jonathan; Krueger, Frank
Heuristic thinking and altruism toward machines in people impacted by COVID-19 Journal Article
In: iScience, vol. 24, no. 3, pp. 102228, 2021, ISSN: 25890042.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{de_melo_heuristic_2021,
  author        = {de Melo, Celso M. and Gratch, Jonathan and Krueger, Frank},
  title         = {Heuristic thinking and altruism toward machines in people impacted by {COVID-19}},
  journal       = {iScience},
  volume        = {24},
  number        = {3},
  pages         = {102228},
  year          = {2021},
  date          = {2021-03-01},
  urldate       = {2021-04-14},
  url           = {https://linkinghub.elsevier.com/retrieve/pii/S2589004221001966},
  doi           = {10.1016/j.isci.2021.102228},
  issn          = {2589-0042},
  abstract      = {Autonomous machines are poised to become pervasive, but most treat machines differently: we are willing to violate social norms and less likely to display altruism toward machines. Here, we report an unexpected effect that those impacted by COVID-19—as measured by a post-traumatic stress disorder scale—show a sharp reduction in this difference. Participants engaged in the dictator game with humans and machines and, consistent with prior research on disasters, those impacted by COVID-19 displayed more altruism to other humans. Unexpectedly, participants impacted by COVID-19 displayed equal altruism toward human and machine partners. A mediation analysis suggests that altruism toward machines was explained by an increase in heuristic thinking—reinforcing prior theory that heuristic thinking encourages people to treat machines like people—and faith in technology—perhaps reflecting long-term consequences on how we act with machines. These findings give insight, but also raise concerns, for the design of technology.},
  keywords      = {UARC, Virtual Humans},
  pubstate      = {published},
  tppubtype     = {article},
  internal-note = {NOTE(review): this file also contains de_melo_heuristic_2021-1 for the same work -- keep one key (or alias). Author particle "de" restored to match the citation key; confirm against the publisher record. ISSN hyphenated; COVID-19 brace-protected.},
}
Awada, Mohamad; Zhu, Runhe; Becerik-Gerber, Burcin; Lucas, Gale; Southers, Erroll
An integrated emotional and physiological assessment for VR-based active shooter incident experiments Journal Article
In: Advanced Engineering Informatics, vol. 47, pp. 101227, 2021, ISSN: 14740346.
Links | BibTeX | Tags: DTIC, VR
@article{awada_integrated_2021,
  author        = {Awada, Mohamad and Zhu, Runhe and Becerik-Gerber, Burcin and Lucas, Gale and Southers, Erroll},
  title         = {An integrated emotional and physiological assessment for {VR}-based active shooter incident experiments},
  journal       = {Advanced Engineering Informatics},
  volume        = {47},
  pages         = {101227},
  year          = {2021},
  date          = {2021-01-01},
  urldate       = {2022-10-24},
  url           = {https://linkinghub.elsevier.com/retrieve/pii/S1474034620301968},
  doi           = {10.1016/j.aei.2020.101227},
  issn          = {1474-0346},
  keywords      = {DTIC, VR},
  pubstate      = {published},
  tppubtype     = {article},
  internal-note = {NOTE(review): ISSN hyphenated; VR brace-protected so sentence-casing styles do not lowercase it.},
}
Barnes, Michael J.; Wang, Ning; Pynadath, David V.; Chen, Jessie Y. C.
Human-agent bidirectional transparency Incollection
In: Trust in Human-Robot Interaction, pp. 209–232, Elsevier, 2021, ISBN: 978-0-12-819472-0.
@incollection{barnes_human-agent_2021,
  author    = {Michael J. Barnes and Ning Wang and David V. Pynadath and Jessie Y. C. Chen},
  title     = {Human-agent bidirectional transparency},
  booktitle = {Trust in Human-Robot Interaction},
  pages     = {209--232},
  publisher = {Elsevier},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2022-10-24},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/B9780128194720000101},
  doi       = {10.1016/B978-0-12-819472-0.00010-1},
  isbn      = {978-0-12-819472-0},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection},
}
Bell, Benjamin; Bennett, Winston “Wink”; Kelsey, Elaine; Nye, Benjamin
Attention and Engagement in Virtual Environments: Measuring the Unobservable Inproceedings
In: 2021.
Links | BibTeX | Tags: AR, DTIC, Machine Learning, UARC, VR
@inproceedings{bell_attention_2021,
  author        = {Bell, Benjamin and Bennett, Winston ``Wink'' and Kelsey, Elaine and Nye, Benjamin},
  title         = {Attention and Engagement in Virtual Environments: Measuring the Unobservable},
  booktitle     = {Proceedings of the Interservice/Industry Training, Simulation and Education Conference ({I/ITSEC})},
  year          = {2021},
  date          = {2021-01-01},
  url           = {https://www.xcdsystem.com/iitsec/proceedings/index.cfm?Year=2021&AbID=95758&CID=862#View},
  keywords      = {AR, DTIC, Machine Learning, UARC, VR},
  pubstate      = {published},
  tppubtype     = {inproceedings},
  internal-note = {NOTE(review): booktitle was missing (required for @inproceedings); inferred from the I/ITSEC proceedings URL -- confirm exact proceedings title. Unicode curly quotes around the nickname replaced with TeX quotes for classic-BibTeX compatibility.},
}
Cheng, Junyan; Fostiropoulos, Iordanis; Boehm, Barry; Soleymani, Mohammad
Multimodal Phased Transformer for Sentiment Analysis Inproceedings
In: Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 2447–2458, Association for Computational Linguistics, Online and Punta Cana, Dominican Republic, 2021.
Links | BibTeX | Tags: DTIC, UARC
@inproceedings{cheng_multimodal_2021,
  author    = {Junyan Cheng and Iordanis Fostiropoulos and Barry Boehm and Mohammad Soleymani},
  title     = {Multimodal Phased Transformer for Sentiment Analysis},
  booktitle = {Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing},
  pages     = {2447--2458},
  publisher = {Association for Computational Linguistics},
  address   = {Online and Punta Cana, Dominican Republic},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2022-09-23},
  url       = {https://aclanthology.org/2021.emnlp-main.189},
  doi       = {10.18653/v1/2021.emnlp-main.189},
  keywords  = {DTIC, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
}
He, Zihao; Tavabi, Leili; Lerman, Kristina; Soleymani, Mohammad
Speaker Turn Modeling for Dialogue Act Classification Inproceedings
In: Findings of the Association for Computational Linguistics: EMNLP 2021, pp. 2150–2157, Association for Computational Linguistics, Punta Cana, Dominican Republic, 2021.
Links | BibTeX | Tags: Dialogue, DTIC, UARC
@inproceedings{he_speaker_2021,
  author    = {Zihao He and Leili Tavabi and Kristina Lerman and Mohammad Soleymani},
  title     = {Speaker Turn Modeling for Dialogue Act Classification},
  booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2021},
  pages     = {2150--2157},
  publisher = {Association for Computational Linguistics},
  address   = {Punta Cana, Dominican Republic},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2022-09-23},
  url       = {https://aclanthology.org/2021.findings-emnlp.185},
  doi       = {10.18653/v1/2021.findings-emnlp.185},
  keywords  = {Dialogue, DTIC, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
}
Gervits, Felix; Leuski, Anton; Bonial, Claire; Gordon, Carla; Traum, David
A Classification-Based Approach to Automating Human-Robot Dialogue Incollection
In: Marchi, Erik; Siniscalchi, Sabato Marco; Cumani, Sandro; Salerno, Valerio Mario; Li, Haizhou (Ed.): Increasing Naturalness and Flexibility in Spoken Dialogue Interaction: 10th International Workshop on Spoken Dialogue Systems, pp. 115–127, Springer, Singapore, 2021, ISBN: 9789811593239.
Abstract | Links | BibTeX | Tags: Dialogue, DTIC
@incollection{gervits_classification-based_2021,
  author    = {Felix Gervits and Anton Leuski and Claire Bonial and Carla Gordon and David Traum},
  editor    = {Erik Marchi and Sabato Marco Siniscalchi and Sandro Cumani and Valerio Mario Salerno and Haizhou Li},
  title     = {A Classification-Based Approach to Automating Human-Robot Dialogue},
  booktitle = {Increasing Naturalness and Flexibility in Spoken Dialogue Interaction: 10th International Workshop on Spoken Dialogue Systems},
  series    = {Lecture Notes in Electrical Engineering},
  pages     = {115--127},
  publisher = {Springer},
  address   = {Singapore},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2022-09-23},
  url       = {https://doi.org/10.1007/978-981-15-9323-9_10},
  doi       = {10.1007/978-981-15-9323-9_10},
  isbn      = {9789811593239},
  abstract  = {We present a dialogue system based on statistical classification which was used to automate human-robot dialogue in a collaborative navigation domain. The classifier was trained on a small corpus of multi-floor Wizard-of-Oz dialogue including two wizards: one standing in for dialogue capabilities and another for navigation. Below, we describe the implementation details of the classifier and show how it was used to automate the dialogue wizard. We evaluate our system on several sets of source data from the corpus and find that response accuracy is generally high, even with very limited training data. Another contribution of this work is the novel demonstration of a dialogue manager that uses the classifier to engage in multi-floor dialogue with two different human roles. Overall, this approach is useful for enabling spoken dialogue systems to produce robust and accurate responses to natural language input, and for robots that need to interact with humans in a team setting.},
  keywords  = {Dialogue, DTIC},
  pubstate  = {published},
  tppubtype = {incollection},
}
Chen, Meida; Feng, Andrew; Hou, Yu; McCullough, Kyle; Prasad, Pratusha Bhuvana; Soibelman, Lucio
Ground material classification for UAV-based photogrammetric 3D data: A 2D-3D Hybrid Approach Journal Article
In: 2021.
Abstract | Links | BibTeX | Tags: DTIC, Simulation, UARC
@article{chen_ground_2021,
  author        = {Chen, Meida and Feng, Andrew and Hou, Yu and McCullough, Kyle and Prasad, Pratusha Bhuvana and Soibelman, Lucio},
  title         = {Ground material classification for {UAV}-based photogrammetric {3D} data: A {2D-3D} Hybrid Approach},
  year          = {2021},
  date          = {2021-01-01},
  urldate       = {2022-09-27},
  eprint        = {2109.12221},
  eprinttype    = {arXiv},
  url           = {https://arxiv.org/abs/2109.12221},
  doi           = {10.48550/ARXIV.2109.12221},
  abstract      = {In recent years, photogrammetry has been widely used in many areas to create photorealistic 3D virtual data representing the physical environment. The innovation of small unmanned aerial vehicles (sUAVs) has provided additional high-resolution imaging capabilities with low cost for mapping a relatively large area of interest. These cutting-edge technologies have caught the US Army and Navy's attention for the purpose of rapid 3D battlefield reconstruction, virtual training, and simulations. Our previous works have demonstrated the importance of information extraction from the derived photogrammetric data to create semantic-rich virtual environments (Chen et al., 2019). For example, an increase of simulation realism and fidelity was achieved by segmenting and replacing photogrammetric trees with game-ready tree models. In this work, we further investigated the semantic information extraction problem and focused on the ground material segmentation and object detection tasks. The main innovation of this work was that we leveraged both the original 2D images and the derived 3D photogrammetric data to overcome the challenges faced when using each individual data source. For ground material segmentation, we utilized an existing convolutional neural network architecture (i.e., 3DMV) which was originally designed for segmenting RGB-D sensed indoor data. We improved its performance for outdoor photogrammetric data by introducing a depth pooling layer in the architecture to take into consideration the distance between the source images and the reconstructed terrain model. To test the performance of our improved 3DMV, a ground truth ground material database was created using data from the One World Terrain (OWT) data repository. Finally, a workflow for importing the segmented ground materials into a virtual simulation scene was introduced, and visual results are reported in this paper.},
  keywords      = {DTIC, Simulation, UARC},
  pubstate      = {published},
  tppubtype     = {article},
  internal-note = {NOTE(review): arXiv preprint -- added eprint/eprinttype instead of leaving a journal-less @article. Original title contained garbled text ("classification and for"); reconstructed from the abstract, confirm against arXiv:2109.12221. Acronyms brace-protected.},
}
Döveling, Katrin; Konijn, Elly A. (Ed.)
Routledge international handbook of emotions and media Book
Routledge, New York, 2021, ISBN: 978-1-138-61049-1 978-1-03-211461-3.
@book{doveling_routledge_2021,
  editor    = {Katrin Döveling and Elly A. Konijn},
  title     = {Routledge international handbook of emotions and media},
  series    = {Routledge international handbooks},
  publisher = {Routledge},
  address   = {New York},
  year      = {2021},
  date      = {2021-01-01},
  isbn      = {978-1-138-61049-1 978-1-03-211461-3},
  abstract  = {"In times of a worldwide pandemic, the election of a new US president, "MeToo," and "Fridays for Future," to name but a few examples, one thing becomes palpable: the emotional impact of media on individuals and society cannot be underestimated. The relations between media, people, and society are to a great extent based on human emotions. Emotions are essential in understanding how media messages are processed and how media affect individual and social behavior as well as public social life. Adopting a thoroughly interdisciplinary approach to the study of emotions in the context of media, the second, entirely revised and updated, edition of Routledge International Handbook of Emotions and Media comprises areas such as evolutionary psychology, media psychology, media sociology, cultural studies, media entertainment, and political and digital communication. Leading experts from across the globe explore cutting-edge research on the role of emotion in selecting and processing media contents, the emotional consequences of media use, politics and public emotion, emotions in political communication and persuasion, as well as emotions in digital, interactive, and virtual encounters. This compelling and authoritative Handbook is an essential reference tool for scholars and students of media, communication science, media psychology, emotion, cognitive and social psychology, cultural studies, media sociology, and related fields"–},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {book},
}
Nye, Benjamin; Nelson, David; Herrick, Imogen; Sinatra, Gale; Swartout, Bill; Porter, Molly; Davis, Matt; Lindsey, Emily
SCIENCE BIG and SMALL: Visiting the Ice Age through Miniature and Life-Sized Augmented Reality Experiences Inproceedings
In: TMS Proceedings 2021, American Psychological Association, 2021.
Links | BibTeX | Tags: AR, MxR
@inproceedings{nye_science_2021,
  author        = {Nye, Benjamin and Nelson, David and Herrick, Imogen and Sinatra, Gale and Swartout, Bill and Porter, Molly and Davis, Matt and Lindsey, Emily},
  title         = {{SCIENCE BIG and SMALL}: Visiting the {Ice Age} through Miniature and Life-Sized {Augmented Reality} Experiences},
  booktitle     = {TMS Proceedings 2021},
  publisher     = {American Psychological Association},
  year          = {2021},
  date          = {2021-01-01},
  urldate       = {2022-09-21},
  url           = {https://tmb.apaopen.org/pub/djue4kjf},
  doi           = {10.1037/tms0000106},
  keywords      = {AR, MxR},
  pubstate      = {published},
  tppubtype     = {inproceedings},
  internal-note = {NOTE(review): stylized caps and proper nouns in the title brace-protected so sentence-casing bibliography styles do not lowercase them.},
}
}
Gordon, Andrew S.; Wang, Timothy S.
Narrative Text Generation from Abductive Interpretations Using Axiom-Specific Templates Incollection
In: Mitchell, Alex; Vosmeer, Mirjam (Ed.): Interactive Storytelling, vol. 13138, pp. 71–79, Springer International Publishing, Cham, 2021, ISBN: 978-3-030-92299-3 978-3-030-92300-6.
Links | BibTeX | Tags: DTIC, Narrative, UARC
@incollection{gordon_narrative_2021,
  author    = {Andrew S. Gordon and Timothy S. Wang},
  editor    = {Alex Mitchell and Mirjam Vosmeer},
  title     = {Narrative Text Generation from Abductive Interpretations Using Axiom-Specific Templates},
  booktitle = {Interactive Storytelling},
  volume    = {13138},
  pages     = {71--79},
  publisher = {Springer International Publishing},
  address   = {Cham},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2022-09-22},
  url       = {https://link.springer.com/10.1007/978-3-030-92300-6_7},
  doi       = {10.1007/978-3-030-92300-6_7},
  isbn      = {978-3-030-92299-3 978-3-030-92300-6},
  keywords  = {DTIC, Narrative, UARC},
  pubstate  = {published},
  tppubtype = {incollection},
}
}
Brixey, Jacqueline; Traum, David
Masheli: A Choctaw-English Bilingual Chatbot Incollection
In: D'Haro, Luis Fernando; Callejas, Zoraida; Nakamura, Satoshi (Ed.): Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 41–50, Springer Singapore, Singapore, 2021, ISBN: 9789811583940 9789811583957, (Series Title: Lecture Notes in Electrical Engineering).
Abstract | Links | BibTeX | Tags: Natural Language, UARC, Virtual Humans
@incollection{dharo_masheli_2021,
  author        = {Brixey, Jacqueline and Traum, David},
  editor        = {D'Haro, Luis Fernando and Callejas, Zoraida and Nakamura, Satoshi},
  title         = {{Masheli}: A {Choctaw-English} Bilingual Chatbot},
  booktitle     = {Conversational Dialogue Systems for the Next Decade},
  volume        = {704},
  pages         = {41--50},
  publisher     = {Springer Singapore},
  address       = {Singapore},
  year          = {2021},
  date          = {2021-01-01},
  urldate       = {2021-04-15},
  url           = {http://link.springer.com/10.1007/978-981-15-8395-7_4},
  doi           = {10.1007/978-981-15-8395-7_4},
  isbn          = {9789811583940 9789811583957},
  abstract      = {We present the implementation of an autonomous Choctaw-English bilingual chatbot. Choctaw is an American indigenous language. The intended use of the chatbot is for Choctaw language learners to practice. The system’s backend is NPCEditor, a response selection program that is trained on linked questions and answers. The chatbot’s answers are stories and conversational utterances in both languages. We experiment with the ability of NPCEditor to appropriately respond to language mixed utterances, and describe a pilot study with Choctaw-English speakers.},
  note          = {Series Title: Lecture Notes in Electrical Engineering},
  keywords      = {Natural Language, UARC, Virtual Humans},
  pubstate      = {published},
  tppubtype     = {incollection},
  internal-note = {NOTE(review): proper nouns in the title brace-protected. Key is derived from the volume editor (dharo) rather than the first author (brixey); left unchanged to avoid breaking existing citations.},
}
Gordon, Carla; Georgila, Kallirroi; Yanov, Volodymyr; Traum, David
Towards Personalization of Spoken Dialogue System Communication Strategies Incollection
In: D'Haro, Luis Fernando; Callejas, Zoraida; Nakamura, Satoshi (Ed.): Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 145–160, Springer Singapore, Singapore, 2021, ISBN: 9789811583940 9789811583957, (Series Title: Lecture Notes in Electrical Engineering).
Abstract | Links | BibTeX | Tags: Dialogue, Natural Language, UARC, Virtual Humans
@incollection{dharo_towards_2021,
  author    = {Carla Gordon and Kallirroi Georgila and Volodymyr Yanov and David Traum},
  editor    = {Luis Fernando D'Haro and Zoraida Callejas and Satoshi Nakamura},
  title     = {Towards Personalization of Spoken Dialogue System Communication Strategies},
  booktitle = {Conversational Dialogue Systems for the Next Decade},
  volume    = {704},
  pages     = {145--160},
  publisher = {Springer Singapore},
  address   = {Singapore},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-04-15},
  url       = {http://link.springer.com/10.1007/978-981-15-8395-7_11},
  doi       = {10.1007/978-981-15-8395-7_11},
  isbn      = {9789811583940 9789811583957},
  abstract  = {This study examines the effects of 3 conversational traits – Register, Explicitness, and Misunderstandings – on user satisfaction and the perception of specific subjective features for Virtual Home Assistant spoken dialogue systems. Eight different system profiles were created, each representing a different combination of these 3 traits. We then utilized a novel Wizard of Oz data collection tool and recruited participants who interacted with the 8 different system profiles, and then rated the systems on 7 subjective features. Surprisingly, we found that systems which made errors were preferred overall, with the statistical analysis revealing error-prone systems were rated higher than systems which made no errors for all 7 of the subjective features rated. There were also some interesting interaction effects between the 3 conversational traits, such as implicit confirmations being preferred for systems employing a “conversational” Register, while explicit confirmations were preferred for systems employing a “formal” Register, even though there was no overall main effect for Explicitness. This experimental framework offers a fine-grained approach to the evaluation of user satisfaction which looks towards the personalization of communication strategies for spoken dialogue systems.},
  note      = {Series Title: Lecture Notes in Electrical Engineering},
  keywords  = {Dialogue, Natural Language, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection},
}
Lee, Minha; Lucas, Gale; Gratch, Jonathan
Comparing mind perception in strategic exchanges: human-agent negotiation, dictator and ultimatum games Journal Article
In: J Multimodal User Interfaces, 2021, ISSN: 1783-7677, 1783-8738.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{lee_comparing_2021,
  author        = {Lee, Minha and Lucas, Gale and Gratch, Jonathan},
  title         = {Comparing mind perception in strategic exchanges: human-agent negotiation, dictator and ultimatum games},
  journal       = {Journal on Multimodal User Interfaces},
  year          = {2021},
  date          = {2021-01-01},
  urldate       = {2021-04-15},
  url           = {http://link.springer.com/10.1007/s12193-020-00356-6},
  doi           = {10.1007/s12193-020-00356-6},
  issn          = {1783-7677, 1783-8738},
  abstract      = {Recent research shows that how we respond to other social actors depends on what sort of mind we ascribe to them. In a comparative manner, we observed how perceived minds of agents shape people’s behavior in the dictator game, ultimatum game, and negotiation against artificial agents. To do so, we varied agents’ minds on two dimensions of the mind perception theory: agency (cognitive aptitude) and patiency (affective aptitude) via descriptions and dialogs. In our first study, agents with emotional capacity garnered more allocations in the dictator game, but in the ultimatum game, agents’ described agency and affective capacity, both led to greater offers. In the second study on negotiation, agents ascribed with low-agency traits earned more points than those with high-agency traits, though the negotiation tactic was the same for all agents. Although patiency did not impact game points, participants sent more happy and surprise emojis and emotionally valenced messages to agents that demonstrated emotional capacity during negotiations. Further, our exploratory analyses indicate that people related only to agents with perceived affective aptitude across all games. Both perceived agency and affective capacity contributed to moral standing after dictator and ultimatum games. But after negotiations, only agents with perceived affective capacity were granted moral standing. Manipulating mind dimensions of machines has differing effects on how people react to them in dictator and ultimatum games, compared to a more complex economic exchange like negotiation. We discuss these results, which show that agents are perceived not only as social actors, but as intentional actors through negotiations, in contrast with simple economic games.},
  keywords      = {UARC, Virtual Humans},
  pubstate      = {published},
  tppubtype     = {article},
  internal-note = {NOTE(review): journal expanded from the abbreviation "J Multimodal User Interfaces" to the full Springer title (ISSN 1783-7677); store full names and let the style abbreviate.},
}
Gratch, Jonathan
The Promise and Peril of Automated Negotiators Journal Article
In: Negotiation Journal, vol. 37, no. 1, pp. 13–34, 2021, ISSN: 0748-4526, 1571-9979.
Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@article{gratch_promise_2021,
  author    = {Jonathan Gratch},
  title     = {The Promise and Peril of Automated Negotiators},
  journal   = {Negotiation Journal},
  volume    = {37},
  number    = {1},
  pages     = {13--34},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-04-14},
  url       = {https://onlinelibrary.wiley.com/doi/10.1111/nejo.12348},
  doi       = {10.1111/nejo.12348},
  issn      = {0748-4526, 1571-9979},
  keywords  = {ARO-Coop, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article},
}
Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
Risk of Injury in Moral Dilemmas With Autonomous Vehicles Journal Article
In: Front. Robot. AI, vol. 7, pp. 572529, 2021, ISSN: 2296-9144.
Abstract | Links | BibTeX | Tags: Autonomous Vehicles, UARC, Virtual Humans
@article{de_melo_risk_2021,
  author        = {de Melo, Celso M. and Marsella, Stacy and Gratch, Jonathan},
  title         = {Risk of Injury in Moral Dilemmas With Autonomous Vehicles},
  journal       = {Frontiers in Robotics and AI},
  volume        = {7},
  pages         = {572529},
  year          = {2021},
  date          = {2021-01-01},
  urldate       = {2021-04-14},
  url           = {https://www.frontiersin.org/articles/10.3389/frobt.2020.572529/full},
  doi           = {10.3389/frobt.2020.572529},
  issn          = {2296-9144},
  abstract      = {As autonomous machines, such as automated vehicles (AVs) and robots, become pervasive in society, they will inevitably face moral dilemmas where they must make decisions that risk injuring humans. However, prior research has framed these dilemmas in starkly simple terms, i.e., framing decisions as life and death and neglecting the influence of risk of injury to the involved parties on the outcome. Here, we focus on this gap and present experimental work that systematically studies the effect of risk of injury on the decisions people make in these dilemmas. In four experiments, participants were asked to program their AVs to either save five pedestrians, which we refer to as the utilitarian choice, or save the driver, which we refer to as the nonutilitarian choice. The results indicate that most participants made the utilitarian choice but that this choice was moderated in important ways by perceived risk to the driver and risk to the pedestrians. As a second contribution, we demonstrate the value of formulating AV moral dilemmas in a game-theoretic framework that considers the possible influence of others’ behavior. In the fourth experiment, we show that participants were more (less) likely to make the utilitarian choice, the more utilitarian (nonutilitarian) other drivers behaved; furthermore, unlike the game-theoretic prediction that decision-makers inevitably converge to nonutilitarianism, we found significant evidence of utilitarianism. We discuss theoretical implications for our understanding of human decision-making in moral dilemmas and practical guidelines for the design of autonomous machines that solve these dilemmas while, at the same time, being likely to be adopted in practice.},
  keywords      = {Autonomous Vehicles, UARC, Virtual Humans},
  pubstate      = {published},
  tppubtype     = {article},
  internal-note = {NOTE(review): journal expanded from the abbreviation "Front. Robot. AI" to the full Frontiers title (ISSN 2296-9144). Author particle "de" restored to match the citation key; confirm against the publisher record.},
}
Mozgai, Sharon; Femminella, Brian; Hartholt, Arno; Rizzo, Skip
User-Centered Design Model for Mobile Health (mHealth) Applications: A Military Case Study in Rapid Assessment Process (RAP) Journal Article
In: pp. 10, 2021.
Abstract | Links | BibTeX | Tags: ARL, MedVR
@article{mozgai_user-centered_2021,
internal-note = {NOTE(review): entry is incomplete -- @article with no journal field, and pages = 10 looks like a page count rather than a page range. The URL filename (CHI_2021_Battle Buddy) suggests this is a CHI 2021 workshop/preprint paper and should probably be @inproceedings with a proper booktitle; the abstract field holds CCS concept metadata, not an abstract. Verify against the published record before citing. Field kept as-is pending confirmation.},
title = {User-Centered Design Model for Mobile Health (mHealth) Applications: A Military Case Study in Rapid Assessment Process (RAP)},
author = {Sharon Mozgai and Brian Femminella and Arno Hartholt and Skip Rizzo},
url = {https://uploads-ssl.webflow.com/5f11f7e80d5a3b6dfdeeb614/5f9b3284d3d73e1da6a8f848_CHI_2021_Battle%20Buddy.pdf},
year = {2021},
date = {2021-01-01},
pages = {10},
abstract = {CCS Concepts: • Human-centered computing → Ubiquitous and mobile computing design and evaluation methods; HCI design and evaluation methods; User centered design; • Applied computing → Military; • Computing methodologies → Intelligent agents.},
keywords = {ARL, MedVR},
pubstate = {published},
tppubtype = {article}
}
Kawano, Seiya; Yoshino, Koichiro; Traum, David; Nakamura, Satoshi
Dialogue Structure Parsing on Multi-Floor Dialogue Based on Multi-Task Learning Inproceedings
In: 1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction, pp. 21–29, ISCA, 2021.
Abstract | Links | BibTeX | Tags: ARL, Dialogue, DTIC, Natural Language, Virtual Humans
@inproceedings{kawano_dialogue_2021,
  author    = {Seiya Kawano and Koichiro Yoshino and David Traum and Satoshi Nakamura},
  title     = {Dialogue Structure Parsing on Multi-Floor Dialogue Based on Multi-Task Learning},
  booktitle = {1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction},
  pages     = {21--29},
  publisher = {ISCA},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-04-15},
  url       = {http://www.isca-speech.org/archive/RobotDial_2021/abstracts/4.html},
  doi       = {10.21437/RobotDial.2021-4},
  abstract  = {A multi-floor dialogue consists of multiple sets of dialogue participants, each conversing within their own floor, but also at least one multicommunicating member who is a participant of multiple floors and coordinating each to achieve a shared dialogue goal. The structure of such dialogues can be complex, involving intentional structure and relations that are within or across floors. In this study, we propose a neural dialogue structure parser based on multi-task learning and an attention mechanism on multi-floor dialogues in a collaborative robot navigation domain. Our experimental results show that our proposed model improved the dialogue structure parsing performance more than those of single models, which are trained on each dialogue structure parsing task in multi-floor dialogues.},
  keywords  = {ARL, Dialogue, DTIC, Natural Language, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Gratch, Jonathan
The field of Affective Computing: An interdisciplinary perspective Journal Article
In: Transactions of the Japanese Society for Artificial Intelligence, vol. 36, no. 1, pp. 13, 2021.
Links | BibTeX | Tags: Virtual Humans
@article{gratch_field_2021,
  author    = {Jonathan Gratch},
  title     = {The field of Affective Computing: An interdisciplinary perspective},
  journal   = {Transactions of the Japanese Society for Artificial Intelligence},
  volume    = {36},
  number    = {1},
  pages     = {13},
  year      = {2021},
  date      = {2021-01-01},
  url       = {https://people.ict.usc.edu/~gratch/CSCI534/Readings/Gratch%20-%20The%20field%20of%20affective%20computing.pdf},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
2020
Adami, Pooya; Becerik-Gerber, Burcin; Soibelman, Lucio; Doleck, Tenzin; Copur-Gencturk, Yasemin; Lucas, Gale
An Immersive Virtual Learning Environment for Worker-Robot Collaboration on Construction Sites Inproceedings
In: 2020 Winter Simulation Conference (WSC), pp. 2400–2411, IEEE, Orlando, FL, USA, 2020, ISBN: 978-1-72819-499-8.
Links | BibTeX | Tags: Learning Sciences
@inproceedings{adami_immersive_2020,
  author    = {Pooya Adami and Burcin Becerik-Gerber and Lucio Soibelman and Tenzin Doleck and Yasemin Copur-Gencturk and Gale Lucas},
  title     = {An Immersive Virtual Learning Environment for Worker-Robot Collaboration on Construction Sites},
  booktitle = {2020 Winter Simulation Conference (WSC)},
  pages     = {2400--2411},
  publisher = {IEEE},
  address   = {Orlando, FL, USA},
  isbn      = {978-1-72819-499-8},
  year      = {2020},
  date      = {2020-12-01},
  urldate   = {2022-10-24},
  url       = {https://ieeexplore.ieee.org/document/9383944/},
  doi       = {10.1109/WSC48552.2020.9383944},
  keywords  = {Learning Sciences},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chen, Meida; Feng, Andrew; McCullough, Kyle; Prasad, Pratusha Bhuvana; McAlinden, Ryan; Soibelman, Lucio
3D Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework Journal Article
In: Journal of Computing in Civil Engineering, vol. 34, no. 6, 2020, ISSN: 0887-3801, 1943-5487.
Abstract | Links | BibTeX | Tags: Narrative, STG, UARC
@article{chen_3d_2020,
  author    = {Meida Chen and Andrew Feng and Kyle McCullough and Pratusha Bhuvana Prasad and Ryan McAlinden and Lucio Soibelman},
  title     = {3D Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework},
  journal   = {Journal of Computing in Civil Engineering},
  volume    = {34},
  number    = {6},
  issn      = {0887-3801, 1943-5487},
  year      = {2020},
  date      = {2020-11-01},
  url       = {http://ascelibrary.org/doi/10.1061/%28ASCE%29CP.1943-5487.0000929},
  doi       = {10.1061/(ASCE)CP.1943-5487.0000929},
  abstract  = {The US Army is paying increased attention to the development of rapid three-dimensional (3D) reconstruction using photogrammetry and unmanned aerial vehicle (UAV) technologies for creating virtual environments and simulations in areas of interest. The ability of the intelligence community, mission commanders, and front-line soldiers to understand their deployed physical environment in advance is critical in the planning and rehearsal phases of any military operation. In order to achieve various simulation capabilities such as destruction operations, route planning, and explosive-standoff distances computation among others, reconstructed 3D data needs to be properly attributed. In this paper, we introduce a model ensembling framework for segmenting a 3D photogrammetry point cloud into top-level terrain elements (i.e., ground, human-made objects, and vegetation). Preprocessing and postprocessing methods were designed to overcome the data segmentation challenges posed by photogrammetric data-quality issues. A large UAV-based photogrammetric database was created for validation purposes. The designed model ensembling framework was compared with existing point cloud segmentation algorithms, and it outperformed other algorithms and achieved the best F1-score. Because the ultimate goal of segmenting a photogrammetric-generated point cloud is to create realistic virtual environments for simulation. Qualitative results for creating virtual environments using the segmented data are also discussed in this paper. DOI: 10.1061/(ASCE)CP.1943-5487.0000929. © 2020 American Society of Civil Engineers.},
  keywords  = {Narrative, STG, UARC},
  pubstate  = {published},
  tppubtype = {article}
}
Li, Jiaman; Kuang, Zhengfei; Zhao, Yajie; He, Mingming; Bladin, Karl; Li, Hao
Dynamic Facial Asset and Rig Generation from a Single Scan Journal Article
In: ACM Transactions on Graphics, vol. 39, no. 6, 2020.
Abstract | Links | BibTeX | Tags: ARO-Coop, Graphics
@article{li_dynamic_2020,
  author    = {Jiaman Li and Zhengfei Kuang and Yajie Zhao and Mingming He and Karl Bladin and Hao Li},
  title     = {Dynamic Facial Asset and Rig Generation from a Single Scan},
  journal   = {ACM Transactions on Graphics},
  volume    = {39},
  number    = {6},
  year      = {2020},
  date      = {2020-11-01},
  url       = {https://dl.acm.org/doi/10.1145/3414685.3417817},
  doi       = {10.1145/3414685.3417817},
  abstract  = {The creation of high-fidelity computer-generated (CG) characters for films and games is tied with intensive manual labor, which involves the creation of comprehensive facial assets that are often captured using complex hardware. To simplify and accelerate this digitization process, we propose a framework for the automatic generation of high-quality dynamic facial models, including rigs which can be readily deployed for artists to polish. Our framework takes a single scan as input to generate a set of personalized blendshapes, dynamic textures, as well as secondary facial components (e.g., teeth and eyeballs). Based on a facial database with over 4,000 scans with pore-level details, varying expressions and identities, we adopt a self-supervised neural network to learn personalized blendshapes from a set of template expressions. We also model the joint distribution between identities and expressions, enabling the inference of a full set of personalized blendshapes with dynamic appearances from a single neutral input scan. Our generated personalized face rig assets are seamlessly compatible with professional production pipelines for facial animation and rendering. We demonstrate a highly robust and effective framework on a wide range of subjects, and showcase high-fidelity facial animations with automatically generated personalized dynamic textures.},
  keywords  = {ARO-Coop, Graphics},
  pubstate  = {published},
  tppubtype = {article}
}