Publications
Search
Adami, Pooya; Singh, Rashmi; Rodrigues, Patrick Borges; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale
Participants matter: Effectiveness of VR-based training on the knowledge, trust in the robot, and self-efficacy of construction workers and university students Journal Article
In: Advanced Engineering Informatics, vol. 55, pp. 101837, 2023, ISSN: 1474-0346.
@article{adami_participants_2023,
title = {Participants matter: Effectiveness of {VR}-based training on the knowledge, trust in the robot, and self-efficacy of construction workers and university students},
author = {Pooya Adami and Rashmi Singh and Patrick Borges Rodrigues and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale Lucas},
url = {https://www.sciencedirect.com/science/article/pii/S1474034622002956},
doi = {10.1016/j.aei.2022.101837},
issn = {1474-0346},
year = {2023},
date = {2023-01-01},
urldate = {2023-03-31},
journal = {Advanced Engineering Informatics},
volume = {55},
pages = {101837},
abstract = {Virtual Reality (VR)-based training has gained attention from the scientific community in the Architecture, Engineering, and Construction (AEC) industry as a cost-effective and safe method that eliminates the safety risks that may impose on workers during the training compared to traditional training methods (e.g., in-person hands-on training, apprenticeship). Although researchers have developed VR-based training for construction workers, some have recruited students rather than workers to understand the effect of their VR-based training. However, students are different from construction workers in many ways, which can threaten the validity of such studies. Hence, research is needed to investigate the extent to which the findings of a VR-based training study are contingent on whether students or construction workers were used as the study sample. This paper strives to compare the effectiveness of VR-based training on university students’ and construction workers’ knowledge acquisition, trust in the robot, and robot operation self-efficacy in remote operation of a construction robot. Twenty-five construction workers and twenty-five graduate construction engineering students were recruited to complete a VR-based training for remote operating a demolition robot. We used quantitative analyses to answer our research questions. Our study shows that the results are dependent on the target sample in that students gained more knowledge, whereas construction workers gained more trust in the robot and more self-efficacy in robot operation. These findings suggest that the effectiveness of VR-based training on students may not necessarily associate with its effectiveness on construction workers.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lucas, Gale M.; Mell, Johnathan; Boberg, Jill; Zenone, Forrest; Visser, Ewart J.; Tossell, Chad; Seech, Todd
Customizing virtual interpersonal skills training applications may not improve trainee performance Journal Article
In: Sci Rep, vol. 13, no. 1, pp. 78, 2023, ISSN: 2045-2322, (Number: 1 Publisher: Nature Publishing Group).
@article{lucas_customizing_2023,
title = {Customizing virtual interpersonal skills training applications may not improve trainee performance},
author = {Gale M. Lucas and Johnathan Mell and Jill Boberg and Forrest Zenone and Ewart J. Visser and Chad Tossell and Todd Seech},
url = {https://www.nature.com/articles/s41598-022-27154-2},
doi = {10.1038/s41598-022-27154-2},
issn = {2045-2322},
year = {2023},
date = {2023-01-01},
urldate = {2023-03-31},
journal = {Scientific Reports},
volume = {13},
number = {1},
pages = {78},
abstract = {While some theoretical perspectives imply that the context of a virtual training should be customized to match the intended context where those skills would ultimately be applied, others suggest this might not be necessary for learning. It is important to determine whether manipulating context matters for performance in training applications because customized virtual training systems made for specific use cases are more costly than generic “off-the-shelf” ones designed for a broader set of users. Accordingly, we report a study where military cadets use a virtual platform to practice their negotiation skills, and are randomly assigned to one of two virtual context conditions: military versus civilian. Out of 28 measures capturing performance in the negotiation, there was only one significant result: cadets in the civilian condition politely ask the agent to make an offer significantly more than those in the military condition. These results imply that—for this interpersonal skills application, and perhaps ones like it—virtual context may matter very little for performance during social skills training, and that commercial systems may yield real benefits to military scenarios with little-to-no modification.},
note = {Number: 1
Publisher: Nature Publishing Group},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lei, Su; Gratch, Jonathan
Emotional Expressivity is a Reliable Signal of Surprise Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–12, 2023, ISSN: 1949-3045, (Conference Name: IEEE Transactions on Affective Computing).
@article{lei_emotional_2023,
title = {Emotional Expressivity is a Reliable Signal of Surprise},
author = {Su Lei and Jonathan Gratch},
doi = {10.1109/TAFFC.2023.3234015},
issn = {1949-3045},
year = {2023},
date = {2023-01-01},
journal = {IEEE Transactions on Affective Computing},
pages = {1--12},
abstract = {We consider the problem of inferring what happened to a person in a social task from momentary facial reactions. To approach this, we introduce several innovations. First, rather than predicting what (observers think) someone feels, we predict objective features of the event that immediately preceded the facial reactions. Second, we draw on appraisal theory, a key psychological theory of emotion, to characterize features of this immediately-preceded event. Specifically, we explore if facial expressions reveal if the event is expected, goal-congruent, and norm-compatible. Finally, we argue that emotional expressivity serves as a better feature for characterizing momentary expressions than traditional facial features. Specifically, we use supervised machine learning to predict third-party judgments of emotional expressivity with high accuracy, and show this model improves inferences about the nature of the event that preceded an emotional reaction. Contrary to common sense, “genuine smiles” failed to predict if an event advanced a person's goals. Rather, expressions best revealed if an event violated expectations. We discussed the implications of these findings for the interpretation of facial displays and potential limitations that could impact the generality of these findings.},
note = {Conference Name: IEEE Transactions on Affective Computing},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chawla, Kushal; Clever, Rene; Ramirez, Jaysa; Lucas, Gale M.; Gratch, Jonathan
Towards Emotion-Aware Agents for Improved User Satisfaction and Partner Perception in Negotiation Dialogues Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–12, 2023, ISSN: 1949-3045, (Conference Name: IEEE Transactions on Affective Computing).
@article{chawla_towards_2023,
title = {Towards Emotion-Aware Agents for Improved User Satisfaction and Partner Perception in Negotiation Dialogues},
author = {Kushal Chawla and Rene Clever and Jaysa Ramirez and Gale M. Lucas and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/abstract/document/10021626},
doi = {10.1109/TAFFC.2023.3238007},
issn = {1949-3045},
year = {2023},
date = {2023-01-01},
journal = {IEEE Transactions on Affective Computing},
pages = {1--12},
abstract = {Negotiation is a complex social interaction that encapsulates emotional encounters in human decision-making. Virtual agents that can negotiate with humans by the means of language are useful in pedagogy and conversational AI. To advance the development of such agents, we explore the role of emotion in the prediction of two important subjective goals in a negotiation – outcome satisfaction and partner perception. We devise ways to measure and compare different degrees of emotion expression in negotiation dialogues, consisting of emoticon, lexical, and contextual variables. Through an extensive analysis of a large-scale dataset in chat-based negotiations, we find that incorporating emotion expression explains significantly more variance, above and beyond the demographics and personality traits of the participants. Further, our temporal analysis reveals that emotive information from both early and later stages of the negotiation contributes to this prediction, indicating the need for a continual learning model of capturing emotion for automated agents. Finally, we extend our analysis to another dataset, showing promise that our findings generalize to more complex scenarios. We conclude by discussing our insights, which will be helpful for designing adaptive negotiation agents that interact through realistic communication interfaces.},
note = {Conference Name: IEEE Transactions on Affective Computing},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Vlake, Johan H.; Bommel, Jasper; Riva, Giuseppe; Wiederhold, Brenda K.; Cipresso, Pietro; Rizzo, Albert Skip; Botella, Cristina; Hooft, Lotty; Bienvenu, O. Joseph; Geerts, Bart; Wils, Evert-Jan; Gommers, Diederik; Genderen, Michel E.
Reporting the early stage clinical evaluation of virtual-reality-based intervention trials: RATE-VR Journal Article
In: Nat Med, vol. 29, no. 1, pp. 12–13, 2023, ISSN: 1546-170X, (Number: 1 Publisher: Nature Publishing Group).
@article{vlake_reporting_2023,
title = {Reporting the early stage clinical evaluation of virtual-reality-based intervention trials: {RATE-VR}},
author = {Johan H. Vlake and Jasper Bommel and Giuseppe Riva and Brenda K. Wiederhold and Pietro Cipresso and Albert Skip Rizzo and Cristina Botella and Lotty Hooft and O. Joseph Bienvenu and Bart Geerts and Evert-Jan Wils and Diederik Gommers and Michel E. Genderen},
url = {https://www.nature.com/articles/s41591-022-02085-7},
doi = {10.1038/s41591-022-02085-7},
issn = {1546-170X},
year = {2023},
date = {2023-01-01},
urldate = {2023-03-31},
journal = {Nature Medicine},
volume = {29},
number = {1},
pages = {12--13},
note = {Number: 1
Publisher: Nature Publishing Group},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon
Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment Proceedings Article
In: Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems, AHFE Open Access, 2023, ISBN: 978-1-958651-45-2, (ISSN: 27710718 Issue: 69).
@inproceedings{hartholt_creating_2023,
title = {Creating Virtual Worlds with the {Virtual Human Toolkit} and the Rapid Integration \& Development Environment},
author = {Arno Hartholt and Sharon Mozgai},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-45-2/article/978-1-958651-45-2_41},
doi = {10.54941/ahfe1002856},
isbn = {978-1-958651-45-2},
year = {2023},
date = {2023-01-01},
urldate = {2023-03-31},
booktitle = {Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems},
volume = {69},
publisher = {AHFE Open Access},
abstract = {The research and development of virtual humans, and the virtual worlds they inhabit, is inherently complex, requiring interdisciplinary approaches that combine social sciences, computer science, design, art, production, and domain expertise. Our previous work in managing this complexity has resulted in the release of the Virtual Human Toolkit (VHToolkit), aimed at lowering the burden of creating embodied conversational agents. In our current efforts, we are integrating the VHToolkit with the Rapid Integration & Development Environment (RIDE), a rapid prototyping modeling and simulation middleware platform that leverages real-time game engines. This integration results in the ability to mix and match commercial AI services from AWS, Azure, and Google, as well as leverage novel 3D geospatial terrain creation pipelines. Combined with dedicated authoring tools that have been developed through human-centered design processes, the platform enables researchers, developers, and domain experts to rapidly create digital worlds with virtual humans for both military and civilian contexts. Our approach is highly interdisciplinary, including academia, government, and industry collaborators. The demonstration shows a user interacting with an embodied conversational agent embedded within real-world captured and virtualized terrain. Further research and development features of the platform are shown, including scripted agent behaviors, networked team play, and machine learning interfaces.},
note = {ISSN: 27710718
Issue: 69},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Beland, Sarah; Leeds, Andrew; Winn, Jade G.; Kaurloto, Cari; Heylen, Dirk; Hartholt, Arno
Toward a Scoping Review of Social Intelligence in Virtual Humans Proceedings Article
In: 2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1–6, 2023.
@inproceedings{mozgai_toward_2023,
title = {Toward a Scoping Review of Social Intelligence in Virtual Humans},
author = {Sharon Mozgai and Sarah Beland and Andrew Leeds and Jade G. Winn and Cari Kaurloto and Dirk Heylen and Arno Hartholt},
url = {https://ieeexplore.ieee.org/abstract/document/10042532},
doi = {10.1109/FG57933.2023.10042532},
year = {2023},
date = {2023-01-01},
booktitle = {2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)},
pages = {1--6},
abstract = {As the demand for socially intelligent Virtual Humans (VHs) increases, so follows the demand for effective and efficient cross-discipline collaboration that is required to bring these VHs “to life”. One avenue for increasing cross-discipline fluency is the aggregation and organization of seemingly disparate areas of research and development (e.g., graphics and emotion models) that are essential to the field of VH research. Our initial investigation (1) identifies and catalogues research streams concentrated in three multidisciplinary VH topic clusters within the domain of social intelligence, Emotion, Social Behavior, and The Face, (2) brings to the forefront key themes and prolific authors within each topic cluster, and (3) provides evidence that a full scoping review is warranted to further map the field, aggregate research findings, and identify gaps in the research. To enable collaboration, we provide full access to the refined VH cluster datasets, key word and author word clouds, as well as interactive evidence maps.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Yu, Zifan; Chen, Meida; Zhang, Zhikang; You, Suya; Ren, Fengbo
TransUPR: A Transformer-based Uncertain Point Refiner for LiDAR Point Cloud Semantic Segmentation Journal Article
In: 2023, (Publisher: arXiv Version Number: 2).
@article{yu_transupr_2023,
title = {{TransUPR}: A Transformer-based Uncertain Point Refiner for {LiDAR} Point Cloud Semantic Segmentation},
author = {Zifan Yu and Meida Chen and Zhikang Zhang and Suya You and Fengbo Ren},
url = {https://arxiv.org/abs/2302.08594},
doi = {10.48550/ARXIV.2302.08594},
eprint = {2302.08594},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-24},
abstract = {In this work, we target the problem of uncertain points refinement for image-based LiDAR point cloud semantic segmentation (LiDAR PCSS). This problem mainly results from the boundary-blurring problem of convolution neural networks (CNNs) and quantitation loss of spherical projection, which are often hard to avoid for common image-based LiDAR PCSS approaches. We propose a plug-and-play transformer-based uncertain point refiner (TransUPR) to address the problem. Through local feature aggregation, uncertain point localization, and self-attention-based transformer design, TransUPR, integrated into an existing range image-based LiDAR PCSS approach (e.g., CENet), achieves the state-of-the-art performance (68.2% mIoU) on Semantic-KITTI benchmark, which provides a performance improvement of 0.6% on the mIoU.},
note = {Version 2},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Pynadath, David V; Gurney, Nikolos; Kenny, Sarah; Kumar, Rajay; Marsella, Stacy C.; Matuszak, Haley; Mostafa, Hala; Ustun, Volkan; Wu, Peggy; Sequeira, Pedro
Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a Minecraft Search-and-Rescue Task Proceedings Article
In: AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems, pp. 2334–2336, 2023.
@inproceedings{pynadath_effectiveness_2023,
title = {Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a {Minecraft} Search-and-Rescue Task},
author = {David V Pynadath and Nikolos Gurney and Sarah Kenny and Rajay Kumar and Stacy C. Marsella and Haley Matuszak and Hala Mostafa and Volkan Ustun and Peggy Wu and Pedro Sequeira},
url = {https://dl.acm.org/doi/10.5555/3545946.3598925},
year = {2023},
date = {2023-01-01},
booktitle = {AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems},
pages = {2334--2336},
abstract = {Autonomous agents offer the promise of improved human teamwork through automated assessment and assistance during task performance [15, 16, 18]. Studies of human teamwork have identified various processes that underlie joint task performance, while abstracting away the specifics of the task [7, 11, 13, 17].We present here an agent that focuses exclusively on teamwork-level variables in deciding what interventions to use in assisting a human team. Our agent does not directly observe or model the environment or the people in it, but instead relies on input from analytic components (ACs) (developed by other research teams) that process environmental information and output only teamwork-relevant measures. Our agent models these teamwork variables and updates its beliefs over them using a Bayesian Theory of Mind [1], applying Partially Observable Markov Decision Processes (POMDPs) [9] in a recursive manner to assess the state of the team it is currently observing and to choose interventions to best assist them.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Okado, Yuko; Nye, Benjamin D.; Aguirre, Angelica; Swartout, William
Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution Book Section
In: Wang, Ning; Rebolledo-Mendez, Genaro; Matsuda, Noboru; Santos, Olga C.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education, vol. 13916, pp. 189–201, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36271-2 978-3-031-36272-9, (Series Title: Lecture Notes in Computer Science).
@incollection{wang_can_2023,
title = {Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the {CareerFair.ai} Platform at an {American Hispanic-Serving Institution}},
author = {Yuko Okado and Benjamin D. Nye and Angelica Aguirre and William Swartout},
editor = {Ning Wang and Genaro Rebolledo-Mendez and Noboru Matsuda and Olga C. Santos and Vania Dimitrova},
url = {https://link.springer.com/10.1007/978-3-031-36272-9_16},
doi = {10.1007/978-3-031-36272-9_16},
isbn = {978-3-031-36271-2 978-3-031-36272-9},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-23},
booktitle = {Artificial Intelligence in Education},
series = {Lecture Notes in Computer Science},
volume = {13916},
pages = {189--201},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Georgila, Kallirroi
Considerations for Child Speech Synthesis for Dialogue Systems Proceedings Article
In: Los Angeles, CA, 2023.
@inproceedings{georgila_considerations_2023,
title = {Considerations for Child Speech Synthesis for Dialogue Systems},
author = {Kallirroi Georgila},
url = {https://kgeorgila.github.io/publications/georgila_aiaic23.pdf},
year = {2023},
date = {2023-01-01},
address = {Los Angeles, CA},
internal-note = {NOTE(review): required booktitle is missing for this @inproceedings entry and is not recoverable from this file; the PDF filename suggests an AIAIC 2023 venue -- confirm and add},
abstract = {We present a number of important issues for consideration with regard to child speech synthesis for dialogue systems. We specifically discuss challenges in building child synthetic voices compared to adult synthetic voices, synthesizing expressive conversational speech, and evaluating speech synthesis quality.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Yang, Jing; Xiao, Hanyuan; Teng, Wenbin; Cai, Yunxuan; Zhao, Yajie
Light Sampling Field and BRDF Representation for Physically-based Neural Rendering Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
@article{yang_light_2023,
title = {Light Sampling Field and {BRDF} Representation for Physically-based Neural Rendering},
author = {Jing Yang and Hanyuan Xiao and Wenbin Teng and Yunxuan Cai and Yajie Zhao},
url = {https://arxiv.org/abs/2304.05472},
doi = {10.48550/ARXIV.2304.05472},
eprint = {2304.05472},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-22},
abstract = {Physically-based rendering (PBR) is key for immersive rendering effects used widely in the industry to showcase detailed realistic scenes from computer graphics assets. A well-known caveat is that producing the same is computationally heavy and relies on complex capture devices. Inspired by the success in quality and efficiency of recent volumetric neural rendering, we want to develop a physically-based neural shader to eliminate device dependency and significantly boost performance. However, no existing lighting and material models in the current neural rendering approaches can accurately represent the comprehensive lighting models and BRDFs properties required by the PBR process. Thus, this paper proposes a novel lighting representation that models direct and indirect light locally through a light sampling strategy in a learned light sampling field. We also propose BRDF models to separately represent surface/subsurface scattering details to enable complex objects such as translucent material (i.e., skin, jade). We then implement our proposed representations with an end-to-end physically-based neural face skin shader, which takes a standard face asset (i.e., geometry, albedo map, and normal map) and an HDRI for illumination as inputs and generates a photo-realistic rendering as output. Extensive experiments showcase the quality and efficiency of our PBR face skin shader, indicating the effectiveness of our proposed lighting and material representations.},
note = {Version 1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation Journal Article
In: Proceedings of the Annual Meeting of the Cognitive Science Society, vol. 45, 2023.
@article{hale_risk_2023,
title = {Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation},
author = {James Hale and Peter Kim and Jonathan Gratch},
url = {https://escholarship.org/uc/item/7n01v4f9#main},
year = {2023},
date = {2023-01-01},
journal = {Proceedings of the Annual Meeting of the Cognitive Science Society},
volume = {45},
abstract = {Women and minorities obtain lower salaries when negotiating their employment compensation. Some have suggested that automated negotiation and dispute-resolution technology might address such material inequities. These algorithms elicit the multi-criteria preferences of each side of a dispute and arrive at solutions that are efficient and "provably" fair. In a study that explores the potential benefit of these methods, we highlight cognitive factors that may allow inequities to persist despite these methods. Specifically, risk-averse individuals express lower preferences for salary and as risk-aversion is more common in women and minorities, this translates into a ``provably'' fair lower salary. While this may reflect actual underlying differences in preferences across groups, individuals may be confounding their preferences for salary with their risk preference (i.e., their fear of not reaching an agreement), such that these groups achieve worse outcomes than they should. We further highlight that methodological choices in how negotiation processes are often studied can obscure the magnitude of this effect.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gurney, Nikolos; Pynadath, David V.; Wang, Ning
Comparing Psychometric and Behavioral Predictors of Compliance During Human-AI Interactions Book Section
In: vol. 13832, pp. 175–197, 2023, (arXiv:2302.01854 [cs]).
@incollection{gurney_comparing_2023,
title = {Comparing Psychometric and Behavioral Predictors of Compliance During Human-{AI} Interactions},
author = {Nikolos Gurney and David V. Pynadath and Ning Wang},
url = {http://arxiv.org/abs/2302.01854},
doi = {10.1007/978-3-031-30933-5_12},
eprint = {2302.01854},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-15},
volume = {13832},
pages = {175--197},
abstract = {Optimization of human-AI teams hinges on the AI's ability to tailor its interaction to individual human teammates. A common hypothesis in adaptive AI research is that minor differences in people's predisposition to trust can significantly impact their likelihood of complying with recommendations from the AI. Predisposition to trust is often measured with self-report inventories that are administered before interactions. We benchmark a popular measure of this kind against behavioral predictors of compliance. We find that the inventory is a less effective predictor of compliance than the behavioral measures in datasets taken from three previous research projects. This suggests a general property that individual differences in initial behavior are more predictive than differences in self-reported trust attitudes. This result also shows a potential for easily accessible behavioral measures to provide an AI with more accurate models without the use of (often costly) survey instruments.},
internal-note = {NOTE(review): required booktitle is missing for this @incollection entry; the DOI points to a Springer volume (vol. 13832) -- confirm book title and add},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Gurney, Nikolos; Pynadath, David; Wang, Ning
My Actions Speak Louder Than Your Words: When User Behavior Predicts Their Beliefs about Agents' Attributes Book Section
In: vol. 14051, pp. 232–248, 2023, (arXiv:2301.09011 [cs]).
@incollection{gurney_my_2023,
title = {My Actions Speak Louder Than Your Words: When User Behavior Predicts Their Beliefs about Agents' Attributes},
author = {Nikolos Gurney and David Pynadath and Ning Wang},
url = {http://arxiv.org/abs/2301.09011},
doi = {10.1007/978-3-031-35894-4_17},
eprint = {2301.09011},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-15},
volume = {14051},
pages = {232--248},
abstract = {An implicit expectation of asking users to rate agents, such as an AI decision-aid, is that they will use only relevant information – ask them about an agent's benevolence, and they should consider whether or not it was kind. Behavioral science, however, suggests that people sometimes use irrelevant information. We identify an instance of this phenomenon, where users who experience better outcomes in a human-agent interaction systematically rated the agent as having better abilities, being more benevolent, and exhibiting greater integrity in a post hoc assessment than users who experienced worse outcome – which were the result of their own behavior – with the same agent. Our analyses suggest the need for augmentation of models so that they account for such biased perceptions as well as mechanisms so that agents can detect and even actively work to correct this and similar biases of users.},
internal-note = {NOTE(review): required booktitle is missing for this @incollection entry; the DOI points to a Springer volume (vol. 14051) -- confirm book title and add},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Lu, Shuhong; Yoon, Youngwoo; Feng, Andrew
Co-Speech Gesture Synthesis using Discrete Gesture Token Learning Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
@article{lu_co-speech_2023,
title = {Co-Speech Gesture Synthesis using Discrete Gesture Token Learning},
author = {Shuhong Lu and Youngwoo Yoon and Andrew Feng},
url = {https://arxiv.org/abs/2303.12822},
doi = {10.48550/ARXIV.2303.12822},
eprint = {2303.12822},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-04},
abstract = {Synthesizing realistic co-speech gestures is an important and yet unsolved problem for creating believable motions that can drive a humanoid robot to interact and communicate with human users. Such capability will improve the impressions of the robots by human users and will find applications in education, training, and medical services. One challenge in learning the co-speech gesture model is that there may be multiple viable gesture motions for the same speech utterance. The deterministic regression methods can not resolve the conflicting samples and may produce over-smoothed or damped motions. We proposed a two-stage model to address this uncertainty issue in gesture synthesis by modeling the gesture segments as discrete latent codes. Our method utilizes RQ-VAE in the first stage to learn a discrete codebook consisting of gesture tokens from training data. In the second stage, a two-level autoregressive transformer model is used to learn the prior distribution of residual codes conditioned on input speech context. Since the inference is formulated as token sampling, multiple gesture sequences could be generated given the same speech input using top-k sampling. The quantitative results and the user study showed the proposed method outperforms the previous methods and is able to generate realistic and diverse gesture motions.},
note = {Version 1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Melo, Celso M. De; Gratch, Jonathan; Marsella, Stacy; Pelachaud, Catherine
Social Functions of Machine Emotional Expressions Journal Article
In: Proc. IEEE, pp. 1–16, 2023, ISSN: 0018-9219, 1558-2256.
@article{de_melo_social_2023,
title = {Social Functions of Machine Emotional Expressions},
author = {Celso M. De Melo and Jonathan Gratch and Stacy Marsella and Catherine Pelachaud},
url = {https://ieeexplore.ieee.org/document/10093227/},
doi = {10.1109/JPROC.2023.3261137},
issn = {0018-9219, 1558-2256},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-04},
journal = {Proceedings of the IEEE},
pages = {1--16},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Johnson, Emmanuel; Gratch, Jonathan; Gil, Yolanda
Virtual Agent Approach for Teaching the Collaborative Problem Solving Skill of Negotiation Book Section
In: Wang, Ning; Rebolledo-Mendez, Genaro; Dimitrova, Vania; Matsuda, Noboru; Santos, Olga C. (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky, vol. 1831, pp. 530–535, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36335-1 978-3-031-36336-8, (Series Title: Communications in Computer and Information Science).
@incollection{wang_virtual_2023,
title = {Virtual Agent Approach for Teaching the Collaborative Problem Solving Skill of Negotiation},
author = {Emmanuel Johnson and Jonathan Gratch and Yolanda Gil},
editor = {Ning Wang and Genaro Rebolledo-Mendez and Vania Dimitrova and Noboru Matsuda and Olga C. Santos},
url = {https://link.springer.com/10.1007/978-3-031-36336-8_82},
doi = {10.1007/978-3-031-36336-8_82},
isbn = {978-3-031-36335-1 978-3-031-36336-8},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky},
series = {Communications in Computer and Information Science},
volume = {1831},
pages = {530--535},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Sato, Motoaki; Terada, Kazunori; Gratch, Jonathan
Teaching Reverse Appraisal to Improve Negotiation Skills Journal Article
In: IEEE Trans. Affective Comput., pp. 1–14, 2023, ISSN: 1949-3045, 2371-9850.
@article{sato_teaching_2023,
title = {Teaching Reverse Appraisal to Improve Negotiation Skills},
author = {Motoaki Sato and Kazunori Terada and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/10189838/},
doi = {10.1109/TAFFC.2023.3285931},
issn = {1949-3045, 2371-9850},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
journal = {IEEE Transactions on Affective Computing},
pages = {1--14},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Tak, Ala N.; Gratch, Jonathan
Is GPT a Computational Model of Emotion? Detailed Analysis Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
@article{tak_is_2023,
title = {Is {GPT} a Computational Model of Emotion? Detailed Analysis},
author = {Ala N. Tak and Jonathan Gratch},
url = {https://arxiv.org/abs/2307.13779},
doi = {10.48550/ARXIV.2307.13779},
eprint = {2307.13779},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
abstract = {This paper investigates the emotional reasoning abilities of the GPT family of large language models via a component perspective. The paper first examines how the model reasons about autobiographical memories. Second, it systematically varies aspects of situations to impact emotion intensity and coping tendencies. Even without the use of prompt engineering, it is shown that GPT's predictions align significantly with human-provided appraisals and emotional labels. However, GPT faces difficulties predicting emotion intensity and coping responses. GPT-4 showed the highest performance in the initial study but fell short in the second, despite providing superior results after minor prompt engineering. This assessment brings up questions on how to effectively employ the strong points and address the weak areas of these models, particularly concerning response variability. These studies underscore the merits of evaluating models from a componential perspective.},
note = {Publisher: arXiv
Version Number: 1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Filter
2022
Gurney, Nikolos; Marsella, Stacy; Ustun, Volkan; Pynadath, David V.
Operationalizing Theories of Theory of Mind: A Survey Book Section
In: Gurney, Nikolos; Sukthankar, Gita (Ed.): Computational Theory of Mind for Human-Machine Teams, vol. 13775, pp. 3–20, Springer Nature Switzerland, Cham, 2022, ISBN: 978-3-031-21670-1 978-3-031-21671-8, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: Cognitive Architecture, Social Simulation, UARC
@incollection{gurney_operationalizing_2022,
title = {Operationalizing Theories of Theory of Mind: A Survey},
author = {Nikolos Gurney and Stacy Marsella and Volkan Ustun and David V. Pynadath},
editor = {Nikolos Gurney and Gita Sukthankar},
url = {https://link.springer.com/10.1007/978-3-031-21671-8_1},
doi = {10.1007/978-3-031-21671-8_1},
isbn = {978-3-031-21670-1 978-3-031-21671-8},
year = {2022},
date = {2022-01-01},
urldate = {2023-02-10},
booktitle = {Computational Theory of Mind for Human-Machine Teams},
volume = {13775},
pages = {3--20},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {Cognitive Architecture, Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Gurney, Nikolos; King, Tyler; Miller, John H.
An Experimental Method for Studying Complex Choices Proceedings Article
In: Stephanidis, Constantine; Antona, Margherita; Ntoa, Stavroula; Salvendy, Gavriel (Ed.): HCI International 2022 – Late Breaking Posters, pp. 39–45, Springer Nature Switzerland, Cham, 2022, ISBN: 978-3-031-19679-9.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@inproceedings{gurney_experimental_2022,
title = {An Experimental Method for Studying Complex Choices},
author = {Nikolos Gurney and Tyler King and John H. Miller},
editor = {Constantine Stephanidis and Margherita Antona and Stavroula Ntoa and Gavriel Salvendy},
url = {https://link.springer.com/chapter/10.1007/978-3-031-19679-9_6},
doi = {10.1007/978-3-031-19679-9_6},
isbn = {978-3-031-19679-9},
year = {2022},
date = {2022-01-01},
booktitle = {HCI International 2022 – Late Breaking Posters},
pages = {39--45},
publisher = {Springer Nature Switzerland},
address = {Cham},
series = {Communications in Computer and Information Science},
abstract = {The promise of computational decision aids, from review sites to emerging augmented cognition technology, is the potential for better choice outcomes. This promise is grounded in the notion that we understand human decision processes well enough to design useful interventions. Although researchers have made considerable advances in the understanding of human judgment and decision making, these efforts are mostly based on the analysis of simple, often linear choices. Cumulative Prospect Theory (CPT), a famous explanation for decision making under uncertainty, was developed and validated using binary choice experiments in which options varied on a single dimension. Behavioral science has largely followed this simplified methodology. Here, we introduce an experimental paradigm specifically for studying humans making complex choices that incorporate multiple variables with nonlinear interactions. The task involves tuning dials, each of which controls a different dimension of a nonlinear problem. Initial results show that in such an environment participants demonstrate classic cognitive artifacts, such as anchoring and adjusting, along with falling into exploitive traps that prevent adequate exploration of these complex decisions. Preventing such errors suggest a potentially valuable role for deploying algorithmic decision aids to enhance decision making in complex choices.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Hoegen, Jessie; DeVault, David; Gratch, Jonathan
Exploring the Function of Expressions in Negotiation: the DyNego-WOZ Corpus Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–12, 2022, ISSN: 1949-3045, (Conference Name: IEEE Transactions on Affective Computing).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{hoegen_exploring_2022,
title = {Exploring the Function of Expressions in Negotiation: the {DyNego-WOZ} Corpus},
author = {Jessie Hoegen and David DeVault and Jonathan Gratch},
doi = {10.1109/TAFFC.2022.3223030},
issn = {1949-3045},
year = {2022},
date = {2022-01-01},
journal = {IEEE Transactions on Affective Computing},
pages = {1--12},
abstract = {For affective computing to have an impact outside the laboratory, facial expressions must be studied in rich naturalistic situations. We argue negotiations are one such situation as they are ubiquitous in daily life, often evoke strong emotions, and perceived emotion shapes decisions and outcomes. Negotiations are a growing focus in AI research and applications, including agents that negotiate directly with people and attempt to use affective information. We introduce the DyNego-WOZ Corpus, which includes dyadic negotiation between participants and wizard-controlled virtual humans. We demonstrate the value of this corpus to the affective computing community by examining participants' facial expressions in response to a virtual human negotiation partner. We show that people's facial expressions typically co-occur with the end of their partner's speech (suggesting they reflect a reaction to the content of this speech), that these reactions do not correspond to prototypical emotional expressions, and that these reactions can help predict the expresser's subsequent action. We highlight challenges in working with such naturalistic data, including difficulties of expression recognition during speech, and the extreme variability of expressions, both across participants and within a negotiation. Our findings reinforce arguments that facial expressions convey more than emotional state but serve important communicative functions.},
note = {Conference Name: IEEE Transactions on Affective Computing},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Karpurapu, Abhilash; Jajodia, Aditya; Merchant, Chirag
Toward Charismatic Virtual Agents: How to Animate Your Speech and Be Charismatic Proceedings Article
In: Kurosu, Masaaki (Ed.): Human-Computer Interaction. User Experience and Behavior, pp. 580–590, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-05412-9.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@inproceedings{wang_toward_2022,
title = {Toward Charismatic Virtual Agents: How to Animate Your Speech and Be Charismatic},
author = {Ning Wang and Abhilash Karpurapu and Aditya Jajodia and Chirag Merchant},
editor = {Masaaki Kurosu},
url = {https://link.springer.com/chapter/10.1007/978-3-031-05412-9_39},
doi = {10.1007/978-3-031-05412-9_39},
isbn = {978-3-031-05412-9},
year = {2022},
date = {2022-01-01},
booktitle = {Human-Computer Interaction. User Experience and Behavior},
pages = {580--590},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Charisma is a powerful device of communication and persuasion. Researchers have pinpointed specific behaviors that contribute to the perception of charisma. How can we realize such behaviors in a virtual character? In this paper, we discuss our work in the design of charismatic behavior for a virtual human. We developed a series of verbal charismatic strategies based on the research on charismatic leaders, which was then used to re-write an existing tutorial on the human circulatory system to express charisma. We then collected voice recordings of the tutorial in both charismatic and non-charismatic voices using actors from a crowd-sourcing platform. In this paper, we present the analysis of the charismatic and non-charismatic voice recordings, and discuss what nonverbal behaviors in speeches contribute to perceived charisma. Results can shed light on the synthesis of charismatic speeches for virtual characters.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Greenwald, Eric; Montgomery, Ryan; Leitner, Maxyn
ARIN-561: An Educational Game for Learning Artificial Intelligence for High-School Students Proceedings Article
In: Rodrigo, Maria Mercedes; Matsuda, Noboru; Cristea, Alexandra I.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium, pp. 528–531, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-11647-6.
Abstract | Links | BibTeX | Tags: AI, UARC
@inproceedings{wang_arin-561_2022,
title = {{ARIN-561}: An Educational Game for Learning Artificial Intelligence for High-School Students},
author = {Ning Wang and Eric Greenwald and Ryan Montgomery and Maxyn Leitner},
editor = {Maria Mercedes Rodrigo and Noboru Matsuda and Alexandra I. Cristea and Vania Dimitrova},
url = {https://link.springer.com/chapter/10.1007/978-3-031-11647-6_108},
doi = {10.1007/978-3-031-11647-6_108},
isbn = {978-3-031-11647-6},
year = {2022},
date = {2022-01-01},
booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium},
pages = {528--531},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Artificial Intelligence (AI) is increasingly vital to our future generations, who will join a workforce that utilizes AI-driven tools and contributes to the advancement of AI. Today’s students will need exposure to AI knowledge at a younger age. Relatively little is currently known about how to most effectively provide AI education to K-12 students. In this paper, we discuss the design and evaluation of an educational game for high-school AI education called ARIN-561. Results from pilot studies indicate the potential of ARIN-561 to build AI knowledge, especially when students spend more time in the game.},
keywords = {AI, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Gurney, Nikolos; Pynadath, David V.; Wang, Ning
Measuring and Predicting Human Trust in Recommendations from an AI Teammate Proceedings Article
In: Degen, Helmut; Ntoa, Stavroula (Ed.): Artificial Intelligence in HCI, pp. 22–34, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-05643-7.
Abstract | Links | BibTeX | Tags: AI, Social Simulation, UARC
@inproceedings{gurney_measuring_2022,
title = {Measuring and Predicting Human Trust in Recommendations from an {AI} Teammate},
author = {Nikolos Gurney and David V. Pynadath and Ning Wang},
editor = {Helmut Degen and Stavroula Ntoa},
url = {https://link.springer.com/chapter/10.1007/978-3-031-05643-7_2},
doi = {10.1007/978-3-031-05643-7_2},
isbn = {978-3-031-05643-7},
year = {2022},
date = {2022-01-01},
booktitle = {Artificial Intelligence in HCI},
pages = {22--34},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Predicting compliance with AI recommendations and knowing when to intervene are critical facets of human-AI teaming. AIs are typically deployed in settings where their abilities to evaluate decision variables far exceed the abilities of their human counterparts. However, even though AIs excel at weighing multiple issues and computing near optimal solutions with speed and accuracy beyond that of any human, they still make mistakes. Thus, perfect compliance may be undesirable. This means, just as individuals must know when to follow the advice of other people, it is critical for them to know when to adopt the recommendations from their AI. Well-calibrated trust is thought to be a fundamental aspect of this type of knowledge. We compare the ability of a common trust inventory and the ability of a behavioral measure of trust to predict compliance and success in a reconnaissance mission. We interpret the experimental results to suggest that the behavioral measure is a better predictor of overall mission compliance and success. We discuss how this measure could possibly be used in compliance interventions and related open questions.},
keywords = {AI, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Paun, Silviu; Artstein, Ron; Poesio, Massimo
Learning from Multi-Annotated Corpora Book Section
In: Paun, Silviu; Artstein, Ron; Poesio, Massimo (Ed.): Statistical Methods for Annotation Analysis, pp. 147–165, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-03763-4.
Links | BibTeX | Tags: Natural Language, UARC
@incollection{paun_learning_2022,
title = {Learning from Multi-Annotated Corpora},
author = {Silviu Paun and Ron Artstein and Massimo Poesio},
editor = {Silviu Paun and Ron Artstein and Massimo Poesio},
url = {https://doi.org/10.1007/978-3-031-03763-4_6},
doi = {10.1007/978-3-031-03763-4_6},
isbn = {978-3-031-03763-4},
year = {2022},
date = {2022-01-01},
urldate = {2023-03-31},
booktitle = {Statistical Methods for Annotation Analysis},
pages = {147--165},
publisher = {Springer International Publishing},
address = {Cham},
series = {Synthesis Lectures on Human Language Technologies},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Paun, Silviu; Artstein, Ron; Poesio, Massimo
Probabilistic Models of Agreement Book Section
In: Paun, Silviu; Artstein, Ron; Poesio, Massimo (Ed.): Statistical Methods for Annotation Analysis, pp. 79–101, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-03763-4.
Links | BibTeX | Tags: Natural Language, UARC
@incollection{paun_probabilistic_2022,
title = {Probabilistic Models of Agreement},
author = {Silviu Paun and Ron Artstein and Massimo Poesio},
editor = {Silviu Paun and Ron Artstein and Massimo Poesio},
url = {https://doi.org/10.1007/978-3-031-03763-4_4},
doi = {10.1007/978-3-031-03763-4_4},
isbn = {978-3-031-03763-4},
year = {2022},
date = {2022-01-01},
urldate = {2023-03-31},
booktitle = {Statistical Methods for Annotation Analysis},
pages = {79--101},
publisher = {Springer International Publishing},
address = {Cham},
series = {Synthesis Lectures on Human Language Technologies},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Paun, Silviu; Artstein, Ron; Poesio, Massimo
Using Agreement Measures for CL Annotation Tasks Book Section
In: Paun, Silviu; Artstein, Ron; Poesio, Massimo (Ed.): Statistical Methods for Annotation Analysis, pp. 47–78, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-03763-4.
Abstract | Links | BibTeX | Tags: Natural Language, UARC
@incollection{paun_using_2022,
title = {Using Agreement Measures for {CL} Annotation Tasks},
author = {Silviu Paun and Ron Artstein and Massimo Poesio},
editor = {Silviu Paun and Ron Artstein and Massimo Poesio},
url = {https://doi.org/10.1007/978-3-031-03763-4_3},
doi = {10.1007/978-3-031-03763-4_3},
isbn = {978-3-031-03763-4},
year = {2022},
date = {2022-01-01},
urldate = {2023-03-31},
booktitle = {Statistical Methods for Annotation Analysis},
pages = {47--78},
publisher = {Springer International Publishing},
address = {Cham},
series = {Synthesis Lectures on Human Language Technologies},
abstract = {We will now review the use of intercoder agreement measures in CL since Carletta’s original paper in the light of the discussion in the previous sections. We begin with a summary of Krippendorff’s recommendations about measuring reliability (Krippendorff, 2004a, Chapter 11), then discuss how coefficients of agreement have been used in CL to measure the reliability of annotation, focusing in particular on the types of annotation where there has been some debate concerning the most appropriate measures of agreement.},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Paun, Silviu; Artstein, Ron; Poesio, Massimo
Probabilistic Models of Annotation Book Section
In: Paun, Silviu; Artstein, Ron; Poesio, Massimo (Ed.): Statistical Methods for Annotation Analysis, pp. 105–145, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-03763-4.
Links | BibTeX | Tags: Natural Language, UARC
@incollection{paun_probabilistic_2022-1,
title = {Probabilistic Models of Annotation},
author = {Silviu Paun and Ron Artstein and Massimo Poesio},
editor = {Silviu Paun and Ron Artstein and Massimo Poesio},
url = {https://doi.org/10.1007/978-3-031-03763-4_5},
doi = {10.1007/978-3-031-03763-4_5},
isbn = {978-3-031-03763-4},
year = {2022},
date = {2022-01-01},
urldate = {2023-03-31},
booktitle = {Statistical Methods for Annotation Analysis},
pages = {105--145},
publisher = {Springer International Publishing},
address = {Cham},
series = {Synthesis Lectures on Human Language Technologies},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Saxon, Leslie; Faulk, Robert T; Barrett, Travor; McLelland, Steve; Boberg, Jill
A Novel Digital Research Methodology for Continuous Health Assessment of the Special Operations Warfighter: The Digital cORA Study Journal Article
In: J. Spec. Oper. Med., vol. 22, no. 4, pp. 78, 2022, ISSN: 1553-9768.
Links | BibTeX | Tags: CBC, UARC
@article{saxon_novel_2022,
title = {A Novel Digital Research Methodology for Continuous Health Assessment of the Special Operations Warfighter: The Digital {cORA} Study},
author = {Leslie Saxon and Robert T. Faulk and Travor Barrett and Steve McLelland and Jill Boberg},
url = {https://www.jsomonline.org/Citations/4SSJ-AHIB.php},
doi = {10.55460/4SSJ-AHIB},
issn = {1553-9768},
year = {2022},
date = {2022-01-01},
urldate = {2023-03-31},
journal = {J. Spec. Oper. Med.},
volume = {22},
number = {4},
pages = {78},
keywords = {CBC, UARC},
pubstate = {published},
tppubtype = {article}
}
Talbot, Thomas Brett; Chinara, Chinmay
Open Medical Gesture: An Open-Source Experiment in Naturalistic Physical Interactions for Mixed and Virtual Reality Simulations Proceedings Article
In: Human Factors in Virtual Environments and Game Design, AHFE Open Access, 2022, ISBN: 978-1-958651-26-1, (ISSN: 27710718 Issue: 50).
Abstract | Links | BibTeX | Tags: MedVR, UARC
@inproceedings{talbot_open_2022,
title = {Open Medical Gesture: An Open-Source Experiment in Naturalistic Physical Interactions for Mixed and Virtual Reality Simulations},
author = {Thomas Brett Talbot and Chinmay Chinara},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-26-1/article/978-1-958651-26-1_0},
doi = {10.54941/ahfe1002054},
isbn = {978-1-958651-26-1},
year = {2022},
date = {2022-01-01},
urldate = {2023-04-03},
booktitle = {Human Factors in Virtual Environments and Game Design},
volume = {50},
publisher = {AHFE Open Access},
abstract = {Mixed (MR) and Virtual Reality (VR) simulations are hampered by requirements for hand controllers or attempts to perseverate in use of two-dimensional computer interface paradigms from the 1980s. From our efforts to produce more naturalistic interactions for combat medic training for the military, we have developed an open-source toolkit that enables direct hand controlled responsive interactions that is sensor independent and can function with depth sensing cameras, webcams or sensory gloves. From this research and review of current literature, we have discerned several best approaches for hand-based human computer interactions which provide intuitive, responsive, useful, and low frustration experiences for VR users. The center of an effective gesture system is a universal hand model that can map to inputs from several different kinds of sensors rather than depending on a specific commercial product. Parts of the hand are effectors in simulation space with a physics-based model. Therefore, translational and rotational forces from the hands will impact physical objects in VR which varies based on the mass of the virtual objects. We incorporate computer code w/ objects, calling them “Smart Objects”, which allows such objects to have movement properties and collision detection for expected manipulation. Examples of smart objects include scissors, a ball, a turning knob, a moving lever, or a human figure with moving limbs. Articulation points contain collision detectors and code to assist in expected hand actions. We include a library of more than 40 Smart Objects in the toolkit. Thus, is it possible to throw a ball, hit that ball with a bat, cut a bandage, turn on a ventilator or to lift and inspect a human arm.We mediate the interaction of the hands with virtual objects. Hands often violate the rules of a virtual world simply by passing through objects. One must interpret user intent. 
This can be achieved by introducing stickiness of the hands to objects. If the human’s hands overshoot an object, we place the hand onto that object’s surface unless the hand passes the object by a significant distance. We also make hands and fingers contact an object according to the object’s contours and do not allow fingers to sink into the interior of an object. Haptics, or a sense of physical resistance and tactile sensation from contacting physical objects is a supremely difficult technical challenge and is an expensive pursuit. Our approach ignores true haptics, but we have experimented with an alternative approach, called audio tactile synesthesia where we substitute the sensation of touch for that of sound. The idea is to associate parts of each hand with a tone of a specific frequency upon contacting objects. The attack rate of the sound envelope varies with the velocity of contact and hardness of the object being ‘touched’. Such sounds can feel softer or harder depending on the nature of ‘touch’ being experienced. This substitution technique can provide tactile feedback through indirect, yet still naturalistic means. The artificial intelligence (AI) technique to determine discrete hand gestures and motions within the physical space is a special form of AI called Long Short Term Memory (LSTM). LSTM allows much faster and flexible recognition than other machine learning approaches. LSTM is particularly effective with points in motion. Latency of recognition is very low. In addition to LSTM, we employ other synthetic vision & object recognition AI to the discrimination of real-world objects. This allows for methods to conduct virtual simulations. For example, it is possible to pick up a virtual syringe and inject a medication into a virtual patient through hand motions. We track the hand points to contact with the virtual syringe. We also detect when the hand is compressing the syringe plunger. 
We could also use virtual medications & instruments on human actors or manikins, not just on virtual objects. With object recognition AI, we can place a syringe on a tray in the physical world. The human user can pick up the syringe and use it on a virtual patient. Thus, we are able to blend physical and virtual simulation together seamlessly in a highly intuitive and naturalistic manner.The techniques and technologies explained here represent a baseline capability whereby interacting in mixed and virtual reality can now be much more natural and intuitive than it has ever been. We have now passed a threshold where we can do away with game controllers and magnetic trackers for VR. This advancement will contribute to greater adoption of VR solutions. To foster this, our team has committed to freely sharing these technologies for all purposes and at no cost as an open-source tool. We encourage the scientific, research, educational and medical communities to adopt these resources and determine their effectiveness and utilize these tools and practices to grow the body of useful VR applications.},
note = {ISSN: 27710718
Issue: 50},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Leitner, Maxyn; Greenwald, Eric; Montgomery, Ryan; Wang, Ning
Design and Evaluation of ARIN-561: An Educational Game for Youth Artificial Intelligence Education Proceedings Article
In: Proceedings of the 30th International Conference on Computers in Education, 2022.
Abstract | Links | BibTeX | Tags: AI, UARC
@inproceedings{leitner_design_2022,
title = {Design and Evaluation of {ARIN-561}: An Educational Game for Youth Artificial Intelligence Education},
author = {Maxyn Leitner and Eric Greenwald and Ryan Montgomery and Ning Wang},
url = {https://par.nsf.gov/servlets/purl/10440195},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the 30th International Conference on Computers in Education},
abstract = {Artificial Intelligence (AI) is increasingly vital to our everyday lives. Future generations will not only consume AI, but work with AI-driven tools and contribute to the development of AI. As such, students will need exposure to AI knowledge at a younger age. Despite this need, relatively little is currently known about how to most effectively provide AI education to K-12 (kindergarten through 12th grade) students. In this paper, we discuss the design of an educational game for high-school AI education called ARIN-561. The game centered around two agents – a player character and a companion robot, as the story and learning experience unfold through conversations between the two agents and explorations that bond the two agents A series of studies were carried out at high schools in the United States to evaluate the efficacy of the game. Results indicate the potential of ARIN-561 to build AI knowledge, especially when students spend more time in the game.},
keywords = {AI, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chawla, Kushal; Shi, Weiyan; Zhang, Jingwen; Lucas, Gale; Yu, Zhou; Gratch, Jonathan
Social Influence Dialogue Systems: A Survey of Datasets and Models For Social Influence Tasks Journal Article
In: 2022, (Publisher: arXiv Version Number: 2).
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{chawla_social_2022,
title = {Social Influence Dialogue Systems: A Survey of Datasets and Models For Social Influence Tasks},
author = {Kushal Chawla and Weiyan Shi and Jingwen Zhang and Gale Lucas and Zhou Yu and Jonathan Gratch},
url = {https://arxiv.org/abs/2210.05664},
doi = {10.48550/ARXIV.2210.05664},
eprint = {2210.05664},
eprinttype = {arXiv},
year = {2022},
date = {2022-01-01},
urldate = {2023-08-22},
abstract = {Dialogue systems capable of social influence such as persuasion, negotiation, and therapy, are essential for extending the use of technology to numerous realistic scenarios. However, existing research primarily focuses on either task-oriented or open-domain scenarios, a categorization that has been inadequate for capturing influence skills systematically. There exists no formal definition or category for dialogue systems with these skills and data-driven efforts in this direction are highly limited. In this work, we formally define and introduce the category of social influence dialogue systems that influence users' cognitive and emotional responses, leading to changes in thoughts, opinions, and behaviors through natural conversations. We present a survey of various tasks, datasets, and methods, compiling the progress across seven diverse domains. We discuss the commonalities and differences between the examined systems, identify limitations, and recommend future directions. This study serves as a comprehensive reference for social influence dialogue systems to inspire more dedicated research and discussion in this emerging area.},
note = {Publisher: arXiv
Version Number: 2},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Herrick, Imogen; Sinatra, Gale; Kennedy, Alana; Nye, Benjamin; Swartout, William; Lindsey, Emily
Using Augmented Reality (AR) to Bring the Past to Life in Informal Science Learning Journal Article
In: NSF-PAR, 2022.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC
@article{herrick_using_2022,
title = {Using Augmented Reality ({AR}) to Bring the Past to Life in Informal Science Learning},
author = {Imogen Herrick and Gale Sinatra and Alana Kennedy and Benjamin Nye and William Swartout and Emily Lindsey},
url = {https://par.nsf.gov/biblio/10344989},
year = {2022},
date = {2022-01-01},
journal = {NSF-PAR},
abstract = {A key mission for museums is to engage a large and diverse public audience in science learning (Macdonald, 1997). To that end, science museums attempt to use immersive technologies in entertaining, socially oriented, and innovative ways. An example is the use of augmented reality (AR) to overlay virtual objects onto the real-world (Azuma, Baillot, Behringer, Feiner, Julier, & MacIntyre, 2001).We used a Design Based Research (DBR) approach to develop and test four features of an AR experience to promote place-based science learning in an museum setting. While quantitative differences were not found among conditions in knowledge gained, significant learning gains were seen from pre to post, illustrating the potential for place-based informal science learning. Incorporating AR technology into museum exhibits can update them with 21st tools to support visitor engagement in the learning experience. This research contributes to understanding of usability and logistical issues for different AR designs for a public, outdoor informal settings.},
keywords = {Learning Sciences, UARC},
pubstate = {published},
tppubtype = {article}
}
2021
Li, Jiaman; Villegas, Ruben; Ceylan, Duygu; Yang, Jimei; Kuang, Zhengfei; Li, Hao; Zhao, Yajie
Task-Generic Hierarchical Human Motion Prior using VAEs Proceedings Article
In: 2021 International Conference on 3D Vision (3DV), pp. 771–781, IEEE, London, United Kingdom, 2021, ISBN: 978-1-66542-688-6.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{li_task-generic_2021,
title = {Task-Generic Hierarchical Human Motion Prior using {VAEs}},
author = {Jiaman Li and Ruben Villegas and Duygu Ceylan and Jimei Yang and Zhengfei Kuang and Hao Li and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9665881/},
doi = {10.1109/3DV53792.2021.00086},
isbn = {978-1-66542-688-6},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-22},
booktitle = {2021 International Conference on 3D Vision (3DV)},
pages = {771--781},
publisher = {IEEE},
address = {London, United Kingdom},
keywords = {DTIC, UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Lixing; Gurney, Nikolos; McCullough, Kyle; Ustun, Volkan
Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations Proceedings Article
In: 2021 Winter Simulation Conference (WSC), pp. 1–12, IEEE, Phoenix, AZ, USA, 2021, ISBN: 978-1-66543-311-2.
Links | BibTeX | Tags: DTIC, Learning Sciences, UARC, Virtual Humans
@inproceedings{liu_graph_2021,
title = {Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations},
author = {Lixing Liu and Nikolos Gurney and Kyle McCullough and Volkan Ustun},
url = {https://ieeexplore.ieee.org/document/9715433/},
doi = {10.1109/WSC52266.2021.9715433},
isbn = {978-1-66543-311-2},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-21},
booktitle = {2021 Winter Simulation Conference (WSC)},
pages = {1--12},
publisher = {IEEE},
address = {Phoenix, AZ, USA},
keywords = {DTIC, Learning Sciences, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; McCullough, Kyle; Fast, Ed; Leeds, Andrew; Mozgai, Sharon; Aris, Tim; Ustun, Volkan; Gordon, Andrew; McGroarty, Christopher
Rapid Prototyping for Simulation and Training with the Rapid Integration & Development Environment (RIDE) Proceedings Article
In: 2021.
BibTeX | Tags: AI, DTIC, Integration Technology, Machine Learning, Simulation, UARC, VHTL
@inproceedings{hartholt_rapid_2021,
title = {Rapid Prototyping for Simulation and Training with the Rapid Integration \& Development Environment ({RIDE})},
author = {Arno Hartholt and Kyle McCullough and Ed Fast and Andrew Leeds and Sharon Mozgai and Tim Aris and Volkan Ustun and Andrew Gordon and Christopher McGroarty},
year = {2021},
date = {2021-11-01},
keywords = {AI, DTIC, Integration Technology, Machine Learning, Simulation, UARC, VHTL},
pubstate = {published},
tppubtype = {inproceedings}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Associations Among Home Indoor Environmental Quality Factors and Worker Health While Working From Home During COVID-19 Pandemic Journal Article
In: ASME Journal of Engineering for Sustainable Buildings and Cities, vol. 2, no. 4, pp. 041001, 2021, ISSN: 2642-6641, 2642-6625.
Abstract | Links | BibTeX | Tags: DTIC, UARC
@article{awada_associations_2021,
title = {Associations Among Home Indoor Environmental Quality Factors and Worker Health While Working From Home During {COVID-19} Pandemic},
author = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn C. Roll},
url = {https://asmedigitalcollection.asme.org/sustainablebuildings/article/2/4/041001/1122847/Associations-Among-Home-Indoor-Environmental},
doi = {10.1115/1.4052822},
issn = {2642-6641, 2642-6625},
year = {2021},
date = {2021-11-01},
urldate = {2022-09-26},
journal = {ASME Journal of Engineering for Sustainable Buildings and Cities},
volume = {2},
number = {4},
pages = {041001},
abstract = {Abstract The outbreak of SARS-CoV-2 virus forced office workers to conduct their daily work activities from home over an extended period. Given this unique situation, an opportunity emerged to study the satisfaction of office workers with indoor environmental quality (IEQ) factors of their houses where work activities took place and associate these factors with mental and physical health. We designed and administered a questionnaire that was open for 45 days during the COVID-19 pandemic and received valid data from 988 respondents. The results show that low satisfaction with natural lighting, glare, and humidity predicted eye-related symptoms, while low satisfaction with noise was a strong predictor of fatigue or tiredness, headaches or migraines, anxiety, and depression or sadness. Nose- and throat-related symptoms and skin-related symptoms were only uniquely predicted by low satisfaction with humidity. Low satisfaction with glare uniquely predicted an increase in musculoskeletal discomfort. Symptoms related to mental stress, rumination, or worry were predicted by low satisfaction with air quality and noise. Finally, low satisfaction with noise and indoor temperature predicted the prevalence of symptoms related to trouble concentrating, maintaining attention, or focus. Workers with higher income were more satisfied with humidity, air quality, and indoor temperature and had better overall mental health. Older individuals had increased satisfaction with natural lighting, humidity, air quality, noise, and indoor temperature. Findings from this study can inform future design practices that focus on hybrid home-work environments by highlighting the impact of IEQ factors on occupant well-being.},
keywords = {DTIC, UARC},
pubstate = {published},
tppubtype = {article}
}
Talbot, Thomas B.; Rizzo, Albert S.; Soleymani, Mohammed
In: Technology, Mind, and Behavior, 2021.
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{talbot_advances_2021,
  author    = {Thomas B. Talbot and Albert S. Rizzo and Mohammed Soleymani},
  title     = {Advances in Affective Computing for Psychological Applications: From the Fundamentals to the Future of Emotional Cognizant Artificial Intelligence Entities},
  journal   = {Technology, Mind, and Behavior},
  year      = {2021},
  date      = {2021-11-01},
  urldate   = {2023-03-31},
  doi       = {10.1037/tms0000011},
  url       = {https://tmb.apaopen.org/pub/zm0la9di/release/1},
  abstract  = {Keywords: Affective Computing, Emotion Perception, Virtual Humans, Conversational Simulations},
  keywords  = {MedVR, UARC},
  pubstate  = {published},
  tppubtype = {article}
}
Li, Tianye; Liu, Shichen; Bolkart, Timo; Liu, Jiayi; Li, Hao; Zhao, Yajie
Topologically Consistent Multi-View Face Inference Using Volumetric Sampling Proceedings Article
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 3804–3814, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{li_topologically_2021,
title = {Topologically Consistent Multi-View Face Inference Using Volumetric Sampling},
author = {Tianye Li and Shichen Liu and Timo Bolkart and Jiayi Liu and Hao Li and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9711264/},
doi = {10.1109/ICCV48922.2021.00380},
isbn = {978-1-66542-812-5},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-22},
booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {3804--3814},
publisher = {IEEE},
address = {Montreal, QC, Canada},
keywords = {DTIC, UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Shichen; Zhou, Yichao; Zhao, Yajie
VaPiD: A Rapid Vanishing Point Detector via Learned Optimizers Proceedings Article
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 12839–12848, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{liu_vapid_2021,
title = {{VaPiD}: A Rapid Vanishing Point Detector via Learned Optimizers},
author = {Shichen Liu and Yichao Zhou and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9711313/},
doi = {10.1109/ICCV48922.2021.01262},
isbn = {978-1-66542-812-5},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-22},
booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {12839--12848},
publisher = {IEEE},
address = {Montreal, QC, Canada},
keywords = {DTIC, UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Toyoda, Yuushi; Lucas, Gale; Gratch, Jonathan
Predicting Worker Accuracy from Nonverbal Behaviour: Benefits and Potential for Algorithmic Bias Proceedings Article
In: Companion Publication of the 2021 International Conference on Multimodal Interaction, pp. 25–30, ACM, Montreal QC Canada, 2021, ISBN: 978-1-4503-8471-1.
Links | BibTeX | Tags: DTIC, UARC
@inproceedings{toyoda_predicting_2021,
title = {Predicting Worker Accuracy from Nonverbal Behaviour: Benefits and Potential for Algorithmic Bias},
author = {Yuushi Toyoda and Gale Lucas and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3461615.3485427},
doi = {10.1145/3461615.3485427},
isbn = {978-1-4503-8471-1},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-28},
booktitle = {Companion Publication of the 2021 International Conference on Multimodal Interaction},
pages = {25--30},
publisher = {ACM},
address = {Montreal QC Canada},
keywords = {DTIC, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Adami, Pooya; Rodrigues, Patrick B.; Woods, Peter J.; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale
Effectiveness of VR-based training on improving construction workers’ knowledge, skills, and safety behavior in robotic teleoperation Journal Article
In: Advanced Engineering Informatics, vol. 50, pp. 101431, 2021, ISSN: 14740346.
Links | BibTeX | Tags: DTIC, Learning Sciences, UARC, VR
@article{adami_effectiveness_2021,
title = {Effectiveness of {VR}-based training on improving construction workers’ knowledge, skills, and safety behavior in robotic teleoperation},
author = {Pooya Adami and Patrick B. Rodrigues and Peter J. Woods and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S147403462100183X},
doi = {10.1016/j.aei.2021.101431},
issn = {1474-0346},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-26},
journal = {Advanced Engineering Informatics},
volume = {50},
pages = {101431},
keywords = {DTIC, Learning Sciences, UARC, VR},
pubstate = {published},
tppubtype = {article}
}
Kontogiorgos, Dimosthenis; Tran, Minh; Gustafson, Joakim; Soleymani, Mohammad
A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures Proceedings Article
In: Proceedings of the 2021 International Conference on Multimodal Interaction, pp. 112–120, ACM, Montréal QC Canada, 2021, ISBN: 978-1-4503-8481-0.
Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{kontogiorgos_systematic_2021,
title = {A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures},
author = {Dimosthenis Kontogiorgos and Minh Tran and Joakim Gustafson and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3462244.3479887},
doi = {10.1145/3462244.3479887},
isbn = {978-1-4503-8481-0},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-23},
booktitle = {Proceedings of the 2021 International Conference on Multimodal Interaction},
pages = {112--120},
publisher = {ACM},
address = {Montréal QC Canada},
keywords = {DTIC, Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Xiang, Sitao; Gu, Yuming; Xiang, Pengda; Chai, Menglei; Li, Hao; Zhao, Yajie; He, Mingming
DisUnknown: Distilling Unknown Factors for Disentanglement Learning Proceedings Article
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 14790–14799, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{xiang_disunknown_2021,
title = {{DisUnknown}: Distilling Unknown Factors for Disentanglement Learning},
author = {Sitao Xiang and Yuming Gu and Pengda Xiang and Menglei Chai and Hao Li and Yajie Zhao and Mingming He},
url = {https://ieeexplore.ieee.org/document/9709965/},
doi = {10.1109/ICCV48922.2021.01454},
isbn = {978-1-66542-812-5},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-23},
booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {14790--14799},
publisher = {IEEE},
address = {Montreal, QC, Canada},
keywords = {DTIC, UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Davis, Matt
Augmented Reality In Natural History Museums: Impact on Visitor Engagement and Science Learning Proceedings Article
In: GSA, 2021.
Links | BibTeX | Tags: AR, Learning Sciences, UARC
@inproceedings{davis_augment_2021,
title = {Augmented Reality In Natural History Museums: Impact on Visitor Engagement and Science Learning},
author = {Matt Davis},
url = {https://gsa.confex.com/gsa/2021AM/webprogram/Paper371425.html},
year = {2021},
date = {2021-10-01},
urldate = {2023-03-31},
publisher = {GSA},
keywords = {AR, Learning Sciences, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan
Comparing The Accuracy of Frequentist and Bayesian Models in Human-Agent Negotiation Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 139–144, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{johnson_comparing_2021,
title = {Comparing The Accuracy of Frequentist and {Bayesian} Models in Human-Agent Negotiation},
author = {Emmanuel Johnson and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3472306.3478354},
doi = {10.1145/3472306.3478354},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {139--144},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan; Boberg, Jill; DeVault, David; Kim, Peter; Lucas, Gale
Using Intelligent Agents to Examine Gender in Negotiations Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 90–97, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{johnson_using_2021,
title = {Using Intelligent Agents to Examine Gender in Negotiations},
author = {Emmanuel Johnson and Jonathan Gratch and Jill Boberg and David DeVault and Peter Kim and Gale Lucas},
url = {https://dl.acm.org/doi/10.1145/3472306.3478348},
doi = {10.1145/3472306.3478348},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {90--97},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Terada, Kazunori; Okazoe, Mitsuki; Gratch, Jonathan
Effect of politeness strategies in dialogue on negotiation outcomes Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 195–202, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{terada_effect_2021,
title = {Effect of politeness strategies in dialogue on negotiation outcomes},
author = {Kazunori Terada and Mitsuki Okazoe and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3472306.3478336},
doi = {10.1145/3472306.3478336},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {195--202},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chawla, Kushal; Clever, Rene; Ramirez, Jaysa; Lucas, Gale; Gratch, Jonathan
Towards Emotion-Aware Agents For Negotiation Dialogues Proceedings Article
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-019-0.
Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{chawla_towards_2021,
title = {Towards Emotion-Aware Agents For Negotiation Dialogues},
author = {Kushal Chawla and Rene Clever and Jaysa Ramirez and Gale Lucas and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/9597427/},
doi = {10.1109/ACII52823.2021.9597427},
isbn = {978-1-66540-019-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-27},
booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)},
pages = {1--8},
publisher = {IEEE},
address = {Nara, Japan},
keywords = {DTIC, Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale M.; Gratch, Jonathan
Pandemic Panic: The Effect of Disaster-Related Stress on Negotiation Outcomes Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 148–155, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, Social Simulation, UARC
@inproceedings{mell_pandemic_2021,
title = {Pandemic Panic: The Effect of Disaster-Related Stress on Negotiation Outcomes},
author = {Johnathan Mell and Gale M. Lucas and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3472306.3478353},
doi = {10.1145/3472306.3478353},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-26},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {148--155},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaffey, Patricia; Traum, David
Identity models for role-play dialogue characters Proceedings Article
In: 2021.
Links | BibTeX | Tags: Dialogue, DTIC, UARC
@inproceedings{chaffey_identity_2021,
title = {Identity models for role-play dialogue characters},
author = {Patricia Chaffey and David Traum},
url = {http://semdial.org/anthology/papers/Z/Z21/Z21-4022/},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-23},
booktitle = {Proceedings of the 25th Workshop on the Semantics and Pragmatics of Dialogue},
keywords = {Dialogue, DTIC, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Hernandez, Stephanie; Artstein, Ron
Annotating low-confidence questions improves classifier performance Journal Article
In: Proceedings of the 25th Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts, 2021.
Abstract | Links | BibTeX | Tags: Natural Language, UARC
@article{hernandez_annotating_2021,
  author    = {Stephanie Hernandez and Ron Artstein},
  title     = {Annotating low-confidence questions improves classifier performance},
  journal   = {Proceedings of the 25th Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2023-03-31},
  url       = {https://par.nsf.gov/biblio/10313591-annotating-low-confidence-questions-improves-classifier-performance},
  abstract  = {This paper compares methods to select data for annotation in order to improve a classifier used in a question-answering dialogue system. With a classifier trained on 1,500 questions, adding 300 training questions on which the classifier is least confident results in consistently improved performance, whereas adding 300 arbitrarily selected training questions does not yield consistent improvement, and sometimes even degrades performance. The paper uses a new method for comparative evaluation of classifiers for dialogue, which scores each classifier based on the number of appropriate responses retrieved.},
  keywords  = {Natural Language, UARC},
  pubstate  = {published},
  tppubtype = {article}
}
Hartholt, Arno; Fast, Ed; Leeds, Andrew; Mozgai, Sharon
Introducing VHMason: A Visual, Integrated, Multimodal Virtual Human Authoring Tool Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 109–111, Association for Computing Machinery, New York, NY, USA, 2021, ISBN: 978-1-4503-8619-7.
Abstract | Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@inproceedings{hartholt_introducing_2021-1,
title = {Introducing {VHMason}: A Visual, Integrated, Multimodal Virtual Human Authoring Tool},
author = {Arno Hartholt and Ed Fast and Andrew Leeds and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3472306.3478363},
doi = {10.1145/3472306.3478363},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {109--111},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '21},
abstract = {A major impediment to the success of virtual agents is the inability of non-technical experts to easily author content. To address this barrier we present VHMason, a multimodal authoring tool designed to help creative authors build embodied conversational agents. We introduce the novel aspects of this authoring tool and explore a use case of the creation of an agent-led educational experience implemented at Children's Hospital Los Angeles (CHLA).},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Awada, Mohamad; Lucas, Gale; Becerik-Gerber, Burcin; Roll, Shawn
Working from home during the COVID-19 pandemic: Impact on office worker productivity and work experience Journal Article
In: WOR, vol. 69, no. 4, pp. 1171–1189, 2021, ISSN: 10519815, 18759270.
Abstract | Links | BibTeX | Tags: DTIC, UARC
@article{awada_working_2021,
title = {Working from home during the {COVID-19} pandemic: Impact on office worker productivity and work experience},
author = {Mohamad Awada and Gale Lucas and Burcin Becerik-Gerber and Shawn Roll},
url = {https://www.medra.org/servlet/aliasResolver?alias=iospress&doi=10.3233/WOR-210301},
doi = {10.3233/WOR-210301},
issn = {1051-9815, 1875-9270},
year = {2021},
date = {2021-08-01},
urldate = {2022-09-26},
journal = {WOR},
volume = {69},
number = {4},
pages = {1171--1189},
abstract = {BACKGROUND: With the COVID-19 pandemic, organizations embraced Work From Home (WFH). An important component of transitioning to WFH is the effect on workers, particularly related to their productivity and work experience. OBJECTIVES: The objective of this study is to examine how worker-, workspace-, and work-related factors affected productivity and time spent at a workstation on a typical WFH day during the pandemic. METHODS: An online questionnaire was designed and administered to collect the necessary information. Data from 988 respondents were included in the analyses. RESULTS: Overall perception of productivity level among workers did not change relative to their in-office productivity before the pandemic. Female, older, and high-income workers were likely to report increased productivity. Productivity was positively influenced by better mental and physical health statuses, having a teenager, increased communication with coworkers and having a dedicated room for work. Number of hours spent at a workstation increased by approximately 1.5 hours during a typical WFH day. Longer hours were reported by individuals who had school age children, owned an office desk or an adjustable chair, and had adjusted their work hours. CONCLUSION: The findings highlight key factors for employers and employees to consider for improving the WFH experience.},
keywords = {DTIC, UARC},
pubstate = {published},
tppubtype = {article}
}
Kennedy, Alana A. U.; Thacker, Ian; Nye, Benjamin D.; Sinatra, Gale M.; Swartout, William; Lindsey, Emily
Promoting interest, positive emotions, and knowledge using augmented reality in a museum setting Journal Article
In: International Journal of Science Education, Part B, vol. 11, no. 3, pp. 242–258, 2021, ISSN: 2154-8455, (Publisher: Routledge _eprint: https://doi.org/10.1080/21548455.2021.1946619).
Abstract | Links | BibTeX | Tags: AR, Learning Sciences, UARC
@article{kennedy_promoting_2021,
title = {Promoting interest, positive emotions, and knowledge using augmented reality in a museum setting},
author = {Alana A. U. Kennedy and Ian Thacker and Benjamin D. Nye and Gale M. Sinatra and William Swartout and Emily Lindsey},
url = {https://doi.org/10.1080/21548455.2021.1946619},
doi = {10.1080/21548455.2021.1946619},
issn = {2154-8455},
year = {2021},
date = {2021-07-01},
urldate = {2023-03-31},
journal = {International Journal of Science Education, Part B},
volume = {11},
number = {3},
pages = {242--258},
abstract = {Informal learning environments, such as museums, provide unique opportunities for science learning. They are deliberately designed to impact public understanding of science and shape visitors’ attitudes and behaviors. As a developing technology, augmented reality (AR) offers the transformative potential to support museums’ educational missions by enhancing visitors’ experience, thereby creating effective conditions for learning and personalized interactions with science. We implemented an AR-enhanced exhibit at the La Brea Tar Pits (LBTP) to reduce scientific misconceptions and explore the role of interest and emotions around science and AR technology as it related to learning and knowledge revision. Using a pretest-posttest design, 62 adults completed an AR experience that addressed two scientific misconceptions related to the consistency of tar and frequency of large animal entrapment. We found that participants had significantly fewer misconceptions at posttest than at pretest. Participants also reported higher levels of interest in science content than AR technology and discriminated between emotions they experienced with regard to science content and AR technology. Feelings of curiosity predicted knowledge revision and interest in both science content and AR technology. These findings may be useful for museums and other science communicators seeking to create AR interventions that support learning and conceptual change.},
note = {Publisher: Routledge
_eprint: https://doi.org/10.1080/21548455.2021.1946619},
keywords = {AR, Learning Sciences, UARC},
pubstate = {published},
tppubtype = {article}
}
Rizzo, Albert “Skip”; Hartholt, Arno; Mozgai, Sharon
From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality Journal Article
In: Journal of Technology in Human Services, vol. 39, no. 3, pp. 314–347, 2021, ISSN: 1522-8835, (Publisher: Routledge _eprint: https://doi.org/10.1080/15228835.2021.1915931).
Abstract | Links | BibTeX | Tags: MedVR, UARC, VHTL, Virtual Humans
@article{rizzo_combat_2021,
title = {From Combat to {COVID-19} – Managing the Impact of Trauma Using Virtual Reality},
author = {Albert “Skip” Rizzo and Arno Hartholt and Sharon Mozgai},
url = {https://doi.org/10.1080/15228835.2021.1915931},
doi = {10.1080/15228835.2021.1915931},
issn = {1522-8835},
year = {2021},
date = {2021-07-01},
urldate = {2023-03-31},
journal = {Journal of Technology in Human Services},
volume = {39},
number = {3},
pages = {314--347},
abstract = {Research has documented the efficacy of clinical applications that leverage Virtual Reality (VR) for assessment and treatment purposes across a wide range of domains, including pain, phobias, and posttraumatic stress disorder (PTSD). As the field of Clinical VR matures, it is important to review its origins and examine how these initial explorations have progressed, what gaps remain, and what opportunities the community can pursue. We do this by reflecting on our personal scientific journey against the backdrop of the field in general. In particular, this paper discusses how a clinical research program that was initially designed to deliver trauma-focused VR exposure therapy (VRET) for combat-related PTSD has been evolved to expand its impact and address a wider range of trauma sources. Such trauma sources include sexual trauma and the needs of first responders and healthcare professionals serving on the frontlines of the COVID-19 pandemic. We provide an overview of the field and its general trends, discuss the genesis of our research agenda and its current status, and summarize upcoming opportunities, together with common challenges and lessons learned.},
note = {Publisher: Routledge
_eprint: https://doi.org/10.1080/15228835.2021.1915931},
keywords = {MedVR, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Horstmann, Aike C.; Gratch, Jonathan; Krämer, Nicole C.
I Just Wanna Blame Somebody, Not Something! Reactions to a Computer Agent Giving Negative Feedback Based on the Instructions of a Person Journal Article
In: International Journal of Human-Computer Studies, pp. 102683, 2021, ISSN: 10715819.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@article{horstmann_i_2021,
title = {I Just Wanna Blame Somebody, Not Something! Reactions to a Computer Agent Giving Negative Feedback Based on the Instructions of a Person},
author = {Aike C. Horstmann and Jonathan Gratch and Nicole C. Krämer},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1071581921001014},
doi = {10.1016/j.ijhcs.2021.102683},
issn = {1071-5819},
year = {2021},
date = {2021-06-01},
urldate = {2021-06-18},
journal = {International Journal of Human-Computer Studies},
pages = {102683},
abstract = {Previous research focused on differences between interacting with a person-controlled avatar and a computer-controlled virtual agent. This study however examines an aspiring form of technology called agent representative which constitutes a mix of the former two interaction partner types since it is a computer agent which was previously instructed by a person to take over a task on the person’s behalf. In an experimental lab study with a 2 x 3 between-subjects-design (N = 195), people believed to study together either with an agent representative, avatar, or virtual agent. The interaction partner was described to either possess high or low expertise, while always giving negative feedback regarding the participant’s performance. Results show small but interesting differences regarding the type of agency. People attributed the most agency and blame to the person(s) behind the software and reported the most negative affect when interacting with an avatar, which was less the case for a person’s agent representative and the least for a virtual agent. Level of expertise had no significant effect and other evaluation measures were not affected.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Doran, Bethany; Mei, Chaoqun; Varosy, Paul D.; Kao, David P.; Saxon, Leslie A.; Feldman, Arthur M.; DeMets, David; Bristow, Michael R.
The Addition of a Defibrillator to Resynchronization Therapy Decreases Mortality in Patients With Nonischemic Cardiomyopathy Journal Article
In: JACC: Heart Failure, vol. 9, no. 6, pp. 439–449, 2021, (Publisher: American College of Cardiology Foundation).
Links | BibTeX | Tags: MedVR, UARC
@article{doran_addition_2021,
title = {The Addition of a Defibrillator to Resynchronization Therapy Decreases Mortality in Patients With Nonischemic Cardiomyopathy},
author = {Bethany Doran and Chaoqun Mei and Paul D. Varosy and David P. Kao and Leslie A. Saxon and Arthur M. Feldman and David DeMets and Michael R. Bristow},
url = {https://www.jacc.org/doi/abs/10.1016/j.jchf.2021.02.013},
doi = {10.1016/j.jchf.2021.02.013},
year = {2021},
date = {2021-06-01},
urldate = {2023-03-31},
journal = {JACC: Heart Failure},
volume = {9},
number = {6},
pages = {439--449},
note = {Publisher: American College of Cardiology Foundation},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Greenwald, Eric; Leitner, Maxyn; Wang, Ning
The Human-Interpreter Problem in Youth Encounters with AI Journal Article
In: Proceedings of the 15th International Conference of the Learning Sciences, pp. 1107–1108, 2021, (Publisher: International Society of the Learning Sciences).
Abstract | Links | BibTeX | Tags: AI, UARC
@article{greenwald_human-interpreter_2021,
title = {The Human-Interpreter Problem in Youth Encounters with {AI}},
author = {Eric Greenwald and Maxyn Leitner and Ning Wang},
url = {https://repository.isls.org//handle/1/7421},
year = {2021},
date = {2021-06-01},
urldate = {2023-03-31},
journal = {Proceedings of the 15th International Conference of the Learning Sciences},
pages = {1107--1108},
abstract = {Artificial Intelligence’s impact on society is increasingly pervasive. While innovative educational programs are being developed, there is yet little understanding of how pre-college aged students construct understanding of, and gain practice with, core AI concepts and strategies. In this paper, we discuss emerging findings from a cognitive interview study with middle school and high school students to better understand how students learn AI concepts. Drawing on these qualitative data, we present evidence for a conceptual challenge that may arise as youth develop understanding of AI: when considering how AI systems might use data to make decisions, students often began by drawing on prior experience to suggest underlying motivations within the decision space, rather than attending to features of the data themselves. We hypothesize that youth may begin with a working theory of AI that assumes general intelligence for the system, including the capacity to recognize and reason from human motivations.},
note = {Publisher: International Society of the Learning Sciences},
keywords = {AI, UARC},
pubstate = {published},
tppubtype = {article}
}
Holder, Eric; Wang, Ning
Explainable artificial intelligence (XAI) interactively working with humans as a junior cyber analyst Journal Article
In: Hum.-Intell. Syst. Integr., vol. 3, no. 2, pp. 139–153, 2021, ISSN: 2524-4884.
Abstract | Links | BibTeX | Tags: AI, UARC
@article{holder_explainable_2021,
title = {Explainable artificial intelligence ({XAI}) interactively working with humans as a junior cyber analyst},
author = {Eric Holder and Ning Wang},
url = {https://doi.org/10.1007/s42454-020-00021-z},
doi = {10.1007/s42454-020-00021-z},
issn = {2524-4884},
year = {2021},
date = {2021-06-01},
urldate = {2023-03-31},
journal = {Human-Intelligent Systems Integration},
volume = {3},
number = {2},
pages = {139--153},
abstract = {There are many applications where artificial intelligence (AI) can add a benefit, but this benefit may not be fully realized, if the human cannot understand and interact with the output as required by their context. Allowing AI to explain its decisions can potentially mitigate this issue. To develop effective explainable AI methods to support this need, we need to understand both what the human needs for decision-making, as well as what information the AI has and can make available. This paper presents an example case of capturing those requirements. We explore how an operational planner (senior human analyst) for a cyber protection team could use a junior analyst virtual agent to scour, analyze, and present the data available on vulnerabilities and incidents on both the target systems as well as similar systems. We explore the interactions required to understand these outputs and to integrate additional knowledge held by the human. This is an exemplar case for integrating XAI into the real-world bi-directional workflow: the senior analyst needs to be able to understand the junior analysts results, particularly the assumptions and implications, in order to create a plan and brief it up the command chain. He or she may have further questions, or analysis needs to achieve this understanding. The application is the junior analyst agent and senior human analysts working together to create this understanding of threats, vulnerabilities, incidents, likely future attacks, and counteractions on the mission relevant cyber terrain that their unit has been assigned a mission on.},
keywords = {AI, UARC},
pubstate = {published},
tppubtype = {article}
}
Nikolovski, Janeta; Koldijk, Martin; Weverling, Gerrit Jan; Spertus, John; Turakhia, Mintu; Saxon, Leslie; Gibson, Mike; Whang, John; Sarich, Troy; Zambon, Robert; Ezeanochie, Nnamdi; Turgiss, Jennifer; Jones, Robyn; Stoddard, Jeff; Burton, Paul; Navar, Ann Marie
Factors indicating intention to vaccinate with a COVID-19 vaccine among older U.S. adults Journal Article
In: PLOS ONE, vol. 16, no. 5, pp. e0251963, 2021, ISSN: 1932-6203, (Publisher: Public Library of Science).
Abstract | Links | BibTeX | Tags: CBC, UARC
@article{nikolovski_factors_2021,
title = {Factors indicating intention to vaccinate with a {COVID-19} vaccine among older {U.S.} adults},
author = {Janeta Nikolovski and Martin Koldijk and Gerrit Jan Weverling and John Spertus and Mintu Turakhia and Leslie Saxon and Mike Gibson and John Whang and Troy Sarich and Robert Zambon and Nnamdi Ezeanochie and Jennifer Turgiss and Robyn Jones and Jeff Stoddard and Paul Burton and Ann Marie Navar},
url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0251963},
doi = {10.1371/journal.pone.0251963},
issn = {1932-6203},
year = {2021},
date = {2021-05-01},
urldate = {2023-03-31},
journal = {PLOS ONE},
volume = {16},
number = {5},
pages = {e0251963},
abstract = {Background The success of vaccination efforts to curb the COVID-19 pandemic will require broad public uptake of immunization and highlights the importance of understanding factors associated with willingness to receive a vaccine. Methods U.S. adults aged 65 and older enrolled in the HeartlineTM clinical study were invited to complete a COVID-19 vaccine assessment through the HeartlineTM mobile application between November 6–20, 2020. Factors associated with willingness to receive a COVID-19 vaccine were evaluated using an ordered logistic regression as well as a Random Forest classification algorithm. Results Among 9,106 study participants, 81.3% (n = 7402) responded and had available demographic data. The majority (91.3%) reported a willingness to be vaccinated. Factors most strongly associated with vaccine willingness were beliefs about the safety and efficacy of COVID-19 vaccines and vaccines in general. Women and Black or African American respondents reported lower willingness to vaccinate. Among those less willing to get vaccinated, 66.2% said that they would talk with their health provider before making a decision. During the study, positive results from the first COVID-19 vaccine outcome study were released; vaccine willingness increased after this report. Conclusions Even among older adults at high-risk for COVID-19 complications who are participating in a longitudinal clinical study, 1 in 11 reported lack of willingness to receive COVID-19 vaccine in November 2020. Variability in vaccine willingness by gender, race, education, and income suggests the potential for uneven vaccine uptake. Education by health providers directed toward assuaging concerns about vaccine safety and efficacy can help improve vaccine acceptance among those less willing. Trial registration Clinicaltrials.gov NCT04276441.},
note = {Publisher: Public Library of Science},
keywords = {CBC, UARC},
pubstate = {published},
tppubtype = {article}
}
Greenwald, Eric; Leitner, Maxyn; Wang, Ning
Learning Artificial Intelligence: Insights into How Youth Encounter and Build Understanding of AI Concepts Journal Article
In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, no. 17, pp. 15526–15533, 2021, ISSN: 2374-3468, (Number: 17).
Abstract | Links | BibTeX | Tags: AI, UARC
@article{greenwald_learning_2021,
title = {Learning Artificial Intelligence: Insights into How Youth Encounter and Build Understanding of {AI} Concepts},
author = {Eric Greenwald and Maxyn Leitner and Ning Wang},
url = {https://ojs.aaai.org/index.php/AAAI/article/view/17828},
doi = {10.1609/aaai.v35i17.17828},
issn = {2374-3468},
year = {2021},
date = {2021-05-01},
urldate = {2023-03-31},
journal = {Proceedings of the AAAI Conference on Artificial Intelligence},
volume = {35},
number = {17},
pages = {15526--15533},
abstract = {Artificial Intelligence’s impact on society is increasingly pervasive. While innovative educational programs are being developed, there has been little understanding of how students, especially pre-college aged students, construct understanding and gain practice with core ideas about AI or what concepts are most appropriate for what age-levels. In this paper, we discuss a cognitive interview study with high school students to better understand how students learn AI concepts. We aim to shed light on questions including: what is the range of background knowledge and experiences students are able to apply in encountering AI concepts; what concepts are most readily accessible and which are more challenging; what misconceptions do students bring to bear on AI problems; and how to help students approach AI concepts by leveraging related concepts, such as mathematical and computational thinking). Results from the exploratory study have the potential to provide important insights into AI learning for pre-college youth. These initial findings can inform further investigations to ground the design of learning and assessment in evidence-based learning progressions and grade-level performance expectations.},
note = {Number: 17},
keywords = {AI, UARC},
pubstate = {published},
tppubtype = {article}
}
Nye, Benjamin D.; Sanghrajka, Rushit; Bodhwani, Vinit; Acob, Martin; Budziwojski, Daniel; Carr, Kayla; Kirshner, Larry; Swartout, William R.
OpenTutor: Designing a Rapid-Authored Tutor that Learns as you Grade Journal Article
In: The International FLAIRS Conference Proceedings, vol. 34, 2021, ISSN: 2334-0762.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC
@article{nye_opentutor_2021,
  title     = {OpenTutor: Designing a Rapid-Authored Tutor that Learns as you Grade},
  author    = {Benjamin D. Nye and Rushit Sanghrajka and Vinit Bodhwani and Martin Acob and Daniel Budziwojski and Kayla Carr and Larry Kirshner and William R. Swartout},
  url       = {https://journals.flvc.org/FLAIRS/article/view/128576},
  doi       = {10.32473/flairs.v34i1.128576},
  issn      = {2334-0762},
  year      = {2021},
  date      = {2021-04-01},
  urldate   = {2023-03-31},
  journal   = {The International FLAIRS Conference Proceedings},
  volume    = {34},
  abstract  = {Despite strong evidence that dialog-based intelligent tutoring systems (ITS) can increase learning gains, few courses include these tutors. In this research, we posit that existing dialog-based tutoring systems are not widely used because they are too complex and unfamiliar for a typical teacher to adapt or augment. OpenTutor is an open-source research project intended to scale up dialog-based tutoring by enabling ordinary teachers to rapidly author and improve dialog-based ITS, where authoring is presented through familiar tasks such as assessment item creation and grading. Formative usability results from a set of five non-CS educators are presented, which indicate that the OpenTutor system was relatively easy to use but that teachers would closely consider the cost benefit for time vs. student outcomes. Specifically, while OpenTutor grading was faster than expected, teachers reported that they would only spend any additional time (compared to a multiple choice) if the content required deeper learning. To decrease time to train answer classifiers, OpenTutor is investigating ways to reduce cold-start problems for tutoring dialogs.},
  keywords  = {Learning Sciences, UARC},
  pubstate  = {published},
  tppubtype = {article}
}
Nye, Benjamin Daniel; Shiel, Aaron; Olmez, Ibrahim Burak; Mittal, Anirudh; Latta, Jason; Auerbach, Daniel; Copur-Gencturk, Yasemin
Virtual Agents for Real Teachers: Applying AI to Support Professional Development of Proportional Reasoning Journal Article
In: The International FLAIRS Conference Proceedings, vol. 34, 2021, ISSN: 2334-0762.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC
@article{nye_virtual_2021,
title = {Virtual Agents for Real Teachers: Applying {AI} to Support Professional Development of Proportional Reasoning},
author = {Benjamin Daniel Nye and Aaron Shiel and Ibrahim Burak Olmez and Anirudh Mittal and Jason Latta and Daniel Auerbach and Yasemin Copur-Gencturk},
url = {https://journals.flvc.org/FLAIRS/article/view/128574},
doi = {10.32473/flairs.v34i1.128574},
issn = {2334-0762},
year = {2021},
date = {2021-04-01},
urldate = {2023-03-31},
journal = {The International FLAIRS Conference Proceedings},
volume = {34},
abstract = {Despite the critical role of teachers in the educational process, few advanced learning technologies have been developed to support teacher-instruction or professional development. This lack of support is particularly acute for middle school math teachers, where only 37% felt well prepared to scaffold instruction to address the needs of diverse students in a national sample. To address this gap, the Advancing Middle School Teachers’ Understanding of Proportional Reasoning project is researching techniques to apply pedagogical virtual agents and dialog-based tutoring to enhance teachers' content knowledge and pedagogical content knowledge. This paper describes the design of a conversational, agent-based intelligent tutoring system to support teachers' professional development. Pedagogical strategies are presented that leverage a virtual human facilitator to tutor pedagogical content knowledge (how to teach proportions to students), as opposed to content knowledge (understanding proportions). The roles for different virtual facilitator capabilities are presented, including embedding actions into virtual agent dialog, open-response versus choice-based tutoring, ungraded pop-up sub-activities (e.g. whiteboard, calculator, note-taking). Usability feedback for a small cohort of instructors pursuing graduate studies was collected. In this feedback, teachers rated the system ease of use and perceived usefulness moderately well, but also reported confusion about what to expect from the system in terms of flow between lessons and support by the facilitator.},
keywords = {Learning Sciences, UARC},
pubstate = {published},
tppubtype = {article}
}
Gervits, Felix; Leuski, Anton; Bonial, Claire; Gordon, Carla; Traum, David
A Classification-Based Approach to Automating Human-Robot Dialogue Journal Article
In: pp. 13, 2021.
Abstract | Links | BibTeX | Tags: ARL, Dialogue, UARC, Virtual Humans
@article{gervits_classication-based_2021,
title = {A Classification-Based Approach to Automating Human-Robot Dialogue},
author = {Felix Gervits and Anton Leuski and Claire Bonial and Carla Gordon and David Traum},
url = {https://link.springer.com/chapter/10.1007/978-981-15-9323-9_10},
doi = {10.1007/978-981-15-9323-9_10},
year = {2021},
date = {2021-03-01},
pages = {13},
abstract = {We present a dialogue system based on statistical classification which was used to automate human-robot dialogue in a collaborative navigation domain. The classifier was trained on a small corpus of multi-floor Wizard-of-Oz dialogue including two wizards: one standing in for dialogue capabilities and another for navigation. Below, we describe the implementation details of the classifier and show how it was used to automate the dialogue wizard. We evaluate our system on several sets of source data from the corpus and find that response accuracy is generally high, even with very limited training data. Another contribution of this work is the novel demonstration of a dialogue manager that uses the classifier to engage in multifloor dialogue with two different human roles. Overall, this approach is useful for enabling spoken dialogue systems to produce robust and accurate responses to natural language input, and for robots that need to interact with humans in a team setting.},
keywords = {ARL, Dialogue, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Melo, Celso M.; Gratch, Jonathan; Krueger, Frank
Heuristic thinking and altruism toward machines in people impacted by COVID-19 Journal Article
In: iScience, vol. 24, no. 3, pp. 102228, 2021, ISSN: 25890042.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{de_melo_heuristic_2021,
title = {Heuristic thinking and altruism toward machines in people impacted by {COVID-19}},
author = {Celso M. Melo and Jonathan Gratch and Frank Krueger},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2589004221001966},
doi = {10.1016/j.isci.2021.102228},
issn = {2589-0042},
year = {2021},
date = {2021-03-01},
urldate = {2021-04-14},
journal = {iScience},
volume = {24},
number = {3},
pages = {102228},
abstract = {Autonomous machines are poised to become pervasive, but most treat machines differently: we are willing to violate social norms and less likely to display altruism toward machines. Here, we report an unexpected effect that those impacted by COVID-19—as measured by a post-traumatic stress disorder scale—show a sharp reduction in this difference. Participants engaged in the dictator game with humans and machines and, consistent with prior research on disasters, those impacted by COVID-19 displayed more altruism to other humans. Unexpectedly, participants impacted by COVID-19 displayed equal altruism toward human and machine partners. A mediation analysis suggests that altruism toward machines was explained by an increase in heuristic thinking—reinforcing prior theory that heuristic thinking encourages people to treat machines like people—and faith in technology—perhaps reflecting long-term consequences on how we act with machines. These findings give insight, but also raise concerns, for the design of technology.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Mell, Johnathan; Beissinger, Markus; Gratch, Jonathan
An expert-model and machine learning hybrid approach to predicting human-agent negotiation outcomes in varied data Journal Article
In: J Multimodal User Interfaces, 2021, ISSN: 1783-7677, 1783-8738.
Abstract | Links | BibTeX | Tags: DTIC, Machine Learning, UARC, Virtual Humans
@article{mell_expert-model_2021,
  title    = {An expert-model and machine learning hybrid approach to predicting human-agent negotiation outcomes in varied data},
  author   = {Johnathan Mell and Markus Beissinger and Jonathan Gratch},
  url      = {http://link.springer.com/10.1007/s12193-021-00368-w},
  doi      = {10.1007/s12193-021-00368-w},
  issn     = {1783-7677, 1783-8738},
  year     = {2021},
  date     = {2021-03-01},
  urldate  = {2021-04-15},
  journal  = {J Multimodal User Interfaces},
  abstract = {We present the results of a machine-learning approach to the analysis of several human-agent negotiation studies. By combining expert knowledge of negotiating behavior compiled over a series of empirical studies with neural networks, we show that a hybrid approach to parameter selection yields promise for designing more effective and socially intelligent agents. Specifically, we show that a deep feedforward neural network using a theory-driven three-parameter model can be effective in predicting negotiation outcomes. Furthermore, it outperforms other expert-designed models that use more parameters, as well as those using other techniques (such as linear regression models or boosted decision trees). In a follow-up study, we show that the most successful models change as the dataset size increases and the prediction targets change, and show that boosted decision trees may not be suitable for the negotiation domain. We anticipate these results will have impact for those seeking to combine extensive domain knowledge with more automated approaches in human-computer negotiation. Further, we show that this approach can be a stepping stone from purely exploratory research to targeted human-behavioral experimentation. Through our approach, areas of social artificial intelligence that have historically benefited from expert knowledge and traditional AI approaches can be combined with more recent proven-effective machine learning algorithms.},
  keywords = {DTIC, Machine Learning, UARC, Virtual Humans},
  pubstate = {published},
  tppubtype = {article}
}
Gordon, Carla; Georgila, Kallirroi; Yanov, Volodymyr; Traum, David
Towards Personalization of Spoken Dialogue System Communication Strategies Book Section
In: D'Haro, Luis Fernando; Callejas, Zoraida; Nakamura, Satoshi (Ed.): Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 145–160, Springer Singapore, Singapore, 2021, ISBN: 9789811583940 9789811583957, (Series Title: Lecture Notes in Electrical Engineering).
Abstract | Links | BibTeX | Tags: Dialogue, Natural Language, UARC, Virtual Humans
@incollection{dharo_towards_2021,
  title     = {Towards Personalization of Spoken Dialogue System Communication Strategies},
  author    = {Carla Gordon and Kallirroi Georgila and Volodymyr Yanov and David Traum},
  editor    = {Luis Fernando D'Haro and Zoraida Callejas and Satoshi Nakamura},
  url       = {http://link.springer.com/10.1007/978-981-15-8395-7_11},
  doi       = {10.1007/978-981-15-8395-7_11},
  isbn      = {9789811583940 9789811583957},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-04-15},
  booktitle = {Conversational Dialogue Systems for the Next Decade},
  volume    = {704},
  pages     = {145--160},
  publisher = {Springer Singapore},
  address   = {Singapore},
  abstract  = {This study examines the effects of 3 conversational traits – Register, Explicitness, and Misunderstandings – on user satisfaction and the perception of specific subjective features for Virtual Home Assistant spoken dialogue systems. Eight different system profiles were created, each representing a different combination of these 3 traits. We then utilized a novel Wizard of Oz data collection tool and recruited participants who interacted with the 8 different system profiles, and then rated the systems on 7 subjective features. Surprisingly, we found that systems which made errors were preferred overall, with the statistical analysis revealing error-prone systems were rated higher than systems which made no errors for all 7 of the subjective features rated. There were also some interesting interaction effects between the 3 conversational traits, such as implicit confirmations being preferred for systems employing a “conversational” Register, while explicit confirmations were preferred for systems employing a “formal” Register, even though there was no overall main effect for Explicitness. This experimental framework offers a fine-grained approach to the evaluation of user satisfaction which looks towards the personalization of communication strategies for spoken dialogue systems.},
  note      = {Series Title: Lecture Notes in Electrical Engineering},
  keywords  = {Dialogue, Natural Language, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}