Publications
Search
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn; Liu, Ruying
A New Perspective on Stress Detection: An Automated Approach for Detecting Eustress and Distress Journal Article
In: IEEE Trans. Affective Comput., pp. 1–15, 2023, ISSN: 1949-3045, 2371-9850.
@article{awada_new_2023,
title = {A New Perspective on Stress Detection: An Automated Approach for Detecting Eustress and Distress},
author = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn Roll and Ruying Liu},
url = {https://ieeexplore.ieee.org/document/10286408/},
doi = {10.1109/TAFFC.2023.3324910},
issn = {1949-3045, 2371-9850},
year = {2023},
date = {2023-01-01},
urldate = {2023-12-07},
journal = {IEEE Transactions on Affective Computing},
pages = {1--15},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Karpurapu, Abhilash; Jajodia, Aditya; Merchant, Chirag
The Relationship Between Pauses and Emphasis: Implications for Charismatic Speech Synthesis Book Section
In: Kurosu, Masaaki; Hashizume, Ayako (Ed.): Human-Computer Interaction, vol. 14013, pp. 407–418, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-35601-8 978-3-031-35602-5, (Series Title: Lecture Notes in Computer Science).
@incollection{kurosu_relationship_2023,
title = {The Relationship Between Pauses and Emphasis: Implications for Charismatic Speech Synthesis},
author = {Ning Wang and Abhilash Karpurapu and Aditya Jajodia and Chirag Merchant},
editor = {Masaaki Kurosu and Ayako Hashizume},
url = {https://link.springer.com/10.1007/978-3-031-35602-5_29},
doi = {10.1007/978-3-031-35602-5_29},
isbn = {978-3-031-35601-8 978-3-031-35602-5},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
booktitle = {Human-Computer Interaction},
series = {Lecture Notes in Computer Science},
volume = {14013},
pages = {407--418},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Tak, Ala N.; Gratch, Jonathan
Is GPT a Computational Model of Emotion? Detailed Analysis Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
@article{tak_is_2023,
title = {Is {GPT} a Computational Model of Emotion? Detailed Analysis},
author = {Ala N. Tak and Jonathan Gratch},
url = {https://arxiv.org/abs/2307.13779},
doi = {10.48550/ARXIV.2307.13779},
eprint = {2307.13779},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
abstract = {This paper investigates the emotional reasoning abilities of the GPT family of large language models via a component perspective. The paper first examines how the model reasons about autobiographical memories. Second, it systematically varies aspects of situations to impact emotion intensity and coping tendencies. Even without the use of prompt engineering, it is shown that GPT's predictions align significantly with human-provided appraisals and emotional labels. However, GPT faces difficulties predicting emotion intensity and coping responses. GPT-4 showed the highest performance in the initial study but fell short in the second, despite providing superior results after minor prompt engineering. This assessment brings up questions on how to effectively employ the strong points and address the weak areas of these models, particularly concerning response variability. These studies underscore the merits of evaluating models from a componential perspective.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Sato, Motoaki; Terada, Kazunori; Gratch, Jonathan
Teaching Reverse Appraisal to Improve Negotiation Skills Journal Article
In: IEEE Trans. Affective Comput., pp. 1–14, 2023, ISSN: 1949-3045, 2371-9850.
@article{sato_teaching_2023,
title = {Teaching Reverse Appraisal to Improve Negotiation Skills},
author = {Motoaki Sato and Kazunori Terada and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/10189838/},
doi = {10.1109/TAFFC.2023.3285931},
issn = {1949-3045, 2371-9850},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
journal = {IEEE Transactions on Affective Computing},
pages = {1--14},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Johnson, Emmanuel; Gratch, Jonathan; Gil, Yolanda
Virtual Agent Approach for Teaching the Collaborative Problem Solving Skill of Negotiation Book Section
In: Wang, Ning; Rebolledo-Mendez, Genaro; Dimitrova, Vania; Matsuda, Noboru; Santos, Olga C. (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky, vol. 1831, pp. 530–535, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36335-1 978-3-031-36336-8, (Series Title: Communications in Computer and Information Science).
@incollection{wang_virtual_2023,
title = {Virtual Agent Approach for Teaching the Collaborative Problem Solving Skill of Negotiation},
author = {Emmanuel Johnson and Jonathan Gratch and Yolanda Gil},
editor = {Ning Wang and Genaro Rebolledo-Mendez and Vania Dimitrova and Noboru Matsuda and Olga C. Santos},
url = {https://link.springer.com/10.1007/978-3-031-36336-8_82},
doi = {10.1007/978-3-031-36336-8_82},
isbn = {978-3-031-36335-1 978-3-031-36336-8},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky},
series = {Communications in Computer and Information Science},
volume = {1831},
pages = {530--535},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Melo, Celso M. De; Gratch, Jonathan; Marsella, Stacy; Pelachaud, Catherine
Social Functions of Machine Emotional Expressions Journal Article
In: Proc. IEEE, pp. 1–16, 2023, ISSN: 0018-9219, 1558-2256.
@article{de_melo_social_2023,
title = {Social Functions of Machine Emotional Expressions},
author = {De Melo, Celso M. and Gratch, Jonathan and Marsella, Stacy and Pelachaud, Catherine},
url = {https://ieeexplore.ieee.org/document/10093227/},
doi = {10.1109/JPROC.2023.3261137},
issn = {0018-9219, 1558-2256},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-04},
journal = {Proceedings of the IEEE},
pages = {1--16},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lu, Shuhong; Yoon, Youngwoo; Feng, Andrew
Co-Speech Gesture Synthesis using Discrete Gesture Token Learning Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
@article{lu_co-speech_2023,
title = {Co-Speech Gesture Synthesis using Discrete Gesture Token Learning},
author = {Shuhong Lu and Youngwoo Yoon and Andrew Feng},
url = {https://arxiv.org/abs/2303.12822},
doi = {10.48550/ARXIV.2303.12822},
eprint = {2303.12822},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-04},
abstract = {Synthesizing realistic co-speech gestures is an important and yet unsolved problem for creating believable motions that can drive a humanoid robot to interact and communicate with human users. Such capability will improve the impressions of the robots by human users and will find applications in education, training, and medical services. One challenge in learning the co-speech gesture model is that there may be multiple viable gesture motions for the same speech utterance. The deterministic regression methods can not resolve the conflicting samples and may produce over-smoothed or damped motions. We proposed a two-stage model to address this uncertainty issue in gesture synthesis by modeling the gesture segments as discrete latent codes. Our method utilizes RQ-VAE in the first stage to learn a discrete codebook consisting of gesture tokens from training data. In the second stage, a two-level autoregressive transformer model is used to learn the prior distribution of residual codes conditioned on input speech context. Since the inference is formulated as token sampling, multiple gesture sequences could be generated given the same speech input using top-k sampling. The quantitative results and the user study showed the proposed method outperforms the previous methods and is able to generate realistic and diverse gesture motions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gurney, Nikolos; Pynadath, David; Wang, Ning
My Actions Speak Louder Than Your Words: When User Behavior Predicts Their Beliefs about Agents' Attributes Book Section
In: vol. 14051, pp. 232–248, 2023, (arXiv:2301.09011 [cs]).
@incollection{gurney_my_2023,
title = {My Actions Speak Louder Than Your Words: When User Behavior Predicts Their Beliefs about Agents' Attributes},
author = {Nikolos Gurney and David Pynadath and Ning Wang},
url = {http://arxiv.org/abs/2301.09011},
doi = {10.1007/978-3-031-35894-4_17},
eprint = {2301.09011},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-15},
booktitle = {Artificial Intelligence in HCI},
series = {Lecture Notes in Computer Science},
volume = {14051},
pages = {232--248},
publisher = {Springer Nature Switzerland},
address = {Cham},
abstract = {An implicit expectation of asking users to rate agents, such as an AI decision-aid, is that they will use only relevant information – ask them about an agent's benevolence, and they should consider whether or not it was kind. Behavioral science, however, suggests that people sometimes use irrelevant information. We identify an instance of this phenomenon, where users who experience better outcomes in a human-agent interaction systematically rated the agent as having better abilities, being more benevolent, and exhibiting greater integrity in a post hoc assessment than users who experienced worse outcome – which were the result of their own behavior – with the same agent. Our analyses suggest the need for augmentation of models so that they account for such biased perceptions as well as mechanisms so that agents can detect and even actively work to correct this and similar biases of users.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Gurney, Nikolos; Pynadath, David V.; Wang, Ning
Comparing Psychometric and Behavioral Predictors of Compliance During Human-AI Interactions Book Section
In: vol. 13832, pp. 175–197, 2023, (arXiv:2302.01854 [cs]).
@incollection{gurney_comparing_2023,
title = {Comparing Psychometric and Behavioral Predictors of Compliance During Human-AI Interactions},
author = {Nikolos Gurney and David V. Pynadath and Ning Wang},
url = {http://arxiv.org/abs/2302.01854},
doi = {10.1007/978-3-031-30933-5_12},
eprint = {2302.01854},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-15},
volume = {13832},
pages = {175--197},
abstract = {Optimization of human-AI teams hinges on the AI's ability to tailor its interaction to individual human teammates. A common hypothesis in adaptive AI research is that minor differences in people's predisposition to trust can significantly impact their likelihood of complying with recommendations from the AI. Predisposition to trust is often measured with self-report inventories that are administered before interactions. We benchmark a popular measure of this kind against behavioral predictors of compliance. We find that the inventory is a less effective predictor of compliance than the behavioral measures in datasets taken from three previous research projects. This suggests a general property that individual differences in initial behavior are more predictive than differences in self-reported trust attitudes. This result also shows a potential for easily accessible behavioral measures to provide an AI with more accurate models without the use of (often costly) survey instruments.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation Journal Article
In: Proceedings of the Annual Meeting of the Cognitive Science Society, vol. 45, 2023.
@article{hale_risk_2023,
title = {Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation},
author = {James Hale and Peter Kim and Jonathan Gratch},
url = {https://escholarship.org/uc/item/7n01v4f9#main},
year = {2023},
date = {2023-01-01},
journal = {Proceedings of the Annual Meeting of the Cognitive Science Society},
volume = {45},
abstract = {Women and minorities obtain lower salaries when negotiating their employment compensation. Some have suggested that automated negotiation and dispute-resolution technology might address such material inequities. These algorithms elicit the multi-criteria preferences of each side of a dispute and arrive at solutions that are efficient and "provably" fair. In a study that explores the potential benefit of these methods, we highlight cognitive factors that may allow inequities to persist despite these methods. Specifically, risk-averse individuals express lower preferences for salary and as risk-aversion is more common in women and minorities, this translates into a ``provably'' fair lower salary. While this may reflect actual underlying differences in preferences across groups, individuals may be confounding their preferences for salary with their risk preference (i.e., their fear of not reaching an agreement), such that these groups achieve worse outcomes than they should. We further highlight that methodological choices in how negotiation processes are often studied can obscure the magnitude of this effect.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yang, Jing; Xiao, Hanyuan; Teng, Wenbin; Cai, Yunxuan; Zhao, Yajie
Light Sampling Field and BRDF Representation for Physically-based Neural Rendering Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
@article{yang_light_2023,
title = {Light Sampling Field and {BRDF} Representation for Physically-based Neural Rendering},
author = {Jing Yang and Hanyuan Xiao and Wenbin Teng and Yunxuan Cai and Yajie Zhao},
url = {https://arxiv.org/abs/2304.05472},
doi = {10.48550/ARXIV.2304.05472},
eprint = {2304.05472},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-22},
abstract = {Physically-based rendering (PBR) is key for immersive rendering effects used widely in the industry to showcase detailed realistic scenes from computer graphics assets. A well-known caveat is that producing the same is computationally heavy and relies on complex capture devices. Inspired by the success in quality and efficiency of recent volumetric neural rendering, we want to develop a physically-based neural shader to eliminate device dependency and significantly boost performance. However, no existing lighting and material models in the current neural rendering approaches can accurately represent the comprehensive lighting models and BRDFs properties required by the PBR process. Thus, this paper proposes a novel lighting representation that models direct and indirect light locally through a light sampling strategy in a learned light sampling field. We also propose BRDF models to separately represent surface/subsurface scattering details to enable complex objects such as translucent material (i.e., skin, jade). We then implement our proposed representations with an end-to-end physically-based neural face skin shader, which takes a standard face asset (i.e., geometry, albedo map, and normal map) and an HDRI for illumination as inputs and generates a photo-realistic rendering as output. Extensive experiments showcase the quality and efficiency of our PBR face skin shader, indicating the effectiveness of our proposed lighting and material representations.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Georgila, Kallirroi
Considerations for Child Speech Synthesis for Dialogue Systems Proceedings Article
In: Los Angeles, CA, 2023.
@inproceedings{georgila_considerations_2023,
  title     = {Considerations for Child Speech Synthesis for Dialogue Systems},
  author    = {Kallirroi Georgila},
  url       = {https://kgeorgila.github.io/publications/georgila_aiaic23.pdf},
  year      = {2023},
  date      = {2023-01-01},
  address   = {Los Angeles, CA},
  abstract  = {We present a number of important issues for consideration with regard to child speech synthesis for dialogue systems. We specifically discuss challenges in building child synthetic voices compared to adult synthetic voices, synthesizing expressive conversational speech, and evaluating speech synthesis quality.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Okado, Yuko; Nye, Benjamin D.; Aguirre, Angelica; Swartout, William
Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution Book Section
In: Wang, Ning; Rebolledo-Mendez, Genaro; Matsuda, Noboru; Santos, Olga C.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education, vol. 13916, pp. 189–201, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36271-2 978-3-031-36272-9, (Series Title: Lecture Notes in Computer Science).
@incollection{wang_can_2023,
title = {Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution},
author = {Yuko Okado and Benjamin D. Nye and Angelica Aguirre and William Swartout},
editor = {Ning Wang and Genaro Rebolledo-Mendez and Noboru Matsuda and Olga C. Santos and Vania Dimitrova},
url = {https://link.springer.com/10.1007/978-3-031-36272-9_16},
doi = {10.1007/978-3-031-36272-9_16},
isbn = {978-3-031-36271-2 978-3-031-36272-9},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-23},
booktitle = {Artificial Intelligence in Education},
series = {Lecture Notes in Computer Science},
volume = {13916},
pages = {189--201},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Pynadath, David V; Gurney, Nikolos; Kenny, Sarah; Kumar, Rajay; Marsella, Stacy C.; Matuszak, Haley; Mostafa, Hala; Ustun, Volkan; Wu, Peggy; Sequeira, Pedro
Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a Minecraft Search-and-Rescue Task Proceedings Article
In: AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems, pp. 2334–2336, 2023.
@inproceedings{pynadath_effectiveness_2023,
title = {Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a Minecraft Search-and-Rescue Task},
author = {David V Pynadath and Nikolos Gurney and Sarah Kenny and Rajay Kumar and Stacy C. Marsella and Haley Matuszak and Hala Mostafa and Volkan Ustun and Peggy Wu and Pedro Sequeira},
url = {https://dl.acm.org/doi/10.5555/3545946.3598925},
year = {2023},
date = {2023-01-01},
booktitle = {AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems},
pages = {2334--2336},
abstract = {Autonomous agents offer the promise of improved human teamwork through automated assessment and assistance during task performance [15, 16, 18]. Studies of human teamwork have identified various processes that underlie joint task performance, while abstracting away the specifics of the task [7, 11, 13, 17].We present here an agent that focuses exclusively on teamwork-level variables in deciding what interventions to use in assisting a human team. Our agent does not directly observe or model the environment or the people in it, but instead relies on input from analytic components (ACs) (developed by other research teams) that process environmental information and output only teamwork-relevant measures. Our agent models these teamwork variables and updates its beliefs over them using a Bayesian Theory of Mind [1], applying Partially Observable Markov Decision Processes (POMDPs) [9] in a recursive manner to assess the state of the team it is currently observing and to choose interventions to best assist them.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Koenig, Sebastian; Lange, Belinda
Clinical virtual reality: The state of the science. Book Section
In: Brown, Gregory G.; Crosson, Bruce; Haaland, Kathleen Y.; King, Tricia Z. (Ed.): APA handbook of neuropsychology, Volume 2: Neuroscience and neuromethods (Vol. 2)., pp. 473–491, American Psychological Association, Washington, 2023, ISBN: 978-1-4338-4001-2 978-1-4338-4002-9.
@incollection{brown_clinical_2023,
title = {Clinical virtual reality: The state of the science.},
author = {Albert Rizzo and Sebastian Koenig and Belinda Lange},
editor = {Gregory G. Brown and Bruce Crosson and Kathleen Y. Haaland and Tricia Z. King},
url = {http://content.apa.org/books/17303-023},
doi = {10.1037/0000308-023},
isbn = {978-1-4338-4001-2 978-1-4338-4002-9},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-24},
booktitle = {APA handbook of neuropsychology, Volume 2: Neuroscience and neuromethods (Vol. 2).},
pages = {473--491},
publisher = {American Psychological Association},
address = {Washington},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Rosenbloom, Paul S.
Rethinking the Physical Symbol Systems Hypothesis Book Section
In: Hammer, Patrick; Alirezaie, Marjan; Strannegård, Claes (Ed.): Artificial General Intelligence, vol. 13921, pp. 207–216, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-33468-9 978-3-031-33469-6, (Series Title: Lecture Notes in Computer Science).
@incollection{hammer_rethinking_2023,
title = {Rethinking the Physical Symbol Systems Hypothesis},
author = {Paul S. Rosenbloom},
editor = {Patrick Hammer and Marjan Alirezaie and Claes Strannegård},
url = {https://link.springer.com/10.1007/978-3-031-33469-6_21},
doi = {10.1007/978-3-031-33469-6_21},
isbn = {978-3-031-33468-9 978-3-031-33469-6},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-24},
booktitle = {Artificial General Intelligence},
series = {Lecture Notes in Computer Science},
volume = {13921},
pages = {207--216},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Wang, Ning; Pynadath, David V.; Gurney, Nikolos
The Design of Transparency Communication for Human-Multirobot Teams Book Section
In: Degen, Helmut; Ntoa, Stavroula (Ed.): Artificial Intelligence in HCI, vol. 14051, pp. 311–321, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-35893-7 978-3-031-35894-4, (Series Title: Lecture Notes in Computer Science).
@incollection{degen_design_2023,
title = {The Design of Transparency Communication for Human-Multirobot Teams},
author = {Ning Wang and David V. Pynadath and Nikolos Gurney},
editor = {Helmut Degen and Stavroula Ntoa},
url = {https://link.springer.com/10.1007/978-3-031-35894-4_23},
doi = {10.1007/978-3-031-35894-4_23},
isbn = {978-3-031-35893-7 978-3-031-35894-4},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-24},
booktitle = {Artificial Intelligence in HCI},
series = {Lecture Notes in Computer Science},
volume = {14051},
pages = {311--321},
publisher = {Springer Nature Switzerland},
address = {Cham},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Wu, Haochen; Sequeira, Pedro; Pynadath, David V.
Multiagent Inverse Reinforcement Learning via Theory of Mind Reasoning Journal Article
In: 2023, (Publisher: arXiv Version Number: 2).
@article{wu_multiagent_2023,
title = {Multiagent Inverse Reinforcement Learning via Theory of Mind Reasoning},
author = {Haochen Wu and Pedro Sequeira and David V. Pynadath},
url = {https://arxiv.org/abs/2302.10238},
doi = {10.48550/ARXIV.2302.10238},
eprint = {2302.10238},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-24},
abstract = {We approach the problem of understanding how people interact with each other in collaborative settings, especially when individuals know little about their teammates, via Multiagent Inverse Reinforcement Learning (MIRL), where the goal is to infer the reward functions guiding the behavior of each individual given trajectories of a team's behavior during some task. Unlike current MIRL approaches, we do not assume that team members know each other's goals a priori; rather, that they collaborate by adapting to the goals of others perceived by observing their behavior, all while jointly performing a task. To address this problem, we propose a novel approach to MIRL via Theory of Mind (MIRL-ToM). For each agent, we first use ToM reasoning to estimate a posterior distribution over baseline reward profiles given their demonstrated behavior. We then perform MIRL via decentralized equilibrium by employing single-agent Maximum Entropy IRL to infer a reward function for each agent, where we simulate the behavior of other teammates according to the time-varying distribution over profiles. We evaluate our approach in a simulated 2-player search-and-rescue operation where the goal of the agents, playing different roles, is to search for and evacuate victims in the environment. Our results show that the choice of baseline profiles is paramount to the recovery of the ground-truth rewards, and that MIRL-ToM is able to recover the rewards used by agents interacting both with known and unknown teammates.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Yu, Zifan; Chen, Meida; Zhang, Zhikang; You, Suya; Ren, Fengbo
TransUPR: A Transformer-based Uncertain Point Refiner for LiDAR Point Cloud Semantic Segmentation Journal Article
In: 2023, (Publisher: arXiv Version Number: 2).
@article{yu_transupr_2023,
title = {{TransUPR}: A Transformer-based Uncertain Point Refiner for {LiDAR} Point Cloud Semantic Segmentation},
author = {Zifan Yu and Meida Chen and Zhikang Zhang and Suya You and Fengbo Ren},
url = {https://arxiv.org/abs/2302.08594},
doi = {10.48550/ARXIV.2302.08594},
eprint = {2302.08594},
eprinttype = {arXiv},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-24},
abstract = {In this work, we target the problem of uncertain points refinement for image-based LiDAR point cloud semantic segmentation (LiDAR PCSS). This problem mainly results from the boundary-blurring problem of convolution neural networks (CNNs) and quantitation loss of spherical projection, which are often hard to avoid for common image-based LiDAR PCSS approaches. We propose a plug-and-play transformer-based uncertain point refiner (TransUPR) to address the problem. Through local feature aggregation, uncertain point localization, and self-attention-based transformer design, TransUPR, integrated into an existing range image-based LiDAR PCSS approach (e.g., CENet), achieves the state-of-the-art performance (68.2% mIoU) on Semantic-KITTI benchmark, which provides a performance improvement of 0.6% on the mIoU.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Beland, Sarah; Leeds, Andrew; Winn, Jade G.; Kaurloto, Cari; Heylen, Dirk; Hartholt, Arno
Toward a Scoping Review of Social Intelligence in Virtual Humans Proceedings Article
In: 2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1–6, 2023.
@inproceedings{mozgai_toward_2023,
title = {Toward a Scoping Review of Social Intelligence in Virtual Humans},
author = {Sharon Mozgai and Sarah Beland and Andrew Leeds and Jade G. Winn and Cari Kaurloto and Dirk Heylen and Arno Hartholt},
url = {https://ieeexplore.ieee.org/abstract/document/10042532},
doi = {10.1109/FG57933.2023.10042532},
year = {2023},
date = {2023-01-01},
booktitle = {2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)},
pages = {1--6},
abstract = {As the demand for socially intelligent Virtual Humans (VHs) increases, so follows the demand for effective and efficient cross-discipline collaboration that is required to bring these VHs “to life”. One avenue for increasing cross-discipline fluency is the aggregation and organization of seemingly disparate areas of research and development (e.g., graphics and emotion models) that are essential to the field of VH research. Our initial investigation (1) identifies and catalogues research streams concentrated in three multidisciplinary VH topic clusters within the domain of social intelligence, Emotion, Social Behavior, and The Face, (2) brings to the forefront key themes and prolific authors within each topic cluster, and (3) provides evidence that a full scoping review is warranted to further map the field, aggregate research findings, and identify gaps in the research. To enable collaboration, we provide full access to the refined VH cluster datasets, key word and author word clouds, as well as interactive evidence maps.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2022
Gurney, Nikolos; King, Tyler; Miller, John H.
An Experimental Method for Studying Complex Choices Proceedings Article
In: Stephanidis, Constantine; Antona, Margherita; Ntoa, Stavroula; Salvendy, Gavriel (Ed.): HCI International 2022 – Late Breaking Posters, pp. 39–45, Springer Nature Switzerland, Cham, 2022, ISBN: 978-3-031-19679-9.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@inproceedings{gurney_experimental_2022,
title = {An Experimental Method for Studying Complex Choices},
author = {Nikolos Gurney and Tyler King and John H. Miller},
editor = {Constantine Stephanidis and Margherita Antona and Stavroula Ntoa and Gavriel Salvendy},
url = {https://link.springer.com/chapter/10.1007/978-3-031-19679-9_6},
doi = {10.1007/978-3-031-19679-9_6},
isbn = {978-3-031-19679-9},
year = {2022},
date = {2022-01-01},
booktitle = {HCI International 2022 -- Late Breaking Posters},
pages = {39--45},
publisher = {Springer Nature Switzerland},
address = {Cham},
series = {Communications in Computer and Information Science},
abstract = {The promise of computational decision aids, from review sites to emerging augmented cognition technology, is the potential for better choice outcomes. This promise is grounded in the notion that we understand human decision processes well enough to design useful interventions. Although researchers have made considerable advances in the understanding of human judgment and decision making, these efforts are mostly based on the analysis of simple, often linear choices. Cumulative Prospect Theory (CPT), a famous explanation for decision making under uncertainty, was developed and validated using binary choice experiments in which options varied on a single dimension. Behavioral science has largely followed this simplified methodology. Here, we introduce an experimental paradigm specifically for studying humans making complex choices that incorporate multiple variables with nonlinear interactions. The task involves tuning dials, each of which controls a different dimension of a nonlinear problem. Initial results show that in such an environment participants demonstrate classic cognitive artifacts, such as anchoring and adjusting, along with falling into exploitive traps that prevent adequate exploration of these complex decisions. Preventing such errors suggest a potentially valuable role for deploying algorithmic decision aids to enhance decision making in complex choices.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Shichen; Li, Tianye; Chen, Weikai; Li, Hao
A General Differentiable Mesh Renderer for Image-Based 3D Reasoning Journal Article
In: IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 44, no. 1, pp. 50–62, 2022, ISSN: 1939-3539, (Conference Name: IEEE Transactions on Pattern Analysis and Machine Intelligence).
Abstract | Links | BibTeX | Tags: VGL
@article{liu_general_2022,
title = {A General Differentiable Mesh Renderer for Image-Based {3D} Reasoning},
author = {Shichen Liu and Tianye Li and Weikai Chen and Hao Li},
doi = {10.1109/TPAMI.2020.3007759},
issn = {1939-3539},
year = {2022},
date = {2022-01-01},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
volume = {44},
number = {1},
pages = {50--62},
abstract = {Rendering bridges the gap between 2D vision and 3D scenes by simulating the physical process of image formation. By inverting such renderer, one can think of a learning approach to infer 3D information from 2D images. However, standard graphics renderers involve a fundamental step called rasterization, which prevents rendering to be differentiable. Unlike the state-of-the-art differentiable renderers (Kato et al. 2018 and Loper 2018), which only approximate the rendering gradient in the backpropagation, we propose a natually differentiable rendering framework that is able to (1) directly render colorized mesh using differentiable functions and (2) back-propagate efficient supervisions to mesh vertices and their attributes from various forms of image representations. The key to our framework is a novel formulation that views rendering as an aggregation function that fuses the probabilistic contributions of all mesh triangles with respect to the rendered pixels. Such formulation enables our framework to flow gradients to the occluded and distant vertices, which cannot be achieved by the previous state-of-the-arts. We show that by using the proposed renderer, one can achieve significant improvement in 3D unsupervised single-view reconstruction both qualitatively and quantitatively. Experiments also demonstrate that our approach can handle the challenging tasks in image-based shape fitting, which remain nontrivial to existing differentiable renders.},
note = {Conference Name: IEEE Transactions on Pattern Analysis and Machine Intelligence},
keywords = {VGL},
pubstate = {published},
tppubtype = {article}
}
Gurney, Nikolos; Marsella, Stacy; Ustun, Volkan; Pynadath, David V.
Operationalizing Theories of Theory of Mind: A Survey Book Section
In: Gurney, Nikolos; Sukthankar, Gita (Ed.): Computational Theory of Mind for Human-Machine Teams, vol. 13775, pp. 3–20, Springer Nature Switzerland, Cham, 2022, ISBN: 978-3-031-21670-1 978-3-031-21671-8, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: Cognitive Architecture, Social Simulation, UARC
@incollection{gurney_operationalizing_2022,
title = {Operationalizing Theories of Theory of Mind: A Survey},
author = {Nikolos Gurney and Stacy Marsella and Volkan Ustun and David V. Pynadath},
editor = {Nikolos Gurney and Gita Sukthankar},
url = {https://link.springer.com/10.1007/978-3-031-21671-8_1},
doi = {10.1007/978-3-031-21671-8_1},
isbn = {978-3-031-21670-1 978-3-031-21671-8},
year = {2022},
date = {2022-01-01},
urldate = {2023-02-10},
booktitle = {Computational Theory of Mind for Human-Machine Teams},
volume = {13775},
pages = {3–20},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {Cognitive Architecture, Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Wang, Yunzhe; Gurney, Nikolos; Zhou, Jincheng; Pynadath, David V.; Ustun, Volkan
Route Optimization in Service of a Search and Rescue Artificial Social Intelligence Agent Book Section
In: Gurney, Nikolos; Sukthankar, Gita (Ed.): Computational Theory of Mind for Human-Machine Teams, vol. 13775, pp. 220–228, Springer Nature Switzerland, Cham, 2022, ISBN: 978-3-031-21670-1 978-3-031-21671-8, (Series Title: Lecture Notes in Computer Science).
Links | BibTeX | Tags: Cognitive Architecture, Social Simulation, UARC
@incollection{gurney_route_2022,
title = {Route Optimization in Service of a Search and Rescue Artificial Social Intelligence Agent},
author = {Yunzhe Wang and Nikolos Gurney and Jincheng Zhou and David V. Pynadath and Volkan Ustun},
editor = {Nikolos Gurney and Gita Sukthankar},
url = {https://link.springer.com/10.1007/978-3-031-21671-8_14},
doi = {10.1007/978-3-031-21671-8_14},
isbn = {978-3-031-21670-1 978-3-031-21671-8},
year = {2022},
date = {2022-01-01},
urldate = {2023-02-10},
booktitle = {Computational Theory of Mind for Human-Machine Teams},
volume = {13775},
pages = {220–228},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
internal-note = {NOTE(review): citation key does not match the first author (Wang) -- presumably a reference-manager export glitch; key kept unchanged so existing citations still resolve.},
keywords = {Cognitive Architecture, Social Simulation, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Marge, Matthew; Espy-Wilson, Carol; Ward, Nigel G.; Alwan, Abeer; Artzi, Yoav; Bansal, Mohit; Blankenship, Gil; Chai, Joyce; Daumé, Hal; Dey, Debadeepta; Harper, Mary; Howard, Thomas; Kennington, Casey; Kruijff-Korbayová, Ivana; Manocha, Dinesh; Matuszek, Cynthia; Mead, Ross; Mooney, Raymond; Moore, Roger K.; Ostendorf, Mari; Pon-Barry, Heather; Rudnicky, Alexander I.; Scheutz, Matthias; St. Amant, Robert; Sun, Tong; Tellex, Stefanie; Traum, David; Yu, Zhou
Spoken language interaction with robots: Recommendations for future research Journal Article
In: Computer Speech & Language, vol. 71, pp. 101255, 2022, ISSN: 08852308.
Links | BibTeX | Tags: ARL, Dialogue
@article{marge_spoken_2022,
title = {Spoken language interaction with robots: Recommendations for future research},
author = {Matthew Marge and Carol Espy-Wilson and Nigel G. Ward and Abeer Alwan and Yoav Artzi and Mohit Bansal and Gil Blankenship and Joyce Chai and Hal Daumé and Debadeepta Dey and Mary Harper and Thomas Howard and Casey Kennington and Ivana Kruijff-Korbayová and Dinesh Manocha and Cynthia Matuszek and Ross Mead and Raymond Mooney and Roger K. Moore and Mari Ostendorf and Heather Pon-Barry and Alexander I. Rudnicky and Matthias Scheutz and Robert {St. Amant} and Tong Sun and Stefanie Tellex and David Traum and Zhou Yu},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0885230821000620},
doi = {10.1016/j.csl.2021.101255},
issn = {08852308},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-23},
journal = {Computer Speech \& Language},
volume = {71},
pages = {101255},
keywords = {ARL, Dialogue},
pubstate = {published},
tppubtype = {article}
}
Chawla, Kushal; Lucas, Gale; May, Jonathan; Gratch, Jonathan
Opponent Modeling in Negotiation Dialogues by Related Data Adaptation Proceedings Article
In: Findings of the Association for Computational Linguistics: NAACL 2022, pp. 661–674, Association for Computational Linguistics, Seattle, United States, 2022.
Links | BibTeX | Tags: DTIC, Social Simulation, UARC
@inproceedings{chawla_opponent_2022,
title = {Opponent Modeling in Negotiation Dialogues by Related Data Adaptation},
author = {Kushal Chawla and Gale Lucas and Jonathan May and Jonathan Gratch},
url = {https://aclanthology.org/2022.findings-naacl.50},
doi = {10.18653/v1/2022.findings-naacl.50},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-26},
booktitle = {Findings of the Association for Computational Linguistics: NAACL 2022},
pages = {661–674},
publisher = {Association for Computational Linguistics},
address = {Seattle, United States},
keywords = {DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Baarslag, Tim; Kaisers, Michael; Gerding, Enrico H.; Jonker, Catholijn M.; Gratch, Jonathan
Self-sufficient, Self-directed, and Interdependent Negotiation Systems: A Roadmap Toward Autonomous Negotiation Agents Book Section
In: Karagözoğlu, Emin; Hyndman, Kyle B. (Ed.): Bargaining, pp. 387–406, Springer International Publishing, Cham, 2022, ISBN: 978-3-030-76665-8 978-3-030-76666-5.
Links | BibTeX | Tags: Virtual Humans
@incollection{baarslag_self-sufficient_2022,
title = {Self-sufficient, Self-directed, and Interdependent Negotiation Systems: A Roadmap Toward Autonomous Negotiation Agents},
author = {Tim Baarslag and Michael Kaisers and Enrico H. Gerding and Catholijn M. Jonker and Jonathan Gratch},
editor = {Emin Karagözoğlu and Kyle B. Hyndman},
url = {https://link.springer.com/10.1007/978-3-030-76666-5_18},
doi = {10.1007/978-3-030-76666-5_18},
isbn = {978-3-030-76665-8 978-3-030-76666-5},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-27},
booktitle = {Bargaining},
pages = {387–406},
publisher = {Springer International Publishing},
address = {Cham},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Hou, Yu; Chen, Meida; Volk, Rebekka; Soibelman, Lucio
Investigation on performance of RGB point cloud and thermal information data fusion for 3D building thermal map modeling using aerial images under different experimental conditions Journal Article
In: Journal of Building Engineering, vol. 45, pp. 103380, 2022, ISSN: 23527102.
Links | BibTeX | Tags: Graphics
@article{hou_investigation_2022,
title = {Investigation on performance of RGB point cloud and thermal information data fusion for 3D building thermal map modeling using aerial images under different experimental conditions},
author = {Yu Hou and Meida Chen and Rebekka Volk and Lucio Soibelman},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2352710221012389},
doi = {10.1016/j.jobe.2021.103380},
issn = {23527102},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-28},
journal = {Journal of Building Engineering},
volume = {45},
pages = {103380},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
Paun, Silviu; Artstein, Ron; Poesio, Massimo
Statistical Methods for Annotation Analysis Book
Springer International Publishing, Cham, 2022, ISBN: 978-3-031-03753-5 978-3-031-03763-4.
Links | BibTeX | Tags: AI, Natural Language
@book{paun_statistical_2022,
title = {Statistical Methods for Annotation Analysis},
author = {Silviu Paun and Ron Artstein and Massimo Poesio},
url = {https://link.springer.com/10.1007/978-3-031-03763-4},
doi = {10.1007/978-3-031-03763-4},
isbn = {978-3-031-03753-5 978-3-031-03763-4},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-28},
publisher = {Springer International Publishing},
address = {Cham},
keywords = {AI, Natural Language},
pubstate = {published},
tppubtype = {book}
}
Aster, Hans-Christoph; Romanos, Marcel; Walitza, Susanne; Gerlach, Manfred; Mühlberger, Andreas; Rizzo, Albert; Andreatta, Marta; Hasenauer, Natalie; Hartrampf, Philipp E.; Nerlich, Kai; Reiners, Christoph; Lorenz, Reinhard; Buck, Andreas K.; Deserno, Lorenz
Responsivity of the Striatal Dopamine System to Methylphenidate—A Within-Subject I-123-β-CIT-SPECT Study in Male Children and Adolescents With Attention-Deficit/Hyperactivity Disorder Journal Article
In: Frontiers in Psychiatry, vol. 13, 2022, ISSN: 1664-0640.
Abstract | Links | BibTeX | Tags: MedVR
@article{aster_responsivity_2022,
title = {Responsivity of the Striatal Dopamine System to Methylphenidate—A Within-Subject I-123-β-CIT-SPECT Study in Male Children and Adolescents With Attention-Deficit/Hyperactivity Disorder},
author = {Hans-Christoph Aster and Marcel Romanos and Susanne Walitza and Manfred Gerlach and Andreas Mühlberger and Albert Rizzo and Marta Andreatta and Natalie Hasenauer and Philipp E. Hartrampf and Kai Nerlich and Christoph Reiners and Reinhard Lorenz and Andreas K. Buck and Lorenz Deserno},
url = {https://www.frontiersin.org/articles/10.3389/fpsyt.2022.804730},
doi = {10.3389/fpsyt.2022.804730},
issn = {1664-0640},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-13},
journal = {Frontiers in Psychiatry},
volume = {13},
abstract = {Background:Methylphenidate (MPH) is the first-line pharmacological treatment of attention-deficit/hyperactivity disorder (ADHD). MPH binds to the dopamine (DA) transporter (DAT), which has high density in the striatum. Assessments of the striatal dopamine transporter by single positron emission computed tomography (SPECT) in childhood and adolescent patients are rare but can provide insight on how the effects of MPH affect DAT availability. The aim of our within-subject study was to investigate the effect of MPH on DAT availability and how responsivity to MPH in DAT availability is linked to clinical symptoms and cognitive functioning.MethodsThirteen adolescent male patients (9–16 years) with a diagnosis of ADHD according to the DSM-IV and long-term stimulant medication (for at least 6 months) with MPH were assessed twice within 7 days using SPECT after application of I-123-β-CIT to examine DAT binding potential (DAT BP). SPECT measures took place in an on- and off-MPH status balanced for order across participants. A virtual reality continuous performance test was performed at each time point. Further clinical symptoms were assessed for baseline off-MPH.ResultsOn-MPH status was associated with a highly significant change (−29.9%) of striatal DAT BP as compared to off-MPH (t = −4.12},
internal-note = {NOTE(review): abstract was truncated by the export (ends mid-sentence at "t = −4.12") -- restore the full abstract from the publisher record. DOI added verbatim from the article URL path.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
DiNinni, Richard; Rizzo, Albert
Sensing Human Signals of Motivation Processes During STEM Tasks Proceedings Article
In: Rodrigo, Maria Mercedes; Matsuda, Noburu; Cristea, Alexandra I.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium, pp. 163–167, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-11647-6.
Abstract | Links | BibTeX | Tags: DTIC, Learning Sciences
@inproceedings{dininni_sensing_2022,
title = {Sensing Human Signals of Motivation Processes During STEM Tasks},
author = {Richard DiNinni and Albert Rizzo},
editor = {Maria Mercedes Rodrigo and Noburu Matsuda and Alexandra I. Cristea and Vania Dimitrova},
doi = {10.1007/978-3-031-11647-6_28},
isbn = {978-3-031-11647-6},
year = {2022},
date = {2022-01-01},
booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium},
pages = {163–167},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {This paper outlines the linking of a multi-modal sensing platform with an Intelligent Tutoring System to perceive the motivational state of the learner during STEM tasks. Motivation is a critical element to learning but receives little attention in comparison to strategies related to cognitive processes. The EMPOWER project has developed a novel platform that offers researchers an opportunity to capture a learner’s multi-modal behavioral signals to develop models of motivation problems that can be used to develop best practice strategies for instructional systems.},
keywords = {DTIC, Learning Sciences},
pubstate = {published},
tppubtype = {inproceedings}
}
Stokes, Jared D.; Rizzo, Albert; Geng, Joy J.; Schweitzer, Julie B.
Measuring Attentional Distraction in Children With ADHD Using Virtual Reality Technology With Eye-Tracking Journal Article
In: Frontiers in Virtual Reality, vol. 3, 2022, ISSN: 2673-4192.
Abstract | Links | BibTeX | Tags: MedVR, VR
@article{stokes_measuring_2022,
title = {Measuring Attentional Distraction in Children With ADHD Using Virtual Reality Technology With Eye-Tracking},
author = {Jared D. Stokes and Albert Rizzo and Joy J. Geng and Julie B. Schweitzer},
url = {https://www.frontiersin.org/articles/10.3389/frvir.2022.855895},
doi = {10.3389/frvir.2022.855895},
issn = {2673-4192},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-13},
journal = {Frontiers in Virtual Reality},
volume = {3},
abstract = {Objective: Distractions inordinately impair attention in children with Attention-Deficit Hyperactivity Disorder (ADHD) but examining this behavior under real-life conditions poses a challenge for researchers and clinicians. Virtual reality (VR) technologies may mitigate the limitations of traditional laboratory methods by providing a more ecologically relevant experience. The use of eye-tracking measures to assess attentional functioning in a VR context in ADHD is novel. In this proof of principle project, we evaluate the temporal dynamics of distraction via eye-tracking measures in a VR classroom setting with 20 children diagnosed with ADHD between 8 and 12 years of age.Method: We recorded continuous eye movements while participants performed math, Stroop, and continuous performance test (CPT) tasks with a series of “real-world” classroom distractors presented. We analyzed the impact of the distractors on rates of on-task performance and on-task, eye-gaze (i.e., looking at a classroom whiteboard) versus off-task eye-gaze (i.e., looking away from the whiteboard).Results: We found that while children did not always look at distractors themselves for long periods of time, the presence of a distractor disrupted on-task gaze at task-relevant whiteboard stimuli and lowered rates of task performance. This suggests that children with attention deficits may have a hard time returning to tasks once those tasks are interrupted, even if the distractor itself does not hold attention. Eye-tracking measures within the VR context can reveal rich information about attentional disruption.Conclusions: Leveraging virtual reality technology in combination with eye-tracking measures is well-suited to advance the understanding of mechanisms underlying attentional impairment in naturalistic settings. 
Assessment within these immersive and well-controlled simulated environments provides new options for increasing our understanding of distractibility and its potential impact on the development of interventions for children with ADHD.},
keywords = {MedVR, VR},
pubstate = {published},
tppubtype = {article}
}
Talbot, Thomas Brett; Chinara, Chinmay
Open Medical Gesture: An Open-Source Experiment in Naturalistic Physical Interactions for Mixed and Virtual Reality Simulations Proceedings Article
In: 2022.
Abstract | Links | BibTeX | Tags: DTIC, MedVR, MR, VR
@inproceedings{brett_talbot_open_2022,
title = {Open Medical Gesture: An Open-Source Experiment in Naturalistic Physical Interactions for Mixed and Virtual Reality Simulations},
author = {Thomas Brett Talbot and Chinmay Chinara},
url = {https://openaccess.cms-conferences.org/#/publications/book/978-1-958651-26-1/article/978-1-958651-26-1_0},
doi = {10.54941/ahfe1002054},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-13},
abstract = {Mixed (MR) and Virtual Reality (VR) simulations are hampered by requirements for hand controllers or attempts to perseverate in use of two-dimensional computer interface paradigms from the 1980s. From our efforts to produce more naturalistic interactions for combat medic training for the military, we have developed an open-source toolkit that enables direct hand controlled responsive interactions that is sensor independent and can function with depth sensing cameras, webcams or sensory gloves. From this research and review of current literature, we have discerned several best approaches for hand-based human computer interactions which provide intuitive, responsive, useful, and low frustration experiences for VR users. The center of an effective gesture system is a universal hand model that can map to inputs from several different kinds of sensors rather than depending on a specific commercial product. Parts of the hand are effectors in simulation space with a physics-based model. Therefore, translational and rotational forces from the hands will impact physical objects in VR which varies based on the mass of the virtual objects. We incorporate computer code w/ objects, calling them “Smart Objects”, which allows such objects to have movement properties and collision detection for expected manipulation. Examples of smart objects include scissors, a ball, a turning knob, a moving lever, or a human figure with moving limbs. Articulation points contain collision detectors and code to assist in expected hand actions. We include a library of more than 40 Smart Objects in the toolkit. Thus, is it possible to throw a ball, hit that ball with a bat, cut a bandage, turn on a ventilator or to lift and inspect a human arm.We mediate the interaction of the hands with virtual objects. Hands often violate the rules of a virtual world simply by passing through objects. One must interpret user intent. 
This can be achieved by introducing stickiness of the hands to objects. If the human’s hands overshoot an object, we place the hand onto that object’s surface unless the hand passes the object by a significant distance. We also make hands and fingers contact an object according to the object’s contours and do not allow fingers to sink into the interior of an object. Haptics, or a sense of physical resistance and tactile sensation from contacting physical objects is a supremely difficult technical challenge and is an expensive pursuit. Our approach ignores true haptics, but we have experimented with an alternative approach, called audio tactile synesthesia where we substitute the sensation of touch for that of sound. The idea is to associate parts of each hand with a tone of a specific frequency upon contacting objects. The attack rate of the sound envelope varies with the velocity of contact and hardness of the object being ‘touched’. Such sounds can feel softer or harder depending on the nature of ‘touch’ being experienced. This substitution technique can provide tactile feedback through indirect, yet still naturalistic means. The artificial intelligence (AI) technique to determine discrete hand gestures and motions within the physical space is a special form of AI called Long Short Term Memory (LSTM). LSTM allows much faster and flexible recognition than other machine learning approaches. LSTM is particularly effective with points in motion. Latency of recognition is very low. In addition to LSTM, we employ other synthetic vision & object recognition AI to the discrimination of real-world objects. This allows for methods to conduct virtual simulations. For example, it is possible to pick up a virtual syringe and inject a medication into a virtual patient through hand motions. We track the hand points to contact with the virtual syringe. We also detect when the hand is compressing the syringe plunger. 
We could also use virtual medications & instruments on human actors or manikins, not just on virtual objects. With object recognition AI, we can place a syringe on a tray in the physical world. The human user can pick up the syringe and use it on a virtual patient. Thus, we are able to blend physical and virtual simulation together seamlessly in a highly intuitive and naturalistic manner.The techniques and technologies explained here represent a baseline capability whereby interacting in mixed and virtual reality can now be much more natural and intuitive than it has ever been. We have now passed a threshold where we can do away with game controllers and magnetic trackers for VR. This advancement will contribute to greater adoption of VR solutions. To foster this, our team has committed to freely sharing these technologies for all purposes and at no cost as an open-source tool. We encourage the scientific, research, educational and medical communities to adopt these resources and determine their effectiveness and utilize these tools and practices to grow the body of useful VR applications.},
keywords = {DTIC, MedVR, MR, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
Davis, Matt; Nye, Benjamin; Sinatra, Gale; Swartout, William; Sjöberg, Molly; Porter, Molly; Nelson, David; Kennedy, Alana; Herrick, Imogen; Weeks, Danaan DeNeve; Lindsey, Emily
Designing scientifically-grounded paleoart for augmented reality at La Brea Tar Pits Journal Article
In: Palaeontol Electron, 2022, ISSN: 19353952, 10948074.
Links | BibTeX | Tags: AR, MxR, VR
@article{davis_designing_2022,
title = {Designing scientifically-grounded paleoart for augmented reality at {La Brea Tar Pits}},
author = {Matt Davis and Benjamin Nye and Gale Sinatra and William Swartout and Molly Sjöberg and Molly Porter and David Nelson and Alana Kennedy and Imogen Herrick and Danaan DeNeve Weeks and Emily Lindsey},
url = {https://palaeo-electronica.org/content/2022/3524-la-brea-tar-pits-paleoart},
doi = {10.26879/1191},
issn = {19353952, 10948074},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-15},
journal = {Palaeontol Electron},
keywords = {AR, MxR, VR},
pubstate = {published},
tppubtype = {article}
}
Zhou, Jincheng; Ustun, Volkan
PySigma: Towards Enhanced Grand Unification for the Sigma Cognitive Architecture Book Section
In: Goertzel, Ben; Iklé, Matthew; Potapov, Alexey (Ed.): Artificial General Intelligence, vol. 13154, pp. 355–366, Springer International Publishing, Cham, 2022, ISBN: 978-3-030-93757-7 978-3-030-93758-4.
Links | BibTeX | Tags: CogArch, Cognitive Architecture, DTIC, UARC
@incollection{zhou_pysigma_2022,
title = {PySigma: Towards Enhanced Grand Unification for the Sigma Cognitive Architecture},
author = {Jincheng Zhou and Volkan Ustun},
editor = {Ben Goertzel and Matthew Iklé and Alexey Potapov},
url = {https://link.springer.com/10.1007/978-3-030-93758-4_36},
doi = {10.1007/978-3-030-93758-4_36},
isbn = {978-3-030-93757-7 978-3-030-93758-4},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-21},
booktitle = {Artificial General Intelligence},
volume = {13154},
pages = {355–366},
publisher = {Springer International Publishing},
address = {Cham},
keywords = {CogArch, Cognitive Architecture, DTIC, UARC},
pubstate = {published},
tppubtype = {incollection}
}
2021
Tran, Minh; Bradley, Ellen; Matvey, Michelle; Woolley, Joshua; Soleymani, Mohammad
Modeling Dynamics of Facial Behavior for Mental Health Assessment Proceedings Article
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–5, IEEE, Jodhpur, India, 2021, ISBN: 978-1-66543-176-7.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{tran_modeling_2021,
title = {Modeling Dynamics of Facial Behavior for Mental Health Assessment},
author = {Minh Tran and Ellen Bradley and Michelle Matvey and Joshua Woolley and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9666955/},
doi = {10.1109/FG52635.2021.9666955},
isbn = {978-1-66543-176-7},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-23},
booktitle = {2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)},
pages = {1–5},
publisher = {IEEE},
address = {Jodhpur, India},
keywords = {DTIC, Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Yin, Yufeng; Lu, Liupei; Wu, Yizhen; Soleymani, Mohammad
Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection Proceedings Article
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–8, IEEE, Jodhpur, India, 2021, ISBN: 978-1-66543-176-7.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{yin_self-supervised_2021,
title = {Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection},
author = {Yufeng Yin and Liupei Lu and Yizhen Wu and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9667048/},
doi = {10.1109/FG52635.2021.9667048},
isbn = {978-1-66543-176-7},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-23},
booktitle = {2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)},
pages = {1–8},
publisher = {IEEE},
address = {Jodhpur, India},
keywords = {DTIC, Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ionescu, Alina; Van Daele, Tom; Rizzo, Albert; Blair, Carolyn; Best, Paul
360° Videos for Immersive Mental Health Interventions: a Systematic Review Journal Article
In: J. technol. behav. sci., vol. 6, no. 4, pp. 631–651, 2021, ISSN: 2366-5963.
Abstract | Links | BibTeX | Tags: MedVR, VR
@article{ionescu_360_2021,
title = {360° Videos for Immersive Mental Health Interventions: a Systematic Review},
author = {Alina Ionescu and Tom {Van Daele} and Albert Rizzo and Carolyn Blair and Paul Best},
url = {https://doi.org/10.1007/s41347-021-00221-7},
doi = {10.1007/s41347-021-00221-7},
issn = {2366-5963},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-13},
journal = {J. technol. behav. sci.},
volume = {6},
number = {4},
pages = {631–651},
abstract = {Virtual reality is increasingly recognized as a powerful method for clinical interventions in the mental health field, but has yet to achieve mainstream adoption in routine mental healthcare settings. A similar, yet slightly different technology, immersive 360° videos might have the potential to cover this gap, by requiring both lower costs and less technical skills to construct and operate such virtual environments. This systematic review therefore aims to identify, evaluate, and summarize mental health interventions using immersive 360° videos to support an understanding of their implementation in daily clinical practice. The quality of the 14 selected studies was evaluated using a critical appraisal tool, addressing populations with clinical levels of psychopathological symptoms, somatic conditions associated with psychological implications, and other at-risk groups. Immersive 360° videos successfully increased users’ feelings of presence, given their realistic features, and therefore yielded positive outcomes in clinical interventions where presence is considered as an essential precondition. Because the technical skills required to create immersive 360° video footage are fairly limited, most of the interventions using this approach have been created by mental health researchers or clinicians themselves. Immersive 360° videos are still in an early phase of implementation as a tool for clinical interventions for mental health, resulting in high heterogeneity in focus, procedures, and research designs. An important next step for making use of this technology may therefore involve the creation of standardized procedures, as a means to increase the quality of research and evidence-based interventions.},
keywords = {MedVR, VR},
pubstate = {published},
tppubtype = {article}
}
Liu, Lixing; Gurney, Nikolos; McCullough, Kyle; Ustun, Volkan
Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations Proceedings Article
In: 2021 Winter Simulation Conference (WSC), pp. 1–12, IEEE, Phoenix, AZ, USA, 2021, ISBN: 978-1-66543-311-2.
Links | BibTeX | Tags: DTIC, Learning Sciences, UARC, Virtual Humans
@inproceedings{liu_graph_2021,
title = {Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations},
author = {Lixing Liu and Nikolos Gurney and Kyle McCullough and Volkan Ustun},
url = {https://ieeexplore.ieee.org/document/9715433/},
doi = {10.1109/WSC52266.2021.9715433},
isbn = {978-1-66543-311-2},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-21},
booktitle = {2021 Winter Simulation Conference (WSC)},
pages = {1–12},
publisher = {IEEE},
address = {Phoenix, AZ, USA},
keywords = {DTIC, Learning Sciences, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Jiaman; Villegas, Ruben; Ceylan, Duygu; Yang, Jimei; Kuang, Zhengfei; Li, Hao; Zhao, Yajie
Task-Generic Hierarchical Human Motion Prior using VAEs Proceedings Article
In: 2021 International Conference on 3D Vision (3DV), pp. 771–781, IEEE, London, United Kingdom, 2021, ISBN: 978-1-66542-688-6.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{li_task-generic_2021,
title = {Task-Generic Hierarchical Human Motion Prior using VAEs},
author = {Jiaman Li and Ruben Villegas and Duygu Ceylan and Jimei Yang and Zhengfei Kuang and Hao Li and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9665881/},
doi = {10.1109/3DV53792.2021.00086},
isbn = {978-1-66542-688-6},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-22},
booktitle = {2021 International Conference on 3D Vision (3DV)},
pages = {771–781},
publisher = {IEEE},
address = {London, United Kingdom},
keywords = {DTIC, UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; McCullough, Kyle; Mozgai, Sharon; Ustun, Volkan; Gordon, Andrew S
Introducing RIDE: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration & Development Environment Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 11, Association for Computing Machinery, New York, NY, USA, 2021.
Abstract | Links | BibTeX | Tags: VHTL
@inproceedings{hartholt_introducing_2021,
title = {Introducing {RIDE}: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration \& Development Environment},
author = {Arno Hartholt and Kyle McCullough and Sharon Mozgai and Volkan Ustun and Andrew S. Gordon},
url = {https://dl.acm.org/doi/10.1145/3472306.3478363},
doi = {10.1145/3472306.3478363},
year = {2021},
date = {2021-11-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {11},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '21},
abstract = {This paper describes the design, development, and philosophy of the Rapid Integration \& Development Environment (RIDE). RIDE is a simulation platform that unites many Department of Defense (DoD) and Army simulation efforts to provide an accelerated development foundation and prototyping sandbox that provides direct benefit to the U.S. Army’s Synthetic Training Environment (STE) as well as the larger DoD and Army simulation communities. RIDE integrates a range of capabilities, including One World Terrain, Non-Player Character AI behaviors, xAPI logging, multiplayer networking, scenario creation, destructibility, machine learning approaches, and multi-platform support. The goal of RIDE is to create a simple, drag-and-drop development environment usable by people across all technical levels. RIDE leverages robust game engine technology while designed to be agnostic to any specific game or simulation engine. It provides decision makers with the tools needed to better define requirements and identify potential solutions in much less time and at much reduced costs. RIDE is available through Government Purpose Rights. We aim for RIDE to lower the barrier of entry to research and development efforts within the simulation community in order to reduce required time and effort for simulation and training prototyping. This paper provides an overview of our objective, overall approach, and next steps, in pursuit of these goals.},
internal-note = {NOTE(review): exported as @article but carries booktitle/publisher/series of an ACM proceedings paper and no journal -- converted to @inproceedings. The pages value (11) looks like a page count from the export; verify against the ACM record.},
keywords = {VHTL},
pubstate = {published},
tppubtype = {inproceedings}
}
Talbot, Thomas B.; Rizzo, Albert S.; Soleymani, Mohammad
Advances in Affective Computing for Psychological Applications: From the Fundamentals to the Future of Emotional Cognizant Artificial Intelligence Entities Journal Article
In: Technology, Mind, and Behavior, 2021.
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{talbot_advances_2021,
title = {Advances in Affective Computing for Psychological Applications: From the Fundamentals to the Future of Emotional Cognizant Artificial Intelligence Entities},
author = {Thomas B. Talbot and Albert S. Rizzo and Mohammad Soleymani},
url = {https://tmb.apaopen.org/pub/zm0la9di/release/1},
doi = {10.1037/tms0000011},
year = {2021},
date = {2021-11-01},
urldate = {2023-03-31},
journal = {Technology, Mind, and Behavior},
abstract = {Keywords: Affective Computing, Emotion Perception, Virtual Humans, Conversational Simulations},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Associations Among Home Indoor Environmental Quality Factors and Worker Health While Working From Home During COVID-19 Pandemic Journal Article
In: ASME Journal of Engineering for Sustainable Buildings and Cities, vol. 2, no. 4, pp. 041001, 2021, ISSN: 2642-6641, 2642-6625.
Abstract | Links | BibTeX | Tags: DTIC, UARC
@article{awada_associations_2021,
title = {Associations Among Home Indoor Environmental Quality Factors and Worker Health While Working From Home During COVID-19 Pandemic},
author = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn C. Roll},
url = {https://asmedigitalcollection.asme.org/sustainablebuildings/article/2/4/041001/1122847/Associations-Among-Home-Indoor-Environmental},
doi = {10.1115/1.4052822},
issn = {2642-6641, 2642-6625},
year = {2021},
date = {2021-11-01},
urldate = {2022-09-26},
journal = {ASME Journal of Engineering for Sustainable Buildings and Cities},
volume = {2},
number = {4},
pages = {041001},
abstract = {Abstract The outbreak of SARS-CoV-2 virus forced office workers to conduct their daily work activities from home over an extended period. Given this unique situation, an opportunity emerged to study the satisfaction of office workers with indoor environmental quality (IEQ) factors of their houses where work activities took place and associate these factors with mental and physical health. We designed and administered a questionnaire that was open for 45 days during the COVID-19 pandemic and received valid data from 988 respondents. The results show that low satisfaction with natural lighting, glare, and humidity predicted eye-related symptoms, while low satisfaction with noise was a strong predictor of fatigue or tiredness, headaches or migraines, anxiety, and depression or sadness. Nose- and throat-related symptoms and skin-related symptoms were only uniquely predicted by low satisfaction with humidity. Low satisfaction with glare uniquely predicted an increase in musculoskeletal discomfort. Symptoms related to mental stress, rumination, or worry were predicted by low satisfaction with air quality and noise. Finally, low satisfaction with noise and indoor temperature predicted the prevalence of symptoms related to trouble concentrating, maintaining attention, or focus. Workers with higher income were more satisfied with humidity, air quality, and indoor temperature and had better overall mental health. Older individuals had increased satisfaction with natural lighting, humidity, air quality, noise, and indoor temperature. Findings from this study can inform future design practices that focus on hybrid home-work environments by highlighting the impact of IEQ factors on occupant well-being.},
keywords = {DTIC, UARC},
pubstate = {published},
tppubtype = {article}
}
We've Entered a New Era of Streaming Health Care. Now What? Journal Article
In: IEEE Spectrum, 2021.
Abstract | Links | BibTeX | Tags: CBC
@article{noauthor_weve_2021,
title = {We've Entered a New Era of Streaming Health Care. Now What?},
url = {https://spectrum.ieee.org/digital-health},
year = {2021},
date = {2021-11-01},
urldate = {2022-09-29},
journal = {IEEE Spectrum},
abstract = {COVID-19 forced the transition to digital medicine, but there's much still to do.},
keywords = {CBC},
pubstate = {published},
tppubtype = {article}
}
Schuller, Bjorn W.; Picard, Rosalind; Andre, Elisabeth; Gratch, Jonathan; Tao, Jianhua
Intelligent Signal Processing for Affective Computing [From the Guest Editors] Journal Article
In: IEEE Signal Process. Mag., vol. 38, no. 6, pp. 9–11, 2021, ISSN: 1053-5888, 1558-0792.
Links | BibTeX | Tags: Emotions, Virtual Humans
@article{schuller_intelligent_2021,
title = {Intelligent Signal Processing for Affective Computing [From the Guest Editors]},
author = {Bjorn W. Schuller and Rosalind Picard and Elisabeth Andre and Jonathan Gratch and Jianhua Tao},
url = {https://ieeexplore.ieee.org/document/9591500/},
doi = {10.1109/MSP.2021.3096415},
issn = {1053-5888, 1558-0792},
year = {2021},
date = {2021-11-01},
urldate = {2022-09-29},
journal = {IEEE Signal Process. Mag.},
volume = {38},
number = {6},
pages = {9–11},
keywords = {Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Leeds, Andrew; Kwok, David; Fast, Ed; Rizzo, Albert Skip; Hartholt, Arno
Building BRAVEMIND Vietnam: User-Centered Design for Virtual Reality Exposure Therapy Proceedings Article
In: 2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR), pp. 247–250, 2021.
Abstract | Links | BibTeX | Tags: DTIC, MedVR, VHTL, VR
@inproceedings{mozgai_building_2021,
title = {Building BRAVEMIND Vietnam: User-Centered Design for Virtual Reality Exposure Therapy},
author = {Sharon Mozgai and Andrew Leeds and David Kwok and Ed Fast and Albert Skip Rizzo and Arno Hartholt},
doi = {10.1109/AIVR52153.2021.00056},
year = {2021},
date = {2021-11-01},
booktitle = {2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)},
pages = {247–250},
abstract = {There has been a recent resurgence of Vietnam-era Veterans seeking Veteran Affairs (VA) services for post-traumatic stress disorder (PTSD). Multiple studies have reported positive outcomes using Virtual Reality Exposure Therapy (VRET) with Iraq/Afghanistan Veteran PTSD patients, but there have been fewer investigations into the acceptability and feasibility of VRET with older Veterans. We have extended an established VRET system, BRAVEMIND, to include Vietnam relevant content following a user-centered iterative design methodology. This paper will present the BRAVEMIND Vietnam VRET system, including setup, content, and technical architecture along with the findings of an initial focus group with Vietnam Veterans. These Veterans rated this system to be acceptable as a treatment tool for combat-related PTSD and provided valuable first-person accounts of their time in Vietnam to help guide the evolution of the VR content.},
keywords = {DTIC, MedVR, VHTL, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; McCullough, Kyle; Fast, Ed; Leeds, Andrew; Mozgai, Sharon; Aris, Tim; Ustun, Volkan; Gordon, Andrew; McGroarty, Christopher
Rapid Prototyping for Simulation and Training with the Rapid Integration & Development Environment (RIDE) Proceedings Article
In: 2021.
BibTeX | Tags: AI, DTIC, Integration Technology, Machine Learning, Simulation, UARC, VHTL
@inproceedings{hartholt_rapid_2021,
title = {Rapid Prototyping for Simulation and Training with the Rapid Integration & Development Environment (RIDE)},
author = {Arno Hartholt and Kyle McCullough and Ed Fast and Andrew Leeds and Sharon Mozgai and Tim Aris and Volkan Ustun and Andrew Gordon and Christopher McGroarty},
year = {2021},
date = {2021-11-01},
keywords = {AI, DTIC, Integration Technology, Machine Learning, Simulation, UARC, VHTL},
pubstate = {published},
tppubtype = {inproceedings}
}
Davis, Matt
Augmented Reality In Natural History Museums: Impact on Visitor Engagement and Science Learning Proceedings Article
In: GSA, 2021.
Links | BibTeX | Tags: AR, Learning Sciences, UARC
@inproceedings{davis_augment_2021,
title = {Augmented Reality In Natural History Museums: Impact on Visitor Engagement and Science Learning},
author = {Matt Davis},
url = {https://gsa.confex.com/gsa/2021AM/webprogram/Paper371425.html},
year = {2021},
date = {2021-10-01},
urldate = {2023-03-31},
publisher = {GSA},
keywords = {AR, Learning Sciences, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Xiang, Sitao; Gu, Yuming; Xiang, Pengda; Chai, Menglei; Li, Hao; Zhao, Yajie; He, Mingming
DisUnknown: Distilling Unknown Factors for Disentanglement Learning Proceedings Article
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 14790–14799, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{xiang_disunknown_2021,
title = {DisUnknown: Distilling Unknown Factors for Disentanglement Learning},
author = {Sitao Xiang and Yuming Gu and Pengda Xiang and Menglei Chai and Hao Li and Yajie Zhao and Mingming He},
url = {https://ieeexplore.ieee.org/document/9709965/},
doi = {10.1109/ICCV48922.2021.01454},
isbn = {978-1-66542-812-5},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-23},
booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {14790–14799},
publisher = {IEEE},
address = {Montreal, QC, Canada},
keywords = {DTIC, UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Kontogiorgos, Dimosthenis; Tran, Minh; Gustafson, Joakim; Soleymani, Mohammad
A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures Proceedings Article
In: Proceedings of the 2021 International Conference on Multimodal Interaction, pp. 112–120, ACM, Montréal QC Canada, 2021, ISBN: 978-1-4503-8481-0.
Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{kontogiorgos_systematic_2021,
title = {A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures},
author = {Dimosthenis Kontogiorgos and Minh Tran and Joakim Gustafson and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3462244.3479887},
doi = {10.1145/3462244.3479887},
isbn = {978-1-4503-8481-0},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-23},
booktitle = {Proceedings of the 2021 International Conference on Multimodal Interaction},
pages = {112–120},
publisher = {ACM},
address = {Montréal QC Canada},
keywords = {DTIC, Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Adami, Pooya; Rodrigues, Patrick B.; Woods, Peter J.; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale
Effectiveness of VR-based training on improving construction workers’ knowledge, skills, and safety behavior in robotic teleoperation Journal Article
In: Advanced Engineering Informatics, vol. 50, pp. 101431, 2021, ISSN: 14740346.
Links | BibTeX | Tags: DTIC, Learning Sciences, UARC, VR
@article{adami_effectiveness_2021,
title = {Effectiveness of VR-based training on improving construction workers’ knowledge, skills, and safety behavior in robotic teleoperation},
author = {Pooya Adami and Patrick B. Rodrigues and Peter J. Woods and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S147403462100183X},
doi = {10.1016/j.aei.2021.101431},
issn = {14740346},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-26},
journal = {Advanced Engineering Informatics},
volume = {50},
pages = {101431},
keywords = {DTIC, Learning Sciences, UARC, VR},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Lucas, Gale M.
Emotionally resonant media Book Section
In: Routledge International Handbook of Emotions and Media, pp. 285–302, Routledge, London, 2021, ISBN: 978-0-429-46575-8.
Links | BibTeX | Tags: Emotions
@incollection{gratch_emotionally_2021,
title = {Emotionally resonant media},
author = {Jonathan Gratch and Gale M. Lucas},
url = {https://www.taylorfrancis.com/books/9780429465758/chapters/10.4324/9780429465758-18},
doi = {10.4324/9780429465758-18},
isbn = {978-0-429-46575-8},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-28},
booktitle = {Routledge International Handbook of Emotions and Media},
pages = {285–302},
publisher = {Routledge},
address = {London},
edition = {2},
keywords = {Emotions},
pubstate = {published},
tppubtype = {incollection}
}
Hou, Yu; Chen, Meida; Volk, Rebekka; Soibelman, Lucio
An Approach to Semantically Segmenting Building Components and Outdoor Scenes Based on Multichannel Aerial Imagery Datasets Journal Article
In: Remote Sensing, vol. 13, no. 21, pp. 4357, 2021, ISSN: 2072-4292.
Abstract | Links | BibTeX | Tags:
@article{hou_approach_2021,
title = {An Approach to Semantically Segmenting Building Components and Outdoor Scenes Based on Multichannel Aerial Imagery Datasets},
author = {Yu Hou and Meida Chen and Rebekka Volk and Lucio Soibelman},
url = {https://www.mdpi.com/2072-4292/13/21/4357},
doi = {10.3390/rs13214357},
issn = {2072-4292},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-28},
journal = {Remote Sensing},
volume = {13},
number = {21},
pages = {4357},
abstract = {As-is building modeling plays an important role in energy audits and retrofits. However, in order to understand the source(s) of energy loss, researchers must know the semantic information of the buildings and outdoor scenes. Thermal information can potentially be used to distinguish objects that have similar surface colors but are composed of different materials. To utilize both the red–green–blue (RGB) color model and thermal information for the semantic segmentation of buildings and outdoor scenes, we deployed and adapted various pioneering deep convolutional neural network (DCNN) tools that combine RGB information with thermal information to improve the semantic and instance segmentation processes. When both types of information are available, the resulting DCNN models allow us to achieve better segmentation performance. By deploying three case studies, we experimented with our proposed DCNN framework, deploying datasets of building components and outdoor scenes, and testing the models to determine whether the segmentation performance had improved or not. In our observation, the fusion of RGB and thermal information can help the segmentation task in specific cases, but it might also make the neural networks hard to train or deteriorate their prediction performance in some cases. Additionally, different algorithms perform differently in semantic and instance segmentation.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Toyoda, Yuushi; Lucas, Gale; Gratch, Jonathan
Predicting Worker Accuracy from Nonverbal Behaviour: Benefits and Potential for Algorithmic Bias Proceedings Article
In: Companion Publication of the 2021 International Conference on Multimodal Interaction, pp. 25–30, ACM, Montreal QC Canada, 2021, ISBN: 978-1-4503-8471-1.
Links | BibTeX | Tags: DTIC, UARC
@inproceedings{toyoda_predicting_2021,
title = {Predicting Worker Accuracy from Nonverbal Behaviour: Benefits and Potential for Algorithmic Bias},
author = {Yuushi Toyoda and Gale Lucas and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3461615.3485427},
doi = {10.1145/3461615.3485427},
isbn = {978-1-4503-8471-1},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-28},
booktitle = {Companion Publication of the 2021 International Conference on Multimodal Interaction},
pages = {25–30},
publisher = {ACM},
address = {Montreal QC Canada},
keywords = {DTIC, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Shichen; Zhou, Yichao; Zhao, Yajie
VaPiD: A Rapid Vanishing Point Detector via Learned Optimizers Proceedings Article
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 12839–12848, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{liu_vapid_2021,
title = {VaPiD: A Rapid Vanishing Point Detector via Learned Optimizers},
author = {Shichen Liu and Yichao Zhou and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9711313/},
doi = {10.1109/ICCV48922.2021.01262},
isbn = {978-1-66542-812-5},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-22},
booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {12839–12848},
publisher = {IEEE},
address = {Montreal, QC, Canada},
keywords = {DTIC, UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Tianye; Liu, Shichen; Bolkart, Timo; Liu, Jiayi; Li, Hao; Zhao, Yajie
Topologically Consistent Multi-View Face Inference Using Volumetric Sampling Proceedings Article
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 3804–3814, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{li_topologically_2021,
title = {Topologically Consistent Multi-View Face Inference Using Volumetric Sampling},
author = {Tianye Li and Shichen Liu and Timo Bolkart and Jiayi Liu and Hao Li and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9711264/},
doi = {10.1109/ICCV48922.2021.00380},
isbn = {978-1-66542-812-5},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-22},
booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {3804–3814},
publisher = {IEEE},
address = {Montreal, QC, Canada},
keywords = {DTIC, UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Leeds, Andrew; Mozgai, Sharon
Introducing VHMason: A Visual, Integrated, Multimodal Virtual Human Authoring Tool Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 109–111, Association for Computing Machinery, New York, NY, USA, 2021, ISBN: 978-1-4503-8619-7.
Abstract | Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@inproceedings{hartholt_introducing_2021-1,
title = {Introducing VHMason: A Visual, Integrated, Multimodal Virtual Human Authoring Tool},
author = {Arno Hartholt and Ed Fast and Andrew Leeds and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3472306.3478363},
doi = {10.1145/3472306.3478363},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {109–111},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '21},
abstract = {A major impediment to the success of virtual agents is the inability of non-technical experts to easily author content. To address this barrier we present VHMason, a multimodal authoring tool designed to help creative authors build embodied conversational agents. We introduce the novel aspects of this authoring tool and explore a use case of the creation of an agent-led educational experience implemented at Children's Hospital Los Angeles (CHLA).},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hernandez, Stephanie; Artstein, Ron
Annotating low-confidence questions improves classifier performance Journal Article
In: Proceedings of the 25th Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts, 2021.
Abstract | Links | BibTeX | Tags: Natural Language, UARC
@article{hernandez_annotating_2021,
title = {Annotating low-confidence questions improves classifier performance},
author = {Stephanie Hernandez and Ron Artstein},
url = {https://par.nsf.gov/biblio/10313591-annotating-low-confidence-questions-improves-classifier-performance},
year = {2021},
date = {2021-09-01},
urldate = {2023-03-31},
journal = {Proceedings of the 25th Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts},
abstract = {This paper compares methods to select data for annotation in order to improve a classifier used in a question-answering dialogue system. With a classifier trained on 1,500 questions, adding 300 training questions on which the classifier is least confident results in consistently improved performance, whereas adding 300 arbitrarily selected training questions does not yield consistent improvement, and sometimes even degrades performance. The paper uses a new method for comparative evaluation of classifiers for dialogue, which scores each classifier based on the number of appropriate responses retrieved.},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {article}
}
Yin, Yufeng; Lu, Liupei; Xiao, Yao; Xu, Zhi; Cai, Kaijie; Jiang, Haonan; Gratch, Jonathan; Soleymani, Mohammad
Contrastive Learning for Domain Transfer in Cross-Corpus Emotion Recognition Proceedings Article
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-019-0.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{yin_contrastive_2021,
title = {Contrastive Learning for Domain Transfer in Cross-Corpus Emotion Recognition},
author = {Yufeng Yin and Liupei Lu and Yao Xiao and Zhi Xu and Kaijie Cai and Haonan Jiang and Jonathan Gratch and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9597453/},
doi = {10.1109/ACII52823.2021.9597453},
isbn = {978-1-66540-019-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-23},
booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)},
pages = {1–8},
publisher = {IEEE},
address = {Nara, Japan},
keywords = {DTIC, Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lugrin, Birgit; Pelachaud, Catherine; Traum, David (Ed.)
The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 1: Methods, Behavior, Cognition Book
1, ACM, New York, NY, USA, 2021, ISBN: 978-1-4503-8720-0.
Links | BibTeX | Tags: Dialogue, Virtual Humans
@book{lugrin_handbook_2021,
title = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 1: Methods, Behavior, Cognition},
editor = {Birgit Lugrin and Catherine Pelachaud and David Traum},
url = {https://dl.acm.org/doi/book/10.1145/3477322},
doi = {10.1145/3477322},
isbn = {978-1-4503-8720-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-23},
publisher = {ACM},
address = {New York, NY, USA},
edition = {1},
keywords = {Dialogue, Virtual Humans},
pubstate = {published},
tppubtype = {book}
}
Chaffey, Patricia; Traum, David
Identity models for role-play dialogue characters Proceedings Article
In: 2021.
Links | BibTeX | Tags: Dialogue, DTIC, UARC
@inproceedings{chaffey_identity_2021,
title = {Identity models for role-play dialogue characters},
author = {Patricia Chaffey and David Traum},
url = {http://semdial.org/anthology/papers/Z/Z21/Z21-4022/},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-23},
keywords = {Dialogue, DTIC, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Bonial, Claire; Abrams, Mitchell; Baker, Anthony L.; Hudson, Taylor; Lukin, Stephanie; Traum, David; Voss, Clare
Context is key: Annotating situated dialogue relations in multi-floor dialogue Proceedings Article
In: 2021.
Links | BibTeX | Tags: Dialogue, DTIC
@inproceedings{bonial_context_2021,
title = {Context is key: Annotating situated dialogue relations in multi-floor dialogue},
author = {Claire Bonial and Mitchell Abrams and Anthony L. Baker and Taylor Hudson and Stephanie Lukin and David Traum and Clare Voss},
url = {http://semdial.org/anthology/papers/Z/Z21/Z21-3006/},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-23},
keywords = {Dialogue, DTIC},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale M.; Gratch, Jonathan
Pandemic Panic: The Effect of Disaster-Related Stress on Negotiation Outcomes Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 148–155, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, Social Simulation, UARC
@inproceedings{mell_pandemic_2021,
title = {Pandemic Panic: The Effect of Disaster-Related Stress on Negotiation Outcomes},
author = {Johnathan Mell and Gale M. Lucas and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3472306.3478353},
doi = {10.1145/3472306.3478353},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-26},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {148–155},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chawla, Kushal; Clever, Rene; Ramirez, Jaysa; Lucas, Gale; Gratch, Jonathan
Towards Emotion-Aware Agents For Negotiation Dialogues Proceedings Article
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-019-0.
Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{chawla_towards_2021,
title = {Towards Emotion-Aware Agents For Negotiation Dialogues},
author = {Kushal Chawla and Rene Clever and Jaysa Ramirez and Gale Lucas and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/9597427/},
doi = {10.1109/ACII52823.2021.9597427},
isbn = {978-1-66540-019-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-27},
booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)},
pages = {1–8},
publisher = {IEEE},
address = {Nara, Japan},
keywords = {DTIC, Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghasem, Wesley; Valenzuela, Joshua; Saxon, Leslie A.
Player Tracking Technology and Data for Injury Prevention in the National Football League Journal Article
In: Curr Sports Med Rep, vol. 20, no. 9, pp. 436–439, 2021, ISSN: 1537-8918.
@article{ghasem_player_2021,
title = {Player Tracking Technology and Data for Injury Prevention in the National Football League},
author = {Wesley Ghasem and Joshua Valenzuela and Leslie A. Saxon},
url = {https://journals.lww.com/10.1249/JSR.0000000000000873},
doi = {10.1249/JSR.0000000000000873},
issn = {1537-8918},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
journal = {Curr Sports Med Rep},
volume = {20},
number = {9},
pages = {436–439},
keywords = {CBC},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Lucas, Gale
Rapport Between Humans and Socially Interactive Agents Book Section
In: Lugrin, Birgit; Pelachaud, Catherine; Traum, David (Ed.): The Handbook on Socially Interactive Agents, pp. 433–462, ACM, New York, NY, USA, 2021, ISBN: 978-1-4503-8720-0.
Links | BibTeX | Tags: Virtual Humans
@incollection{gratch_rapport_2021,
title = {Rapport Between Humans and Socially Interactive Agents},
author = {Jonathan Gratch and Gale Lucas},
editor = {Birgit Lugrin and Catherine Pelachaud and David Traum},
url = {https://dl.acm.org/doi/10.1145/3477322.3477335},
doi = {10.1145/3477322.3477335},
isbn = {978-1-4503-8720-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {The Handbook on Socially Interactive Agents},
pages = {433–462},
publisher = {ACM},
address = {New York, NY, USA},
edition = {1},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Terada, Kazunori; Okazoe, Mitsuki; Gratch, Jonathan
Effect of politeness strategies in dialogue on negotiation outcomes Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 195–202, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{terada_effect_2021,
title = {Effect of politeness strategies in dialogue on negotiation outcomes},
author = {Kazunori Terada and Mitsuki Okazoe and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3472306.3478336},
doi = {10.1145/3472306.3478336},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {195–202},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan; Boberg, Jill; DeVault, David; Kim, Peter; Lucas, Gale
Using Intelligent Agents to Examine Gender in Negotiations Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 90–97, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{johnson_using_2021,
title = {Using Intelligent Agents to Examine Gender in Negotiations},
author = {Emmanuel Johnson and Jonathan Gratch and Jill Boberg and David DeVault and Peter Kim and Gale Lucas},
url = {https://dl.acm.org/doi/10.1145/3472306.3478348},
doi = {10.1145/3472306.3478348},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {90–97},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan
Comparing The Accuracy of Frequentist and Bayesian Models in Human-Agent Negotiation Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 139–144, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{johnson_comparing_2021,
title = {Comparing The Accuracy of Frequentist and Bayesian Models in Human-Agent Negotiation},
author = {Emmanuel Johnson and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3472306.3478354},
doi = {10.1145/3472306.3478354},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {139–144},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Sato, Motoaki; Terada, Kazunori; Gratch, Jonathan
Visualization of social emotional appraisal process of an agent Proceedings Article
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW), pp. 1–2, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-021-3.
Links | BibTeX | Tags: Emotions, Virtual Humans
@inproceedings{sato_visualization_2021,
title = {Visualization of social emotional appraisal process of an agent},
author = {Motoaki Sato and Kazunori Terada and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/9666329/},
doi = {10.1109/ACIIW52867.2021.9666329},
isbn = {978-1-66540-021-3},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)},
pages = {1–2},
publisher = {IEEE},
address = {Nara, Japan},
keywords = {Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}