Publications
Search
Mozgai, Sharon; Beland, Sarah; Leeds, Andrew; Winn, Jade G.; Kaurloto, Cari; Heylen, Dirk; Hartholt, Arno
Toward a Scoping Review of Social Intelligence in Virtual Humans Inproceedings
In: 2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1–6, 2023.
@inproceedings{mozgai_toward_2023,
  title     = {Toward a Scoping Review of Social Intelligence in Virtual Humans},
  author    = {Sharon Mozgai and Sarah Beland and Andrew Leeds and Jade G. Winn and Cari Kaurloto and Dirk Heylen and Arno Hartholt},
  url       = {https://ieeexplore.ieee.org/abstract/document/10042532},
  doi       = {10.1109/FG57933.2023.10042532},
  year      = {2023},
  date      = {2023-01-01},
  booktitle = {2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)},
  pages     = {1--6},
  abstract  = {As the demand for socially intelligent Virtual Humans (VHs) increases, so follows the demand for effective and efficient cross-discipline collaboration that is required to bring these VHs “to life”. One avenue for increasing cross-discipline fluency is the aggregation and organization of seemingly disparate areas of research and development (e.g., graphics and emotion models) that are essential to the field of VH research. Our initial investigation (1) identifies and catalogues research streams concentrated in three multidisciplinary VH topic clusters within the domain of social intelligence, Emotion, Social Behavior, and The Face, (2) brings to the forefront key themes and prolific authors within each topic cluster, and (3) provides evidence that a full scoping review is warranted to further map the field, aggregate research findings, and identify gaps in the research. To enable collaboration, we provide full access to the refined VH cluster datasets, key word and author word clouds, as well as interactive evidence maps.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hartholt, Arno; Mozgai, Sharon
Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment Inproceedings
In: Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems, AHFE Open Access, 2023, ISBN: 978-1-958651-45-2, (ISSN: 27710718 Issue: 69).
@inproceedings{hartholt_creating_2023,
  title     = {Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration \& Development Environment},
  author    = {Arno Hartholt and Sharon Mozgai},
  url       = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-45-2/article/978-1-958651-45-2_41},
  doi       = {10.54941/ahfe1002856},
  isbn      = {978-1-958651-45-2},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-03-31},
  booktitle = {Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems},
  volume    = {69},
  publisher = {AHFE Open Access},
  abstract  = {The research and development of virtual humans, and the virtual worlds they inhabit, is inherently complex, requiring interdisciplinary approaches that combine social sciences, computer science, design, art, production, and domain expertise. Our previous work in managing this complexity has resulted in the release of the Virtual Human Toolkit (VHToolkit), aimed at lowering the burden of creating embodied conversational agents. In our current efforts, we are integrating the VHToolkit with the Rapid Integration \& Development Environment (RIDE), a rapid prototyping modeling and simulation middleware platform that leverages real-time game engines. This integration results in the ability to mix and match commercial AI services from AWS, Azure, and Google, as well as leverage novel 3D geospatial terrain creation pipelines. Combined with dedicated authoring tools that have been developed through human-centered design processes, the platform enables researchers, developers, and domain experts to rapidly create digital worlds with virtual humans for both military and civilian contexts. Our approach is highly interdisciplinary, including academia, government, and industry collaborators. The demonstration shows a user interacting with an embodied conversational agent embedded within real-world captured and virtualized terrain. Further research and development features of the platform are shown, including scripted agent behaviors, networked team play, and machine learning interfaces.},
  note      = {ISSN: 27710718 Issue: 69},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Zhang, Larry; Kolacz, Jacek; Rizzo, Albert; Scherer, Stefan; Soleymani, Mohammad
Speech Behavioral Markers Align on Symptom Factors in Psychological Distress Inproceedings
In: 2022 10th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, 2022, (ISSN: 2156-8111).
@inproceedings{zhang_speech_2022,
  title     = {Speech Behavioral Markers Align on Symptom Factors in Psychological Distress},
  author    = {Larry Zhang and Jacek Kolacz and Albert Rizzo and Stefan Scherer and Mohammad Soleymani},
  url       = {https://ieeexplore.ieee.org/abstract/document/9953849},
  doi       = {10.1109/ACII55700.2022.9953849},
  year      = {2022},
  date      = {2022-10-01},
  booktitle = {2022 10th International Conference on Affective Computing and Intelligent Interaction (ACII)},
  pages     = {1--8},
  abstract  = {Automatic detection of psychological disorders has gained significant attention in recent years due to the rise in their prevalence. However, the majority of studies have overlooked the complexity of disorders in favor of a “present/not present” dichotomy in representing disorders. Recent psychological research challenges favors transdiagnostic approaches, moving beyond general disorder classifications to symptom level analysis, as symptoms are often not exclusive to individual disorder classes. In our study, we investigated the link between speech signals and psychological distress symptoms in a corpus of 333 screening interviews from the Distress Analysis Interview Corpus (DAIC). Given the semi-structured organization of interviews, we aggregated speech utterances from responses to shared questions across interviews. We employed deterministic sample selection in classification to rank salient questions for eliciting symptom-specific behaviors in order to predict symptom presence. Some questions include “Do you find therapy helpful?” and “When was the last time you felt happy?”. The prediction results align closely to the factor structure of psychological distress symptoms, linking speech behaviors primarily to somatic and affective alterations in both depression and PTSD. This lends support for the transdiagnostic validity of speech markers for detecting such symptoms. Surprisingly, we did not find a strong link between speech markers and cognitive or psychomotor alterations. This is surprising, given the complexity of motor and cognitive actions required in speech production. The results of our analysis highlight the importance of aligning affective computing research with psychological research to investigate the use of automatic behavioral sensing to assess psychiatric risk.},
  note      = {ISSN: 2156-8111},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Brixey, Jacqueline; Traum, David
Towards an Automatic Speech Recognizer for the Choctaw language Inproceedings
In: 1st Workshop on Speech for Social Good (S4SG), pp. 6–9, ISCA, 2022.
@inproceedings{brixey_towards_2022,
  title     = {Towards an Automatic Speech Recognizer for the Choctaw language},
  author    = {Jacqueline Brixey and David Traum},
  url       = {https://www.isca-speech.org/archive/s4sg_2022/brixey22_s4sg.html},
  doi       = {10.21437/S4SG.2022-2},
  year      = {2022},
  date      = {2022-09-01},
  urldate   = {2023-03-31},
  booktitle = {1st Workshop on Speech for Social Good (S4SG)},
  pages     = {6--9},
  publisher = {ISCA},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Preference interdependencies in a multi-issue salary negotiation Inproceedings
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
@inproceedings{hale_preference_2022,
  title     = {Preference interdependencies in a multi-issue salary negotiation},
  author    = {James Hale and Peter Kim and Jonathan Gratch},
  url       = {https://doi.org/10.1145/3514197.3549681},
  doi       = {10.1145/3514197.3549681},
  isbn      = {978-1-4503-9248-8},
  year      = {2022},
  date      = {2022-09-01},
  urldate   = {2022-09-27},
  booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
  pages     = {1--8},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  series    = {IVA '22},
  abstract  = {Negotiation is an important potential application domain for intelligent virtual agents but, unlike research on agent-agent negotiations, agents that negotiate with people often adopt unrealistic simplifying assumptions. These assumptions not only limit the generality of these agents, but call into question scientific findings about how people negotiate with agents. Here we relax two common assumptions: the use of assigned rather than elicited user preferences, and the use of linear utility functions. Using a simulated salary negotiation, we find that relaxing these assumptions helps reveal interesting individual differences in how people negotiate their salary and allows algorithms to find better win-win solutions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hale, James; Jalan, Harsh; Saini, Nidhi; Tan, Shao Ling; Woo, Junhyuck; Gratch, Jonathan
Negotiation game to introduce non-linear utility Inproceedings
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
@inproceedings{hale_negotiation_2022,
  title     = {Negotiation game to introduce non-linear utility},
  author    = {James Hale and Harsh Jalan and Nidhi Saini and Shao Ling Tan and Junhyuck Woo and Jonathan Gratch},
  url       = {https://doi.org/10.1145/3514197.3549678},
  doi       = {10.1145/3514197.3549678},
  isbn      = {978-1-4503-9248-8},
  year      = {2022},
  date      = {2022-09-01},
  urldate   = {2022-09-27},
  booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
  pages     = {1--3},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  series    = {IVA '22},
  abstract  = {Much prior work in automated negotiation makes the simplifying assumption of linear utility functions. As such, we propose a framework for multilateral repeated negotiations in a complex game setting—to introduce non-linearities—where negotiators can choose with whom they negotiate in subsequent games. This game setting not only creates non-linear utility functions, but also motivates the negotiation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lee, Eugene; McNulty, Zachary; Gentle, Alex; Pradhan, Prerak Tusharkumar; Gratch, Jonathan
Examining the impact of emotion and agency on negotiator behavior Inproceedings
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
@inproceedings{lee_examining_2022,
  title     = {Examining the impact of emotion and agency on negotiator behavior},
  author    = {Eugene Lee and Zachary McNulty and Alex Gentle and Prerak Tusharkumar Pradhan and Jonathan Gratch},
  url       = {https://doi.org/10.1145/3514197.3549673},
  doi       = {10.1145/3514197.3549673},
  isbn      = {978-1-4503-9248-8},
  year      = {2022},
  date      = {2022-09-01},
  urldate   = {2022-09-27},
  booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
  pages     = {1--3},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  series    = {IVA '22},
  abstract  = {Virtual human expressions can shape user behavior [1, 2, 3], yet in negotiation, findings have been underwhelming. For example, human negotiators can use anger to claim value (i.e., extract concessions) [4], but anger has no effect when exhibited by a virtual human [5]. Other psychological work suggests that emotions can create value (e.g., happy negotiators can better discover tradeoffs across issues that "grow the pie"), but little research has examined how virtual human expressions shape value creation. Here we present an agent architecture and pilot study that examines differences between how the emotional expressions of human and virtual-human opponents shape value claiming and value creation. We replicate the finding that virtual human anger fails to influence value claiming but discover counter-intuitive findings on value creation. We argue these findings highlight the potential for intelligent virtual humans to yield insight into human psychology.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Li, Zongjian; Kim, Kevin; Leeds, Andrew; Mozgai, Sharon
Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development Inproceedings
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, ACM, Faro Portugal, 2022, ISBN: 978-1-4503-9248-8.
@inproceedings{hartholt_re-architecting_2022,
  title     = {Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development},
  author    = {Arno Hartholt and Ed Fast and Zongjian Li and Kevin Kim and Andrew Leeds and Sharon Mozgai},
  url       = {https://dl.acm.org/doi/10.1145/3514197.3549671},
  doi       = {10.1145/3514197.3549671},
  isbn      = {978-1-4503-9248-8},
  year      = {2022},
  date      = {2022-09-01},
  urldate   = {2022-09-15},
  booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
  pages     = {1--8},
  publisher = {ACM},
  address   = {Faro Portugal},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Georgila, Kallirroi
Comparing Regression Methods for Dialogue System Evaluation on a Richly Annotated Corpus Inproceedings
In: 2022.
@inproceedings{georgila_comparing_2022,
  title     = {Comparing Regression Methods for Dialogue System Evaluation on a Richly Annotated Corpus},
  author    = {Kallirroi Georgila},
  url       = {http://semdial.org/anthology/papers/Z/Z22/Z22-3011/},
  year      = {2022},
  date      = {2022-08-01},
  urldate   = {2023-03-31},
  abstract  = {We compare various state-of-the-art regression methods for predicting user ratings of their interaction with a dialogue system using a richly annotated corpus. We vary the size of the training data and, in particular for kernel-based methods, we vary the type of kernel used. Furthermore, we experiment with various domain-independent features, including feature combinations that do not rely on complex annotations. We present detailed results in terms of root mean square error, and Pearson’s r and Spearman’s ρ correlations. Our results show that in many cases Gaussian Process Regression leads to modest but statistically significant gains compared to Support Vector Regression (a strong baseline), and that the type of kernel used matters. The gains are even larger when compared to linear regression. The larger the training data set the higher the gains but for some cases more data may result in over-fitting. Finally, some feature combinations work better than others but overall the best results are obtained when all features are used.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pynadath, David V.; Gurney, Nikolos; Wang, Ning
Explainable Reinforcement Learning in Human-Robot Teams: The Impact of Decision-Tree Explanations on Transparency Inproceedings
In: 2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp. 749–756, 2022, (ISSN: 1944-9437).
@inproceedings{pynadath_explainable_2022,
  title     = {Explainable Reinforcement Learning in Human-Robot Teams: The Impact of Decision-Tree Explanations on Transparency},
  author    = {David V. Pynadath and Nikolos Gurney and Ning Wang},
  doi       = {10.1109/RO-MAN53752.2022.9900608},
  year      = {2022},
  date      = {2022-08-01},
  booktitle = {2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
  pages     = {749--756},
  abstract  = {Understanding the decisions of AI-driven systems and the rationale behind such decisions is key to the success of the human-robot team. However, the complexity and the "black-box" nature of many AI algorithms create a barrier for establishing such understanding within their human counterparts. Reinforcement Learning (RL), a machine-learning algorithm based on the simple idea of action-reward mappings, has a rich quantitative representation and a complex iterative reasoning process that present a significant obstacle to human understanding of, for example, how value functions are constructed, how the algorithms update the value functions, and how such updates impact the action/policy chosen by the robot. In this paper, we discuss our work to address this challenge by developing a decision-tree based explainable model for RL to make a robot’s decision-making process more transparent. Set in a human-robot virtual teaming testbed, we conducted a study to assess the impact of the explanations, generated using decision trees, on building transparency, calibrating trust, and improving the overall human-robot team’s performance. We discuss the design of the explainable model and the positive impact of the explanations on outcome measures.},
  note      = {ISSN: 1944-9437},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kuang, Zhengfei; Li, Jiaman; He, Mingming; Wang, Tong; Zhao, Yajie
DenseGAP: Graph-Structured Dense Correspondence Learning with Anchor Points Inproceedings
In: pp. 542–549, IEEE Computer Society, 2022, ISBN: 978-1-66549-062-7.
@inproceedings{kuang_densegap_2022,
  title     = {DenseGAP: Graph-Structured Dense Correspondence Learning with Anchor Points},
  author    = {Zhengfei Kuang and Jiaman Li and Mingming He and Tong Wang and Yajie Zhao},
  url       = {https://www.computer.org/csdl/proceedings-article/icpr/2022/09956472/1IHpppIuqOc},
  doi       = {10.1109/ICPR56361.2022.9956472},
  isbn      = {978-1-66549-062-7},
  year      = {2022},
  date      = {2022-08-01},
  urldate   = {2023-03-31},
  booktitle = {2022 26th International Conference on Pattern Recognition (ICPR)},
  pages     = {542--549},
  publisher = {IEEE Computer Society},
  abstract  = {Establishing dense correspondence between two images is a fundamental computer vision problem, which is typically tackled by matching local feature descriptors. However, without global awareness, such local features are often insufficient for disambiguating similar regions. And computing the pairwise feature correlation across images is both computation-expensive and memory-intensive. To make the local features aware of the global context and improve their matching accuracy, we introduce DenseGAP, a new solution for efficient Dense correspondence learning with a Graph-structured neural network conditioned on Anchor Points. Specifically, we first propose a graph structure that utilizes anchor points to provide sparse but reliable prior on inter- and intra-image context and propagates them to all image points via directed edges. We also design a graph-structured network to broadcast multi-level contexts via light-weighted message-passing layers and generate high-resolution feature maps at low memory cost. Finally, based on the predicted feature maps, we introduce a coarse-to-fine framework for accurate correspondence prediction using cycle consistency. Our feature descriptors capture both local and global information, thus enabling a continuous feature field for querying arbitrary points at high resolution. Through comprehensive ablative experiments and evaluations on large-scale indoor and outdoor datasets, we demonstrate that our method advances the state-of-the-art of correspondence learning on most benchmarks.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gurney, Nikolos; Pynadath, David V.
Robots with Theory of Mind for Humans: A Survey Inproceedings
In: 2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp. 993–1000, 2022, (ISSN: 1944-9437).
@inproceedings{gurney_robots_2022,
  title     = {Robots with Theory of Mind for Humans: A Survey},
  author    = {Nikolos Gurney and David V. Pynadath},
  url       = {https://ieeexplore.ieee.org/abstract/document/9900662},
  doi       = {10.1109/RO-MAN53752.2022.9900662},
  year      = {2022},
  date      = {2022-08-01},
  booktitle = {2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
  pages     = {993--1000},
  abstract  = {Theory of Mind (ToM) is a psychological construct that captures the ability to ascribe mental states to others and then use those representations for explaining and predicting behavior. We review recent progress in endowing artificially intelligent robots with ToM. A broad array of modeling, experimental, and benchmarking approaches and methods are present in the extant literature. Unlike other domains of human cognition for which research has achieved super-human capabilities, ToM for robots lacks a unified construct and is not consistently benchmarked or validated—realities which possibly hinder progress in this domain. We argue that this is, at least in part, due to inconsistent defining of ToM, no presence of a unifying modeling construct, and the absence of a shared data resource. We believe these would improve the ability of the research community to compare the ToM abilities of different systems. We suggest that establishing a shared definition of ToM, creating a shared data resource that supports consistent benchmarking \& validation, and developing a generalized modeling tool are critical steps towards giving robots ToM capabilities that lay observers will recognize as such.},
  note      = {ISSN: 1944-9437},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Speggiorin, Alessandro; Dalton, Jeffrey; Leuski, Anton
TaskMAD: A Platform for Multimodal Task-Centric Knowledge-Grounded Conversational Experimentation Inproceedings
In: Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 3240–3244, ACM, Madrid Spain, 2022, ISBN: 978-1-4503-8732-3.
@inproceedings{speggiorin_taskmad_2022,
  title     = {TaskMAD: A Platform for Multimodal Task-Centric Knowledge-Grounded Conversational Experimentation},
  author    = {Alessandro Speggiorin and Jeffrey Dalton and Anton Leuski},
  url       = {https://dl.acm.org/doi/10.1145/3477495.3531679},
  doi       = {10.1145/3477495.3531679},
  isbn      = {978-1-4503-8732-3},
  year      = {2022},
  date      = {2022-07-01},
  urldate   = {2022-09-22},
  booktitle = {Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval},
  pages     = {3240--3244},
  publisher = {ACM},
  address   = {Madrid Spain},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Karkada, Deepthi; Manuvinakurike, Ramesh; Paetzel-Prüsmann, Maike; Georgila, Kallirroi
Strategy-level Entrainment of Dialogue System Users in a Creative Visual Reference Resolution Task Inproceedings
In: Proceedings of the Thirteenth Language Resources and Evaluation Conference, pp. 5768–5777, European Language Resources Association, Marseille, France, 2022.
@inproceedings{karkada_strategy-level_2022,
  title     = {Strategy-level Entrainment of Dialogue System Users in a Creative Visual Reference Resolution Task},
  author    = {Deepthi Karkada and Ramesh Manuvinakurike and Maike Paetzel-Prüsmann and Kallirroi Georgila},
  url       = {https://aclanthology.org/2022.lrec-1.620},
  year      = {2022},
  date      = {2022-06-01},
  urldate   = {2023-03-31},
  booktitle = {Proceedings of the Thirteenth Language Resources and Evaluation Conference},
  pages     = {5768--5777},
  publisher = {European Language Resources Association},
  address   = {Marseille, France},
  abstract  = {In this work, we study entrainment of users playing a creative reference resolution game with an autonomous dialogue system. The language understanding module in our dialogue system leverages annotated human-wizard conversational data, openly available knowledge graphs, and crowd-augmented data. Unlike previous entrainment work, our dialogue system does not attempt to make the human conversation partner adopt lexical items in their dialogue, but rather to adapt their descriptive strategy to one that is simpler to parse for our natural language understanding unit. By deploying this dialogue system through a crowd-sourced study, we show that users indeed entrain on a “strategy-level” without the change of strategy impinging on their creativity. Our work thus presents a promising future research direction for developing dialogue management systems that can strategically influence people's descriptive strategy to ease the system's language understanding in creative tasks.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Tur, Ada; Traum, David
Comparing Approaches to Language Understanding for Human-Robot Dialogue: An Error Taxonomy and Analysis Inproceedings
In: Proceedings of the Thirteenth Language Resources and Evaluation Conference, pp. 5813–5820, European Language Resources Association, Marseille, France, 2022.
@inproceedings{tur_comparing_2022,
  title     = {Comparing Approaches to Language Understanding for Human-Robot Dialogue: An Error Taxonomy and Analysis},
  author    = {Ada Tur and David Traum},
  url       = {https://aclanthology.org/2022.lrec-1.625},
  year      = {2022},
  date      = {2022-06-01},
  urldate   = {2023-02-10},
  booktitle = {Proceedings of the Thirteenth Language Resources and Evaluation Conference},
  pages     = {5813--5820},
  publisher = {European Language Resources Association},
  address   = {Marseille, France},
  abstract  = {In this paper, we compare two different approaches to language understanding for a human-robot interaction domain in which a human commander gives navigation instructions to a robot. We contrast a relevance-based classifier with a GPT-2 model, using about 2000 input-output examples as training data. With this level of training data, the relevance-based model outperforms the GPT-2 based model 79% to 8%. We also present a taxonomy of types of errors made by each model, indicating that they have somewhat different strengths and weaknesses, so we also examine the potential for a combined model.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chen, Haiwei; Liu, Jiayi; Chen, Weikai; Liu, Shichen; Zhao, Yajie
Exemplar-based Pattern Synthesis with Implicit Periodic Field Network Inproceedings
In: 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3698–3707, IEEE, New Orleans, LA, USA, 2022, ISBN: 978-1-66546-946-3.
@inproceedings{chen_exemplar-based_2022,
  title     = {Exemplar-based Pattern Synthesis with Implicit Periodic Field Network},
  author    = {Haiwei Chen and Jiayi Liu and Weikai Chen and Shichen Liu and Yajie Zhao},
  url       = {https://ieeexplore.ieee.org/document/9879904/},
  doi       = {10.1109/CVPR52688.2022.00369},
  isbn      = {978-1-66546-946-3},
  year      = {2022},
  date      = {2022-06-01},
  urldate   = {2023-02-10},
  booktitle = {2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
  pages     = {3698--3707},
  publisher = {IEEE},
  address   = {New Orleans, LA, USA},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Tadimeti, Divya; Georgila, Kallirroi; Traum, David
Evaluation of Off-the-shelf Speech Recognizers on Different Accents in a Dialogue Domain Inproceedings
In: Proceedings of the Language Resources and Evaluation Conference, pp. 6001–6008, European Language Resources Association, Marseille, France, 2022.
@inproceedings{tadimeti_evaluation_2022,
  title     = {Evaluation of Off-the-shelf Speech Recognizers on Different Accents in a Dialogue Domain},
  author    = {Divya Tadimeti and Kallirroi Georgila and David Traum},
  url       = {https://aclanthology.org/2022.lrec-1.645},
  year      = {2022},
  date      = {2022-06-01},
  booktitle = {Proceedings of the Language Resources and Evaluation Conference},
  pages     = {6001--6008},
  publisher = {European Language Resources Association},
  address   = {Marseille, France},
  abstract  = {We evaluate several publicly available off-the-shelf (commercial and research) automatic speech recognition (ASR) systems on dialogue agent-directed English speech from speakers with General American vs. non-American accents. Our results show that the performance of the ASR systems for non-American accents is considerably worse than for General American accents. Depending on the recognizer, the absolute difference in performance between General American accents and all non-American accents combined can vary approximately from 2% to 12%, with relative differences varying approximately between 16% and 49%. This drop in performance becomes even larger when we consider specific categories of non-American accents indicating a need for more diligent collection of and training on non-native English speaker data in order to narrow this performance gap. There are performance differences across ASR systems, and while the same general pattern holds, with more errors for non-American accents, there are some accents for which the best recognizer is different than in the overall case. We expect these results to be useful for dialogue system designers in developing more robust inclusive dialogue systems, and for ASR providers in taking into account performance requirements for different accents.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mozgai, Sharon; Winn, Jade; Kaurloto, Cari; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno
Toward a Semi-Automated Scoping Review of Virtual Human Smiles Inproceedings
In: Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop, 2022.
@inproceedings{mozgai_toward_2022,
  title     = {Toward a Semi-Automated Scoping Review of Virtual Human Smiles},
  author    = {Sharon Mozgai and Jade Winn and Cari Kaurloto and Andrew Leeds and Dirk Heylen and Arno Hartholt},
  url       = {http://www.lrec-conf.org/proceedings/lrec2022/workshops/SmiLa/index.html},
  year      = {2022},
  date      = {2022-06-01},
  booktitle = {Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Tran, Minh; Soleymani, Mohammad
A Pre-Trained Audio-Visual Transformer for Emotion Recognition Inproceedings
In: ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 4698–4702, IEEE, Singapore, Singapore, 2022, ISBN: 978-1-66540-540-9.
@inproceedings{tran_pre-trained_2022,
  title     = {A Pre-Trained Audio-Visual Transformer for Emotion Recognition},
  author    = {Minh Tran and Mohammad Soleymani},
  url       = {https://ieeexplore.ieee.org/document/9747278/},
  doi       = {10.1109/ICASSP43922.2022.9747278},
  isbn      = {978-1-66540-540-9},
  year      = {2022},
  date      = {2022-05-01},
  urldate   = {2022-09-23},
  booktitle = {ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages     = {4698--4702},
  publisher = {IEEE},
  address   = {Singapore, Singapore},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Zhu, Haidong; Zheng, Zhaoheng; Soleymani, Mohammad; Nevatia, Ram
Self-Supervised Learning for Sentiment Analysis via Image-Text Matching Inproceedings
In: ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1710–1714, IEEE, Singapore, Singapore, 2022, ISBN: 978-1-66540-540-9.
@inproceedings{zhu_self-supervised_2022,
  title     = {Self-Supervised Learning for Sentiment Analysis via Image-Text Matching},
  author    = {Haidong Zhu and Zhaoheng Zheng and Mohammad Soleymani and Ram Nevatia},
  url       = {https://ieeexplore.ieee.org/document/9747819/},
  doi       = {10.1109/ICASSP43922.2022.9747819},
  isbn      = {978-1-66540-540-9},
  year      = {2022},
  date      = {2022-05-01},
  urldate   = {2022-09-23},
  booktitle = {ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages     = {1710--1714},
  publisher = {IEEE},
  address   = {Singapore, Singapore},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Filter
2023
Mozgai, Sharon; Beland, Sarah; Leeds, Andrew; Winn, Jade G.; Kaurloto, Cari; Heylen, Dirk; Hartholt, Arno
Toward a Scoping Review of Social Intelligence in Virtual Humans Inproceedings
In: 2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1–6, 2023.
Abstract | Links | BibTeX | Tags:
Disabled duplicate: an identical entry with the key mozgai_toward_2023 already
appears at the top of this file. A repeated key is a hard error in BibTeX, so
the leading at-sign has been removed from this copy; BibTeX ignores any text
that is not part of an entry, leaving the key defined exactly once. Delete this
block once the file is deduplicated.
inproceedings{mozgai_toward_2023,
title = {Toward a Scoping Review of Social Intelligence in Virtual Humans},
author = {Sharon Mozgai and Sarah Beland and Andrew Leeds and Jade G. Winn and Cari Kaurloto and Dirk Heylen and Arno Hartholt},
url = {https://ieeexplore.ieee.org/abstract/document/10042532},
doi = {10.1109/FG57933.2023.10042532},
year = {2023},
date = {2023-01-01},
booktitle = {2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)},
pages = {1--6},
abstract = {As the demand for socially intelligent Virtual Humans (VHs) increases, so follows the demand for effective and efficient cross-discipline collaboration that is required to bring these VHs “to life”. One avenue for increasing cross-discipline fluency is the aggregation and organization of seemingly disparate areas of research and development (e.g., graphics and emotion models) that are essential to the field of VH research. Our initial investigation (1) identifies and catalogues research streams concentrated in three multidisciplinary VH topic clusters within the domain of social intelligence, Emotion, Social Behavior, and The Face, (2) brings to the forefront key themes and prolific authors within each topic cluster, and (3) provides evidence that a full scoping review is warranted to further map the field, aggregate research findings, and identify gaps in the research. To enable collaboration, we provide full access to the refined VH cluster datasets, key word and author word clouds, as well as interactive evidence maps.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Mozgai, Sharon
Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment Inproceedings
In: Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems, AHFE Open Access, 2023, ISBN: 978-1-958651-45-2, (ISSN: 27710718 Issue: 69).
Abstract | Links | BibTeX | Tags:
@inproceedings{hartholt_creating_2023,
  title     = {Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration \& Development Environment},
  author    = {Arno Hartholt and Sharon Mozgai},
  url       = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-45-2/article/978-1-958651-45-2_41},
  doi       = {10.54941/ahfe1002856},
  isbn      = {978-1-958651-45-2},
  issn      = {2771-0718},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-03-31},
  booktitle = {Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems},
  volume    = {69},
  publisher = {AHFE Open Access},
  abstract  = {The research and development of virtual humans, and the virtual worlds they inhabit, is inherently complex, requiring interdisciplinary approaches that combine social sciences, computer science, design, art, production, and domain expertise. Our previous work in managing this complexity has resulted in the release of the Virtual Human Toolkit (VHToolkit), aimed at lowering the burden of creating embodied conversational agents. In our current efforts, we are integrating the VHToolkit with the Rapid Integration \& Development Environment (RIDE), a rapid prototyping modeling and simulation middleware platform that leverages real-time game engines. This integration results in the ability to mix and match commercial AI services from AWS, Azure, and Google, as well as leverage novel 3D geospatial terrain creation pipelines. Combined with dedicated authoring tools that have been developed through human-centered design processes, the platform enables researchers, developers, and domain experts to rapidly create digital worlds with virtual humans for both military and civilian contexts. Our approach is highly interdisciplinary, including academia, government, and industry collaborators. The demonstration shows a user interacting with an embodied conversational agent embedded within real-world captured and virtualized terrain. Further research and development features of the platform are shown, including scripted agent behaviors, networked team play, and machine learning interfaces.},
  note      = {Issue: 69},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2022
Zhang, Larry; Kolacz, Jacek; Rizzo, Albert; Scherer, Stefan; Soleymani, Mohammad
Speech Behavioral Markers Align on Symptom Factors in Psychological Distress Inproceedings
In: 2022 10th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, 2022, (ISSN: 2156-8111).
Abstract | Links | BibTeX | Tags:
@inproceedings{zhang_speech_2022,
  title     = {Speech Behavioral Markers Align on Symptom Factors in Psychological Distress},
  author    = {Larry Zhang and Jacek Kolacz and Albert Rizzo and Stefan Scherer and Mohammad Soleymani},
  url       = {https://ieeexplore.ieee.org/abstract/document/9953849},
  doi       = {10.1109/ACII55700.2022.9953849},
  issn      = {2156-8111},
  year      = {2022},
  date      = {2022-10-01},
  booktitle = {2022 10th International Conference on Affective Computing and Intelligent Interaction (ACII)},
  pages     = {1--8},
  abstract  = {Automatic detection of psychological disorders has gained significant attention in recent years due to the rise in their prevalence. However, the majority of studies have overlooked the complexity of disorders in favor of a “present/not present” dichotomy in representing disorders. Recent psychological research challenges favors transdiagnostic approaches, moving beyond general disorder classifications to symptom level analysis, as symptoms are often not exclusive to individual disorder classes. In our study, we investigated the link between speech signals and psychological distress symptoms in a corpus of 333 screening interviews from the Distress Analysis Interview Corpus (DAIC). Given the semi-structured organization of interviews, we aggregated speech utterances from responses to shared questions across interviews. We employed deterministic sample selection in classification to rank salient questions for eliciting symptom-specific behaviors in order to predict symptom presence. Some questions include “Do you find therapy helpful?” and “When was the last time you felt happy?”. The prediction results align closely to the factor structure of psychological distress symptoms, linking speech behaviors primarily to somatic and affective alterations in both depression and PTSD. This lends support for the transdiagnostic validity of speech markers for detecting such symptoms. Surprisingly, we did not find a strong link between speech markers and cognitive or psychomotor alterations. This is surprising, given the complexity of motor and cognitive actions required in speech production. The results of our analysis highlight the importance of aligning affective computing research with psychological research to investigate the use of automatic behavioral sensing to assess psychiatric risk.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Brixey, Jacqueline; Traum, David
Towards an Automatic Speech Recognizer for the Choctaw language Inproceedings
In: 1st Workshop on Speech for Social Good (S4SG), pp. 6–9, ISCA, 2022.
@inproceedings{brixey_towards_2022,
  title     = {Towards an Automatic Speech Recognizer for the {Choctaw} language},
  author    = {Jacqueline Brixey and David Traum},
  url       = {https://www.isca-speech.org/archive/s4sg_2022/brixey22_s4sg.html},
  doi       = {10.21437/S4SG.2022-2},
  year      = {2022},
  date      = {2022-09-01},
  urldate   = {2023-03-31},
  booktitle = {1st Workshop on Speech for Social Good (S4SG)},
  pages     = {6--9},
  publisher = {ISCA},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Preference interdependencies in a multi-issue salary negotiation Inproceedings
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{hale_preference_2022,
  author    = {James Hale and Peter Kim and Jonathan Gratch},
  title     = {Preference interdependencies in a multi-issue salary negotiation},
  booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
  series    = {IVA '22},
  pages     = {1--8},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  year      = {2022},
  date      = {2022-09-01},
  urldate   = {2022-09-27},
  doi       = {10.1145/3514197.3549681},
  isbn      = {978-1-4503-9248-8},
  url       = {https://doi.org/10.1145/3514197.3549681},
  abstract  = {Negotiation is an important potential application domain for intelligent virtual agents but, unlike research on agent-agent negotiations, agents that negotiate with people often adopt unrealistic simplifying assumptions. These assumptions not only limit the generality of these agents, but call into question scientific findings about how people negotiate with agents. Here we relax two common assumptions: the use of assigned rather than elicited user preferences, and the use of linear utility functions. Using a simulated salary negotiation, we find that relaxing these assumptions helps reveal interesting individual differences in how people negotiate their salary and allows algorithms to find better win-win solutions.},
  keywords  = {DTIC, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hale, James; Jalan, Harsh; Saini, Nidhi; Tan, Shao Ling; Woo, Junhyuck; Gratch, Jonathan
Negotiation game to introduce non-linear utility Inproceedings
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{hale_negotiation_2022,
  author    = {James Hale and Harsh Jalan and Nidhi Saini and Shao Ling Tan and Junhyuck Woo and Jonathan Gratch},
  title     = {Negotiation game to introduce non-linear utility},
  booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
  series    = {IVA '22},
  pages     = {1--3},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  year      = {2022},
  date      = {2022-09-01},
  urldate   = {2022-09-27},
  doi       = {10.1145/3514197.3549678},
  isbn      = {978-1-4503-9248-8},
  url       = {https://doi.org/10.1145/3514197.3549678},
  abstract  = {Much prior work in automated negotiation makes the simplifying assumption of linear utility functions. As such, we propose a framework for multilateral repeated negotiations in a complex game setting—to introduce non-linearities—where negotiators can choose with whom they negotiate in subsequent games. This game setting not only creates non-linear utility functions, but also motivates the negotiation.},
  keywords  = {DTIC, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lee, Eugene; McNulty, Zachary; Gentle, Alex; Pradhan, Prerak Tusharkumar; Gratch, Jonathan
Examining the impact of emotion and agency on negotiator behavior Inproceedings
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
Abstract | Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{lee_examining_2022,
  author    = {Eugene Lee and Zachary McNulty and Alex Gentle and Prerak Tusharkumar Pradhan and Jonathan Gratch},
  title     = {Examining the impact of emotion and agency on negotiator behavior},
  booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
  series    = {IVA '22},
  pages     = {1--3},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  year      = {2022},
  date      = {2022-09-01},
  urldate   = {2022-09-27},
  doi       = {10.1145/3514197.3549673},
  isbn      = {978-1-4503-9248-8},
  url       = {https://doi.org/10.1145/3514197.3549673},
  abstract  = {Virtual human expressions can shape user behavior [1, 2, 3], yet in negotiation, findings have been underwhelming. For example, human negotiators can use anger to claim value (i.e., extract concessions) [4], but anger has no effect when exhibited by a virtual human [5]. Other psychological work suggests that emotions can create value (e.g., happy negotiators can better discover tradeoffs across issues that "grow the pie"), but little research has examined how virtual human expressions shape value creation. Here we present an agent architecture and pilot study that examines differences between how the emotional expressions of human and virtual-human opponents shape value claiming and value creation. We replicate the finding that virtual human anger fails to influence value claiming but discover counter-intuitive findings on value creation. We argue these findings highlight the potential for intelligent virtual humans to yield insight into human psychology.},
  keywords  = {DTIC, Emotions, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Li, Zongjian; Kim, Kevin; Leeds, Andrew; Mozgai, Sharon
Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development Inproceedings
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, ACM, Faro Portugal, 2022, ISBN: 978-1-4503-9248-8.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{hartholt_re-architecting_2022,
  author    = {Arno Hartholt and Ed Fast and Zongjian Li and Kevin Kim and Andrew Leeds and Sharon Mozgai},
  title     = {Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development},
  booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
  pages     = {1--8},
  publisher = {ACM},
  address   = {Faro Portugal},
  year      = {2022},
  date      = {2022-09-01},
  urldate   = {2022-09-15},
  doi       = {10.1145/3514197.3549671},
  isbn      = {978-1-4503-9248-8},
  url       = {https://dl.acm.org/doi/10.1145/3514197.3549671},
  keywords  = {DTIC, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Georgila, Kallirroi
Comparing Regression Methods for Dialogue System Evaluation on a Richly Annotated Corpus Inproceedings
In: 2022.
Abstract | Links | BibTeX | Tags:
@inproceedings{georgila_comparing_2022,
  title     = {Comparing Regression Methods for Dialogue System Evaluation on a Richly Annotated Corpus},
  author    = {Kallirroi Georgila},
  url       = {http://semdial.org/anthology/papers/Z/Z22/Z22-3011/},
  year      = {2022},
  date      = {2022-08-01},
  urldate   = {2023-03-31},
  abstract  = {We compare various state-of-the-art regression methods for predicting user ratings of their interaction with a dialogue system using a richly annotated corpus. We vary the size of the training data and, in particular for kernel-based methods, we vary the type of kernel used. Furthermore, we experiment with various domain-independent features, including feature combinations that do not rely on complex annotations. We present detailed results in terms of root mean square error, and Pearson’s r and Spearman’s ρ correlations. Our results show that in many cases Gaussian Process Regression leads to modest but statistically significant gains compared to Support Vector Regression (a strong baseline), and that the type of kernel used matters. The gains are even larger when compared to linear regression. The larger the training data set the higher the gains but for some cases more data may result in over-fitting. Finally, some feature combinations work better than others but overall the best results are obtained when all features are used.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pynadath, David V.; Gurney, Nikolos; Wang, Ning
Explainable Reinforcement Learning in Human-Robot Teams: The Impact of Decision-Tree Explanations on Transparency Inproceedings
In: 2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp. 749–756, 2022, (ISSN: 1944-9437).
Abstract | Links | BibTeX | Tags:
@inproceedings{pynadath_explainable_2022,
  title     = {Explainable Reinforcement Learning in Human-Robot Teams: The Impact of Decision-Tree Explanations on Transparency},
  author    = {David V. Pynadath and Nikolos Gurney and Ning Wang},
  doi       = {10.1109/RO-MAN53752.2022.9900608},
  issn      = {1944-9437},
  year      = {2022},
  date      = {2022-08-01},
  booktitle = {2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
  pages     = {749--756},
  abstract  = {Understanding the decisions of AI-driven systems and the rationale behind such decisions is key to the success of the human-robot team. However, the complexity and the "black-box" nature of many AI algorithms create a barrier for establishing such understanding within their human counterparts. Reinforcement Learning (RL), a machine-learning algorithm based on the simple idea of action-reward mappings, has a rich quantitative representation and a complex iterative reasoning process that present a significant obstacle to human understanding of, for example, how value functions are constructed, how the algorithms update the value functions, and how such updates impact the action/policy chosen by the robot. In this paper, we discuss our work to address this challenge by developing a decision-tree based explainable model for RL to make a robot’s decision-making process more transparent. Set in a human-robot virtual teaming testbed, we conducted a study to assess the impact of the explanations, generated using decision trees, on building transparency, calibrating trust, and improving the overall human-robot team’s performance. We discuss the design of the explainable model and the positive impact of the explanations on outcome measures.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kuang, Zhengfei; Li, Jiaman; He, Mingming; Wang, Tong; Zhao, Yajie
DenseGAP: Graph-Structured Dense Correspondence Learning with Anchor Points Inproceedings
In: pp. 542–549, IEEE Computer Society, 2022, ISBN: 978-1-66549-062-7.
Abstract | Links | BibTeX | Tags: VGL
@inproceedings{kuang_densegap_2022,
  title     = {{DenseGAP}: Graph-Structured Dense Correspondence Learning with Anchor Points},
  author    = {Zhengfei Kuang and Jiaman Li and Mingming He and Tong Wang and Yajie Zhao},
  url       = {https://www.computer.org/csdl/proceedings-article/icpr/2022/09956472/1IHpppIuqOc},
  doi       = {10.1109/ICPR56361.2022.9956472},
  isbn      = {978-1-66549-062-7},
  year      = {2022},
  date      = {2022-08-01},
  urldate   = {2023-03-31},
  booktitle = {2022 26th International Conference on Pattern Recognition (ICPR)},
  pages     = {542--549},
  publisher = {IEEE Computer Society},
  abstract  = {Establishing dense correspondence between two images is a fundamental computer vision problem, which is typically tackled by matching local feature descriptors. However, without global awareness, such local features are often insufficient for disambiguating similar regions. And computing the pairwise feature correlation across images is both computation-expensive and memory-intensive. To make the local features aware of the global context and improve their matching accuracy, we introduce DenseGAP, a new solution for efficient Dense correspondence learning with a Graph-structured neural network conditioned on Anchor Points. Specifically, we first propose a graph structure that utilizes anchor points to provide sparse but reliable prior on inter- and intra-image context and propagates them to all image points via directed edges. We also design a graph-structured network to broadcast multi-level contexts via light-weighted message-passing layers and generate high-resolution feature maps at low memory cost. Finally, based on the predicted feature maps, we introduce a coarse-to-fine framework for accurate correspondence prediction using cycle consistency. Our feature descriptors capture both local and global information, thus enabling a continuous feature field for querying arbitrary points at high resolution. Through comprehensive ablative experiments and evaluations on large-scale indoor and outdoor datasets, we demonstrate that our method advances the state-of-the-art of correspondence learning on most benchmarks.},
  keywords  = {VGL},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gurney, Nikolos; Pynadath, David V.
Robots with Theory of Mind for Humans: A Survey Inproceedings
In: 2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp. 993–1000, 2022, (ISSN: 1944-9437).
Abstract | Links | BibTeX | Tags:
@inproceedings{gurney_robots_2022,
  title     = {Robots with {Theory of Mind} for Humans: A Survey},
  author    = {Nikolos Gurney and David V. Pynadath},
  url       = {https://ieeexplore.ieee.org/abstract/document/9900662},
  doi       = {10.1109/RO-MAN53752.2022.9900662},
  issn      = {1944-9437},
  year      = {2022},
  date      = {2022-08-01},
  booktitle = {2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
  pages     = {993--1000},
  abstract  = {Theory of Mind (ToM) is a psychological construct that captures the ability to ascribe mental states to others and then use those representations for explaining and predicting behavior. We review recent progress in endowing artificially intelligent robots with ToM. A broad array of modeling, experimental, and benchmarking approaches and methods are present in the extant literature. Unlike other domains of human cognition for which research has achieved super-human capabilities, ToM for robots lacks a unified construct and is not consistently benchmarked or validated—realities which possibly hinder progress in this domain. We argue that this is, at least in part, due to inconsistent defining of ToM, no presence of a unifying modeling construct, and the absence of a shared data resource. We believe these would improve the ability of the research community to compare the ToM abilities of different systems. We suggest that establishing a shared definition of ToM, creating a shared data resource that supports consistent benchmarking \& validation, and developing a generalized modeling tool are critical steps towards giving robots ToM capabilities that lay observers will recognize as such.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Speggiorin, Alessandro; Dalton, Jeffrey; Leuski, Anton
TaskMAD: A Platform for Multimodal Task-Centric Knowledge-Grounded Conversational Experimentation Inproceedings
In: Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 3240–3244, ACM, Madrid Spain, 2022, ISBN: 978-1-4503-8732-3.
Links | BibTeX | Tags: Dialogue, DTIC, UARC
@inproceedings{speggiorin_taskmad_2022,
  title     = {{TaskMAD}: A Platform for Multimodal Task-Centric Knowledge-Grounded Conversational Experimentation},
  author    = {Alessandro Speggiorin and Jeffrey Dalton and Anton Leuski},
  url       = {https://dl.acm.org/doi/10.1145/3477495.3531679},
  doi       = {10.1145/3477495.3531679},
  isbn      = {978-1-4503-8732-3},
  year      = {2022},
  date      = {2022-07-01},
  urldate   = {2022-09-22},
  booktitle = {Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval},
  pages     = {3240--3244},
  publisher = {ACM},
  address   = {Madrid Spain},
  keywords  = {Dialogue, DTIC, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Karkada, Deepthi; Manuvinakurike, Ramesh; Paetzel-Prüsmann, Maike; Georgila, Kallirroi
Strategy-level Entrainment of Dialogue System Users in a Creative Visual Reference Resolution Task Inproceedings
In: Proceedings of the Thirteenth Language Resources and Evaluation Conference, pp. 5768–5777, European Language Resources Association, Marseille, France, 2022.
Abstract | Links | BibTeX | Tags:
@inproceedings{karkada_strategy-level_2022,
  author    = {Deepthi Karkada and Ramesh Manuvinakurike and Maike Paetzel-Prüsmann and Kallirroi Georgila},
  title     = {Strategy-level Entrainment of Dialogue System Users in a Creative Visual Reference Resolution Task},
  booktitle = {Proceedings of the Thirteenth Language Resources and Evaluation Conference},
  pages     = {5768--5777},
  publisher = {European Language Resources Association},
  address   = {Marseille, France},
  year      = {2022},
  date      = {2022-06-01},
  urldate   = {2023-03-31},
  url       = {https://aclanthology.org/2022.lrec-1.620},
  abstract  = {In this work, we study entrainment of users playing a creative reference resolution game with an autonomous dialogue system. The language understanding module in our dialogue system leverages annotated human-wizard conversational data, openly available knowledge graphs, and crowd-augmented data. Unlike previous entrainment work, our dialogue system does not attempt to make the human conversation partner adopt lexical items in their dialogue, but rather to adapt their descriptive strategy to one that is simpler to parse for our natural language understanding unit. By deploying this dialogue system through a crowd-sourced study, we show that users indeed entrain on a “strategy-level” without the change of strategy impinging on their creativity. Our work thus presents a promising future research direction for developing dialogue management systems that can strategically influence people's descriptive strategy to ease the system's language understanding in creative tasks.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Tur, Ada; Traum, David
Comparing Approaches to Language Understanding for Human-Robot Dialogue: An Error Taxonomy and Analysis Inproceedings
In: Proceedings of the Thirteenth Language Resources and Evaluation Conference, pp. 5813–5820, European Language Resources Association, Marseille, France, 2022.
Abstract | Links | BibTeX | Tags:
@inproceedings{tur_comparing_2022,
  title     = {Comparing Approaches to Language Understanding for Human-Robot Dialogue: An Error Taxonomy and Analysis},
  author    = {Ada Tur and David Traum},
  url       = {https://aclanthology.org/2022.lrec-1.625},
  year      = {2022},
  date      = {2022-06-01},
  urldate   = {2023-02-10},
  booktitle = {Proceedings of the Thirteenth Language Resources and Evaluation Conference},
  pages     = {5813--5820},
  publisher = {European Language Resources Association},
  address   = {Marseille, France},
  abstract  = {In this paper, we compare two different approaches to language understanding for a human-robot interaction domain in which a human commander gives navigation instructions to a robot. We contrast a relevance-based classifier with a GPT-2 model, using about 2000 input-output examples as training data. With this level of training data, the relevance-based model outperforms the GPT-2 based model 79\% to 8\%. We also present a taxonomy of types of errors made by each model, indicating that they have somewhat different strengths and weaknesses, so we also examine the potential for a combined model.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chen, Haiwei; Liu, Jiayi; Chen, Weikai; Liu, Shichen; Zhao, Yajie
Exemplar-based Pattern Synthesis with Implicit Periodic Field Network Inproceedings
In: 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3698–3707, IEEE, New Orleans, LA, USA, 2022, ISBN: 978-1-66546-946-3.
@inproceedings{chen_exemplar-based_2022,
  author    = {Haiwei Chen and Jiayi Liu and Weikai Chen and Shichen Liu and Yajie Zhao},
  title     = {Exemplar-based Pattern Synthesis with Implicit Periodic Field Network},
  booktitle = {2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
  pages     = {3698--3707},
  publisher = {IEEE},
  address   = {New Orleans, LA, USA},
  year      = {2022},
  date      = {2022-06-01},
  urldate   = {2023-02-10},
  doi       = {10.1109/CVPR52688.2022.00369},
  isbn      = {978-1-66546-946-3},
  url       = {https://ieeexplore.ieee.org/document/9879904/},
  keywords  = {VGL},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
}
Tadimeti, Divya; Georgila, Kallirroi; Traum, David
Evaluation of Off-the-shelf Speech Recognizers on Different Accents in a Dialogue Domain Inproceedings
In: Proceedings of the Language Resources and Evaluation Conference, pp. 6001–6008, European Language Resources Association, Marseille, France, 2022.
Abstract | Links | BibTeX | Tags:
@inproceedings{tadimeti_evaluation_2022,
  title     = {Evaluation of Off-the-shelf Speech Recognizers on Different Accents in a Dialogue Domain},
  author    = {Divya Tadimeti and Kallirroi Georgila and David Traum},
  url       = {https://aclanthology.org/2022.lrec-1.645},
  year      = {2022},
  date      = {2022-06-01},
  booktitle = {Proceedings of the Language Resources and Evaluation Conference},
  pages     = {6001--6008},
  publisher = {European Language Resources Association},
  address   = {Marseille, France},
  abstract  = {We evaluate several publicly available off-the-shelf (commercial and research) automatic speech recognition (ASR) systems on dialogue agent-directed English speech from speakers with General American vs. non-American accents. Our results show that the performance of the ASR systems for non-American accents is considerably worse than for General American accents. Depending on the recognizer, the absolute difference in performance between General American accents and all non-American accents combined can vary approximately from 2\% to 12\%, with relative differences varying approximately between 16\% and 49\%. This drop in performance becomes even larger when we consider specific categories of non-American accents indicating a need for more diligent collection of and training on non-native English speaker data in order to narrow this performance gap. There are performance differences across ASR systems, and while the same general pattern holds, with more errors for non-American accents, there are some accents for which the best recognizer is different than in the overall case. We expect these results to be useful for dialogue system designers in developing more robust inclusive dialogue systems, and for ASR providers in taking into account performance requirements for different accents.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mozgai, Sharon; Winn, Jade; Kaurloto, Cari; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno
Toward a Semi-Automated Scoping Review of Virtual Human Smiles Inproceedings
In: Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop, 2022.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{mozgai_toward_2022,
  author    = {Sharon Mozgai and Jade Winn and Cari Kaurloto and Andrew Leeds and Dirk Heylen and Arno Hartholt},
  title     = {Toward a Semi-Automated Scoping Review of Virtual Human Smiles},
  booktitle = {Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop},
  year      = {2022},
  date      = {2022-06-01},
  url       = {http://www.lrec-conf.org/proceedings/lrec2022/workshops/SmiLa/index.html},
  keywords  = {DTIC, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Tran, Minh; Soleymani, Mohammad
A Pre-Trained Audio-Visual Transformer for Emotion Recognition Inproceedings
In: ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 4698–4702, IEEE, Singapore, Singapore, 2022, ISBN: 978-1-66540-540-9.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{tran_pre-trained_2022,
  author    = {Minh Tran and Mohammad Soleymani},
  title     = {A Pre-Trained Audio-Visual Transformer for Emotion Recognition},
  booktitle = {ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages     = {4698--4702},
  publisher = {IEEE},
  address   = {Singapore, Singapore},
  year      = {2022},
  date      = {2022-05-01},
  urldate   = {2022-09-23},
  doi       = {10.1109/ICASSP43922.2022.9747278},
  isbn      = {978-1-66540-540-9},
  url       = {https://ieeexplore.ieee.org/document/9747278/},
  keywords  = {DTIC, Emotions, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Zhu, Haidong; Zheng, Zhaoheng; Soleymani, Mohammad; Nevatia, Ram
Self-Supervised Learning for Sentiment Analysis via Image-Text Matching Inproceedings
In: ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1710–1714, IEEE, Singapore, Singapore, 2022, ISBN: 978-1-66540-540-9.
Links | BibTeX | Tags: Emotions
@inproceedings{zhu_self-supervised_2022,
  title     = {Self-Supervised Learning for Sentiment Analysis via Image-Text Matching},
  author    = {Haidong Zhu and Zhaoheng Zheng and Mohammad Soleymani and Ram Nevatia},
  url       = {https://ieeexplore.ieee.org/document/9747819/},
  doi       = {10.1109/ICASSP43922.2022.9747819},
  isbn      = {978-1-66540-540-9},
  year      = {2022},
  date      = {2022-05-01},
  urldate   = {2022-09-23},
  booktitle = {ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages     = {1710--1714},
  publisher = {IEEE},
  address   = {Singapore, Singapore},
  keywords  = {Emotions},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Leeds, Andrew; Kim, Kevin; Gordon, Andrew; McCullough, Kyle; Ustun, Volkan; Mozgai, Sharon
Demonstrating the Rapid Integration & Development Environment (RIDE): Embodied Conversational Agent (ECA) and Multiagent Capabilities Inproceedings
In: Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems, pp. 1902–1904, International Foundation for Autonomous Agents and Multiagent Systems, Richland, SC, 2022, ISBN: 978-1-4503-9213-6.
Abstract | BibTeX | Tags: AI, DTIC, Integration Technology, Machine Learning, UARC, Virtual Humans
@inproceedings{hartholt_demonstrating_2022,
  title     = {Demonstrating the Rapid Integration \& Development Environment ({RIDE}): Embodied Conversational Agent ({ECA}) and Multiagent Capabilities},
  author    = {Arno Hartholt and Ed Fast and Andrew Leeds and Kevin Kim and Andrew Gordon and Kyle McCullough and Volkan Ustun and Sharon Mozgai},
  isbn      = {978-1-4503-9213-6},
  year      = {2022},
  date      = {2022-05-01},
  urldate   = {2022-09-20},
  booktitle = {Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems},
  pages     = {1902--1904},
  publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
  address   = {Richland, SC},
  series    = {AAMAS '22},
  abstract  = {We demonstrate the Rapid Integration \& Development Environment (RIDE), a research and development platform that enables rapid prototyping in support of multiagents and embodied conversational agents. RIDE is based on commodity game engines and includes a flexible architecture, system interoperability, and native support for artificial intelligence and machine learning frameworks.},
  keywords  = {AI, DTIC, Integration Technology, Machine Learning, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Weeks, Danaan DeNeve
Tar AR: Researching How Augmented Reality Activities Can Facilitate Visitor Learning at La Brea Tar Pits Inproceedings
In: GSA, 2022.
@inproceedings{deneve_weeks_tar_2022,
  title     = {Tar {AR}: Researching How Augmented Reality Activities Can Facilitate Visitor Learning at {La Brea Tar Pits}},
  author    = {DeNeve Weeks, Danaan},
  url       = {https://gsa.confex.com/gsa/2022CD/webprogram/Paper373373.html},
  year      = {2022},
  date      = {2022-03-01},
  urldate   = {2023-03-31},
  publisher = {GSA},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gurney, Nikolos; Pynadath, David V.; Wang, Ning
Measuring and Predicting Human Trust in Recommendations from an AI Teammate Inproceedings
In: Degen, Helmut; Ntoa, Stavroula (Ed.): Artificial Intelligence in HCI, pp. 22–34, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-05643-7.
Abstract | Links | BibTeX | Tags:
@inproceedings{gurney_measuring_2022,
  title     = {Measuring and Predicting Human Trust in Recommendations from an {AI} Teammate},
  author    = {Nikolos Gurney and David V. Pynadath and Ning Wang},
  editor    = {Helmut Degen and Stavroula Ntoa},
  url       = {https://link.springer.com/chapter/10.1007/978-3-031-05643-7_2},
  doi       = {10.1007/978-3-031-05643-7_2},
  isbn      = {978-3-031-05643-7},
  year      = {2022},
  date      = {2022-01-01},
  booktitle = {Artificial Intelligence in HCI},
  pages     = {22--34},
  publisher = {Springer International Publishing},
  address   = {Cham},
  series    = {Lecture Notes in Computer Science},
  abstract  = {Predicting compliance with AI recommendations and knowing when to intervene are critical facets of human-AI teaming. AIs are typically deployed in settings where their abilities to evaluate decision variables far exceed the abilities of their human counterparts. However, even though AIs excel at weighing multiple issues and computing near optimal solutions with speed and accuracy beyond that of any human, they still make mistakes. Thus, perfect compliance may be undesirable. This means, just as individuals must know when to follow the advice of other people, it is critical for them to know when to adopt the recommendations from their AI. Well-calibrated trust is thought to be a fundamental aspect of this type of knowledge. We compare the ability of a common trust inventory and the ability of a behavioral measure of trust to predict compliance and success in a reconnaissance mission. We interpret the experimental results to suggest that the behavioral measure is a better predictor of overall mission compliance and success. We discuss how this measure could possibly be used in compliance interventions and related open questions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wang, Ning; Greenwald, Eric; Montgomery, Ryan; Leitner, Maxyn
ARIN-561: An Educational Game for Learning Artificial Intelligence for High-School Students Inproceedings
In: Rodrigo, Maria Mercedes; Matsuda, Noburu; Cristea, Alexandra I.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium, pp. 528–531, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-11647-6.
Abstract | Links | BibTeX | Tags:
@inproceedings{wang_arin-561_2022,
  title     = {{ARIN-561}: An Educational Game for Learning Artificial Intelligence for High-School Students},
  author    = {Ning Wang and Eric Greenwald and Ryan Montgomery and Maxyn Leitner},
  editor    = {Maria Mercedes Rodrigo and Noburu Matsuda and Alexandra I. Cristea and Vania Dimitrova},
  url       = {https://link.springer.com/chapter/10.1007/978-3-031-11647-6_108},
  doi       = {10.1007/978-3-031-11647-6_108},
  isbn      = {978-3-031-11647-6},
  year      = {2022},
  date      = {2022-01-01},
  booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium},
  pages     = {528--531},
  publisher = {Springer International Publishing},
  address   = {Cham},
  series    = {Lecture Notes in Computer Science},
  abstract  = {Artificial Intelligence (AI) is increasingly vital to our future generations, who will join a workforce that utilizes AI-driven tools and contributes to the advancement of AI. Today’s students will need exposure to AI knowledge at a younger age. Relatively little is currently known about how to most effectively provide AI education to K-12 students. In this paper, we discuss the design and evaluation of an educational game for high-school AI education called ARIN-561. Results from pilot studies indicate the potential of ARIN-561 to build AI knowledge, especially when students spend more time in the game.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wang, Ning; Karpurapu, Abhilash; Jajodia, Aditya; Merchant, Chirag
Toward Charismatic Virtual Agents: How to Animate Your Speech and Be Charismatic Inproceedings
In: Kurosu, Masaaki (Ed.): Human-Computer Interaction. User Experience and Behavior, pp. 580–590, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-05412-9.
Abstract | Links | BibTeX | Tags:
@inproceedings{wang_toward_2022,
  title     = {Toward Charismatic Virtual Agents: How to Animate Your Speech and Be Charismatic},
  author    = {Ning Wang and Abhilash Karpurapu and Aditya Jajodia and Chirag Merchant},
  editor    = {Masaaki Kurosu},
  url       = {https://link.springer.com/chapter/10.1007/978-3-031-05412-9_39},
  doi       = {10.1007/978-3-031-05412-9_39},
  isbn      = {978-3-031-05412-9},
  year      = {2022},
  date      = {2022-01-01},
  booktitle = {Human-Computer Interaction. User Experience and Behavior},
  pages     = {580--590},
  publisher = {Springer International Publishing},
  address   = {Cham},
  series    = {Lecture Notes in Computer Science},
  abstract  = {Charisma is a powerful device of communication and persuasion. Researchers have pinpointed specific behaviors that contribute to the perception of charisma. How can we realize such behaviors in a virtual character? In this paper, we discuss our work in the design of charismatic behavior for a virtual human. We developed a series of verbal charismatic strategies based on the research on charismatic leaders, which was then used to re-write an existing tutorial on the human circulatory system to express charisma. We then collected voice recordings of the tutorial in both charismatic and non-charismatic voices using actors from a crowd-sourcing platform. In this paper, we present the analysis of the charismatic and non-charismatic voice recordings, and discuss what nonverbal behaviors in speeches contribute to perceived charisma. Results can shed light on the synthesis of charismatic speeches for virtual characters.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gurney, Nikolos; King, Tyler; Miller, John H.
An Experimental Method for Studying Complex Choices Inproceedings
In: Stephanidis, Constantine; Antona, Margherita; Ntoa, Stavroula; Salvendy, Gavriel (Ed.): HCI International 2022 – Late Breaking Posters, pp. 39–45, Springer Nature Switzerland, Cham, 2022, ISBN: 978-3-031-19679-9.
Abstract | Links | BibTeX | Tags:
@inproceedings{gurney_experimental_2022,
  title     = {An Experimental Method for Studying Complex Choices},
  author    = {Nikolos Gurney and Tyler King and John H. Miller},
  editor    = {Constantine Stephanidis and Margherita Antona and Stavroula Ntoa and Gavriel Salvendy},
  url       = {https://link.springer.com/chapter/10.1007/978-3-031-19679-9_6},
  doi       = {10.1007/978-3-031-19679-9_6},
  isbn      = {978-3-031-19679-9},
  year      = {2022},
  date      = {2022-01-01},
  booktitle = {HCI International 2022 – Late Breaking Posters},
  pages     = {39--45},
  publisher = {Springer Nature Switzerland},
  address   = {Cham},
  series    = {Communications in Computer and Information Science},
  abstract  = {The promise of computational decision aids, from review sites to emerging augmented cognition technology, is the potential for better choice outcomes. This promise is grounded in the notion that we understand human decision processes well enough to design useful interventions. Although researchers have made considerable advances in the understanding of human judgment and decision making, these efforts are mostly based on the analysis of simple, often linear choices. Cumulative Prospect Theory (CPT), a famous explanation for decision making under uncertainty, was developed and validated using binary choice experiments in which options varied on a single dimension. Behavioral science has largely followed this simplified methodology. Here, we introduce an experimental paradigm specifically for studying humans making complex choices that incorporate multiple variables with nonlinear interactions. The task involves tuning dials, each of which controls a different dimension of a nonlinear problem. Initial results show that in such an environment participants demonstrate classic cognitive artifacts, such as anchoring and adjusting, along with falling into exploitive traps that prevent adequate exploration of these complex decisions. Preventing such errors suggest a potentially valuable role for deploying algorithmic decision aids to enhance decision making in complex choices.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chawla, Kushal; Lucas, Gale; May, Jonathan; Gratch, Jonathan
Opponent Modeling in Negotiation Dialogues by Related Data Adaptation Inproceedings
In: Findings of the Association for Computational Linguistics: NAACL 2022, pp. 661–674, Association for Computational Linguistics, Seattle, United States, 2022.
Links | BibTeX | Tags: DTIC, Social Simulation, UARC
@inproceedings{chawla_opponent_2022,
  title     = {Opponent Modeling in Negotiation Dialogues by Related Data Adaptation},
  author    = {Kushal Chawla and Gale Lucas and Jonathan May and Jonathan Gratch},
  url       = {https://aclanthology.org/2022.findings-naacl.50},
  doi       = {10.18653/v1/2022.findings-naacl.50},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-09-26},
  booktitle = {Findings of the Association for Computational Linguistics: NAACL 2022},
  pages     = {661--674},
  publisher = {Association for Computational Linguistics},
  address   = {Seattle, United States},
  keywords  = {DTIC, Social Simulation, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
DiNinni, Richard; Rizzo, Albert
Sensing Human Signals of Motivation Processes During STEM Tasks Inproceedings
In: Rodrigo, Maria Mercedes; Matsuda, Noburu; Cristea, Alexandra I.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium, pp. 163–167, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-11647-6.
Abstract | Links | BibTeX | Tags: DTIC, Learning Sciences
@inproceedings{dininni_sensing_2022,
  title     = {Sensing Human Signals of Motivation Processes During {STEM} Tasks},
  author    = {Richard DiNinni and Albert Rizzo},
  editor    = {Maria Mercedes Rodrigo and Noburu Matsuda and Alexandra I. Cristea and Vania Dimitrova},
  doi       = {10.1007/978-3-031-11647-6_28},
  isbn      = {978-3-031-11647-6},
  year      = {2022},
  date      = {2022-01-01},
  booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium},
  pages     = {163--167},
  publisher = {Springer International Publishing},
  address   = {Cham},
  series    = {Lecture Notes in Computer Science},
  abstract  = {This paper outlines the linking of a multi-modal sensing platform with an Intelligent Tutoring System to perceive the motivational state of the learner during STEM tasks. Motivation is a critical element to learning but receives little attention in comparison to strategies related to cognitive processes. The EMPOWER project has developed a novel platform that offers researchers an opportunity to capture a learner’s multi-modal behavioral signals to develop models of motivation problems that can be used to develop best practice strategies for instructional systems.},
  keywords  = {DTIC, Learning Sciences},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Talbot, Thomas Brett; Chinara, Chinmay
Open Medical Gesture: An Open-Source Experiment in Naturalistic Physical Interactions for Mixed and Virtual Reality Simulations Inproceedings
In: 2022.
Abstract | Links | BibTeX | Tags: DTIC, MedVR, MR, VR
@inproceedings{brett_talbot_open_2022,
  title     = {Open Medical Gesture: An Open-Source Experiment in Naturalistic Physical Interactions for Mixed and Virtual Reality Simulations},
  author    = {Thomas Brett Talbot and Chinmay Chinara},
  url       = {https://openaccess.cms-conferences.org/#/publications/book/978-1-958651-26-1/article/978-1-958651-26-1_0},
  doi       = {10.54941/ahfe1002054},
  year      = {2022},
  date      = {2022-01-01},
  urldate   = {2022-09-13},
  abstract  = {Mixed (MR) and Virtual Reality (VR) simulations are hampered by requirements for hand controllers or attempts to perseverate in use of two-dimensional computer interface paradigms from the 1980s. From our efforts to produce more naturalistic interactions for combat medic training for the military, we have developed an open-source toolkit that enables direct hand controlled responsive interactions that is sensor independent and can function with depth sensing cameras, webcams or sensory gloves. From this research and review of current literature, we have discerned several best approaches for hand-based human computer interactions which provide intuitive, responsive, useful, and low frustration experiences for VR users. The center of an effective gesture system is a universal hand model that can map to inputs from several different kinds of sensors rather than depending on a specific commercial product. Parts of the hand are effectors in simulation space with a physics-based model. Therefore, translational and rotational forces from the hands will impact physical objects in VR which varies based on the mass of the virtual objects. We incorporate computer code w/ objects, calling them “Smart Objects”, which allows such objects to have movement properties and collision detection for expected manipulation. Examples of smart objects include scissors, a ball, a turning knob, a moving lever, or a human figure with moving limbs. Articulation points contain collision detectors and code to assist in expected hand actions. We include a library of more than 40 Smart Objects in the toolkit. Thus, is it possible to throw a ball, hit that ball with a bat, cut a bandage, turn on a ventilator or to lift and inspect a human arm. We mediate the interaction of the hands with virtual objects. Hands often violate the rules of a virtual world simply by passing through objects. One must interpret user intent. 
This can be achieved by introducing stickiness of the hands to objects. If the human’s hands overshoot an object, we place the hand onto that object’s surface unless the hand passes the object by a significant distance. We also make hands and fingers contact an object according to the object’s contours and do not allow fingers to sink into the interior of an object. Haptics, or a sense of physical resistance and tactile sensation from contacting physical objects is a supremely difficult technical challenge and is an expensive pursuit. Our approach ignores true haptics, but we have experimented with an alternative approach, called audio tactile synesthesia where we substitute the sensation of touch for that of sound. The idea is to associate parts of each hand with a tone of a specific frequency upon contacting objects. The attack rate of the sound envelope varies with the velocity of contact and hardness of the object being ‘touched’. Such sounds can feel softer or harder depending on the nature of ‘touch’ being experienced. This substitution technique can provide tactile feedback through indirect, yet still naturalistic means. The artificial intelligence (AI) technique to determine discrete hand gestures and motions within the physical space is a special form of AI called Long Short Term Memory (LSTM). LSTM allows much faster and flexible recognition than other machine learning approaches. LSTM is particularly effective with points in motion. Latency of recognition is very low. In addition to LSTM, we employ other synthetic vision \& object recognition AI to the discrimination of real-world objects. This allows for methods to conduct virtual simulations. For example, it is possible to pick up a virtual syringe and inject a medication into a virtual patient through hand motions. We track the hand points to contact with the virtual syringe. We also detect when the hand is compressing the syringe plunger. 
We could also use virtual medications \& instruments on human actors or manikins, not just on virtual objects. With object recognition AI, we can place a syringe on a tray in the physical world. The human user can pick up the syringe and use it on a virtual patient. Thus, we are able to blend physical and virtual simulation together seamlessly in a highly intuitive and naturalistic manner. The techniques and technologies explained here represent a baseline capability whereby interacting in mixed and virtual reality can now be much more natural and intuitive than it has ever been. We have now passed a threshold where we can do away with game controllers and magnetic trackers for VR. This advancement will contribute to greater adoption of VR solutions. To foster this, our team has committed to freely sharing these technologies for all purposes and at no cost as an open-source tool. We encourage the scientific, research, educational and medical communities to adopt these resources and determine their effectiveness and utilize these tools and practices to grow the body of useful VR applications.},
  keywords  = {DTIC, MedVR, MR, VR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2021
Tran, Minh; Bradley, Ellen; Matvey, Michelle; Woolley, Joshua; Soleymani, Mohammad
Modeling Dynamics of Facial Behavior for Mental Health Assessment Inproceedings
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–5, IEEE, Jodhpur, India, 2021, ISBN: 978-1-66543-176-7.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{tran_modeling_2021,
  title     = {Modeling Dynamics of Facial Behavior for Mental Health Assessment},
  author    = {Minh Tran and Ellen Bradley and Michelle Matvey and Joshua Woolley and Mohammad Soleymani},
  url       = {https://ieeexplore.ieee.org/document/9666955/},
  doi       = {10.1109/FG52635.2021.9666955},
  isbn      = {978-1-66543-176-7},
  year      = {2021},
  date      = {2021-12-01},
  urldate   = {2022-09-23},
  booktitle = {2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)},
  pages     = {1--5},
  publisher = {IEEE},
  address   = {Jodhpur, India},
  keywords  = {DTIC, Emotions, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Yin, Yufeng; Lu, Liupei; Wu, Yizhen; Soleymani, Mohammad
Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection Inproceedings
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–8, IEEE, Jodhpur, India, 2021, ISBN: 978-1-66543-176-7.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{yin_self-supervised_2021,
  title     = {Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection},
  author    = {Yufeng Yin and Liupei Lu and Yizhen Wu and Mohammad Soleymani},
  url       = {https://ieeexplore.ieee.org/document/9667048/},
  doi       = {10.1109/FG52635.2021.9667048},
  isbn      = {978-1-66543-176-7},
  year      = {2021},
  date      = {2021-12-01},
  urldate   = {2022-09-23},
  booktitle = {2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)},
  pages     = {1--8},
  publisher = {IEEE},
  address   = {Jodhpur, India},
  keywords  = {DTIC, Emotions, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Liu, Lixing; Gurney, Nikolos; McCullough, Kyle; Ustun, Volkan
Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations Inproceedings
In: 2021 Winter Simulation Conference (WSC), pp. 1–12, IEEE, Phoenix, AZ, USA, 2021, ISBN: 978-1-66543-311-2.
Links | BibTeX | Tags: DTIC, Learning Sciences, UARC, Virtual Humans
@inproceedings{liu_graph_2021,
  title     = {Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations},
  author    = {Lixing Liu and Nikolos Gurney and Kyle McCullough and Volkan Ustun},
  url       = {https://ieeexplore.ieee.org/document/9715433/},
  doi       = {10.1109/WSC52266.2021.9715433},
  isbn      = {978-1-66543-311-2},
  year      = {2021},
  date      = {2021-12-01},
  urldate   = {2022-09-21},
  booktitle = {2021 Winter Simulation Conference (WSC)},
  pages     = {1--12},
  publisher = {IEEE},
  address   = {Phoenix, AZ, USA},
  keywords  = {DTIC, Learning Sciences, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Li, Jiaman; Villegas, Ruben; Ceylan, Duygu; Yang, Jimei; Kuang, Zhengfei; Li, Hao; Zhao, Yajie
Task-Generic Hierarchical Human Motion Prior using VAEs Inproceedings
In: 2021 International Conference on 3D Vision (3DV), pp. 771–781, IEEE, London, United Kingdom, 2021, ISBN: 978-1-66542-688-6.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{li_task-generic_2021,
  title     = {Task-Generic Hierarchical Human Motion Prior using {VAEs}},
  author    = {Jiaman Li and Ruben Villegas and Duygu Ceylan and Jimei Yang and Zhengfei Kuang and Hao Li and Yajie Zhao},
  url       = {https://ieeexplore.ieee.org/document/9665881/},
  doi       = {10.1109/3DV53792.2021.00086},
  isbn      = {978-1-66542-688-6},
  year      = {2021},
  date      = {2021-12-01},
  urldate   = {2022-09-22},
  booktitle = {2021 International Conference on 3D Vision (3DV)},
  pages     = {771--781},
  publisher = {IEEE},
  address   = {London, United Kingdom},
  keywords  = {DTIC, UARC, VGL},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mozgai, Sharon; Leeds, Andrew; Kwok, David; Fast, Ed; Rizzo, Albert Skip; Hartholt, Arno
Building BRAVEMIND Vietnam: User-Centered Design for Virtual Reality Exposure Therapy Inproceedings
In: 2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR), pp. 247–250, 2021.
Abstract | Links | BibTeX | Tags: DTIC, MedVR, VR
@inproceedings{mozgai_building_2021,
  title     = {Building {BRAVEMIND Vietnam}: User-Centered Design for Virtual Reality Exposure Therapy},
  author    = {Sharon Mozgai and Andrew Leeds and David Kwok and Ed Fast and Albert Skip Rizzo and Arno Hartholt},
  doi       = {10.1109/AIVR52153.2021.00056},
  year      = {2021},
  date      = {2021-11-01},
  booktitle = {2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)},
  pages     = {247--250},
  abstract  = {There has been a recent resurgence of Vietnam-era Veterans seeking Veteran Affairs (VA) services for post-traumatic stress disorder (PTSD). Multiple studies have reported positive outcomes using Virtual Reality Exposure Therapy (VRET) with Iraq/Afghanistan Veteran PTSD patients, but there have been fewer investigations into the acceptability and feasibility of VRET with older Veterans. We have extended an established VRET system, BRAVEMIND, to include Vietnam relevant content following a user-centered iterative design methodology. This paper will present the BRAVEMIND Vietnam VRET system, including setup, content, and technical architecture along with the findings of an initial focus group with Vietnam Veterans. These Veterans rated this system to be acceptable as a treatment tool for combat-related PTSD and provided valuable first-person accounts of their time in Vietnam to help guide the evolution of the VR content.},
  keywords  = {DTIC, MedVR, VR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hartholt, Arno; McCullough, Kyle; Fast, Ed; Leeds, Andrew; Mozgai, Sharon; Aris, Tim; Ustun, Volkan; Gordon, Andrew; McGroarty, Christopher
Rapid Prototyping for Simulation and Training with the Rapid Integration & Development Environment (RIDE) Inproceedings
In: 2021.
BibTeX | Tags: AI, DTIC, Integration Technology, Machine Learning, Simulation, UARC
@inproceedings{hartholt_rapid_2021,
  title     = {Rapid Prototyping for Simulation and Training with the Rapid Integration \& Development Environment ({RIDE})},
  author    = {Arno Hartholt and Kyle McCullough and Ed Fast and Andrew Leeds and Sharon Mozgai and Tim Aris and Volkan Ustun and Andrew Gordon and Christopher McGroarty},
  year      = {2021},
  date      = {2021-11-01},
  keywords  = {AI, DTIC, Integration Technology, Machine Learning, Simulation, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Davis, Matt
Augmented Reality in Natural History Museums: Impact on Visitor Engagement and Science Learning Inproceedings
In: GSA, 2021.
@inproceedings{davis_augment_2021,
  title     = {Augmented Reality in Natural History Museums: Impact on Visitor Engagement and Science Learning},
  author    = {Matt Davis},
  url       = {https://gsa.confex.com/gsa/2021AM/webprogram/Paper371425.html},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2023-03-31},
  publisher = {GSA},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Xiang, Sitao; Gu, Yuming; Xiang, Pengda; Chai, Menglei; Li, Hao; Zhao, Yajie; He, Mingming
DisUnknown: Distilling Unknown Factors for Disentanglement Learning Inproceedings
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 14790–14799, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{xiang_disunknown_2021,
  title     = {{DisUnknown}: Distilling Unknown Factors for Disentanglement Learning},
  author    = {Sitao Xiang and Yuming Gu and Pengda Xiang and Menglei Chai and Hao Li and Yajie Zhao and Mingming He},
  url       = {https://ieeexplore.ieee.org/document/9709965/},
  doi       = {10.1109/ICCV48922.2021.01454},
  isbn      = {978-1-66542-812-5},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2022-09-23},
  booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
  pages     = {14790--14799},
  publisher = {IEEE},
  address   = {Montreal, QC, Canada},
  keywords  = {DTIC, UARC, VGL},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kontogiorgos, Dimosthenis; Tran, Minh; Gustafson, Joakim; Soleymani, Mohammad
A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures Inproceedings
In: Proceedings of the 2021 International Conference on Multimodal Interaction, pp. 112–120, ACM, Montréal QC Canada, 2021, ISBN: 978-1-4503-8481-0.
Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{kontogiorgos_systematic_2021,
  title     = {A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures},
  author    = {Dimosthenis Kontogiorgos and Minh Tran and Joakim Gustafson and Mohammad Soleymani},
  url       = {https://dl.acm.org/doi/10.1145/3462244.3479887},
  doi       = {10.1145/3462244.3479887},
  isbn      = {978-1-4503-8481-0},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2022-09-23},
  booktitle = {Proceedings of the 2021 International Conference on Multimodal Interaction},
  pages     = {112--120},
  publisher = {ACM},
  address   = {Montréal QC Canada},
  keywords  = {DTIC, Emotions, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Toyoda, Yuushi; Lucas, Gale; Gratch, Jonathan
Predicting Worker Accuracy from Nonverbal Behaviour: Benefits and Potential for Algorithmic Bias Inproceedings
In: Companion Publication of the 2021 International Conference on Multimodal Interaction, pp. 25–30, ACM, Montreal QC Canada, 2021, ISBN: 978-1-4503-8471-1.
Links | BibTeX | Tags: DTIC, UARC
@inproceedings{toyoda_predicting_2021,
  title     = {Predicting Worker Accuracy from Nonverbal Behaviour: Benefits and Potential for Algorithmic Bias},
  author    = {Yuushi Toyoda and Gale Lucas and Jonathan Gratch},
  url       = {https://dl.acm.org/doi/10.1145/3461615.3485427},
  doi       = {10.1145/3461615.3485427},
  isbn      = {978-1-4503-8471-1},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2022-09-28},
  booktitle = {Companion Publication of the 2021 International Conference on Multimodal Interaction},
  pages     = {25--30},
  publisher = {ACM},
  address   = {Montreal QC Canada},
  keywords  = {DTIC, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Liu, Shichen; Zhou, Yichao; Zhao, Yajie
VaPiD: A Rapid Vanishing Point Detector via Learned Optimizers Inproceedings
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 12839–12848, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{liu_vapid_2021,
  title     = {{VaPiD}: A Rapid Vanishing Point Detector via Learned Optimizers},
  author    = {Shichen Liu and Yichao Zhou and Yajie Zhao},
  url       = {https://ieeexplore.ieee.org/document/9711313/},
  doi       = {10.1109/ICCV48922.2021.01262},
  isbn      = {978-1-66542-812-5},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2022-09-22},
  booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
  pages     = {12839--12848},
  publisher = {IEEE},
  address   = {Montreal, QC, Canada},
  keywords  = {DTIC, UARC, VGL},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Li, Tianye; Liu, Shichen; Bolkart, Timo; Liu, Jiayi; Li, Hao; Zhao, Yajie
Topologically Consistent Multi-View Face Inference Using Volumetric Sampling Inproceedings
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 3804–3814, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{li_topologically_2021,
  title     = {Topologically Consistent Multi-View Face Inference Using Volumetric Sampling},
  author    = {Tianye Li and Shichen Liu and Timo Bolkart and Jiayi Liu and Hao Li and Yajie Zhao},
  url       = {https://ieeexplore.ieee.org/document/9711264/},
  doi       = {10.1109/ICCV48922.2021.00380},
  isbn      = {978-1-66542-812-5},
  year      = {2021},
  date      = {2021-10-01},
  urldate   = {2022-09-22},
  booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
  pages     = {3804--3814},
  publisher = {IEEE},
  address   = {Montreal, QC, Canada},
  keywords  = {DTIC, UARC, VGL},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Leeds, Andrew; Mozgai, Sharon
Introducing VHMason: A Visual, Integrated, Multimodal Virtual Human Authoring Tool Inproceedings
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 109–111, Association for Computing Machinery, New York, NY, USA, 2021, ISBN: 978-1-4503-8619-7.
Abstract | Links | BibTeX | Tags:
@inproceedings{hartholt_introducing_2021,
  title     = {Introducing {VHMason}: A Visual, Integrated, Multimodal Virtual Human Authoring Tool},
  author    = {Arno Hartholt and Ed Fast and Andrew Leeds and Sharon Mozgai},
  url       = {https://dl.acm.org/doi/10.1145/3472306.3478363},
  doi       = {10.1145/3472306.3478363},
  isbn      = {978-1-4503-8619-7},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2023-03-31},
  booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
  pages     = {109--111},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  series    = {IVA '21},
  abstract  = {A major impediment to the success of virtual agents is the inability of non-technical experts to easily author content. To address this barrier we present VHMason, a multimodal authoring tool designed to help creative authors build embodied conversational agents. We introduce the novel aspects of this authoring tool and explore a use case of the creation of an agent-led educational experience implemented at Children's Hospital Los Angeles (CHLA).},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Yin, Yufeng; Lu, Liupei; Xiao, Yao; Xu, Zhi; Cai, Kaijie; Jiang, Haonan; Gratch, Jonathan; Soleymani, Mohammad
Contrastive Learning for Domain Transfer in Cross-Corpus Emotion Recognition Inproceedings
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-019-0.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{yin_contrastive_2021,
  title     = {Contrastive Learning for Domain Transfer in Cross-Corpus Emotion Recognition},
  author    = {Yufeng Yin and Liupei Lu and Yao Xiao and Zhi Xu and Kaijie Cai and Haonan Jiang and Jonathan Gratch and Mohammad Soleymani},
  url       = {https://ieeexplore.ieee.org/document/9597453/},
  doi       = {10.1109/ACII52823.2021.9597453},
  isbn      = {978-1-66540-019-0},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-23},
  booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)},
  pages     = {1--8},
  publisher = {IEEE},
  address   = {Nara, Japan},
  keywords  = {DTIC, Emotions, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chaffey, Patricia; Traum, David
Identity models for role-play dialogue characters Inproceedings
In: 2021.
Links | BibTeX | Tags: Dialogue, DTIC, UARC
@inproceedings{chaffey_identity_2021,
  title     = {Identity models for role-play dialogue characters},
  author    = {Patricia Chaffey and David Traum},
  url       = {http://semdial.org/anthology/papers/Z/Z21/Z21-4022/},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-23},
  booktitle = {Proceedings of the 25th Workshop on the Semantics and Pragmatics of Dialogue ({SemDial} 2021)},
  keywords  = {Dialogue, DTIC, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Bonial, Claire; Abrams, Mitchell; Baker, Anthony L.; Hudson, Taylor; Lukin, Stephanie; Traum, David; Voss, Clare
Context is key: Annotating situated dialogue relations in multi-floor dialogue Inproceedings
In: 2021.
Links | BibTeX | Tags: Dialogue, DTIC
@inproceedings{bonial_context_2021,
  title     = {Context is key: Annotating situated dialogue relations in multi-floor dialogue},
  author    = {Claire Bonial and Mitchell Abrams and Anthony L. Baker and Taylor Hudson and Stephanie Lukin and David Traum and Clare Voss},
  url       = {http://semdial.org/anthology/papers/Z/Z21/Z21-3006/},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-23},
  booktitle = {Proceedings of the 25th Workshop on the Semantics and Pragmatics of Dialogue ({SemDial} 2021)},
  keywords  = {Dialogue, DTIC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale M.; Gratch, Jonathan
Pandemic Panic: The Effect of Disaster-Related Stress on Negotiation Outcomes Inproceedings
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 148–155, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, Social Simulation, UARC
@inproceedings{mell_pandemic_2021,
  title     = {Pandemic Panic: The Effect of Disaster-Related Stress on Negotiation Outcomes},
  author    = {Johnathan Mell and Gale M. Lucas and Jonathan Gratch},
  url       = {https://dl.acm.org/doi/10.1145/3472306.3478353},
  doi       = {10.1145/3472306.3478353},
  isbn      = {978-1-4503-8619-7},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-26},
  booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
  pages     = {148--155},
  publisher = {ACM},
  address   = {Virtual Event Japan},
  keywords  = {DTIC, Social Simulation, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chawla, Kushal; Clever, Rene; Ramirez, Jaysa; Lucas, Gale; Gratch, Jonathan
Towards Emotion-Aware Agents For Negotiation Dialogues Inproceedings
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-019-0.
Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{chawla_towards_2021,
  title     = {Towards Emotion-Aware Agents For Negotiation Dialogues},
  author    = {Kushal Chawla and Rene Clever and Jaysa Ramirez and Gale Lucas and Jonathan Gratch},
  url       = {https://ieeexplore.ieee.org/document/9597427/},
  doi       = {10.1109/ACII52823.2021.9597427},
  isbn      = {978-1-66540-019-0},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-27},
  booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)},
  pages     = {1--8},
  publisher = {IEEE},
  address   = {Nara, Japan},
  keywords  = {DTIC, Emotions, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Terada, Kazunori; Okazoe, Mitsuki; Gratch, Jonathan
Effect of politeness strategies in dialogue on negotiation outcomes Inproceedings
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 195–202, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{terada_effect_2021,
  title     = {Effect of politeness strategies in dialogue on negotiation outcomes},
  author    = {Kazunori Terada and Mitsuki Okazoe and Jonathan Gratch},
  url       = {https://dl.acm.org/doi/10.1145/3472306.3478336},
  doi       = {10.1145/3472306.3478336},
  isbn      = {978-1-4503-8619-7},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-28},
  booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
  pages     = {195--202},
  publisher = {ACM},
  address   = {Virtual Event Japan},
  keywords  = {DTIC, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan; Boberg, Jill; DeVault, David; Kim, Peter; Lucas, Gale
Using Intelligent Agents to Examine Gender in Negotiations Inproceedings
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 90–97, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{johnson_using_2021,
  title     = {Using Intelligent Agents to Examine Gender in Negotiations},
  author    = {Emmanuel Johnson and Jonathan Gratch and Jill Boberg and David DeVault and Peter Kim and Gale Lucas},
  url       = {https://dl.acm.org/doi/10.1145/3472306.3478348},
  doi       = {10.1145/3472306.3478348},
  isbn      = {978-1-4503-8619-7},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-28},
  booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
  pages     = {90--97},
  publisher = {ACM},
  address   = {Virtual Event Japan},
  keywords  = {DTIC, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan
Comparing The Accuracy of Frequentist and Bayesian Models in Human-Agent Negotiation Inproceedings
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 139–144, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{johnson_comparing_2021,
  title     = {Comparing The Accuracy of Frequentist and {Bayesian} Models in Human-Agent Negotiation},
  author    = {Emmanuel Johnson and Jonathan Gratch},
  url       = {https://dl.acm.org/doi/10.1145/3472306.3478354},
  doi       = {10.1145/3472306.3478354},
  isbn      = {978-1-4503-8619-7},
  year      = {2021},
  date      = {2021-09-01},
  urldate   = {2022-09-28},
  booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
  pages     = {139--144},
  publisher = {ACM},
  address   = {Virtual Event Japan},
  keywords  = {DTIC, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}