Publications
Search
Brixey, Jacqueline; Traum, David
Towards an Automatic Speech Recognizer for the Choctaw language Proceedings Article
In: 1st Workshop on Speech for Social Good (S4SG), pp. 6–9, ISCA, 2022.
@inproceedings{brixey_towards_2022,
title = {Towards an Automatic Speech Recognizer for the {Choctaw} language},
author = {Jacqueline Brixey and David Traum},
url = {https://www.isca-speech.org/archive/s4sg_2022/brixey22_s4sg.html},
doi = {10.21437/S4SG.2022-2},
year = {2022},
date = {2022-09-01},
urldate = {2023-03-31},
booktitle = {1st Workshop on Speech for Social Good (S4SG)},
pages = {6--9},
publisher = {ISCA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Preference interdependencies in a multi-issue salary negotiation Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
@inproceedings{hale_preference_2022,
title = {Preference interdependencies in a multi-issue salary negotiation},
author = {James Hale and Peter Kim and Jonathan Gratch},
url = {https://doi.org/10.1145/3514197.3549681},
doi = {10.1145/3514197.3549681},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-27},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1--8},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '22},
abstract = {Negotiation is an important potential application domain for intelligent virtual agents but, unlike research on agent-agent negotiations, agents that negotiate with people often adopt unrealistic simplifying assumptions. These assumptions not only limit the generality of these agents, but call into question scientific findings about how people negotiate with agents. Here we relax two common assumptions: the use of assigned rather than elicited user preferences, and the use of linear utility functions. Using a simulated salary negotiation, we find that relaxing these assumptions helps reveal interesting individual differences in how people negotiate their salary and allows algorithms to find better win-win solutions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hale, James; Jalan, Harsh; Saini, Nidhi; Tan, Shao Ling; Woo, Junhyuck; Gratch, Jonathan
Negotiation game to introduce non-linear utility Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
@inproceedings{hale_negotiation_2022,
title = {Negotiation game to introduce non-linear utility},
author = {James Hale and Harsh Jalan and Nidhi Saini and Shao Ling Tan and Junhyuck Woo and Jonathan Gratch},
url = {https://doi.org/10.1145/3514197.3549678},
doi = {10.1145/3514197.3549678},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-27},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1--3},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '22},
abstract = {Much prior work in automated negotiation makes the simplifying assumption of linear utility functions. As such, we propose a framework for multilateral repeated negotiations in a complex game setting—to introduce non-linearities—where negotiators can choose with whom they negotiate in subsequent games. This game setting not only creates non-linear utility functions, but also motivates the negotiation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Eugene; McNulty, Zachary; Gentle, Alex; Pradhan, Prerak Tusharkumar; Gratch, Jonathan
Examining the impact of emotion and agency on negotiator behavior Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
@inproceedings{lee_examining_2022,
title = {Examining the impact of emotion and agency on negotiator behavior},
author = {Eugene Lee and Zachary McNulty and Alex Gentle and Prerak Tusharkumar Pradhan and Jonathan Gratch},
url = {https://doi.org/10.1145/3514197.3549673},
doi = {10.1145/3514197.3549673},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-27},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1--3},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '22},
abstract = {Virtual human expressions can shape user behavior [1, 2, 3], yet in negotiation, findings have been underwhelming. For example, human negotiators can use anger to claim value (i.e., extract concessions) [4], but anger has no effect when exhibited by a virtual human [5]. Other psychological work suggests that emotions can create value (e.g., happy negotiators can better discover tradeoffs across issues that "grow the pie"), but little research has examined how virtual human expressions shape value creation. Here we present an agent architecture and pilot study that examines differences between how the emotional expressions of human and virtual-human opponents shape value claiming and value creation. We replicate the finding that virtual human anger fails to influence value claiming but discover counter-intuitive findings on value creation. We argue these findings highlight the potential for intelligent virtual humans to yield insight into human psychology.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Li, Zongjian; Kim, Kevin; Leeds, Andrew; Mozgai, Sharon
Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, ACM, Faro Portugal, 2022, ISBN: 978-1-4503-9248-8.
@inproceedings{hartholt_re-architecting_2022,
title = {Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development},
author = {Arno Hartholt and Ed Fast and Zongjian Li and Kevin Kim and Andrew Leeds and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3514197.3549671},
doi = {10.1145/3514197.3549671},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-15},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1--8},
publisher = {ACM},
address = {Faro, Portugal},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi
Comparing Regression Methods for Dialogue System Evaluation on a Richly Annotated Corpus Proceedings Article
In: Proceedings of the 26th Workshop on the Semantics and Pragmatics of Dialogue - Full Papers, 2022.
@inproceedings{georgila_comparing_2022,
title = {Comparing Regression Methods for Dialogue System Evaluation on a Richly Annotated Corpus},
author = {Kallirroi Georgila},
url = {http://semdial.org/anthology/papers/Z/Z22/Z22-3011/},
year = {2022},
date = {2022-08-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 26th Workshop on the Semantics and Pragmatics of Dialogue - Full Papers},
abstract = {We compare various state-of-the-art regression methods for predicting user ratings of their interaction with a dialogue system using a richly annotated corpus. We vary the size of the training data and, in particular for kernel-based methods, we vary the type of kernel used. Furthermore, we experiment with various domain-independent features, including feature combinations that do not rely on complex annotations. We present detailed results in terms of root mean square error, and Pearson’s r and Spearman’s ρ correlations. Our results show that in many cases Gaussian Process Regression leads to modest but statistically significant gains compared to Support Vector Regression (a strong baseline), and that the type of kernel used matters. The gains are even larger when compared to linear regression. The larger the training data set the higher the gains but for some cases more data may result in over-fitting. Finally, some feature combinations work better than others but overall the best results are obtained when all features are used.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Gurney, Nikolos; Wang, Ning
Explainable Reinforcement Learning in Human-Robot Teams: The Impact of Decision-Tree Explanations on Transparency Proceedings Article
In: 2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp. 749–756, 2022, (ISSN: 1944-9437).
@inproceedings{pynadath_explainable_2022,
title = {Explainable Reinforcement Learning in Human-Robot Teams: The Impact of Decision-Tree Explanations on Transparency},
author = {David V. Pynadath and Nikolos Gurney and Ning Wang},
doi = {10.1109/RO-MAN53752.2022.9900608},
year = {2022},
date = {2022-08-01},
booktitle = {2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
pages = {749--756},
abstract = {Understanding the decisions of AI-driven systems and the rationale behind such decisions is key to the success of the human-robot team. However, the complexity and the "black-box" nature of many AI algorithms create a barrier for establishing such understanding within their human counterparts. Reinforcement Learning (RL), a machine-learning algorithm based on the simple idea of action-reward mappings, has a rich quantitative representation and a complex iterative reasoning process that present a significant obstacle to human understanding of, for example, how value functions are constructed, how the algorithms update the value functions, and how such updates impact the action/policy chosen by the robot. In this paper, we discuss our work to address this challenge by developing a decision-tree based explainable model for RL to make a robot’s decision-making process more transparent. Set in a human-robot virtual teaming testbed, we conducted a study to assess the impact of the explanations, generated using decision trees, on building transparency, calibrating trust, and improving the overall human-robot team’s performance. We discuss the design of the explainable model and the positive impact of the explanations on outcome measures.},
note = {ISSN: 1944-9437},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kuang, Zhengfei; Li, Jiaman; He, Mingming; Wang, Tong; Zhao, Yajie
DenseGAP: Graph-Structured Dense Correspondence Learning with Anchor Points Proceedings Article
In: pp. 542–549, IEEE Computer Society, 2022, ISBN: 978-1-66549-062-7.
@inproceedings{kuang_densegap_2022,
title = {{DenseGAP}: Graph-Structured Dense Correspondence Learning with Anchor Points},
author = {Zhengfei Kuang and Jiaman Li and Mingming He and Tong Wang and Yajie Zhao},
url = {https://www.computer.org/csdl/proceedings-article/icpr/2022/09956472/1IHpppIuqOc},
doi = {10.1109/ICPR56361.2022.9956472},
isbn = {978-1-66549-062-7},
year = {2022},
date = {2022-08-01},
urldate = {2023-03-31},
booktitle = {2022 26th International Conference on Pattern Recognition (ICPR)},
pages = {542--549},
publisher = {IEEE Computer Society},
abstract = {Establishing dense correspondence between two images is a fundamental computer vision problem, which is typically tackled by matching local feature descriptors. However, without global awareness, such local features are often insufficient for disambiguating similar regions. And computing the pairwise feature correlation across images is both computation-expensive and memory-intensive. To make the local features aware of the global context and improve their matching accuracy, we introduce DenseGAP, a new solution for efficient Dense correspondence learning with a Graph-structured neural network conditioned on Anchor Points. Specifically, we first propose a graph structure that utilizes anchor points to provide sparse but reliable prior on inter- and intra-image context and propagates them to all image points via directed edges. We also design a graph-structured network to broadcast multi-level contexts via light-weighted message-passing layers and generate high-resolution feature maps at low memory cost. Finally, based on the predicted feature maps, we introduce a coarse-to-fine framework for accurate correspondence prediction using cycle consistency. Our feature descriptors capture both local and global information, thus enabling a continuous feature field for querying arbitrary points at high resolution. Through comprehensive ablative experiments and evaluations on large-scale indoor and outdoor datasets, we demonstrate that our method advances the state-of-the-art of correspondence learning on most benchmarks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gurney, Nikolos; Pynadath, David V.
Robots with Theory of Mind for Humans: A Survey Proceedings Article
In: 2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp. 993–1000, 2022, (ISSN: 1944-9437).
@inproceedings{gurney_robots_2022,
title = {Robots with Theory of Mind for Humans: A Survey},
author = {Nikolos Gurney and David V. Pynadath},
url = {https://ieeexplore.ieee.org/abstract/document/9900662},
doi = {10.1109/RO-MAN53752.2022.9900662},
year = {2022},
date = {2022-08-01},
booktitle = {2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
pages = {993--1000},
abstract = {Theory of Mind (ToM) is a psychological construct that captures the ability to ascribe mental states to others and then use those representations for explaining and predicting behavior. We review recent progress in endowing artificially intelligent robots with ToM. A broad array of modeling, experimental, and benchmarking approaches and methods are present in the extant literature. Unlike other domains of human cognition for which research has achieved super-human capabilities, ToM for robots lacks a unified construct and is not consistently benchmarked or validated—realities which possibly hinder progress in this domain. We argue that this is, at least in part, due to inconsistent defining of ToM, no presence of a unifying modeling construct, and the absence of a shared data resource. We believe these would improve the ability of the research community to compare the ToM abilities of different systems. We suggest that establishing a shared definition of ToM, creating a shared data resource that supports consistent benchmarking \& validation, and developing a generalized modeling tool are critical steps towards giving robots ToM capabilities that lay observers will recognize as such.},
note = {ISSN: 1944-9437},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Speggiorin, Alessandro; Dalton, Jeffrey; Leuski, Anton
TaskMAD: A Platform for Multimodal Task-Centric Knowledge-Grounded Conversational Experimentation Proceedings Article
In: Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 3240–3244, ACM, Madrid Spain, 2022, ISBN: 978-1-4503-8732-3.
@inproceedings{speggiorin_taskmad_2022,
title = {{TaskMAD}: A Platform for Multimodal Task-Centric Knowledge-Grounded Conversational Experimentation},
author = {Alessandro Speggiorin and Jeffrey Dalton and Anton Leuski},
url = {https://dl.acm.org/doi/10.1145/3477495.3531679},
doi = {10.1145/3477495.3531679},
isbn = {978-1-4503-8732-3},
year = {2022},
date = {2022-07-01},
urldate = {2022-09-22},
booktitle = {Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval},
pages = {3240--3244},
publisher = {ACM},
address = {Madrid, Spain},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Karkada, Deepthi; Manuvinakurike, Ramesh; Paetzel-Prüsmann, Maike; Georgila, Kallirroi
Strategy-level Entrainment of Dialogue System Users in a Creative Visual Reference Resolution Task Proceedings Article
In: Proceedings of the Thirteenth Language Resources and Evaluation Conference, pp. 5768–5777, European Language Resources Association, Marseille, France, 2022.
@inproceedings{karkada_strategy-level_2022,
title = {Strategy-level Entrainment of Dialogue System Users in a Creative Visual Reference Resolution Task},
author = {Deepthi Karkada and Ramesh Manuvinakurike and Maike Paetzel-Prüsmann and Kallirroi Georgila},
url = {https://aclanthology.org/2022.lrec-1.620},
year = {2022},
date = {2022-06-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the Thirteenth Language Resources and Evaluation Conference},
pages = {5768--5777},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {In this work, we study entrainment of users playing a creative reference resolution game with an autonomous dialogue system. The language understanding module in our dialogue system leverages annotated human-wizard conversational data, openly available knowledge graphs, and crowd-augmented data. Unlike previous entrainment work, our dialogue system does not attempt to make the human conversation partner adopt lexical items in their dialogue, but rather to adapt their descriptive strategy to one that is simpler to parse for our natural language understanding unit. By deploying this dialogue system through a crowd-sourced study, we show that users indeed entrain on a “strategy-level” without the change of strategy impinging on their creativity. Our work thus presents a promising future research direction for developing dialogue management systems that can strategically influence people's descriptive strategy to ease the system's language understanding in creative tasks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Tur, Ada; Traum, David
Comparing Approaches to Language Understanding for Human-Robot Dialogue: An Error Taxonomy and Analysis Proceedings Article
In: Proceedings of the Thirteenth Language Resources and Evaluation Conference, pp. 5813–5820, European Language Resources Association, Marseille, France, 2022.
@inproceedings{tur_comparing_2022,
title = {Comparing Approaches to Language Understanding for Human-Robot Dialogue: An Error Taxonomy and Analysis},
author = {Ada Tur and David Traum},
url = {https://aclanthology.org/2022.lrec-1.625},
year = {2022},
date = {2022-06-01},
urldate = {2023-02-10},
booktitle = {Proceedings of the Thirteenth Language Resources and Evaluation Conference},
pages = {5813--5820},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {In this paper, we compare two different approaches to language understanding for a human-robot interaction domain in which a human commander gives navigation instructions to a robot. We contrast a relevance-based classifier with a GPT-2 model, using about 2000 input-output examples as training data. With this level of training data, the relevance-based model outperforms the GPT-2 based model 79% to 8%. We also present a taxonomy of types of errors made by each model, indicating that they have somewhat different strengths and weaknesses, so we also examine the potential for a combined model.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Haiwei; Liu, Jiayi; Chen, Weikai; Liu, Shichen; Zhao, Yajie
Exemplar-based Pattern Synthesis with Implicit Periodic Field Network Proceedings Article
In: 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3698–3707, IEEE, New Orleans, LA, USA, 2022, ISBN: 978-1-66546-946-3.
@inproceedings{chen_exemplar-based_2022,
title = {Exemplar-based Pattern Synthesis with Implicit Periodic Field Network},
author = {Haiwei Chen and Jiayi Liu and Weikai Chen and Shichen Liu and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9879904/},
doi = {10.1109/CVPR52688.2022.00369},
isbn = {978-1-66546-946-3},
year = {2022},
date = {2022-06-01},
urldate = {2023-02-10},
booktitle = {2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
pages = {3698--3707},
publisher = {IEEE},
address = {New Orleans, LA, USA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Tadimeti, Divya; Georgila, Kallirroi; Traum, David
Evaluation of Off-the-shelf Speech Recognizers on Different Accents in a Dialogue Domain Proceedings Article
In: Proceedings of the Language Resources and Evaluation Conference, pp. 6001–6008, European Language Resources Association, Marseille, France, 2022.
@inproceedings{tadimeti_evaluation_2022,
title = {Evaluation of Off-the-shelf Speech Recognizers on Different Accents in a Dialogue Domain},
author = {Divya Tadimeti and Kallirroi Georgila and David Traum},
url = {https://aclanthology.org/2022.lrec-1.645},
year = {2022},
date = {2022-06-01},
booktitle = {Proceedings of the Language Resources and Evaluation Conference},
pages = {6001--6008},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {We evaluate several publicly available off-the-shelf (commercial and research) automatic speech recognition (ASR) systems on dialogue agent-directed English speech from speakers with General American vs. non-American accents. Our results show that the performance of the ASR systems for non-American accents is considerably worse than for General American accents. Depending on the recognizer, the absolute difference in performance between General American accents and all non-American accents combined can vary approximately from 2% to 12%, with relative differences varying approximately between 16% and 49%. This drop in performance becomes even larger when we consider specific categories of non-American accents indicating a need for more diligent collection of and training on non-native English speaker data in order to narrow this performance gap. There are performance differences across ASR systems, and while the same general pattern holds, with more errors for non-American accents, there are some accents for which the best recognizer is different than in the overall case. We expect these results to be useful for dialogue system designers in developing more robust inclusive dialogue systems, and for ASR providers in taking into account performance requirements for different accents.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Winn, Jade; Kaurloto, Cari; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno
Toward a Semi-Automated Scoping Review of Virtual Human Smiles Proceedings Article
In: Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop, 2022.
@inproceedings{mozgai_toward_2022,
  title     = {Toward a Semi-Automated Scoping Review of Virtual Human Smiles},
  author    = {Sharon Mozgai and Jade Winn and Cari Kaurloto and Andrew Leeds and Dirk Heylen and Arno Hartholt},
  url       = {http://www.lrec-conf.org/proceedings/lrec2022/workshops/SmiLa/index.html},
  year      = {2022},
  date      = {2022-06-01},
  booktitle = {Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Tran, Minh; Soleymani, Mohammad
A Pre-Trained Audio-Visual Transformer for Emotion Recognition Proceedings Article
In: ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 4698–4702, IEEE, Singapore, Singapore, 2022, ISBN: 978-1-66540-540-9.
@inproceedings{tran_pre-trained_2022,
title = {A Pre-Trained Audio-Visual Transformer for Emotion Recognition},
author = {Minh Tran and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9747278/},
doi = {10.1109/ICASSP43922.2022.9747278},
isbn = {978-1-66540-540-9},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-23},
booktitle = {ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {4698--4702},
publisher = {IEEE},
address = {Singapore, Singapore},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
}
Zhu, Haidong; Zheng, Zhaoheng; Soleymani, Mohammad; Nevatia, Ram
Self-Supervised Learning for Sentiment Analysis via Image-Text Matching Proceedings Article
In: ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1710–1714, IEEE, Singapore, Singapore, 2022, ISBN: 978-1-66540-540-9.
@inproceedings{zhu_self-supervised_2022,
title = {Self-Supervised Learning for Sentiment Analysis via Image-Text Matching},
author = {Haidong Zhu and Zhaoheng Zheng and Mohammad Soleymani and Ram Nevatia},
url = {https://ieeexplore.ieee.org/document/9747819/},
doi = {10.1109/ICASSP43922.2022.9747819},
isbn = {978-1-66540-540-9},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-23},
booktitle = {ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1710--1714},
publisher = {IEEE},
address = {Singapore, Singapore},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Leeds, Andrew; Kim, Kevin; Gordon, Andrew; McCullough, Kyle; Ustun, Volkan; Mozgai, Sharon
Demonstrating the Rapid Integration & Development Environment (RIDE): Embodied Conversational Agent (ECA) and Multiagent Capabilities Proceedings Article
In: Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems, pp. 1902–1904, International Foundation for Autonomous Agents and Multiagent Systems, Richland, SC, 2022, ISBN: 978-1-4503-9213-6.
@inproceedings{hartholt_demonstrating_2022,
title = {Demonstrating the Rapid Integration \& Development Environment ({RIDE}): Embodied Conversational Agent ({ECA}) and Multiagent Capabilities},
author = {Arno Hartholt and Ed Fast and Andrew Leeds and Kevin Kim and Andrew Gordon and Kyle McCullough and Volkan Ustun and Sharon Mozgai},
isbn = {978-1-4503-9213-6},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-20},
booktitle = {Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems},
pages = {1902--1904},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Richland, SC},
series = {AAMAS '22},
abstract = {We demonstrate the Rapid Integration \& Development Environment (RIDE), a research and development platform that enables rapid prototyping in support of multiagents and embodied conversational agents. RIDE is based on commodity game engines and includes a flexible architecture, system interoperability, and native support for artificial intelligence and machine learning frameworks.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Weeks, Danaan DeNeve; Lindsey, Emily; Davis, Matt; Kennedy, Alana; Nye, Benjamin; Nelson, David; Porter, Molly; Swartout, William; Sinatra, Gale
TAR AR: Researching How Augmented Reality Activities Can Facilitate Visitor Learning at La Brea Tar Pits Proceedings Article
In: GSA, 2022.
@inproceedings{deneve_weeks_tar_2022,
title = {{TAR} {AR}: Researching How Augmented Reality Activities Can Facilitate Visitor Learning at {La Brea Tar Pits}},
author = {Danaan DeNeve Weeks and Emily Lindsey and Matt Davis and Alana Kennedy and Benjamin Nye and David Nelson and Molly Porter and William Swartout and Gale Sinatra},
url = {https://gsa.confex.com/gsa/2022CD/webprogram/Paper373373.html},
year = {2022},
date = {2022-03-01},
urldate = {2023-03-31},
publisher = {GSA},
internal-note = {review: required booktitle is missing for this inproceedings entry; the confex URL suggests a 2022 GSA section meeting — confirm and add the meeting title},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nye, Benjamin D; Jain, Aditya; Ramirez, Dilan; Core, Mark G; Swartout, William
Designing a Rapid Adaptive Content Registry (RACR) for Adaptive Learning Proceedings Article
In: 2022.
@inproceedings{nye_designing_2022,
title = {Designing a Rapid Adaptive Content Registry ({RACR}) for Adaptive Learning},
author = {Benjamin D Nye and Aditya Jain and Dilan Ramirez and Mark G Core and William Swartout},
year = {2022},
date = {2022-01-01},
internal-note = {review: required booktitle is missing for this inproceedings entry; the venue cannot be determined from this record — confirm and add},
abstract = {Despite meta-analyses showing strong learning gains for adaptive learning, few domain areas are covered by adaptive learning. A key reason for this is a content bottleneck: currently, adaptive systems require highly-trained computer scientists and educational specialists to add new content. To explore this issue, the Rapid Adaptive Content Registry (RACR) project is researching a pipeline of interactive tools designed for content managers with little or no training to incorporate content into an adaptive learning ecosystem. This prototype consists of four components:
1) Adaptive Module Registry for composing a set of learning resources and learning objectives (competencies) in an intuitive content-management UI;
2) Rapid Content Analysis Service, which leverages machine learning to analyze web pages (static or dynamic), PDFs, or short videos to generate metadata tags for competencies, estimated duration, and complexity;
3) Preview and Text Extraction interface to review, test, and manually extract text from resources; and
4) Module Simulator to analyze the ability of the available content to adapt to different simulated student patterns (e.g., struggling learner, learner starting with partial mastery, etc.)
This paper outlines the design principles, machine learning performance, and formative usability testing process for this toolkit. For this research, the performance metrics are authoring time, metadata tag quality, deployment reliability (valid content), and personalized pathways (differentiation between different kinds of learners). A comparison of machine learning models based on BERT-S to generate competency tags is presented, which indicates that a general model (not tag-specific) is reasonable for cold-start labels. Initial testing indicates potential usefulness of such a tool, but frustration with delays and limitations for tagging more complex learning resources (e.g., videos, simulations). Strategies and issues for integrating this tool into an enterprise ecosystem are also discussed, such as how specialized tools should integrate with more traditional content management systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
1) Adaptive Module Registry for composing a set of learning resources and learning objectives (competencies) in an intuitive content-management UI;
2) Rapid Content Analysis Service, which leverages machine learning to analyze web pages (static or dynamic), PDFs, or short videos to generate metadata tags for competencies, estimated duration, and complexity;
3) Preview and Text Extraction interface to review, test, and manually extract text from resources; and
4) Module Simulator to analyze the ability of the available content to adapt to different simulated student patterns (e.g., struggling learner, learner starting with partial mastery, etc.)
This paper outlines the design principles, machine learning performance, and formative usability testing process for this toolkit. For this research, the performance metrics are authoring time, metadata tag quality, deployment reliability (valid content), and personalized pathways (differentiation between different kinds of learners). A comparison of machine learning models based on BERT-S to generate competency tags is presented, which indicates that a general model (not tag-specific) is reasonable for cold-start labels. Initial testing indicates potential usefulness of such a tool, but frustration with delays and limitations for tagging more complex learning resources (e.g., videos, simulations). Strategies and issues for integrating this tool into an enterprise ecosystem are also discussed, such as how specialized tools should integrate with more traditional content management systems.
Filter
2021
Yin, Yufeng; Lu, Liupei; Wu, Yizhen; Soleymani, Mohammad
Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection Proceedings Article
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–8, IEEE, Jodhpur, India, 2021, ISBN: 978-1-66543-176-7.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{yin_self-supervised_2021,
title = {Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection},
author = {Yufeng Yin and Liupei Lu and Yizhen Wu and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9667048/},
doi = {10.1109/FG52635.2021.9667048},
isbn = {978-1-66543-176-7},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-23},
booktitle = {2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)},
pages = {1--8},
publisher = {IEEE},
address = {Jodhpur, India},
keywords = {DTIC, Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Lixing; Gurney, Nikolos; McCullough, Kyle; Ustun, Volkan
Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations Proceedings Article
In: 2021 Winter Simulation Conference (WSC), pp. 1–12, IEEE, Phoenix, AZ, USA, 2021, ISBN: 978-1-66543-311-2.
Links | BibTeX | Tags: DTIC, Learning Sciences, UARC, Virtual Humans
@inproceedings{liu_graph_2021,
title = {Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations},
author = {Lixing Liu and Nikolos Gurney and Kyle McCullough and Volkan Ustun},
url = {https://ieeexplore.ieee.org/document/9715433/},
doi = {10.1109/WSC52266.2021.9715433},
isbn = {978-1-66543-311-2},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-21},
booktitle = {2021 Winter Simulation Conference (WSC)},
pages = {1--12},
publisher = {IEEE},
address = {Phoenix, AZ, USA},
keywords = {DTIC, Learning Sciences, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Jiaman; Villegas, Ruben; Ceylan, Duygu; Yang, Jimei; Kuang, Zhengfei; Li, Hao; Zhao, Yajie
Task-Generic Hierarchical Human Motion Prior using VAEs Proceedings Article
In: 2021 International Conference on 3D Vision (3DV), pp. 771–781, IEEE, London, United Kingdom, 2021, ISBN: 978-1-66542-688-6.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{li_task-generic_2021,
title = {Task-Generic Hierarchical Human Motion Prior using {VAEs}},
author = {Jiaman Li and Ruben Villegas and Duygu Ceylan and Jimei Yang and Zhengfei Kuang and Hao Li and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9665881/},
doi = {10.1109/3DV53792.2021.00086},
isbn = {978-1-66542-688-6},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-22},
booktitle = {2021 International Conference on 3D Vision (3DV)},
pages = {771--781},
publisher = {IEEE},
address = {London, United Kingdom},
keywords = {DTIC, UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Leeds, Andrew; Kwok, David; Fast, Ed; Rizzo, Albert Skip; Hartholt, Arno
Building BRAVEMIND Vietnam: User-Centered Design for Virtual Reality Exposure Therapy Proceedings Article
In: 2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR), pp. 247–250, 2021.
Abstract | Links | BibTeX | Tags: DTIC, MedVR, VHTL, VR
@inproceedings{mozgai_building_2021,
title = {Building {BRAVEMIND} {Vietnam}: User-Centered Design for Virtual Reality Exposure Therapy},
author = {Sharon Mozgai and Andrew Leeds and David Kwok and Ed Fast and Albert Skip Rizzo and Arno Hartholt},
doi = {10.1109/AIVR52153.2021.00056},
year = {2021},
date = {2021-11-01},
booktitle = {2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)},
pages = {247--250},
abstract = {There has been a recent resurgence of Vietnam-era Veterans seeking Veteran Affairs (VA) services for post-traumatic stress disorder (PTSD). Multiple studies have reported positive outcomes using Virtual Reality Exposure Therapy (VRET) with Iraq/Afghanistan Veteran PTSD patients, but there have been fewer investigations into the acceptability and feasibility of VRET with older Veterans. We have extended an established VRET system, BRAVEMIND, to include Vietnam relevant content following a user-centered iterative design methodology. This paper will present the BRAVEMIND Vietnam VRET system, including setup, content, and technical architecture along with the findings of an initial focus group with Vietnam Veterans. These Veterans rated this system to be acceptable as a treatment tool for combat-related PTSD and provided valuable first-person accounts of their time in Vietnam to help guide the evolution of the VR content.},
keywords = {DTIC, MedVR, VHTL, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; McCullough, Kyle; Fast, Ed; Leeds, Andrew; Mozgai, Sharon; Aris, Tim; Ustun, Volkan; Gordon, Andrew; McGroarty, Christopher
Rapid Prototyping for Simulation and Training with the Rapid Integration & Development Environment (RIDE) Proceedings Article
In: 2021.
BibTeX | Tags: AI, DTIC, Integration Technology, Machine Learning, Simulation, UARC, VHTL
@inproceedings{hartholt_rapid_2021,
title = {Rapid Prototyping for Simulation and Training with the Rapid Integration \& Development Environment ({RIDE})},
author = {Arno Hartholt and Kyle McCullough and Ed Fast and Andrew Leeds and Sharon Mozgai and Tim Aris and Volkan Ustun and Andrew Gordon and Christopher McGroarty},
year = {2021},
date = {2021-11-01},
internal-note = {NOTE(review): required booktitle is missing for this @inproceedings — venue not recoverable from this record; confirm and add},
keywords = {AI, DTIC, Integration Technology, Machine Learning, Simulation, UARC, VHTL},
pubstate = {published},
tppubtype = {inproceedings}
}
Davis, Matt
Augmented Reality in Natural History Museums: Impact on Visitor Engagement and Science Learning Proceedings Article
In: GSA, 2021.
Links | BibTeX | Tags: AR, Learning Sciences, UARC
@inproceedings{davis_augment_2021,
title = {Augmented Reality in Natural History Museums: Impact on Visitor Engagement and Science Learning},
author = {Matt Davis},
url = {https://gsa.confex.com/gsa/2021AM/webprogram/Paper371425.html},
year = {2021},
date = {2021-10-01},
urldate = {2023-03-31},
publisher = {GSA},
keywords = {AR, Learning Sciences, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Xiang, Sitao; Gu, Yuming; Xiang, Pengda; Chai, Menglei; Li, Hao; Zhao, Yajie; He, Mingming
DisUnknown: Distilling Unknown Factors for Disentanglement Learning Proceedings Article
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 14790–14799, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{xiang_disunknown_2021,
title = {{DisUnknown}: Distilling Unknown Factors for Disentanglement Learning},
author = {Sitao Xiang and Yuming Gu and Pengda Xiang and Menglei Chai and Hao Li and Yajie Zhao and Mingming He},
url = {https://ieeexplore.ieee.org/document/9709965/},
doi = {10.1109/ICCV48922.2021.01454},
isbn = {978-1-66542-812-5},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-23},
booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {14790--14799},
publisher = {IEEE},
address = {Montreal, QC, Canada},
keywords = {DTIC, UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Kontogiorgos, Dimosthenis; Tran, Minh; Gustafson, Joakim; Soleymani, Mohammad
A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures Proceedings Article
In: Proceedings of the 2021 International Conference on Multimodal Interaction, pp. 112–120, ACM, Montréal QC Canada, 2021, ISBN: 978-1-4503-8481-0.
Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{kontogiorgos_systematic_2021,
title = {A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures},
author = {Dimosthenis Kontogiorgos and Minh Tran and Joakim Gustafson and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3462244.3479887},
doi = {10.1145/3462244.3479887},
isbn = {978-1-4503-8481-0},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-23},
booktitle = {Proceedings of the 2021 International Conference on Multimodal Interaction},
pages = {112--120},
publisher = {ACM},
address = {Montréal QC Canada},
keywords = {DTIC, Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Toyoda, Yuushi; Lucas, Gale; Gratch, Jonathan
Predicting Worker Accuracy from Nonverbal Behaviour: Benefits and Potential for Algorithmic Bias Proceedings Article
In: Companion Publication of the 2021 International Conference on Multimodal Interaction, pp. 25–30, ACM, Montreal QC Canada, 2021, ISBN: 978-1-4503-8471-1.
Links | BibTeX | Tags: DTIC, UARC
@inproceedings{toyoda_predicting_2021,
title = {Predicting Worker Accuracy from Nonverbal Behaviour: Benefits and Potential for Algorithmic Bias},
author = {Yuushi Toyoda and Gale Lucas and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3461615.3485427},
doi = {10.1145/3461615.3485427},
isbn = {978-1-4503-8471-1},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-28},
booktitle = {Companion Publication of the 2021 International Conference on Multimodal Interaction},
pages = {25--30},
publisher = {ACM},
address = {Montreal QC Canada},
keywords = {DTIC, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Liu, Shichen; Zhou, Yichao; Zhao, Yajie
VaPiD: A Rapid Vanishing Point Detector via Learned Optimizers Proceedings Article
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 12839–12848, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{liu_vapid_2021,
title = {{VaPiD}: A Rapid Vanishing Point Detector via Learned Optimizers},
author = {Shichen Liu and Yichao Zhou and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9711313/},
doi = {10.1109/ICCV48922.2021.01262},
isbn = {978-1-66542-812-5},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-22},
booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {12839--12848},
publisher = {IEEE},
address = {Montreal, QC, Canada},
keywords = {DTIC, UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Tianye; Liu, Shichen; Bolkart, Timo; Liu, Jiayi; Li, Hao; Zhao, Yajie
Topologically Consistent Multi-View Face Inference Using Volumetric Sampling Proceedings Article
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 3804–3814, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{li_topologically_2021,
title = {Topologically Consistent Multi-View Face Inference Using Volumetric Sampling},
author = {Tianye Li and Shichen Liu and Timo Bolkart and Jiayi Liu and Hao Li and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9711264/},
doi = {10.1109/ICCV48922.2021.00380},
isbn = {978-1-66542-812-5},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-22},
booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {3804--3814},
publisher = {IEEE},
address = {Montreal, QC, Canada},
keywords = {DTIC, UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Leeds, Andrew; Mozgai, Sharon
Introducing VHMason: A Visual, Integrated, Multimodal Virtual Human Authoring Tool Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 109–111, Association for Computing Machinery, New York, NY, USA, 2021, ISBN: 978-1-4503-8619-7.
Abstract | Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@inproceedings{hartholt_introducing_2021-1,
title = {Introducing {VHMason}: A Visual, Integrated, Multimodal Virtual Human Authoring Tool},
author = {Arno Hartholt and Ed Fast and Andrew Leeds and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3472306.3478363},
doi = {10.1145/3472306.3478363},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {109--111},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '21},
abstract = {A major impediment to the success of virtual agents is the inability of non-technical experts to easily author content. To address this barrier we present VHMason, a multimodal authoring tool designed to help creative authors build embodied conversational agents. We introduce the novel aspects of this authoring tool and explore a use case of the creation of an agent-led educational experience implemented at Children's Hospital Los Angeles (CHLA).},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Yin, Yufeng; Lu, Liupei; Xiao, Yao; Xu, Zhi; Cai, Kaijie; Jiang, Haonan; Gratch, Jonathan; Soleymani, Mohammad
Contrastive Learning for Domain Transfer in Cross-Corpus Emotion Recognition Proceedings Article
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-019-0.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{yin_contrastive_2021,
title = {Contrastive Learning for Domain Transfer in Cross-Corpus Emotion Recognition},
author = {Yufeng Yin and Liupei Lu and Yao Xiao and Zhi Xu and Kaijie Cai and Haonan Jiang and Jonathan Gratch and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9597453/},
doi = {10.1109/ACII52823.2021.9597453},
isbn = {978-1-66540-019-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-23},
booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)},
pages = {1--8},
publisher = {IEEE},
address = {Nara, Japan},
keywords = {DTIC, Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaffey, Patricia; Traum, David
Identity models for role-play dialogue characters Proceedings Article
In: 2021.
Links | BibTeX | Tags: Dialogue, DTIC, UARC
@inproceedings{chaffey_identity_2021,
title = {Identity models for role-play dialogue characters},
author = {Patricia Chaffey and David Traum},
url = {http://semdial.org/anthology/papers/Z/Z21/Z21-4022/},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-23},
internal-note = {NOTE(review): required booktitle is missing for this @inproceedings — URL suggests a SemDial 2021 workshop paper; confirm and add},
keywords = {Dialogue, DTIC, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Bonial, Claire; Abrams, Mitchell; Baker, Anthony L.; Hudson, Taylor; Lukin, Stephanie; Traum, David; Voss, Clare
Context is key: Annotating situated dialogue relations in multi-floor dialogue Proceedings Article
In: 2021.
Links | BibTeX | Tags: Dialogue, DTIC
@inproceedings{bonial_context_2021,
title = {Context is key: Annotating situated dialogue relations in multi-floor dialogue},
author = {Claire Bonial and Mitchell Abrams and Anthony L. Baker and Taylor Hudson and Stephanie Lukin and David Traum and Clare Voss},
url = {http://semdial.org/anthology/papers/Z/Z21/Z21-3006/},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-23},
internal-note = {NOTE(review): required booktitle is missing for this @inproceedings — URL suggests a SemDial 2021 workshop paper; confirm and add},
keywords = {Dialogue, DTIC},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale M.; Gratch, Jonathan
Pandemic Panic: The Effect of Disaster-Related Stress on Negotiation Outcomes Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 148–155, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, Social Simulation, UARC
@inproceedings{mell_pandemic_2021,
title = {Pandemic Panic: The Effect of Disaster-Related Stress on Negotiation Outcomes},
author = {Johnathan Mell and Gale M. Lucas and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3472306.3478353},
doi = {10.1145/3472306.3478353},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-26},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {148--155},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chawla, Kushal; Clever, Rene; Ramirez, Jaysa; Lucas, Gale; Gratch, Jonathan
Towards Emotion-Aware Agents For Negotiation Dialogues Proceedings Article
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-019-0.
Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{chawla_towards_2021,
title = {Towards Emotion-Aware Agents For Negotiation Dialogues},
author = {Kushal Chawla and Rene Clever and Jaysa Ramirez and Gale Lucas and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/9597427/},
doi = {10.1109/ACII52823.2021.9597427},
isbn = {978-1-66540-019-0},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-27},
booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction (ACII)},
pages = {1--8},
publisher = {IEEE},
address = {Nara, Japan},
keywords = {DTIC, Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Terada, Kazunori; Okazoe, Mitsuki; Gratch, Jonathan
Effect of politeness strategies in dialogue on negotiation outcomes Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 195–202, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{terada_effect_2021,
title = {Effect of politeness strategies in dialogue on negotiation outcomes},
author = {Kazunori Terada and Mitsuki Okazoe and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3472306.3478336},
doi = {10.1145/3472306.3478336},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {195--202},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan; Boberg, Jill; DeVault, David; Kim, Peter; Lucas, Gale
Using Intelligent Agents to Examine Gender in Negotiations Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 90–97, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{johnson_using_2021,
title = {Using Intelligent Agents to Examine Gender in Negotiations},
author = {Emmanuel Johnson and Jonathan Gratch and Jill Boberg and David DeVault and Peter Kim and Gale Lucas},
url = {https://dl.acm.org/doi/10.1145/3472306.3478348},
doi = {10.1145/3472306.3478348},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {90--97},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Gratch, Jonathan
Comparing The Accuracy of Frequentist and Bayesian Models in Human-Agent Negotiation Proceedings Article
In: Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents, pp. 139–144, ACM, Virtual Event Japan, 2021, ISBN: 978-1-4503-8619-7.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{johnson_comparing_2021,
title = {Comparing The Accuracy of Frequentist and Bayesian Models in Human-Agent Negotiation},
author = {Emmanuel Johnson and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3472306.3478354},
doi = {10.1145/3472306.3478354},
isbn = {978-1-4503-8619-7},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {Proceedings of the 21st ACM International Conference on Intelligent Virtual Agents},
pages = {139--144},
publisher = {ACM},
address = {Virtual Event Japan},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Sato, Motoaki; Terada, Kazunori; Gratch, Jonathan
Visualization of social emotional appraisal process of an agent Proceedings Article
In: 2021 9th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW), pp. 1–2, IEEE, Nara, Japan, 2021, ISBN: 978-1-66540-021-3.
Links | BibTeX | Tags: Emotions, Virtual Humans
@inproceedings{sato_visualization_2021,
title = {Visualization of social emotional appraisal process of an agent},
author = {Motoaki Sato and Kazunori Terada and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/9666329/},
doi = {10.1109/ACIIW52867.2021.9666329},
isbn = {978-1-66540-021-3},
year = {2021},
date = {2021-09-01},
urldate = {2022-09-28},
booktitle = {2021 9th International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)},
pages = {1--2},
publisher = {IEEE},
address = {Nara, Japan},
keywords = {Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert, et al.
Normative Data for a Next Generation Virtual Classroom for Attention Assessment in Children with ADHD and Beyond! Proceedings Article
In: Proceedings of the 13th International Conference on Disability, Virtual Reality and Associated Technologies (ICDVRAT 2021), Serpa, Portugal, 2021.
Links | BibTeX | Tags: MedVR, Virtual Humans, VR
@inproceedings{a_rizzo_et_al_normative_2021,
title = {Normative Data for a Next Generation Virtual Classroom for Attention Assessment in Children with {ADHD} and Beyond!},
author = {Rizzo, Albert and others},
url = {http://studio.hei-lab.ulusofona.pt/archive/},
year = {2021},
date = {2021-09-01},
booktitle = {Proceedings of the 13th International Conference on Disability, Virtual Reality and Associated Technologies (ICDVRAT 2021)},
address = {Serpa, Portugal},
keywords = {MedVR, Virtual Humans, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
Bonial, Claire; Abrams, Mitchell; Traum, David; Voss, Clare
Builder, we have done it: Evaluating & Extending Dialogue-AMR NLU Pipeline for Two Collaborative Domains Proceedings Article
In: Proceedings of the 14th International Conference on Computational Semantics (IWCS), pp. 173–183, Association for Computational Linguistics, Groningen, The Netherlands (online), 2021.
Abstract | Links | BibTeX | Tags: Dialogue, DTIC
@inproceedings{bonial_builder_2021,
title = {Builder, we have done it: Evaluating \& Extending {Dialogue-AMR} {NLU} Pipeline for Two Collaborative Domains},
author = {Claire Bonial and Mitchell Abrams and David Traum and Clare Voss},
url = {https://aclanthology.org/2021.iwcs-1.17},
year = {2021},
date = {2021-06-01},
urldate = {2022-09-23},
booktitle = {Proceedings of the 14th International Conference on Computational Semantics (IWCS)},
pages = {173--183},
publisher = {Association for Computational Linguistics},
address = {Groningen, The Netherlands (online)},
abstract = {We adopt, evaluate, and improve upon a two-step natural language understanding (NLU) pipeline that incrementally tames the variation of unconstrained natural language input and maps to executable robot behaviors. The pipeline first leverages Abstract Meaning Representation (AMR) parsing to capture the propositional content of the utterance, and second converts this into “Dialogue-AMR,” which augments standard AMR with information on tense, aspect, and speech acts. Several alternative approaches and training datasets are evaluated for both steps and corresponding components of the pipeline, some of which outperform the original. We extend the Dialogue-AMR annotation schema to cover a different collaborative instruction domain and evaluate on both domains. With very little training data, we achieve promising performance in the new domain, demonstrating the scalability of this approach.},
keywords = {Dialogue, DTIC},
pubstate = {published},
tppubtype = {inproceedings}
}
Bell, Benjamin; Bennett, Winston Wink; Nye, Benjamin; Kelsey, Elaine
Helping Instructor Pilots Detect and Respond to Engagement Lapses in Simulations Proceedings Article
In: Sottilare, Robert A.; Schwarz, Jessica (Ed.): Adaptive Instructional Systems. Adaptation Strategies and Methods, pp. 3–14, Springer International Publishing, Cham, 2021, ISBN: 978-3-030-77873-6.
Abstract | Links | BibTeX | Tags: Machine Learning, Virtual Humans
@inproceedings{bell_helping_2021,
title = {Helping Instructor Pilots Detect and Respond to Engagement Lapses in Simulations},
author = {Benjamin Bell and Winston Wink Bennett and Benjamin Nye and Elaine Kelsey},
editor = {Robert A. Sottilare and Jessica Schwarz},
url = {https://link.springer.com/chapter/10.1007/978-3-030-77873-6_1},
doi = {10.1007/978-3-030-77873-6_1},
isbn = {978-3-030-77873-6},
year = {2021},
date = {2021-01-01},
booktitle = {Adaptive Instructional Systems. Adaptation Strategies and Methods},
pages = {3--14},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Adapting training in real time can be challenging for instructors. Real-time simulation can present rapid sequences of events, making it difficult for an instructor to attribute errors or omissions to specific underlying gaps in skills and knowledge. Monitoring multiple students simultaneously imposes additional attentional workload on an instructor. This challenge can be further exacerbated when an instructor’s view of the student is obscured by virtual reality (VR) equipment. To support instructors’ ability to adapt training, Eduworks and USC’s Institute for Creative Technologies are developing machine learning (ML) models that can measure user engagement during training simulations and offer recommendations for restoring lapses in engagement. We have created a system, called the Observational Motivation and Engagement Generalized Appliance (OMEGA), which we tested in the context of a new U.S. Air Force approach to Specialized Undergraduate Pilot Training (SUPT) called Pilot Training Next (PTN). PTN integrates traditional flying sorties with VR-enabled ground-based training devices to achieve training efficiencies, improve readiness, and increase throughput. The virtual environment provides a rich source of raw data that machine learning models can use to associate user activity with user engagement. We created a testbed for data capture to construct the ML models, based on theoretical foundations we developed previously. Our research explores OMEGA’s potential to help alert an instructor pilot (IP) to student distraction by flagging attention and engagement lapses. Our hypothesis is that OMEGA could help an IP adapt learning, and potentially manage multiple students at the same time, with alerts of lapsed attention and recommendations for restoring engagement. To test this hypothesis, we ran pilots through multiple PTN scenarios to create data for training the model. 
In this paper, we report on work to create machine learning models using three different techniques, and present model performance data using standard machine learning metrics. We discuss the modeling approach used to generate instructor recommendations. Future work will present results from a formative evaluation using instructor pilots. These early findings provide preliminary validation for the use of ML models for learning to detect engagement from the rich data sources characteristic of virtual environments. These findings will be applicable across a broad range of conventional and VR training applications.},
keywords = {Machine Learning, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Haiwei; Liu, Shichen; Chen, Weikai; Li, Hao; Hill, Randall
Equivariant Point Network for 3D Point Cloud Analysis Proceedings Article
In: pp. 14514–14523, 2021.
Links | BibTeX | Tags: UARC, VGL
@inproceedings{chen_equivariant_2021,
title = {Equivariant Point Network for {3D} Point Cloud Analysis},
author = {Haiwei Chen and Shichen Liu and Weikai Chen and Hao Li and Randall Hill},
url = {https://openaccess.thecvf.com/content/CVPR2021/html/Chen_Equivariant_Point_Network_for_3D_Point_Cloud_Analysis_CVPR_2021_paper.html},
year = {2021},
date = {2021-01-01},
urldate = {2023-03-31},
pages = {14514--14523},
internal-note = {NOTE(review): required booktitle is missing for this @inproceedings — URL indicates CVPR 2021; confirm and add the proceedings title},
keywords = {UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Jajodia, Aditya; Karpurapu, Abhilash; Merchant, Chirag
Charisma and Learning: Designing Charismatic Behaviors for Virtual Human Tutors Proceedings Article
In: Roll, Ido; McNamara, Danielle; Sosnovsky, Sergey; Luckin, Rose; Dimitrova, Vania (Ed.): Artificial Intelligence in Education, pp. 372–377, Springer International Publishing, Cham, 2021, ISBN: 978-3-030-78270-2.
Abstract | Links | BibTeX | Tags: AI, Social Simulation, UARC
@inproceedings{wang_charisma_2021,
title = {Charisma and Learning: Designing Charismatic Behaviors for Virtual Human Tutors},
author = {Ning Wang and Aditya Jajodia and Abhilash Karpurapu and Chirag Merchant},
editor = {Ido Roll and Danielle McNamara and Sergey Sosnovsky and Rose Luckin and Vania Dimitrova},
url = {https://link.springer.com/chapter/10.1007/978-3-030-78270-2_66},
doi = {10.1007/978-3-030-78270-2_66},
isbn = {978-3-030-78270-2},
year = {2021},
date = {2021-01-01},
booktitle = {Artificial Intelligence in Education},
pages = {372--377},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Charisma is a powerful device of communication. Research on charisma on a specific type of leader in a specific type of organization – teachers in the classroom - has indicated the positive influence of a teacher’s charismatic behaviors, often referred to as immediacy behaviors, on student learning. How do we realize such behaviors in a virtual tutor? How do such behaviors impact student learning? In this paper, we discuss the design of a charismatic virtual human tutor. We developed verbal and nonverbal (with the focus on voice) charismatic strategies and realized such strategies through scripted tutorial dialogues and pre-recorded voices. A study with the virtual human tutor has shown an intriguing impact of charismatic behaviors on student learning.},
keywords = {AI, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Bell, Benjamin; Bennett, Winston “Wink”; Kelsey, Elaine; Nye, Benjamin
Attention and Engagement in Virtual Environments: Measuring the Unobservable Proceedings Article
In: 2021.
Links | BibTeX | Tags: AR, DTIC, Machine Learning, UARC, VR
@inproceedings{bell_attention_2021,
title = {Attention and Engagement in Virtual Environments: Measuring the Unobservable},
author = {Benjamin Bell and Winston {``Wink''} Bennett and Elaine Kelsey and Benjamin Nye},
url = {https://www.xcdsystem.com/iitsec/proceedings/index.cfm?Year=2021&AbID=95758&CID=862#View},
year = {2021},
date = {2021-01-01},
internal-note = {NOTE(review): required booktitle is missing for this @inproceedings — URL indicates I/ITSEC 2021 proceedings; confirm and add},
keywords = {AR, DTIC, Machine Learning, UARC, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
Cheng, Junyan; Fostiropoulos, Iordanis; Boehm, Barry; Soleymani, Mohammad
Multimodal Phased Transformer for Sentiment Analysis Proceedings Article
In: Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 2447–2458, Association for Computational Linguistics, Online and Punta Cana, Dominican Republic, 2021.
Links | BibTeX | Tags: DTIC, UARC
@inproceedings{cheng_multimodal_2021,
title = {Multimodal Phased Transformer for Sentiment Analysis},
author = {Junyan Cheng and Iordanis Fostiropoulos and Barry Boehm and Mohammad Soleymani},
url = {https://aclanthology.org/2021.emnlp-main.189},
doi = {10.18653/v1/2021.emnlp-main.189},
year = {2021},
date = {2021-01-01},
urldate = {2022-09-23},
booktitle = {Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing},
pages = {2447--2458},
publisher = {Association for Computational Linguistics},
address = {Online and Punta Cana, Dominican Republic},
keywords = {DTIC, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
He, Zihao; Tavabi, Leili; Lerman, Kristina; Soleymani, Mohammad
Speaker Turn Modeling for Dialogue Act Classification Proceedings Article
In: Findings of the Association for Computational Linguistics: EMNLP 2021, pp. 2150–2157, Association for Computational Linguistics, Punta Cana, Dominican Republic, 2021.
Links | BibTeX | Tags: Dialogue, DTIC, UARC
@inproceedings{he_speaker_2021,
title = {Speaker Turn Modeling for Dialogue Act Classification},
author = {He, Zihao and Tavabi, Leili and Lerman, Kristina and Soleymani, Mohammad},
url = {https://aclanthology.org/2021.findings-emnlp.185},
doi = {10.18653/v1/2021.findings-emnlp.185},
year = {2021},
date = {2021-01-01},
urldate = {2022-09-23},
booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2021},
pages = {2150--2157},
publisher = {Association for Computational Linguistics},
address = {Punta Cana, Dominican Republic},
keywords = {Dialogue, DTIC, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Nye, Benjamin; Nelson, David; Herrick, Imogen; Sinatra, Gale; Swartout, Bill; Porter, Molly; Davis, Matt; Lindsey, Emily
SCIENCE BIG and SMALL: Visiting the Ice Age through Miniature and Life-Sized Augmented Reality Experiences Proceedings Article
In: TMS Proceedings 2021, American Psychological Association, 2021.
Links | BibTeX | Tags: AR, MxR
@inproceedings{nye_science_2021,
title = {{SCIENCE BIG and SMALL}: Visiting the {Ice Age} through Miniature and Life-Sized Augmented Reality Experiences},
author = {Nye, Benjamin and Nelson, David and Herrick, Imogen and Sinatra, Gale and Swartout, Bill and Porter, Molly and Davis, Matt and Lindsey, Emily},
url = {https://tmb.apaopen.org/pub/djue4kjf},
doi = {10.1037/tms0000106},
year = {2021},
date = {2021-01-01},
urldate = {2022-09-21},
booktitle = {TMS Proceedings 2021},
publisher = {American Psychological Association},
keywords = {AR, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Kawano, Seiya; Yoshino, Koichiro; Traum, David; Nakamura, Satoshi
Dialogue Structure Parsing on Multi-Floor Dialogue Based on Multi-Task Learning Proceedings Article
In: 1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction, pp. 21–29, ISCA, 2021.
Abstract | Links | BibTeX | Tags: ARL, Dialogue, DTIC, Natural Language, Virtual Humans
@inproceedings{kawano_dialogue_2021,
title = {Dialogue Structure Parsing on Multi-Floor Dialogue Based on Multi-Task Learning},
author = {Kawano, Seiya and Yoshino, Koichiro and Traum, David and Nakamura, Satoshi},
url = {http://www.isca-speech.org/archive/RobotDial_2021/abstracts/4.html},
doi = {10.21437/RobotDial.2021-4},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-15},
booktitle = {1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction},
pages = {21--29},
publisher = {ISCA},
abstract = {A multi-floor dialogue consists of multiple sets of dialogue participants, each conversing within their own floor, but also at least one multicommunicating member who is a participant of multiple floors and coordinating each to achieve a shared dialogue goal. The structure of such dialogues can be complex, involving intentional structure and relations that are within or across floors. In this study, we propose a neural dialogue structure parser based on multi-task learning and an attention mechanism on multi-floor dialogues in a collaborative robot navigation domain. Our experimental results show that our proposed model improved the dialogue structure parsing performance more than those of single models, which are trained on each dialogue structure parsing task in multi-floor dialogues.},
keywords = {ARL, Dialogue, DTIC, Natural Language, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2020
Zhou, Yi; Wu, Chenglei; Li, Zimo; Cao, Chen; Ye, Yuting; Saragih, Jason; Li, Hao; Sheikh, Yaser
Fully convolutional mesh autoencoder using efficient spatially varying kernels Proceedings Article
In: Proceedings of the 34th International Conference on Neural Information Processing Systems, pp. 9251–9262, Curran Associates Inc., Red Hook, NY, USA, 2020, ISBN: 978-1-71382-954-6.
@inproceedings{zhou_fully_2020,
title = {Fully convolutional mesh autoencoder using efficient spatially varying kernels},
author = {Zhou, Yi and Wu, Chenglei and Li, Zimo and Cao, Chen and Ye, Yuting and Saragih, Jason and Li, Hao and Sheikh, Yaser},
isbn = {978-1-71382-954-6},
year = {2020},
date = {2020-12-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 34th International Conference on Neural Information Processing Systems},
pages = {9251--9262},
publisher = {Curran Associates Inc.},
address = {Red Hook, NY, USA},
series = {NIPS'20},
abstract = {Learning latent representations of registered meshes is useful for many 3D tasks. Techniques have recently shifted to neural mesh autoencoders. Although they demonstrate higher precision than traditional methods, they remain unable to capture fine-grained deformations. Furthermore, these methods can only be applied to a template-specific surface mesh, and is not applicable to more general meshes, like tetrahedrons and non-manifold meshes. While more general graph convolution methods can be employed, they lack performance in reconstruction precision and require higher memory usage. In this paper, we propose a non-template-specific fully convolutional mesh autoencoder for arbitrary registered mesh data. It is enabled by our novel convolution and (un)pooling operators learned with globally shared weights and locally varying coefficients which can efficiently capture the spatially varying contents presented by irregular mesh connections. Our model outperforms state-of-the-art methods on reconstruction accuracy. In addition, the latent codes of our network are fully localized thanks to the fully convolutional structure, and thus have much higher interpolation capability than many traditional 3D mesh generation models.},
keywords = {VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Adami, Pooya; Becerik-Gerber, Burcin; Soibelman, Lucio; Doleck, Tenzin; Copur-Gencturk, Yasemin; Lucas, Gale
An Immersive Virtual Learning Environment for Worker-Robot Collaboration on Construction Sites Proceedings Article
In: 2020 Winter Simulation Conference (WSC), pp. 2400–2411, IEEE, Orlando, FL, USA, 2020, ISBN: 978-1-72819-499-8.
Links | BibTeX | Tags: Learning Sciences
@inproceedings{adami_immersive_2020,
title = {An Immersive Virtual Learning Environment for Worker-Robot Collaboration on Construction Sites},
author = {Adami, Pooya and Becerik-Gerber, Burcin and Soibelman, Lucio and Doleck, Tenzin and Copur-Gencturk, Yasemin and Lucas, Gale},
url = {https://ieeexplore.ieee.org/document/9383944/},
doi = {10.1109/WSC48552.2020.9383944},
isbn = {978-1-72819-499-8},
year = {2020},
date = {2020-12-01},
urldate = {2022-10-24},
booktitle = {2020 Winter Simulation Conference (WSC)},
pages = {2400--2411},
publisher = {IEEE},
address = {Orlando, FL, USA},
keywords = {Learning Sciences},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Pacheco, Luz; Merchant, Chirag; Skistad, Kristian; Jethwani, Aayushi
The Design of Charismatic Behaviors for Virtual Humans Proceedings Article
In: Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents, pp. 1–8, Association for Computing Machinery, New York, NY, USA, 2020, ISBN: 978-1-4503-7586-3.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{wang_design_2020,
title = {The Design of Charismatic Behaviors for Virtual Humans},
author = {Wang, Ning and Pacheco, Luz and Merchant, Chirag and Skistad, Kristian and Jethwani, Aayushi},
url = {https://doi.org/10.1145/3383652.3423867},
doi = {10.1145/3383652.3423867},
isbn = {978-1-4503-7586-3},
year = {2020},
date = {2020-10-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents},
pages = {1--8},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '20},
abstract = {Charisma is a powerful device of communication and persuasion. Researchers have pinpointed specific behaviors that contribute to the perception of charisma. How can we realize such behaviors in a virtual character? In this paper, we discuss our work in the design of charismatic behavior for a virtual human. We developed a series of verbal and nonverbal (with the focus on voice) charismatic strategies based on the analysis of behaviors of charismatic leaders. We developed scripted speech dialogues with the verbal strategies and recorded the speeches with actors using the nonverbal strategies. The dialogue is further implemented in a virtual human, embedded in a virtual classroom, to give a lecture on the human circulatory system. We conducted a study with the virtual human to assess the impact of charismatic verbal and nonverbal behaviors on perceived charisma. The results show the positive impact of the use of verbal strategies and how the use of voice can influence such impact. The results shed light on the next steps needed to automatically generate charismatic speech, voices, and gestures for virtual characters.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Reilly, Adam; Fast, Ed; Mozgai, Sharon
Introducing Canvas: Combining Nonverbal Behavior Generation with User-Generated Content to Rapidly Create Educational Videos Proceedings Article
In: Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents, pp. 1–3, ACM, Virtual Event Scotland UK, 2020, ISBN: 978-1-4503-7586-3.
Abstract | Links | BibTeX | Tags: VHTL, Virtual Humans
@inproceedings{hartholt_introducing_2020,
title = {Introducing {Canvas}: Combining Nonverbal Behavior Generation with User-Generated Content to Rapidly Create Educational Videos},
author = {Hartholt, Arno and Reilly, Adam and Fast, Ed and Mozgai, Sharon},
url = {https://dl.acm.org/doi/10.1145/3383652.3423880},
doi = {10.1145/3383652.3423880},
isbn = {978-1-4503-7586-3},
year = {2020},
date = {2020-10-01},
booktitle = {Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents},
pages = {1--3},
publisher = {ACM},
address = {Virtual Event Scotland UK},
abstract = {Rapidly creating educational content that is effective, engaging, and low-cost is a challenge. We present Canvas, a tool for educators that addresses this challenge by enabling the generation of educational video, led by an intelligent virtual agent, that combines rapid nonverbal behavior generation techniques with end-user facing authoring tools. With Canvas, educators can easily produce compelling educational videos with a minimum of investment by leveraging existing content provided by the tool (e.g., characters and environments) while incorporating their own custom content (e.g., images and video clips). Canvas has been delivered to the Smithsonian Science Education Center and is currently being evaluated internally before wider release. We discuss the system, feature set, design process, and lessons learned.},
keywords = {VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale M.; Gratch, Jonathan
Varied Magnitude Favor Exchange in Human-Agent Negotiation Proceedings Article
In: Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents, pp. 1–8, ACM, Virtual Event Scotland UK, 2020, ISBN: 978-1-4503-7586-3.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@inproceedings{mell_varied_2020,
title = {Varied Magnitude Favor Exchange in Human-Agent Negotiation},
author = {Mell, Johnathan and Lucas, Gale M. and Gratch, Jonathan},
url = {https://dl.acm.org/doi/10.1145/3383652.3423866},
doi = {10.1145/3383652.3423866},
isbn = {978-1-4503-7586-3},
year = {2020},
date = {2020-10-01},
booktitle = {Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents},
pages = {1--8},
publisher = {ACM},
address = {Virtual Event Scotland UK},
abstract = {Agents that interact with humans in complex, social tasks need the ability to comprehend as well as employ common social strategies. In negotiation, there is ample evidence of such techniques being used efficaciously in human interchanges. In this work, we demonstrate a new design for socially-aware agents that employ one such technique—favor exchange—in order to gain value when playing against humans. In an online study of a robust, simulated social negotiation task, we show that these agents are effective against real human participants. In particular, we show that agents that ask for favors during the course of a repeated set of negotiations are more successful than those that do not. Additionally, previous work has demonstrated that humans can detect when agents betray them by failing to return favors that were previously promised. By contrast, this work indicates that these betrayal techniques may go largely undetected in complex scenarios.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Feng, Andrew; Gordon, Andrew S.
Recognizing Multiplayer Behaviors Using Synthetic Training Data Proceedings Article
In: 2020 IEEE Conference on Games (CoG), pp. 463–470, 2020, (ISSN: 2325-4289).
Abstract | Links | BibTeX | Tags: Narrative, UARC
@inproceedings{feng_recognizing_2020,
title = {Recognizing Multiplayer Behaviors Using Synthetic Training Data},
author = {Feng, Andrew and Gordon, Andrew S.},
doi = {10.1109/CoG47356.2020.9231742},
year = {2020},
date = {2020-08-01},
booktitle = {2020 IEEE Conference on Games (CoG)},
pages = {463--470},
abstract = {Accurate recognition of group behaviors is essential to the design of engaging networked multiplayer games. However, contemporary data-driven machine learning solutions are difficult to apply during the game development process, given that no authentic gameplay data is yet available for use as training data. In this paper, we investigate the use of synthetic training data, i.e., gameplay data that is generated by AI-controlled agent teams programmed to perform each of the behaviors to be recognized in groups of human players. The particular task we focus on is to recognize group movement formations in player-controlled avatars in a realistic virtual world. We choose five typical military team movement patterns for the formation recognition task and train machine learning models using procedurally generated unit trajectories as training data. The experiments were conducted using ResNet and EfficientNet, which are two popular convolutional neural network architectures for image classifications. The synthetic data is augmented by creating variations in image rotation, unit spacing, team size, and positional perturbations to bridge the gap between synthetic and human gameplay data. We demonstrate that high-accuracy behavior recognition can be achieved using deep neural networks by applying the aforementioned data augmentation methods to simulated gameplay data.},
note = {ISSN: 2325-4289},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Czyzewski, Adam; Dalton, Jeffrey; Leuski, Anton
Agent Dialogue: A Platform for Conversational Information Seeking Experimentation Proceedings Article
In: Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 2121–2124, ACM, Virtual Event China, 2020, ISBN: 978-1-4503-8016-4.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@inproceedings{czyzewski_agent_2020,
title = {{Agent Dialogue}: A Platform for Conversational Information Seeking Experimentation},
author = {Czyzewski, Adam and Dalton, Jeffrey and Leuski, Anton},
url = {https://dl.acm.org/doi/10.1145/3397271.3401397},
doi = {10.1145/3397271.3401397},
isbn = {978-1-4503-8016-4},
year = {2020},
date = {2020-07-01},
booktitle = {Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval},
pages = {2121--2124},
publisher = {ACM},
address = {Virtual Event China},
abstract = {Conversational Information Seeking (CIS) is an emerging area of Information Retrieval focused on interactive search systems. As a result there is a need for new benchmark datasets and tools to enable their creation. In this demo we present the Agent Dialogue (AD) platform, an open-source system developed for researchers to perform Wizard-of-Oz CIS experiments. AD is a scalable cloud-native platform developed with Docker and Kubernetes with a flexible and modular micro-service architecture built on production-grade stateof-the-art open-source tools (Kubernetes, gRPC streaming, React, and Firebase). It supports varied front-ends and has the ability to interface with multiple existing agent systems, including Google Assistant and open-source search libraries. It includes support for centralized structure logging as well as offline relevance annotation.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rayatdoost, Soheil; Rudrauf, David; Soleymani, Mohammad
Expression-Guided EEG Representation Learning for Emotion Recognition Proceedings Article
In: Proceedings of the ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3222–3226, IEEE, Barcelona, Spain, 2020, ISBN: 978-1-5090-6631-5.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{rayatdoost_expression-guided_2020,
title = {Expression-Guided {EEG} Representation Learning for Emotion Recognition},
author = {Rayatdoost, Soheil and Rudrauf, David and Soleymani, Mohammad},
url = {https://ieeexplore.ieee.org/document/9053004/},
doi = {10.1109/ICASSP40776.2020.9053004},
isbn = {978-1-5090-6631-5},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {3222--3226},
publisher = {IEEE},
address = {Barcelona, Spain},
abstract = {Learning a joint and coordinated representation between different modalities can improve multimodal emotion recognition. In this paper, we propose a deep representation learning approach for emotion recognition from electroencephalogram (EEG) signals guided by facial electromyogram (EMG) and electrooculogram (EOG) signals. We recorded EEG, EMG and EOG signals from 60 participants who watched 40 short videos and self-reported their emotions. A cross-modal encoder that jointly learns the features extracted from facial and ocular expressions and EEG responses was designed and evaluated on our recorded data and MAHOB-HCI, a publicly available database. We demonstrate that the proposed representation is able to improve emotion recognition performance. We also show that the learned representation can be transferred to a different database without EMG and EOG and achieve superior performance. Methods that fuse behavioral and neural responses can be deployed in wearable emotion recognition solutions, practical in situations in which computer vision expression recognition is not feasible.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Bonial, Claire; Donatelli, Lucia; Abrams, Mitchell; Lukin, Stephanie M; Tratz, Stephen; Marge, Matthew; Artstein, Ron; Traum, David; Voss, Clare R
Dialogue-AMR: Abstract Meaning Representation for Dialogue Proceedings Article
In: Proceedings of the 12th Language Resources and Evaluation Conference, pp. 12, European Language Resources Association, Marseille, France, 2020.
Abstract | Links | BibTeX | Tags: ARL, ARO-Coop, DoD, UARC, Virtual Humans
@inproceedings{bonial_dialogue-amr_2020,
title = {{Dialogue-AMR}: Abstract Meaning Representation for Dialogue},
author = {Bonial, Claire and Donatelli, Lucia and Abrams, Mitchell and Lukin, Stephanie M. and Tratz, Stephen and Marge, Matthew and Artstein, Ron and Traum, David and Voss, Clare R.},
url = {https://www.aclweb.org/anthology/2020.lrec-1.86/},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 12th Language Resources and Evaluation Conference},
pages = {12},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {This paper describes a schema that enriches Abstract Meaning Representation (AMR) in order to provide a semantic representation for facilitating Natural Language Understanding (NLU) in dialogue systems. AMR offers a valuable level of abstraction of the propositional content of an utterance; however, it does not capture the illocutionary force or speaker’s intended contribution in the broader dialogue context (e.g., make a request or ask a question), nor does it capture tense or aspect. We explore dialogue in the domain of human-robot interaction, where a conversational robot is engaged in search and navigation tasks with a human partner. To address the limitations of standard AMR, we develop an inventory of speech acts suitable for our domain, and present “Dialogue-AMR”, an enhanced AMR that represents not only the content of an utterance, but the illocutionary force behind it, as well as tense and aspect. To showcase the coverage of the schema, we use both manual and automatic methods to construct the “DialAMR” corpus—a corpus of human-robot dialogue annotated with standard AMR and our enriched Dialogue-AMR schema. Our automated methods can be used to incorporate AMR into a larger NLU pipeline supporting human-robot dialogue.},
keywords = {ARL, ARO-Coop, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Alavi, Seyed Hossein; Leuski, Anton; Traum, David
Which Model Should We Use for a Real-World Conversational Dialogue System? a Cross-Language Relevance Model or a Deep Neural Net? Proceedings Article
In: Proceedings of the 12th Language Resources and Evaluation Conference, pp. 735–742, European Language Resources Association, Marseille, France, 2020.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@inproceedings{alavi_which_2020,
title = {Which Model Should We Use for a Real-World Conversational Dialogue System? A Cross-Language Relevance Model or a Deep Neural Net?},
author = {Alavi, Seyed Hossein and Leuski, Anton and Traum, David},
url = {https://www.aclweb.org/anthology/2020.lrec-1.92/},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 12th Language Resources and Evaluation Conference},
pages = {735--742},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {We compare two models for corpus-based selection of dialogue responses: one based on cross-language relevance with a cross-language LSTM model. Each model is tested on multiple corpora, collected from two different types of dialogue source material. Results show that while the LSTM model performs adequately on a very large corpus (millions of utterances), its performance is dominated by the cross-language relevance model for a more moderate-sized corpus (ten thousands of utterances).},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lei, Su; Stefanov, Kalin; Gratch, Jonathan
Emotion or expressivity? An automated analysis of nonverbal perception in a social dilemma Proceedings Article
In: Proceedings of the 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020), pp. 8, IEEE, Buenos Aires, Argentina, 2020.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@inproceedings{lei_emotion_2020,
title = {Emotion or expressivity? An automated analysis of nonverbal perception in a social dilemma},
author = {Lei, Su and Stefanov, Kalin and Gratch, Jonathan},
url = {https://www.computer.org/csdl/proceedings-article/fg/2020/307900a770/1kecIWT5wmA},
doi = {10.1109/FG47880.2020.00123},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020)},
pages = {8},
publisher = {IEEE},
address = {Buenos Aires, Argentina},
abstract = {An extensive body of research has examined how specific emotional expressions shape social perceptions and social decisions, yet recent scholarship in emotion research has raised questions about the validity of emotion as a construct. In this article, we contrast the value of measuring emotional expressions with the more general construct of expressivity (in the sense of conveying a thought or emotion through any nonverbal behavior) and develop models that can automatically extract perceived expressivity from videos. Although less extensive, a solid body of research has shown expressivity to be an important element when studying interpersonal perception, particularly in psychiatric contexts. Here we examine the role expressivity plays in predicting social perceptions and decisions in the context of a social dilemma. We show that perceivers use more than facial expressions when making judgments of expressivity and see these expressions as conveying thoughts as well as emotions (although facial expressions and emotional attributions explain most of the variance in these judgments). We next show that expressivity can be predicted with high accuracy using Lasso and random forests. Our analysis shows that features related to motion dynamics are particularly important for modeling these judgments. We also show that learned models of expressivity have value in recognizing important aspects of a social situation. First, we revisit a previously published finding which showed that smile intensity was associated with the unexpectedness of outcomes in social dilemmas; instead, we show that expressivity is a better predictor (and explanation) of this finding. Second, we provide preliminary evidence that expressivity is useful for identifying “moments of interest” in a video sequence.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Gordon, Carla; Yanov, Volodymyr; Traum, David
Predicting Ratings of Real Dialogue Participants from Artificial Data and Ratings of Human Dialogue Observers Proceedings Article
In: Proceedings of the Twelfth Language Resources and Evaluation Conference, pp. 726–734, European Language Resources Association, Marseille, France, 2020, ISBN: 979-10-95546-34-4.
Abstract | Links | BibTeX | Tags: Natural Language, UARC
@inproceedings{georgila_predicting_2020,
title = {Predicting Ratings of Real Dialogue Participants from Artificial Data and Ratings of Human Dialogue Observers},
author = {Georgila, Kallirroi and Gordon, Carla and Yanov, Volodymyr and Traum, David},
url = {https://aclanthology.org/2020.lrec-1.91},
isbn = {979-10-95546-34-4},
year = {2020},
date = {2020-05-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the Twelfth Language Resources and Evaluation Conference},
pages = {726--734},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {We collected a corpus of dialogues in a Wizard of Oz (WOz) setting in the Internet of Things (IoT) domain. We asked users participating in these dialogues to rate the system on a number of aspects, namely, intelligence, naturalness, personality, friendliness, their enjoyment, overall quality, and whether they would recommend the system to others. Then we asked dialogue observers, i.e., Amazon Mechanical Turkers (MTurkers), to rate these dialogues on the same aspects. We also generated simulated dialogues between dialogue policies and simulated users and asked MTurkers to rate them again on the same aspects. Using linear regression, we developed dialogue evaluation functions based on features from the simulated dialogues and the MTurkers' ratings, the WOz dialogues and the MTurkers' ratings, and the WOz dialogues and the WOz participants' ratings. We applied all these dialogue evaluation functions to a held-out portion of our WOz dialogues, and we report results on the predictive power of these different types of dialogue evaluation functions. Our results suggest that for three conversational aspects (intelligence, naturalness, overall quality) just training evaluation functions on simulated data could be sufficient.},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Leeds, Andrew; Rizzo, Albert ‘Skip’
Iterative Participatory Design for VRET Domain Transfer: From Combat Exposure to Military Sexual Trauma Proceedings Article
In: Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems, pp. 8, ACM, Honolulu, HI, 2020.
Abstract | Links | BibTeX | Tags: MedVR, VHTL, Virtual Humans
@inproceedings{mozgai_iterative_2020,
title = {Iterative Participatory Design for {VRET} Domain Transfer: From Combat Exposure to Military Sexual Trauma},
author = {Mozgai, Sharon and Hartholt, Arno and Leeds, Andrew and Rizzo, Albert `Skip'},
url = {https://dl.acm.org/doi/abs/10.1145/3334480.3375219},
doi = {10.1145/3334480.3375219},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems},
pages = {8},
publisher = {ACM},
address = {Honolulu, HI},
abstract = {This case study describes the expansion of the BRAVEMIND virtual reality exposure therapy (VRET) system from the domain of combat-related posttraumatic stress disorder (PTSD) to the domain of PTSD due to Military Sexual Trauma (MST). As VRET continues to demonstrate efficacy in treating PTSD across multiple trauma types and anxiety disorders, adapting existing systems and content to new domains while simultaneously maintaining clinical integrity is becoming a high priority. To develop BRAVEMIND-MST we engaged in an iterative participatory design process with psychologists, engineers, and artists. This first-person account of our collaborative development process focuses on three key areas (1) VR Environment, (2) User-Avatar State, and (3) Events, while detailing the challenges we encountered and lessons learned. This process culminated in eight design guidelines as a first-step in defining a VRET domain transfer methodology.},
keywords = {MedVR, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Leuski, Anton; Yanov, Volodymyr; Traum, David
Human swarm interaction using plays, audibles, and a virtual spokesperson Proceedings Article
In: Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II, pp. 40, SPIE, Online Only, United States, 2020, ISBN: 978-1-5106-3603-3 978-1-5106-3604-0.
Abstract | Links | BibTeX | Tags: ARL, DoD, MxR, UARC, Virtual Humans
@inproceedings{chaffey_human_2020,
title = {Human swarm interaction using plays, audibles, and a virtual spokesperson},
author = {Chaffey, Patricia and Artstein, Ron and Georgila, Kallirroi and Pollard, Kimberly A. and Nasihati Gilani, Setareh and Krum, David M. and Nelson, David and Huynh, Kevin and Gainer, Alesia and Alavi, Seyed Hossein and Yahata, Rhys and Leuski, Anton and Yanov, Volodymyr and Traum, David},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/11413/2557573/Human-swarm-interaction-using-plays-audibles-and-a-virtual-spokesperson/10.1117/12.2557573.full},
doi = {10.1117/12.2557573},
isbn = {978-1-5106-3603-3 978-1-5106-3604-0},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II},
pages = {40},
publisher = {SPIE},
address = {Online Only, United States},
abstract = {This study explores two hypotheses about human-agent teaming: 1. Real-time coordination among a large set of autonomous robots can be achieved using predefined “plays” which define how to execute a task, and “audibles” which modify the play on the fly; 2. A spokesperson agent can serve as a representative for a group of robots, relaying information between the robots and human teammates. These hypotheses are tested in a simulated game environment: a human participant leads a search-and-rescue operation to evacuate a town threatened by an approaching wildfire, with the object of saving as many lives as possible. The participant communicates verbally with a virtual agent controlling a team of ten aerial robots and one ground vehicle, while observing a live map display with real-time location of the fire and identified survivors. Since full automation is not currently possible, two human controllers control the agent’s speech and actions, and input parameters to the robots, which then operate autonomously until the parameters are changed. Designated plays include monitoring the spread of fire, searching for survivors, broadcasting warnings, guiding residents to safety, and sending the rescue vehicle. A successful evacuation of all the residents requires personal intervention in some cases (e.g., stubborn residents) while delegating other responsibilities to the spokesperson agent and robots, all in a rapidly changing scene. The study records the participants’ verbal and nonverbal behavior in order to identify strategies people use when communicating with robotic swarms, and to collect data for eventual automation.},
keywords = {ARL, DoD, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Ruilong; Bladin, Karl; Zhao, Yajie; Chinara, Chinmay; Ingraham, Owen; Xiang, Pengda; Ren, Xinglei; Prasad, Pratusha; Kishore, Bipin; Xing, Jun; Li, Hao
Learning Formation of Physically-Based Face Attributes Proceedings Article
In: Proceedings of the CVPR 2020, IEEE, Seattle, Washington, 2020.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{li_learning_2020,
  author    = {Ruilong Li and Karl Bladin and Yajie Zhao and Chinmay Chinara and Owen Ingraham and Pengda Xiang and Xinglei Ren and Pratusha Prasad and Bipin Kishore and Jun Xing and Hao Li},
  title     = {Learning Formation of Physically-Based Face Attributes},
  booktitle = {Proceedings of the CVPR 2020},
  publisher = {IEEE},
  address   = {Seattle, Washington},
  year      = {2020},
  date      = {2020-04-01},
  url       = {https://www.computer.org/csdl/proceedings-article/cvpr/2020/716800d407/1m3oiaP9ouQ},
  doi       = {10.1109/CVPR42600.2020.00347},
  abstract  = {Based on a combined data set of 4000 high resolution facial scans, we introduce a non-linear morphable face model, capable of producing multifarious face geometry of pore-level resolution, coupled with material attributes for use in physically-based rendering. We aim to maximize the variety of face identities, while increasing the robustness of correspondence between unique components, including middle-frequency geometry, albedo maps, specular intensity maps and high-frequency displacement details. Our deep learning based generative model learns to correlate albedo and geometry, which ensures the anatomical correctness of the generated assets. We demonstrate potential use of our generative model for novel identity generation, model fitting, interpolation, animation, high fidelity data visualization, and low-to-high resolution data domain transferring. We hope the release of this generative model will encourage further cooperation between all graphics, vision, and data focused professionals, while demonstrating the cumulative value of every individual’s complete biometric profile.},
  keywords  = {Graphics, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Olszewski, Kyle; Ceylan, Duygu; Xing, Jun; Echevarria, Jose; Chen, Zhili; Chen, Weikai; Li, Hao
Intuitive, Interactive Beard and Hair Synthesis with Generative Models Proceedings Article
In: Proceedings of the CVPR 2020, IEEE, Seattle, Washington, 2020.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{olszewski_intuitive_2020,
  author    = {Kyle Olszewski and Duygu Ceylan and Jun Xing and Jose Echevarria and Zhili Chen and Weikai Chen and Hao Li},
  title     = {Intuitive, Interactive Beard and Hair Synthesis with Generative Models},
  booktitle = {Proceedings of the CVPR 2020},
  publisher = {IEEE},
  address   = {Seattle, Washington},
  year      = {2020},
  date      = {2020-04-01},
  url       = {http://arxiv.org/abs/2004.06848},
  doi       = {10.1109/CVPR42600.2020.00747},
  abstract  = {We present an interactive approach to synthesizing realistic variations in facial hair in images, ranging from subtle edits to existing hair to the addition of complex and challenging hair in images of clean-shaven subjects. To circumvent the tedious and computationally expensive tasks of modeling, rendering and compositing the 3D geometry of the target hairstyle using the traditional graphics pipeline, we employ a neural network pipeline that synthesizes realistic and detailed images of facial hair directly in the target image in under one second. The synthesis is controlled by simple and sparse guide strokes from the user defining the general structural and color properties of the target hairstyle. We qualitatively and quantitatively evaluate our chosen method compared to several alternative approaches. We show compelling interactive editing results with a prototype user interface that allows novice users to progressively refine the generated image to match their desired hairstyle, and demonstrate that our approach also allows for flexible and high-fidelity scalp hair synthesis.},
  keywords  = {Graphics, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Huang, Zeng; Xu, Yuanlu; Lassner, Christoph; Li, Hao; Tung, Tony
ARCH: Animatable Reconstruction of Clothed Humans Proceedings Article
In: Proceedings of the CVPR 2020, IEEE, Seattle, Washington, 2020.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{huang_arch_2020-1,
  title     = {{ARCH}: Animatable Reconstruction of Clothed Humans},
  author    = {Zeng Huang and Yuanlu Xu and Christoph Lassner and Hao Li and Tony Tung},
  url       = {https://www.computer.org/csdl/proceedings-article/cvpr/2020/716800d090/1m3nz4mKHzG},
  doi       = {10.1109/CVPR42600.2020.00316},
  year      = {2020},
  date      = {2020-04-01},
  booktitle = {Proceedings of the CVPR 2020},
  pages     = {3090--3099},
  publisher = {IEEE},
  address   = {Seattle, Washington},
  abstract  = {In this paper, we propose ARCH (Animatable Reconstruction of Clothed Humans), a novel end-to-end framework for accurate reconstruction of animation-ready 3D clothed humans from a monocular image. Existing approaches to digitize 3D humans struggle to handle pose variations and recover details. Also, they do not produce models that are animation ready. In contrast, ARCH is a learned pose-aware model that produces detailed 3D rigged full-body human avatars from a single unconstrained RGB image. A Semantic Space and a Semantic Deformation Field are created using a parametric 3D body estimator. They allow the transformation of 2D/3D clothed humans into a canonical space, reducing ambiguities in geometry caused by pose variations and occlusions in training data. Detailed surface geometry and appearance are learned using an implicit function representation with spatial local features. Furthermore, we propose additional per-pixel supervision on the 3D reconstruction using opacity-aware differentiable rendering. Our experiments indicate that ARCH increases the fidelity of the reconstructed humans. We obtain more than 50% lower reconstruction errors for standard metrics compared to state-of-the-art methods on public datasets. We also show numerous qualitative examples of animated, high-quality reconstructed avatars unseen in the literature so far.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings},
  internal-note = {NOTE(review): duplicate of entry huang_arch_2020 (identical DOI, title, authors); keep one entry and merge the keyword lists. Pages 3090--3099 copied from that sibling entry.}
}
Huang, Zeng; Xu, Yuanlu; Lassner, Christoph; Li, Hao; Tung, Tony
ARCH: Animatable Reconstruction of Clothed Humans Proceedings Article
In: Proceedings of the CVPR 2020, pp. 3090–3099, IEEE, Seattle, Washington, 2020.
Abstract | Links | BibTeX | Tags: ARO-Coop, Graphics
@inproceedings{huang_arch_2020,
  title     = {{ARCH}: Animatable Reconstruction of Clothed Humans},
  author    = {Zeng Huang and Yuanlu Xu and Christoph Lassner and Hao Li and Tony Tung},
  url       = {https://www.computer.org/csdl/proceedings-article/cvpr/2020/716800d090/1m3nz4mKHzG},
  doi       = {10.1109/CVPR42600.2020.00316},
  year      = {2020},
  date      = {2020-04-01},
  booktitle = {Proceedings of the CVPR 2020},
  pages     = {3090--3099},
  publisher = {IEEE},
  address   = {Seattle, Washington},
  abstract  = {In this paper, we propose ARCH (Animatable Reconstruction of Clothed Humans), a novel end-to-end framework for accurate reconstruction of animation-ready 3D clothed humans from a monocular image. Existing approaches to digitize 3D humans struggle to handle pose variations and recover details. Also, they do not produce models that are animation ready. In contrast, ARCH is a learned pose-aware model that produces detailed 3D rigged full-body human avatars from a single unconstrained RGB image. A Semantic Space and a Semantic Deformation Field are created using a parametric 3D body estimator. They allow the transformation of 2D/3D clothed humans into a canonical space, reducing ambiguities in geometry caused by pose variations and occlusions in training data. Detailed surface geometry and appearance are learned using an implicit function representation with spatial local features. Furthermore, we propose additional per-pixel supervision on the 3D reconstruction using opacity-aware differentiable rendering. Our experiments indicate that ARCH increases the fidelity of the reconstructed humans. We obtain more than 50% lower reconstruction errors for standard metrics compared to state-of-the-art methods on public datasets. We also show numerous qualitative examples of animated, high-quality reconstructed avatars unseen in the literature so far.},
  keywords  = {ARO-Coop, Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings},
  internal-note = {NOTE(review): duplicate of entry huang_arch_2020-1 (identical DOI, title, authors); keep one entry and merge the keyword lists.}
}
Bellas, Alexandria; Perrin, Stefawn; Malone, Brandon; Rogers, Kaytlin; Lucas, Gale; Phillips, Elizabeth; Tossell, Chad; de Visser, Ewart
Rapport Building with Social Robots as a Method for Improving Mission Debriefing in Human-Robot Teams Proceedings Article
In: Proceedings of the 2020 Systems and Information Engineering Design Symposium (SIEDS), pp. 160–163, IEEE, Charlottesville, VA, USA, 2020, ISBN: 978-1-72817-145-6.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@inproceedings{bellas_rapport_2020,
  title     = {Rapport Building with Social Robots as a Method for Improving Mission Debriefing in Human-Robot Teams},
  author    = {Alexandria Bellas and Stefawn Perrin and Brandon Malone and Kaytlin Rogers and Gale Lucas and Elizabeth Phillips and Chad Tossell and Ewart de Visser},
  url       = {https://ieeexplore.ieee.org/document/9106643/},
  doi       = {10.1109/SIEDS49339.2020.9106643},
  isbn      = {978-1-72817-145-6},
  year      = {2020},
  date      = {2020-04-01},
  booktitle = {Proceedings of the 2020 Systems and Information Engineering Design Symposium (SIEDS)},
  pages     = {160--163},
  publisher = {IEEE},
  address   = {Charlottesville, VA, USA},
  abstract  = {Conflicts may arise at any time during military debriefing meetings, especially in high intensity deployed settings. When such conflicts arise, it takes time to get everyone back into a receptive state of mind so that they engage in reflective discussion rather than unproductive arguing. It has been proposed by some that the use of social robots equipped with social abilities such as emotion regulation through rapport building may help to deescalate these situations to facilitate critical operational decisions. However, in military settings, the same AI agent used in the pre-brief of a mission may not be the same one used in the debrief. The purpose of this study was to determine whether a brief rapport-building session with a social robot could create a connection between a human and a robot agent, and whether consistency in the embodiment of the robot agent was necessary for maintaining this connection once formed. We report the results of a pilot study conducted at the United States Air Force Academy which simulated a military mission (i.e., Gravity and Strike). Participants’ connection with the agent, sense of trust, and overall likeability revealed that early rapport building can be beneficial for military missions.},
  keywords  = {ARO-Coop, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}