Publications
Search
Liu, Ziming; Suen, Christine Wun Ki; Zou, Zhengbo; Chen, Meida; Shi, Yangming
Assessing Workers’ Operational Postures via Egocentric Camera Mapping Proceedings Article
In: Computing in Civil Engineering 2023, pp. 17–24, American Society of Civil Engineers, Corvallis, Oregon, 2024, ISBN: 978-0-7844-8522-4.
@inproceedings{liu_assessing_2024,
  title     = {Assessing Workers’ Operational Postures via Egocentric Camera Mapping},
  author    = {Liu, Ziming and Suen, Christine Wun Ki and Zou, Zhengbo and Chen, Meida and Shi, Yangming},
  url       = {https://ascelibrary.org/doi/10.1061/9780784485224.003},
  doi       = {10.1061/9780784485224.003},
  isbn      = {978-0-7844-8522-4},
  year      = {2024},
  date      = {2024-01-01},
  urldate   = {2024-03-19},
  booktitle = {Computing in Civil Engineering 2023},
  pages     = {17--24},
  publisher = {American Society of Civil Engineers},
  address   = {Corvallis, Oregon},
  keywords  = {Narrative, STG},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ustun, Volkan; Jorvekar, Ronit; Gurney, Nikolos; Pynadath, David; Wang, Yunzhe
Assessing Routing Decisions of Search and Rescue Teams in Service of an Artificial Social Intelligence Agent: Proceedings Article
In: Proceedings of the 16th International Conference on Agents and Artificial Intelligence, pp. 313–320, SCITEPRESS - Science and Technology Publications, Rome, Italy, 2024, ISBN: 978-989-758-680-4.
@inproceedings{ustun_assessing_2024,
  title     = {Assessing Routing Decisions of Search and Rescue Teams in Service of an Artificial Social Intelligence Agent},
  author    = {Ustun, Volkan and Jorvekar, Ronit and Gurney, Nikolos and Pynadath, David and Wang, Yunzhe},
  url       = {https://www.scitepress.org/DigitalLibrary/Link.aspx?doi=10.5220/0012388100003636},
  doi       = {10.5220/0012388100003636},
  isbn      = {978-989-758-680-4},
  year      = {2024},
  date      = {2024-01-01},
  urldate   = {2024-03-19},
  booktitle = {Proceedings of the 16th International Conference on Agents and Artificial Intelligence},
  pages     = {313--320},
  publisher = {SCITEPRESS - Science and Technology Publications},
  address   = {Rome, Italy},
  keywords  = {AI, Cognitive Architecture, Social Simulation},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gilani, Setareh Nasihati; Pollard, Kimberly; Traum, David
Multimodal Prediction of User's Performance in High-Stress Dialogue Interactions Proceedings Article
In: International Conference on Multimodal Interaction, pp. 71–75, ACM, Paris, France, 2023, ISBN: 9798400703218.
@inproceedings{nasihati_gilani_multimodal_2023,
  title     = {Multimodal Prediction of User's Performance in High-Stress Dialogue Interactions},
  author    = {Nasihati Gilani, Setareh and Pollard, Kimberly and Traum, David},
  url       = {https://dl.acm.org/doi/10.1145/3610661.3617166},
  doi       = {10.1145/3610661.3617166},
  isbn      = {9798400703218},
  year      = {2023},
  date      = {2023-10-01},
  urldate   = {2023-12-07},
  booktitle = {International Conference on Multimodal Interaction},
  pages     = {71--75},
  publisher = {ACM},
  address   = {Paris, France},
  keywords  = {Natural Language, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lin, Eleanor; Hale, James; Gratch, Jonathan
Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models Proceedings Article
In: Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing, pp. 545–550, ACM, Washington DC USA, 2023, ISBN: 978-1-4503-9926-5.
@inproceedings{lin_toward_2023,
  title     = {Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models},
  author    = {Lin, Eleanor and Hale, James and Gratch, Jonathan},
  url       = {https://dl.acm.org/doi/10.1145/3565287.3617637},
  doi       = {10.1145/3565287.3617637},
  isbn      = {978-1-4503-9926-5},
  year      = {2023},
  date      = {2023-10-01},
  urldate   = {2023-12-07},
  booktitle = {Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing},
  pages     = {545--550},
  publisher = {ACM},
  address   = {Washington, DC, USA},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ahmed, Tamim; Rikakis, Thanassis; Kelliher, Aisling; Soleymani, Mohammad
ASAR Dataset and Computational Model for Affective State Recognition During ARAT Assessment for Upper Extremity Stroke Survivors Proceedings Article
In: International Conference on Multimodal Interaction, pp. 11–15, ACM, Paris, France, 2023, ISBN: 9798400703218.
@inproceedings{ahmed_asar_2023,
  title     = {{ASAR} Dataset and Computational Model for Affective State Recognition During {ARAT} Assessment for Upper Extremity Stroke Survivors},
  author    = {Ahmed, Tamim and Rikakis, Thanassis and Kelliher, Aisling and Soleymani, Mohammad},
  url       = {https://dl.acm.org/doi/10.1145/3610661.3617154},
  doi       = {10.1145/3610661.3617154},
  isbn      = {9798400703218},
  year      = {2023},
  date      = {2023-10-01},
  urldate   = {2023-12-07},
  booktitle = {International Conference on Multimodal Interaction},
  pages     = {11--15},
  publisher = {ACM},
  address   = {Paris, France},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Andrist, Sean; Bohus, Dan; Li, Zongjian; Soleymani, Mohammad
Platform for Situated Intelligence and OpenSense: A Tutorial on Building Multimodal Interactive Applications for Research Proceedings Article
In: International Conference on Multimodal Interaction, pp. 105–106, ACM, Paris, France, 2023, ISBN: 9798400703218.
@inproceedings{andrist_platform_2023,
  title     = {Platform for Situated Intelligence and {OpenSense}: A Tutorial on Building Multimodal Interactive Applications for Research},
  author    = {Andrist, Sean and Bohus, Dan and Li, Zongjian and Soleymani, Mohammad},
  url       = {https://dl.acm.org/doi/10.1145/3610661.3617603},
  doi       = {10.1145/3610661.3617603},
  isbn      = {9798400703218},
  year      = {2023},
  date      = {2023-10-01},
  urldate   = {2023-12-07},
  booktitle = {International Conference on Multimodal Interaction},
  pages     = {105--106},
  publisher = {ACM},
  address   = {Paris, France},
  keywords  = {AI, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Tran, Trang; Yin, Yufeng; Tavabi, Leili; Delacruz, Joannalyn; Borsari, Brian; Woolley, Joshua D; Scherer, Stefan; Soleymani, Mohammad
Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews Proceedings Article
In: INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION, pp. 406–415, ACM, Paris France, 2023, ISBN: 9798400700552.
@inproceedings{tran_multimodal_2023,
  title     = {Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews},
  author    = {Tran, Trang and Yin, Yufeng and Tavabi, Leili and Delacruz, Joannalyn and Borsari, Brian and Woolley, Joshua D and Scherer, Stefan and Soleymani, Mohammad},
  url       = {https://dl.acm.org/doi/10.1145/3577190.3614105},
  doi       = {10.1145/3577190.3614105},
  isbn      = {9798400700552},
  year      = {2023},
  date      = {2023-10-01},
  urldate   = {2023-12-07},
  booktitle = {International Conference on Multimodal Interaction},
  pages     = {406--415},
  publisher = {ACM},
  address   = {Paris, France},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gainer, Alesia; Aptaker, Allison; Artstein, Ron; Cobbins, David; Core, Mark; Gordon, Carla; Leuski, Anton; Li, Zongjian; Merchant, Chirag; Nelson, David; Soleymani, Mohammad; Traum, David
DIVIS: Digital Interactive Victim Intake Simulator Proceedings Article
In: Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents, pp. 1–2, ACM, Würzburg Germany, 2023, ISBN: 978-1-4503-9994-4.
@inproceedings{gainer_divis_2023,
  title     = {{DIVIS}: Digital Interactive Victim Intake Simulator},
  author    = {Gainer, Alesia and Aptaker, Allison and Artstein, Ron and Cobbins, David and Core, Mark and Gordon, Carla and Leuski, Anton and Li, Zongjian and Merchant, Chirag and Nelson, David and Soleymani, Mohammad and Traum, David},
  url       = {https://dl.acm.org/doi/10.1145/3570945.3607328},
  doi       = {10.1145/3570945.3607328},
  isbn      = {978-1-4503-9994-4},
  year      = {2023},
  date      = {2023-09-01},
  urldate   = {2024-02-20},
  booktitle = {Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents},
  pages     = {1--2},
  publisher = {ACM},
  address   = {Würzburg, Germany},
  keywords  = {MxR, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Tran, Minh; Yin, Yufeng; Soleymani, Mohammad
Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition Proceedings Article
In: INTERSPEECH 2023, pp. 636–640, ISCA, 2023.
@inproceedings{tran_personalized_2023,
  title     = {Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition},
  author    = {Tran, Minh and Yin, Yufeng and Soleymani, Mohammad},
  url       = {https://www.isca-speech.org/archive/interspeech_2023/tran23c_interspeech.html},
  doi       = {10.21437/Interspeech.2023-2170},
  year      = {2023},
  date      = {2023-08-01},
  urldate   = {2023-08-23},
  booktitle = {INTERSPEECH 2023},
  pages     = {636--640},
  publisher = {ISCA},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Tran, Minh; Soleymani, Mohammad
A Speech Representation Anonymization Framework via Selective Noise Perturbation Proceedings Article
In: ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1–5, IEEE, Rhodes Island, Greece, 2023, ISBN: 978-1-72816-327-7.
@inproceedings{tran_speech_2023,
  title     = {A Speech Representation Anonymization Framework via Selective Noise Perturbation},
  author    = {Tran, Minh and Soleymani, Mohammad},
  url       = {https://ieeexplore.ieee.org/document/10095173/},
  doi       = {10.1109/ICASSP49357.2023.10095173},
  isbn      = {978-1-72816-327-7},
  year      = {2023},
  date      = {2023-06-01},
  urldate   = {2023-08-23},
  booktitle = {ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages     = {1--5},
  publisher = {IEEE},
  address   = {Rhodes Island, Greece},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Feng, Andrew
Searching for the Most Probable Combination of Class Labels Using Etcetera Abduction Proceedings Article
In: 2023 57th Annual Conference on Information Sciences and Systems (CISS), pp. 1–6, IEEE, Baltimore, MD, USA, 2023, ISBN: 978-1-66545-181-9.
@inproceedings{gordon_searching_2023,
  title     = {Searching for the Most Probable Combination of Class Labels Using Etcetera Abduction},
  author    = {Gordon, Andrew S. and Feng, Andrew},
  url       = {https://ieeexplore.ieee.org/document/10089729/},
  doi       = {10.1109/CISS56502.2023.10089729},
  isbn      = {978-1-66545-181-9},
  year      = {2023},
  date      = {2023-03-01},
  urldate   = {2023-08-07},
  booktitle = {2023 57th Annual Conference on Information Sciences and Systems (CISS)},
  pages     = {1--6},
  publisher = {IEEE},
  address   = {Baltimore, MD, USA},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nye, Benjamin D; Mee, Dillon; Core, Mark G
Generative Large Language Models for Dialog-Based Tutoring: An Early Consideration of Opportunities and Concerns Proceedings Article
In: 2023.
@inproceedings{nye_generative_2023,
  title         = {Generative Large Language Models for Dialog-Based Tutoring: An Early Consideration of Opportunities and Concerns},
  author        = {Nye, Benjamin D and Mee, Dillon and Core, Mark G},
  url           = {https://ceur-ws.org/Vol-3487/paper4.pdf},
  year          = {2023},
  date          = {2023-01-01},
  internal-note = {NOTE(review): booktitle/venue missing — URL points to CEUR-WS Vol-3487; confirm the workshop proceedings title and add a booktitle field},
  abstract      = {After many years of relatively limited capabilities for generative language models, recent large language models (LLM’s) have demonstrated qualitatively better capabilities for understanding, synthesis, and inference on text. Due to the prominence of ChatGPT’s chat system, both the media and many educational developers have suggested using generative AI to directly tutor students. However, despite surface-level similarity between ChatGPT interactions and tutoring dialogs, generative AI has other strengths which may be substantially more relevant for intelligent tutoring (e.g., detecting misconceptions, improved language translation, content generation) and weaknesses that make it problematic for on-the-fly tutoring (e.g., hallucinations, lack of pedagogical training data). In this paper, we discuss how we are approaching generative LLM’s for tutoring dialogs, for problems such as multi- concept short answer grading and semi-supervised interactive content generation. This work shows interesting opportunities for prompt engineering approaches for short-answer classification, despite sometimes quirky behavior. The time savings for high-quality content generation for tutoring is not yet clear and further research is needed. The paper concludes with a consideration of longer-term equity and access in a world where essential capabilities require low-latency real-time connections to large, pay-peruse models. Risks and mitigating technologies for this kind of “AI digital divide” are discussed, including optimized / edge-computing LLM’s and using generative AI models as simulated students to train specialized tutoring models.},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
Georgila, Kallirroi
Considerations for Child Speech Synthesis for Dialogue Systems Proceedings Article
In: Los Angeles, CA, 2023.
@inproceedings{georgila_considerations_2023,
  title         = {Considerations for Child Speech Synthesis for Dialogue Systems},
  author        = {Georgila, Kallirroi},
  url           = {https://kgeorgila.github.io/publications/georgila_aiaic23.pdf},
  year          = {2023},
  date          = {2023-01-01},
  address       = {Los Angeles, CA},
  internal-note = {NOTE(review): booktitle missing — the address field holds only a venue city; the PDF filename suggests an AIAIC 2023 workshop paper; confirm and add a booktitle field},
  abstract      = {We present a number of important issues for consideration with regard to child speech synthesis for dialogue systems. We specifically discuss challenges in building child synthetic voices compared to adult synthetic voices, synthesizing expressive conversational speech, and evaluating speech synthesis quality.},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
Pynadath, David V; Gurney, Nikolos; Kenny, Sarah; Kumar, Rajay; Marsella, Stacy C.; Matuszak, Haley; Mostafa, Hala; Ustun, Volkan; Wu, Peggy; Sequeira, Pedro
Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a Minecraft Search-and-Rescue Task Proceedings Article
In: AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems, pp. 2334–2336, 2023.
@inproceedings{pynadath_effectiveness_2023,
  title     = {Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a {Minecraft} Search-and-Rescue Task},
  author    = {Pynadath, David V and Gurney, Nikolos and Kenny, Sarah and Kumar, Rajay and Marsella, Stacy C. and Matuszak, Haley and Mostafa, Hala and Ustun, Volkan and Wu, Peggy and Sequeira, Pedro},
  url       = {https://dl.acm.org/doi/10.5555/3545946.3598925},
  year      = {2023},
  date      = {2023-01-01},
  booktitle = {AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems},
  pages     = {2334--2336},
  abstract  = {Autonomous agents offer the promise of improved human teamwork through automated assessment and assistance during task performance [15, 16, 18]. Studies of human teamwork have identified various processes that underlie joint task performance, while abstracting away the specifics of the task [7, 11, 13, 17].We present here an agent that focuses exclusively on teamwork-level variables in deciding what interventions to use in assisting a human team. Our agent does not directly observe or model the environment or the people in it, but instead relies on input from analytic components (ACs) (developed by other research teams) that process environmental information and output only teamwork-relevant measures. Our agent models these teamwork variables and updates its beliefs over them using a Bayesian Theory of Mind [1], applying Partially Observable Markov Decision Processes (POMDPs) [9] in a recursive manner to assess the state of the team it is currently observing and to choose interventions to best assist them.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mozgai, Sharon; Beland, Sarah; Leeds, Andrew; Winn, Jade G.; Kaurloto, Cari; Heylen, Dirk; Hartholt, Arno
Toward a Scoping Review of Social Intelligence in Virtual Humans Proceedings Article
In: 2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1–6, 2023.
@inproceedings{mozgai_toward_2023,
  title     = {Toward a Scoping Review of Social Intelligence in Virtual Humans},
  author    = {Mozgai, Sharon and Beland, Sarah and Leeds, Andrew and Winn, Jade G. and Kaurloto, Cari and Heylen, Dirk and Hartholt, Arno},
  url       = {https://ieeexplore.ieee.org/abstract/document/10042532},
  doi       = {10.1109/FG57933.2023.10042532},
  year      = {2023},
  date      = {2023-01-01},
  booktitle = {2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)},
  pages     = {1--6},
  abstract  = {As the demand for socially intelligent Virtual Humans (VHs) increases, so follows the demand for effective and efficient cross-discipline collaboration that is required to bring these VHs “to life”. One avenue for increasing cross-discipline fluency is the aggregation and organization of seemingly disparate areas of research and development (e.g., graphics and emotion models) that are essential to the field of VH research. Our initial investigation (1) identifies and catalogues research streams concentrated in three multidisciplinary VH topic clusters within the domain of social intelligence, Emotion, Social Behavior, and The Face, (2) brings to the forefront key themes and prolific authors within each topic cluster, and (3) provides evidence that a full scoping review is warranted to further map the field, aggregate research findings, and identify gaps in the research. To enable collaboration, we provide full access to the refined VH cluster datasets, key word and author word clouds, as well as interactive evidence maps.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hartholt, Arno; Mozgai, Sharon
Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment Proceedings Article
In: Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems, AHFE Open Access, 2023, ISBN: 978-1-958651-45-2, (ISSN: 27710718 Issue: 69).
@inproceedings{hartholt_creating_2023,
  title     = {Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration \& Development Environment},
  author    = {Hartholt, Arno and Mozgai, Sharon},
  url       = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-45-2/article/978-1-958651-45-2_41},
  doi       = {10.54941/ahfe1002856},
  isbn      = {978-1-958651-45-2},
  year      = {2023},
  date      = {2023-01-01},
  urldate   = {2023-03-31},
  booktitle = {Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems},
  volume    = {69},
  publisher = {AHFE Open Access},
  abstract  = {The research and development of virtual humans, and the virtual worlds they inhabit, is inherently complex, requiring interdisciplinary approaches that combine social sciences, computer science, design, art, production, and domain expertise. Our previous work in managing this complexity has resulted in the release of the Virtual Human Toolkit (VHToolkit), aimed at lowering the burden of creating embodied conversational agents. In our current efforts, we are integrating the VHToolkit with the Rapid Integration & Development Environment (RIDE), a rapid prototyping modeling and simulation middleware platform that leverages real-time game engines. This integration results in the ability to mix and match commercial AI services from AWS, Azure, and Google, as well as leverage novel 3D geospatial terrain creation pipelines. Combined with dedicated authoring tools that have been developed through human-centered design processes, the platform enables researchers, developers, and domain experts to rapidly create digital worlds with virtual humans for both military and civilian contexts. Our approach is highly interdisciplinary, including academia, government, and industry collaborators. The demonstration shows a user interacting with an embodied conversational agent embedded within real-world captured and virtualized terrain. Further research and development features of the platform are shown, including scripted agent behaviors, networked team play, and machine learning interfaces.},
  note      = {ISSN: 27710718, Issue: 69},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Feng, Andrew; Shin, Samuel; Yoon, Youngwoo
A Tool for Extracting 3D Avatar-Ready Gesture Animations from Monocular Videos Proceedings Article
In: Proceedings of the 15th ACM SIGGRAPH Conference on Motion, Interaction and Games, pp. 1–7, ACM, Guanajuato Mexico, 2022, ISBN: 978-1-4503-9888-6.
@inproceedings{feng_tool_2022,
  title     = {A Tool for Extracting {3D} Avatar-Ready Gesture Animations from Monocular Videos},
  author    = {Feng, Andrew and Shin, Samuel and Yoon, Youngwoo},
  url       = {https://dl.acm.org/doi/10.1145/3561975.3562953},
  doi       = {10.1145/3561975.3562953},
  isbn      = {978-1-4503-9888-6},
  year      = {2022},
  date      = {2022-11-01},
  urldate   = {2023-08-04},
  booktitle = {Proceedings of the 15th ACM SIGGRAPH Conference on Motion, Interaction and Games},
  pages     = {1--7},
  publisher = {ACM},
  address   = {Guanajuato, Mexico},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lu, Shuhong; Feng, Andrew
The DeepMotion entry to the GENEA Challenge 2022 Proceedings Article
In: INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION, pp. 790–796, ACM, Bengaluru India, 2022, ISBN: 978-1-4503-9390-4.
@inproceedings{lu_deepmotion_2022,
  title     = {The {DeepMotion} entry to the {GENEA} Challenge 2022},
  author    = {Lu, Shuhong and Feng, Andrew},
  url       = {https://dl.acm.org/doi/10.1145/3536221.3558059},
  doi       = {10.1145/3536221.3558059},
  isbn      = {978-1-4503-9390-4},
  year      = {2022},
  date      = {2022-11-01},
  urldate   = {2023-08-24},
  booktitle = {International Conference on Multimodal Interaction},
  pages     = {790--796},
  publisher = {ACM},
  address   = {Bengaluru, India},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Yin, Yufeng; Xu, Jiashu; Zu, Tianxin; Soleymani, Mohammad
X-Norm: Exchanging Normalization Parameters for Bimodal Fusion Proceedings Article
In: INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION, pp. 605–614, ACM, Bengaluru India, 2022, ISBN: 978-1-4503-9390-4.
@inproceedings{yin_x-norm_2022,
  title     = {{X-Norm}: Exchanging Normalization Parameters for Bimodal Fusion},
  author    = {Yin, Yufeng and Xu, Jiashu and Zu, Tianxin and Soleymani, Mohammad},
  url       = {https://dl.acm.org/doi/10.1145/3536221.3556581},
  doi       = {10.1145/3536221.3556581},
  isbn      = {978-1-4503-9390-4},
  year      = {2022},
  date      = {2022-11-01},
  urldate   = {2023-08-24},
  booktitle = {International Conference on Multimodal Interaction},
  pages     = {605--614},
  publisher = {ACM},
  address   = {Bengaluru, India},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Zhang, Larry; Kolacz, Jacek; Rizzo, Albert; Scherer, Stefan; Soleymani, Mohammad
Speech Behavioral Markers Align on Symptom Factors in Psychological Distress Proceedings Article
In: 2022 10th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, 2022, (ISSN: 2156-8111).
@inproceedings{zhang_speech_2022,
  title     = {Speech Behavioral Markers Align on Symptom Factors in Psychological Distress},
  author    = {Zhang, Larry and Kolacz, Jacek and Rizzo, Albert and Scherer, Stefan and Soleymani, Mohammad},
  url       = {https://ieeexplore.ieee.org/abstract/document/9953849},
  doi       = {10.1109/ACII55700.2022.9953849},
  year      = {2022},
  date      = {2022-10-01},
  booktitle = {2022 10th International Conference on Affective Computing and Intelligent Interaction (ACII)},
  pages     = {1--8},
  abstract  = {Automatic detection of psychological disorders has gained significant attention in recent years due to the rise in their prevalence. However, the majority of studies have overlooked the complexity of disorders in favor of a “present/not present” dichotomy in representing disorders. Recent psychological research challenges favors transdiagnostic approaches, moving beyond general disorder classifications to symptom level analysis, as symptoms are often not exclusive to individual disorder classes. In our study, we investigated the link between speech signals and psychological distress symptoms in a corpus of 333 screening interviews from the Distress Analysis Interview Corpus (DAIC). Given the semi-structured organization of interviews, we aggregated speech utterances from responses to shared questions across interviews. We employed deterministic sample selection in classification to rank salient questions for eliciting symptom-specific behaviors in order to predict symptom presence. Some questions include “Do you find therapy helpful?” and “When was the last time you felt happy?”. The prediction results align closely to the factor structure of psychological distress symptoms, linking speech behaviors primarily to somatic and affective alterations in both depression and PTSD. This lends support for the transdiagnostic validity of speech markers for detecting such symptoms. Surprisingly, we did not find a strong link between speech markers and cognitive or psychomotor alterations. This is surprising, given the complexity of motor and cognitive actions required in speech production. The results of our analysis highlight the importance of aligning affective computing research with psychological research to investigate the use of automatic behavioral sensing to assess psychiatric risk.},
  note      = {ISSN: 2156-8111},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Filter
2024
Liu, Ziming; Suen, Christine Wun Ki; Zou, Zhengbo; Chen, Meida; Shi, Yangming
Assessing Workers’ Operational Postures via Egocentric Camera Mapping Proceedings Article
In: Computing in Civil Engineering 2023, pp. 17–24, American Society of Civil Engineers, Corvallis, Oregon, 2024, ISBN: 978-0-7844-8522-4.
Links | BibTeX | Tags: Narrative, STG
@inproceedings{liu_assessing_2024,
  title         = {Assessing Workers’ Operational Postures via Egocentric Camera Mapping},
  author        = {Liu, Ziming and Suen, Christine Wun Ki and Zou, Zhengbo and Chen, Meida and Shi, Yangming},
  url           = {https://ascelibrary.org/doi/10.1061/9780784485224.003},
  doi           = {10.1061/9780784485224.003},
  isbn          = {978-0-7844-8522-4},
  year          = {2024},
  date          = {2024-01-01},
  urldate       = {2024-03-19},
  booktitle     = {Computing in Civil Engineering 2023},
  pages         = {17--24},
  publisher     = {American Society of Civil Engineers},
  address       = {Corvallis, Oregon},
  keywords      = {Narrative, STG},
  internal-note = {NOTE(review): this key also appears earlier in the file — BibTeX treats repeated keys as an error; deduplicate},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
Ustun, Volkan; Jorvekar, Ronit; Gurney, Nikolos; Pynadath, David; Wang, Yunzhe
Assessing Routing Decisions of Search and Rescue Teams in Service of an Artificial Social Intelligence Agent: Proceedings Article
In: Proceedings of the 16th International Conference on Agents and Artificial Intelligence, pp. 313–320, SCITEPRESS - Science and Technology Publications, Rome, Italy, 2024, ISBN: 978-989-758-680-4.
Links | BibTeX | Tags: AI, Cognitive Architecture, Social Simulation
@inproceedings{ustun_assessing_2024,
  title         = {Assessing Routing Decisions of Search and Rescue Teams in Service of an Artificial Social Intelligence Agent},
  author        = {Ustun, Volkan and Jorvekar, Ronit and Gurney, Nikolos and Pynadath, David and Wang, Yunzhe},
  url           = {https://www.scitepress.org/DigitalLibrary/Link.aspx?doi=10.5220/0012388100003636},
  doi           = {10.5220/0012388100003636},
  isbn          = {978-989-758-680-4},
  year          = {2024},
  date          = {2024-01-01},
  urldate       = {2024-03-19},
  booktitle     = {Proceedings of the 16th International Conference on Agents and Artificial Intelligence},
  pages         = {313--320},
  publisher     = {SCITEPRESS - Science and Technology Publications},
  address       = {Rome, Italy},
  keywords      = {AI, Cognitive Architecture, Social Simulation},
  internal-note = {NOTE(review): this key also appears earlier in the file — BibTeX treats repeated keys as an error; deduplicate},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
2023
Gilani, Setareh Nasihati; Pollard, Kimberly; Traum, David
Multimodal Prediction of User's Performance in High-Stress Dialogue Interactions Proceedings Article
In: International Conference on Multimodal Interaction, pp. 71–75, ACM, Paris, France, 2023, ISBN: 9798400703218.
Links | BibTeX | Tags: Natural Language, UARC
@inproceedings{nasihati_gilani_multimodal_2023,
  title         = {Multimodal Prediction of User's Performance in High-Stress Dialogue Interactions},
  author        = {Nasihati Gilani, Setareh and Pollard, Kimberly and Traum, David},
  url           = {https://dl.acm.org/doi/10.1145/3610661.3617166},
  doi           = {10.1145/3610661.3617166},
  isbn          = {9798400703218},
  year          = {2023},
  date          = {2023-10-01},
  urldate       = {2023-12-07},
  booktitle     = {International Conference on Multimodal Interaction},
  pages         = {71--75},
  publisher     = {ACM},
  address       = {Paris, France},
  keywords      = {Natural Language, UARC},
  internal-note = {NOTE(review): this key also appears earlier in the file — BibTeX treats repeated keys as an error; deduplicate},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
Lin, Eleanor; Hale, James; Gratch, Jonathan
Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models Proceedings Article
In: Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing, pp. 545–550, ACM, Washington DC USA, 2023, ISBN: 978-1-4503-9926-5.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lin_toward_2023,
  title         = {Toward a Better Understanding of the Emotional Dynamics of Negotiation with Large Language Models},
  author        = {Lin, Eleanor and Hale, James and Gratch, Jonathan},
  url           = {https://dl.acm.org/doi/10.1145/3565287.3617637},
  doi           = {10.1145/3565287.3617637},
  isbn          = {978-1-4503-9926-5},
  year          = {2023},
  date          = {2023-10-01},
  urldate       = {2023-12-07},
  booktitle     = {Proceedings of the Twenty-fourth International Symposium on Theory, Algorithmic Foundations, and Protocol Design for Mobile Networks and Mobile Computing},
  pages         = {545--550},
  publisher     = {ACM},
  address       = {Washington, DC, USA},
  keywords      = {UARC, Virtual Humans},
  internal-note = {NOTE(review): this key also appears earlier in the file — BibTeX treats repeated keys as an error; deduplicate},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
Ahmed, Tamim; Rikakis, Thanassis; Kelliher, Aisling; Soleymani, Mohammad
ASAR Dataset and Computational Model for Affective State Recognition During ARAT Assessment for Upper Extremity Stroke Survivors Proceedings Article
In: International Conference on Multimodal Interaction, pp. 11–15, ACM, Paris, France, 2023, ISBN: 9798400703218.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{ahmed_asar_2023,
  title         = {{ASAR} Dataset and Computational Model for Affective State Recognition During {ARAT} Assessment for Upper Extremity Stroke Survivors},
  author        = {Ahmed, Tamim and Rikakis, Thanassis and Kelliher, Aisling and Soleymani, Mohammad},
  url           = {https://dl.acm.org/doi/10.1145/3610661.3617154},
  doi           = {10.1145/3610661.3617154},
  isbn          = {9798400703218},
  year          = {2023},
  date          = {2023-10-01},
  urldate       = {2023-12-07},
  booktitle     = {International Conference on Multimodal Interaction},
  pages         = {11--15},
  publisher     = {ACM},
  address       = {Paris, France},
  keywords      = {UARC, Virtual Humans},
  internal-note = {NOTE(review): this key also appears earlier in the file — BibTeX treats repeated keys as an error; deduplicate},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
Andrist, Sean; Bohus, Dan; Li, Zongjian; Soleymani, Mohammad
Platform for Situated Intelligence and OpenSense: A Tutorial on Building Multimodal Interactive Applications for Research Proceedings Article
In: International Conference on Multimodal Interaction, pp. 105–106, ACM, Paris, France, 2023, ISBN: 9798400703218.
Links | BibTeX | Tags: AI, UARC, Virtual Humans
@inproceedings{andrist_platform_2023,
  title         = {Platform for Situated Intelligence and {OpenSense}: A Tutorial on Building Multimodal Interactive Applications for Research},
  author        = {Andrist, Sean and Bohus, Dan and Li, Zongjian and Soleymani, Mohammad},
  url           = {https://dl.acm.org/doi/10.1145/3610661.3617603},
  doi           = {10.1145/3610661.3617603},
  isbn          = {9798400703218},
  year          = {2023},
  date          = {2023-10-01},
  urldate       = {2023-12-07},
  booktitle     = {International Conference on Multimodal Interaction},
  pages         = {105--106},
  publisher     = {ACM},
  address       = {Paris, France},
  keywords      = {AI, UARC, Virtual Humans},
  internal-note = {NOTE(review): this key also appears earlier in the file — BibTeX treats repeated keys as an error; deduplicate},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
Tran, Trang; Yin, Yufeng; Tavabi, Leili; Delacruz, Joannalyn; Borsari, Brian; Woolley, Joshua D; Scherer, Stefan; Soleymani, Mohammad
Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews Proceedings Article
In: INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION, pp. 406–415, ACM, Paris France, 2023, ISBN: 9798400700552.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{tran_multimodal_2023,
  title = {Multimodal Analysis and Assessment of Therapist Empathy in Motivational Interviews},
  author = {Trang Tran and Yufeng Yin and Leili Tavabi and Joannalyn Delacruz and Brian Borsari and Joshua D Woolley and Stefan Scherer and Mohammad Soleymani},
  url = {https://dl.acm.org/doi/10.1145/3577190.3614105},
  doi = {10.1145/3577190.3614105},
  isbn = {9798400700552},
  year = {2023},
  date = {2023-10-01},
  urldate = {2023-12-07},
  booktitle = {International Conference on Multimodal Interaction},
  pages = {406--415},
  publisher = {ACM},
  address = {Paris, France},
  keywords = {UARC, Virtual Humans},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Gainer, Alesia; Aptaker, Allison; Artstein, Ron; Cobbins, David; Core, Mark; Gordon, Carla; Leuski, Anton; Li, Zongjian; Merchant, Chirag; Nelson, David; Soleymani, Mohammad; Traum, David
DIVIS: Digital Interactive Victim Intake Simulator Proceedings Article
In: Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents, pp. 1–2, ACM, Würzburg Germany, 2023, ISBN: 978-1-4503-9994-4.
Links | BibTeX | Tags: MxR, UARC, Virtual Humans
@inproceedings{gainer_divis_2023,
  title = {{DIVIS}: Digital Interactive Victim Intake Simulator},
  author = {Alesia Gainer and Allison Aptaker and Ron Artstein and David Cobbins and Mark Core and Carla Gordon and Anton Leuski and Zongjian Li and Chirag Merchant and David Nelson and Mohammad Soleymani and David Traum},
  url = {https://dl.acm.org/doi/10.1145/3570945.3607328},
  doi = {10.1145/3570945.3607328},
  isbn = {978-1-4503-9994-4},
  year = {2023},
  date = {2023-09-01},
  urldate = {2024-02-20},
  booktitle = {Proceedings of the 23rd ACM International Conference on Intelligent Virtual Agents},
  pages = {1--2},
  publisher = {ACM},
  address = {W{\"u}rzburg, Germany},
  keywords = {MxR, UARC, Virtual Humans},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Tran, Minh; Yin, Yufeng; Soleymani, Mohammad
Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition Proceedings Article
In: INTERSPEECH 2023, pp. 636–640, ISCA, 2023.
Links | BibTeX | Tags: Emotions, UARC, Virtual Humans
@inproceedings{tran_personalized_2023,
  title = {Personalized Adaptation with Pre-trained Speech Encoders for Continuous Emotion Recognition},
  author = {Minh Tran and Yufeng Yin and Mohammad Soleymani},
  url = {https://www.isca-speech.org/archive/interspeech_2023/tran23c_interspeech.html},
  doi = {10.21437/Interspeech.2023-2170},
  year = {2023},
  date = {2023-08-01},
  urldate = {2023-08-23},
  booktitle = {INTERSPEECH 2023},
  pages = {636--640},
  publisher = {ISCA},
  keywords = {Emotions, UARC, Virtual Humans},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Tran, Minh; Soleymani, Mohammad
A Speech Representation Anonymization Framework via Selective Noise Perturbation Proceedings Article
In: ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1–5, IEEE, Rhodes Island, Greece, 2023, ISBN: 978-1-72816-327-7.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{tran_speech_2023,
  title = {A Speech Representation Anonymization Framework via Selective Noise Perturbation},
  author = {Minh Tran and Mohammad Soleymani},
  url = {https://ieeexplore.ieee.org/document/10095173/},
  doi = {10.1109/ICASSP49357.2023.10095173},
  isbn = {978-1-72816-327-7},
  year = {2023},
  date = {2023-06-01},
  urldate = {2023-08-23},
  booktitle = {ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages = {1--5},
  publisher = {IEEE},
  address = {Rhodes Island, Greece},
  keywords = {UARC, Virtual Humans},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Feng, Andrew
Searching for the Most Probable Combination of Class Labels Using Etcetera Abduction Proceedings Article
In: 2023 57th Annual Conference on Information Sciences and Systems (CISS), pp. 1–6, IEEE, Baltimore, MD, USA, 2023, ISBN: 978-1-66545-181-9.
Links | BibTeX | Tags: Narrative, UARC
@inproceedings{gordon_searching_2023,
  title = {Searching for the Most Probable Combination of Class Labels Using Etcetera Abduction},
  author = {Andrew S. Gordon and Andrew Feng},
  url = {https://ieeexplore.ieee.org/document/10089729/},
  doi = {10.1109/CISS56502.2023.10089729},
  isbn = {978-1-66545-181-9},
  year = {2023},
  date = {2023-03-01},
  urldate = {2023-08-07},
  booktitle = {2023 57th Annual Conference on Information Sciences and Systems (CISS)},
  pages = {1--6},
  publisher = {IEEE},
  address = {Baltimore, MD, USA},
  keywords = {Narrative, UARC},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Nye, Benjamin D; Mee, Dillon; Core, Mark G
Generative Large Language Models for Dialog-Based Tutoring: An Early Consideration of Opportunities and Concerns Proceedings Article
In: 2023.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC
@inproceedings{nye_generative_2023,
  title = {Generative Large Language Models for Dialog-Based Tutoring: An Early Consideration of Opportunities and Concerns},
  author = {Benjamin D Nye and Dillon Mee and Mark G Core},
  url = {https://ceur-ws.org/Vol-3487/paper4.pdf},
  year = {2023},
  date = {2023-01-01},
  abstract = {After many years of relatively limited capabilities for generative language models, recent large language models (LLM’s) have demonstrated qualitatively better capabilities for understanding, synthesis, and inference on text. Due to the prominence of ChatGPT’s chat system, both the media and many educational developers have suggested using generative AI to directly tutor students. However, despite surface-level similarity between ChatGPT interactions and tutoring dialogs, generative AI has other strengths which may be substantially more relevant for intelligent tutoring (e.g., detecting misconceptions, improved language translation, content generation) and weaknesses that make it problematic for on-the-fly tutoring (e.g., hallucinations, lack of pedagogical training data). In this paper, we discuss how we are approaching generative LLM’s for tutoring dialogs, for problems such as multi-concept short answer grading and semi-supervised interactive content generation. This work shows interesting opportunities for prompt engineering approaches for short-answer classification, despite sometimes quirky behavior. The time savings for high-quality content generation for tutoring is not yet clear and further research is needed. The paper concludes with a consideration of longer-term equity and access in a world where essential capabilities require low-latency real-time connections to large, pay-per-use models. Risks and mitigating technologies for this kind of “AI digital divide” are discussed, including optimized / edge-computing LLM’s and using generative AI models as simulated students to train specialized tutoring models.},
  internal-note = {NOTE(review): required booktitle field is missing for @inproceedings — the URL suggests a CEUR-WS Vol-3487 workshop paper; confirm the venue and add booktitle},
  keywords = {Learning Sciences, UARC},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Georgila, Kallirroi
Considerations for Child Speech Synthesis for Dialogue Systems Proceedings Article
In: Los Angeles, CA, 2023.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{georgila_considerations_2023,
  title = {Considerations for Child Speech Synthesis for Dialogue Systems},
  author = {Kallirroi Georgila},
  url = {https://kgeorgila.github.io/publications/georgila_aiaic23.pdf},
  year = {2023},
  date = {2023-01-01},
  address = {Los Angeles, CA},
  abstract = {We present a number of important issues for consideration with regard to child speech synthesis for dialogue systems. We specifically discuss challenges in building child synthetic voices compared to adult synthetic voices, synthesizing expressive conversational speech, and evaluating speech synthesis quality.},
  internal-note = {NOTE(review): required booktitle field is missing for @inproceedings — confirm the workshop/venue (filename hints at "aiaic23") and add booktitle},
  keywords = {UARC, Virtual Humans},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Pynadath, David V; Gurney, Nikolos; Kenny, Sarah; Kumar, Rajay; Marsella, Stacy C.; Matuszak, Haley; Mostafa, Hala; Ustun, Volkan; Wu, Peggy; Sequeira, Pedro
Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a Minecraft Search-and-Rescue Task Proceedings Article
In: AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems, pp. 2334–2336, 2023.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@inproceedings{pynadath_effectiveness_2023,
  title = {Effectiveness of Teamwork-Level Interventions through Decision-Theoretic Reasoning in a Minecraft Search-and-Rescue Task},
  author = {David V Pynadath and Nikolos Gurney and Sarah Kenny and Rajay Kumar and Stacy C. Marsella and Haley Matuszak and Hala Mostafa and Volkan Ustun and Peggy Wu and Pedro Sequeira},
  url = {https://dl.acm.org/doi/10.5555/3545946.3598925},
  year = {2023},
  date = {2023-01-01},
  booktitle = {AAMAS '23: Proceedings of the 2023 International Conference on Autonomous Agents and Multiagent Systems},
  pages = {2334--2336},
  abstract = {Autonomous agents offer the promise of improved human teamwork through automated assessment and assistance during task performance [15, 16, 18]. Studies of human teamwork have identified various processes that underlie joint task performance, while abstracting away the specifics of the task [7, 11, 13, 17]. We present here an agent that focuses exclusively on teamwork-level variables in deciding what interventions to use in assisting a human team. Our agent does not directly observe or model the environment or the people in it, but instead relies on input from analytic components (ACs) (developed by other research teams) that process environmental information and output only teamwork-relevant measures. Our agent models these teamwork variables and updates its beliefs over them using a Bayesian Theory of Mind [1], applying Partially Observable Markov Decision Processes (POMDPs) [9] in a recursive manner to assess the state of the team it is currently observing and to choose interventions to best assist them.},
  keywords = {Social Simulation, UARC},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Mozgai, Sharon; Beland, Sarah; Leeds, Andrew; Winn, Jade G.; Kaurloto, Cari; Heylen, Dirk; Hartholt, Arno
Toward a Scoping Review of Social Intelligence in Virtual Humans Proceedings Article
In: 2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1–6, 2023.
Abstract | Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@inproceedings{mozgai_toward_2023,
  title = {Toward a Scoping Review of Social Intelligence in Virtual Humans},
  author = {Sharon Mozgai and Sarah Beland and Andrew Leeds and Jade G. Winn and Cari Kaurloto and Dirk Heylen and Arno Hartholt},
  url = {https://ieeexplore.ieee.org/abstract/document/10042532},
  doi = {10.1109/FG57933.2023.10042532},
  year = {2023},
  date = {2023-01-01},
  booktitle = {2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)},
  pages = {1--6},
  abstract = {As the demand for socially intelligent Virtual Humans (VHs) increases, so follows the demand for effective and efficient cross-discipline collaboration that is required to bring these VHs “to life”. One avenue for increasing cross-discipline fluency is the aggregation and organization of seemingly disparate areas of research and development (e.g., graphics and emotion models) that are essential to the field of VH research. Our initial investigation (1) identifies and catalogues research streams concentrated in three multidisciplinary VH topic clusters within the domain of social intelligence, Emotion, Social Behavior, and The Face, (2) brings to the forefront key themes and prolific authors within each topic cluster, and (3) provides evidence that a full scoping review is warranted to further map the field, aggregate research findings, and identify gaps in the research. To enable collaboration, we provide full access to the refined VH cluster datasets, key word and author word clouds, as well as interactive evidence maps.},
  keywords = {UARC, VHTL, Virtual Humans},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Hartholt, Arno; Mozgai, Sharon
Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration & Development Environment Proceedings Article
In: Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems, AHFE Open Access, 2023, ISBN: 978-1-958651-45-2, (ISSN: 27710718 Issue: 69).
Abstract | Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@inproceedings{hartholt_creating_2023,
  title = {Creating Virtual Worlds with the Virtual Human Toolkit and the Rapid Integration \& Development Environment},
  author = {Arno Hartholt and Sharon Mozgai},
  url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-45-2/article/978-1-958651-45-2_41},
  doi = {10.54941/ahfe1002856},
  isbn = {978-1-958651-45-2},
  year = {2023},
  date = {2023-01-01},
  urldate = {2023-03-31},
  booktitle = {Intelligent Human Systems Integration (IHSI 2023): Integrating People and Intelligent Systems},
  volume = {69},
  publisher = {AHFE Open Access},
  abstract = {The research and development of virtual humans, and the virtual worlds they inhabit, is inherently complex, requiring interdisciplinary approaches that combine social sciences, computer science, design, art, production, and domain expertise. Our previous work in managing this complexity has resulted in the release of the Virtual Human Toolkit (VHToolkit), aimed at lowering the burden of creating embodied conversational agents. In our current efforts, we are integrating the VHToolkit with the Rapid Integration \& Development Environment (RIDE), a rapid prototyping modeling and simulation middleware platform that leverages real-time game engines. This integration results in the ability to mix and match commercial AI services from AWS, Azure, and Google, as well as leverage novel 3D geospatial terrain creation pipelines. Combined with dedicated authoring tools that have been developed through human-centered design processes, the platform enables researchers, developers, and domain experts to rapidly create digital worlds with virtual humans for both military and civilian contexts. Our approach is highly interdisciplinary, including academia, government, and industry collaborators. The demonstration shows a user interacting with an embodied conversational agent embedded within real-world captured and virtualized terrain. Further research and development features of the platform are shown, including scripted agent behaviors, networked team play, and machine learning interfaces.},
  note = {ISSN: 27710718 Issue: 69},
  keywords = {DTIC, UARC, VHTL, Virtual Humans},
  pubstate = {published},
  tppubtype = {inproceedings}
}
2022
Feng, Andrew; Shin, Samuel; Yoon, Youngwoo
A Tool for Extracting 3D Avatar-Ready Gesture Animations from Monocular Videos Proceedings Article
In: Proceedings of the 15th ACM SIGGRAPH Conference on Motion, Interaction and Games, pp. 1–7, ACM, Guanajuato Mexico, 2022, ISBN: 978-1-4503-9888-6.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{feng_tool_2022,
  title = {A Tool for Extracting {3D} Avatar-Ready Gesture Animations from Monocular Videos},
  author = {Andrew Feng and Samuel Shin and Youngwoo Yoon},
  url = {https://dl.acm.org/doi/10.1145/3561975.3562953},
  doi = {10.1145/3561975.3562953},
  isbn = {978-1-4503-9888-6},
  year = {2022},
  date = {2022-11-01},
  urldate = {2023-08-04},
  booktitle = {Proceedings of the 15th ACM SIGGRAPH Conference on Motion, Interaction and Games},
  pages = {1--7},
  publisher = {ACM},
  address = {Guanajuato, Mexico},
  keywords = {UARC, Virtual Humans},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Lu, Shuhong; Feng, Andrew
The DeepMotion entry to the GENEA Challenge 2022 Proceedings Article
In: INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION, pp. 790–796, ACM, Bengaluru India, 2022, ISBN: 978-1-4503-9390-4.
Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lu_deepmotion_2022,
  title = {The {DeepMotion} entry to the {GENEA} Challenge 2022},
  author = {Shuhong Lu and Andrew Feng},
  url = {https://dl.acm.org/doi/10.1145/3536221.3558059},
  doi = {10.1145/3536221.3558059},
  isbn = {978-1-4503-9390-4},
  year = {2022},
  date = {2022-11-01},
  urldate = {2023-08-24},
  booktitle = {International Conference on Multimodal Interaction},
  pages = {790--796},
  publisher = {ACM},
  address = {Bengaluru, India},
  keywords = {UARC, Virtual Humans},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Yin, Yufeng; Xu, Jiashu; Zu, Tianxin; Soleymani, Mohammad
X-Norm: Exchanging Normalization Parameters for Bimodal Fusion Proceedings Article
In: INTERNATIONAL CONFERENCE ON MULTIMODAL INTERACTION, pp. 605–614, ACM, Bengaluru India, 2022, ISBN: 978-1-4503-9390-4.
Links | BibTeX | Tags: Emotions, Virtual Humans
@inproceedings{yin_x-norm_2022,
  title = {{X-Norm}: Exchanging Normalization Parameters for Bimodal Fusion},
  author = {Yufeng Yin and Jiashu Xu and Tianxin Zu and Mohammad Soleymani},
  url = {https://dl.acm.org/doi/10.1145/3536221.3556581},
  doi = {10.1145/3536221.3556581},
  isbn = {978-1-4503-9390-4},
  year = {2022},
  date = {2022-11-01},
  urldate = {2023-08-24},
  booktitle = {International Conference on Multimodal Interaction},
  pages = {605--614},
  publisher = {ACM},
  address = {Bengaluru, India},
  keywords = {Emotions, Virtual Humans},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Zhang, Larry; Kolacz, Jacek; Rizzo, Albert; Scherer, Stefan; Soleymani, Mohammad
Speech Behavioral Markers Align on Symptom Factors in Psychological Distress Proceedings Article
In: 2022 10th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 1–8, 2022, (ISSN: 2156-8111).
Abstract | Links | BibTeX | Tags: MedVR, UARC
@inproceedings{zhang_speech_2022,
  title = {Speech Behavioral Markers Align on Symptom Factors in Psychological Distress},
  author = {Larry Zhang and Jacek Kolacz and Albert Rizzo and Stefan Scherer and Mohammad Soleymani},
  url = {https://ieeexplore.ieee.org/abstract/document/9953849},
  doi = {10.1109/ACII55700.2022.9953849},
  year = {2022},
  date = {2022-10-01},
  booktitle = {2022 10th International Conference on Affective Computing and Intelligent Interaction (ACII)},
  pages = {1--8},
  abstract = {Automatic detection of psychological disorders has gained significant attention in recent years due to the rise in their prevalence. However, the majority of studies have overlooked the complexity of disorders in favor of a “present/not present” dichotomy in representing disorders. Recent psychological research challenges favors transdiagnostic approaches, moving beyond general disorder classifications to symptom level analysis, as symptoms are often not exclusive to individual disorder classes. In our study, we investigated the link between speech signals and psychological distress symptoms in a corpus of 333 screening interviews from the Distress Analysis Interview Corpus (DAIC). Given the semi-structured organization of interviews, we aggregated speech utterances from responses to shared questions across interviews. We employed deterministic sample selection in classification to rank salient questions for eliciting symptom-specific behaviors in order to predict symptom presence. Some questions include “Do you find therapy helpful?” and “When was the last time you felt happy?”. The prediction results align closely to the factor structure of psychological distress symptoms, linking speech behaviors primarily to somatic and affective alterations in both depression and PTSD. This lends support for the transdiagnostic validity of speech markers for detecting such symptoms. Surprisingly, we did not find a strong link between speech markers and cognitive or psychomotor alterations. This is surprising, given the complexity of motor and cognitive actions required in speech production. The results of our analysis highlight the importance of aligning affective computing research with psychological research to investigate the use of automatic behavioral sensing to assess psychiatric risk.},
  note = {ISSN: 2156-8111},
  keywords = {MedVR, UARC},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Brixey, Jacqueline; Traum, David
Towards an Automatic Speech Recognizer for the Choctaw language Proceedings Article
In: 1st Workshop on Speech for Social Good (S4SG), pp. 6–9, ISCA, 2022.
Links | BibTeX | Tags: Natural Language, UARC
@inproceedings{brixey_towards_2022,
  title = {Towards an Automatic Speech Recognizer for the {Choctaw} language},
  author = {Jacqueline Brixey and David Traum},
  url = {https://www.isca-speech.org/archive/s4sg_2022/brixey22_s4sg.html},
  doi = {10.21437/S4SG.2022-2},
  year = {2022},
  date = {2022-09-01},
  urldate = {2023-03-31},
  booktitle = {1st Workshop on Speech for Social Good (S4SG)},
  pages = {6--9},
  publisher = {ISCA},
  keywords = {Natural Language, UARC},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Preference interdependencies in a multi-issue salary negotiation Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{hale_preference_2022,
  title = {Preference interdependencies in a multi-issue salary negotiation},
  author = {James Hale and Peter Kim and Jonathan Gratch},
  url = {https://doi.org/10.1145/3514197.3549681},
  doi = {10.1145/3514197.3549681},
  isbn = {978-1-4503-9248-8},
  year = {2022},
  date = {2022-09-01},
  urldate = {2022-09-27},
  booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
  pages = {1--8},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  series = {IVA '22},
  abstract = {Negotiation is an important potential application domain for intelligent virtual agents but, unlike research on agent-agent negotiations, agents that negotiate with people often adopt unrealistic simplifying assumptions. These assumptions not only limit the generality of these agents, but call into question scientific findings about how people negotiate with agents. Here we relax two common assumptions: the use of assigned rather than elicited user preferences, and the use of linear utility functions. Using a simulated salary negotiation, we find that relaxing these assumptions helps reveal interesting individual differences in how people negotiate their salary and allows algorithms to find better win-win solutions.},
  keywords = {DTIC, UARC, Virtual Humans},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Hale, James; Jalan, Harsh; Saini, Nidhi; Tan, Shao Ling; Woo, Junhyuck; Gratch, Jonathan
Negotiation game to introduce non-linear utility Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{hale_negotiation_2022,
  title = {Negotiation game to introduce non-linear utility},
  author = {James Hale and Harsh Jalan and Nidhi Saini and Shao Ling Tan and Junhyuck Woo and Jonathan Gratch},
  url = {https://doi.org/10.1145/3514197.3549678},
  doi = {10.1145/3514197.3549678},
  isbn = {978-1-4503-9248-8},
  year = {2022},
  date = {2022-09-01},
  urldate = {2022-09-27},
  booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
  pages = {1--3},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  series = {IVA '22},
  abstract = {Much prior work in automated negotiation makes the simplifying assumption of linear utility functions. As such, we propose a framework for multilateral repeated negotiations in a complex game setting—to introduce non-linearities—where negotiators can choose with whom they negotiate in subsequent games. This game setting not only creates non-linear utility functions, but also motivates the negotiation.},
  keywords = {DTIC, UARC, Virtual Humans},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Lee, Eugene; McNulty, Zachary; Gentle, Alex; Pradhan, Prerak Tusharkumar; Gratch, Jonathan
Examining the impact of emotion and agency on negotiator behavior Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
Abstract | Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{lee_examining_2022,
  title = {Examining the impact of emotion and agency on negotiator behavior},
  author = {Eugene Lee and Zachary McNulty and Alex Gentle and Prerak Tusharkumar Pradhan and Jonathan Gratch},
  url = {https://doi.org/10.1145/3514197.3549673},
  doi = {10.1145/3514197.3549673},
  isbn = {978-1-4503-9248-8},
  year = {2022},
  date = {2022-09-01},
  urldate = {2022-09-27},
  booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
  pages = {1--3},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  series = {IVA '22},
  abstract = {Virtual human expressions can shape user behavior [1, 2, 3], yet in negotiation, findings have been underwhelming. For example, human negotiators can use anger to claim value (i.e., extract concessions) [4], but anger has no effect when exhibited by a virtual human [5]. Other psychological work suggests that emotions can create value (e.g., happy negotiators can better discover tradeoffs across issues that "grow the pie"), but little research has examined how virtual human expressions shape value creation. Here we present an agent architecture and pilot study that examines differences between how the emotional expressions of human and virtual-human opponents shape value claiming and value creation. We replicate the finding that virtual human anger fails to influence value claiming but discover counter-intuitive findings on value creation. We argue these findings highlight the potential for intelligent virtual humans to yield insight into human psychology.},
  keywords = {DTIC, Emotions, UARC, Virtual Humans},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Li, Zongjian; Kim, Kevin; Leeds, Andrew; Mozgai, Sharon
Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, ACM, Faro Portugal, 2022, ISBN: 978-1-4503-9248-8.
Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@inproceedings{hartholt_re-architecting_2022,
  title = {Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development},
  author = {Arno Hartholt and Ed Fast and Zongjian Li and Kevin Kim and Andrew Leeds and Sharon Mozgai},
  url = {https://dl.acm.org/doi/10.1145/3514197.3549671},
  doi = {10.1145/3514197.3549671},
  isbn = {978-1-4503-9248-8},
  year = {2022},
  date = {2022-09-01},
  urldate = {2022-09-15},
  booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
  pages = {1--8},
  publisher = {ACM},
  address = {Faro, Portugal},
  keywords = {DTIC, UARC, VHTL, Virtual Humans},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Georgila, Kallirroi
Comparing Regression Methods for Dialogue System Evaluation on a Richly Annotated Corpus Proceedings Article
In: Proceedings of the 26th Workshop on the Semantics and Pragmatics of Dialogue - Full Papers, 2022.
Abstract | Links | BibTeX | Tags: Natural Language, UARC
@inproceedings{georgila_comparing_2022,
  title = {Comparing Regression Methods for Dialogue System Evaluation on a Richly Annotated Corpus},
  author = {Kallirroi Georgila},
  url = {http://semdial.org/anthology/papers/Z/Z22/Z22-3011/},
  year = {2022},
  date = {2022-08-01},
  urldate = {2023-03-31},
  booktitle = {Proceedings of the 26th Workshop on the Semantics and Pragmatics of Dialogue - Full Papers},
  abstract = {We compare various state-of-the-art regression methods for predicting user ratings of their interaction with a dialogue system using a richly annotated corpus. We vary the size of the training data and, in particular for kernel-based methods, we vary the type of kernel used. Furthermore, we experiment with various domain-independent features, including feature combinations that do not rely on complex annotations. We present detailed results in terms of root mean square error, and Pearson’s r and Spearman’s ρ correlations. Our results show that in many cases Gaussian Process Regression leads to modest but statistically significant gains compared to Support Vector Regression (a strong baseline), and that the type of kernel used matters. The gains are even larger when compared to linear regression. The larger the training data set the higher the gains but for some cases more data may result in over-fitting. Finally, some feature combinations work better than others but overall the best results are obtained when all features are used.},
  keywords = {Natural Language, UARC},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Pynadath, David V.; Gurney, Nikolos; Wang, Ning
Explainable Reinforcement Learning in Human-Robot Teams: The Impact of Decision-Tree Explanations on Transparency Proceedings Article
In: 2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp. 749–756, 2022, (ISSN: 1944-9437).
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@inproceedings{pynadath_explainable_2022,
  title = {Explainable Reinforcement Learning in Human-Robot Teams: The Impact of Decision-Tree Explanations on Transparency},
  author = {David V. Pynadath and Nikolos Gurney and Ning Wang},
  doi = {10.1109/RO-MAN53752.2022.9900608},
  year = {2022},
  date = {2022-08-01},
  booktitle = {2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
  pages = {749--756},
  abstract = {Understanding the decisions of AI-driven systems and the rationale behind such decisions is key to the success of the human-robot team. However, the complexity and the "black-box" nature of many AI algorithms create a barrier for establishing such understanding within their human counterparts. Reinforcement Learning (RL), a machine-learning algorithm based on the simple idea of action-reward mappings, has a rich quantitative representation and a complex iterative reasoning process that present a significant obstacle to human understanding of, for example, how value functions are constructed, how the algorithms update the value functions, and how such updates impact the action/policy chosen by the robot. In this paper, we discuss our work to address this challenge by developing a decision-tree based explainable model for RL to make a robot’s decision-making process more transparent. Set in a human-robot virtual teaming testbed, we conducted a study to assess the impact of the explanations, generated using decision trees, on building transparency, calibrating trust, and improving the overall human-robot team’s performance. We discuss the design of the explainable model and the positive impact of the explanations on outcome measures.},
  note = {ISSN: 1944-9437},
  keywords = {Social Simulation, UARC},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Kuang, Zhengfei; Li, Jiaman; He, Mingming; Wang, Tong; Zhao, Yajie
DenseGAP: Graph-Structured Dense Correspondence Learning with Anchor Points Proceedings Article
In: pp. 542–549, IEEE Computer Society, 2022, ISBN: 978-1-66549-062-7.
Abstract | Links | BibTeX | Tags: VGL
@inproceedings{kuang_densegap_2022,
title = {{DenseGAP}: Graph-Structured Dense Correspondence Learning with Anchor Points},
author = {Zhengfei Kuang and Jiaman Li and Mingming He and Tong Wang and Yajie Zhao},
url = {https://www.computer.org/csdl/proceedings-article/icpr/2022/09956472/1IHpppIuqOc},
doi = {10.1109/ICPR56361.2022.9956472},
isbn = {978-1-66549-062-7},
year = {2022},
date = {2022-08-01},
urldate = {2023-03-31},
booktitle = {2022 26th International Conference on Pattern Recognition ({ICPR})},
pages = {542--549},
publisher = {IEEE Computer Society},
abstract = {Establishing dense correspondence between two images is a fundamental computer vision problem, which is typically tackled by matching local feature descriptors. However, without global awareness, such local features are often insufficient for disambiguating similar regions. And computing the pairwise feature correlation across images is both computation-expensive and memory-intensive. To make the local features aware of the global context and improve their matching accuracy, we introduce DenseGAP, a new solution for efficient Dense correspondence learning with a Graph-structured neural network conditioned on Anchor Points. Specifically, we first propose a graph structure that utilizes anchor points to provide sparse but reliable prior on inter- and intra-image context and propagates them to all image points via directed edges. We also design a graph-structured network to broadcast multi-level contexts via light-weighted message-passing layers and generate high-resolution feature maps at low memory cost. Finally, based on the predicted feature maps, we introduce a coarse-to-fine framework for accurate correspondence prediction using cycle consistency. Our feature descriptors capture both local and global information, thus enabling a continuous feature field for querying arbitrary points at high resolution. Through comprehensive ablative experiments and evaluations on large-scale indoor and outdoor datasets, we demonstrate that our method advances the state-of-the-art of correspondence learning on most benchmarks.},
internal-note = {review: booktitle was missing (required for inproceedings); venue inferred from DOI 10.1109/ICPR56361.2022 — confirm against IEEE record},
keywords = {VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Gurney, Nikolos; Pynadath, David V.
Robots with Theory of Mind for Humans: A Survey Proceedings Article
In: 2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp. 993–1000, 2022, (ISSN: 1944-9437).
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@inproceedings{gurney_robots_2022,
title = {Robots with Theory of Mind for Humans: A Survey},
author = {Nikolos Gurney and David V. Pynadath},
url = {https://ieeexplore.ieee.org/abstract/document/9900662},
doi = {10.1109/RO-MAN53752.2022.9900662},
year = {2022},
date = {2022-08-01},
booktitle = {2022 31st IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
pages = {993--1000},
abstract = {Theory of Mind (ToM) is a psychological construct that captures the ability to ascribe mental states to others and then use those representations for explaining and predicting behavior. We review recent progress in endowing artificially intelligent robots with ToM. A broad array of modeling, experimental, and benchmarking approaches and methods are present in the extant literature. Unlike other domains of human cognition for which research has achieved super-human capabilities, ToM for robots lacks a unified construct and is not consistently benchmarked or validated—realities which possibly hinder progress in this domain. We argue that this is, at least in part, due to inconsistent defining of ToM, no presence of a unifying modeling construct, and the absence of a shared data resource. We believe these would improve the ability of the research community to compare the ToM abilities of different systems. We suggest that establishing a shared definition of ToM, creating a shared data resource that supports consistent benchmarking \& validation, and developing a generalized modeling tool are critical steps towards giving robots ToM capabilities that lay observers will recognize as such.},
note = {ISSN: 1944-9437},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Speggiorin, Alessandro; Dalton, Jeffrey; Leuski, Anton
TaskMAD: A Platform for Multimodal Task-Centric Knowledge-Grounded Conversational Experimentation Proceedings Article
In: Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 3240–3244, ACM, Madrid Spain, 2022, ISBN: 978-1-4503-8732-3.
Links | BibTeX | Tags: Dialogue, DTIC, UARC
@inproceedings{speggiorin_taskmad_2022,
title = {{TaskMAD}: A Platform for Multimodal Task-Centric Knowledge-Grounded Conversational Experimentation},
author = {Alessandro Speggiorin and Jeffrey Dalton and Anton Leuski},
url = {https://dl.acm.org/doi/10.1145/3477495.3531679},
doi = {10.1145/3477495.3531679},
isbn = {978-1-4503-8732-3},
year = {2022},
date = {2022-07-01},
urldate = {2022-09-22},
booktitle = {Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval},
pages = {3240--3244},
publisher = {ACM},
address = {Madrid, Spain},
keywords = {Dialogue, DTIC, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Karkada, Deepthi; Manuvinakurike, Ramesh; Paetzel-Prüsmann, Maike; Georgila, Kallirroi
Strategy-level Entrainment of Dialogue System Users in a Creative Visual Reference Resolution Task Proceedings Article
In: Proceedings of the Thirteenth Language Resources and Evaluation Conference, pp. 5768–5777, European Language Resources Association, Marseille, France, 2022.
Abstract | Links | BibTeX | Tags: Natural Language, UARC
@inproceedings{karkada_strategy-level_2022,
title = {Strategy-level Entrainment of Dialogue System Users in a Creative Visual Reference Resolution Task},
author = {Deepthi Karkada and Ramesh Manuvinakurike and Maike Paetzel-Prüsmann and Kallirroi Georgila},
url = {https://aclanthology.org/2022.lrec-1.620},
year = {2022},
date = {2022-06-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the Thirteenth Language Resources and Evaluation Conference},
pages = {5768--5777},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {In this work, we study entrainment of users playing a creative reference resolution game with an autonomous dialogue system. The language understanding module in our dialogue system leverages annotated human-wizard conversational data, openly available knowledge graphs, and crowd-augmented data. Unlike previous entrainment work, our dialogue system does not attempt to make the human conversation partner adopt lexical items in their dialogue, but rather to adapt their descriptive strategy to one that is simpler to parse for our natural language understanding unit. By deploying this dialogue system through a crowd-sourced study, we show that users indeed entrain on a “strategy-level” without the change of strategy impinging on their creativity. Our work thus presents a promising future research direction for developing dialogue management systems that can strategically influence people's descriptive strategy to ease the system's language understanding in creative tasks.},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Tur, Ada; Traum, David
Comparing Approaches to Language Understanding for Human-Robot Dialogue: An Error Taxonomy and Analysis Proceedings Article
In: Proceedings of the Thirteenth Language Resources and Evaluation Conference, pp. 5813–5820, European Language Resources Association, Marseille, France, 2022.
Abstract | Links | BibTeX | Tags: Natural Language, UARC
@inproceedings{tur_comparing_2022,
title = {Comparing Approaches to Language Understanding for Human-Robot Dialogue: An Error Taxonomy and Analysis},
author = {Ada Tur and David Traum},
url = {https://aclanthology.org/2022.lrec-1.625},
year = {2022},
date = {2022-06-01},
urldate = {2023-02-10},
booktitle = {Proceedings of the Thirteenth Language Resources and Evaluation Conference},
pages = {5813--5820},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {In this paper, we compare two different approaches to language understanding for a human-robot interaction domain in which a human commander gives navigation instructions to a robot. We contrast a relevance-based classifier with a GPT-2 model, using about 2000 input-output examples as training data. With this level of training data, the relevance-based model outperforms the GPT-2 based model 79\% to 8\%. We also present a taxonomy of types of errors made by each model, indicating that they have somewhat different strengths and weaknesses, so we also examine the potential for a combined model.},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Haiwei; Liu, Jiayi; Chen, Weikai; Liu, Shichen; Zhao, Yajie
Exemplar-based Pattern Synthesis with Implicit Periodic Field Network Proceedings Article
In: 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3698–3707, IEEE, New Orleans, LA, USA, 2022, ISBN: 978-1-66546-946-3.
Links | BibTeX | Tags: UARC, VGL
@inproceedings{chen_exemplar-based_2022,
title = {Exemplar-based Pattern Synthesis with Implicit Periodic Field Network},
author = {Haiwei Chen and Jiayi Liu and Weikai Chen and Shichen Liu and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9879904/},
doi = {10.1109/CVPR52688.2022.00369},
isbn = {978-1-66546-946-3},
year = {2022},
date = {2022-06-01},
urldate = {2023-02-10},
booktitle = {2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
pages = {3698--3707},
publisher = {IEEE},
address = {New Orleans, LA, USA},
keywords = {UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
}
Tadimeti, Divya; Georgila, Kallirroi; Traum, David
Evaluation of Off-the-shelf Speech Recognizers on Different Accents in a Dialogue Domain Proceedings Article
In: Proceedings of the Language Resources and Evaluation Conference, pp. 6001–6008, European Language Resources Association, Marseille, France, 2022.
Abstract | Links | BibTeX | Tags: Natural Language, UARC
@inproceedings{tadimeti_evaluation_2022,
title = {Evaluation of Off-the-shelf Speech Recognizers on Different Accents in a Dialogue Domain},
author = {Divya Tadimeti and Kallirroi Georgila and David Traum},
url = {https://aclanthology.org/2022.lrec-1.645},
year = {2022},
date = {2022-06-01},
booktitle = {Proceedings of the Language Resources and Evaluation Conference},
pages = {6001--6008},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {We evaluate several publicly available off-the-shelf (commercial and research) automatic speech recognition (ASR) systems on dialogue agent-directed English speech from speakers with General American vs. non-American accents. Our results show that the performance of the ASR systems for non-American accents is considerably worse than for General American accents. Depending on the recognizer, the absolute difference in performance between General American accents and all non-American accents combined can vary approximately from 2\% to 12\%, with relative differences varying approximately between 16\% and 49\%. This drop in performance becomes even larger when we consider specific categories of non-American accents indicating a need for more diligent collection of and training on non-native English speaker data in order to narrow this performance gap. There are performance differences across ASR systems, and while the same general pattern holds, with more errors for non-American accents, there are some accents for which the best recognizer is different than in the overall case. We expect these results to be useful for dialogue system designers in developing more robust inclusive dialogue systems, and for ASR providers in taking into account performance requirements for different accents.},
keywords = {Natural Language, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Winn, Jade; Kaurloto, Cari; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno
Toward a Semi-Automated Scoping Review of Virtual Human Smiles Proceedings Article
In: Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop, 2022.
Links | BibTeX | Tags: DTIC, UARC, VHTL, Virtual Humans
@inproceedings{mozgai_toward_2022,
  author    = {Sharon Mozgai and Jade Winn and Cari Kaurloto and Andrew Leeds and Dirk Heylen and Arno Hartholt},
  title     = {Toward a Semi-Automated Scoping Review of Virtual Human Smiles},
  booktitle = {Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop},
  year      = {2022},
  date      = {2022-06-01},
  url       = {http://www.lrec-conf.org/proceedings/lrec2022/workshops/SmiLa/index.html},
  keywords  = {DTIC, UARC, VHTL, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Tran, Minh; Soleymani, Mohammad
A Pre-Trained Audio-Visual Transformer for Emotion Recognition Proceedings Article
In: ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 4698–4702, IEEE, Singapore, Singapore, 2022, ISBN: 978-1-66540-540-9.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{tran_pre-trained_2022,
title = {A Pre-Trained Audio-Visual Transformer for Emotion Recognition},
author = {Minh Tran and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9747278/},
doi = {10.1109/ICASSP43922.2022.9747278},
isbn = {978-1-66540-540-9},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-23},
booktitle = {ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {4698--4702},
publisher = {IEEE},
address = {Singapore, Singapore},
keywords = {DTIC, Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhu, Haidong; Zheng, Zhaoheng; Soleymani, Mohammad; Nevatia, Ram
Self-Supervised Learning for Sentiment Analysis via Image-Text Matching Proceedings Article
In: ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1710–1714, IEEE, Singapore, Singapore, 2022, ISBN: 978-1-66540-540-9.
Links | BibTeX | Tags: Emotions
@inproceedings{zhu_self-supervised_2022,
title = {Self-Supervised Learning for Sentiment Analysis via Image-Text Matching},
author = {Haidong Zhu and Zhaoheng Zheng and Mohammad Soleymani and Ram Nevatia},
url = {https://ieeexplore.ieee.org/document/9747819/},
doi = {10.1109/ICASSP43922.2022.9747819},
isbn = {978-1-66540-540-9},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-23},
booktitle = {ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1710--1714},
publisher = {IEEE},
address = {Singapore, Singapore},
keywords = {Emotions},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Leeds, Andrew; Kim, Kevin; Gordon, Andrew; McCullough, Kyle; Ustun, Volkan; Mozgai, Sharon
Demonstrating the Rapid Integration & Development Environment (RIDE): Embodied Conversational Agent (ECA) and Multiagent Capabilities Proceedings Article
In: Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems, pp. 1902–1904, International Foundation for Autonomous Agents and Multiagent Systems, Richland, SC, 2022, ISBN: 978-1-4503-9213-6.
Abstract | BibTeX | Tags: AI, DTIC, Integration Technology, Machine Learning, UARC, VHTL, Virtual Humans
@inproceedings{hartholt_demonstrating_2022,
title = {Demonstrating the Rapid Integration \& Development Environment ({RIDE}): Embodied Conversational Agent ({ECA}) and Multiagent Capabilities},
author = {Arno Hartholt and Ed Fast and Andrew Leeds and Kevin Kim and Andrew Gordon and Kyle McCullough and Volkan Ustun and Sharon Mozgai},
isbn = {978-1-4503-9213-6},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-20},
booktitle = {Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems},
pages = {1902--1904},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Richland, SC},
series = {AAMAS '22},
abstract = {We demonstrate the Rapid Integration \& Development Environment (RIDE), a research and development platform that enables rapid prototyping in support of multiagents and embodied conversational agents. RIDE is based on commodity game engines and includes a flexible architecture, system interoperability, and native support for artificial intelligence and machine learning frameworks.},
keywords = {AI, DTIC, Integration Technology, Machine Learning, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Weeks, Danaan DeNeve; Lindsey, Emily; Davis, Matt; Kennedy, Alana; Nye, Benjamin; Nelson, David; Porter, Molly; Swartout, William; Sinatra, Gale
TAR AR: Researching How Augmented Reality Activities Can Facilitate Visitor Learning at La Brea Tar Pits Proceedings Article
In: GSA, 2022.
Links | BibTeX | Tags: Learning Sciences, UARC
@inproceedings{deneve_weeks_tar_2022,
title = {{TAR AR}: Researching How Augmented Reality Activities Can Facilitate Visitor Learning at {La Brea Tar Pits}},
author = {Danaan DeNeve Weeks and Emily Lindsey and Matt Davis and Alana Kennedy and Benjamin Nye and David Nelson and Molly Porter and William Swartout and Gale Sinatra},
url = {https://gsa.confex.com/gsa/2022CD/webprogram/Paper373373.html},
year = {2022},
date = {2022-03-01},
urldate = {2023-03-31},
publisher = {GSA},
internal-note = {review: required booktitle is missing; URL suggests the GSA Connects 2022 meeting program — TODO confirm},
keywords = {Learning Sciences, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Nye, Benjamin D; Jain, Aditya; Ramirez, Dilan; Core, Mark G; Swartout, William
Designing a Rapid Adaptive Content Registry (RACR) for Adaptive Learning Proceedings Article
In: 2022.
@inproceedings{nye_designing_2022,
title = {Designing a Rapid Adaptive Content Registry ({RACR}) for Adaptive Learning},
author = {Benjamin D Nye and Aditya Jain and Dilan Ramirez and Mark G Core and William Swartout},
year = {2022},
date = {2022-01-01},
abstract = {Despite meta-analyses showing strong learning gains for adaptive learning, few domain areas are covered by adaptive learning. A key reason for this is a content bottleneck: currently, adaptive systems require highly-trained computer scientists and educational specialists to add new content. To explore this issue, the Rapid Adaptive Content Registry (RACR) project is researching a pipeline of interactive tools designed for content managers with little or no training to incorporate content into an adaptive learning ecosystem. This prototype consists of four components:
1) Adaptive Module Registry for composing a set of learning resources and learning objectives (competencies) in an intuitive content-management UI;
2) Rapid Content Analysis Service, which leverages machine learning to analyze web pages (static or dynamic), PDFs, or short videos to generate metadata tags for competencies, estimated duration, and complexity;
3) Preview and Text Extraction interface to review, test, and manually extract text from resources; and
4) Module Simulator to analyze the ability of the available content to adapt to different simulated student patterns (e.g., struggling learner, learner starting with partial mastery, etc.)
This paper outlines the design principles, machine learning performance, and formative usability testing process for this toolkit. For this research, the performance metrics are authoring time, metadata tag quality, deployment reliability (valid content), and personalized pathways (differentiation between different kinds of learners). A comparison of machine learning models based on BERT-S to generate competency tags is presented, which indicates that a general model (not tag-specific) is reasonable for cold-start labels. Initial testing indicates potential usefulness of such a tool, but frustration with delays and limitations for tagging more complex learning resources (e.g., videos, simulations). Strategies and issues for integrating this tool into an enterprise ecosystem are also discussed, such as how specialized tools should integrate with more traditional content management systems.},
internal-note = {review: required booktitle/venue is missing and not recoverable from this record — TODO locate the proceedings},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
1) Adaptive Module Registry for composing a set of learning resources and learning objectives (competencies) in an intuitive content-management UI;
2) Rapid Content Analysis Service, which leverages machine learning to analyze web pages (static or dynamic), PDFs, or short videos to generate metadata tags for competencies, estimated duration, and complexity;
3) Preview and Text Extraction interface to review, test, and manually extract text from resources; and
4) Module Simulator to analyze the ability of the available content to adapt to different simulated student patterns (e.g., struggling learner, learner starting with partial mastery, etc.)
This paper outlines the design principles, machine learning performance, and formative usability testing process for this toolkit. For this research, the performance metrics are authoring time, metadata tag quality, deployment reliability (valid content), and personalized pathways (differentiation between different kinds of learners). A comparison of machine learning models based on BERT-S to generate competency tags is presented, which indicates that a general model (not tag-specific) is reasonable for cold-start labels. Initial testing indicates potential usefulness of such a tool, but frustration with delays and limitations for tagging more complex learning resources (e.g., videos, simulations). Strategies and issues for integrating this tool into an enterprise ecosystem are also discussed, such as how specialized tools should integrate with more traditional content management systems.
Leitner, Maxyn; Greenwald, Eric; Montgomery, Ryan; Wang, Ning
Design and Evaluation of ARIN-561: An Educational Game for Youth Artificial Intelligence Education Proceedings Article
In: Proceedings of the 30th International Conference on Computers in Education, 2022.
Abstract | Links | BibTeX | Tags: AI, UARC
@inproceedings{leitner_design_2022,
title = {Design and Evaluation of {ARIN-561}: An Educational Game for Youth Artificial Intelligence Education},
author = {Maxyn Leitner and Eric Greenwald and Ryan Montgomery and Ning Wang},
url = {https://par.nsf.gov/servlets/purl/10440195},
year = {2022},
date = {2022-01-01},
booktitle = {Proceedings of the 30th International Conference on Computers in Education},
abstract = {Artificial Intelligence (AI) is increasingly vital to our everyday lives. Future generations will not only consume AI, but work with AI-driven tools and contribute to the development of AI. As such, students will need exposure to AI knowledge at a younger age. Despite this need, relatively little is currently known about how to most effectively provide AI education to K-12 (kindergarten through 12th grade) students. In this paper, we discuss the design of an educational game for high-school AI education called ARIN-561. The game centered around two agents – a player character and a companion robot, as the story and learning experience unfold through conversations between the two agents and explorations that bond the two agents. A series of studies were carried out at high schools in the United States to evaluate the efficacy of the game. Results indicate the potential of ARIN-561 to build AI knowledge, especially when students spend more time in the game.},
keywords = {AI, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Talbot, Thomas Brett; Chinara, Chinmay
Open Medical Gesture: An Open-Source Experiment in Naturalistic Physical Interactions for Mixed and Virtual Reality Simulations Proceedings Article
In: Human Factors in Virtual Environments and Game Design, AHFE Open Access, 2022, ISBN: 978-1-958651-26-1, (ISSN: 27710718 Issue: 50).
Abstract | Links | BibTeX | Tags: MedVR, UARC
@inproceedings{talbot_open_2022,
title = {Open Medical Gesture: An Open-Source Experiment in Naturalistic Physical Interactions for Mixed and Virtual Reality Simulations},
author = {Thomas Brett Talbot and Chinmay Chinara},
url = {https://openaccess.cms-conferences.org/publications/book/978-1-958651-26-1/article/978-1-958651-26-1_0},
doi = {10.54941/ahfe1002054},
isbn = {978-1-958651-26-1},
year = {2022},
date = {2022-01-01},
urldate = {2023-04-03},
booktitle = {Human Factors in Virtual Environments and Game Design},
volume = {50},
publisher = {AHFE Open Access},
abstract = {Mixed (MR) and Virtual Reality (VR) simulations are hampered by requirements for hand controllers or attempts to perseverate in use of two-dimensional computer interface paradigms from the 1980s. From our efforts to produce more naturalistic interactions for combat medic training for the military, we have developed an open-source toolkit that enables direct hand controlled responsive interactions that is sensor independent and can function with depth sensing cameras, webcams or sensory gloves. From this research and review of current literature, we have discerned several best approaches for hand-based human computer interactions which provide intuitive, responsive, useful, and low frustration experiences for VR users. The center of an effective gesture system is a universal hand model that can map to inputs from several different kinds of sensors rather than depending on a specific commercial product. Parts of the hand are effectors in simulation space with a physics-based model. Therefore, translational and rotational forces from the hands will impact physical objects in VR which varies based on the mass of the virtual objects. We incorporate computer code w/ objects, calling them “Smart Objects”, which allows such objects to have movement properties and collision detection for expected manipulation. Examples of smart objects include scissors, a ball, a turning knob, a moving lever, or a human figure with moving limbs. Articulation points contain collision detectors and code to assist in expected hand actions. We include a library of more than 40 Smart Objects in the toolkit. Thus, is it possible to throw a ball, hit that ball with a bat, cut a bandage, turn on a ventilator or to lift and inspect a human arm.We mediate the interaction of the hands with virtual objects. Hands often violate the rules of a virtual world simply by passing through objects. One must interpret user intent. 
This can be achieved by introducing stickiness of the hands to objects. If the human’s hands overshoot an object, we place the hand onto that object’s surface unless the hand passes the object by a significant distance. We also make hands and fingers contact an object according to the object’s contours and do not allow fingers to sink into the interior of an object. Haptics, or a sense of physical resistance and tactile sensation from contacting physical objects is a supremely difficult technical challenge and is an expensive pursuit. Our approach ignores true haptics, but we have experimented with an alternative approach, called audio tactile synesthesia where we substitute the sensation of touch for that of sound. The idea is to associate parts of each hand with a tone of a specific frequency upon contacting objects. The attack rate of the sound envelope varies with the velocity of contact and hardness of the object being ‘touched’. Such sounds can feel softer or harder depending on the nature of ‘touch’ being experienced. This substitution technique can provide tactile feedback through indirect, yet still naturalistic means. The artificial intelligence (AI) technique to determine discrete hand gestures and motions within the physical space is a special form of AI called Long Short Term Memory (LSTM). LSTM allows much faster and flexible recognition than other machine learning approaches. LSTM is particularly effective with points in motion. Latency of recognition is very low. In addition to LSTM, we employ other synthetic vision \& object recognition AI to the discrimination of real-world objects. This allows for methods to conduct virtual simulations. For example, it is possible to pick up a virtual syringe and inject a medication into a virtual patient through hand motions. We track the hand points to contact with the virtual syringe. We also detect when the hand is compressing the syringe plunger. 
We could also use virtual medications \& instruments on human actors or manikins, not just on virtual objects. With object recognition AI, we can place a syringe on a tray in the physical world. The human user can pick up the syringe and use it on a virtual patient. Thus, we are able to blend physical and virtual simulation together seamlessly in a highly intuitive and naturalistic manner.The techniques and technologies explained here represent a baseline capability whereby interacting in mixed and virtual reality can now be much more natural and intuitive than it has ever been. We have now passed a threshold where we can do away with game controllers and magnetic trackers for VR. This advancement will contribute to greater adoption of VR solutions. To foster this, our team has committed to freely sharing these technologies for all purposes and at no cost as an open-source tool. We encourage the scientific, research, educational and medical communities to adopt these resources and determine their effectiveness and utilize these tools and practices to grow the body of useful VR applications.},
note = {ISSN: 27710718
Issue: 50},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Gurney, Nikolos; Pynadath, David V.; Wang, Ning
Measuring and Predicting Human Trust in Recommendations from an AI Teammate Proceedings Article
In: Degen, Helmut; Ntoa, Stavroula (Ed.): Artificial Intelligence in HCI, pp. 22–34, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-05643-7.
Abstract | Links | BibTeX | Tags: AI, Social Simulation, UARC
@inproceedings{gurney_measuring_2022,
title = {Measuring and Predicting Human Trust in Recommendations from an {AI} Teammate},
author = {Nikolos Gurney and David V. Pynadath and Ning Wang},
editor = {Helmut Degen and Stavroula Ntoa},
url = {https://link.springer.com/chapter/10.1007/978-3-031-05643-7_2},
doi = {10.1007/978-3-031-05643-7_2},
isbn = {978-3-031-05643-7},
year = {2022},
date = {2022-01-01},
booktitle = {Artificial Intelligence in HCI},
pages = {22--34},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Predicting compliance with AI recommendations and knowing when to intervene are critical facets of human-AI teaming. AIs are typically deployed in settings where their abilities to evaluate decision variables far exceed the abilities of their human counterparts. However, even though AIs excel at weighing multiple issues and computing near optimal solutions with speed and accuracy beyond that of any human, they still make mistakes. Thus, perfect compliance may be undesirable. This means, just as individuals must know when to follow the advice of other people, it is critical for them to know when to adopt the recommendations from their AI. Well-calibrated trust is thought to be a fundamental aspect of this type of knowledge. We compare the ability of a common trust inventory and the ability of a behavioral measure of trust to predict compliance and success in a reconnaissance mission. We interpret the experimental results to suggest that the behavioral measure is a better predictor of overall mission compliance and success. We discuss how this measure could possibly be used in compliance interventions and related open questions.},
keywords = {AI, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Greenwald, Eric; Montgomery, Ryan; Leitner, Maxyn
ARIN-561: An Educational Game for Learning Artificial Intelligence for High-School Students Proceedings Article
In: Rodrigo, Maria Mercedes; Matsuda, Noburu; Cristea, Alexandra I.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium, pp. 528–531, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-11647-6.
Abstract | Links | BibTeX | Tags: AI, UARC
@inproceedings{wang_arin-561_2022,
title = {{ARIN-561}: An Educational Game for Learning Artificial Intelligence for High-School Students},
author = {Ning Wang and Eric Greenwald and Ryan Montgomery and Maxyn Leitner},
editor = {Maria Mercedes Rodrigo and Noburu Matsuda and Alexandra I. Cristea and Vania Dimitrova},
url = {https://link.springer.com/chapter/10.1007/978-3-031-11647-6_108},
doi = {10.1007/978-3-031-11647-6_108},
isbn = {978-3-031-11647-6},
year = {2022},
date = {2022-01-01},
booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium},
pages = {528--531},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Artificial Intelligence (AI) is increasingly vital to our future generations, who will join a workforce that utilizes AI-driven tools and contributes to the advancement of AI. Today’s students will need exposure to AI knowledge at a younger age. Relatively little is currently known about how to most effectively provide AI education to K-12 students. In this paper, we discuss the design and evaluation of an educational game for high-school AI education called ARIN-561. Results from pilot studies indicate the potential of ARIN-561 to build AI knowledge, especially when students spend more time in the game.},
keywords = {AI, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Karpurapu, Abhilash; Jajodia, Aditya; Merchant, Chirag
Toward Charismatic Virtual Agents: How to Animate Your Speech and Be Charismatic Proceedings Article
In: Kurosu, Masaaki (Ed.): Human-Computer Interaction. User Experience and Behavior, pp. 580–590, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-05412-9.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@inproceedings{wang_toward_2022,
title = {Toward Charismatic Virtual Agents: How to Animate Your Speech and Be Charismatic},
author = {Ning Wang and Abhilash Karpurapu and Aditya Jajodia and Chirag Merchant},
editor = {Masaaki Kurosu},
url = {https://link.springer.com/chapter/10.1007/978-3-031-05412-9_39},
doi = {10.1007/978-3-031-05412-9_39},
isbn = {978-3-031-05412-9},
year = {2022},
date = {2022-01-01},
booktitle = {Human-Computer Interaction. User Experience and Behavior},
pages = {580--590},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Charisma is a powerful device of communication and persuasion. Researchers have pinpointed specific behaviors that contribute to the perception of charisma. How can we realize such behaviors in a virtual character? In this paper, we discuss our work in the design of charismatic behavior for a virtual human. We developed a series of verbal charismatic strategies based on the research on charismatic leaders, which was then used to re-write an existing tutorial on the human circulatory system to express charisma. We then collected voice recordings of the tutorial in both charismatic and non-charismatic voices using actors from a crowd-sourcing platform. In this paper, we present the analysis of the charismatic and non-charismatic voice recordings, and discuss what nonverbal behaviors in speeches contribute to perceived charisma. Results can shed light on the synthesis of charismatic speeches for virtual characters.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Gurney, Nikolos; King, Tyler; Miller, John H.
An Experimental Method for Studying Complex Choices Proceedings Article
In: Stephanidis, Constantine; Antona, Margherita; Ntoa, Stavroula; Salvendy, Gavriel (Ed.): HCI International 2022 – Late Breaking Posters, pp. 39–45, Springer Nature Switzerland, Cham, 2022, ISBN: 978-3-031-19679-9.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@inproceedings{gurney_experimental_2022,
title = {An Experimental Method for Studying Complex Choices},
author = {Nikolos Gurney and Tyler King and John H. Miller},
editor = {Constantine Stephanidis and Margherita Antona and Stavroula Ntoa and Gavriel Salvendy},
url = {https://link.springer.com/chapter/10.1007/978-3-031-19679-9_6},
doi = {10.1007/978-3-031-19679-9_6},
isbn = {978-3-031-19679-9},
year = {2022},
date = {2022-01-01},
booktitle = {{HCI} International 2022 -- Late Breaking Posters},
pages = {39--45},
publisher = {Springer Nature Switzerland},
address = {Cham},
series = {Communications in Computer and Information Science},
abstract = {The promise of computational decision aids, from review sites to emerging augmented cognition technology, is the potential for better choice outcomes. This promise is grounded in the notion that we understand human decision processes well enough to design useful interventions. Although researchers have made considerable advances in the understanding of human judgment and decision making, these efforts are mostly based on the analysis of simple, often linear choices. Cumulative Prospect Theory (CPT), a famous explanation for decision making under uncertainty, was developed and validated using binary choice experiments in which options varied on a single dimension. Behavioral science has largely followed this simplified methodology. Here, we introduce an experimental paradigm specifically for studying humans making complex choices that incorporate multiple variables with nonlinear interactions. The task involves tuning dials, each of which controls a different dimension of a nonlinear problem. Initial results show that in such an environment participants demonstrate classic cognitive artifacts, such as anchoring and adjusting, along with falling into exploitive traps that prevent adequate exploration of these complex decisions. Preventing such errors suggest a potentially valuable role for deploying algorithmic decision aids to enhance decision making in complex choices.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chawla, Kushal; Lucas, Gale; May, Jonathan; Gratch, Jonathan
Opponent Modeling in Negotiation Dialogues by Related Data Adaptation Proceedings Article
In: Findings of the Association for Computational Linguistics: NAACL 2022, pp. 661–674, Association for Computational Linguistics, Seattle, United States, 2022.
Links | BibTeX | Tags: DTIC, Social Simulation, UARC
@inproceedings{chawla_opponent_2022,
title = {Opponent Modeling in Negotiation Dialogues by Related Data Adaptation},
author = {Kushal Chawla and Gale Lucas and Jonathan May and Jonathan Gratch},
url = {https://aclanthology.org/2022.findings-naacl.50},
doi = {10.18653/v1/2022.findings-naacl.50},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-26},
booktitle = {Findings of the Association for Computational Linguistics: {NAACL} 2022},
pages = {661--674},
publisher = {Association for Computational Linguistics},
address = {Seattle, United States},
keywords = {DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
DiNinni, Richard; Rizzo, Albert
Sensing Human Signals of Motivation Processes During STEM Tasks Proceedings Article
In: Rodrigo, Maria Mercedes; Matsuda, Noburu; Cristea, Alexandra I.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium, pp. 163–167, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-11647-6.
Abstract | Links | BibTeX | Tags: DTIC, Learning Sciences
@inproceedings{dininni_sensing_2022,
title = {Sensing Human Signals of Motivation Processes During {STEM} Tasks},
author = {Richard DiNinni and Albert Rizzo},
editor = {Maria Mercedes Rodrigo and Noburu Matsuda and Alexandra I. Cristea and Vania Dimitrova},
doi = {10.1007/978-3-031-11647-6_28},
isbn = {978-3-031-11647-6},
year = {2022},
date = {2022-01-01},
booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium},
pages = {163--167},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {This paper outlines the linking of a multi-modal sensing platform with an Intelligent Tutoring System to perceive the motivational state of the learner during STEM tasks. Motivation is a critical element to learning but receives little attention in comparison to strategies related to cognitive processes. The EMPOWER project has developed a novel platform that offers researchers an opportunity to capture a learner’s multi-modal behavioral signals to develop models of motivation problems that can be used to develop best practice strategies for instructional systems.},
keywords = {DTIC, Learning Sciences},
pubstate = {published},
tppubtype = {inproceedings}
}
Talbot, Thomas Brett; Chinara, Chinmay
Open Medical Gesture: An Open-Source Experiment in Naturalistic Physical Interactions for Mixed and Virtual Reality Simulations Proceedings Article
In: 2022.
Abstract | Links | BibTeX | Tags: DTIC, MedVR, MR, VR
@inproceedings{brett_talbot_open_2022,
title = {{Open Medical Gesture}: An Open-Source Experiment in Naturalistic Physical Interactions for Mixed and Virtual Reality Simulations},
author = {Thomas Brett Talbot and Chinmay Chinara},
url = {https://openaccess.cms-conferences.org/#/publications/book/978-1-958651-26-1/article/978-1-958651-26-1_0},
doi = {10.54941/ahfe1002054},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-13},
internal-note = {NOTE(review): required booktitle (and pages) are missing for this @inproceedings; the DOI prefix 10.54941/ahfe indicates an AHFE International 2022 proceedings volume — confirm and fill in},
abstract = {Mixed (MR) and Virtual Reality (VR) simulations are hampered by requirements for hand controllers or attempts to perseverate in use of two-dimensional computer interface paradigms from the 1980s. From our efforts to produce more naturalistic interactions for combat medic training for the military, we have developed an open-source toolkit that enables direct hand controlled responsive interactions that is sensor independent and can function with depth sensing cameras, webcams or sensory gloves. From this research and review of current literature, we have discerned several best approaches for hand-based human computer interactions which provide intuitive, responsive, useful, and low frustration experiences for VR users. The center of an effective gesture system is a universal hand model that can map to inputs from several different kinds of sensors rather than depending on a specific commercial product. Parts of the hand are effectors in simulation space with a physics-based model. Therefore, translational and rotational forces from the hands will impact physical objects in VR which varies based on the mass of the virtual objects. We incorporate computer code w/ objects, calling them “Smart Objects”, which allows such objects to have movement properties and collision detection for expected manipulation. Examples of smart objects include scissors, a ball, a turning knob, a moving lever, or a human figure with moving limbs. Articulation points contain collision detectors and code to assist in expected hand actions. We include a library of more than 40 Smart Objects in the toolkit. Thus, is it possible to throw a ball, hit that ball with a bat, cut a bandage, turn on a ventilator or to lift and inspect a human arm.We mediate the interaction of the hands with virtual objects. Hands often violate the rules of a virtual world simply by passing through objects. One must interpret user intent. 
This can be achieved by introducing stickiness of the hands to objects. If the human’s hands overshoot an object, we place the hand onto that object’s surface unless the hand passes the object by a significant distance. We also make hands and fingers contact an object according to the object’s contours and do not allow fingers to sink into the interior of an object. Haptics, or a sense of physical resistance and tactile sensation from contacting physical objects is a supremely difficult technical challenge and is an expensive pursuit. Our approach ignores true haptics, but we have experimented with an alternative approach, called audio tactile synesthesia where we substitute the sensation of touch for that of sound. The idea is to associate parts of each hand with a tone of a specific frequency upon contacting objects. The attack rate of the sound envelope varies with the velocity of contact and hardness of the object being ‘touched’. Such sounds can feel softer or harder depending on the nature of ‘touch’ being experienced. This substitution technique can provide tactile feedback through indirect, yet still naturalistic means. The artificial intelligence (AI) technique to determine discrete hand gestures and motions within the physical space is a special form of AI called Long Short Term Memory (LSTM). LSTM allows much faster and flexible recognition than other machine learning approaches. LSTM is particularly effective with points in motion. Latency of recognition is very low. In addition to LSTM, we employ other synthetic vision & object recognition AI to the discrimination of real-world objects. This allows for methods to conduct virtual simulations. For example, it is possible to pick up a virtual syringe and inject a medication into a virtual patient through hand motions. We track the hand points to contact with the virtual syringe. We also detect when the hand is compressing the syringe plunger. 
We could also use virtual medications & instruments on human actors or manikins, not just on virtual objects. With object recognition AI, we can place a syringe on a tray in the physical world. The human user can pick up the syringe and use it on a virtual patient. Thus, we are able to blend physical and virtual simulation together seamlessly in a highly intuitive and naturalistic manner.The techniques and technologies explained here represent a baseline capability whereby interacting in mixed and virtual reality can now be much more natural and intuitive than it has ever been. We have now passed a threshold where we can do away with game controllers and magnetic trackers for VR. This advancement will contribute to greater adoption of VR solutions. To foster this, our team has committed to freely sharing these technologies for all purposes and at no cost as an open-source tool. We encourage the scientific, research, educational and medical communities to adopt these resources and determine their effectiveness and utilize these tools and practices to grow the body of useful VR applications.},
keywords = {DTIC, MedVR, MR, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
2021
Tran, Minh; Bradley, Ellen; Matvey, Michelle; Woolley, Joshua; Soleymani, Mohammad
Modeling Dynamics of Facial Behavior for Mental Health Assessment Proceedings Article
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–5, IEEE, Jodhpur, India, 2021, ISBN: 978-1-66543-176-7.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{tran_modeling_2021,
title = {Modeling Dynamics of Facial Behavior for Mental Health Assessment},
author = {Minh Tran and Ellen Bradley and Michelle Matvey and Joshua Woolley and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9666955/},
doi = {10.1109/FG52635.2021.9666955},
isbn = {978-1-66543-176-7},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-23},
booktitle = {2021 16th {IEEE} International Conference on Automatic Face and Gesture Recognition ({FG} 2021)},
pages = {1--5},
publisher = {IEEE},
address = {Jodhpur, India},
keywords = {DTIC, Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}