Publications
Okado, Yuko; Nye, Benjamin D.; Aguirre, Angelica; Swartout, William
Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution Book Section
In: Wang, Ning; Rebolledo-Mendez, Genaro; Matsuda, Noboru; Santos, Olga C.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education, vol. 13916, pp. 189–201, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36271-2 978-3-031-36272-9, (Series Title: Lecture Notes in Computer Science).
@incollection{okado_can_2023,
title = {Can Virtual Agents Scale Up Mentoring?: Insights from College Students’ Experiences Using the CareerFair.ai Platform at an American Hispanic-Serving Institution},
author = {Yuko Okado and Benjamin D. Nye and Angelica Aguirre and William Swartout},
editor = {Ning Wang and Genaro Rebolledo-Mendez and Noboru Matsuda and Olga C. Santos and Vania Dimitrova},
url = {https://link.springer.com/10.1007/978-3-031-36272-9_16},
doi = {10.1007/978-3-031-36272-9_16},
isbn = {978-3-031-36271-2 978-3-031-36272-9},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-23},
booktitle = {Artificial Intelligence in Education},
volume = {13916},
pages = {189–201},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Georgila, Kallirroi
Considerations for Child Speech Synthesis for Dialogue Systems Proceedings Article
In: Los Angeles, CA, 2023.
@inproceedings{georgila_considerations_2023,
title = {Considerations for Child Speech Synthesis for Dialogue Systems},
author = {Kallirroi Georgila},
url = {https://kgeorgila.github.io/publications/georgila_aiaic23.pdf},
year = {2023},
date = {2023-01-01},
address = {Los Angeles, CA},
abstract = {We present a number of important issues for consideration with regard to child speech synthesis for dialogue systems. We specifically discuss challenges in building child synthetic voices compared to adult synthetic voices, synthesizing expressive conversational speech, and evaluating speech synthesis quality.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation Journal Article
In: Proceedings of the Annual Meeting of the Cognitive Science Society, vol. 45, 2023.
@article{hale_risk_2023,
title = {Risk Aversion and Demographic Factors Affect Preference Elicitation and Outcomes of a Salary Negotiation},
author = {James Hale and Peter Kim and Jonathan Gratch},
url = {https://escholarship.org/uc/item/7n01v4f9#main},
year = {2023},
date = {2023-01-01},
journal = {Proceedings of the Annual Meeting of the Cognitive Science Society},
volume = {45},
abstract = {Women and minorities obtain lower salaries when negotiating their employment compensation. Some have suggested that automated negotiation and dispute-resolution technology might address such material inequities. These algorithms elicit the multi-criteria preferences of each side of a dispute and arrive at solutions that are efficient and "provably" fair. In a study that explores the potential benefit of these methods, we highlight cognitive factors that may allow inequities to persist despite these methods. Specifically, risk-averse individuals express lower preferences for salary and as risk-aversion is more common in women and minorities, this translates into a "provably" fair lower salary. While this may reflect actual underlying differences in preferences across groups, individuals may be confounding their preferences for salary with their risk preference (i.e., their fear of not reaching an agreement), such that these groups achieve worse outcomes than they should. We further highlight that methodological choices in how negotiation processes are often studied can obscure the magnitude of this effect.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lu, Shuhong; Yoon, Youngwoo; Feng, Andrew
Co-Speech Gesture Synthesis using Discrete Gesture Token Learning Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
@article{lu_co-speech_2023,
title = {Co-Speech Gesture Synthesis using Discrete Gesture Token Learning},
author = {Shuhong Lu and Youngwoo Yoon and Andrew Feng},
url = {https://arxiv.org/abs/2303.12822},
doi = {10.48550/ARXIV.2303.12822},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-04},
abstract = {Synthesizing realistic co-speech gestures is an important and yet unsolved problem for creating believable motions that can drive a humanoid robot to interact and communicate with human users. Such capability will improve the impressions of the robots by human users and will find applications in education, training, and medical services. One challenge in learning the co-speech gesture model is that there may be multiple viable gesture motions for the same speech utterance. The deterministic regression methods can not resolve the conflicting samples and may produce over-smoothed or damped motions. We proposed a two-stage model to address this uncertainty issue in gesture synthesis by modeling the gesture segments as discrete latent codes. Our method utilizes RQ-VAE in the first stage to learn a discrete codebook consisting of gesture tokens from training data. In the second stage, a two-level autoregressive transformer model is used to learn the prior distribution of residual codes conditioned on input speech context. Since the inference is formulated as token sampling, multiple gesture sequences could be generated given the same speech input using top-k sampling. The quantitative results and the user study showed the proposed method outperforms the previous methods and is able to generate realistic and diverse gesture motions.},
note = {Publisher: arXiv
Version Number: 1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
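The abstract above frames gesture generation as sampling discrete gesture tokens from a learned prior. As a rough, hypothetical sketch of that top-k sampling step only (the prior_model interface, sequence length, and k below are illustrative assumptions, not the authors' code):

import torch
import torch.nn.functional as F

def sample_gesture_tokens(prior_model, speech_context, seq_len=64, k=10):
    # prior_model: hypothetical callable mapping (tokens, speech_context)
    # to next-token logits of shape (batch, vocab_size).
    tokens = torch.zeros(1, 0, dtype=torch.long)      # start from an empty sequence
    for _ in range(seq_len):
        logits = prior_model(tokens, speech_context)  # (1, vocab_size)
        topk_vals, topk_idx = torch.topk(logits, k, dim=-1)  # keep the k best tokens
        probs = F.softmax(topk_vals, dim=-1)          # renormalize over the top-k
        choice = torch.multinomial(probs, num_samples=1)     # sample within the top-k
        tokens = torch.cat([tokens, topk_idx.gather(-1, choice)], dim=1)
    return tokens  # would be decoded to motion by the RQ-VAE decoder

Because the sampling is stochastic, repeated calls yield different token sequences for the same speech input, which is the diversity property the paper emphasizes.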
de Melo, Celso M.; Gratch, Jonathan; Marsella, Stacy; Pelachaud, Catherine
Social Functions of Machine Emotional Expressions Journal Article
In: Proc. IEEE, pp. 1–16, 2023, ISSN: 0018-9219, 1558-2256.
@article{de_melo_social_2023,
title = {Social Functions of Machine Emotional Expressions},
author = {Celso M. de Melo and Jonathan Gratch and Stacy Marsella and Catherine Pelachaud},
url = {https://ieeexplore.ieee.org/document/10093227/},
doi = {10.1109/JPROC.2023.3261137},
issn = {0018-9219, 1558-2256},
year = {2023},
date = {2023-01-01},
urldate = {2023-08-04},
journal = {Proc. IEEE},
pages = {1–16},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Johnson, Emmanuel; Gratch, Jonathan; Gil, Yolanda
Virtual Agent Approach for Teaching the Collaborative Problem Solving Skill of Negotiation Book Section
In: Wang, Ning; Rebolledo-Mendez, Genaro; Dimitrova, Vania; Matsuda, Noboru; Santos, Olga C. (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky, vol. 1831, pp. 530–535, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-36335-1 978-3-031-36336-8, (Series Title: Communications in Computer and Information Science).
@incollection{johnson_virtual_2023,
title = {Virtual Agent Approach for Teaching the Collaborative Problem Solving Skill of Negotiation},
author = {Emmanuel Johnson and Jonathan Gratch and Yolanda Gil},
editor = {Ning Wang and Genaro Rebolledo-Mendez and Vania Dimitrova and Noboru Matsuda and Olga C. Santos},
url = {https://link.springer.com/10.1007/978-3-031-36336-8_82},
doi = {10.1007/978-3-031-36336-8_82},
isbn = {978-3-031-36335-1 978-3-031-36336-8},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners, Doctoral Consortium and Blue Sky},
volume = {1831},
pages = {530–535},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Communications in Computer and Information Science},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Sato, Motoaki; Terada, Kazunori; Gratch, Jonathan
Teaching Reverse Appraisal to Improve Negotiation Skills Journal Article
In: IEEE Trans. Affective Comput., pp. 1–14, 2023, ISSN: 1949-3045, 2371-9850.
@article{sato_teaching_2023,
title = {Teaching Reverse Appraisal to Improve Negotiation Skills},
author = {Motoaki Sato and Kazunori Terada and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/10189838/},
doi = {10.1109/TAFFC.2023.3285931},
issn = {1949-3045, 2371-9850},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
journal = {IEEE Trans. Affective Comput.},
pages = {1–14},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Tak, Ala N.; Gratch, Jonathan
Is GPT a Computational Model of Emotion? Detailed Analysis Journal Article
In: 2023, (Publisher: arXiv Version Number: 1).
@article{tak_is_2023,
title = {Is GPT a Computational Model of Emotion? Detailed Analysis},
author = {Ala N. Tak and Jonathan Gratch},
url = {https://arxiv.org/abs/2307.13779},
doi = {10.48550/ARXIV.2307.13779},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
abstract = {This paper investigates the emotional reasoning abilities of the GPT family of large language models via a component perspective. The paper first examines how the model reasons about autobiographical memories. Second, it systematically varies aspects of situations to impact emotion intensity and coping tendencies. Even without the use of prompt engineering, it is shown that GPT's predictions align significantly with human-provided appraisals and emotional labels. However, GPT faces difficulties predicting emotion intensity and coping responses. GPT-4 showed the highest performance in the initial study but fell short in the second, despite providing superior results after minor prompt engineering. This assessment brings up questions on how to effectively employ the strong points and address the weak areas of these models, particularly concerning response variability. These studies underscore the merits of evaluating models from a componential perspective.},
note = {Publisher: arXiv
Version Number: 1},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
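The component-level probing the abstract describes can be approximated by querying the model for appraisal ratings of a situation. A minimal sketch, assuming the openai Python client (v1+) and an illustrative dimension list drawn from appraisal theory rather than the paper's exact protocol:

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

DIMENSIONS = ["pleasantness", "goal congruence", "controllability", "expectedness"]  # assumed subset

def appraise(situation: str) -> str:
    prompt = (
        f"Situation: {situation}\n"
        f"Rate each appraisal dimension from 1 (low) to 9 (high): {', '.join(DIMENSIONS)}. "
        "Then name the most likely emotion."
    )
    resp = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": prompt}],
        temperature=0,  # dampens the response variability the paper flags
    )
    return resp.choices[0].message.content

print(appraise("A friend cancels plans at the last minute without explanation."))

A structured prompt like this one is closer to the paper's "minor prompt engineering" condition; the abstract notes alignment with human appraisals was observed even without it.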
Wang, Ning; Karpurapu, Abhilash; Jajodia, Aditya; Merchant, Chirag
The Relationship Between Pauses and Emphasis: Implications for Charismatic Speech Synthesis Book Section
In: Kurosu, Masaaki; Hashizume, Ayako (Ed.): Human-Computer Interaction, vol. 14013, pp. 407–418, Springer Nature Switzerland, Cham, 2023, ISBN: 978-3-031-35601-8 978-3-031-35602-5, (Series Title: Lecture Notes in Computer Science).
@incollection{wang_relationship_2023,
title = {The Relationship Between Pauses and Emphasis: Implications for Charismatic Speech Synthesis},
author = {Ning Wang and Abhilash Karpurapu and Aditya Jajodia and Chirag Merchant},
editor = {Masaaki Kurosu and Ayako Hashizume},
url = {https://link.springer.com/10.1007/978-3-031-35602-5_29},
doi = {10.1007/978-3-031-35602-5_29},
isbn = {978-3-031-35601-8 978-3-031-35602-5},
year = {2023},
date = {2023-01-01},
urldate = {2023-09-20},
booktitle = {Human-Computer Interaction},
volume = {14013},
pages = {407–418},
publisher = {Springer Nature Switzerland},
address = {Cham},
note = {Series Title: Lecture Notes in Computer Science},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Difede, JoAnn; Rothbaum, Barbara O.; Rizzo, Albert A.; Wyka, Katarzyna; Spielman, Lisa; Reist, Christopher; Roy, Michael J.; Jovanovic, Tanja; Norrholm, Seth D.; Cukor, Judith; Olden, Megan; Glatt, Charles E.; Lee, Francis S.
Enhancing exposure therapy for posttraumatic stress disorder (PTSD): a randomized clinical trial of virtual reality and imaginal exposure with a cognitive enhancer Journal Article
In: Transl Psychiatry, vol. 12, no. 1, pp. 299, 2022, ISSN: 2158-3188.
@article{difede_enhancing_2022,
title = {Enhancing exposure therapy for posttraumatic stress disorder (PTSD): a randomized clinical trial of virtual reality and imaginal exposure with a cognitive enhancer},
author = {JoAnn Difede and Barbara O. Rothbaum and Albert A. Rizzo and Katarzyna Wyka and Lisa Spielman and Christopher Reist and Michael J. Roy and Tanja Jovanovic and Seth D. Norrholm and Judith Cukor and Megan Olden and Charles E. Glatt and Francis S. Lee},
url = {https://www.nature.com/articles/s41398-022-02066-x},
doi = {10.1038/s41398-022-02066-x},
issn = {2158-3188},
year = {2022},
date = {2022-12-01},
urldate = {2022-09-13},
journal = {Transl Psychiatry},
volume = {12},
number = {1},
pages = {299},
abstract = {Posttraumatic stress disorder (PTSD) is a significant public health issue. Yet, there are limited treatment options and no data to suggest which treatment will work for whom. We tested the efficacy of virtual reality exposure (VRE) or prolonged imaginal exposure (PE), augmented with D-cycloserine (DCS) for combat-related PTSD. As an exploratory aim, we examined whether brain-derived neurotrophic factor (BDNF) and fatty acid amide hydrolase (FAAH) moderated treatment response. Military personnel with PTSD (n = 192) were recruited into a multisite double-blind randomized controlled trial to receive nine weeks of VRE or PE, with DCS or placebo. Primary outcome was the improvement in symptom severity. Randomization was stratified by comorbid depression (MDD) and site. Participants in both VRE and PE showed similar meaningful clinical improvement with no difference between the treatment groups. A significant interaction (p = 0.045) suggested VRE was more effective for depressed participants (CAPS difference M = 3.51 [95% CI 1.17–5.86]).},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Becerik-Gerber, Burçin; Lucas, Gale; Aryal, Ashrant; Awada, Mohamad; Bergés, Mario; Billington, Sarah L.; Boric-Lubecke, Olga; Ghahramani, Ali; Heydarian, Arsalan; Jazizadeh, Farrokh; Liu, Ruying; Zhu, Runhe; Marks, Frederick; Roll, Shawn; Seyedrezaei, Mirmahdi; Taylor, John E.; Hölscher, Christoph; Khan, Azam; Langevin, Jared; Mauriello, Matthew Louis; Murnane, Elizabeth; Noh, Haeyoung; Pritoni, Marco; Schaumann, Davide; Zhao, Jie
Ten questions concerning human-building interaction research for improving the quality of life Journal Article
In: Building and Environment, vol. 226, pp. 109681, 2022, ISSN: 0360-1323.
@article{becerik-gerber_ten_2022,
title = {Ten questions concerning human-building interaction research for improving the quality of life},
author = {Burçin Becerik-Gerber and Gale Lucas and Ashrant Aryal and Mohamad Awada and Mario Bergés and Sarah L. Billington and Olga Boric-Lubecke and Ali Ghahramani and Arsalan Heydarian and Farrokh Jazizadeh and Ruying Liu and Runhe Zhu and Frederick Marks and Shawn Roll and Mirmahdi Seyedrezaei and John E. Taylor and Christoph Hölscher and Azam Khan and Jared Langevin and Matthew Louis Mauriello and Elizabeth Murnane and Haeyoung Noh and Marco Pritoni and Davide Schaumann and Jie Zhao},
url = {https://www.sciencedirect.com/science/article/pii/S0360132322009118},
doi = {10.1016/j.buildenv.2022.109681},
issn = {0360-1323},
year = {2022},
date = {2022-12-01},
urldate = {2023-03-31},
journal = {Building and Environment},
volume = {226},
pages = {109681},
abstract = {This paper seeks to address ten questions that explore the burgeoning field of Human-Building Interaction (HBI), an interdisciplinary field that represents the next frontier in convergent research and innovation to enable the dynamic interplay of human and building interactional intelligence. The field of HBI builds on several existing efforts in historically separate research fields/communities and aims to understand how buildings affect human outcomes and experiences, as well as how humans interact with, adapt to, and affect the built environment and its systems, to support buildings that can learn, enable adaptation, and evolve at different scales to improve the quality-of-life of its users while optimizing resource usage and service availability. Questions were developed by a diverse group of researchers with backgrounds in design, engineering, computer science, social science, and health science. Answers to these questions draw conclusions from what has been achieved to date as reported in the available literature and establish a foundation for future HBI research. This paper aims to encourage interdisciplinary collaborations in HBI research to change the way people interact with and perceive technology within the context of buildings and inform the design, construction, and operation of next-generation, intelligent built environments. In doing so, HBI research can realize a myriad of benefits for human users, including improved productivity, health, cognition, convenience, and comfort, all of which are essential to societal well-being.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Becerik-Gerber, Burcin; Lucas, Gale; Aryal, Ashrant; Awada, Mohamad; Bergés, Mario; Billington, Sarah; Boric-Lubecke, Olga; Ghahramani, Ali; Heydarian, Arsalan; Hölscher, Christoph; Jazizadeh, Farrokh; Khan, Azam; Langevin, Jared; Liu, Ruying; Marks, Frederick; Mauriello, Matthew Louis; Murnane, Elizabeth; Noh, Haeyoung; Pritoni, Marco; Roll, Shawn; Schaumann, Davide; Seyedrezaei, Mirmahdi; Taylor, John E.; Zhao, Jie; Zhu, Runhe
The field of human building interaction for convergent research and innovation for intelligent built environments Journal Article
In: Sci Rep, vol. 12, no. 1, pp. 22092, 2022, ISSN: 2045-2322, (Number: 1 Publisher: Nature Publishing Group).
@article{becerik-gerber_field_2022,
title = {The field of human building interaction for convergent research and innovation for intelligent built environments},
author = {Burcin Becerik-Gerber and Gale Lucas and Ashrant Aryal and Mohamad Awada and Mario Bergés and Sarah Billington and Olga Boric-Lubecke and Ali Ghahramani and Arsalan Heydarian and Christoph Hölscher and Farrokh Jazizadeh and Azam Khan and Jared Langevin and Ruying Liu and Frederick Marks and Matthew Louis Mauriello and Elizabeth Murnane and Haeyoung Noh and Marco Pritoni and Shawn Roll and Davide Schaumann and Mirmahdi Seyedrezaei and John E. Taylor and Jie Zhao and Runhe Zhu},
url = {https://www.nature.com/articles/s41598-022-25047-y},
doi = {10.1038/s41598-022-25047-y},
issn = {2045-2322},
year = {2022},
date = {2022-12-01},
urldate = {2023-03-31},
journal = {Sci Rep},
volume = {12},
number = {1},
pages = {22092},
abstract = {Human-Building Interaction (HBI) is a convergent field that represents the growing complexities of the dynamic interplay between human experience and intelligence within built environments. This paper provides core definitions, research dimensions, and an overall vision for the future of HBI as developed through consensus among 25 interdisciplinary experts in a series of facilitated workshops. Three primary areas contribute to and require attention in HBI research: humans (human experiences, performance, and well-being), buildings (building design and operations), and technologies (sensing, inference, and awareness). Three critical interdisciplinary research domains intersect these areas: control systems and decision making, trust and collaboration, and modeling and simulation. Finally, at the core, it is vital for HBI research to center on and support equity, privacy, and sustainability. Compelling research questions are posed for each primary area, research domain, and core principle. State-of-the-art methods used in HBI studies are discussed, and examples of original research are offered to illustrate opportunities for the advancement of HBI research.},
note = {Number: 1
Publisher: Nature Publishing Group},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Pauw, Lisanne S.; Sauter, Disa A.; van Kleef, Gerben A.; Lucas, Gale M.; Gratch, Jonathan; Fischer, Agneta H.
The avatar will see you now: Support from a virtual human provides socio-emotional benefits Journal Article
In: Computers in Human Behavior, vol. 136, pp. 107368, 2022, ISSN: 0747-5632.
@article{pauw_avatar_2022,
title = {The avatar will see you now: Support from a virtual human provides socio-emotional benefits},
author = {Lisanne S. Pauw and Disa A. Sauter and Gerben A. van Kleef and Gale M. Lucas and Jonathan Gratch and Agneta H. Fischer},
url = {https://linkinghub.elsevier.com/retrieve/pii/S074756322200190X},
doi = {10.1016/j.chb.2022.107368},
issn = {0747-5632},
year = {2022},
date = {2022-11-01},
urldate = {2022-09-28},
journal = {Computers in Human Behavior},
volume = {136},
pages = {107368},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lugrin, Birgit; Pelachaud, Catherine; André, Elisabeth; Aylett, Ruth; Bickmore, Timothy; Breazeal, Cynthia; Broekens, Joost; Dautenhahn, Kerstin; Gratch, Jonathan; Kopp, Stefan; Nadel, Jacqueline; Paiva, Ana; Wykowska, Agnieszka
Challenge Discussion on Socially Interactive Agents: Considerations on Social Interaction, Computational Architectures, Evaluation, and Ethics Book Section
In: The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application, vol. 48, pp. 561–626, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9896-1.
@incollection{lugrin_challenge_2022,
title = {Challenge Discussion on Socially Interactive Agents: Considerations on Social Interaction, Computational Architectures, Evaluation, and Ethics},
author = {Birgit Lugrin and Catherine Pelachaud and Elisabeth André and Ruth Aylett and Timothy Bickmore and Cynthia Breazeal and Joost Broekens and Kerstin Dautenhahn and Jonathan Gratch and Stefan Kopp and Jacqueline Nadel and Ana Paiva and Agnieszka Wykowska},
url = {https://doi.org/10.1145/3563659.3563677},
isbn = {978-1-4503-9896-1},
year = {2022},
date = {2022-11-01},
urldate = {2023-03-31},
booktitle = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application},
volume = {48},
pages = {561–626},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
edition = {1},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Hartholt, Arno; Mozgai, Sharon
Platforms and Tools for SIA Research and Development Book Section
In: The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application, vol. 48, pp. 261–304, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9896-1.
@incollection{hartholt_platforms_2022,
title = {Platforms and Tools for SIA Research and Development},
author = {Arno Hartholt and Sharon Mozgai},
url = {https://doi.org/10.1145/3563659.3563668},
isbn = {978-1-4503-9896-1},
year = {2022},
date = {2022-11-01},
urldate = {2023-03-31},
booktitle = {The Handbook on Socially Interactive Agents: 20 years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics Volume 2: Interactivity, Platforms, Application},
volume = {48},
pages = {261–304},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
edition = {1},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Yin, Yufeng; Xu, Jiashu; Zu, Tianxin; Soleymani, Mohammad
X-Norm: Exchanging Normalization Parameters for Bimodal Fusion Proceedings Article
In: International Conference on Multimodal Interaction, pp. 605–614, ACM, Bengaluru, India, 2022, ISBN: 978-1-4503-9390-4.
@inproceedings{yin_x-norm_2022,
title = {X-Norm: Exchanging Normalization Parameters for Bimodal Fusion},
author = {Yufeng Yin and Jiashu Xu and Tianxin Zu and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3536221.3556581},
doi = {10.1145/3536221.3556581},
isbn = {978-1-4503-9390-4},
year = {2022},
date = {2022-11-01},
urldate = {2023-08-24},
booktitle = {International Conference on Multimodal Interaction},
pages = {605–614},
publisher = {ACM},
address = {Bengaluru, India},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
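Going by the title (no abstract is included above), X-Norm fuses two modalities by exchanging normalization parameters between unimodal streams. A speculative sketch of that idea only, with all module names and sizes invented for illustration, not taken from the paper:

import torch
import torch.nn as nn

class UnimodalBlock(nn.Module):
    def __init__(self, dim=128):
        super().__init__()
        self.linear = nn.Linear(dim, dim)
        self.norm = nn.LayerNorm(dim)  # holds the affine (gamma/beta) parameters to exchange

    def forward(self, x, gamma=None, beta=None):
        h = torch.relu(self.linear(x))
        h = self.norm(h)
        if gamma is not None:  # re-modulate with the *other* modality's parameters
            h = gamma * h + beta
        return h

audio_block, video_block = UnimodalBlock(), UnimodalBlock()
audio_x, video_x = torch.randn(8, 128), torch.randn(8, 128)

# Each stream is scaled/shifted by the other stream's learned normalization
# parameters, a lightweight cross-modal pathway with no feature concatenation.
audio_h = audio_block(audio_x, video_block.norm.weight, video_block.norm.bias)
video_h = video_block(video_x, audio_block.norm.weight, audio_block.norm.bias)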
Lu, Shuhong; Feng, Andrew
The DeepMotion entry to the GENEA Challenge 2022 Proceedings Article
In: International Conference on Multimodal Interaction, pp. 790–796, ACM, Bengaluru, India, 2022, ISBN: 978-1-4503-9390-4.
@inproceedings{lu_deepmotion_2022,
title = {The DeepMotion entry to the GENEA Challenge 2022},
author = {Shuhong Lu and Andrew Feng},
url = {https://dl.acm.org/doi/10.1145/3536221.3558059},
doi = {10.1145/3536221.3558059},
isbn = {978-1-4503-9390-4},
year = {2022},
date = {2022-11-01},
urldate = {2023-08-24},
booktitle = {International Conference on Multimodal Interaction},
pages = {790–796},
publisher = {ACM},
address = {Bengaluru, India},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Feng, Andrew; Shin, Samuel; Yoon, Youngwoo
A Tool for Extracting 3D Avatar-Ready Gesture Animations from Monocular Videos Proceedings Article
In: Proceedings of the 15th ACM SIGGRAPH Conference on Motion, Interaction and Games, pp. 1–7, ACM, Guanajuato, Mexico, 2022, ISBN: 978-1-4503-9888-6.
@inproceedings{feng_tool_2022,
title = {A Tool for Extracting 3D Avatar-Ready Gesture Animations from Monocular Videos},
author = {Andrew Feng and Samuel Shin and Youngwoo Yoon},
url = {https://dl.acm.org/doi/10.1145/3561975.3562953},
doi = {10.1145/3561975.3562953},
isbn = {978-1-4503-9888-6},
year = {2022},
date = {2022-11-01},
urldate = {2023-08-04},
booktitle = {Proceedings of the 15th ACM SIGGRAPH Conference on Motion, Interaction and Games},
pages = {1–7},
publisher = {ACM},
address = {Guanajuato, Mexico},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Fast, Nathanael J.
The power to harm: AI assistants pave the way to unethical behavior Journal Article
In: Current Opinion in Psychology, vol. 47, pp. 101382, 2022, ISSN: 2352-250X.
@article{gratch_power_2022,
title = {The power to harm: AI assistants pave the way to unethical behavior},
author = {Jonathan Gratch and Nathanael J. Fast},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2352250X22001014},
doi = {10.1016/j.copsyc.2022.101382},
issn = {2352-250X},
year = {2022},
date = {2022-10-01},
urldate = {2022-09-28},
journal = {Current Opinion in Psychology},
volume = {47},
pages = {101382},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Fast, Ed; Li, Zongjian; Kim, Kevin; Leeds, Andrew; Mozgai, Sharon
Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, ACM, Faro Portugal, 2022, ISBN: 978-1-4503-9248-8.
@inproceedings{hartholt_re-architecting_2022,
title = {Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development},
author = {Arno Hartholt and Ed Fast and Zongjian Li and Kevin Kim and Andrew Leeds and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3514197.3549671},
doi = {10.1145/3514197.3549671},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-15},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1–8},
publisher = {ACM},
address = {Faro Portugal},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2021
Kawano, Seiya; Yoshino, Koichiro; Traum, David; Nakamura, Satoshi
Dialogue Structure Parsing on Multi-Floor Dialogue Based on Multi-Task Learning Proceedings Article
In: 1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction, pp. 21–29, ISCA, 2021.
@inproceedings{kawano_dialogue_2021,
title = {Dialogue Structure Parsing on Multi-Floor Dialogue Based on Multi-Task Learning},
author = {Seiya Kawano and Koichiro Yoshino and David Traum and Satoshi Nakamura},
url = {http://www.isca-speech.org/archive/RobotDial_2021/abstracts/4.html},
doi = {10.21437/RobotDial.2021-4},
year = {2021},
date = {2021-01-01},
urldate = {2021-04-15},
booktitle = {1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction},
pages = {21–29},
publisher = {ISCA},
abstract = {A multi-floor dialogue consists of multiple sets of dialogue participants, each conversing within their own floor, but also at least one multicommunicating member who is a participant of multiple floors and coordinating each to achieve a shared dialogue goal. The structure of such dialogues can be complex, involving intentional structure and relations that are within or across floors. In this study, we propose a neural dialogue structure parser based on multi-task learning and an attention mechanism on multi-floor dialogues in a collaborative robot navigation domain. Our experimental results show that our proposed model improved the dialogue structure parsing performance more than those of single models, which are trained on each dialogue structure parsing task in multi-floor dialogues.},
keywords = {ARL, Dialogue, DTIC, Natural Language, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
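The multi-task parser in the abstract above shares one encoder across dialogue-structure sub-tasks. A minimal sketch of that shared-encoder/multi-head pattern (the GRU choice, layer sizes, and label counts are assumptions for illustration, not the authors' architecture):

import torch
import torch.nn as nn
import torch.nn.functional as F

class MultiTaskDialogueParser(nn.Module):
    def __init__(self, vocab_size=10000, dim=256, n_link=5, n_rel=8):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, dim)
        self.encoder = nn.GRU(dim, dim, batch_first=True, bidirectional=True)  # shared encoder
        self.link_head = nn.Linear(2 * dim, n_link)  # e.g., within/across-floor links
        self.rel_head = nn.Linear(2 * dim, n_rel)    # e.g., relation types

    def forward(self, ids):
        h, _ = self.encoder(self.embed(ids))  # (batch, seq, 2*dim)
        return self.link_head(h), self.rel_head(h)

model = MultiTaskDialogueParser()
ids = torch.randint(0, 10000, (2, 12))  # 2 dialogues, 12 tokens each (toy data)
link_logits, rel_logits = model(ids)
# Multi-task learning: sum the per-task losses so one encoder serves both heads.
loss = (F.cross_entropy(link_logits.transpose(1, 2), torch.randint(0, 5, (2, 12)))
        + F.cross_entropy(rel_logits.transpose(1, 2), torch.randint(0, 8, (2, 12))))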
Bell, Benjamin; Bennett, Winston Wink; Nye, Benjamin; Kelsey, Elaine
Helping Instructor Pilots Detect and Respond to Engagement Lapses in Simulations Proceedings Article
In: Sottilare, Robert A.; Schwarz, Jessica (Ed.): Adaptive Instructional Systems. Adaptation Strategies and Methods, pp. 3–14, Springer International Publishing, Cham, 2021, ISBN: 978-3-030-77873-6.
@inproceedings{bell_helping_2021,
title = {Helping Instructor Pilots Detect and Respond to Engagement Lapses in Simulations},
author = {Benjamin Bell and Winston Wink Bennett and Benjamin Nye and Elaine Kelsey},
editor = {Robert A. Sottilare and Jessica Schwarz},
url = {https://link.springer.com/chapter/10.1007/978-3-030-77873-6_1},
doi = {10.1007/978-3-030-77873-6_1},
isbn = {978-3-030-77873-6},
year = {2021},
date = {2021-01-01},
booktitle = {Adaptive Instructional Systems. Adaptation Strategies and Methods},
pages = {3–14},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {Adapting training in real time can be challenging for instructors. Real-time simulation can present rapid sequences of events, making it difficult for an instructor to attribute errors or omissions to specific underlying gaps in skills and knowledge. Monitoring multiple students simultaneously imposes additional attentional workload on an instructor. This challenge can be further exacerbated when an instructor’s view of the student is obscured by virtual reality (VR) equipment. To support instructors’ ability to adapt training, Eduworks and USC’s Institute for Creative Technologies are developing machine learning (ML) models that can measure user engagement during training simulations and offer recommendations for restoring lapses in engagement. We have created a system, called the Observational Motivation and Engagement Generalized Appliance (OMEGA), which we tested in the context of a new U.S. Air Force approach to Specialized Undergraduate Pilot Training (SUPT) called Pilot Training Next (PTN). PTN integrates traditional flying sorties with VR-enabled ground-based training devices to achieve training efficiencies, improve readiness, and increase throughput. The virtual environment provides a rich source of raw data that machine learning models can use to associate user activity with user engagement. We created a testbed for data capture to construct the ML models, based on theoretical foundations we developed previously. Our research explores OMEGA’s potential to help alert an instructor pilot (IP) to student distraction by flagging attention and engagement lapses. Our hypothesis is that OMEGA could help an IP adapt learning, and potentially manage multiple students at the same time, with alerts of lapsed attention and recommendations for restoring engagement. To test this hypothesis, we ran pilots through multiple PTN scenarios to create data for training the model. In this paper, we report on work to create machine learning models using three different techniques, and present model performance data using standard machine learning metrics. We discuss the modeling approach used to generate instructor recommendations. Future work will present results from a formative evaluation using instructor pilots. These early findings provide preliminary validation for the use of ML models for learning to detect engagement from the rich data sources characteristic of virtual environments. These findings will be applicable across a broad range of conventional and VR training applications.},
keywords = {Machine Learning, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2020
Hartholt, Arno; Reilly, Adam; Fast, Ed; Mozgai, Sharon
Introducing Canvas: Combining Nonverbal Behavior Generation with User-Generated Content to Rapidly Create Educational Videos Proceedings Article
In: Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents, pp. 1–3, ACM, Virtual Event, Scotland, UK, 2020, ISBN: 978-1-4503-7586-3.
@inproceedings{hartholt_introducing_2020,
title = {Introducing Canvas: Combining Nonverbal Behavior Generation with User-Generated Content to Rapidly Create Educational Videos},
author = {Arno Hartholt and Adam Reilly and Ed Fast and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3383652.3423880},
doi = {10.1145/3383652.3423880},
isbn = {978-1-4503-7586-3},
year = {2020},
date = {2020-10-01},
booktitle = {Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents},
pages = {1–3},
publisher = {ACM},
address = {Virtual Event, Scotland, UK},
abstract = {Rapidly creating educational content that is effective, engaging, and low-cost is a challenge. We present Canvas, a tool for educators that addresses this challenge by enabling the generation of educational video, led by an intelligent virtual agent, that combines rapid nonverbal behavior generation techniques with end-user facing authoring tools. With Canvas, educators can easily produce compelling educational videos with a minimum of investment by leveraging existing content provided by the tool (e.g., characters and environments) while incorporating their own custom content (e.g., images and video clips). Canvas has been delivered to the Smithsonian Science Education Center and is currently being evaluated internally before wider release. We discuss the system, feature set, design process, and lessons learned.},
keywords = {VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale M.; Gratch, Jonathan
Varied Magnitude Favor Exchange in Human-Agent Negotiation Proceedings Article
In: Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents, pp. 1–8, ACM, Virtual Event, Scotland, UK, 2020, ISBN: 978-1-4503-7586-3.
@inproceedings{mell_varied_2020,
title = {Varied Magnitude Favor Exchange in Human-Agent Negotiation},
author = {Johnathan Mell and Gale M. Lucas and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3383652.3423866},
doi = {10.1145/3383652.3423866},
isbn = {978-1-4503-7586-3},
year = {2020},
date = {2020-10-01},
booktitle = {Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents},
pages = {1–8},
publisher = {ACM},
address = {Virtual Event, Scotland, UK},
abstract = {Agents that interact with humans in complex, social tasks need the ability to comprehend as well as employ common social strategies. In negotiation, there is ample evidence of such techniques being used efficaciously in human interchanges. In this work, we demonstrate a new design for socially-aware agents that employ one such technique—favor exchange—in order to gain value when playing against humans. In an online study of a robust, simulated social negotiation task, we show that these agents are effective against real human participants. In particular, we show that agents that ask for favors during the course of a repeated set of negotiations are more successful than those that do not. Additionally, previous work has demonstrated that humans can detect when agents betray them by failing to return favors that were previously promised. By contrast, this work indicates that these betrayal techniques may go largely undetected in complex scenarios.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Brixey, Jacqueline; Traum, David
Masheli: A Choctaw-English bilingual chatbot Book Section
In: Conversational Dialogue Systems for the Next Decade, pp. 41–50, Springer, Switzerland, 2020.
@incollection{brixey_masheli_2020,
title = {Masheli: A Choctaw-English bilingual chatbot},
author = {Jacqueline Brixey and David Traum},
url = {https://link.springer.com/chapter/10.1007/978-981-15-8395-7_4},
year = {2020},
date = {2020-10-01},
booktitle = {Conversational Dialogue Systems for the Next Decade},
pages = {41–50},
publisher = {Springer},
address = {Switzerland},
abstract = {We present the implementation of an autonomous Choctaw-English bilingual chatbot. Choctaw is an American indigenous language. The intended use of the chatbot is for Choctaw language learners to practice conversational skills. The system’s backend is NPCEditor, a response selection program that is trained on linked questions and answers. The chatbot’s answers are stories and conversational utterances in both languages. We experiment with the ability of NPCEditor to appropriately respond to language mixed utterances, and describe a pilot study with Choctaw-English speakers.},
keywords = {ARO-Coop, Natural Language, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Wang, Ning; Pacheco, Luz; Merchant, Chirag; Skistad, Kristian; Jethwani, Aayushi
The Design of Charismatic Behaviors for Virtual Humans Proceedings Article
In: Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents, pp. 1–8, Association for Computing Machinery, New York, NY, USA, 2020, ISBN: 978-1-4503-7586-3.
@inproceedings{wang_design_2020,
title = {The Design of Charismatic Behaviors for Virtual Humans},
author = {Ning Wang and Luz Pacheco and Chirag Merchant and Kristian Skistad and Aayushi Jethwani},
url = {https://doi.org/10.1145/3383652.3423867},
doi = {10.1145/3383652.3423867},
isbn = {978-1-4503-7586-3},
year = {2020},
date = {2020-10-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents},
pages = {1–8},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '20},
abstract = {Charisma is a powerful device of communication and persuasion. Researchers have pinpointed specific behaviors that contribute to the perception of charisma. How can we realize such behaviors in a virtual character? In this paper, we discuss our work in the design of charismatic behavior for a virtual human. We developed a series of verbal and nonverbal (with the focus on voice) charismatic strategies based on the analysis of behaviors of charismatic leaders. We developed scripted speech dialogues with the verbal strategies and recorded the speeches with actors using the nonverbal strategies. The dialogue is further implemented in a virtual human, embedded in a virtual classroom, to give a lecture on the human circulatory system. We conducted a study with the virtual human to assess the impact of charismatic verbal and nonverbal behaviors on perceived charisma. The results show the positive impact of the use of verbal strategies and how the use of voice can influence such impact. The results shed light on the next steps needed to automatically generate charismatic speech, voices, and gestures for virtual characters.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhu, Runhe; Lucas, Gale M.; Becerik-Gerber, Burcin; Southers, Erroll G.
Building preparedness in response to active shooter incidents: Results of focus group interviews Journal Article
In: International Journal of Disaster Risk Reduction, vol. 48, pp. 101617, 2020, ISSN: 2212-4209.
@article{zhu_building_2020,
title = {Building preparedness in response to active shooter incidents: Results of focus group interviews},
author = {Runhe Zhu and Gale M. Lucas and Burcin Becerik-Gerber and Erroll G. Southers},
url = {https://linkinghub.elsevier.com/retrieve/pii/S221242091931427X},
doi = {10.1016/j.ijdrr.2020.101617},
issn = {2212-4209},
year = {2020},
date = {2020-09-01},
journal = {International Journal of Disaster Risk Reduction},
volume = {48},
pages = {101617},
abstract = {Active shooter incidents present an increasing threat to the American society. Many of these incidents occur in building environments, therefore, it is important to consider design and security elements in buildings to decrease the risk of active shooter incidents. This study aims to assess current security countermeasures and identify varying considerations associated with implementing these countermeasures. Fifteen participants, with expertise and experience in a diverse collection of operational and organizational backgrounds, including security, engineering, law enforcement, emergency management and policy making, participated in three focus group interviews. The participants identified a list of countermeasures that have been used for active shooter incidents. Important determinants for the effectiveness of countermeasures include their influence on occupants’ behavior during active shooter incidents, and occupants’ and administrators’ awareness of how to use them effectively. The nature of incidents (e.g., internal vs. external threats), building type (e.g., office buildings vs. school buildings), and occupants (e.g., students of different ages) were also recognized to affect the selection of appropriate countermeasures. The nexus between emergency preparedness and normal operations, and the importance of tradeoffs such as the ones between cost, aesthetics, maintenance needs and the influence on occupants’ daily activities were also discussed. To ensure the effectiveness of countermeasures and improve safety, the participants highlighted the importance of both training and practice, for occupants and administrators (e.g., first responder teams). The interview results suggested that further study of the relationship between security countermeasures and occupants’ and administrators’ responses, as well as efficient training approaches are needed.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Fast, Ed; Reilly, Adam; Whitcup, Wendy; Liewer, Matt; Mozgai, Sharon
Multi-Platform Expansion of the Virtual Human Toolkit: Ubiquitous Conversational Agents Journal Article
In: International Journal of Semantic Computing, vol. 14, no. 03, pp. 315–332, 2020, ISSN: 1793-351X, 1793-7108.
@article{hartholt_multi-platform_2020,
title = {Multi-Platform Expansion of the Virtual Human Toolkit: Ubiquitous Conversational Agents},
author = {Arno Hartholt and Ed Fast and Adam Reilly and Wendy Whitcup and Matt Liewer and Sharon Mozgai},
url = {https://www.worldscientific.com/doi/abs/10.1142/S1793351X20400127},
doi = {10.1142/S1793351X20400127},
issn = {1793-351X, 1793-7108},
year = {2020},
date = {2020-09-01},
journal = {International Journal of Semantic Computing},
volume = {14},
number = {03},
pages = {315–332},
abstract = {We present an extension of the Virtual Human Toolkit to include a range of computing platforms, including mobile, web, Virtual Reality (VR) and Augmented Reality (AR). The Toolkit uses a mix of in-house and commodity technologies to support audio-visual sensing, speech recognition, natural language processing, nonverbal behavior generation and realization, text-to-speech generation and rendering. It has been extended to support computing platforms beyond Windows by leveraging microservices. The resulting framework maintains the modularity of the underlying architecture, allows re-use of both logic and content through cloud services, and is extensible by porting lightweight clients. We present the current state of the framework, discuss how we model and animate our characters, and offer lessons learned through several use cases, including expressive character animation in seated VR, shared space and navigation in room-scale VR, autonomous AI in mobile AR, and real-time user performance feedback leveraging mobile sensors in headset AR.},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gordon, Carla; Georgila, Kallirroi; Yanov, Volodymyr; Traum, David
Towards Personalization of Spoken Dialogue System Communication Strategies Book Section
In: Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 145–160, Springer Singapore, Singapore, 2020, ISBN: 9789811583940 9789811583957.
@incollection{gordon_towards_2020,
title = {Towards Personalization of Spoken Dialogue System Communication Strategies},
author = {Carla Gordon and Kallirroi Georgila and Volodymyr Yanov and David Traum},
url = {http://link.springer.com/10.1007/978-981-15-8395-7_11},
isbn = {9789811583940 9789811583957},
year = {2020},
date = {2020-09-01},
booktitle = {Conversational Dialogue Systems for the Next Decade},
volume = {704},
pages = {145–160},
publisher = {Springer Singapore},
address = {Singapore},
abstract = {This study examines the effects of 3 conversational traits – Register, Explicitness, and Misunderstandings – on user satisfaction and the perception of specific subjective features for Virtual Home Assistant spoken dialogue systems. Eight different system profiles were created, each representing a different combination of these 3 traits. We then utilized a novel Wizard of Oz data collection tool and recruited participants who interacted with the 8 different system profiles, and then rated the systems on 7 subjective features. Surprisingly, we found that systems which made errors were preferred overall, with the statistical analysis revealing error-prone systems were rated higher than systems which made no errors for all 7 of the subjective features rated. There were also some interesting interaction effects between the 3 conversational traits, such as implicit confirmations being preferred for systems employing a “conversational” Register, while explicit confirmations were preferred for systems employing a “formal” Register, even though there was no overall main effect for Explicitness. This experimental framework offers a fine-grained approach to the evaluation of user satisfaction which looks towards the personalization of communication strategies for spoken dialogue systems.},
keywords = {ARO-Coop, Dialogue, Natural Language, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Mell, Johnathan; Lucas, Gale; Mozgai, Sharon; Gratch, Jonathan
The Effects of Experience on Deception in Human-Agent Negotiation Journal Article
In: Journal of Artificial Intelligence Research, vol. 68, pp. 633–660, 2020, ISSN: 1076-9757.
@article{mell_effects_2020,
title = {The Effects of Experience on Deception in Human-Agent Negotiation},
author = {Johnathan Mell and Gale Lucas and Sharon Mozgai and Jonathan Gratch},
url = {https://www.jair.org/index.php/jair/article/view/11924},
doi = {10.1613/jair.1.11924},
issn = {1076-9757},
year = {2020},
date = {2020-08-01},
urldate = {2023-03-31},
journal = {Journal of Artificial Intelligence Research},
volume = {68},
pages = {633–660},
abstract = {Negotiation is the complex social process by which multiple parties come to mutual agreement over a series of issues. As such, it has proven to be a key challenge problem for designing adequately social AIs that can effectively navigate this space. Artificial AI agents that are capable of negotiating must be capable of realizing policies and strategies that govern offer acceptances, offer generation, preference elicitation, and more. But the next generation of agents must also adapt to reflect their users’ experiences.
The best human negotiators tend to have honed their craft through hours of practice and experience. But, not all negotiators agree on which strategic tactics to use, and endorsement of deceptive tactics in particular is a controversial topic for many negotiators. We examine the ways in which deceptive tactics are used and endorsed in non-repeated human negotiation and show that prior experience plays a key role in governing what tactics are seen as acceptable or useful in negotiation. Previous work has indicated that people that negotiate through artificial agent representatives may be more inclined to fairness than those people that negotiate directly. We present a series of three user studies that challenge this initial assumption and expand on this picture by examining the role of past experience.
This work constructs a new scale for measuring endorsement of manipulative negotiation tactics and introduces its use to artificial intelligence research. It continues by presenting the results of a series of three studies that examine how negotiating experience can change what negotiation tactics and strategies humans endorse. Study #1 looks at human endorsement of deceptive techniques based on prior negotiating experience as well as representative effects. Study #2 further characterizes the negativity of prior experience in relation to endorsement of deceptive techniques. Finally, in Study #3, we show that the lessons learned from the empirical observations in Study #1 and #2 can in fact be induced—by designing agents that provide a specific type of negative experience, human endorsement of deception can be predictably manipulated.},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Nye, Benjamin D.; Davis, Dan M.; Rizvi, Sanad Z.; Carr, Kayla; Swartout, William; Thacker, Raj; Shaw, Kenneth
Feasibility and usability of MentorPal, a framework for rapid development of virtual mentors Journal Article
In: Journal of Research on Technology in Education, pp. 1–23, 2020, ISSN: 1539-1523, 1945-0818.
@article{nye_feasibility_2020,
title = {Feasibility and usability of MentorPal, a framework for rapid development of virtual mentors},
author = {Benjamin D. Nye and Dan M. Davis and Sanad Z. Rizvi and Kayla Carr and William Swartout and Raj Thacker and Kenneth Shaw},
url = {https://www.tandfonline.com/doi/full/10.1080/15391523.2020.1771640},
doi = {10.1080/15391523.2020.1771640},
issn = {1539-1523, 1945-0818},
year = {2020},
date = {2020-07-01},
journal = {Journal of Research on Technology in Education},
pages = {1–23},
abstract = {One-on-one mentoring is an effective method to help novices with career development. However, traditional mentoring scales poorly. To address this problem, MentorPal emulates conversations with a panel of virtual mentors based on recordings of real STEM professionals. Students freely ask questions as they might in a career fair, while machine learning algorithms attempt to provide the best answers. MentorPal has developed strategies for the rapid development of new virtual mentors, where training data will be sparse. In a usability study, 31 high school students self-reported a) increased career knowledge and confidence, b) positive ease-of-use, and that c) mentors were helpful (87%) but often did not cover their preferred career (29%). Results demonstrate the feasibility of scalable virtual mentoring, but efficacy studies are needed to evaluate the impact of virtual mentors, particularly for groups with limited STEM opportunities.},
keywords = {Learning Sciences, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
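MentorPal's question answering, per the abstract above, is retrieval over recorded mentor answers. As a toy stand-in only (the paper's actual models are not specified here), TF-IDF nearest-neighbor matching illustrates the pattern; the Q&A pairs below are invented:

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

qa_pairs = [  # hypothetical recorded question/answer pairs for one mentor
    ("What do you do day to day?", "Most mornings I review code, then meet with my team."),
    ("How did you get into engineering?", "I joined a robotics club in high school."),
]

questions = [q for q, _ in qa_pairs]
vectorizer = TfidfVectorizer().fit(questions)
question_vecs = vectorizer.transform(questions)

def best_answer(user_question: str) -> str:
    # Return the recorded answer whose paired question is most similar.
    sims = cosine_similarity(vectorizer.transform([user_question]), question_vecs)
    return qa_pairs[int(sims.argmax())][1]

print(best_answer("what is a typical day like?"))

The sparse-training-data challenge the abstract mentions shows up here directly: with few recorded questions per mentor, the matcher must generalize across paraphrases.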
Mozgai, Sharon; Hartholt, Arno; Akinyemi, Dayo; Kubicek, Katarina; Rizzo, Albert (Skip); Kipke, Michele
Development and Initial Feasibility Testing of the Virtual Research Navigator (VRN): A Public-Facing Agent-Based Educational System for Clinical Research Participation Book Section
In: HCI International 2020 - Posters, vol. 1225, pp. 304–307, Springer International Publishing, Cham, Switzerland, 2020, ISBN: 978-3-030-50728-2 978-3-030-50729-9.
@incollection{mozgai_development_2020,
title = {Development and Initial Feasibility Testing of the Virtual Research Navigator (VRN): A Public-Facing Agent-Based Educational System for Clinical Research Participation},
author = {Sharon Mozgai and Arno Hartholt and Dayo Akinyemi and Katarina Kubicek and Albert (Skip) Rizzo and Michele Kipke},
url = {http://link.springer.com/10.1007/978-3-030-50729-9_43},
doi = {10.1007/978-3-030-50729-9_43},
isbn = {978-3-030-50728-2 978-3-030-50729-9},
year = {2020},
date = {2020-07-01},
booktitle = {HCI International 2020 - Posters},
volume = {1225},
pages = {304–307},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {The overall goal of VRN is to develop a novel technology solution at Children’s Hospital Los Angeles (CHLA) to overcome barriers that prevent the recruitment of diverse patient populations to clinical trials by providing both caregivers and children with an interactive educational experience. This system consists of 1) an intelligent agent called Zippy that users interact with by keyboard or voice input, 2) a series of videos covering topics including Privacy, Consent and Benefits, and 3) a UI that guides users through all available content. Pre- and post-questionnaires assessed willingness to participate in clinical research and found participants either increased or maintained their level of willingness to participate in research studies. Additionally, qualitative analysis of interview data revealed participants rated the overall interaction favorably and believed Zippy to be more fun, less judgmental and less threatening than interacting with a human. Future iterations are in progress based on the user feedback.},
keywords = {MedVR, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Czyzewski, Adam; Dalton, Jeffrey; Leuski, Anton
Agent Dialogue: A Platform for Conversational Information Seeking Experimentation Proceedings Article
In: Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 2121–2124, ACM, Virtual Event, China, 2020, ISBN: 978-1-4503-8016-4.
@inproceedings{czyzewski_agent_2020,
title = {Agent Dialogue: A Platform for Conversational Information Seeking Experimentation},
author = {Adam Czyzewski and Jeffrey Dalton and Anton Leuski},
url = {https://dl.acm.org/doi/10.1145/3397271.3401397},
doi = {10.1145/3397271.3401397},
isbn = {978-1-4503-8016-4},
year = {2020},
date = {2020-07-01},
booktitle = {Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval},
pages = {2121–2124},
publisher = {ACM},
address = {Virtual Event China},
abstract = {Conversational Information Seeking (CIS) is an emerging area of Information Retrieval focused on interactive search systems. As a result there is a need for new benchmark datasets and tools to enable their creation. In this demo we present the Agent Dialogue (AD) platform, an open-source system developed for researchers to perform Wizard-of-Oz CIS experiments. AD is a scalable cloud-native platform developed with Docker and Kubernetes with a flexible and modular micro-service architecture built on production-grade state-of-the-art open-source tools (Kubernetes, gRPC streaming, React, and Firebase). It supports varied front-ends and has the ability to interface with multiple existing agent systems, including Google Assistant and open-source search libraries. It includes support for centralized structured logging as well as offline relevance annotation.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Brixey, Jacqueline; Artstein, Ron
ChoCo: a multimodal corpus of the Choctaw language Journal Article
In: Language Resources and Evaluation, 2020, ISSN: 1574-020X, 1574-0218.
Abstract | Links | BibTeX | Tags: ARO-Coop, UARC, Virtual Humans
@article{brixey_choco_2020,
title = {ChoCo: a multimodal corpus of the Choctaw language},
author = {Jacqueline Brixey and Ron Artstein},
url = {http://link.springer.com/10.1007/s10579-020-09494-5},
doi = {10.1007/s10579-020-09494-5},
issn = {1574-020X, 1574-0218},
year = {2020},
date = {2020-07-01},
journal = {Language Resources and Evaluation},
abstract = {This article presents a general use corpus for Choctaw, an American indigenous language (ISO 639-2: cho, endonym: Chahta). The corpus contains audio, video, and text resources, with many texts also translated in English. The Oklahoma Choctaw and the Mississippi Choctaw variants of the language are represented in the corpus. The data set provides documentation support for this threatened language, and allows researchers and language teachers access to a diverse collection of resources.},
keywords = {ARO-Coop, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Rayatdoost, Soheil; Rudrauf, David; Soleymani, Mohammad
Expression-Guided EEG Representation Learning for Emotion Recognition Proceedings Article
In: Proceedings of the ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3222–3226, IEEE, Barcelona, Spain, 2020, ISBN: 978-1-5090-6631-5.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{rayatdoost_expression-guided_2020,
title = {Expression-Guided EEG Representation Learning for Emotion Recognition},
author = {Soheil Rayatdoost and David Rudrauf and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9053004/},
doi = {10.1109/ICASSP40776.2020.9053004},
isbn = {978-1-5090-6631-5},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {3222–3226},
publisher = {IEEE},
address = {Barcelona, Spain},
abstract = {Learning a joint and coordinated representation between different modalities can improve multimodal emotion recognition. In this paper, we propose a deep representation learning approach for emotion recognition from electroencephalogram (EEG) signals guided by facial electromyogram (EMG) and electrooculogram (EOG) signals. We recorded EEG, EMG and EOG signals from 60 participants who watched 40 short videos and self-reported their emotions. A cross-modal encoder that jointly learns the features extracted from facial and ocular expressions and EEG responses was designed and evaluated on our recorded data and MAHOB-HCI, a publicly available database. We demonstrate that the proposed representation is able to improve emotion recognition performance. We also show that the learned representation can be transferred to a different database without EMG and EOG and achieve superior performance. Methods that fuse behavioral and neural responses can be deployed in wearable emotion recognition solutions, practical in situations in which computer vision expression recognition is not feasible.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
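A minimal sketch of the cross-modal training idea described in the abstract above, for orientation only: the two-branch encoder, feature dimensions, and alignment loss are illustrative assumptions, not the authors' architecture. An EEG encoder is trained so that its embedding both predicts the emotion label and stays close to an expression (EMG/EOG) embedding; at transfer time the EEG branch can be used on its own, mirroring the cross-database setting described above.

# Illustrative sketch (not the paper's code): expression-guided EEG encoding.
import torch
import torch.nn as nn

class CrossModalEncoder(nn.Module):
    def __init__(self, eeg_dim=128, exp_dim=32, latent_dim=64, n_classes=3):
        super().__init__()
        self.eeg_enc = nn.Sequential(nn.Linear(eeg_dim, latent_dim), nn.ReLU(),
                                     nn.Linear(latent_dim, latent_dim))
        self.exp_enc = nn.Sequential(nn.Linear(exp_dim, latent_dim), nn.ReLU(),
                                     nn.Linear(latent_dim, latent_dim))
        self.clf = nn.Linear(latent_dim, n_classes)

    def forward(self, eeg, exp=None):
        z_eeg = self.eeg_enc(eeg)                    # shared latent space
        z_exp = self.exp_enc(exp) if exp is not None else None
        return self.clf(z_eeg), z_eeg, z_exp

model = CrossModalEncoder()
xent, align = nn.CrossEntropyLoss(), nn.MSELoss()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)

eeg = torch.randn(16, 128)                           # stand-in EEG features
exp = torch.randn(16, 32)                            # stand-in EMG/EOG features
y = torch.randint(0, 3, (16,))                       # self-reported emotion labels

logits, z_eeg, z_exp = model(eeg, exp)
loss = xent(logits, y) + align(z_eeg, z_exp)         # classify and align modalities
opt.zero_grad(); loss.backward(); opt.step()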
Bonial, Claire; Donatelli, Lucia; Abrams, Mitchell; Lukin, Stephanie M; Tratz, Stephen; Marge, Matthew; Artstein, Ron; Traum, David; Voss, Clare R
Dialogue-AMR: Abstract Meaning Representation for Dialogue Proceedings Article
In: Proceedings of the 12th Language Resources and Evaluation Conference, pp. 12, European Language Resources Association, Marseille, France, 2020.
Abstract | Links | BibTeX | Tags: ARL, ARO-Coop, DoD, UARC, Virtual Humans
@inproceedings{bonial_dialogue-amr_2020,
title = {Dialogue-AMR: Abstract Meaning Representation for Dialogue},
author = {Claire Bonial and Lucia Donatelli and Mitchell Abrams and Stephanie M Lukin and Stephen Tratz and Matthew Marge and Ron Artstein and David Traum and Clare R Voss},
url = {https://www.aclweb.org/anthology/2020.lrec-1.86/},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 12th Language Resources and Evaluation Conference},
pages = {12},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {This paper describes a schema that enriches Abstract Meaning Representation (AMR) in order to provide a semantic representation for facilitating Natural Language Understanding (NLU) in dialogue systems. AMR offers a valuable level of abstraction of the propositional content of an utterance; however, it does not capture the illocutionary force or speaker’s intended contribution in the broader dialogue context (e.g., make a request or ask a question), nor does it capture tense or aspect. We explore dialogue in the domain of human-robot interaction, where a conversational robot is engaged in search and navigation tasks with a human partner. To address the limitations of standard AMR, we develop an inventory of speech acts suitable for our domain, and present “Dialogue-AMR”, an enhanced AMR that represents not only the content of an utterance, but the illocutionary force behind it, as well as tense and aspect. To showcase the coverage of the schema, we use both manual and automatic methods to construct the “DialAMR” corpus—a corpus of human-robot dialogue annotated with standard AMR and our enriched Dialogue-AMR schema. Our automated methods can be used to incorporate AMR into a larger NLU pipeline supporting human-robot dialogue.},
keywords = {ARL, ARO-Coop, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Alavi, Seyed Hossein; Leuski, Anton; Traum, David
Which Model Should We Use for a Real-World Conversational Dialogue System? A Cross-Language Relevance Model or a Deep Neural Net? Proceedings Article
In: Proceedings of the 12th Language Resources and Evaluation Conference, pp. 735–742, European Language Resources Association, Marseille, France, 2020.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@inproceedings{alavi_which_2020,
title = {Which Model Should We Use for a Real-World Conversational Dialogue System? A Cross-Language Relevance Model or a Deep Neural Net?},
author = {Seyed Hossein Alavi and Anton Leuski and David Traum},
url = {https://www.aclweb.org/anthology/2020.lrec-1.92/},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 12th Language Resources and Evaluation Conference},
pages = {735–742},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {We compare two models for corpus-based selection of dialogue responses: one based on cross-language relevance and one based on a cross-language LSTM. Each model is tested on multiple corpora, collected from two different types of dialogue source material. Results show that while the LSTM model performs adequately on a very large corpus (millions of utterances), its performance is dominated by the cross-language relevance model for a more moderate-sized corpus (tens of thousands of utterances).},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
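To make "corpus-based selection of dialogue responses" concrete, here is a toy baseline under stated assumptions: it is neither the cross-language relevance model nor the LSTM from the paper, just a TF-IDF ranker over invented question-response pairs that returns the response whose training question best matches the new utterance.

# Toy corpus-based response selection (placeholder data, illustrative only).
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

pairs = [("what is your name", "I'm the guide agent."),
         ("where is the exit", "The exit is behind you."),
         ("can you help me", "Of course, what do you need?")]
questions, responses = zip(*pairs)

vec = TfidfVectorizer().fit(questions)

def select_response(utterance: str) -> str:
    # Rank stored responses by similarity between the new utterance
    # and the training questions they originally answered.
    sims = cosine_similarity(vec.transform([utterance]), vec.transform(questions))
    return responses[sims.argmax()]

print(select_response("tell me your name"))  # -> "I'm the guide agent."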
Lei, Su; Stefanov, Kalin; Gratch, Jonathan
Emotion or expressivity? An automated analysis of nonverbal perception in a social dilemma Proceedings Article
In: Proceedings of the 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020), pp. 8, IEEE, Buenos Aires, Argentina, 2020.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@inproceedings{lei_emotion_2020,
title = {Emotion or expressivity? An automated analysis of nonverbal perception in a social dilemma},
author = {Su Lei and Kalin Stefanov and Jonathan Gratch},
url = {https://www.computer.org/csdl/proceedings-article/fg/2020/307900a770/1kecIWT5wmA},
doi = {10.1109/FG47880.2020.00123},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020)},
pages = {8},
publisher = {IEEE},
address = {Buenos Aires, Argentina},
abstract = {An extensive body of research has examined how specific emotional expressions shape social perceptions and social decisions, yet recent scholarship in emotion research has raised questions about the validity of emotion as a construct. In this article, we contrast the value of measuring emotional expressions with the more general construct of expressivity (in the sense of conveying a thought or emotion through any nonverbal behavior) and develop models that can automatically extract perceived expressivity from videos. Although less extensive, a solid body of research has shown expressivity to be an important element when studying interpersonal perception, particularly in psychiatric contexts. Here we examine the role expressivity plays in predicting social perceptions and decisions in the context of a social dilemma. We show that perceivers use more than facial expressions when making judgments of expressivity and see these expressions as conveying thoughts as well as emotions (although facial expressions and emotional attributions explain most of the variance in these judgments). We next show that expressivity can be predicted with high accuracy using Lasso and random forests. Our analysis shows that features related to motion dynamics are particularly important for modeling these judgments. We also show that learned models of expressivity have value in recognizing important aspects of a social situation. First, we revisit a previously published finding which showed that smile intensity was associated with the unexpectedness of outcomes in social dilemmas; instead, we show that expressivity is a better predictor (and explanation) of this finding. Second, we provide preliminary evidence that expressivity is useful for identifying “moments of interest” in a video sequence.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
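Since the abstract names Lasso and random forests as the models used to predict expressivity ratings, the following scikit-learn sketch shows that style of analysis on synthetic stand-in data; the features, ratings, and resulting scores are assumptions for illustration, not the study's pipeline.

# Illustrative only: predict perceived-expressivity ratings from nonverbal
# features (e.g., motion dynamics) with Lasso and a random forest.
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LassoCV
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 40))   # per-video features (AUs, head motion, ...)
y = X[:, :5].sum(axis=1) + rng.normal(scale=0.5, size=200)  # toy ratings

for name, model in [("lasso", LassoCV(cv=5)),
                    ("random forest", RandomForestRegressor(n_estimators=200,
                                                            random_state=0))]:
    r2 = cross_val_score(model, X, y, cv=5, scoring="r2").mean()
    print(f"{name}: mean cross-validated R^2 = {r2:.2f}")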
Mozgai, Sharon; Hartholt, Arno; Leeds, Andrew; Rizzo, Albert ‘Skip’
Iterative Participatory Design for VRET Domain Transfer: From Combat Exposure to Military Sexual Trauma Proceedings Article
In: Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems, pp. 8, ACM, Honolulu, HI, 2020.
Abstract | Links | BibTeX | Tags: MedVR, VHTL, Virtual Humans
@inproceedings{mozgai_iterative_2020,
title = {Iterative Participatory Design for VRET Domain Transfer: From Combat Exposure to Military Sexual Trauma},
author = {Sharon Mozgai and Arno Hartholt and Andrew Leeds and Albert ‘Skip’ Rizzo},
url = {https://dl.acm.org/doi/abs/10.1145/3334480.3375219},
doi = {10.1145/3334480.3375219},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems},
pages = {8},
publisher = {ACM},
address = {Honolulu, HI},
abstract = {This case study describes the expansion of the BRAVEMIND virtual reality exposure therapy (VRET) system from the domain of combat-related posttraumatic stress disorder (PTSD) to the domain of PTSD due to Military Sexual Trauma (MST). As VRET continues to demonstrate efficacy in treating PTSD across multiple trauma types and anxiety disorders, adapting existing systems and content to new domains while simultaneously maintaining clinical integrity is becoming a high priority. To develop BRAVEMIND-MST we engaged in an iterative participatory design process with psychologists, engineers, and artists. This first-person account of our collaborative development process focuses on three key areas (1) VR Environment, (2) User-Avatar State, and (3) Events, while detailing the challenges we encountered and lessons learned. This process culminated in eight design guidelines as a first-step in defining a VRET domain transfer methodology.},
keywords = {MedVR, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Leuski, Anton; Yanov, Volodymyr; Traum, David
Human swarm interaction using plays, audibles, and a virtual spokesperson Proceedings Article
In: Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II, pp. 40, SPIE, Online Only, United States, 2020, ISBN: 978-1-5106-3603-3 978-1-5106-3604-0.
Abstract | Links | BibTeX | Tags: ARL, DoD, MxR, UARC, Virtual Humans
@inproceedings{chaffey_human_2020,
title = {Human swarm interaction using plays, audibles, and a virtual spokesperson},
author = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and Anton Leuski and Volodymyr Yanov and David Traum},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/11413/2557573/Human-swarm-interaction-using-plays-audibles-and-a-virtual-spokesperson/10.1117/12.2557573.full},
doi = {10.1117/12.2557573},
isbn = {978-1-5106-3603-3 978-1-5106-3604-0},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II},
pages = {40},
publisher = {SPIE},
address = {Online Only, United States},
abstract = {This study explores two hypotheses about human-agent teaming: 1. Real-time coordination among a large set of autonomous robots can be achieved using predefined “plays” which define how to execute a task, and “audibles” which modify the play on the fly; 2. A spokesperson agent can serve as a representative for a group of robots, relaying information between the robots and human teammates. These hypotheses are tested in a simulated game environment: a human participant leads a search-and-rescue operation to evacuate a town threatened by an approaching wildfire, with the object of saving as many lives as possible. The participant communicates verbally with a virtual agent controlling a team of ten aerial robots and one ground vehicle, while observing a live map display with real-time location of the fire and identified survivors. Since full automation is not currently possible, two human controllers control the agent’s speech and actions, and input parameters to the robots, which then operate autonomously until the parameters are changed. Designated plays include monitoring the spread of fire, searching for survivors, broadcasting warnings, guiding residents to safety, and sending the rescue vehicle. A successful evacuation of all the residents requires personal intervention in some cases (e.g., stubborn residents) while delegating other responsibilities to the spokesperson agent and robots, all in a rapidly changing scene. The study records the participants’ verbal and nonverbal behavior in order to identify strategies people use when communicating with robotic swarms, and to collect data for eventual automation.},
keywords = {ARL, DoD, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Bellas, Alexandria; Perrin, Stefawn; Malone, Brandon; Rogers, Kaytlin; Lucas, Gale; Phillips, Elizabeth; Tossell, Chad; de Visser, Ewart
Rapport Building with Social Robots as a Method for Improving Mission Debriefing in Human-Robot Teams Proceedings Article
In: Proceedings of the 2020 Systems and Information Engineering Design Symposium (SIEDS), pp. 160–163, IEEE, Charlottesville, VA, USA, 2020, ISBN: 978-1-72817-145-6.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@inproceedings{bellas_rapport_2020,
title = {Rapport Building with Social Robots as a Method for Improving Mission Debriefing in Human-Robot Teams},
author = {Alexandria Bellas and Stefawn Perrin and Brandon Malone and Kaytlin Rogers and Gale Lucas and Elizabeth Phillips and Chad Tossell and Ewart de Visser},
url = {https://ieeexplore.ieee.org/document/9106643/},
doi = {10.1109/SIEDS49339.2020.9106643},
isbn = {978-1-72817-145-6},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of the 2020 Systems and Information Engineering Design Symposium (SIEDS)},
pages = {160–163},
publisher = {IEEE},
address = {Charlottesville, VA, USA},
abstract = {Conflicts may arise at any time during military debriefing meetings, especially in high intensity deployed settings. When such conflicts arise, it takes time to get everyone back into a receptive state of mind so that they engage in reflective discussion rather than unproductive arguing. It has been proposed by some that the use of social robots equipped with social abilities such as emotion regulation through rapport building may help to deescalate these situations to facilitate critical operational decisions. However, in military settings, the same AI agent used in the pre-brief of a mission may not be the same one used in the debrief. The purpose of this study was to determine whether a brief rapport-building session with a social robot could create a connection between a human and a robot agent, and whether consistency in the embodiment of the robot agent was necessary for maintaining this connection once formed. We report the results of a pilot study conducted at the United States Air Force Academy which simulated a military mission (i.e., Gravity and Strike). Participants’ connection with the agent, sense of trust, and overall likeability revealed that early rapport building can be beneficial for military missions.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert "Skip"
An Adaptive Agent-Based Interface for Personalized Health Interventions Proceedings Article
In: Proceedings of the 25th International Conference on Intelligent User Interfaces Companion, pp. 118–119, ACM, Cagliari, Italy, 2020, ISBN: 978-1-4503-7513-9.
Abstract | Links | BibTeX | Tags: MedVR, UARC, VHTL, Virtual Humans
@inproceedings{mozgai_adaptive_2020,
title = {An Adaptive Agent-Based Interface for Personalized Health Interventions},
author = {Sharon Mozgai and Arno Hartholt and Albert "Skip" Rizzo},
url = {https://dl.acm.org/doi/10.1145/3379336.3381467},
doi = {10.1145/3379336.3381467},
isbn = {978-1-4503-7513-9},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces Companion},
pages = {118–119},
publisher = {ACM},
address = {Cagliari Italy},
abstract = {This demo introduces a novel mHealth application with an agent-based interface designed to collect multimodal data with passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin) as well as through user self-report. This mHealth application delivers personalized and adaptive multimedia content via a smartphone application specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health via novel adaptive logic-based algorithms while employing behavior change techniques (e.g., goal-setting, barrier identification, etc.). A virtual human coach leads all interactions to improve adherence.},
keywords = {MedVR, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert
The Passive Sensing Agent: A Multimodal Adaptive mHealth Application Proceedings Article
In: Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops), pp. 1–3, IEEE, Austin, TX, USA, 2020, ISBN: 978-1-72814-716-1.
Abstract | Links | BibTeX | Tags: MedVR, UARC, VHTL, Virtual Humans
@inproceedings{mozgai_passive_2020,
title = {The Passive Sensing Agent: A Multimodal Adaptive mHealth Application},
author = {Sharon Mozgai and Arno Hartholt and Albert Rizzo},
url = {https://ieeexplore.ieee.org/document/9156177/},
doi = {10.1109/PerComWorkshops48775.2020.9156177},
isbn = {978-1-72814-716-1},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)},
pages = {1–3},
publisher = {IEEE},
address = {Austin, TX, USA},
abstract = {We are demoing the Passive Sensing Agent (PSA), an mHealth virtual human coach that collects multimodal data through passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin). This virtual human interface delivers adaptive multimedia content via a smartphone application that is specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health. Initially developed for the military, the PSA delivers health interventions (e.g., educational exercises, physical challenges, and performance feedback) matched to the individual user via novel adaptive logic-based algorithms while employing various behavior change techniques (e.g., goal-setting, barrier identification, rewards, modeling, etc.). A virtual human coach leads all interactions including the first-time user experience and the brief daily sessions. All interactions were specifically designed to engage and motivate the user while continuously collecting data on their cognitive, emotional, and physical fitness. This multi-component application is integrated and deployed on an iPhone and Apple Watch prototype; a civilian version is currently in development.},
keywords = {MedVR, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pilly, Praveen K.; Skorheim, Steven W.; Hubbard, Ryan J.; Ketz, Nicholas A.; Roach, Shane M.; Lerner, Itamar; Jones, Aaron P.; Robert, Bradley; Bryant, Natalie B.; Hartholt, Arno; Mullins, Teagan S.; Choe, Jaehoon; Clark, Vincent P.; Howard, Michael D.
One-Shot Tagging During Wake and Cueing During Sleep With Spatiotemporal Patterns of Transcranial Electrical Stimulation Can Boost Long-Term Metamemory of Individual Episodes in Humans Journal Article
In: Frontiers in Neuroscience, vol. 13, pp. 1416, 2020, ISSN: 1662-453X.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{pilly_one-shot_2020,
title = {One-Shot Tagging During Wake and Cueing During Sleep With Spatiotemporal Patterns of Transcranial Electrical Stimulation Can Boost Long-Term Metamemory of Individual Episodes in Humans},
author = {Praveen K. Pilly and Steven W. Skorheim and Ryan J. Hubbard and Nicholas A. Ketz and Shane M. Roach and Itamar Lerner and Aaron P. Jones and Bradley Robert and Natalie B. Bryant and Arno Hartholt and Teagan S. Mullins and Jaehoon Choe and Vincent P. Clark and Michael D. Howard},
url = {https://www.frontiersin.org/article/10.3389/fnins.2019.01416/full},
doi = {10.3389/fnins.2019.01416},
issn = {1662-453X},
year = {2020},
date = {2020-01-01},
journal = {Frontiers in Neuroscience},
volume = {13},
pages = {1416},
abstract = {Targeted memory reactivation (TMR) during slow-wave oscillations (SWOs) in sleep has been demonstrated with sensory cues to achieve about 5–12% improvement in post-nap memory performance on simple laboratory tasks. But prior work has not yet addressed the one-shot aspect of episodic memory acquisition, or dealt with the presence of interference from ambient environmental cues in real-world settings. Further, TMR with sensory cues may not be scalable to the multitude of experiences over one’s lifetime. We designed a novel non-invasive non-sensory paradigm that tags one-shot experiences of minute-long naturalistic episodes in immersive virtual reality (VR) with unique spatiotemporal amplitude-modulated patterns (STAMPs) of transcranial electrical stimulation (tES). In particular, we demonstrated that these STAMPs can be reapplied as brief pulses during SWOs in sleep to achieve about 10–20% improvement in the metamemory of targeted episodes compared to the control episodes at 48 hours after initial viewing. We found that STAMPs can not only facilitate but also impair metamemory for the targeted episodes based on an interaction between presleep metamemory and the number of STAMP applications during sleep. Overnight metamemory improvements were mediated by spectral power increases following the offset of STAMPs in the slow-spindle band (8–12 Hz) for left temporal areas in the scalp electroencephalography (EEG) during sleep. These results prescribe an optimal strategy to leverage STAMPs for boosting metamemory and suggest that real-world episodic memories can be modulated in a targeted manner even with coarser, non-invasive spatiotemporal stimulation.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Zhao, Sicheng; Wang, Shangfei; Soleymani, Mohammad; Joshi, Dhiraj; Ji, Qiang
Affective Computing for Large-scale Heterogeneous Multimedia Data: A Survey Journal Article
In: ACM Transactions on Multimedia Computing, Communications, and Applications, vol. 15, no. 3s, pp. 1–32, 2020, ISSN: 1551-6857, 1551-6865.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{zhao_affective_2020,
title = {Affective Computing for Large-scale Heterogeneous Multimedia Data: A Survey},
author = {Sicheng Zhao and Shangfei Wang and Mohammad Soleymani and Dhiraj Joshi and Qiang Ji},
url = {https://dl.acm.org/doi/10.1145/3363560},
doi = {10.1145/3363560},
issn = {1551-6857, 1551-6865},
year = {2020},
date = {2020-01-01},
journal = {ACM Transactions on Multimedia Computing, Communications, and Applications},
volume = {15},
number = {3s},
pages = {1–32},
abstract = {The wide popularity of digital photography and social networks has generated a rapidly growing volume of multimedia data (i.e., images, music, and videos), resulting in a great demand for managing, retrieving, and understanding these data. Affective computing (AC) of these data can help to understand human behaviors and enable wide applications. In this article, we survey the state-of-the-art AC technologies comprehensively for large-scale heterogeneous multimedia data. We begin this survey by introducing the typical emotion representation models from psychology that are widely employed in AC. We briefly describe the available datasets for evaluating AC algorithms. We then summarize and compare the representative methods on AC of different multimedia types, i.e., images, music, videos, and multimodal data, with the focus on both handcrafted features-based methods and deep learning methods. Finally, we discuss some challenges and future directions for multimedia affective computing.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
de Gennaro, Mauro; Krumhuber, Eva G.; Lucas, Gale
Effectiveness of an Empathic Chatbot in Combating Adverse Effects of Social Exclusion on Mood Journal Article
In: Frontiers in Psychology, vol. 10, pp. 3061, 2020, ISSN: 1664-1078.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@article{de_gennaro_effectiveness_2020,
title = {Effectiveness of an Empathic Chatbot in Combating Adverse Effects of Social Exclusion on Mood},
author = {Mauro de Gennaro and Eva G. Krumhuber and Gale Lucas},
url = {https://www.frontiersin.org/article/10.3389/fpsyg.2019.03061/full},
doi = {10.3389/fpsyg.2019.03061},
issn = {1664-1078},
year = {2020},
date = {2020-01-01},
journal = {Frontiers in Psychology},
volume = {10},
pages = {3061},
abstract = {From past research it is well known that social exclusion has detrimental consequences for mental health. To deal with these adverse effects, socially excluded individuals frequently turn to other humans for emotional support. While chatbots can elicit social and emotional responses on the part of the human interlocutor, their effectiveness in the context of social exclusion has not been investigated. In the present study, we examined whether an empathic chatbot can serve as a buffer against the adverse effects of social ostracism. After experiencing exclusion on social media, participants were randomly assigned to either talk with an empathetic chatbot about it (e.g., “I’m sorry that this happened to you”) or a control condition where their responses were merely acknowledged (e.g., “Thank you for your feedback”). Replicating previous research, results revealed that experiences of social exclusion dampened the mood of participants. Interacting with an empathetic chatbot, however, appeared to have a mitigating impact. In particular, participants in the chatbot intervention condition reported higher mood than those in the control condition. Theoretical, methodological, and practical implications, as well as directions for future research are discussed.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2019
Rosenbloom, Paul S.; Joshi, Himanshu; Ustun, Volkan
(Sub)Symbolic × (a)symmetric × (non)combinatory: A map of AI approaches spanning symbolic/statistical to neural/ML Proceedings Article
In: Proceedings of the 7th Annual Conference on Advances in Cognitive Systems, pp. 113–131, Cognitive Systems Foundation, Cambridge, MA, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{rosenbloom_subsymbolic_2019,
title = {(Sub)Symbolic × (a)symmetric × (non)combinatory: A map of AI approaches spanning symbolic/statistical to neural/ML},
author = {Paul S. Rosenbloom and Himanshu Joshi and Volkan Ustun},
url = {https://drive.google.com/file/d/1Ynp75A048Mfuh7e3kf_V7hs5kFD7uHsT/view},
year = {2019},
date = {2019-12-01},
booktitle = {Proceedings of the 7th Annual Conference on Advances in Cognitive Systems},
pages = {113–131},
publisher = {Cognitive Systems Foundation},
address = {Cambridge, MA},
abstract = {The traditional symbolic versus subsymbolic dichotomy can be decomposed into three more basic dichotomies, to yield a 3D (2×2×2) space in which symbolic/statistical and neural/ML approaches to intelligence appear in opposite corners. Filling in all eight resulting cells then yields a map that spans a number of standard AI approaches plus a few that may be less familiar. Based on this map, four hypotheses are articulated, explored, and evaluated concerning its relevance to both a deeper understanding of the field of AI as a whole and the general capabilities required in complete AI/cognitive systems.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Reilly, Adam; Whitcup, Wendy; Liewer, Matt; Mozgai, Sharon
Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied AI Agents in XR Proceedings Article
In: Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR), pp. 308–3084, IEEE, San Diego, CA, USA, 2019, ISBN: 978-1-72815-604-0.
Abstract | Links | BibTeX | Tags: UARC, VHTL, Virtual Humans
@inproceedings{hartholt_ubiquitous_2019,
title = {Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied AI Agents in XR},
author = {Arno Hartholt and Ed Fast and Adam Reilly and Wendy Whitcup and Matt Liewer and Sharon Mozgai},
url = {https://ieeexplore.ieee.org/document/8942321/},
doi = {10.1109/AIVR46125.2019.00072},
isbn = {978-1-72815-604-0},
year = {2019},
date = {2019-12-01},
booktitle = {Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)},
pages = {308–3084},
publisher = {IEEE},
address = {San Diego, CA, USA},
abstract = {We present an architecture and framework for the development of virtual humans for a range of computing platforms, including mobile, web, Virtual Reality (VR) and Augmented Reality (AR). The framework uses a mix of in-house and commodity technologies to support audio-visual sensing, speech recognition, natural language processing, nonverbal behavior generation and realization, text-to-speech generation, and rendering. This work builds on the Virtual Human Toolkit, which has been extended to support computing platforms beyond Windows. The resulting framework maintains the modularity of the underlying architecture, allows re-use of both logic and content through cloud services, and is extensible by porting lightweight clients. We present the current state of the framework, discuss how we model and animate our characters, and offer lessons learned through several use cases, including expressive character animation in seated VR, shared space and navigation in room-scale VR, autonomous AI in mobile AR, and real-time user performance feedback based on mobile sensors in headset AR.},
keywords = {UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chawla, Kushal; Srinivasan, Balaji Vasan; Chhaya, Niyati
Generating Formality-Tuned Summaries Using Input-Dependent Rewards Proceedings Article
In: Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL), pp. 833–842, Association for Computational Linguistics, Hong Kong, China, 2019.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@inproceedings{chawla_generating_2019,
title = {Generating Formality-Tuned Summaries Using Input-Dependent Rewards},
author = {Kushal Chawla and Balaji Vasan Srinivasan and Niyati Chhaya},
url = {https://www.aclweb.org/anthology/K19-1078},
doi = {10.18653/v1/K19-1078},
year = {2019},
date = {2019-11-01},
booktitle = {Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL)},
pages = {833–842},
publisher = {Association for Computational Linguistics},
address = {Hong Kong, China},
abstract = {Abstractive text summarization aims at generating human-like summaries by understanding and paraphrasing the given input content. Recent efforts based on sequence-to-sequence networks only allow the generation of a single summary. However, it is often desirable to accommodate the psycho-linguistic preferences of the intended audience while generating the summaries. In this work, we present a reinforcement learning based approach to generate formality-tailored summaries for an input article. Our novel input-dependent reward function aids in training the model with stylistic feedback on sampled and ground-truth summaries together. Once trained, the same model can generate formal and informal summary variants. Our automated and qualitative evaluations show the viability of the proposed framework.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
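As a concrete (and deliberately simplified) reading of the abstract's input-dependent reward, the sketch below mixes a content-overlap term with a style term whose formality target comes from the input; the overlap measure, the tiny FORMAL lexicon, and the weighting are invented stand-ins, not the paper's reward function.

# Toy input-dependent reward for formality-tuned summarization (illustrative).
FORMAL = {"therefore", "moreover", "regarding", "consequently"}

def overlap(summary: str, reference: str) -> float:
    # Crude content score: fraction of reference words kept in the summary.
    s, r = set(summary.split()), set(reference.split())
    return len(s & r) / max(len(r), 1)

def formality(text: str) -> float:
    words = text.split()
    return sum(w in FORMAL for w in words) / max(len(words), 1)

def reward(summary: str, reference: str, target_formality: float,
           alpha: float = 0.7) -> float:
    # High reward = covers the reference AND matches the requested formality.
    style_err = abs(formality(summary) - target_formality)
    return alpha * overlap(summary, reference) + (1 - alpha) * (1 - style_err)

print(reward("moreover the results hold", "the results hold", 0.25))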
Soleymani, Mohammad; Stefanov, Kalin; Kang, Sin-Hwa; Ondras, Jan; Gratch, Jonathan
Multimodal Analysis and Estimation of Intimate Self-Disclosure Proceedings Article
In: Proceedings of the 2019 International Conference on Multimodal Interaction on - ICMI '19, pp. 59–68, ACM Press, Suzhou, China, 2019, ISBN: 978-1-4503-6860-5.
Abstract | Links | BibTeX | Tags: MxR, UARC, Virtual Humans
@inproceedings{soleymani_multimodal_2019,
title = {Multimodal Analysis and Estimation of Intimate Self-Disclosure},
author = {Mohammad Soleymani and Kalin Stefanov and Sin-Hwa Kang and Jan Ondras and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3340555.3353737},
doi = {10.1145/3340555.3353737},
isbn = {978-1-4503-6860-5},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 International Conference on Multimodal Interaction on - ICMI '19},
pages = {59–68},
publisher = {ACM Press},
address = {Suzhou, China},
abstract = {Self-disclosure to others has a proven benefit for one’s mental health. It is shown that disclosure to computers can be similarly beneficial for emotional and psychological well-being. In this paper, we analyzed verbal and nonverbal behavior associated with self-disclosure in two datasets containing structured human-human and human-agent interviews from more than 200 participants. Correlation analysis of verbal and nonverbal behavior revealed that linguistic features such as affective and cognitive content in verbal behavior, and nonverbal behavior such as head gestures are associated with intimate self-disclosure. A multimodal deep neural network was developed to automatically estimate the level of intimate self-disclosure from verbal and nonverbal behavior. Between modalities, verbal behavior was the best modality for estimating self-disclosure within-corpora achieving r = 0.66. However, the cross-corpus evaluation demonstrated that nonverbal behavior can outperform language modality in cross-corpus evaluation. Such automatic models can be deployed in interactive virtual agents or social robots to evaluate rapport and guide their conversational strategy.},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert “Skip”
Systematic Representative Design and Clinical Virtual Reality Journal Article
In: Psychological Inquiry, vol. 30, no. 4, pp. 231–245, 2019, ISSN: 1047-840X, 1532-7965.
Abstract | Links | BibTeX | Tags: MedVR, UARC, VHTL, Virtual Humans
@article{mozgai_systematic_2019,
title = {Systematic Representative Design and Clinical Virtual Reality},
author = {Sharon Mozgai and Arno Hartholt and Albert “Skip” Rizzo},
url = {https://www.tandfonline.com/doi/full/10.1080/1047840X.2019.1693873},
doi = {10.1080/1047840X.2019.1693873},
issn = {1047-840X, 1532-7965},
year = {2019},
date = {2019-10-01},
journal = {Psychological Inquiry},
volume = {30},
number = {4},
pages = {231–245},
abstract = {The authors of the article, “Causal Inference in Generalizable Environments: Systematic Representative Design”, boldly announce their core point in the opening line of the abstract stating that, “Causal inference and generalizability both matter.” While a surface glance might suggest this to be a simple notion, a closer examination reveals the complexity of what they are proposing. This complexity is apparent when one considers that the bulk of human experimental research has always been challenged in its inability to concurrently deliver on both of these aims. This is no slight on the tens of thousands of human researchers and behavioral scientists who have devoted long careers to highly controlled human psychological and social science laboratory research. Rather, it reflects the sheer enormity of the challenges for conducting human studies designed to specify human function with physics-informed lab methods, while at the same time producing results that lead to enhanced understanding and prediction of how people will operate in the complex and ever-changing contexts that make up everyday life. At the core of this issue is a methodological and philosophical challenge that is relevant to all areas of human subjects’ research, beyond the social science focus of the Miller et al. (this issue) article. It is our aim to discuss the central topics in their article through the lens of our own work using Virtual/Augmented Reality and Virtual Human simulation technologies for clinical and training applications.},
keywords = {MedVR, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Haring, Kerstin S.; Tobias, Jessica; Waligora, Justin; Phillips, Elizabeth; Tenhundfeld, Nathan L.; Lucas, Gale; de Visser, Ewart J.; Gratch, Jonathan; Tossell, Chad
Conflict Mediation in Human-Machine Teaming: Using a Virtual Agent to Support Mission Planning and Debriefing Proceedings Article
In: Proceedings of the 2019 28th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), IEEE, New Delhi, India, 2019.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{haring_conflict_2019,
title = {Conflict Mediation in Human-Machine Teaming: Using a Virtual Agent to Support Mission Planning and Debriefing},
author = {Kerstin S. Haring and Jessica Tobias and Justin Waligora and Elizabeth Phillips and Nathan L. Tenhundfeld and Gale Lucas and Ewart J. de Visser and Jonathan Gratch and Chad Tossell},
url = {https://ieeexplore.ieee.org/abstract/document/8956414},
doi = {10.1109/RO-MAN46459.2019.8956414},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 28th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)},
publisher = {IEEE},
address = {New Delhi, India},
abstract = {Socially intelligent artificial agents and robots are anticipated to become ubiquitous in home, work, and military environments. With the addition of such agents to human teams it is crucial to evaluate their role in the planning, decision making, and conflict mediation processes. We conducted a study to evaluate the utility of a virtual agent that provided mission planning support in a three-person human team during a military strategic mission planning scenario. The team consisted of a human team lead who made the final decisions and three supporting roles: two humans and the artificial agent. The mission outcome was experimentally designed to fail and introduced a conflict between the human team members and the leader. This conflict was mediated by the artificial agent during the debriefing process through “discuss or debate” and “open communication” strategies of conflict resolution [1]. Our results showed that our teams experienced conflict. The teams also responded socially to the virtual agent, although they did not find the agent beneficial to the mediation process. Finally, teams collaborated well together and perceived task proficiency increased for team leaders. Socially intelligent agents show potential for conflict mediation, but need careful design and implementation to improve team processes and collaboration.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tavabi, Leili; Stefanov, Kalin; Gilani, Setareh Nasihati; Traum, David; Soleymani, Mohammad
Multimodal Learning for Identifying Opportunities for Empathetic Responses Proceedings Article
In: Proceedings of the 2019 International Conference on Multimodal Interaction, pp. 95–104, ACM, Suzhou, China, 2019, ISBN: 978-1-4503-6860-5.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{tavabi_multimodal_2019,
title = {Multimodal Learning for Identifying Opportunities for Empathetic Responses},
author = {Leili Tavabi and Kalin Stefanov and Setareh Nasihati Gilani and David Traum and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3340555.3353750},
doi = {10.1145/3340555.3353750},
isbn = {978-1-4503-6860-5},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 International Conference on Multimodal Interaction},
pages = {95–104},
publisher = {ACM},
address = {Suzhou China},
abstract = {Embodied interactive agents possessing emotional intelligence and empathy can create natural and engaging social interactions. Providing appropriate responses by interactive virtual agents requires the ability to perceive users’ emotional states. In this paper, we study and analyze behavioral cues that indicate an opportunity to provide an empathetic response. Emotional tone in language in addition to facial expressions are strong indicators of dramatic sentiment in conversation that warrant an empathetic response. To automatically recognize such instances, we develop a multimodal deep neural network for identifying opportunities when the agent should express positive or negative empathetic responses. We train and evaluate our model using audio, video and language from human-agent interactions in a Wizard-of-Oz setting, using the wizard’s empathetic responses and annotations collected on Amazon Mechanical Turk as ground-truth labels. Our model outperforms a text-based baseline, achieving an F1-score of 0.71 on a three-class classification. We further investigate the results and evaluate the capability of such a model to be deployed for real-world human-agent interactions.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ringeval, Fabien; Messner, Eva-Maria; Song, Siyang; Liu, Shuo; Zhao, Ziping; Mallol-Ragolta, Adria; Ren, Zhao; Soleymani, Mohammad; Pantic, Maja; Schuller, Björn; Valstar, Michel; Cummins, Nicholas; Cowie, Roddy; Tavabi, Leili; Schmitt, Maximilian; Alisamir, Sina; Amiriparian, Shahin
AVEC 2019 Workshop and Challenge: State-of-Mind, Detecting Depression with AI, and Cross-Cultural Affect Recognition Proceedings Article
In: Proceedings of the 9th International on Audio/Visual Emotion Challenge and Workshop - AVEC '19, pp. 3–12, ACM Press, Nice, France, 2019, ISBN: 978-1-4503-6913-8.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{ringeval_avec_2019,
title = {AVEC 2019 Workshop and Challenge: State-of-Mind, Detecting Depression with AI, and Cross-Cultural Affect Recognition},
author = {Fabien Ringeval and Eva-Maria Messner and Siyang Song and Shuo Liu and Ziping Zhao and Adria Mallol-Ragolta and Zhao Ren and Mohammad Soleymani and Maja Pantic and Björn Schuller and Michel Valstar and Nicholas Cummins and Roddy Cowie and Leili Tavabi and Maximilian Schmitt and Sina Alisamir and Shahin Amiriparian},
url = {http://dl.acm.org/citation.cfm?doid=3347320.3357688},
doi = {10.1145/3347320.3357688},
isbn = {978-1-4503-6913-8},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 9th International on Audio/Visual Emotion Challenge and Workshop - AVEC '19},
pages = {3–12},
publisher = {ACM Press},
address = {Nice, France},
abstract = {The Audio/Visual Emotion Challenge and Workshop (AVEC 2019) 'State-of-Mind, Detecting Depression with AI, and Cross-cultural Affect Recognition' is the ninth competition event aimed at the comparison of multimedia processing and machine learning methods for automatic audiovisual health and emotion analysis, with all participants competing strictly under the same conditions. The goal of the Challenge is to provide a common benchmark test set for multimodal information processing and to bring together the health and emotion recognition communities, as well as the audiovisual processing communities, to compare the relative merits of various approaches to health and emotion recognition from real-life data. This paper presents the major novelties introduced this year, the challenge guidelines, the data used, and the performance of the baseline systems on the three proposed tasks: state-of-mind recognition, depression assessment with AI, and cross-cultural affect sensing, respectively.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Khashe, Saba; Lucas, Gale; Becerik-Gerber, Burcin; Gratch, Jonathan
Establishing Social Dialog between Buildings and Their Users Journal Article
In: International Journal of Human–Computer Interaction, vol. 35, no. 17, pp. 1545–1556, 2019, ISSN: 1044-7318, 1532-7590.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{khashe_establishing_2019,
title = {Establishing Social Dialog between Buildings and Their Users},
author = {Saba Khashe and Gale Lucas and Burcin Becerik-Gerber and Jonathan Gratch},
url = {https://www.tandfonline.com/doi/full/10.1080/10447318.2018.1555346},
doi = {10.1080/10447318.2018.1555346},
issn = {1044-7318, 1532-7590},
year = {2019},
date = {2019-10-01},
journal = {International Journal of Human–Computer Interaction},
volume = {35},
number = {17},
pages = {1545–1556},
abstract = {Behavioral intervention strategies have yet to become successful in the development of initiatives to foster pro-environmental behaviors in buildings. In this paper, we explored the potentials of increasing the effectiveness of requests aiming to promote pro-environmental behaviors by engaging users in a social dialog, given the effects of two possible personas that are more related to the buildings (i.e., building vs. building manager). We tested our hypotheses and evaluated our findings in virtual and physical environments and found similar effects in both environments. Our results showed that social dialog involvement persuaded respondents to perform more pro-environmental actions. However, these effects were significant when the requests were delivered by an agent representing the building. In addition, these strategies were not equally effective across all types of people and their effects varied for people with different characteristics. Our findings provide useful design choices for persuasive technologies aiming to promote pro-environmental behaviors.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon; Fast, Ed; Liewer, Matt; Reilly, Adam; Whitcup, Wendy; Rizzo, Albert "Skip"
Virtual Humans in Augmented Reality: A First Step towards Real-World Embedded Virtual Roleplayers Proceedings Article
In: Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19, pp. 205–207, ACM Press, Kyoto, Japan, 2019, ISBN: 978-1-4503-6922-0.
Abstract | Links | BibTeX | Tags: MedVR, VHTL, Virtual Humans
@inproceedings{hartholt_virtual_2019-1,
title = {Virtual Humans in Augmented Reality: A First Step towards Real-World Embedded Virtual Roleplayers},
author = {Arno Hartholt and Sharon Mozgai and Ed Fast and Matt Liewer and Adam Reilly and Wendy Whitcup and Albert "Skip" Rizzo},
url = {http://dl.acm.org/citation.cfm?doid=3349537.3352766},
doi = {10.1145/3349537.3352766},
isbn = {978-1-4503-6922-0},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19},
pages = {205–207},
publisher = {ACM Press},
address = {Kyoto, Japan},
abstract = {We present one of the first applications of virtual humans in Augmented Reality (AR), which allows young adults with Autism Spectrum Disorder (ASD) the opportunity to practice job interviews. It uses the Magic Leap’s AR hardware sensors to provide users with immediate feedback on six different metrics, including eye gaze, blink rate and head orientation. The system provides two characters, with three conversational modes each. Ported from an existing desktop application, the main development lessons learned were: 1) provide users with navigation instructions in the user interface, 2) avoid dark colors as they are rendered transparently, 3) use dynamic gaze so characters maintain eye contact with the user, 4) use hardware sensors like eye gaze to provide user feedback, and 5) use surface detection to place characters dynamically in the world.},
keywords = {MedVR, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Carla; Yanov, Volodymyr; Traum, David; Georgila, Kallirroi
A Wizard of Oz Data Collection Framework for Internet of Things Dialogues Proceedings Article
In: Proceedings of the 23rd Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts, pp. 3, SEMDIAL, London, UK, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{gordon_wizard_2019,
title = {A Wizard of Oz Data Collection Framework for Internet of Things Dialogues},
author = {Carla Gordon and Volodymyr Yanov and David Traum and Kallirroi Georgila},
url = {http://semdial.org/anthology/papers/Z/Z19/Z19-4024/},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 23rd Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts},
pages = {3},
publisher = {SEMDIAL},
address = {London, UK},
abstract = {We describe a novel Wizard of Oz dialogue data collection framework in the Internet of Things domain. Our tool is designed for collecting dialogues between a human user, and 8 different system profiles, each with a different communication strategy. We then describe the data collection conducted with this tool, as well as the dialogue corpus that was generated.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Gratch, Jonathan; Aydogan, Reyhan; Baarslag, Tim; Jonker, Catholijn M.
The Likeability-Success Tradeoff: Results of the 2nd Annual Human-Agent Automated Negotiating Agents Competition Proceedings Article
In: Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII), IEEE, Cambridge, UK, 2019.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mell_likeability-success_2019,
title = {The Likeability-Success Tradeoff: Results of the 2nd Annual Human-Agent Automated Negotiating Agents Competition},
author = {Johnathan Mell and Jonathan Gratch and Reyhan Aydogan and Tim Baarslag and Catholijn M. Jonker},
url = {https://ieeexplore.ieee.org/xpl/conhome/8911251/proceeding},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII)},
publisher = {IEEE},
address = {Cambridge, UK},
abstract = {We present the results of the 2nd Annual Human-Agent League of the Automated Negotiating Agent Competition. Building on the success of the previous year’s results, a new challenge was issued that focused on exploring the likeability-success tradeoff in negotiations. By examining a series of repeated negotiations, actions may affect the relationship between automated negotiating agents and their human competitors over time. The results presented herein support a more complex view of human-agent negotiation and capture of integrative potential (win-win solutions). We show that, although likeability is generally seen as a tradeoff to winning, agents are able to remain well-liked while winning if integrative potential is not discovered in a given negotiation. The results indicate that the top-performing agent in this competition took advantage of this loophole by engaging in favor exchange across negotiations (cross-game logrolling). These exploratory results provide information about the effects of different submitted “black-box” agents in human-agent negotiation and provide a state-of-the-art benchmark for human-agent design.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hoegen, Rens; Gratch, Jonathan; Parkinson, Brian; Shore, Danielle
Signals of Emotion Regulation in a Social Dilemma: Detection from Face and Context Proceedings Article
In: Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 7, IEEE, Cambridge, UK, 2019.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{hoegen_signals_2019,
title = {Signals of Emotion Regulation in a Social Dilemma: Detection from Face and Context},
author = {Rens Hoegen and Jonathan Gratch and Brian Parkinson and Danielle Shore},
url = {https://ieeexplore.ieee.org/xpl/conhome/8911251/proceeding},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII)},
pages = {7},
publisher = {IEEE},
address = {Cambridge, UK},
abstract = {In social decision-making tasks, facial expressions are informative signals that indicate motives and intentions. As people are aware that their expressions influence partner behavior, expressions may be strategically regulated in competitive environments to influence a social partner’s decision-making. In this work, we examine facial expressions and their strategic regulation within the context of an iterated prisoner’s dilemma. Utilizing video-cued rating procedures, we examine several key questions about the functionality of facial expressions in social decision-making. First, we assess the extent to which emotion and expression regulation are accurately detected from dynamic facial expressions in interpersonal interactions. Second, we explore which facial cues are utilized to evaluate emotion and regulation information. Finally, we investigate the role of context in participants’ emotion and regulation judgments. Results show that participants accurately perceive facial emotion and expression regulation, although they are better at recognizing emotions than regulation. Using automated expression analysis and stepwise regression, we constructed models that use action units from participant videos to predict their video-cued emotion and regulation ratings. We show that these models perform similarly and, in some cases, better than participants do. Moreover, these models demonstrate that game state information improves predictive accuracy, thus implying that context information is important in the evaluation of facial expressions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lei, Su; Gratch, Jonathan
Smiles Signal Surprise in a Social Dilemma Proceedings Article
In: Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII), IEEE, Cambridge, UK, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lei_smiles_2019,
title = {Smiles Signal Surprise in a Social Dilemma},
author = {Su Lei and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/xpl/conhome/8911251/proceeding},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII)},
publisher = {IEEE},
address = {Cambridge, UK},
abstract = {This study examines spontaneous facial expressions in an iterated prisoner’s dilemma with financial stakes. Our goal was to identify typical facial expressions associated with key events during the interaction (e.g., cooperation or exploitation) and contrast these reactions with alternative theories of the meaning of facial expressions. Specifically, we examined if expressions reflect individual self-interest (e.g., winning) or social motives (e.g., promoting fairness) and the extent to which surprise might moderate the intensity of facial displays. In contrast to predictions of scientific and folk theories of expression, smiles were the only expressions consistently elicited, regardless of the reward or fairness of outcomes. Further, these smiles serve as a reliable indicator of the surprisingness of the event, but not its pleasantness (contradicting research on both the meaning of smiles and indicators of surprise). To our knowledge, this is the first study to indicate that smiles signal surprise.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M.; Rizzo, Albert; Gratch, Jonathan; Scherer, Stefan; Stratou, Giota; Boberg, Jill; Morency, Louis-Philippe
Reporting Mental Health Symptoms: Breaking Down Barriers to Care with Virtual Human Interviewers Book Section
In: The Impact of Virtual and Augmented Reality on Individuals and Society, pp. 256–264, Frontiers Media SA, 2019.
Abstract | Links | BibTeX | Tags: MedVR, UARC, Virtual Humans
@incollection{lucas_reporting_2019,
title = {Reporting Mental Health Symptoms: Breaking Down Barriers to Care with Virtual Human Interviewers},
author = {Gale M. Lucas and Albert Rizzo and Jonathan Gratch and Stefan Scherer and Giota Stratou and Jill Boberg and Louis-Philippe Morency},
url = {https://books.google.com/books?hl=en&lr=&id=N724DwAAQBAJ&oi=fnd&pg=PP1&dq=The+Impact+of+Virtual+and+Augmented+Reality+on+Individuals+and+Society&ots=ZMD1P9T-K5&sig=Qqh7iHZ4Xq2iRyYecrECHwNNE38#v=onepage&q=The%20Impact%20of%20Virtual%20and%20Augmented%20Reality%20on%20Individuals%20and%20Society&f=false},
year = {2019},
date = {2019-09-01},
booktitle = {The Impact of Virtual and Augmented Reality on Individuals and Society},
pages = {256–264},
publisher = {Frontiers Media SA},
abstract = {A common barrier to healthcare for psychiatric conditions is the stigma associated with these disorders. Perceived stigma prevents many from reporting their symptoms. Stigma is a particularly pervasive problem among military service members, preventing them from reporting symptoms of combat-related conditions like posttraumatic stress disorder (PTSD). However, research shows increased reporting by service members when anonymous assessments are used. For example, service members report more symptoms of PTSD when they anonymously answer the Post-Deployment Health Assessment (PDHA) symptom checklist compared to the official PDHA, which is identifiable and linked to their military records. To investigate the factors that influence reporting of psychological symptoms by service members, we used a transformative technology: automated virtual humans that interview people about their symptoms. Such virtual human interviewers allow simultaneous use of two techniques for eliciting disclosure that would otherwise be incompatible; they afford anonymity while also building rapport. We examined whether virtual human interviewers could increase disclosure of mental health symptoms among active-duty service members who had just returned from a year-long deployment in Afghanistan. Service members reported more symptoms during a conversation with a virtual human interviewer than on the official PDHA. They also reported more to a virtual human interviewer than on an anonymized PDHA. A second, larger sample of active-duty and former service members found a similar effect that approached statistical significance. Because respondents in both studies shared more with virtual human interviewers than an anonymized PDHA—even though both conditions control for stigma and ramifications for service members’ military records—virtual human interviewers that build rapport may provide a superior option to encourage reporting.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Lycan, Bethany; Artstein, Ron
Direct and Mediated Interaction with a Holocaust Survivor Proceedings Article
In: Proceedings of the Advanced Social Interaction with Agents: 8th International Workshop on Spoken Dialog Systems, pp. 161–167, Springer, Cham, Switzerland, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lycan_direct_2019,
title = {Direct and Mediated Interaction with a Holocaust Survivor},
author = {Bethany Lycan and Ron Artstein},
url = {https://doi.org/10.1007/978-3-319-92108-2_17},
doi = {10.1007/978-3-319-92108-2_17},
year = {2019},
date = {2019-08-01},
booktitle = {Proceedings of the Advanced Social Interaction with Agents: 8th International Workshop on Spoken Dialog Systems},
volume = {510},
pages = {161–167},
publisher = {Springer},
address = {Cham, Switzerland},
series = {Lecture Notes in Electrical Engineering},
abstract = {The New Dimensions in Testimony dialogue system was placed in two museums under two distinct conditions: docent-led group interaction, and free interaction with visitors. Analysis of the resulting conversations shows that docent-led interactions have a lower vocabulary and a higher proportion of user utterances that directly relate to the system’s subject matter, while free interaction is more personal in nature. Under docent-led interaction the system gives a higher proportion of direct appropriate responses, but overall correct system behavior is about the same in both conditions because the free interaction condition has more instances where the correct system behavior is to avoid a direct response.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Bonial, Claire; Donatelli, Lucia; Lukin, Stephanie M.; Tratz, Stephen; Artstein, Ron; Traum, David; Voss, Clare R.
Augmenting Abstract Meaning Representation for Human-Robot Dialogue Proceedings Article
In: Proceedings of the First International Workshop on Designing Meaning Representations (DMR), pp. 199–210, Association of Computational Linguistics, Florence, Italy, 2019.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{bonial_augmenting_2019,
title = {Augmenting Abstract Meaning Representation for Human-Robot Dialogue},
author = {Claire Bonial and Lucia Donatelli and Stephanie M. Lukin and Stephen Tratz and Ron Artstein and David Traum and Clare R. Voss},
url = {https://www.aclweb.org/anthology/W19-3322},
year = {2019},
date = {2019-08-01},
booktitle = {Proceedings of the First International Workshop on Designing Meaning Representations (DMR)},
pages = {199–210},
publisher = {Association of Computational Linguistics},
address = {Florence, Italy},
abstract = {We detail refinements made to Abstract Meaning Representation (AMR) that make the representation more suitable for supporting a situated dialogue system, where a human remotely controls a robot for purposes of search and rescue and reconnaissance. We propose 36 augmented AMRs that capture speech acts, tense and aspect, and spatial information. This linguistic information is vital for representing important distinctions, for example whether the robot has moved, is moving, or will move. We evaluate two existing AMR parsers for their performance on dialogue data. We also outline a model for graph-to-graph conversion, in which output from AMR parsers is converted into our refined AMRs. The design scheme presented here, though task-specific, is extendable for broad coverage of speech acts using AMR in future task-independent work.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S.; Forbus, Kenneth D.
Expanding and Repositioning Cognitive Science Journal Article
In: Topics in Cognitive Science, 2019, ISSN: 1756-8757, 1756-8765.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{rosenbloom_expanding_2019,
title = {Expanding and Repositioning Cognitive Science},
author = {Paul S. Rosenbloom and Kenneth D. Forbus},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/tops.12468},
doi = {10.1111/tops.12468},
issn = {1756-8757, 1756-8765},
year = {2019},
date = {2019-08-01},
journal = {Topics in Cognitive Science},
abstract = {Cognitive science has converged in many ways with cognitive psychology, while also maintaining a distinctive interdisciplinary nature. Here we further characterize this existing state of the field before proposing how it might be reconceptualized toward a broader and more distinct, and thus more stable, position in the realm of sciences.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gilani, Setareh Nasihati; Traum, David; Sortino, Rachel; Gallagher, Grady; Aaron-Lozano, Kailyn; Padilla, Cryss; Shapiro, Ari; Lamberton, Jason; Petitto, Laura-Ann
Can a Signing Virtual Human Engage a Baby's Attention? Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 162–169, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{nasihati_gilani_can_2019,
title = {Can a Signing Virtual Human Engage a Baby's Attention?},
author = {Setareh Nasihati Gilani and David Traum and Rachel Sortino and Grady Gallagher and Kailyn Aaron-Lozano and Cryss Padilla and Ari Shapiro and Jason Lamberton and Laura-Ann Petitto},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329463},
doi = {10.1145/3308532.3329463},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {162–169},
publisher = {ACM Press},
address = {Paris, France},
abstract = {The child developmental period of ages 6-12 months marks a widely understood “critical period” for healthy language learning, during which, failure to receive exposure to language can place babies at risk for language and reading problems spanning life. Deaf babies constitute one vulnerable population as they can experience dramatically reduced or no access to usable linguistic input during this period. Technology has been used to augment linguistic input (e.g., auditory devices; language videotapes) but research finds limitations in learning. We evaluated an AI system that uses an Avatar (provides language and socially contingent interactions) and a robot (aids attention to the Avatar) to facilitate infants’ ability to learn aspects of American Sign Language (ASL), and asked three questions: (1) Can babies with little/no exposure to ASL distinguish among the Avatar’s different conversational modes (Linguistic Nursery Rhymes; Social Gestures; Idle/nonlinguistic postures; 3rd person observer)? (2) Can an Avatar stimulate babies’ production of socially contingent responses, and crucially, nascent language responses? (3) What is the impact of parents’ presence/absence of conversational participation? Surprisingly, babies (i) spontaneously distinguished among Avatar conversational modes, (ii) produced varied socially contingent responses to Avatar’s modes, and (iii) parents influenced an increase in babies’ response tokens to some Avatar modes, but the overall categories and pattern of babies’ behavioral responses remained proportionately similar irrespective of parental participation. Of note, babies produced the greatest percentage of linguistic responses to the Avatar’s Linguistic Nursery Rhymes versus other Avatar conversational modes. This work demonstrates the potential for Avatars to facilitate language learning in young babies.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Stocco, Andrea; Steine-Hanson, Zoe; Koh, Natalie; Laird, John E.; Lebiere, Christian J.; Rosenbloom, Paul
Analysis of the Human Connectome Data Supports the Notion of A “Common Model of Cognition” for Human and Human-Like Intelligence Technical Report
Neuroscience 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@techreport{stocco_analysis_2019,
title = {Analysis of the Human Connectome Data Supports the Notion of A “Common Model of Cognition” for Human and Human-Like Intelligence},
author = {Andrea Stocco and Zoe Steine-Hanson and Natalie Koh and John E. Laird and Christian J. Lebiere and Paul Rosenbloom},
url = {http://biorxiv.org/lookup/doi/10.1101/703777},
doi = {10.1101/703777},
year = {2019},
date = {2019-07-01},
pages = {38},
institution = {Neuroscience},
abstract = {The Common Model of Cognition (CMC) is a recently proposed, consensus architecture intended to capture decades of progress in cognitive science on modeling human and human-like intelligence. Because of the broad agreement around it and preliminary mappings of its components to specific brain areas, we hypothesized that the CMC could be a candidate model of the large-scale functional architecture of the human brain. To test this hypothesis, we analyzed functional MRI data from 200 participants and seven different tasks that cover a broad range of cognitive domains. The CMC components were identified with functionally homologous brain regions through canonical fMRI analysis, and their communication pathways were translated into predicted patterns of effective connectivity between regions. The resulting dynamic linear model was implemented and fitted using Dynamic Causal Modeling, and compared against four alternative brain architectures that had been previously proposed in the field of neuroscience (two hierarchical architectures and two hub-and-spoke architectures) using a Bayesian approach. The results show that, in all cases, the CMC vastly outperforms all other architectures, both within each domain and across all tasks. The results suggest that a common, general architecture that could be used for artificial intelligence effectively underpins all aspects of human cognition, from the overall functional architecture of the human brain to higher level thought processes.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Hartholt, Arno; Mozgai, Sharon; Rizzo, Albert "Skip"
Virtual Job Interviewing Practice for High-Anxiety Populations Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 238–240, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
Abstract | Links | BibTeX | Tags: MedVR, VHTL, Virtual Humans
@inproceedings{hartholt_virtual_2019,
title = {Virtual Job Interviewing Practice for High-Anxiety Populations},
author = {Arno Hartholt and Sharon Mozgai and Albert "Skip" Rizzo},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329417},
doi = {10.1145/3308532.3329417},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {238–240},
publisher = {ACM Press},
address = {Paris, France},
abstract = {We present a versatile system for training job interviewing skills that focuses specifically on segments of the population facing increased challenges during the job application process. In particular, we target those with Autism Spectrum Disorder (ASD), veterans transitioning to civilian life, and former convicts integrating back into society. The system itself follows the SAIBA framework and contains several interviewer characters, who each represent a different type of vocational field (e.g., service industry, retail, office, etc.). Each interviewer can be set to one of three conversational modes, which not only affects what they say and how they say it, but also their supporting body language. This approach offers varying difficulties, allowing users to start practicing with interviewers who are more encouraging and accommodating before moving on to personalities that are more direct and indifferent. Finally, the user can place the interviewers in different environmental settings (e.g., conference room, restaurant, executive office, etc.), allowing for many different combinations in which to practice.},
keywords = {MedVR, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Beissinger, Markus; Gratch, Jonathan
An Expert-Model & Machine Learning Hybrid Approach to Predicting Human-Agent Negotiation Outcomes Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 212–214, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{mell_expert-model_2019,
title = {An Expert-Model & Machine Learning Hybrid Approach to Predicting Human-Agent Negotiation Outcomes},
author = {Johnathan Mell and Markus Beissinger and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329433},
doi = {10.1145/3308532.3329433},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {212–214},
publisher = {ACM Press},
address = {Paris, France},
abstract = {We present the results of a machine-learning approach to the analysis of several human-agent negotiation studies. By combining expert knowledge of negotiating behavior compiled over a series of empirical studies with neural networks, we show that a hybrid approach to parameter selection yields promise for designing more effective and socially intelligent agents. Specifically, we show that a deep feedforward neural network using a theory-driven three-parameter model can be effective in predicting negotiation outcomes. Furthermore, it outperforms other expert-designed models that use more parameters, as well as those using other, more limited techniques (such as linear regression models or boosted decision trees). We anticipate these results will have impact for those seeking to combine extensive domain knowledge with more automated approaches in human-computer negotiation.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Minha; Lucas, Gale; Mell, Johnathan; Johnson, Emmanuel; Gratch, Jonathan
What's on Your Virtual Mind?: Mind Perception in Human-Agent Negotiations Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 38–45, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lee_whats_2019,
title = {What's on Your Virtual Mind?: Mind Perception in Human-Agent Negotiations},
author = {Minha Lee and Gale Lucas and Johnathan Mell and Emmanuel Johnson and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329465},
doi = {10.1145/3308532.3329465},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {38–45},
publisher = {ACM Press},
address = {Paris, France},
abstract = {Recent research shows that how we respond to other social actors depends on what sort of mind we ascribe to them. In this article we examine how perceptions of a virtual agent’s mind shape behavior in human-agent negotiations. We varied descriptions and communicative behavior of virtual agents on two dimensions according to the mind perception theory: agency (cognitive aptitude) and patiency (affective aptitude). Participants then engaged in negotiations with the different agents. People scored more points and engaged in shorter negotiations with agents described to be cognitively intelligent, and got lower points and had longer negotiations with agents that were described to be cognitively unintelligent. Accordingly, agents described as having low-agency ended up earning more points than those with high-agency. Within the negotiations themselves, participants sent more happy and surprise emojis and emotionally valenced messages to agents described to be emotional. This high degree of described patiency also affected perceptions of the agent’s moral standing and relatability. In short, manipulating the perceived mind of agents affects how people negotiate with them. We discuss these results, which show that agents are perceived not only as social actors, but as intentional actors through negotiations.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S.; Ustun, Volkan
An Architectural Integration of Temporal Motivation Theory for Decision Making Proceedings Article
In: Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling, pp. 6, Montreal, Canada, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{rosenbloom_architectural_2019,
title = {An Architectural Integration of Temporal Motivation Theory for Decision Making},
author = {Paul S. Rosenbloom and Volkan Ustun},
url = {https://iccm-conference.neocities.org/2019/proceedings/papers/ICCM2019_paper_7.pdf},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling},
pages = {6},
address = {Montreal, Canada},
abstract = {Temporal Motivation Theory (TMT) is incorporated into the Sigma cognitive architecture to explore the ability of this combination to yield human-like decision making. In conjunction with Lazy Reinforcement Learning (LRL), which provides the inputs required for this form of decision making, experiments are run on a simple reinforcement learning task, a preference reversal task, and an uncertain two-choice task.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}