Publications
Search
Lee, Eugene; McNulty, Zachary; Gentle, Alex; Pradhan, Prerak Tusharkumar; Gratch, Jonathan
Examining the impact of emotion and agency on negotiator behavior Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
@inproceedings{lee_examining_2022,
  title = {Examining the impact of emotion and agency on negotiator behavior},
  author = {Eugene Lee and Zachary McNulty and Alex Gentle and Prerak Tusharkumar Pradhan and Jonathan Gratch},
  url = {https://doi.org/10.1145/3514197.3549673},
  doi = {10.1145/3514197.3549673},
  isbn = {978-1-4503-9248-8},
  year = {2022},
  date = {2022-09-01},
  urldate = {2022-09-27},
  booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
  pages = {1--3},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  series = {IVA '22},
  abstract = {Virtual human expressions can shape user behavior [1, 2, 3], yet in negotiation, findings have been underwhelming. For example, human negotiators can use anger to claim value (i.e., extract concessions) [4], but anger has no effect when exhibited by a virtual human [5]. Other psychological work suggests that emotions can create value (e.g., happy negotiators can better discover tradeoffs across issues that "grow the pie"), but little research has examined how virtual human expressions shape value creation. Here we present an agent architecture and pilot study that examines differences between how the emotional expressions of human and virtual-human opponents shape value claiming and value creation. We replicate the finding that virtual human anger fails to influence value claiming but discover counter-intuitive findings on value creation. We argue these findings highlight the potential for intelligent virtual humans to yield insight into human psychology.},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Hale, James; Jalan, Harsh; Saini, Nidhi; Tan, Shao Ling; Woo, Junhyuck; Gratch, Jonathan
Negotiation game to introduce non-linear utility Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
@inproceedings{hale_negotiation_2022,
  title = {Negotiation game to introduce non-linear utility},
  author = {James Hale and Harsh Jalan and Nidhi Saini and Shao Ling Tan and Junhyuck Woo and Jonathan Gratch},
  url = {https://doi.org/10.1145/3514197.3549678},
  doi = {10.1145/3514197.3549678},
  isbn = {978-1-4503-9248-8},
  year = {2022},
  date = {2022-09-01},
  urldate = {2022-09-27},
  booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
  pages = {1--3},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  series = {IVA '22},
  abstract = {Much prior work in automated negotiation makes the simplifying assumption of linear utility functions. As such, we propose a framework for multilateral repeated negotiations in a complex game setting—to introduce non-linearities—where negotiators can choose with whom they negotiate in subsequent games. This game setting not only creates non-linear utility functions, but also motivates the negotiation.},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Preference interdependencies in a multi-issue salary negotiation Proceedings Article
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
@inproceedings{hale_preference_2022,
  title = {Preference interdependencies in a multi-issue salary negotiation},
  author = {James Hale and Peter Kim and Jonathan Gratch},
  url = {https://doi.org/10.1145/3514197.3549681},
  doi = {10.1145/3514197.3549681},
  isbn = {978-1-4503-9248-8},
  year = {2022},
  date = {2022-09-01},
  urldate = {2022-09-27},
  booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
  pages = {1--8},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  series = {IVA '22},
  abstract = {Negotiation is an important potential application domain for intelligent virtual agents but, unlike research on agent-agent negotiations, agents that negotiate with people often adopt unrealistic simplifying assumptions. These assumptions not only limit the generality of these agents, but call into question scientific findings about how people negotiate with agents. Here we relax two common assumptions: the use of assigned rather than elicited user preferences, and the use of linear utility functions. Using a simulated salary negotiation, we find that relaxing these assumptions helps reveal interesting individual differences in how people negotiate their salary and allows algorithms to find better win-win solutions.},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn
Cognitive performance, creativity and stress levels of neurotypical young adults under different white noise levels Journal Article
In: Sci Rep, vol. 12, no. 1, pp. 14566, 2022, ISSN: 2045-2322, (Number: 1 Publisher: Nature Publishing Group).
@article{awada_cognitive_2022,
  title = {Cognitive performance, creativity and stress levels of neurotypical young adults under different white noise levels},
  author = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn Roll},
  url = {https://www.nature.com/articles/s41598-022-18862-w},
  doi = {10.1038/s41598-022-18862-w},
  issn = {2045-2322},
  year = {2022},
  date = {2022-08-01},
  urldate = {2023-03-31},
  journal = {Scientific Reports},
  volume = {12},
  number = {1},
  pages = {14566},
  abstract = {Noise is often considered a distractor; however recent studies suggest that sub-attentive individuals or individuals diagnosed with attention deficit hyperactivity disorder can benefit from white noise to enhance their cognitive performance. Research regarding the effect of white noise on neurotypical adults presents mixed results, thus the implications of white noise on the neurotypical population remain unclear. Thus, this study investigates the effect of 2 white noise conditions, white noise level at 45 dB and white noise level at 65 dB, on the cognitive performance, creativity, and stress levels of neurotypical young adults in a private office space. These conditions are compared to a baseline condition where participants are exposed to the office ambient noise. Our findings showed that the white noise level at 45 dB resulted in better cognitive performance in terms of sustained attention, accuracy, and speed of performance as well as enhanced creativity and lower stress levels. On the other hand, the 65 dB white noise condition led to improved working memory but higher stress levels, which leads to the conclusion that different tasks might require different noise levels for optimal performance. These results lay the foundation for the integration of white noise into office workspaces as a tool to enhance office workers’ performance.},
  note = {Number: 1 Publisher: Nature Publishing Group},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Melo, Celso; Gratch, Jonathan; Krueger, Frank
Heuristic thinking and altruism toward machines in people impacted by COVID-19 Journal Article
In: Yearb Med Inform, vol. 31, no. 1, pp. 226–227, 2022, ISSN: 0943-4747, 2364-0502, (Publisher: Georg Thieme Verlag KG).
@article{de_melo_heuristic_2022,
  title = {Heuristic thinking and altruism toward machines in people impacted by {COVID-19}},
  author = {Celso Melo and Jonathan Gratch and Frank Krueger},
  url = {http://www.thieme-connect.de/DOI/DOI?10.1055/s-0042-1742544},
  doi = {10.1055/s-0042-1742544},
  issn = {0943-4747, 2364-0502},
  year = {2022},
  date = {2022-08-01},
  urldate = {2023-03-31},
  journal = {Yearb Med Inform},
  volume = {31},
  number = {1},
  pages = {226--227},
  abstract = {The authors conducted a study of how human interaction with machines needs to be studied, given the advent of intelligent systems in everyday life (such as autonomous vehicles) and how COVID-19 experiences shape human altruistic responses to machines. The authors correctly claim that more study of how humans can collaborate, and their attitudes and behavior toward machines differs from social norms with humans. They make use of the ‘Computers as Social Actors’ theory of Reeves and Nass (1996), which was influential in human computer and robot interaction research. It argues that people heuristically treat machines like people, and that encouraging intuitive thinking, in contrast to deliberation, led to increased cooperation in non-strategic settings. The authors are the first to apply and test this with concrete cognitive studies. The dictator game is used to measure altruism; the user has options to give tokens to another user (in this case the computer or a ‘human’ (both delivered by computer message to obscure the source). 186 participants were used as senders, across 40 US states, and provided a diverse sample. They were administered the abbreviated Post-Traumatic Stress Disorder (PTSD) checklist (to measure COVID-19 impact), and three subjective scales to gain insight on mechanisms. These were the Cognitive Reflection test to measure if those impacted engage in reduced reflection, i.e., more intuitive thinking, the Faith in Technology scale, and the Moral Foundations Questionnaire. Results showed a reduction in the usual bias against fairness toward machines the more the user had been impacted by COVID-19. There were also sharp increases in intuitive (and incorrect) thinking and faith in technology among the most highly affected group. The authors through multiple mediation analysis showed that faith in technology and heuristic thinking mediate the offer bias.
They also caution that in times of stress the disproportional impact of COVID-19 on vulnerable groups leads to the need for ethical guidelines and regulations to ensure altruism/cooperation shown to machines is well deserved. They also point out the factors such as individual stress propensity, education level, and socioeconomic status could make individuals susceptible to heuristic thinking, and other social norms such as reciprocity, trust and fairness may also shape collaboration with machines.},
  note = {Publisher: Georg Thieme Verlag KG},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Mozgai, Sharon; Winn, Jade; Kaurloto, Cari; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno
Toward a Semi-Automated Scoping Review of Virtual Human Smiles Proceedings Article
In: Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop, 2022.
@inproceedings{mozgai_toward_2022,
  title     = {Toward a Semi-Automated Scoping Review of Virtual Human Smiles},
  author    = {Sharon Mozgai and Jade Winn and Cari Kaurloto and Andrew Leeds and Dirk Heylen and Arno Hartholt},
  url       = {http://www.lrec-conf.org/proceedings/lrec2022/workshops/SmiLa/index.html},
  year      = {2022},
  date      = {2022-06-01},
  booktitle = {Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Leeds, Andrew; Kim, Kevin; Gordon, Andrew; McCullough, Kyle; Ustun, Volkan; Mozgai, Sharon
Demonstrating the Rapid Integration & Development Environment (RIDE): Embodied Conversational Agent (ECA) and Multiagent Capabilities Proceedings Article
In: Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems, pp. 1902–1904, International Foundation for Autonomous Agents and Multiagent Systems, Richland, SC, 2022, ISBN: 978-1-4503-9213-6.
@inproceedings{hartholt_demonstrating_2022,
  title = {Demonstrating the Rapid Integration \& Development Environment ({RIDE}): Embodied Conversational Agent ({ECA}) and Multiagent Capabilities},
  author = {Arno Hartholt and Ed Fast and Andrew Leeds and Kevin Kim and Andrew Gordon and Kyle McCullough and Volkan Ustun and Sharon Mozgai},
  isbn = {978-1-4503-9213-6},
  year = {2022},
  date = {2022-05-01},
  urldate = {2022-09-20},
  booktitle = {Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems},
  pages = {1902--1904},
  publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
  address = {Richland, SC},
  series = {AAMAS '22},
  abstract = {We demonstrate the Rapid Integration \& Development Environment (RIDE), a research and development platform that enables rapid prototyping in support of multiagents and embodied conversational agents. RIDE is based on commodity game engines and includes a flexible architecture, system interoperability, and native support for artificial intelligence and machine learning frameworks.},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Adami, Pooya; Rodrigues, Patrick B.; Woods, Peter J.; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale
Impact of VR-Based Training on Human–Robot Interaction for Remote Operating Construction Robots Journal Article
In: J. Comput. Civ. Eng., vol. 36, no. 3, pp. 04022006, 2022, ISSN: 0887-3801, 1943-5487.
@article{adami_impact_2022,
  title = {Impact of {VR}-Based Training on Human–Robot Interaction for Remote Operating Construction Robots},
  author = {Pooya Adami and Patrick B. Rodrigues and Peter J. Woods and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale Lucas},
  url = {https://ascelibrary.org/doi/10.1061/%28ASCE%29CP.1943-5487.0001016},
  doi = {10.1061/(ASCE)CP.1943-5487.0001016},
  issn = {0887-3801, 1943-5487},
  year = {2022},
  date = {2022-05-01},
  urldate = {2022-09-23},
  journal = {J. Comput. Civ. Eng.},
  volume = {36},
  number = {3},
  pages = {04022006},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Tran, Minh; Soleymani, Mohammad
A Pre-Trained Audio-Visual Transformer for Emotion Recognition Proceedings Article
In: ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 4698–4702, IEEE, Singapore, Singapore, 2022, ISBN: 978-1-66540-540-9.
@inproceedings{tran_pre-trained_2022,
  title = {A Pre-Trained Audio-Visual Transformer for Emotion Recognition},
  author = {Minh Tran and Mohammad Soleymani},
  url = {https://ieeexplore.ieee.org/document/9747278/},
  doi = {10.1109/ICASSP43922.2022.9747278},
  isbn = {978-1-66540-540-9},
  year = {2022},
  date = {2022-05-01},
  urldate = {2022-09-23},
  booktitle = {ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages = {4698--4702},
  publisher = {IEEE},
  address = {Singapore, Singapore},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Fujiwara, Ken; Hoegen, Rens; Gratch, Jonathan; Dunbar, Norah E.
Synchrony facilitates altruistic decision making for non-human avatars Journal Article
In: Computers in Human Behavior, vol. 128, pp. 107079, 2022, ISSN: 07475632.
@article{fujiwara_synchrony_2022,
  title = {Synchrony facilitates altruistic decision making for non-human avatars},
  author = {Ken Fujiwara and Rens Hoegen and Jonathan Gratch and Norah E. Dunbar},
  url = {https://linkinghub.elsevier.com/retrieve/pii/S0747563221004027},
  doi = {10.1016/j.chb.2021.107079},
  issn = {0747-5632},
  year = {2022},
  date = {2022-03-01},
  urldate = {2022-09-28},
  journal = {Computers in Human Behavior},
  volume = {128},
  pages = {107079},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Visser, Ewart J.; Topoglu, Yigit; Joshi, Shawn; Krueger, Frank; Phillips, Elizabeth; Gratch, Jonathan; Tossell, Chad C.; Ayaz, Hasan
Designing Man’s New Best Friend: Enhancing Human-Robot Dog Interaction through Dog-Like Framing and Appearance Journal Article
In: Sensors, vol. 22, no. 3, pp. 1287, 2022, ISSN: 1424-8220.
@article{de_visser_designing_2022,
  title     = {Designing Man’s New Best Friend: Enhancing Human-Robot Dog Interaction through Dog-Like Framing and Appearance},
  author    = {Ewart J. Visser and Yigit Topoglu and Shawn Joshi and Frank Krueger and Elizabeth Phillips and Jonathan Gratch and Chad C. Tossell and Hasan Ayaz},
  url       = {https://www.mdpi.com/1424-8220/22/3/1287},
  doi       = {10.3390/s22031287},
  issn      = {1424-8220},
  year      = {2022},
  date      = {2022-02-01},
  urldate   = {2022-09-28},
  journal   = {Sensors},
  volume    = {22},
  number    = {3},
  pages     = {1287},
  abstract  = {To understand how to improve interactions with dog-like robots, we evaluated the importance of “dog-like” framing and physical appearance on interaction, hypothesizing multiple interactive benefits of each. We assessed whether framing Aibo as a puppy (i.e., in need of development) versus simply a robot would result in more positive responses and interactions. We also predicted that adding fur to Aibo would make it appear more dog-like, likable, and interactive. Twenty-nine participants engaged with Aibo in a 2 × 2 (framing × appearance) design by issuing commands to the robot. Aibo and participant behaviors were monitored per second, and evaluated via an analysis of commands issued, an analysis of command blocks (i.e., chains of commands), and using a T-pattern analysis of participant behavior. Participants were more likely to issue the “Come Here” command than other types of commands. When framed as a puppy, participants used Aibo’s dog name more often, praised it more, and exhibited more unique, interactive, and complex behavior with Aibo. Participants exhibited the most smiling and laughing behaviors with Aibo framed as a puppy without fur. Across conditions, after interacting with Aibo, participants felt Aibo was more trustworthy, intelligent, warm, and connected than at their initial meeting. This study shows the benefits of introducing a socially robotic agent with a particular frame and importance on realism (i.e., introducing the robot dog as a puppy) for more interactive engagement.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Baarslag, Tim; Kaisers, Michael; Gerding, Enrico H.; Jonker, Catholijn M.; Gratch, Jonathan
In: Karagözoğlu, Emin; Hyndman, Kyle B. (Ed.): Bargaining, pp. 387–406, Springer International Publishing, Cham, 2022, ISBN: 978-3-030-76665-8 978-3-030-76666-5.
@incollection{baarslag_self-sufficient_2022,
  title = {Self-sufficient, Self-directed, and Interdependent Negotiation Systems: A Roadmap Toward Autonomous Negotiation Agents},
  author = {Tim Baarslag and Michael Kaisers and Enrico H. Gerding and Catholijn M. Jonker and Jonathan Gratch},
  editor = {Emin Karagözoğlu and Kyle B. Hyndman},
  url = {https://link.springer.com/10.1007/978-3-030-76666-5_18},
  doi = {10.1007/978-3-030-76666-5_18},
  isbn = {978-3-030-76665-8 978-3-030-76666-5},
  year = {2022},
  date = {2022-01-01},
  urldate = {2022-09-27},
  booktitle = {Bargaining},
  pages = {387--406},
  publisher = {Springer International Publishing},
  address = {Cham},
  keywords = {},
  pubstate = {published},
  tppubtype = {incollection}
}
Hoegen, Jessie; DeVault, David; Gratch, Jonathan
Exploring the Function of Expressions in Negotiation: the DyNego-WOZ Corpus Journal Article
In: IEEE Transactions on Affective Computing, pp. 1–12, 2022, ISSN: 1949-3045, (Conference Name: IEEE Transactions on Affective Computing).
@article{hoegen_exploring_2022,
  title = {Exploring the Function of Expressions in Negotiation: the {DyNego-WOZ} Corpus},
  author = {Jessie Hoegen and David DeVault and Jonathan Gratch},
  doi = {10.1109/TAFFC.2022.3223030},
  issn = {1949-3045},
  year = {2022},
  date = {2022-01-01},
  journal = {IEEE Transactions on Affective Computing},
  pages = {1--12},
  abstract = {For affective computing to have an impact outside the laboratory, facial expressions must be studied in rich naturalistic situations. We argue negotiations are one such situation as they are ubiquitous in daily life, often evoke strong emotions, and perceived emotion shapes decisions and outcomes. Negotiations are a growing focus in AI research and applications, including agents that negotiate directly with people and attempt to use affective information. We introduce the DyNego-WOZ Corpus, which includes dyadic negotiation between participants and wizard-controlled virtual humans. We demonstrate the value of this corpus to the affective computing community by examining participants' facial expressions in response to a virtual human negotiation partner. We show that people's facial expressions typically co-occur with the end of their partner's speech (suggesting they reflect a reaction to the content of this speech), that these reactions do not correspond to prototypical emotional expressions, and that these reactions can help predict the expresser's subsequent action. We highlight challenges in working with such naturalistic data, including difficulties of expression recognition during speech, and the extreme variability of expressions, both across participants and within a negotiation. Our findings reinforce arguments that facial expressions convey more than emotional state but serve important communicative functions.},
  note = {Conference Name: IEEE Transactions on Affective Computing},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Chawla, Kushal; Shi, Weiyan; Zhang, Jingwen; Lucas, Gale; Yu, Zhou; Gratch, Jonathan
Social Influence Dialogue Systems: A Survey of Datasets and Models For Social Influence Tasks Journal Article
In: 2022, (Publisher: arXiv Version Number: 2).
@article{chawla_social_2022,
  title = {Social Influence Dialogue Systems: A Survey of Datasets and Models For Social Influence Tasks},
  author = {Kushal Chawla and Weiyan Shi and Jingwen Zhang and Gale Lucas and Zhou Yu and Jonathan Gratch},
  url = {https://arxiv.org/abs/2210.05664},
  doi = {10.48550/ARXIV.2210.05664},
  eprint = {2210.05664},
  eprinttype = {arXiv},
  year = {2022},
  date = {2022-01-01},
  urldate = {2023-08-22},
  abstract = {Dialogue systems capable of social influence such as persuasion, negotiation, and therapy, are essential for extending the use of technology to numerous realistic scenarios. However, existing research primarily focuses on either task-oriented or open-domain scenarios, a categorization that has been inadequate for capturing influence skills systematically. There exists no formal definition or category for dialogue systems with these skills and data-driven efforts in this direction are highly limited. In this work, we formally define and introduce the category of social influence dialogue systems that influence users' cognitive and emotional responses, leading to changes in thoughts, opinions, and behaviors through natural conversations. We present a survey of various tasks, datasets, and methods, compiling the progress across seven diverse domains. We discuss the commonalities and differences between the examined systems, identify limitations, and recommend future directions. This study serves as a comprehensive reference for social influence dialogue systems to inspire more dedicated research and discussion in this emerging area.},
  note = {Publisher: arXiv Version Number: 2},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Liu, Lixing; Gurney, Nikolos; McCullough, Kyle; Ustun, Volkan
Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations Proceedings Article
In: 2021 Winter Simulation Conference (WSC), pp. 1–12, IEEE, Phoenix, AZ, USA, 2021, ISBN: 978-1-66543-311-2.
@inproceedings{liu_graph_2021,
  title = {Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations},
  author = {Lixing Liu and Nikolos Gurney and Kyle McCullough and Volkan Ustun},
  url = {https://ieeexplore.ieee.org/document/9715433/},
  doi = {10.1109/WSC52266.2021.9715433},
  isbn = {978-1-66543-311-2},
  year = {2021},
  date = {2021-12-01},
  urldate = {2022-09-21},
  booktitle = {2021 Winter Simulation Conference (WSC)},
  pages = {1--12},
  publisher = {IEEE},
  address = {Phoenix, AZ, USA},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Yin, Yufeng; Lu, Liupei; Wu, Yizhen; Soleymani, Mohammad
Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection Proceedings Article
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–8, IEEE, Jodhpur, India, 2021, ISBN: 978-1-66543-176-7.
@inproceedings{yin_self-supervised_2021,
  title = {Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection},
  author = {Yufeng Yin and Liupei Lu and Yizhen Wu and Mohammad Soleymani},
  url = {https://ieeexplore.ieee.org/document/9667048/},
  doi = {10.1109/FG52635.2021.9667048},
  isbn = {978-1-66543-176-7},
  year = {2021},
  date = {2021-12-01},
  urldate = {2022-09-23},
  booktitle = {2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)},
  pages = {1--8},
  publisher = {IEEE},
  address = {Jodhpur, India},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Tran, Minh; Bradley, Ellen; Matvey, Michelle; Woolley, Joshua; Soleymani, Mohammad
Modeling Dynamics of Facial Behavior for Mental Health Assessment Proceedings Article
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–5, IEEE, Jodhpur, India, 2021, ISBN: 978-1-66543-176-7.
@inproceedings{tran_modeling_2021,
  title = {Modeling Dynamics of Facial Behavior for Mental Health Assessment},
  author = {Minh Tran and Ellen Bradley and Michelle Matvey and Joshua Woolley and Mohammad Soleymani},
  url = {https://ieeexplore.ieee.org/document/9666955/},
  doi = {10.1109/FG52635.2021.9666955},
  isbn = {978-1-66543-176-7},
  year = {2021},
  date = {2021-12-01},
  urldate = {2022-09-23},
  booktitle = {2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)},
  pages = {1--5},
  publisher = {IEEE},
  address = {Jodhpur, India},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Schuller, Bjorn W.; Picard, Rosalind; Andre, Elisabeth; Gratch, Jonathan; Tao, Jianhua
Intelligent Signal Processing for Affective Computing [From the Guest Editors] Journal Article
In: IEEE Signal Process. Mag., vol. 38, no. 6, pp. 9–11, 2021, ISSN: 1053-5888, 1558-0792.
@article{schuller_intelligent_2021,
  title = {Intelligent Signal Processing for Affective Computing [From the Guest Editors]},
  author = {Bjorn W. Schuller and Rosalind Picard and Elisabeth Andre and Jonathan Gratch and Jianhua Tao},
  url = {https://ieeexplore.ieee.org/document/9591500/},
  doi = {10.1109/MSP.2021.3096415},
  issn = {1053-5888, 1558-0792},
  year = {2021},
  date = {2021-11-01},
  urldate = {2022-09-29},
  journal = {IEEE Signal Process. Mag.},
  volume = {38},
  number = {6},
  pages = {9--11},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Kontogiorgos, Dimosthenis; Tran, Minh; Gustafson, Joakim; Soleymani, Mohammad
A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures Proceedings Article
In: Proceedings of the 2021 International Conference on Multimodal Interaction, pp. 112–120, ACM, Montréal QC Canada, 2021, ISBN: 978-1-4503-8481-0.
@inproceedings{kontogiorgos_systematic_2021,
  title = {A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures},
  author = {Dimosthenis Kontogiorgos and Minh Tran and Joakim Gustafson and Mohammad Soleymani},
  url = {https://dl.acm.org/doi/10.1145/3462244.3479887},
  doi = {10.1145/3462244.3479887},
  isbn = {978-1-4503-8481-0},
  year = {2021},
  date = {2021-10-01},
  urldate = {2022-09-23},
  booktitle = {Proceedings of the 2021 International Conference on Multimodal Interaction},
  pages = {112--120},
  publisher = {ACM},
  address = {Montréal QC Canada},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Rizzo, A., et al.
Normative Data for a Next Generation Virtual Classroom for Attention Assessment in Children with ADHD and Beyond! Proceedings Article
In: Proceedings of the 13th International Conference on Disability, Virtual Reality and Associated Technologies (ICDVRAT 2021), Serpa, Portugal, 2021.
@inproceedings{a_rizzo_et_al_normative_2021,
  title = {Normative Data for a Next Generation Virtual Classroom for Attention Assessment in Children with {ADHD} and Beyond!},
  author = {Rizzo, A. and others},
  url = {http://studio.hei-lab.ulusofona.pt/archive/},
  year = {2021},
  date = {2021-09-01},
  booktitle = {Proceedings of the 13th International Conference on Disability, Virtual Reality and Associated Technologies (ICDVRAT 2021)},
  address = {Serpa, Portugal},
  keywords = {},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Filter
2019
Rosenbloom, Paul S
(A)symmetry × (Non)monotonicity: Towards a Deeper Understanding of Key Cognitive Di/Trichotomies and the Common Model of Cognition Proceedings Article
In: Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling, pp. 6, Montreal, Canada, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{rosenbloom_symmetry_2019,
  title = {(A)symmetry × (Non)monotonicity: Towards a Deeper Understanding of Key Cognitive Di/Trichotomies and the Common Model of Cognition},
  author = {Paul S Rosenbloom},
  url = {https://iccm-conference.neocities.org/2019/proceedings/papers/ICCM2019_paper_6.pdf},
  year = {2019},
  date = {2019-07-01},
  booktitle = {Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling},
  pages = {6},
  address = {Montreal, Canada},
  abstract = {A range of dichotomies from across the cognitive sciences are reduced to either (a)symmetry or (non)monotonicity. Taking the cross-product of these two elemental dichotomies then yields a deeper understanding of both two key trichotomies –based on control and content hierarchies – and the Common Model of Cognition, with results that bear on the structure of integrative cognitive architectures, models and systems, and on their commonalities, differences and gaps.},
  keywords = {UARC, Virtual Humans},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Johnson, Emmanuel; Roediger, Sarah; Lucas, Gale; Gratch, Jonathan
Assessing Common Errors Students Make When Negotiating Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 30–37, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{johnson_assessing_2019,
  title = {Assessing Common Errors Students Make When Negotiating},
  author = {Emmanuel Johnson and Sarah Roediger and Gale Lucas and Jonathan Gratch},
  url = {http://dl.acm.org/citation.cfm?doid=3308532.3329470},
  doi = {10.1145/3308532.3329470},
  isbn = {978-1-4503-6672-4},
  year = {2019},
  date = {2019-07-01},
  booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
  pages = {30--37},
  publisher = {ACM Press},
  address = {Paris, France},
  abstract = {Research has shown that virtual agents can be effective tools for teaching negotiation. Virtual agents provide an opportunity for students to practice their negotiation skills which leads to better outcomes. However, these negotiation training agents often lack the ability to understand the errors students make when negotiating, thus limiting their effectiveness as training tools. In this article, we argue that automated opponent-modeling techniques serve as effective methods for diagnosing important negotiation mistakes. To demonstrate this, we analyze a large number of participant traces generated while negotiating with a set of automated opponents. We show that negotiators’ performance is closely tied to their understanding of an opponent’s preferences. We further show that opponent modeling techniques can diagnose specific errors including: failure to elicit diagnostic information from an opponent, failure to utilize the information that was elicited, and failure to understand the transparency of an opponent. These results show that opponent modeling techniques can be effective methods for diagnosing and potentially correcting crucial negotiation errors.},
  keywords = {UARC, Virtual Humans},
  pubstate = {published},
  tppubtype = {inproceedings}
}
Lucas, Gale M.; Lehr, Janina; Krämer, Nicole; Gratch, Jonathan
The Effectiveness of Social Influence Tactics when Used by a Virtual Agent Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 22–29, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lucas_effectiveness_2019,
  author    = {Gale M. Lucas and Janina Lehr and Nicole Krämer and Jonathan Gratch},
  title     = {The Effectiveness of Social Influence Tactics when Used by a Virtual Agent},
  booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
  pages     = {22–29},
  publisher = {ACM Press},
  address   = {Paris, France},
  year      = {2019},
  date      = {2019-07-01},
  doi       = {10.1145/3308532.3329464},
  isbn      = {978-1-4503-6672-4},
  url       = {http://dl.acm.org/citation.cfm?doid=3308532.3329464},
  abstract  = {Research in social science distinguishes between two types of social influence: informational and normative. Informational social influence is driven by the desire to evaluate ambiguous situations correctly, whereas normative social influence is driven by the desire to be liked and gain social acceptance from another person. Although we know from research that humans can effectively use either of these techniques to persuade other humans, scholars have yet to examine the relative effectiveness of informational versus normative social influence when used by virtual agents. We report a study in which users interact with a system that persuades them either using informational or normative social influence. Furthermore, to compare agents to human interlocutors, users are told that the system is either teleoperated by a human (avatar) or fully-automated (agent). Using this design, we are able to compare the effectiveness of virtual agents (vs humans) in employing informational versus normative social influence. Participants interacted with the system, which employed a Wizard-of-Oz operated virtual agent that tried to persuade the user to agree with its rankings on a “survival task.” Controlling for initial divergence in rankings between user and the agent, there was a significant main effect such that informational social influence resulted in greater influence than normative influence. However, this was qualified by an interaction that approached significance; users were, if anything, more persuaded by informational influence when they believe the agent was AI (compared to a human), whereas there was no difference between the agent and avatar in the normative influence condition.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Sohail, Usman; Traum, David
A Blissymbolics Translation System Proceedings Article
In: Proceedings of the Eighth Workshop on Speech and Language Processing for Assistive Technologies, pp. 32–36, Association for Computational Linguistics, Minneapolis, Minnesota, 2019.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{sohail_blissymbolics_2019,
  author    = {Usman Sohail and David Traum},
  title     = {A Blissymbolics Translation System},
  booktitle = {Proceedings of the Eighth Workshop on Speech and Language Processing for Assistive Technologies},
  pages     = {32–36},
  publisher = {Association for Computational Linguistics},
  address   = {Minneapolis, Minnesota},
  year      = {2019},
  date      = {2019-06-01},
  doi       = {10.18653/v1/W19-1705},
  url       = {http://aclweb.org/anthology/W19-1705},
  abstract  = {Blissymbolics (Bliss) is a pictographic writing system that is used by people with communication disorders. Bliss attempts to create a writing system that makes words easier to distinguish by using pictographic symbols that encapsulate meaning rather than sound, as the English alphabet does for example. Users of Bliss rely on human interpreters to use Bliss. We created a translation system from Bliss to natural English with the hopes of decreasing the reliance on human interpreters by the Bliss community. We first discuss the basic rules of Blissymbolics. Then we point out some of the challenges associated with developing computer assisted tools for Blissymbolics. Next we talk about our ongoing work in developing a translation system, including current limitations, and future work. We conclude with a set of examples showing the current capabilities of our translation system.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lee, Kyusong; Zhao, Tiancheng; Ultes, Stefan; Rojas-Barahona, Lina; Pincus, Eli; Traum, David; Eskenazi, Maxine
An Assessment Framework for DialPort Book Section
In: Advanced Social Interaction with Agents, vol. 510, pp. 79–85, Springer International Publishing, Cham, 2019, ISBN: 978-3-319-92107-5 978-3-319-92108-2.
Abstract | Links | BibTeX | Tags: Virtual Humans
@incollection{lee_assessment_2019,
  author    = {Kyusong Lee and Tiancheng Zhao and Stefan Ultes and Lina Rojas-Barahona and Eli Pincus and David Traum and Maxine Eskenazi},
  title     = {An Assessment Framework for DialPort},
  booktitle = {Advanced Social Interaction with Agents},
  volume    = {510},
  pages     = {79–85},
  publisher = {Springer International Publishing},
  address   = {Cham},
  year      = {2019},
  date      = {2019-06-01},
  urldate   = {2019-10-28},
  doi       = {10.1007/978-3-319-92108-2_10},
  isbn      = {978-3-319-92107-5 978-3-319-92108-2},
  url       = {http://link.springer.com/10.1007/978-3-319-92108-2_10},
  abstract  = {Collecting a large amount of real human-computer interaction data in various domains is a cornerstone in the development of better data-driven spoken dialog systems. The DialPort project is creating a portal to collect a constant stream of real user conversational data on a variety of topics. In order to keep real users attracted to DialPort, it is crucial to develop a robust evaluation framework to monitor and maintain high performance. Different from earlier spoken dialog systems, DialPort has a heterogeneous set of spoken dialog systems gathered under one outward-looking agent. In order to access this new structure, we have identified some unique challenges that DialPort will encounter so that it can appeal to real users and have created a novel evaluation scheme that quantitatively assesses their performance in these situations. We look at assessment from the point of view of the system developer as well as that of the end user.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Pilly, Praveen K; Skorheim, Steven W.; Hubbard, Ryan J.; Ketz, Nicholas A.; Roach, Shane M.; Jones, Aaron P.; Bradley, Robert; Bryant, Natalie B.; Lerner, Itamar; Hartholt, Arno; Mullins, Teagan S.; Choe, Jaehoon; Clark, Vincent P.; Howard, Michael P.
Spatiotemporal patterns of transcranial electrical stimulation can strengthen the metamemory of individual episodic memories in humans Journal Article
In: bioRxiv, pp. 110, 2019.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{pilly_spatiotemporal_2019,
  author    = {Praveen K Pilly and Steven W. Skorheim and Ryan J. Hubbard and Nicholas A. Ketz and Shane M. Roach and Aaron P. Jones and Robert Bradley and Natalie B. Bryant and Itamar Lerner and Arno Hartholt and Teagan S. Mullins and Jaehoon Choe and Vincent P. Clark and Michael P. Howard},
  title     = {Spatiotemporal patterns of transcranial electrical stimulation can strengthen the metamemory of individual episodic memories in humans},
  journal   = {bioRxiv},
  pages     = {110},
  year      = {2019},
  date      = {2019-06-01},
  doi       = {10.1101/672378},
  url       = {https://www.biorxiv.org/content/10.1101/672378v1.abstract},
  abstract  = {Long-term retention of memories critically depends on consolidation processes, which occur during slow-wave oscillations (SWOs) in non-rapid eye movement (NREM) sleep. We designed a non-invasive system that can tag one-shot experiences of naturalistic episodes within immersive virtual reality (VR) with unique spatiotemporal amplitude-modulated patterns (STAMPs) of transcranial electrical stimulation (tES). We demonstrate that these STAMPs can be re-applied during UP states of SWOs on two consecutive nights to achieve a 19.43% improvement in the metamemory of targeted episodes at 48 hours after the one-shot viewing, compared to the control episodes. Further, we found an interaction between pre-sleep metamemory of targeted episodes and the number of STAMP applications for those episodes during sleep, and that STAMPs elicit increases in left temporal slow-spindle (9-12 Hz) power that are predictive of overnight metamemory improvements. These results prescribe an optimal strategy to leverage STAMPs for boosting metamemory based on pre-sleep performance and tracking the STAMP-induced biomarker during sleep, and suggest that real-world episodic memories can be modulated in a targeted manner even with coarser, non-invasive spatiotemporal stimulation.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Song, Yale; Soleymani, Mohammad
Polysemous Visual-Semantic Embedding for Cross-Modal Retrieval Proceedings Article
In: Proceedings of the 2019 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10, IEEE, Long Beach, CA, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{song_polysemous_2019,
  author     = {Yale Song and Mohammad Soleymani},
  title      = {Polysemous Visual-Semantic Embedding for Cross-Modal Retrieval},
  booktitle  = {Proceedings of the 2019 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  pages      = {10},
  publisher  = {IEEE},
  address    = {Long Beach, CA},
  year       = {2019},
  date       = {2019-06-01},
  eprint     = {1906.04402},
  eprinttype = {arXiv},
  url        = {https://arxiv.org/abs/1906.04402},
  abstract   = {Visual-semantic embedding aims to find a shared latent space where related visual and textual instances are close to each other. Most current methods learn injective embedding functions that map an instance to a single point in the shared space. Unfortunately, injective embedding cannot effectively handle polysemous instances with multiple possible meanings; at best, it would find an average representation of different meanings. This hinders its use in real-world scenarios where individual instances and their cross-modal associations are often ambiguous. In this work, we introduce Polysemous Instance Embedding Networks (PIE-Nets) that compute multiple and diverse representations of an instance by combining global context with locally-guided features via multi-head self-attention and residual learning. To learn visual-semantic embedding, we tie-up two PIE-Nets and optimize them jointly in the multiple instance learning framework. Most existing work on cross-modal retrieval focus on image-text pairs of data. Here, we also tackle a more challenging case of video-text retrieval. To facilitate further research in video-text retrieval, we release a new dataset of 50K video-sentence pairs collected from social media, dubbed MRW (my reaction when). We demonstrate our approach on both image-text and video-text retrieval scenarios using MS-COCO, TGIF, and our new MRW dataset.},
  keywords   = {UARC, Virtual Humans},
  pubstate   = {published},
  tppubtype  = {inproceedings}
}
Johnson, Emmanuel; Lucas, Gale; Kim, Peter; Gratch, Jonathan
Intelligent Tutoring System for Negotiation Skills Training Book Section
In: Artificial Intelligence in Education, vol. 11626, pp. 122–127, Springer International Publishing, Cham, Switzerland, 2019, ISBN: 978-3-030-23206-1 978-3-030-23207-8.
Abstract | Links | BibTeX | Tags: Virtual Humans
@incollection{johnson_intelligent_2019,
  author    = {Emmanuel Johnson and Gale Lucas and Peter Kim and Jonathan Gratch},
  title     = {Intelligent Tutoring System for Negotiation Skills Training},
  booktitle = {Artificial Intelligence in Education},
  volume    = {11626},
  pages     = {122–127},
  publisher = {Springer International Publishing},
  address   = {Cham, Switzerland},
  year      = {2019},
  date      = {2019-06-01},
  doi       = {10.1007/978-3-030-23207-8_23},
  isbn      = {978-3-030-23206-1 978-3-030-23207-8},
  url       = {http://link.springer.com/10.1007/978-3-030-23207-8_23},
  abstract  = {Intelligent tutoring systems have proven very effective at teaching hard skills such as math and science, but less research has examined how to teach “soft” skills such as negotiation. In this paper, we introduce an effective approach to teaching negotiation tactics. Prior work showed that students can improve through practice with intelligent negotiation agents. We extend this work by proposing general methods of assessment and feedback that could be applied to a variety of such agents. We evaluate these techniques through a human subject study. Our study demonstrates that personalized feedback improves students’ use of several foundational tactics.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Managing Editor; Adler, Aaron; Dasgupta, Prithviraj; DePalma, Nick; Eslami, Mohammed; Freedman, Richard; Laird, John; Lebiere, Christian; Lohan, Katrin; Mead, Ross; Roberts, Mark; Rosenbloom, Paul; Senft, Emmanuel; Stein, Frank; Williams, Tom; Wray, Kyle Hollins; Yaman, Fusun; Zilberstein, Shlomo
Reports of the 2018 AAAI Fall Symposium Journal Article
In: AI Magazine, vol. 40, no. 2, pp. 66–72, 2019, ISSN: 2371-9621, 0738-4602.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{editor_reports_2019,
  author    = {{Managing Editor} and Aaron Adler and Prithviraj Dasgupta and Nick DePalma and Mohammed Eslami and Richard Freedman and John Laird and Christian Lebiere and Katrin Lohan and Ross Mead and Mark Roberts and Paul Rosenbloom and Emmanuel Senft and Frank Stein and Tom Williams and Kyle Hollins Wray and Fusun Yaman and Shlomo Zilberstein},
  title     = {Reports of the 2018 AAAI Fall Symposium},
  journal   = {AI Magazine},
  volume    = {40},
  number    = {2},
  pages     = {66–72},
  year      = {2019},
  date      = {2019-06-01},
  doi       = {10.1609/aimag.v40i2.2887},
  issn      = {2371-9621, 0738-4602},
  url       = {http://www.aaai.org/ojs/index.php/aimagazine/article/view/2887},
  abstract  = {The AAAI 2018 Fall Symposium Series was held Thursday through Saturday, October 18–20, at the Westin Arlington Gateway in Arlington, Virginia, adjacent to Washington, D.C. The titles of the eight symposia were Adversary-Aware Learning Techniques and Trends in Cybersecurity; Artificial Intelligence for Synthetic Biology; Artificial Intelligence in Government and Public Sector; A Common Model of Cognition; Gathering for Artificial Intelligence and Natural System; Integrating Planning, Diagnosis, and Causal Reasoning; Interactive Learning in Artificial Intelligence for Human-Robot Interaction; and Reasoning and Learning in Real-World Systems for Long-Term Autonomy. The highlights of each symposium (except the Gathering for Artificial Intelligence and Natural System symposium, whose organizers failed to submit a summary) are presented in this report.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Zhu, Runhe; Becerik-Gerber, Burcin; Lucas, Gale; Southers, Erroll; Pynadath, David V
Information Requirements for Virtual Environments to Study Human-Building Interactions during Active Shooter Incidents Journal Article
In: Computing in Civil Engineering, pp. 8, 2019.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{zhu_information_2019,
  author    = {Runhe Zhu and Burcin Becerik-Gerber and Gale Lucas and Erroll Southers and David V Pynadath},
  title     = {Information Requirements for Virtual Environments to Study Human-Building Interactions during Active Shooter Incidents},
  journal   = {Computing in Civil Engineering},
  pages     = {8},
  year      = {2019},
  date      = {2019-06-01},
  doi       = {10.1061/9780784482445.024},
  url       = {https://ascelibrary.org/doi/10.1061/9780784482445.024},
  abstract  = {Active shooter incidents present an increasing American homeland security threat to public safety and human life. Several municipal law enforcement agencies have released building design guidelines intended to offer increased resilience and resistance to potential attacks. However, these design recommendations mainly focus on terrorist attacks, prioritizing the enhancement of building security, whereas their impact on safety during active shooter incidents, and corresponding human-building interactions (HBIs) that influence the outcomes (response performance), remain unclear. To respond to this research gap, virtual reality, with its ability to manipulate environmental variables and scenarios while providing safe non-invasive environments, could be a promising method to conduct human-subject studies in the context of active shooter incidents. In this paper, we identify the requirements for developing virtual environments that represent active shooter incidents in buildings to study HBIs and their impacts on the response performance. Key components constituting virtual environments were considered and presented. These include: (1) what types of buildings should be modeled in virtual environments; (2) how to select protective building design recommendations for active shooter incidents and model them in virtual environments; (3) what types of adversary and crowd behavior should be modeled; and (4) what types of interactions among participants, buildings, adversaries, and crowds should be included in virtual environments. Findings on the above key components were summarized to provide recommendations for future research directions.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Aryal, Ashrant; Becerik-Gerber, Burcin; Anselmo, Francesco; Roll, Shawn C.; Lucas, Gale M.
Smart Desks to Promote Comfort, Health, and Productivity in Offices: A Vision for Future Workplaces Journal Article
In: Frontiers in Built Environment, vol. 5, 2019, ISSN: 2297-3362.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{aryal_smart_2019,
  author    = {Ashrant Aryal and Burcin Becerik-Gerber and Francesco Anselmo and Shawn C. Roll and Gale M. Lucas},
  title     = {Smart Desks to Promote Comfort, Health, and Productivity in Offices: A Vision for Future Workplaces},
  journal   = {Frontiers in Built Environment},
  volume    = {5},
  year      = {2019},
  date      = {2019-06-01},
  doi       = {10.3389/fbuil.2019.00076},
  issn      = {2297-3362},
  url       = {https://www.frontiersin.org/article/10.3389/fbuil.2019.00076/full},
  abstract  = {People spend most of their day in buildings, and a large portion of the energy in buildings is used to control the indoor environment for creating acceptable conditions for occupants. However, the majority of the building systems are controlled based on a “one size fits all” scheme which cannot account for individual occupant preferences. This leads to discomfort, low satisfaction and negative impacts on occupants’ productivity, health and well-being. In this paper, we describe our vision of how recent advances in Internet of Things (IoT) and machine learning can be used to add intelligence to an office desk to personalize the environment around the user. The smart desk can learn individual user preferences for the indoor environment, personalize the environment based on user preferences and act as an intelligent support system for improving user comfort, health and productivity. We briefly describe the recent advances made in different domains that can be leveraged to enhance occupant experience in buildings and describe the overall framework for the smart desk. We conclude the paper with a discussion of possible avenues for further research.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Zhao, Yajie; Huang, Zeng; Li, Tianye; Chen, Weikai; LeGendre, Chloe; Ren, Xinglei; Xing, Jun; Shapiro, Ari; Li, Hao
Learning Perspective Undistortion of Portraits Journal Article
In: arXiv:1905.07515 [cs], 2019.
Abstract | Links | BibTeX | Tags: Graphics, Virtual Humans
@article{zhao_learning_2019,
  author      = {Yajie Zhao and Zeng Huang and Tianye Li and Weikai Chen and Chloe LeGendre and Xinglei Ren and Jun Xing and Ari Shapiro and Hao Li},
  title       = {Learning Perspective Undistortion of Portraits},
  journal     = {arXiv:1905.07515 [cs]},
  year        = {2019},
  date        = {2019-05-01},
  eprint      = {1905.07515},
  eprinttype  = {arXiv},
  eprintclass = {cs.CV},
  url         = {http://arxiv.org/abs/1905.07515},
  abstract    = {Near-range portrait photographs often contain perspective distortion artifacts that bias human perception and challenge both facial recognition and reconstruction techniques. We present the first deep learning based approach to remove such artifacts from unconstrained portraits. In contrast to the previous state-of-the-art approach, our method handles even portraits with extreme perspective distortion, as we avoid the inaccurate and error-prone step of first fitting a 3D face model. Instead, we predict a distortion correction flow map that encodes a per-pixel displacement that removes distortion artifacts when applied to the input image. Our method also automatically infers missing facial features, i.e. occluded ears caused by strong perspective distortion, with coherent details. We demonstrate that our approach significantly outperforms the previous state-of-the-art both qualitatively and quantitatively, particularly for portraits with extreme perspective distortion or facial expressions. We further show that our technique benefits a number of fundamental tasks, significantly improving the accuracy of both face recognition and 3D reconstruction and enables a novel camera calibration technique from a single portrait. Moreover, we also build the first perspective portrait database with a large diversity in identities, expression and poses, which will benefit the related research in this area.},
  keywords    = {Graphics, Virtual Humans},
  pubstate    = {published},
  tppubtype   = {article}
}
Gilani, Setareh Nasihati; Traum, David; Sortino, Rachel; Gallagher, Grady; Aaron-Lozano, Kailyn; Padilla, Cryss; Shapiro, Ari; Lamberton, Jason; Petitto, Laura-Ann
Can a Virtual Human Facilitate Language Learning in a Young Baby? Proceedings Article
In: Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems, ACM, Montreal, Canada, 2019, ISBN: 978-1-4503-6309-9.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gilani_can_2019,
  author    = {Setareh Nasihati Gilani and David Traum and Rachel Sortino and Grady Gallagher and Kailyn Aaron-Lozano and Cryss Padilla and Ari Shapiro and Jason Lamberton and Laura-Ann Petitto},
  title     = {Can a Virtual Human Facilitate Language Learning in a Young Baby?},
  booktitle = {Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems},
  publisher = {ACM},
  address   = {Montreal, Canada},
  year      = {2019},
  date      = {2019-05-01},
  isbn      = {978-1-4503-6309-9},
  url       = {https://dl.acm.org/citation.cfm?id=3332035},
  abstract  = {There is a significant paucity of work on language learning systems for young infants [2, 5, 19] despite the widely understood critical importance that this developmental period has for healthy language and cognitive growth, and related reading and academic success [6, 14]. Deaf babies constitute one vulnerable population as they can experience dramatically reduced or no access to usable linguistic input during this period [18]. This causes potentially devastating impact on children's linguistic, cognitive, and social skills [9, 10, 15, 16, 20]. We introduced an AI system, called RAVE (Robot, AVatar, thermal Enhanced language learning tool), designed specifically for babies within the age range of 6-12 months [8, 17]. RAVE consists of two agents: a virtual human (provides language and socially contingent interactions) and an embodied robot (provides socially engaging physical cues to babies and directs babies' attention to the virtual human). Detailed description of the system's constituent components and dialogue algorithms are presented in [17] and [8].},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rychlowska, Magdalena; Schalk, Job; Gratch, Jonathan; Breitinger, Eva; Manstead, Antony S. R.
Beyond actions: Reparatory effects of regret in intergroup trust games Journal Article
In: Journal of Experimental Social Psychology, vol. 82, pp. 74–84, 2019, ISSN: 00221031.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{rychlowska_beyond_2019,
  author    = {Magdalena Rychlowska and Job Schalk and Jonathan Gratch and Eva Breitinger and Antony S. R. Manstead},
  title     = {Beyond actions: Reparatory effects of regret in intergroup trust games},
  journal   = {Journal of Experimental Social Psychology},
  volume    = {82},
  pages     = {74–84},
  year      = {2019},
  date      = {2019-05-01},
  doi       = {10.1016/j.jesp.2019.01.006},
  issn      = {0022-1031},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S0022103118303974},
  abstract  = {Intergroup trust is vital for cooperation and societal well-being, but is harder to establish than interpersonal trust. We investigate whether expressions of negative emotions, in particular regret, following economic decisions can shape intergroup trust. In each of three studies participants were members of a group playing a two-round trust game with another group. In the first round, they observed an outgroup member who acted fairly or unfairly towards the ingroup and then expressed positive (i.e., happiness) or negative (i.e., regret, unhappiness) emotions about this behavior. In the second round, participants played with another outgroup member. Emotions displayed by the outgroup representative following unfair behavior in round 1 influenced participants' allocations in round 2, which were higher following regret and unhappiness than following positive emotions. Thus, emotions expressed by one outgroup member affected interactions with other members who had not communicated emotions. Findings of Study 3 revealed that these effects were driven by regret increasing intergroup trust, rather than by happiness decreasing it. Moreover, participants' allocations were predicted by their perceptions of the extent to which the outgroup representative wished to change her behavior. Together, the findings reveal that regret expressions influence intergroup trust by attenuating the detrimental effects of unfair behavior.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Koeman, Vincent J; Hindriks, Koen V; Gratch, Jonathan; Jonker, Catholijn M
Recognising and Explaining Bidding Strategies in Negotiation Support Systems Proceedings Article
In: Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems, pp. 3, ACM, Montreal, Canada, 2019, ISBN: 978-1-4503-6309-9.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{koeman_recognising_2019,
  author    = {Vincent J Koeman and Koen V Hindriks and Jonathan Gratch and Catholijn M Jonker},
  title     = {Recognising and Explaining Bidding Strategies in Negotiation Support Systems},
  booktitle = {Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems},
  pages     = {3},
  publisher = {ACM},
  address   = {Montreal, Canada},
  year      = {2019},
  date      = {2019-05-01},
  isbn      = {978-1-4503-6309-9},
  url       = {https://dl.acm.org/citation.cfm?id=3332011},
  abstract  = {To improve a negotiator's ability to recognise bidding strategies, we pro-actively provide explanations that are based on the opponent's bids and the negotiator's guesses about the opponent's strategy. We introduce an aberration detection mechanism for recognising strategies and the notion of an explanation matrix. The aberration detection mechanism identifies when a bid falls outside the range of expected behaviour for a specific strategy. The explanation matrix is used to decide when to provide what explanations. We evaluated our work experimentally in a task in which participants are asked to identify their opponent's strategy in the environment of a negotiation support system, namely the Pocket Negotiator (PN). We implemented our explanation mechanism in the PN and experimented with different explanation matrices. As the number of correct guesses increases with explanations, indirectly, these experiments show the effectiveness of our aberration detection mechanism. Our experiments with over 100 participants show that suggesting consistent strategies is more effective than explaining why observed behaviour is inconsistent.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Georgila, Kallirroi; Core, Mark G; Nye, Benjamin D; Karumbaiah, Shamya; Auerbach, Daniel; Ram, Maya
Using Reinforcement Learning to Optimize the Policies of an Intelligent Tutoring System for Interpersonal Skills Training Proceedings Article
In: Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems, pp. 9, IFAAMAS, Montreal, Canada, 2019.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC, Virtual Humans
@inproceedings{georgila_using_2019,
  author    = {Kallirroi Georgila and Mark G Core and Benjamin D Nye and Shamya Karumbaiah and Daniel Auerbach and Maya Ram},
  title     = {Using Reinforcement Learning to Optimize the Policies of an Intelligent Tutoring System for Interpersonal Skills Training},
  booktitle = {Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems},
  pages     = {9},
  publisher = {IFAAMAS},
  address   = {Montreal, Canada},
  year      = {2019},
  date      = {2019-05-01},
  url       = {http://www.ifaamas.org/Proceedings/aamas2019/pdfs/p737.pdf},
  abstract  = {Reinforcement Learning (RL) has been applied successfully to Intelligent Tutoring Systems (ITSs) in a limited set of well-defined domains such as mathematics and physics. This work is unique in using a large state space and for applying RL to tutoring interpersonal skills. Interpersonal skills are increasingly recognized as critical to both social and economic development. In particular, this work enhances an ITS designed to teach basic counseling skills that can be applied to challenging issues such as sexual harassment and workplace conflict. An initial data collection was used to train RL policies for the ITS, and an evaluation with human participants compared a hand-crafted ITS which had been used for years with students (control) versus the new ITS guided by RL policies. The RL condition differed from the control condition most notably in the strikingly large quantity of guidance it provided to learners. Both systems were effective and there was an overall significant increase from pre- to post-test scores. Although learning gains did not differ significantly between conditions, learners had a significantly higher self-rating of confidence in the RL condition. Confidence and learning gains were both part of the reward function used to train the RL policies, and it could be the case that there was the most room for improvement in confidence, an important learner emotion. Thus, RL was successful in improving an ITS for teaching interpersonal skills without the need to prune the state space (as previously done).},
  keywords  = {Learning Sciences, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Traum, David
Developing a Virtual Reality Wildfire Simulation to Analyze Human Communication and Interaction with a Robotic Swarm During Emergencies Proceedings Article
In: Proceedings of the 9th Language and Technology Conference, LTC, Poznań, Poland, 2019.
Abstract | Links | BibTeX | Tags: ARL, DoD, MxR, UARC, Virtual Humans
@inproceedings{chaffey_developing_2019,
  author    = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and David Traum},
  title     = {Developing a Virtual Reality Wildfire Simulation to Analyze Human Communication and Interaction with a Robotic Swarm During Emergencies},
  booktitle = {Proceedings of the 9th Language and Technology Conference},
  publisher = {LTC},
  address   = {Poznań, Poland},
  year      = {2019},
  date      = {2019-05-01},
  url       = {http://www-scf.usc.edu/~nasihati/publications/HLTCEM_2019.pdf},
  abstract  = {Search and rescue missions involving robots face multiple challenges. The ratio of operators to robots is frequently one to one or higher, operators tasked with robots must contend with cognitive overload for long periods, and the robots themselves may be discomfiting to located survivors. To improve on the current state, we propose a swarm of robots equipped with natural language abilities and guided by a central virtual “spokesperson” able to access “plays”. The spokesperson may assist the operator with tasking the robots in their exploration of a zone, which allows the operator to maintain a safe distance. The use of multiple robots enables rescue personnel to cover a larger swath of ground, and the natural language component allows the robots to communicate with survivors located on site. This capability frees the operator to handle situations requiring personal attention, and overall can accelerate the location and assistance of survivors. In order to develop this system, we are creating a virtual reality simulation, in order to conduct a study and analysis of how humans communicate with these swarms of robots. The data collected from this experiment will inform how to best design emergency response swarm robots that are effectively able to communicate with the humans around them.},
  keywords  = {ARL, DoD, MxR, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gervits, Felix; Leuski, Anton; Bonial, Claire; Gordon, Carla; Traum, David
A Classification-Based Approach to Automating Human-Robot Dialogue Proceedings Article
In: Proceedings of IWSDS 2019, pp. 12, Siracusa, Italy, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{gervits_classification-based_2019,
title = {A Classification-Based Approach to Automating Human-Robot Dialogue},
author = {Felix Gervits and Anton Leuski and Claire Bonial and Carla Gordon and David Traum},
url = {https://www.semanticscholar.org/paper/A-Classification-Based-Approach-to-Automating-Gervits-Leuski/262cf9e3a14e370d46a5e65f7872b32482d9ea69?tab=abstract&citingPapersSort=is-influential&citingPapersLimit=10&citingPapersOffset=0&year%5B0%5D=&year%5B1%5D=&citedPapersSort=is-influential&citedPapersLimit=10&citedPapersOffset=10},
year = {2019},
date = {2019-04-01},
booktitle = {Proceedings of IWSDS 2019},
pages = {12},
address = {Siracusa, Italy},
abstract = {We present a dialogue system based on statistical classification which was used to automate human-robot dialogue in a collaborative navigation domain. The classifier was trained on a small corpus of multi-floor Wizard-of-Oz dialogue including two wizards: one standing in for dialogue capabilities and another for navigation. Below, we describe the implementation details of the classifier and show how it was used to automate the dialogue wizard. We evaluate our system on several sets of source data from the corpus and find that response accuracy is generally high, even with very limited training data. Another contribution of this work is the novel demonstration of a dialogue manager that uses the classifier to engage in multi-floor dialogue with two different human roles. Overall, this approach is useful for enabling spoken dialogue systems to produce robust and accurate responses to natural language input, and for robots that need to interact with humans in a team setting.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Woo, Simon S.; Artstein, Ron; Kaiser, Elsi; Le, Xiao; Mirkovic, Jelena
Using Episodic Memory for User Authentication Journal Article
In: ACM Transactions on Privacy and Security, vol. 22, no. 2, pp. Article 11, 2019.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{woo_using_2019,
title = {Using Episodic Memory for User Authentication},
author = {Simon S. Woo and Ron Artstein and Elsi Kaiser and Xiao Le and Jelena Mirkovic},
url = {https://doi.org/10.1145/3308992},
doi = {10.1145/3308992},
year = {2019},
date = {2019-04-01},
journal = {ACM Transactions on Privacy and Security},
volume = {22},
number = {2},
pages = {Article 11},
abstract = {Passwords are widely used for user authentication, but they are often difficult for a user to recall, easily cracked by automated programs, and heavily reused. Security questions are also used for secondary authentication. They are more memorable than passwords, because the question serves as a hint to the user, but they are very easily guessed. We propose a new authentication mechanism, called “life-experience passwords (LEPs).” Sitting somewhere between passwords and security questions, an LEP consists of several facts about a user-chosen life event—such as a trip, a graduation, a wedding, and so on. At LEP creation, the system extracts these facts from the user’s input and transforms them into questions and answers. At authentication, the system prompts the user with questions and matches the answers with the stored ones. We show that question choice and design make LEPs much more secure than security questions and passwords, while the question-answer format promotes low password reuse and high recall. Specifically, we find that: (1) LEPs are 109–1014 × stronger than an ideal, randomized, eight-character password; (2) LEPs are up to 3 × more memorable than passwords and on par with security questions; and (3) LEPs are reused half as often as passwords. While both LEPs and security questions use personal experiences for authentication, LEPs use several questions that are closely tailored to each user. This increases LEP security against guessing attacks. In our evaluation, only 0.7% of LEPs were guessed by casual friends, and 9.5% by family members or close friends—roughly half of the security question guessing rate. On the downside, LEPs take around 5 × longer to input than passwords. So, these qualities make LEPs suitable for multi-factor authentication at high-value servers, such as financial or sensitive work servers, where stronger authentication strength is needed.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Bernardet, Ulysses; Kang, Sin-Hwa; Feng, Andrew; DiPaola, Steve; Shapiro, Ari
Speech Breathing in Virtual Humans: An Interactive Model and Empirical Study Proceedings Article
In: 2019 IEEE Virtual Humans and Crowds for Immersive Environments (VHCIE), pp. 1–9, IEEE, Osaka, Japan, 2019, ISBN: 978-1-72813-219-8.
Abstract | Links | BibTeX | Tags: MxR, UARC, Virtual Humans
@inproceedings{bernardet_speech_2019,
title = {Speech Breathing in Virtual Humans: An Interactive Model and Empirical Study},
author = {Ulysses Bernardet and Sin-Hwa Kang and Andrew Feng and Steve DiPaola and Ari Shapiro},
url = {https://ieeexplore.ieee.org/document/8714737/},
doi = {10.1109/VHCIE.2019.8714737},
isbn = {978-1-72813-219-8},
year = {2019},
date = {2019-03-01},
booktitle = {2019 IEEE Virtual Humans and Crowds for Immersive Environments (VHCIE)},
pages = {1–9},
publisher = {IEEE},
address = {Osaka, Japan},
abstract = {Human speech production requires the dynamic regulation of air through the vocal system. While virtual character systems commonly are capable of speech output, they rarely take breathing during speaking – speech breathing – into account. We believe that integrating dynamic speech breathing systems in virtual characters can significantly contribute to augmenting their realism. Here, we present a novel control architecture aimed at generating speech breathing in virtual characters. This architecture is informed by behavioral, linguistic and anatomical knowledge of human speech breathing. Based on textual input and controlled by a set of lowand high-level parameters, the system produces dynamic signals in real-time that control the virtual character’s anatomy (thorax, abdomen, head, nostrils, and mouth) and sound production (speech and breathing).},
keywords = {MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Carla; Leuski, Anton; Benn, Grace; Klassen, Eric; Fast, Edward; Liewer, Matt; Hartholt, Arno; Traum, David
PRIMER: An Emotionally Aware Virtual Agent Proceedings Article
In: Proceedings of the 24th International Conference on Intelligent User Interfaces, pp. 10, ACM, Los Angeles, CA, 2019.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{gordon_primer_2019,
title = {PRIMER: An Emotionally Aware Virtual Agent},
author = {Carla Gordon and Anton Leuski and Grace Benn and Eric Klassen and Edward Fast and Matt Liewer and Arno Hartholt and David Traum},
url = {https://www.research.ibm.com/haifa/Workshops/user2agent2019/},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 24th International Conference on Intelligent User Interfaces},
pages = {10},
publisher = {ACM},
address = {Los Angeles, CA},
abstract = {PRIMER is a proof-of-concept system designed to show the potential of immersive dialogue agents and virtual environments that adapt and respond to both direct verbal input and indirect emotional input. The system has two novel interfaces: (1) for the user, an immersive VR environment and an animated virtual agent both of which adapt and react to the user’s direct input as well as the user’s perceived emotional state, and (2) for an observer, an interface that helps track the perceived emotional state of the user, with visualizations to provide insight into the system’s decision making process. While the basic system architecture can be adapted for many potential real world applications, the initial version of this system was designed to assist clinical social workers in helping children cope with bullying. The virtual agent produces verbal and non-verbal behaviors guided by a plan for the counseling session, based on in-depth discussions with experienced counselors, but is also reactive to both initiatives that the user takes, e.g. asking their own questions, and the user’s perceived emotional state.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Gordon, Carla; Sohail, Usman; Merchant, Chirag; Jones, Andrew; Campbell, Julia; Trimmer, Matthew; Bevington, Jeffrey; Engen, COL Christopher; Traum, David
Digital Survivor of Sexual Assault Proceedings Article
In: Proceedings of the 24th International Conference on Intelligent User Interfaces, pp. 417–425, ACM, Marina del Rey, California, 2019, ISBN: 978-1-4503-6272-6.
Abstract | Links | BibTeX | Tags: DoD, Graphics, MedVR, UARC, Virtual Humans
@inproceedings{artstein_digital_2019,
title = {Digital Survivor of Sexual Assault},
author = {Ron Artstein and Carla Gordon and Usman Sohail and Chirag Merchant and Andrew Jones and Julia Campbell and Matthew Trimmer and Jeffrey Bevington and COL Christopher Engen and David Traum},
url = {https://doi.org/10.1145/3301275.3302303},
doi = {10.1145/3301275.3302303},
isbn = {978-1-4503-6272-6},
year = {2019},
date = {2019-03-01},
booktitle = {Proceedings of the 24th International Conference on Intelligent User Interfaces},
pages = {417–425},
publisher = {ACM},
address = {Marina del Rey, California},
abstract = {The Digital Survivor of Sexual Assault (DS2A) is an interface that allows a user to have a conversational experience with a survivor of sexual assault, using Artificial Intelligence technology and recorded videos. The application uses a statistical classifier to retrieve contextually appropriate pre-recorded video utterances by the survivor, together with dialogue management policies which enable users to conduct simulated conversations with the survivor about the sexual assault, its aftermath, and other pertinent topics. The content in the application has been specifically elicited to support the needs for the training of U.S. Army professionals in the Sexual Harassment/Assault Response and Prevention (SHARP) Program, and the application comes with an instructional support package. The system has been tested with approximately 200 users, and is presently being used in the SHARP Academy's capstone course.},
keywords = {DoD, Graphics, MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Bönsch, Andrea; Feng, Andrew; Patel, Parth; Shapiro, Ari
Volumetric Video Capture using Unsynchronized, Low-cost Cameras: Proceedings Article
In: Proceedings of the 14th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications, pp. 255–261, SCITEPRESS - Science and Technology Publications, Prague, Czech Republic, 2019, ISBN: 978-989-758-354-4.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{bonsch_volumetric_2019,
title = {Volumetric Video Capture using Unsynchronized, Low-cost Cameras:},
author = {Andrea Bönsch and Andrew Feng and Parth Patel and Ari Shapiro},
url = {http://www.scitepress.org/DigitalLibrary/Link.aspx?doi=10.5220/0007373202550261},
doi = {10.5220/0007373202550261},
isbn = {978-989-758-354-4},
year = {2019},
date = {2019-02-01},
booktitle = {Proceedings of the 14th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications},
pages = {255–261},
publisher = {SCITEPRESS - Science and Technology Publications},
address = {Prague, Czech Republic},
abstract = {Volumetric video can be used in virtual and augmented reality applications to show detailed animated performances by human actors. In this paper, we describe a volumetric capture system based on a photogrammetry cage with unsynchronized, low-cost cameras which is able to generate high-quality geometric data for animated avatars. This approach requires, inter alia, a subsequent synchronization of the captured videos.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lerner, Itamar; Ketz, Nicholas A.; Jones, Aaron P.; Bryant, Natalie B.; Robert, Bradley; Skorheim, Steven W.; Hartholt, Arno; Rizzo, Albert S.; Gluck, Mark A.; Clark, Vincent P.; Pilly, Praveen K.
Transcranial Current Stimulation During Sleep Facilitates Insight into Temporal Rules, but does not Consolidate Memories of Individual Sequential Experiences Journal Article
In: Scientific Reports, vol. 9, no. 1, 2019, ISSN: 2045-2322.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@article{lerner_transcranial_2019,
title = {Transcranial Current Stimulation During Sleep Facilitates Insight into Temporal Rules, but does not Consolidate Memories of Individual Sequential Experiences},
author = {Itamar Lerner and Nicholas A. Ketz and Aaron P. Jones and Natalie B. Bryant and Bradley Robert and Steven W. Skorheim and Arno Hartholt and Albert S. Rizzo and Mark A. Gluck and Vincent P. Clark and Praveen K. Pilly},
url = {http://www.nature.com/articles/s41598-018-36107-7},
doi = {10.1038/s41598-018-36107-7},
issn = {2045-2322},
year = {2019},
date = {2019-02-01},
journal = {Scientific Reports},
volume = {9},
number = {1},
abstract = {Slow-wave sleep (SWS) is known to contribute to memory consolidation, likely through the reactivation of previously encoded waking experiences. Contemporary studies demonstrate that when auditory or olfactory stimulation is administered during memory encoding and then reapplied during SWS, memory consolidation can be enhanced, an effect that is believed to rely on targeted memory reactivation (TMR) induced by the sensory stimulation. Here, we show that transcranial current stimulations (tCS) during sleep can also be used to induce TMR, resulting in the facilitation of high-level cognitive processes. Participants were exposed to repeating sequences in a realistic 3D immersive environment while being stimulated with particular tCS patterns. A subset of these tCS patterns was then reapplied during sleep stages N2 and SWS coupled to slow oscillations in a closed-loop manner. We found that in contrast to our initial hypothesis, performance for the sequences corresponding to the reapplied tCS patterns was no better than for other sequences that received stimulations only during wake or not at all. In contrast, we found that the more stimulations participants received overnight, the more likely they were to detect temporal regularities governing the learned sequences the following morning, with tCS-induced beta power modulations during sleep mediating this effect.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
de Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
Human Cooperation When Acting Through Autonomous Machines Journal Article
In: Proceedings of the National Academy of Sciences, vol. 116, no. 9, pp. 3482–3487, 2019, ISSN: 0027-8424, 1091-6490.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@article{de_melo_human_2019,
title = {Human Cooperation When Acting Through Autonomous Machines},
author = {Celso M. de Melo and Stacy Marsella and Jonathan Gratch},
url = {http://www.pnas.org/lookup/doi/10.1073/pnas.1817656116},
doi = {10.1073/pnas.1817656116},
issn = {0027-8424, 1091-6490},
year = {2019},
date = {2019-02-01},
journal = {Proceedings of the National Academy of Sciences},
volume = {116},
number = {9},
pages = {3482–3487},
abstract = {Recent times have seen an emergence of intelligent machines that act autonomously on our behalf, such as autonomous vehicles. Despite promises of increased efficiency, it is not clear whether this paradigm shift will change how we decide when our self-interest (e.g., comfort) is pitted against the collective interest (e.g., environment). Here we show that acting through machines changes the way people solve these social dilemmas and we present experimental evidence showing that participants program their autonomous vehicles to act more cooperatively than if they were driving themselves. We show that this happens because programming causes selfish short-term rewards to become less salient, leading to considerations of broader societal goals. We also show that the programmed behavior is influenced by past experience. Finally, we report evidence that the effect generalizes beyond the domain of autonomous vehicles. We discuss implications for designing autonomous machines that contribute to a more cooperative society},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Chu, Veronica C.; Lucas, Gale M.; Lei, Su; Mozgai, Sharon; Khooshabeh, Peter; Gratch, Jonathan
Emotion Regulation in the Prisoner’s Dilemma: Effects of Reappraisal on Behavioral Measures and Cardiovascular Measures of Challenge and Threat Journal Article
In: Frontiers in Human Neuroscience, vol. 13, 2019, ISSN: 1662-5161.
Abstract | Links | BibTeX | Tags: ARL, DoD, MedVR, UARC, VHTL, Virtual Humans
@article{chu_emotion_2019,
title = {Emotion Regulation in the Prisoner’s Dilemma: Effects of Reappraisal on Behavioral Measures and Cardiovascular Measures of Challenge and Threat},
author = {Veronica C. Chu and Gale M. Lucas and Su Lei and Sharon Mozgai and Peter Khooshabeh and Jonathan Gratch},
url = {https://www.frontiersin.org/article/10.3389/fnhum.2019.00050/full},
doi = {10.3389/fnhum.2019.00050},
issn = {1662-5161},
year = {2019},
date = {2019-02-01},
journal = {Frontiers in Human Neuroscience},
volume = {13},
abstract = {The current study examines cooperation and cardiovascular responses in individuals that were defected on by their opponent in the first round of an iterated Prisoner’s Dilemma. In this scenario, participants were either primed with the emotion regulation strategy of reappraisal or no emotion regulation strategy, and their opponent either expressed an amused smile or a polite smile after the results were presented. We found that cooperation behavior decreased in the no emotion regulation group when the opponent expressed an amused smile compared to a polite smile. In the cardiovascular measures, we found significant differences between the emotion regulation conditions using the biopsychosocial (BPS) model of challenge and threat. However, the cardiovascular measures of participants instructed with the reappraisal strategy were only weakly comparable with a threat state of the BPS model, which involves decreased blood flow and perception of greater task demands than resources to cope with those demands. Conversely, the cardiovascular measures of participants without an emotion regulation were only weakly comparable with a challenge state of the BPS model, which involves increased blood flow and perception of having enough or more resources to cope with task demands.},
keywords = {ARL, DoD, MedVR, UARC, VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2018
Khashe, Saba; Lucas, Gale; Becerik-Gerber, Burcin; Gratch, Jonathan
Establishing Social Dialog between Buildings and Their Users Journal Article
In: International Journal of Human–Computer Interaction, pp. 1–12, 2018, ISSN: 1044-7318, 1532-7590.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{khashe_establishing_2018,
title = {Establishing Social Dialog between Buildings and Their Users},
author = {Saba Khashe and Gale Lucas and Burcin Becerik-Gerber and Jonathan Gratch},
url = {https://www.tandfonline.com/doi/full/10.1080/10447318.2018.1555346},
doi = {10.1080/10447318.2018.1555346},
issn = {1044-7318, 1532-7590},
year = {2018},
date = {2018-12-01},
journal = {International Journal of Human–Computer Interaction},
pages = {1–12},
abstract = {Behavioral intervention strategies have yet to become successful in the development of initiatives to foster pro-environmental behaviors in buildings. In this paper, we explored the potentials of increasing the effectiveness of requests aiming to promote pro-environmental behaviors by engaging users in a social dialog, given the effects of two possible personas that are more related to the buildings (i.e., building vs. building manager). We tested our hypotheses and evaluated our findings in virtual and physical environments and found similar effects in both environments. Our results showed that social dialog involvement persuaded respondents to perform more pro-environmental actions. However, these effects were significant when the requests were delivered by an agent representing the building. In addition, these strategies were not equally effective across all types of people and their effects varied for people with different characteristics. Our findings provide useful design choices for persuasive technologies aiming to promote pro-environmental behaviors.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Joshi, Himanshu; Rosenbloom, Paul S; Ustun, Volkan
Exact, Tractable Inference in the Sigma Cognitive Architecture via Sum-Product Networks Journal Article
In: Advances in Cognitive Systems, pp. 31–47, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{joshi_exact_2018,
title = {Exact, Tractable Inference in the Sigma Cognitive Architecture via Sum-Product Networks},
author = {Himanshu Joshi and Paul S. Rosenbloom and Volkan Ustun},
url = {http://www.cogsys.org/papers/ACSvol7/papers/paper-7-4.pdf},
year = {2018},
date = {2018-12-01},
journal = {Advances in Cognitive Systems},
pages = {31–47},
abstract = {Sum-product networks (SPNs) are a new kind of deep architecture that support exact, tractable inference over a large class of problems for which traditional graphical models cannot. The Sigma cognitive architecture is based on graphical models, posing a challenge for it to handle problems within this class, such as parsing with probabilistic grammars, a potentially important aspect of language processing. This work proves that an early unidirectional extension to Sigma’s graphical architecture, originally added in service of rule-like behavior but later also shown to support neural networks, can be leveraged to yield exact, tractable computations across this class of problems, and further demonstrates this tractability experimentally for probabilistic parsing. It thus shows that Sigma is able to specify any valid SPN and, despite its grounding in graphical models, retain the desirable inference properties of SPNs when solving them.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Schwartz, David; Lewine, Gabrielle; Shapiro, Ari; Feng, Andrew; Zhuang, Cindy
Addressing Sexist Attitudes on a College Campus through Virtual Role-Play with Digital Doppelgangers Proceedings Article
In: Proceedings of the 18th International Conference on Intelligent Virtual Agents - IVA '18, pp. 219–226, ACM Press, Sydney, NSW, Australia, 2018, ISBN: 978-1-4503-6013-5.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC, Virtual Humans
@inproceedings{wang_addressing_2018,
title = {Addressing Sexist Attitudes on a College Campus through Virtual Role-Play with Digital Doppelgangers},
author = {Ning Wang and David Schwartz and Gabrielle Lewine and Ari Shapiro and Andrew Feng and Cindy Zhuang},
url = {http://dl.acm.org/citation.cfm?doid=3267851.3267913},
doi = {10.1145/3267851.3267913},
isbn = {978-1-4503-6013-5},
year = {2018},
date = {2018-11-01},
booktitle = {Proceedings of the 18th International Conference on Intelligent Virtual Agents - IVA '18},
pages = {219–226},
publisher = {ACM Press},
address = {Sydney, NSW, Australia},
abstract = {Digital doppelgangers are virtual humans that highly resemble the real self but behave independently. Digital doppelgangers possess great potential to serve as powerful models for behavioral change. An emerging technology, the Rapid Avatar Capture and Simulation (RACAS) system, enables low-cost and high-speed scanning of a human user and creation of a digital doppelganger that is a fully animatable virtual 3D model of the user. We designed a virtual role-playing game, DELTA, that implements a powerful cognitive dissonance-based paradigm for attitudinal and behavioral change, and integrated it with digital doppelgangers to influence a human user’s attitude towards sexism on college campuses. In this paper, we discuss the design and evaluation the RACAS system and the DELTA game-based environment. Results indicate the potential impact of the DELTA game-based environment in creating an immersive virtual experience for attitudinal change.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Knott, Benjamin A.; Gratch, Jonathan; Cangelosi, Angelo; Caverlee, James
ACM Transactions on Interactive Intelligent Systems (TiiS) Special Issue on Trust and Influence in Intelligent Human-Machine Interaction Journal Article
In: ACM Transactions on Interactive Intelligent Systems, vol. 8, no. 4, pp. 1–3, 2018, ISSN: 21606455.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{knott_acm_2018,
title = {ACM Transactions on Interactive Intelligent Systems (TiiS) Special Issue on Trust and Influence in Intelligent Human-Machine Interaction},
author = {Benjamin A. Knott and Jonathan Gratch and Angelo Cangelosi and James Caverlee},
url = {http://dl.acm.org/citation.cfm?doid=3292532.3281451},
doi = {10.1145/3281451},
issn = {2160-6455},
year = {2018},
date = {2018-11-01},
journal = {ACM Transactions on Interactive Intelligent Systems},
volume = {8},
number = {4},
pages = {1–3},
abstract = {Recent advances in machine intelligence and robotics have enabled new forms of human-computer interaction characterized by greater adaptability, shared decision-making, and mixed initiative. These advances are leading toward machines that can operate with relative autonomy but are designed to interact or engage with human counterparts in joint human-machine teams. The degree to which people trust machines is critical to the efficacy of these teams. People will cooperate with, and rely upon, intelligent agents they trust. Those they do not trust fall into disuse. As intelligent agents become more self-directed, learn from their experiences, and adapt behavior over time, the relationship between people and machines becomes more complex, and designing system behaviors to engender the proper level of trust becomes more challenging. Moreover, as intelligent systems become common in safety-critical domains, we must understand and assess the influence they might exert on human decision making to avoid unintended consequences, such as over-trust, compliance, or undue influence. Online social environments further complicate human-machine relationships. In the social media ecosystem, intelligent agents (e.g., chatbots) might act as aids or assistants but also as competitors or adversaries. In this context, research challenges include understanding how human-machine relationships evolve in social media and especially how humans develop trust and are susceptible to influence in social networks.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Mell, Johnathan; Lucas, Gale; Mozgai, Sharon; Boberg, Jill; Artstein, Ron; Gratch, Jonathan
Towards a Repeated Negotiating Agent that Treats People Individually: Cooperation, Social Value Orientation, & Machiavellianism Proceedings Article
In: Proceedings of the 18th International Conference on Intelligent Virtual Agents, pp. 125–132, ACM, Sydney, Australia, 2018, ISBN: 978-1-4503-6013-5.
Abstract | Links | BibTeX | Tags: VHTL, Virtual Humans
@inproceedings{mell_towards_2018,
title = {Towards a Repeated Negotiating Agent that Treats People Individually: Cooperation, Social Value Orientation, & Machiavellianism},
author = {Johnathan Mell and Gale Lucas and Sharon Mozgai and Jill Boberg and Ron Artstein and Jonathan Gratch},
url = {https://dl.acm.org/citation.cfm?id=3267910},
doi = {10.1145/3267851.3267910},
isbn = {978-1-4503-6013-5},
year = {2018},
date = {2018-11-01},
booktitle = {Proceedings of the 18th International Conference on Intelligent Virtual Agents},
pages = {125–132},
publisher = {ACM},
address = {Sydney, Australia},
abstract = {We present the results of a study in which humans negotiate with computerized agents employing varied tactics over a repeated number of economic ultimatum games. We report that certain agents are highly effective against particular classes of humans: several individual difference measures for the human participant are shown to be critical in determining which agents will be successful. Asking for favors works when playing with pro-social people but backfires with more selfish individuals. Further, making poor offers invites punishment from Machiavellian individuals. These factors may be learned once and applied over repeated negotiations, which means user modeling techniques that can detect these differences accurately will be more successful than those that don’t. Our work additionally shows that a significant benefit of cooperation is also present in repeated games—after sufficient interaction. These results have deep significance to agent designers who wish to design agents that are effective in negotiating with a broad swath of real human opponents. Furthermore, it demonstrates the effectiveness of techniques which can reason about negotiation over time.},
keywords = {VHTL, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Carla; Georgila, Kallirroi; Choi, Hyungtak; Boberg, Jill; Traum, David
Evaluating Subjective Feedback for Internet of Things Dialogues Proceedings Article
In: Proceedings of the 22nd Workshop on the Semantics and Pragmatics of Dialogue, pp. 64–72, Aix-en-Provence, France, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{gordon_evaluating_2018,
title = {Evaluating Subjective Feedback for Internet of Things Dialogues},
author = {Carla Gordon and Kallirroi Georgila and Hyungtak Choi and Jill Boberg and David Traum},
url = {https://amubox.univ-amu.fr/s/6YcAg3TpLpfzGEn#pdfviewer},
year = {2018},
date = {2018-11-01},
booktitle = {Proceedings of the 22nd Workshop on the Semantics and Pragmatics of Dialogue},
pages = {64–72},
address = {Aix-en-Provence, France},
abstract = {This paper discusses the process of determining which subjective features are seen as ideal in a dialogue system, and linking these features to objectively quantifiable behaviors. A corpus of simulated system-user dialogues in the Internet of Things domain was manually annotated with a set of system communicative and action responses, and crowd-sourced ratings and qualitative feedback of these dialogues were collected. This corpus of subjective feedback was analyzed, revealing that raters described top ranked dialogues as Intelligent, Natural, Pleasant, and as having Personality. Additionally, certain communicative and action responses were statistically more likely to be present in dialogues described as having these features. There was also found to be a lack of agreement among raters as to whether a direct communication style, or a conversational one was preferred, suggesting that future research and development should consider creating models for different communication styles.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ustun, Volkan; Rosenbloom, Paul S; Sajjadi, Seyed; Nuttall, Jeremy
Controlling Synthetic Characters in Simulations: A Case for Cognitive Architectures and Sigma Proceedings Article
In: Proceedings of I/ITSEC 2018, National Training and Simulation Association, Orlando, FL, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{ustun_controlling_2018,
title = {Controlling Synthetic Characters in Simulations: A Case for Cognitive Architectures and Sigma},
author = {Volkan Ustun and Paul S. Rosenbloom and Seyed Sajjadi and Jeremy Nuttall},
url = {http://bcf.usc.edu/~rosenblo/Pubs/Ustun_IITSEC2018_D.pdf},
year = {2018},
date = {2018-11-01},
booktitle = {Proceedings of I/ITSEC 2018},
publisher = {National Training and Simulation Association},
address = {Orlando, FL},
abstract = {Simulations, along with other similar applications like virtual worlds and video games, require computational models of intelligence that generate realistic and credible behavior for the participating synthetic characters. Cognitive architectures, which are models of the fixed structure underlying intelligent behavior in both natural and artificial systems, provide a conceptually valid common basis, as evidenced by the current efforts towards a standard model of the mind, to generate human-like intelligent behavior for these synthetic characters. Developments in the field of artificial intelligence, mainly in probabilistic graphical models and neural networks, open up new opportunities for cognitive architectures to make the synthetic characters more autonomous and to enrich their behavior. Sigma (Σ) is a cognitive architecture and system that strives to combine what has been learned from four decades of independent work on symbolic cognitive architectures, probabilistic graphical models, and more recently neural models, under its graphical architecture hypothesis. Sigma leverages an extended form of factor graphs towards a uniform grand unification of not only traditional cognitive capabilities but also key non-cognitive aspects, creating unique opportunities for the construction of new kinds of cognitive models that possess a Theory-of-Mind and that are perceptual, autonomous, interactive, affective, and adaptive. 
In this paper, we will introduce Sigma along with its diverse capabilities and then use three distinct proof-of-concept Sigma models to highlight combinations of these capabilities: (1) Distributional reinforcement learning models in a simple OpenAI Gym problem; (2) A pair of adaptive and interactive agent models that demonstrate rule-based, probabilistic, and social reasoning in a physical security scenario instantiated within the SmartBody character animation platform; and (3) A knowledge-free exploration model in which an agent leverages only architectural appraisal variables, namely attention and curiosity, to locate an item while building up a map in a Unity environment.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hoegen, Rens; van der Schalk, Job; Lucas, Gale; Gratch, Jonathan
The impact of agent facial mimicry on social behavior in a prisoner’s dilemma Proceedings Article
In: Proceedings of the 18th International Conference on Intelligent Virtual Agents, pp. 275–280, ACM, Sydney, Australia, 2018, ISBN: 978-1-4503-6013-5.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{hoegen_impact_2018,
  title     = {The impact of agent facial mimicry on social behavior in a prisoner’s dilemma},
  author    = {Hoegen, Rens and Van Der Schalk, Job and Lucas, Gale and Gratch, Jonathan},
  url       = {https://dl.acm.org/citation.cfm?id=3267911},
  doi       = {10.1145/3267851.3267911},
  isbn      = {978-1-4503-6013-5},
  year      = {2018},
  date      = {2018-11-01},
  booktitle = {Proceedings of the 18th International Conference on Intelligent Virtual Agents},
  pages     = {275--280},
  publisher = {ACM},
  address   = {Sydney, Australia},
  abstract  = {A long tradition of research suggests a relationship between emotional mimicry and pro-social behavior, but the nature of this relationship is unclear. Does mimicry cause rapport and cooperation, or merely reflect it? Virtual humans can provide unique insights into these social processes by allowing unprecedented levels of experimental control. In a 2 x 2 factorial design, we examined the impact of facial mimicry and counter-mimicry in the iterated prisoner’s dilemma. Participants played with an agent that copied their smiles and frowns or one that showed the opposite pattern – i.e., that frowned when they smiled. As people tend to smile more than frown, we independently manipulated the contingency of expressions to ensure any effects are due to mimicry alone, and not the overall positivity/negativity of the agent: i.e., participants saw either a reflection of their own expressions or saw the expressions shown to a previous participant. Results show that participants smiled significantly more when playing an agent that mimicked them. Results also show a complex association between smiling, feelings of rapport, and cooperation. We discuss the implications of these findings on virtual human systems and theories of cooperation.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lucas, Gale M; Kramer, Nicole; Peters, Clara; Taesch, Lisa-Sophie; Mell, Johnathan; Gratch, Jonathan
Effects of Perceived Agency and Message Tone in Responding to a Virtual Personal Trainer Proceedings Article
In: Proceedings of the 18th International Conference on Intelligent Virtual Agents, pp. 247–254, ACM, Sydney, Australia, 2018, ISBN: 978-1-4503-6013-5.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lucas_effects_2018,
  title     = {Effects of Perceived Agency and Message Tone in Responding to a Virtual Personal Trainer},
  author    = {Lucas, Gale M and Kramer, Nicole and Peters, Clara and Taesch, Lisa-Sophie and Mell, Johnathan and Gratch, Jonathan},
  url       = {https://dl.acm.org/citation.cfm?id=3267855},
  doi       = {10.1145/3267851.3267855},
  isbn      = {978-1-4503-6013-5},
  year      = {2018},
  date      = {2018-11-01},
  booktitle = {Proceedings of the 18th International Conference on Intelligent Virtual Agents},
  pages     = {247--254},
  publisher = {ACM},
  address   = {Sydney, Australia},
  abstract  = {Research has demonstrated promising benefits of applying virtual trainers to promote physical fitness. The current study investigated the value of virtual agents in the context of personal fitness, compared to trainers with greater levels of perceived agency (avatar or live human). We also explored the possibility that the effectiveness of the virtual trainer might depend on the affective tone it uses when trying to motivate users. Accordingly, participants received either positively or negatively valenced motivational messages from a virtual human they believed to be either an agent or an avatar, or they received the messages from a human instructor via skype. Both self-report and physiological data were collected. Like in-person coaches, the live human trainer who used negatively valenced messages were well-regarded; however, when the agent or avatar used negatively valenced messages, participants responded more poorly than when they used positively valenced ones. Perceived agency also affected rapport: compared to the agent, users felt more rapport with the live human trainer or the avatar. Regardless of trainer type, they also felt more rapport - and said they put in more effort - with trainers that used positively valenced messages than those that used negatively valenced ones. However, in reality, they put in more physical effort (as measured by heart rate) when trainers employed the more negatively valenced affective tone. We discuss implications for human–computer interaction.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mell, Johnathan; Gratch, Jonathan; Baarslag, Tim; Aydogran, Reyhan; Jonker, Catholijn M
Results of the First Annual Human-Agent League of the Automated Negotiating Agents Competition Proceedings Article
In: Proceedings of the 18th International Conference on Intelligent Virtual Agents, pp. 23–28, ACM, Sydney, Australia, 2018, ISBN: 978-1-4503-6013-5.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mell_results_2018,
  title     = {Results of the First Annual Human-Agent League of the Automated Negotiating Agents Competition},
  author    = {Mell, Johnathan and Gratch, Jonathan and Baarslag, Tim and Aydogran, Reyhan and Jonker, Catholijn M},
  internal-note = {NOTE(review): "Aydogran" looks like an export typo — the ANAC organizer is usually spelled "Aydo{\u{g}}an"; verify against the published paper before correcting},
  url       = {https://dl.acm.org/citation.cfm?id=3267907},
  doi       = {10.1145/3267851.3267907},
  isbn      = {978-1-4503-6013-5},
  year      = {2018},
  date      = {2018-11-01},
  booktitle = {Proceedings of the 18th International Conference on Intelligent Virtual Agents},
  pages     = {23--28},
  publisher = {ACM},
  address   = {Sydney, Australia},
  abstract  = {We present the results of the first annual Human-Agent League of ANAC. By introducing a new human-agent negotiating platform to the research community at large, we facilitated new advancements in human-aware agents. This has succeeded in pushing the envelope in agent design, and creating a corpus of useful human-agent interaction data. Our results indicate a variety of agents were submitted, and that their varying strategies had distinct outcomes on many measures of the negotiation. These agents approach the problems endemic to human negotiation, including user modeling, bidding strategy, rapport techniques, and strategic bargaining. Some agents employed advanced tactics in information gathering or emotional displays and gained more points than their opponents, while others were considered more “likeable” by their partners.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lucas, Gale M; Boberg, Jill; Traum, David; Artstein, Ron; Gratch, Jonathan; Gainer, Alesia; Johnson, Emmanuel; Leuski, Anton; Nakano, Mikio
Culture, Errors, and Rapport-building Dialogue in Social Agents Proceedings Article
In: Proceedings of the 18th International Conference on Intelligent Virtual Agents, pp. 51–58, ACM, Sydney, Australia, 2018, ISBN: 978-1-4503-6013-5.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lucas_culture_2018,
  title     = {Culture, Errors, and Rapport-building Dialogue in Social Agents},
  author    = {Lucas, Gale M and Boberg, Jill and Traum, David and Artstein, Ron and Gratch, Jonathan and Gainer, Alesia and Johnson, Emmanuel and Leuski, Anton and Nakano, Mikio},
  url       = {https://dl.acm.org/citation.cfm?id=3267887},
  doi       = {10.1145/3267851.3267887},
  isbn      = {978-1-4503-6013-5},
  year      = {2018},
  date      = {2018-11-01},
  booktitle = {Proceedings of the 18th International Conference on Intelligent Virtual Agents},
  pages     = {51--58},
  publisher = {ACM},
  address   = {Sydney, Australia},
  abstract  = {This work explores whether culture impacts the extent to which social dialogue can mitigate (or exacerbate) the loss of trust caused when agents make conversational errors. Our study uses an agent designed to persuade users to agree with its rankings on two tasks. Participants from the U.S. and Japan completed our study. We perform two manipulations: (1) The presence of conversational errors – the agent exhibited errors in the second task or not; (2) The presence of social dialogue – between the two tasks, users either engaged in a social dialogue with the agent or completed a control task. Replicating previous research, conversational errors reduce the agent’s influence. However, we found that culture matters: there was a marginally significant three-way interaction with culture, presence of social dialogue, and presence of errors. The pattern of results suggests that, for American participants, social dialogue backfired if it is followed by errors, presumably because it extends the period of good performance, creating a stronger contrast effect with the subsequent errors. However, for Japanese participants, social dialogue if anything mitigates the detrimental effect of errors; the negative effect of errors is only seen in the absence of a social dialogue. Agent design should therefore take the culture of the intended users into consideration when considering use of social dialogue to bolster agents against conversational errors.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wang, Xijing; Krumhuber, Eva G.; Gratch, Jonathan
The interpersonal effects of emotions in money versus candy games Journal Article
In: Journal of Experimental Social Psychology, vol. 79, pp. 315–327, 2018.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{wang_interpersonal_2018,
  title     = {The interpersonal effects of emotions in money versus candy games},
  author    = {Wang, Xijing and Krumhuber, Eva G. and Gratch, Jonathan},
  url       = {https://www.sciencedirect.com/science/article/pii/S0022103118301914},
  year      = {2018},
  date      = {2018-11-01},
  journal   = {Journal of Experimental Social Psychology},
  volume    = {79},
  pages     = {315--327},
  abstract  = {Emotional expressions significantly influence perceivers’ behavior in economic games and negotiations. The current research examined the interpersonal effects of emotions when such information cannot be used to guide behavior for increasing personal gain and when monetary rewards are made salient. For this, a one-shot Public Goods Game (Studies 1, 2, and 3) and Dictator Game (Studies 4 and 5) were employed, in which the dominant strategy to maximize personal payoff is independent from the counterplayers’ intention signaled through their facial expressions (happiness, sadness, and anger). To elicit a monetary mindset, we used money (vs. candy) as the mode of exchange in the games with (Studies 1 and 2) or without (Studies 3, 4, and 5) additional contextual framing (i.e. Wall Street Game vs. Community Game). Across five studies (N = 1211), participants were found to be more generous towards happy and sad targets compared to angry ones. Such behavioral response based on emotional information was accounted for by the trait impressions (i.e. likability, trustworthiness) formed of the counterplayer. This effect was significantly reduced when money acted as the mode of exchange, thereby making participants focus more on their selfgain. Together, the findings extend previous work by highlighting the social functional role of emotions in human exchange and its moderation by money as a transaction medium.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Marge, Matthew; Bonial, Claire; Lukin, Stephanie M.; Hayes, Cory J.; Foots, Ashley; Artstein, Ron; Henry, Cassidy; Pollard, Kimberly A.; Gordon, Carla; Gervits, Felix; Leuski, Anton; Hill, Susan G.; Voss, Clare R.; Traum, David
Balancing Efficiency and Coverage in Human-Robot Dialogue Collection Proceedings Article
In: Proceedings of the AAAI Fall Symposium on Interactive Learning in Artificial Intelligence for Human-Robot Interaction, arXiv, Arlington, Virginia, 2018.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{marge_balancing_2018,
  title     = {Balancing Efficiency and Coverage in Human-Robot Dialogue Collection},
  author    = {Marge, Matthew and Bonial, Claire and Lukin, Stephanie M. and Hayes, Cory J. and Foots, Ashley and Artstein, Ron and Henry, Cassidy and Pollard, Kimberly A. and Gordon, Carla and Gervits, Felix and Leuski, Anton and Hill, Susan G. and Voss, Clare R. and Traum, David},
  url       = {https://arxiv.org/abs/1810.02017},
  year      = {2018},
  date      = {2018-10-01},
  booktitle = {Proceedings of the AAAI Fall Symposium on Interactive Learning in Artificial Intelligence for Human-Robot Interaction},
  publisher = {arXiv},
  internal-note = {NOTE(review): publisher "arXiv" for an AAAI symposium paper looks like an export artifact — confirm the actual publisher (likely AAAI Press) or treat this as the eprint host},
  address   = {Arlington, Virginia},
  abstract  = {We describe a multi-phased Wizard-of-Oz approach to collecting human-robot dialogue in a collaborative search and navigation task. The data is being used to train an initial automated robot dialogue system to support collaborative exploration tasks. In the first phase, a wizard freely typed robot utterances to human participants. For the second phase, this data was used to design a GUI that includes buttons for the most common communications, and templates for communications with varying parameters. Comparison of the data gathered in these phases show that the GUI enabled a faster pace of dialogue while still maintaining high coverage of suitable responses, enabling more efficient targeted data collection, and improvements in natural language understanding using GUI-collected data. As a promising first step towards interactive learning, this work shows that our approach enables the collection of useful training data for navigation-based HRI tasks.},
  keywords  = {ARL, DoD, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gilani, Setareh Nasihati; Traum, David; Merla, Arcangelo; Hee, Eugenia; Walker, Zoey; Manini, Barbara; Gallagher, Grady; Petitto, Laura-Ann
Multimodal Dialogue Management for Multiparty Interaction with Infants Proceedings Article
In: Proceedings of the 2018 on International Conference on Multimodal Interaction - ICMI '18, pp. 5–13, ACM Press, Boulder, CO, USA, 2018, ISBN: 978-1-4503-5692-3.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{nasihati_gilani_multimodal_2018,
  title     = {Multimodal Dialogue Management for Multiparty Interaction with Infants},
  author    = {Nasihati Gilani, Setareh and Traum, David and Merla, Arcangelo and Hee, Eugenia and Walker, Zoey and Manini, Barbara and Gallagher, Grady and Petitto, Laura-Ann},
  url       = {http://dl.acm.org/citation.cfm?doid=3242969.3243029},
  doi       = {10.1145/3242969.3243029},
  isbn      = {978-1-4503-5692-3},
  year      = {2018},
  date      = {2018-10-01},
  booktitle = {Proceedings of the 2018 on International Conference on Multimodal Interaction - ICMI '18},
  pages     = {5--13},
  publisher = {ACM Press},
  address   = {Boulder, CO, USA},
  abstract  = {We present dialogue management routines for a system to engage in multiparty agent-infant interaction. The ultimate purpose of this research is to help infants learn a visual sign language by engaging them in naturalistic and socially contingent conversations during an early-life critical period for language development (ages 6 to 12 months) as initiated by an artificial agent. As a first step, we focus on creating and maintaining agent-infant engagement that elicits appropriate and socially contingent responses from the baby. Our system includes two agents, a physical robot and an animated virtual human. The system's multimodal perception includes an eye-tracker (measures attention) and a thermal infrared imaging camera (measures patterns of emotional arousal). A dialogue policy is presented that selects individual actions and planned multiparty sequences based on perceptual inputs about the baby's internal changing states of emotional engagement. The present version of the system was evaluated in interaction with 8 babies. All babies demonstrated spontaneous and sustained engagement with the agents for several minutes, with patterns of conversationally relevant and socially contingent behaviors. We further performed a detailed case-study analysis with annotation of all agent and baby behaviors. Results show that the baby's behaviors were generally relevant to agent conversations and contained direct evidence for socially contingent responses by the baby to specific linguistic samples produced by the avatar. This work demonstrates the potential for language learning from agents in very young babies and has especially broad implications regarding the use of artificial agents with babies who have minimal language exposure in early life.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Aljanaki, Anna; Soleymani, Mohammad
A data-driven approach to mid-level perceptual musical feature modeling Proceedings Article
In: Proceedings of the 19th International Society for Music Information Retrieval Conference, arXiv, Paris, France, 2018.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{aljanaki_data-driven_2018,
  title     = {A data-driven approach to mid-level perceptual musical feature modeling},
  author    = {Aljanaki, Anna and Soleymani, Mohammad},
  url       = {https://arxiv.org/abs/1806.04903},
  year      = {2018},
  date      = {2018-09-01},
  booktitle = {Proceedings of the 19th International Society for Music Information Retrieval Conference},
  publisher = {arXiv},
  address   = {Paris, France},
  abstract  = {Musical features and descriptors could be coarsely divided into three levels of complexity. The bottom level contains the basic building blocks of music, e.g., chords, beats and timbre. The middle level contains concepts that emerge from combining the basic blocks: tonal and rhythmic stability, harmonic and rhythmic complexity, etc. High-level descriptors (genre, mood, expressive style) are usually modeled using the lower level ones. The features belonging to the middle level can both improve automatic recognition of high-level descriptors, and provide new music retrieval possibilities. Mid-level features are subjective and usually lack clear definitions. However, they are very important for human perception of music, and on some of them people can reach high agreement, even though defining them and therefore, designing a hand-crafted feature extractor for them can be difficult. In this paper, we derive the mid-level descriptors from data. We collect and release a dataset\footnote{https://osf.io/5aupt/} of 5000 songs annotated by musicians with seven mid-level descriptors, namely, melodiousness, tonal and rhythmic stability, modality, rhythmic complexity, dissonance and articulation. We then compare several approaches to predicting these descriptors from spectrograms using deep-learning. We also demonstrate the usefulness of these mid-level features using music emotion recognition as an application.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rayatdoost, Soheil; Soleymani, Mohammad
Cross-Corpus EEG-Based Emotion Recognition Proceedings Article
In: 2018 IEEE 28th International Workshop on Machine Learning for Signal Processing (MLSP), pp. 1–6, IEEE, Aalborg, Denmark, 2018, ISBN: 978-1-5386-5477-4.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{rayatdoost_cross-corpus_2018,
  title     = {Cross-Corpus {EEG}-Based Emotion Recognition},
  author    = {Rayatdoost, Soheil and Soleymani, Mohammad},
  url       = {https://ieeexplore.ieee.org/document/8517037/},
  doi       = {10.1109/MLSP.2018.8517037},
  isbn      = {978-1-5386-5477-4},
  year      = {2018},
  date      = {2018-09-01},
  booktitle = {2018 IEEE 28th International Workshop on Machine Learning for Signal Processing (MLSP)},
  pages     = {1--6},
  publisher = {IEEE},
  address   = {Aalborg, Denmark},
  abstract  = {Lack of generalization is a common problem in automatic emotion recognition. The present study aims to explore the suitability of the existing EEG features for emotion recognition and investigate the performance of emotion recognition methods across different corpora. We introduce a novel dataset which includes spontaneous emotions and was analyzed in addition to the existing datasets for cross-corpus evaluation. We demonstrate that the performance of the existing methods significantly decreases when evaluated across different corpora. The best results are obtained by a convolutional neural network fed by spectral topography maps from different bands. We provide some evidence that stimuli-related sensory information is learned by machine learning models for emotion recognition using EEG signals.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Larue, Othalia; West, Robert; Rosenbloom, Paul S.; Dancy, Christopher L.; Samsonovich, Alexei V.; Petters, Dean; Juvina, Ion
Emotion in the Common Model of Cognition Journal Article
In: Procedia Computer Science, vol. 145, pp. 740–746, 2018, ISSN: 18770509.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{larue_emotion_2018,
  title     = {Emotion in the Common Model of Cognition},
  author    = {Larue, Othalia and West, Robert and Rosenbloom, Paul S. and Dancy, Christopher L. and Samsonovich, Alexei V. and Petters, Dean and Juvina, Ion},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S1877050918323317},
  doi       = {10.1016/j.procs.2018.11.045},
  issn      = {1877-0509},
  year      = {2018},
  date      = {2018-08-01},
  journal   = {Procedia Computer Science},
  volume    = {145},
  pages     = {740--746},
  abstract  = {Emotions play an important role in human cognition and therefore need to be present in the Common Model of Cognition. In this paper, the emotion working group focuses on functional aspects of emotions and describes what we believe are the points of interactions with the Common Model of Cognition. The present paper should not be viewed as a consensus of the group but rather as a first attempt to extract common and divergent aspects of different models of emotions and how they relate to the Common Model of Cognition.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Kralik, Jerald D.; Lee, Jee Hang; Rosenbloom, Paul S.; Jackson, Philip C.; Epstein, Susan L.; Romero, Oscar J.; Sanz, Ricardo; Larue, Othalia; Schmidtke, Hedda R.; Lee, Sang Wan; McGreggor, Keith
Metacognition for a Common Model of Cognition Journal Article
In: Procedia Computer Science, vol. 145, pp. 730–739, 2018, ISSN: 18770509.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{kralik_metacognition_2018,
  title     = {Metacognition for a Common Model of Cognition},
  author    = {Kralik, Jerald D. and Lee, Jee Hang and Rosenbloom, Paul S. and Jackson, Philip C. and Epstein, Susan L. and Romero, Oscar J. and Sanz, Ricardo and Larue, Othalia and Schmidtke, Hedda R. and Lee, Sang Wan and McGreggor, Keith},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S1877050918323329},
  doi       = {10.1016/j.procs.2018.11.046},
  issn      = {1877-0509},
  year      = {2018},
  date      = {2018-08-01},
  journal   = {Procedia Computer Science},
  volume    = {145},
  pages     = {730--739},
  abstract  = {This paper provides a starting point for the development of metacognition in a common model of cognition. It identifies significant theoretical work on metacognition from multiple disciplines that the authors believe worthy of consideration. After first defining cognition and metacognition, we outline three general categories of metacognition, provide an initial list of its main components, consider the more difficult problem of consciousness, and present examples of prominent artificial systems that have implemented metacognitive components. Finally, we identify pressing design issues for the future},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Manuvinakurike, Ramesh; Brixey, Jacqueline; Bui, Trung; Chang, Walter; Artstein, Ron; Georgila, Kallirroi
DialEdit: Annotations for Spoken Conversational Image Editing Proceedings Article
In: Proceedings of the 14th Joint ACL - ISO Workshop on Interoperable Semantic Annotation, Association for Computational Linguistics, Santa Fe, New Mexico, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{manuvinakurike_dialedit_2018,
  title     = {{DialEdit}: Annotations for Spoken Conversational Image Editing},
  author    = {Manuvinakurike, Ramesh and Brixey, Jacqueline and Bui, Trung and Chang, Walter and Artstein, Ron and Georgila, Kallirroi},
  url       = {https://aclanthology.info/papers/W18-4701/w18-4701},
  year      = {2018},
  date      = {2018-08-01},
  booktitle = {Proceedings of the 14th Joint ACL - ISO Workshop on Interoperable Semantic Annotation},
  publisher = {Association for Computational Linguistics},
  address   = {Santa Fe, New Mexico},
  abstract  = {We present a spoken dialogue corpus and annotation scheme for conversational image editing, where people edit an image interactively through spoken language instructions. Our corpus contains spoken conversations between two human participants: users requesting changes to images and experts performing these modifications in real time. Our annotation scheme consists of 26 dialogue act labels covering instructions, requests, and feedback, together with actions and entities for the content of the edit requests. The corpus supports research and development in areas such as incremental intent recognition, visual reference resolution, image-grounded dialogue modeling, dialogue state tracking, and user modeling.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale M.; Gratch, Jonathan
Welcome to the Real World: How Agent Strategy Increases Human Willingness to Deceive Proceedings Article
In: Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems, pp. 1250–1257, International Foundation for Autonomous Agents and Multiagent Systems, Stockholm, Sweden, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{mell_welcome_2018,
  title     = {Welcome to the Real World: How Agent Strategy Increases Human Willingness to Deceive},
  author    = {Mell, Johnathan and Lucas, Gale M. and Gratch, Jonathan},
  url       = {https://dl.acm.org/citation.cfm?id=3237884},
  year      = {2018},
  date      = {2018-07-01},
  booktitle = {Proceedings of the 17th International Conference on Autonomous Agents and MultiAgent Systems},
  pages     = {1250--1257},
  publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
  address   = {Stockholm, Sweden},
  abstract  = {Humans that negotiate through representatives often instruct those representatives to act in certain ways that align with both the client's goals and his or her social norms. However, which tactics and ethical norms humans endorse vary widely from person to person, and these endorsements may be easy to manipulate. This work presents the results of a study that demonstrates that humans that interact with an artificial agent may change what kinds of tactics and norms they endorse-often dramatically. Previous work has indicated that people that negotiate through artificial agent representatives may be more inclined to fairness than those people that negotiate directly. Our work qualifies that initial picture, demonstrating that subsequent experience may change this tendency toward fairness. By exposing human negotiators to tough, automated agents, we are able to shift the participant's willingness to deceive others and utilize "hard-ball" negotiation techniques. In short, what techniques people decide to endorse is dependent upon their context and experience. We examine the effects of interacting with four different types of automated agents, each with a unique strategy, and how this subsequently changes which strategies a human negotiator might later endorse. In the study, which was conducted on an online negotiation platform, four different types of automated agents negotiate with humans over the course of a 10-minute interaction. The agents differ in a 2x2 design according to agent strategy (tough vs. fair) and agent attitude (nice vs. nasty). These results show that in this multi-issue bargaining task, humans that interacted with a tough agent were more willing to endorse deceptive techniques when instructing their own representative. These kinds of techniques were endorsed even if the agent the human encountered did not use deception as part of its strategy. In contrast to some previous work, there was not a significant effect of agent attitude. 
These results indicate the power of allowing people to program agents that follow their instructions, but also indicate that these social norms and tactic endorsements may be mutable in the presence of real negotiation experience.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lukin, Stephanie M.; Pollard, Kimberly A.; Bonial, Claire; Marge, Matthew; Henry, Cassidy; Artstein, Ron; Traum, David; Voss, Clare R.
Consequences and Factors of Stylistic Differences in Human-Robot Dialogue Proceedings Article
In: Proceedings of the SIGDIAL 2018 Conference, pp. 110–118, Association for Computational Linguistics, Melbourne, Australia, 2018.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{lukin_consequences_2018,
  title     = {Consequences and Factors of Stylistic Differences in Human-Robot Dialogue},
  author    = {Lukin, Stephanie M. and Pollard, Kimberly A. and Bonial, Claire and Marge, Matthew and Henry, Cassidy and Artstein, Ron and Traum, David and Voss, Clare R.},
  url       = {https://www.aclweb.org/anthology/papers/W/W18/W18-5012/},
  doi       = {10.18653/v1/W18-5012},
  year      = {2018},
  date      = {2018-07-01},
  booktitle = {Proceedings of the SIGDIAL 2018 Conference},
  pages     = {110--118},
  publisher = {Association for Computational Linguistics},
  address   = {Melbourne, Australia},
  abstract  = {This paper identifies stylistic differences in instruction-giving observed in a corpus of human-robot dialogue. Differences in verbosity and structure (i.e., single-intent vs. multi-intent instructions) arose naturally without restrictions or prior guidance on how users should speak with the robot. Different styles were found to produce different rates of miscommunication, and correlations were found between style differences and individual user variation, trust, and interaction experience with the robot. Understanding potential consequences and factors that influence style can inform design of dialogue systems that are robust to natural variation from human users.},
  keywords  = {ARL, DoD, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Muessig, Kathryn E.; Knudtson, Kelly A.; Soni, Karina; Larsen, Margo Adams; Traum, David; Dong, Willa; Conserve, Donaldson F.; Leuski, Anton; Artstein, Ron; Hightow-Weidman, Lisa B.
“I Didn't Tell You Sooner Because I Didn't Know How to Handle it Myself”: Developing a Virtual Reality Program to Support HIV-Status Disclosure Decisions Journal Article
In: Digital Culture and Education, vol. 10, pp. 22–48, 2018, ISSN: 1836-8301.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{muessig_i_2018,
  title     = {“I Didn't Tell You Sooner Because I Didn't Know How to Handle it Myself”: Developing a Virtual Reality Program to Support {HIV}-Status Disclosure Decisions},
  author    = {Muessig, Kathryn E. and Knudtson, Kelly A. and Soni, Karina and Larsen, Margo Adams and Traum, David and Dong, Willa and Conserve, Donaldson F. and Leuski, Anton and Artstein, Ron and Hightow-Weidman, Lisa B.},
  url       = {http://www.digitalcultureandeducation.com/s/Muessig-et-al-July-2018.pdf},
  issn      = {1836-8301},
  year      = {2018},
  date      = {2018-07-01},
  journal   = {Digital Culture and Education},
  volume    = {10},
  pages     = {22--48},
  abstract  = {HIV status disclosure is associated with increased social support and protective behaviors against HIV transmission. Yet disclosure poses significant challenges in the face of persistent societal stigma. Few interventions focus on decision-making, self-efficacy, and communication skills to support disclosing HIV status to an intimate partner. Virtual reality (VR) and artificial intelligence (AI) technologies offer powerful tools to address this gap. Informed by Social Cognitive Theory, we created the Tough Talks VR program for HIV-positive young men who have sex with men (YMSM) to practice status disclosure safely and confidentially. Fifty-eight YMSM (ages 18 – 30, 88% HIV-positive) contributed 132 disclosure dialogues to develop the prototype through focus groups, usability testing, and a technical pilot. The prototype includes three disclosure scenarios (neutral, sympathetic, and negative response) and a database of 125 virtual character utterances. Participants select a VR scenario and realistic virtual character with whom to practice. In a pilot test of the fully automated neutral response scenario, the AI system responded appropriately to 71% of participant utterances. Most pilot study participants agreed Tough Talks was easy to use (9/11) and that they would like to use the system frequently (9/11). Tough Talks demonstrates that VR can be used to practice HIV status disclosure and lessons learned from program development offer insights for the use of AI systems for other areas of health and education.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Manuvinakurike, Ramesh; Bui, Trung; Chang, Walter; Georgila, Kallirroi
Conversational Image Editing: Incremental Intent Identification in a New Dialogue Task Proceedings Article
In: Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue, pp. 284–295, Association for Computational Linguistics, Melbourne, Australia, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{manuvinakurike_conversational_2018,
title = {Conversational Image Editing: Incremental Intent Identification in a New Dialogue Task},
author = {Ramesh Manuvinakurike and Trung Bui and Walter Chang and Kallirroi Georgila},
url = {https://aclanthology.org/W18-5033/},
doi = {10.18653/v1/W18-5033},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the 19th Annual {SIGdial} Meeting on Discourse and Dialogue},
pages = {284--295},
publisher = {Association for Computational Linguistics},
address = {Melbourne, Australia},
abstract = {We present “conversational image editing”, a novel real-world application domain combining dialogue, visual information, and the use of computer vision. We discuss the importance of dialogue incrementality in this task, and build various models for incremental intent identification based on deep learning and traditional classification algorithms. We show how our model based on convolutional neural networks outperforms models based on random forests, long short term memory networks, and conditional random fields. By training embeddings based on image-related dialogue corpora, we outperform pre-trained out-of-the-box embeddings, for intention identification tasks. Our experiments also provide evidence that incremental intent processing may be more efficient for the user and could save time in accomplishing tasks.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Karkada, Deepthi; Manuvinakurike, Ramesh; Georgila, Kallirroi
Towards Understanding End-of-trip Instructions in a Taxi Ride Scenario Proceedings Article
In: Proceedings of the Fourteenth Joint ACL - ISO Workshop on Interoperable Semantic Annotation, arxiv.org, Santa Fe, New Mexico, 2018.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{karkada_towards_2018,
title = {Towards Understanding End-of-trip Instructions in a Taxi Ride Scenario},
author = {Deepthi Karkada and Ramesh Manuvinakurike and Kallirroi Georgila},
url = {https://arxiv.org/abs/1807.03950},
eprint = {1807.03950},
eprinttype = {arXiv},
year = {2018},
date = {2018-07-01},
booktitle = {Proceedings of the Fourteenth Joint ACL - ISO Workshop on Interoperable Semantic Annotation},
address = {Santa Fe, New Mexico},
abstract = {We introduce a dataset containing human-authored descriptions of target locations in an “end of-trip in a taxi ride” scenario. We describe our data collection method and a novel annotation scheme that supports understanding of such descriptions of target locations. Our dataset contains target location descriptions for both synthetic and real-world images as well as visual annotations (ground truth labels, dimensions of vehicles and objects, coordinates of the target location, distance and direction of the target location from vehicles and objects) that can be used in various visual and language tasks. We also perform a pilot experiment on how the corpus could be applied to visual reference resolution in this domain.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}