Publications
Search
Stocco, Andrea; Sibert, Catherine; Steine-Hanson, Zoe; Koh, Natalie; Laird, John E.; Lebiere, Christian J.; Rosenbloom, Paul
Analysis of the human connectome data supports the notion of a “Common Model of Cognition” for human and human-like intelligence across domains Journal Article
In: NeuroImage, vol. 235, pp. 118035, 2021, ISSN: 1053-8119.
@article{stocco_analysis_2021-1,
  author    = {Andrea Stocco and Catherine Sibert and Zoe Steine-Hanson and Natalie Koh and John E. Laird and Christian J. Lebiere and Paul Rosenbloom},
  title     = {Analysis of the human connectome data supports the notion of a {“Common Model of Cognition”} for human and human-like intelligence across domains},
  journal   = {NeuroImage},
  volume    = {235},
  pages     = {118035},
  year      = {2021},
  date      = {2021-07-01},
  urldate   = {2021-04-30},
  doi       = {10.1016/j.neuroimage.2021.118035},
  issn      = {1053-8119},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S1053811921003128},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Stocco, Andrea; Sibert, Catherine; Steine-Hanson, Zoe; Koh, Natalie; Laird, John E.; Lebiere, Christian J.; Rosenbloom, Paul
Analysis of the human connectome data supports the notion of a “Common Model of Cognition” for human and human-like intelligence across domains Journal Article
In: NeuroImage, vol. 235, pp. 118035, 2021, ISSN: 1053-8119.
@article{stocco_analysis_2021,
  author    = {Andrea Stocco and Catherine Sibert and Zoe Steine-Hanson and Natalie Koh and John E. Laird and Christian J. Lebiere and Paul Rosenbloom},
  title     = {Analysis of the human connectome data supports the notion of a {“Common Model of Cognition”} for human and human-like intelligence across domains},
  journal   = {NeuroImage},
  volume    = {235},
  pages     = {118035},
  year      = {2021},
  date      = {2021-07-01},
  urldate   = {2021-05-06},
  doi       = {10.1016/j.neuroimage.2021.118035},
  issn      = {1053-8119},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S1053811921003128},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Horstmann, Aike C.; Gratch, Jonathan; Krämer, Nicole C.
I Just Wanna Blame Somebody, Not Something! Reactions to a Computer Agent Giving Negative Feedback Based on the Instructions of a Person Journal Article
In: International Journal of Human-Computer Studies, pp. 102683, 2021, ISSN: 1071-5819.
@article{horstmann_i_2021,
  author    = {Aike C. Horstmann and Jonathan Gratch and Nicole C. Krämer},
  title     = {I Just Wanna Blame Somebody, Not Something! Reactions to a Computer Agent Giving Negative Feedback Based on the Instructions of a Person},
  journal   = {International Journal of Human-Computer Studies},
  pages     = {102683},
  year      = {2021},
  date      = {2021-06-01},
  urldate   = {2021-06-18},
  doi       = {10.1016/j.ijhcs.2021.102683},
  issn      = {1071-5819},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S1071581921001014},
  abstract  = {Previous research focused on differences between interacting with a person-controlled avatar and a computer-controlled virtual agent. This study however examines an aspiring form of technology called agent representative which constitutes a mix of the former two interaction partner types since it is a computer agent which was previously instructed by a person to take over a task on the person’s behalf. In an experimental lab study with a 2 x 3 between-subjects-design (N = 195), people believed to study together either with an agent representative, avatar, or virtual agent. The interaction partner was described to either possess high or low expertise, while always giving negative feedback regarding the participant’s performance. Results show small but interesting differences regarding the type of agency. People attributed the most agency and blame to the person(s) behind the software and reported the most negative affect when interacting with an avatar, which was less the case for a person’s agent representative and the least for a virtual agent. Level of expertise had no significant effect and other evaluation measures were not affected.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
de Melo, Celso M.; Gratch, Jonathan; Krueger, Frank
Heuristic thinking and altruism toward machines in people impacted by COVID-19 Journal Article
In: iScience, vol. 24, no. 3, pp. 102228, 2021, ISSN: 2589-0042.
@article{de_melo_heuristic_2021-1,
  author    = {Celso M. de Melo and Jonathan Gratch and Frank Krueger},
  title     = {Heuristic thinking and altruism toward machines in people impacted by {COVID-19}},
  journal   = {iScience},
  volume    = {24},
  number    = {3},
  pages     = {102228},
  year      = {2021},
  date      = {2021-03-01},
  urldate   = {2021-04-14},
  doi       = {10.1016/j.isci.2021.102228},
  issn      = {2589-0042},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S2589004221001966},
  abstract  = {Autonomous machines are poised to become pervasive, but most treat machines differently: we are willing to violate social norms and less likely to display altruism toward machines. Here, we report an unexpected effect that those impacted by COVID-19—as measured by a post-traumatic stress disorder scale—show a sharp reduction in this difference. Participants engaged in the dictator game with humans and machines and, consistent with prior research on disasters, those impacted by COVID-19 displayed more altruism to other humans. Unexpectedly, participants impacted by COVID-19 displayed equal altruism toward human and machine partners. A mediation analysis suggests that altruism toward machines was explained by an increase in heuristic thinking—reinforcing prior theory that heuristic thinking encourages people to treat machines like people—and faith in technology—perhaps reflecting long-term consequences on how we act with machines. These findings give insight, but also raise concerns, for the design of technology.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Mell, Johnathan; Beissinger, Markus; Gratch, Jonathan
An expert-model and machine learning hybrid approach to predicting human-agent negotiation outcomes in varied data Journal Article
In: Journal on Multimodal User Interfaces, 2021, ISSN: 1783-7677, 1783-8738.
@article{mell_expert-model_2021,
  author    = {Johnathan Mell and Markus Beissinger and Jonathan Gratch},
  title     = {An expert-model and machine learning hybrid approach to predicting human-agent negotiation outcomes in varied data},
  journal   = {Journal on Multimodal User Interfaces},
  year      = {2021},
  date      = {2021-03-01},
  urldate   = {2021-04-15},
  doi       = {10.1007/s12193-021-00368-w},
  issn      = {1783-7677, 1783-8738},
  url       = {http://link.springer.com/10.1007/s12193-021-00368-w},
  abstract  = {We present the results of a machine-learning approach to the analysis of several human-agent negotiation studies. By combining expert knowledge of negotiating behavior compiled over a series of empirical studies with neural networks, we show that a hybrid approach to parameter selection yields promise for designing more effective and socially intelligent agents. Specifically, we show that a deep feedforward neural network using a theory-driven three-parameter model can be effective in predicting negotiation outcomes. Furthermore, it outperforms other expert-designed models that use more parameters, as well as those using other techniques (such as linear regression models or boosted decision trees). In a follow-up study, we show that the most successful models change as the dataset size increases and the prediction targets change, and show that boosted decision trees may not be suitable for the negotiation domain. We anticipate these results will have impact for those seeking to combine extensive domain knowledge with more automated approaches in human-computer negotiation. Further, we show that this approach can be a stepping stone from purely exploratory research to targeted human-behavioral experimentation. Through our approach, areas of social artificial intelligence that have historically benefited from expert knowledge and traditional AI approaches can be combined with more recent proven-effective machine learning algorithms.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Gramlich, Michael A.; Smolenski, Derek J.; Norr, Aaron M.; Rothbaum, Barbara O.; Rizzo, Albert A.; Andrasik, Frank; Fantelli, Emily; Reger, Greg M.
In: Depression and Anxiety, pp. da.23141, 2021, ISSN: 1091-4269, 1520-6394.
@article{gramlich_psychophysiology_2021,
  author    = {Michael A. Gramlich and Derek J. Smolenski and Aaron M. Norr and Barbara O. Rothbaum and Albert A. Rizzo and Frank Andrasik and Emily Fantelli and Greg M. Reger},
  title     = {Psychophysiology during exposure to trauma memories: Comparative effects of virtual reality and imaginal exposure for posttraumatic stress disorder},
  journal   = {Depression and Anxiety},
  pages     = {da.23141},
  year      = {2021},
  date      = {2021-03-01},
  urldate   = {2021-04-14},
  doi       = {10.1002/da.23141},
  issn      = {1091-4269, 1520-6394},
  url       = {https://onlinelibrary.wiley.com/doi/10.1002/da.23141},
  abstract  = {Background: This investigation involved an in‐depth examination of psychophysiological responses during exposure to the trauma memory across 10 sessions among active duty soldiers with combat‐related posttraumatic stress disorder (PTSD) treated by Prolonged Exposure (PE) or Virtual Reality Exposure (VRE). We compared psychophysiological changes, session‐by‐session, between VRE and traditional imaginal exposure.
Methods: Heart rate (HR), galvanic skin response (GSR), and peripheral skin temperature were collected every 5 min during exposure sessions with 61 combat veterans of Iraq/Afghanistan and compared to the PTSD Checklist (PCL‐C) and Clinician‐Administered PTSD Scale (CAPS) outcomes using multilevel modeling. Results: Over the course of treatment, participants in the PE group had higher HR arousal compared to participants in the VRE group. With reference to GSR, in earlier sessions, participants demonstrated a within‐session increase, whereas, in later sessions, participants showed a within‐session habituation response. A significant interaction was found for GSR and treatment assignment for within‐session change, withinperson effect, predicting CAPS (d = 0.70) and PCL‐C (d = 0.66) outcomes.
Conclusion: Overall, these findings suggest that exposure to traumatic memories activates arousal across sessions, with GSR being most associated with reductions in PTSD symptoms for participants in the PE group.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Methods: Heart rate (HR), galvanic skin response (GSR), and peripheral skin temperature were collected every 5 min during exposure sessions with 61 combat veterans of Iraq/Afghanistan and compared to the PTSD Checklist (PCL‐C) and Clinician‐Administered PTSD Scale (CAPS) outcomes using multilevel modeling. Results: Over the course of treatment, participants in the PE group had higher HR arousal compared to participants in the VRE group. With reference to GSR, in earlier sessions, participants demonstrated a within‐session increase, whereas, in later sessions, participants showed a within‐session habituation response. A significant interaction was found for GSR and treatment assignment for within‐session change, withinperson effect, predicting CAPS (d = 0.70) and PCL‐C (d = 0.66) outcomes.
Conclusion: Overall, these findings suggest that exposure to traumatic memories activates arousal across sessions, with GSR being most associated with reductions in PTSD symptoms for participants in the PE group.
de Melo, Celso M.; Gratch, Jonathan; Krueger, Frank
Heuristic thinking and altruism toward machines in people impacted by COVID-19 Journal Article
In: iScience, vol. 24, no. 3, pp. 102228, 2021, ISSN: 2589-0042.
@article{de_melo_heuristic_2021,
  author    = {Celso M. de Melo and Jonathan Gratch and Frank Krueger},
  title     = {Heuristic thinking and altruism toward machines in people impacted by {COVID-19}},
  journal   = {iScience},
  volume    = {24},
  number    = {3},
  pages     = {102228},
  year      = {2021},
  date      = {2021-03-01},
  urldate   = {2021-04-14},
  doi       = {10.1016/j.isci.2021.102228},
  issn      = {2589-0042},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S2589004221001966},
  abstract  = {Autonomous machines are poised to become pervasive, but most treat machines differently: we are willing to violate social norms and less likely to display altruism toward machines. Here, we report an unexpected effect that those impacted by COVID-19—as measured by a post-traumatic stress disorder scale—show a sharp reduction in this difference. Participants engaged in the dictator game with humans and machines and, consistent with prior research on disasters, those impacted by COVID-19 displayed more altruism to other humans. Unexpectedly, participants impacted by COVID-19 displayed equal altruism toward human and machine partners. A mediation analysis suggests that altruism toward machines was explained by an increase in heuristic thinking—reinforcing prior theory that heuristic thinking encourages people to treat machines like people—and faith in technology—perhaps reflecting long-term consequences on how we act with machines. These findings give insight, but also raise concerns, for the design of technology.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Brixey, Jacqueline; Traum, David
Masheli: A Choctaw-English Bilingual Chatbot Incollection
In: D'Haro, Luis Fernando; Callejas, Zoraida; Nakamura, Satoshi (Ed.): Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 41–50, Springer Singapore, Singapore, 2021, ISBN: 9789811583940 9789811583957, (Series Title: Lecture Notes in Electrical Engineering).
@incollection{dharo_masheli_2021,
  author    = {Jacqueline Brixey and David Traum},
  editor    = {Luis Fernando D'Haro and Zoraida Callejas and Satoshi Nakamura},
  title     = {{Masheli}: A {Choctaw-English} Bilingual Chatbot},
  booktitle = {Conversational Dialogue Systems for the Next Decade},
  volume    = {704},
  pages     = {41--50},
  publisher = {Springer Singapore},
  address   = {Singapore},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-04-15},
  doi       = {10.1007/978-981-15-8395-7_4},
  isbn      = {978-981-15-8394-0, 978-981-15-8395-7},
  url       = {http://link.springer.com/10.1007/978-981-15-8395-7_4},
  abstract  = {We present the implementation of an autonomous Choctaw-English bilingual chatbot. Choctaw is an American indigenous language. The intended use of the chatbot is for Choctaw language learners to practice. The system’s backend is NPCEditor, a response selection program that is trained on linked questions and answers. The chatbot’s answers are stories and conversational utterances in both languages. We experiment with the ability of NPCEditor to appropriately respond to language mixed utterances, and describe a pilot study with Choctaw-English speakers.},
  note      = {Series Title: Lecture Notes in Electrical Engineering},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Gordon, Carla; Georgila, Kallirroi; Yanov, Volodymyr; Traum, David
Towards Personalization of Spoken Dialogue System Communication Strategies Incollection
In: D'Haro, Luis Fernando; Callejas, Zoraida; Nakamura, Satoshi (Ed.): Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 145–160, Springer Singapore, Singapore, 2021, ISBN: 9789811583940 9789811583957, (Series Title: Lecture Notes in Electrical Engineering).
@incollection{dharo_towards_2021,
  author    = {Carla Gordon and Kallirroi Georgila and Volodymyr Yanov and David Traum},
  editor    = {Luis Fernando D'Haro and Zoraida Callejas and Satoshi Nakamura},
  title     = {Towards Personalization of Spoken Dialogue System Communication Strategies},
  booktitle = {Conversational Dialogue Systems for the Next Decade},
  volume    = {704},
  pages     = {145--160},
  publisher = {Springer Singapore},
  address   = {Singapore},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-04-15},
  doi       = {10.1007/978-981-15-8395-7_11},
  isbn      = {978-981-15-8394-0, 978-981-15-8395-7},
  url       = {http://link.springer.com/10.1007/978-981-15-8395-7_11},
  abstract  = {This study examines the effects of 3 conversational traits – Register, Explicitness, and Misunderstandings – on user satisfaction and the perception of specific subjective features for Virtual Home Assistant spoken dialogue systems. Eight different system profiles were created, each representing a different combination of these 3 traits. We then utilized a novel Wizard of Oz data collection tool and recruited participants who interacted with the 8 different system profiles, and then rated the systems on 7 subjective features. Surprisingly, we found that systems which made errors were preferred overall, with the statistical analysis revealing error-prone systems were rated higher than systems which made no errors for all 7 of the subjective features rated. There were also some interesting interaction effects between the 3 conversational traits, such as implicit confirmations being preferred for systems employing a “conversational” Register, while explicit confirmations were preferred for systems employing a “formal” Register, even though there was no overall main effect for Explicitness. This experimental framework offers a fine-grained approach to the evaluation of user satisfaction which looks towards the personalization of communication strategies for spoken dialogue systems.},
  note      = {Series Title: Lecture Notes in Electrical Engineering},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Lee, Minha; Lucas, Gale; Gratch, Jonathan
Comparing mind perception in strategic exchanges: human-agent negotiation, dictator and ultimatum games Journal Article
In: Journal on Multimodal User Interfaces, 2021, ISSN: 1783-7677, 1783-8738.
@article{lee_comparing_2021,
  author    = {Minha Lee and Gale Lucas and Jonathan Gratch},
  title     = {Comparing mind perception in strategic exchanges: human-agent negotiation, dictator and ultimatum games},
  journal   = {Journal on Multimodal User Interfaces},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-04-15},
  doi       = {10.1007/s12193-020-00356-6},
  issn      = {1783-7677, 1783-8738},
  url       = {http://link.springer.com/10.1007/s12193-020-00356-6},
  abstract  = {Recent research shows that how we respond to other social actors depends on what sort of mind we ascribe to them. In a comparative manner, we observed how perceived minds of agents shape people’s behavior in the dictator game, ultimatum game, and negotiation against artificial agents. To do so, we varied agents’ minds on two dimensions of the mind perception theory: agency (cognitive aptitude) and patiency (affective aptitude) via descriptions and dialogs. In our first study, agents with emotional capacity garnered more allocations in the dictator game, but in the ultimatum game, agents’ described agency and affective capacity, both led to greater offers. In the second study on negotiation, agents ascribed with low-agency traits earned more points than those with high-agency traits, though the negotiation tactic was the same for all agents. Although patiency did not impact game points, participants sent more happy and surprise emojis and emotionally valenced messages to agents that demonstrated emotional capacity during negotiations. Further, our exploratory analyses indicate that people related only to agents with perceived affective aptitude across all games. Both perceived agency and affective capacity contributed to moral standing after dictator and ultimatum games. But after negotiations, only agents with perceived affective capacity were granted moral standing. Manipulating mind dimensions of machines has differing effects on how people react to them in dictator and ultimatum games, compared to a more complex economic exchange like negotiation. We discuss these results, which show that agents are perceived not only as social actors, but as intentional actors through negotiations, in contrast with simple economic games.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Gratch, Jonathan
The Promise and Peril of Automated Negotiators Journal Article
In: Negotiation Journal, vol. 37, no. 1, pp. 13–34, 2021, ISSN: 0748-4526, 1571-9979.
@article{gratch_promise_2021,
  author    = {Jonathan Gratch},
  title     = {The Promise and Peril of Automated Negotiators},
  journal   = {Negotiation Journal},
  volume    = {37},
  number    = {1},
  pages     = {13--34},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-04-14},
  doi       = {10.1111/nejo.12348},
  issn      = {0748-4526, 1571-9979},
  url       = {https://onlinelibrary.wiley.com/doi/10.1111/nejo.12348},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
de Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
Risk of Injury in Moral Dilemmas With Autonomous Vehicles Journal Article
In: Frontiers in Robotics and AI, vol. 7, pp. 572529, 2021, ISSN: 2296-9144.
@article{de_melo_risk_2021,
  author    = {Celso M. de Melo and Stacy Marsella and Jonathan Gratch},
  title     = {Risk of Injury in Moral Dilemmas With Autonomous Vehicles},
  journal   = {Frontiers in Robotics and AI},
  volume    = {7},
  pages     = {572529},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-04-14},
  doi       = {10.3389/frobt.2020.572529},
  issn      = {2296-9144},
  url       = {https://www.frontiersin.org/articles/10.3389/frobt.2020.572529/full},
  abstract  = {As autonomous machines, such as automated vehicles (AVs) and robots, become pervasive in society, they will inevitably face moral dilemmas where they must make decisions that risk injuring humans. However, prior research has framed these dilemmas in starkly simple terms, i.e., framing decisions as life and death and neglecting the influence of risk of injury to the involved parties on the outcome. Here, we focus on this gap and present experimental work that systematically studies the effect of risk of injury on the decisions people make in these dilemmas. In four experiments, participants were asked to program their AVs to either save five pedestrians, which we refer to as the utilitarian choice, or save the driver, which we refer to as the nonutilitarian choice. The results indicate that most participants made the utilitarian choice but that this choice was moderated in important ways by perceived risk to the driver and risk to the pedestrians. As a second contribution, we demonstrate the value of formulating AV moral dilemmas in a game-theoretic framework that considers the possible influence of others’ behavior. In the fourth experiment, we show that participants were more (less) likely to make the utilitarian choice, the more utilitarian (nonutilitarian) other drivers behaved; furthermore, unlike the game-theoretic prediction that decision-makers inevitably converge to nonutilitarianism, we found significant evidence of utilitarianism. We discuss theoretical implications for our understanding of human decision-making in moral dilemmas and practical guidelines for the design of autonomous machines that solve these dilemmas while, at the same time, being likely to be adopted in practice.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Mozgai, Sharon; Femminella, Brian; Hartholt, Arno; Rizzo, Skip
User-Centered Design Model for Mobile Health (mHealth) Applications: A Military Case Study in Rapid Assessment Process (RAP) Journal Article
In: pp. 10, 2021.
@article{mozgai_user-centered_2021,
  author    = {Sharon Mozgai and Brian Femminella and Arno Hartholt and Skip Rizzo},
  title     = {User-Centered Design Model for Mobile Health (mHealth) Applications: A Military Case Study in Rapid Assessment Process (RAP)},
  pages     = {10},
  year      = {2021},
  date      = {2021-01-01},
  url       = {https://uploads-ssl.webflow.com/5f11f7e80d5a3b6dfdeeb614/5f9b3284d3d73e1da6a8f848_CHI_2021_Battle%20Buddy.pdf},
  abstract  = {CCS Concepts: • Human-centered computing → Ubiquitous and mobile computing design and evaluation methods; HCI design and evaluation methods; User centered design; • Applied computing → Military; • Computing methodologies → Intelligent agents.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Kawano, Seiya; Yoshino, Koichiro; Traum, David; Nakamura, Satoshi
Dialogue Structure Parsing on Multi-Floor Dialogue Based on Multi-Task Learning Inproceedings
In: 1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction, pp. 21–29, ISCA, 2021.
@inproceedings{kawano_dialogue_2021,
  author    = {Seiya Kawano and Koichiro Yoshino and David Traum and Satoshi Nakamura},
  title     = {Dialogue Structure Parsing on Multi-Floor Dialogue Based on Multi-Task Learning},
  booktitle = {1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction},
  pages     = {21--29},
  publisher = {ISCA},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-04-15},
  doi       = {10.21437/RobotDial.2021-4},
  url       = {http://www.isca-speech.org/archive/RobotDial_2021/abstracts/4.html},
  abstract  = {A multi-floor dialogue consists of multiple sets of dialogue participants, each conversing within their own floor, but also at least one multicommunicating member who is a participant of multiple floors and coordinating each to achieve a shared dialogue goal. The structure of such dialogues can be complex, involving intentional structure and relations that are within or across floors. In this study, we propose a neural dialogue structure parser based on multi-task learning and an attention mechanism on multi-floor dialogues in a collaborative robot navigation domain. Our experimental results show that our proposed model improved the dialogue structure parsing performance more than those of single models, which are trained on each dialogue structure parsing task in multi-floor dialogues.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan
The field of Affective Computing: An interdisciplinary perspective Journal Article
In: Transactions of the Japanese Society for Artificial Intelligence, vol. 36, no. 1, pp. 13, 2021.
@article{gratch_field_2021,
  author    = {Jonathan Gratch},
  title     = {The field of Affective Computing: An interdisciplinary perspective},
  journal   = {Transactions of the Japanese Society for Artificial Intelligence},
  volume    = {36},
  number    = {1},
  pages     = {13},
  year      = {2021},
  date      = {2021-01-01},
  url       = {https://people.ict.usc.edu/~gratch/CSCI534/Readings/Gratch%20-%20The%20field%20of%20affective%20computing.pdf},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Chen, Meida; Feng, Andrew; McCullough, Kyle; Prasad, Pratusha Bhuvana; McAlinden, Ryan; Soibelman, Lucio
3D Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework Journal Article
In: Journal of Computing in Civil Engineering, vol. 34, no. 6, 2020, ISSN: 0887-3801, 1943-5487.
@article{chen_3d_2020,
  author    = {Meida Chen and Andrew Feng and Kyle McCullough and Pratusha Bhuvana Prasad and Ryan McAlinden and Lucio Soibelman},
  title     = {3D Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework},
  journal   = {Journal of Computing in Civil Engineering},
  volume    = {34},
  number    = {6},
  year      = {2020},
  date      = {2020-11-01},
  doi       = {10.1061/(ASCE)CP.1943-5487.0000929},
  issn      = {0887-3801, 1943-5487},
  url       = {http://ascelibrary.org/doi/10.1061/%28ASCE%29CP.1943-5487.0000929},
  abstract  = {The US Army is paying increased attention to the development of rapid three-dimensional (3D) reconstruction using photogrammetry and unmanned aerial vehicle (UAV) technologies for creating virtual environments and simulations in areas of interest. The ability of the intelligence community, mission commanders, and front-line soldiers to understand their deployed physical environment in advance is critical in the planning and rehearsal phases of any military operation. In order to achieve various simulation capabilities such as destruction operations, route planning, and explosive-standoff distances computation among others, reconstructed 3D data needs to be properly attributed. In this paper, we introduce a model ensembling framework for segmenting a 3D photogrammetry point cloud into top-level terrain elements (i.e., ground, human-made objects, and vegetation). Preprocessing and postprocessing methods were designed to overcome the data segmentation challenges posed by photogrammetric data-quality issues. A large UAV-based photogrammetric database was created for validation purposes. The designed model ensembling framework was compared with existing point cloud segmentation algorithms, and it outperformed other algorithms and achieved the best F1-score. Because the ultimate goal of segmenting a photogrammetric-generated point cloud is to create realistic virtual environments for simulation. Qualitative results for creating virtual environments using the segmented data are also discussed in this paper. DOI: 10.1061/(ASCE)CP.1943-5487.0000929. © 2020 American Society of Civil Engineers.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Li, Jiaman; Kuang, Zhengfei; Zhao, Yajie; He, Mingming; Bladin, Karl; Li, Hao
Dynamic Facial Asset and Rig Generation from a Single Scan Journal Article
In: ACM Transactions on Graphics, vol. 39, no. 6, 2020.
@article{li_dynamic_2020,
  author    = {Jiaman Li and Zhengfei Kuang and Yajie Zhao and Mingming He and Karl Bladin and Hao Li},
  title     = {Dynamic Facial Asset and Rig Generation from a Single Scan},
  journal   = {ACM Transactions on Graphics},
  volume    = {39},
  number    = {6},
  year      = {2020},
  date      = {2020-11-01},
  doi       = {10.1145/3414685.3417817},
  url       = {https://dl.acm.org/doi/10.1145/3414685.3417817},
  abstract  = {The creation of high-fidelity computer-generated (CG) characters for films and games is tied with intensive manual labor, which involves the creation of comprehensive facial assets that are often captured using complex hardware. To simplify and accelerate this digitization process, we propose a framework for the automatic generation of high-quality dynamic facial models, including rigs which can be readily deployed for artists to polish. Our framework takes a single scan as input to generate a set of personalized blendshapes, dynamic textures, as well as secondary facial components (e.g., teeth and eyeballs). Based on a facial database with over 4, 000 scans with pore-level details, varying expressions and identities, we adopt a self-supervised neural network to learn personalized blendshapes from a set of template expressions. We also model the joint distribution between identities and expressions, enabling the inference of a full set of personalized blendshapes with dynamic appearances from a single neutral input scan. Our generated personalized face rig assets are seamlessly compatible with professional production pipelines for facial animation and rendering. We demonstrate a highly robust and effective framework on a wide range of subjects, and showcase high-fidelity facial animations with automatically generated personalized dynamic textures.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Reger, Greg M.; Norr, Aaron M.; Rizzo, Albert “Skip”; Sylvers, Patrick; Peltan, Jessica; Fischer, Daniel; Trimmer, Matthew; Porter, Shelan; Gant, Pamela; Baer, John S.
In: JAMA Network Open, vol. 3, no. 10, 2020, ISSN: 2574-3805.
@article{reger_virtual_2020,
  author    = {Greg M. Reger and Aaron M. Norr and Albert “Skip” Rizzo and Patrick Sylvers and Jessica Peltan and Daniel Fischer and Matthew Trimmer and Shelan Porter and Pamela Gant and John S. Baer},
  title     = {Virtual Standardized Patients vs Academic Training for Learning Motivational Interviewing Skills in the US Department of Veterans Affairs and the US Military: A Randomized Trial},
  journal   = {JAMA Network Open},
  volume    = {3},
  number    = {10},
  year      = {2020},
  date      = {2020-10-01},
  doi       = {10.1001/jamanetworkopen.2020.17348},
  issn      = {2574-3805},
  url       = {https://jamanetwork.com/journals/jamanetworkopen/fullarticle/2771733},
  abstract  = {OBJECTIVE To evaluate the efficacy of training with a VSP on the acquisition and maintenance of MI skills compared with traditional academic study. DESIGN, SETTING, AND PARTICIPANTS This study was a 2-group, parallel-training randomized trial of 120 volunteer health care professionals recruited from a Department of Veterans Affairs and Department of Defense medical facility. Motivational interviewing skill was coded by external experts blinded to training group and skill assessment time points. Data were collected from October 17, 2016, to August 12, 2019. INTERVENTIONS After a computer course on MI, participants trained during two 45-minute sessions separated by 3 months. The 2 randomized training conditions included a branching storyline VSP, which provided MI skill rehearsal with immediate and summative feedback, and a control condition, which included academic study of content from the computerized MI course. MAIN OUTCOMES AND MEASURES Measurement of MI skill was based on recorded conversations with human standardized patients, assessed using the Motivational Interviewing Treatment Integrity 4.2.1 coding system, measured at baseline, after training, and after additional training in the randomized condition 3 months later. RESULTS A total of 120 volunteers (83 [69%] women), with a mean (SD) of 13.6 (10.3) years of health care experience, participated in the study; 61 were randomized to receive the intervention, and 59 were randomized to the control group. Those assigned to VSP training had significantly greater posttraining improvement in technical global scores (0.23; 95% CI, 0.03-0.44; P = .02), relational global scores (0.57; 95% CI, 0.33-0.81; P = .001), and the reflection-to-question ratio (0.23; 95% CI, 0.15-0.31; P = .001). 
Differences were maintained after the 3-month additional training session, with more improvements achieved after the 3-month training for the VSP trainees on the reflection-to- question ratio (0.15; 95% CI, 0.07-0.24; P = .001). CONCLUSIONS AND RELEVANCE This randomized trial demonstrated a successful transfer of training from a VSP to human standardized patients. The VSP MI skill outcomes were better than those achieved with academic study and were maintained over time. Virtual standardized patients},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Hartholt, Arno; Reilly, Adam; Fast, Ed; Mozgai, Sharon
Introducing Canvas: Combining Nonverbal Behavior Generation with User-Generated Content to Rapidly Create Educational Videos Inproceedings
In: Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents, pp. 1–3, ACM, Virtual Event Scotland UK, 2020, ISBN: 978-1-4503-7586-3.
@inproceedings{hartholt_introducing_2020,
  title     = {Introducing Canvas: Combining Nonverbal Behavior Generation with User-Generated Content to Rapidly Create Educational Videos},
  author    = {Hartholt, Arno and Reilly, Adam and Fast, Ed and Mozgai, Sharon},
  url       = {https://dl.acm.org/doi/10.1145/3383652.3423880},
  doi       = {10.1145/3383652.3423880},
  isbn      = {978-1-4503-7586-3},
  year      = {2020},
  date      = {2020-10-01},
  booktitle = {Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents},
  pages     = {1--3},
  publisher = {ACM},
  address   = {Virtual Event Scotland UK},
  abstract  = {Rapidly creating educational content that is effective, engaging, and low-cost is a challenge. We present Canvas, a tool for educators that addresses this challenge by enabling the generation of educational video, led by an intelligent virtual agent, that combines rapid nonverbal behavior generation techniques with end-user facing authoring tools. With Canvas, educators can easily produce compelling educational videos with a minimum of investment by leveraging existing content provided by the tool (e.g., characters and environments) while incorporating their own custom content (e.g., images and video clips). Canvas has been delivered to the Smithsonian Science Education Center and is currently being evaluated internally before wider release. We discuss the system, feature set, design process, and lessons learned.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale M.; Gratch, Jonathan
Varied Magnitude Favor Exchange in Human-Agent Negotiation Inproceedings
In: Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents, pp. 1–8, ACM, Virtual Event Scotland UK, 2020, ISBN: 978-1-4503-7586-3.
@inproceedings{mell_varied_2020,
  title     = {Varied Magnitude Favor Exchange in Human-Agent Negotiation},
  author    = {Mell, Johnathan and Lucas, Gale M. and Gratch, Jonathan},
  url       = {https://dl.acm.org/doi/10.1145/3383652.3423866},
  doi       = {10.1145/3383652.3423866},
  isbn      = {978-1-4503-7586-3},
  year      = {2020},
  date      = {2020-10-01},
  booktitle = {Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents},
  pages     = {1--8},
  publisher = {ACM},
  address   = {Virtual Event Scotland UK},
  abstract  = {Agents that interact with humans in complex, social tasks need the ability to comprehend as well as employ common social strategies. In negotiation, there is ample evidence of such techniques being used efficaciously in human interchanges. In this work, we demonstrate a new design for socially-aware agents that employ one such technique—favor exchange—in order to gain value when playing against humans. In an online study of a robust, simulated social negotiation task, we show that these agents are effective against real human participants. In particular, we show that agents that ask for favors during the course of a repeated set of negotiations are more successful than those that do not. Additionally, previous work has demonstrated that humans can detect when agents betray them by failing to return favors that were previously promised. By contrast, this work indicates that these betrayal techniques may go largely undetected in complex scenarios.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Filter
2021
Stocco, Andrea; Sibert, Catherine; Steine-Hanson, Zoe; Koh, Natalie; Laird, John E.; Lebiere, Christian J.; Rosenbloom, Paul
Analysis of the human connectome data supports the notion of a “Common Model of Cognition” for human and human-like intelligence across domains Journal Article
In: NeuroImage, vol. 235, pp. 118035, 2021, ISSN: 10538119.
@article{stocco_analysis_2021-1,
  title         = {Analysis of the human connectome data supports the notion of a “Common Model of Cognition” for human and human-like intelligence across domains},
  author        = {Stocco, Andrea and Sibert, Catherine and Steine-Hanson, Zoe and Koh, Natalie and Laird, John E. and Lebiere, Christian J. and Rosenbloom, Paul},
  url           = {https://linkinghub.elsevier.com/retrieve/pii/S1053811921003128},
  doi           = {10.1016/j.neuroimage.2021.118035},
  issn          = {1053-8119},
  year          = {2021},
  date          = {2021-07-01},
  urldate       = {2021-04-30},
  journal       = {NeuroImage},
  volume        = {235},
  pages         = {118035},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {article},
  internal-note = {review: duplicate of entry stocco_analysis_2021 (same DOI, differs only in urldate) — consider removing one}
}
Stocco, Andrea; Sibert, Catherine; Steine-Hanson, Zoe; Koh, Natalie; Laird, John E.; Lebiere, Christian J.; Rosenbloom, Paul
Analysis of the human connectome data supports the notion of a “Common Model of Cognition” for human and human-like intelligence across domains Journal Article
In: NeuroImage, vol. 235, pp. 118035, 2021, ISSN: 10538119.
@article{stocco_analysis_2021,
  title     = {Analysis of the human connectome data supports the notion of a “Common Model of Cognition” for human and human-like intelligence across domains},
  author    = {Stocco, Andrea and Sibert, Catherine and Steine-Hanson, Zoe and Koh, Natalie and Laird, John E. and Lebiere, Christian J. and Rosenbloom, Paul},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S1053811921003128},
  doi       = {10.1016/j.neuroimage.2021.118035},
  issn      = {1053-8119},
  year      = {2021},
  date      = {2021-07-01},
  urldate   = {2021-05-06},
  journal   = {NeuroImage},
  volume    = {235},
  pages     = {118035},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Horstmann, Aike C.; Gratch, Jonathan; Krämer, Nicole C.
I Just Wanna Blame Somebody, Not Something! Reactions to a Computer Agent Giving Negative Feedback Based on the Instructions of a Person Journal Article
In: International Journal of Human-Computer Studies, pp. 102683, 2021, ISSN: 10715819.
Abstract | Links | BibTeX | Tags:
@article{horstmann_i_2021,
  title     = {I Just Wanna Blame Somebody, Not Something! Reactions to a Computer Agent Giving Negative Feedback Based on the Instructions of a Person},
  author    = {Horstmann, Aike C. and Gratch, Jonathan and Krämer, Nicole C.},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S1071581921001014},
  doi       = {10.1016/j.ijhcs.2021.102683},
  issn      = {1071-5819},
  year      = {2021},
  date      = {2021-06-01},
  urldate   = {2021-06-18},
  journal   = {International Journal of Human-Computer Studies},
  pages     = {102683},
  abstract  = {Previous research focused on differences between interacting with a person-controlled avatar and a computer-controlled virtual agent. This study however examines an aspiring form of technology called agent representative which constitutes a mix of the former two interaction partner types since it is a computer agent which was previously instructed by a person to take over a task on the person’s behalf. In an experimental lab study with a 2 x 3 between-subjects-design (N = 195), people believed to study together either with an agent representative, avatar, or virtual agent. The interaction partner was described to either possess high or low expertise, while always giving negative feedback regarding the participant’s performance. Results show small but interesting differences regarding the type of agency. People attributed the most agency and blame to the person(s) behind the software and reported the most negative affect when interacting with an avatar, which was less the case for a person’s agent representative and the least for a virtual agent. Level of expertise had no significant effect and other evaluation measures were not affected.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Melo, Celso M.; Gratch, Jonathan; Krueger, Frank
Heuristic thinking and altruism toward machines in people impacted by COVID-19 Journal Article
In: iScience, vol. 24, no. 3, pp. 102228, 2021, ISSN: 25890042.
Abstract | Links | BibTeX | Tags:
@article{de_melo_heuristic_2021-1,
  title         = {Heuristic thinking and altruism toward machines in people impacted by {COVID-19}},
  author        = {Melo, Celso M. and Gratch, Jonathan and Krueger, Frank},
  url           = {https://linkinghub.elsevier.com/retrieve/pii/S2589004221001966},
  doi           = {10.1016/j.isci.2021.102228},
  issn          = {2589-0042},
  year          = {2021},
  date          = {2021-03-01},
  urldate       = {2021-04-14},
  journal       = {iScience},
  volume        = {24},
  number        = {3},
  pages         = {102228},
  abstract      = {Autonomous machines are poised to become pervasive, but most treat machines differently: we are willing to violate social norms and less likely to display altruism toward machines. Here, we report an unexpected effect that those impacted by COVID-19—as measured by a post-traumatic stress disorder scale—show a sharp reduction in this difference. Participants engaged in the dictator game with humans and machines and, consistent with prior research on disasters, those impacted by COVID-19 displayed more altruism to other humans. Unexpectedly, participants impacted by COVID-19 displayed equal altruism toward human and machine partners. A mediation analysis suggests that altruism toward machines was explained by an increase in heuristic thinking—reinforcing prior theory that heuristic thinking encourages people to treat machines like people—and faith in technology—perhaps reflecting long-term consequences on how we act with machines. These findings give insight, but also raise concerns, for the design of technology.},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {article},
  internal-note = {review: duplicate of entry de_melo_heuristic_2021 (same DOI; that copy carries keywords) — consider removing one. Surname may be "de Melo" per the citation key; verify against the publisher record}
}
Mell, Johnathan; Beissinger, Markus; Gratch, Jonathan
An expert-model and machine learning hybrid approach to predicting human-agent negotiation outcomes in varied data Journal Article
In: Journal on Multimodal User Interfaces, 2021, ISSN: 1783-7677, 1783-8738.
Abstract | Links | BibTeX | Tags: Machine Learning, UARC, Virtual Humans
@article{mell_expert-model_2021,
  title     = {An expert-model and machine learning hybrid approach to predicting human-agent negotiation outcomes in varied data},
  author    = {Mell, Johnathan and Beissinger, Markus and Gratch, Jonathan},
  url       = {http://link.springer.com/10.1007/s12193-021-00368-w},
  doi       = {10.1007/s12193-021-00368-w},
  issn      = {1783-7677, 1783-8738},
  year      = {2021},
  date      = {2021-03-01},
  urldate   = {2021-04-15},
  journal   = {Journal on Multimodal User Interfaces},
  abstract  = {We present the results of a machine-learning approach to the analysis of several human-agent negotiation studies. By combining expert knowledge of negotiating behavior compiled over a series of empirical studies with neural networks, we show that a hybrid approach to parameter selection yields promise for designing more effective and socially intelligent agents. Specifically, we show that a deep feedforward neural network using a theory-driven three-parameter model can be effective in predicting negotiation outcomes. Furthermore, it outperforms other expert-designed models that use more parameters, as well as those using other techniques (such as linear regression models or boosted decision trees). In a follow-up study, we show that the most successful models change as the dataset size increases and the prediction targets change, and show that boosted decision trees may not be suitable for the negotiation domain. We anticipate these results will have impact for those seeking to combine extensive domain knowledge with more automated approaches in human-computer negotiation. Further, we show that this approach can be a stepping stone from purely exploratory research to targeted human-behavioral experimentation. Through our approach, areas of social artificial intelligence that have historically benefited from expert knowledge and traditional AI approaches can be combined with more recent proven-effective machine learning algorithms.},
  keywords  = {Machine Learning, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Gramlich, Michael A.; Smolenski, Derek J.; Norr, Aaron M.; Rothbaum, Barbara O.; Rizzo, Albert A.; Andrasik, Frank; Fantelli, Emily; Reger, Greg M.
Psychophysiology during exposure to trauma memories: Comparative effects of virtual reality and imaginal exposure for posttraumatic stress disorder Journal Article
In: Depression and Anxiety, pp. da.23141, 2021, ISSN: 1091-4269, 1520-6394.
Abstract | Links | BibTeX | Tags: MedVR
@article{gramlich_psychophysiology_2021,
  title     = {Psychophysiology during exposure to trauma memories: Comparative effects of virtual reality and imaginal exposure for posttraumatic stress disorder},
  author    = {Gramlich, Michael A. and Smolenski, Derek J. and Norr, Aaron M. and Rothbaum, Barbara O. and Rizzo, Albert A. and Andrasik, Frank and Fantelli, Emily and Reger, Greg M.},
  url       = {https://onlinelibrary.wiley.com/doi/10.1002/da.23141},
  doi       = {10.1002/da.23141},
  issn      = {1091-4269, 1520-6394},
  year      = {2021},
  date      = {2021-03-01},
  urldate   = {2021-04-14},
  journal   = {Depression and Anxiety},
  pages     = {da.23141},
  abstract  = {Background: This investigation involved an in‐depth examination of psychophysiological responses during exposure to the trauma memory across 10 sessions among active duty soldiers with combat‐related posttraumatic stress disorder (PTSD) treated by Prolonged Exposure (PE) or Virtual Reality Exposure (VRE). We compared psychophysiological changes, session‐by‐session, between VRE and traditional imaginal exposure.
Methods: Heart rate (HR), galvanic skin response (GSR), and peripheral skin temperature were collected every 5 min during exposure sessions with 61 combat veterans of Iraq/Afghanistan and compared to the PTSD Checklist (PCL‐C) and Clinician‐Administered PTSD Scale (CAPS) outcomes using multilevel modeling. Results: Over the course of treatment, participants in the PE group had higher HR arousal compared to participants in the VRE group. With reference to GSR, in earlier sessions, participants demonstrated a within‐session increase, whereas, in later sessions, participants showed a within‐session habituation response. A significant interaction was found for GSR and treatment assignment for within‐session change, withinperson effect, predicting CAPS (d = 0.70) and PCL‐C (d = 0.66) outcomes.
Conclusion: Overall, these findings suggest that exposure to traumatic memories activates arousal across sessions, with GSR being most associated with reductions in PTSD symptoms for participants in the PE group.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Methods: Heart rate (HR), galvanic skin response (GSR), and peripheral skin temperature were collected every 5 min during exposure sessions with 61 combat veterans of Iraq/Afghanistan and compared to the PTSD Checklist (PCL‐C) and Clinician‐Administered PTSD Scale (CAPS) outcomes using multilevel modeling. Results: Over the course of treatment, participants in the PE group had higher HR arousal compared to participants in the VRE group. With reference to GSR, in earlier sessions, participants demonstrated a within‐session increase, whereas, in later sessions, participants showed a within‐session habituation response. A significant interaction was found for GSR and treatment assignment for within‐session change, withinperson effect, predicting CAPS (d = 0.70) and PCL‐C (d = 0.66) outcomes.
Conclusion: Overall, these findings suggest that exposure to traumatic memories activates arousal across sessions, with GSR being most associated with reductions in PTSD symptoms for participants in the PE group.
Melo, Celso M.; Gratch, Jonathan; Krueger, Frank
Heuristic thinking and altruism toward machines in people impacted by COVID-19 Journal Article
In: iScience, vol. 24, no. 3, pp. 102228, 2021, ISSN: 25890042.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{de_melo_heuristic_2021,
  title     = {Heuristic thinking and altruism toward machines in people impacted by {COVID-19}},
  author    = {Melo, Celso M. and Gratch, Jonathan and Krueger, Frank},
  url       = {https://linkinghub.elsevier.com/retrieve/pii/S2589004221001966},
  doi       = {10.1016/j.isci.2021.102228},
  issn      = {2589-0042},
  year      = {2021},
  date      = {2021-03-01},
  urldate   = {2021-04-14},
  journal   = {iScience},
  volume    = {24},
  number    = {3},
  pages     = {102228},
  abstract  = {Autonomous machines are poised to become pervasive, but most treat machines differently: we are willing to violate social norms and less likely to display altruism toward machines. Here, we report an unexpected effect that those impacted by COVID-19—as measured by a post-traumatic stress disorder scale—show a sharp reduction in this difference. Participants engaged in the dictator game with humans and machines and, consistent with prior research on disasters, those impacted by COVID-19 displayed more altruism to other humans. Unexpectedly, participants impacted by COVID-19 displayed equal altruism toward human and machine partners. A mediation analysis suggests that altruism toward machines was explained by an increase in heuristic thinking—reinforcing prior theory that heuristic thinking encourages people to treat machines like people—and faith in technology—perhaps reflecting long-term consequences on how we act with machines. These findings give insight, but also raise concerns, for the design of technology.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Brixey, Jacqueline; Traum, David
Masheli: A Choctaw-English Bilingual Chatbot Incollection
In: D'Haro, Luis Fernando; Callejas, Zoraida; Nakamura, Satoshi (Ed.): Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 41–50, Springer Singapore, Singapore, 2021, ISBN: 9789811583940 9789811583957, (Series Title: Lecture Notes in Electrical Engineering).
Abstract | Links | BibTeX | Tags: Natural Language, UARC, Virtual Humans
@incollection{dharo_masheli_2021,
  title     = {Masheli: A Choctaw-English Bilingual Chatbot},
  author    = {Brixey, Jacqueline and Traum, David},
  editor    = {D'Haro, Luis Fernando and Callejas, Zoraida and Nakamura, Satoshi},
  url       = {http://link.springer.com/10.1007/978-981-15-8395-7_4},
  doi       = {10.1007/978-981-15-8395-7_4},
  isbn      = {9789811583940 9789811583957},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-04-15},
  booktitle = {Conversational Dialogue Systems for the Next Decade},
  volume    = {704},
  pages     = {41--50},
  publisher = {Springer Singapore},
  address   = {Singapore},
  abstract  = {We present the implementation of an autonomous Choctaw-English bilingual chatbot. Choctaw is an American indigenous language. The intended use of the chatbot is for Choctaw language learners to practice. The system’s backend is NPCEditor, a response selection program that is trained on linked questions and answers. The chatbot’s answers are stories and conversational utterances in both languages. We experiment with the ability of NPCEditor to appropriately respond to language mixed utterances, and describe a pilot study with Choctaw-English speakers.},
  note      = {Series Title: Lecture Notes in Electrical Engineering},
  keywords  = {Natural Language, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Gordon, Carla; Georgila, Kallirroi; Yanov, Volodymyr; Traum, David
Towards Personalization of Spoken Dialogue System Communication Strategies Incollection
In: D'Haro, Luis Fernando; Callejas, Zoraida; Nakamura, Satoshi (Ed.): Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 145–160, Springer Singapore, Singapore, 2021, ISBN: 9789811583940 9789811583957, (Series Title: Lecture Notes in Electrical Engineering).
Abstract | Links | BibTeX | Tags: Dialogue, Natural Language, UARC, Virtual Humans
@incollection{dharo_towards_2021,
  title     = {Towards Personalization of Spoken Dialogue System Communication Strategies},
  author    = {Gordon, Carla and Georgila, Kallirroi and Yanov, Volodymyr and Traum, David},
  editor    = {D'Haro, Luis Fernando and Callejas, Zoraida and Nakamura, Satoshi},
  url       = {http://link.springer.com/10.1007/978-981-15-8395-7_11},
  doi       = {10.1007/978-981-15-8395-7_11},
  isbn      = {9789811583940 9789811583957},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-04-15},
  booktitle = {Conversational Dialogue Systems for the Next Decade},
  volume    = {704},
  pages     = {145--160},
  publisher = {Springer Singapore},
  address   = {Singapore},
  abstract  = {This study examines the effects of 3 conversational traits – Register, Explicitness, and Misunderstandings – on user satisfaction and the perception of specific subjective features for Virtual Home Assistant spoken dialogue systems. Eight different system profiles were created, each representing a different combination of these 3 traits. We then utilized a novel Wizard of Oz data collection tool and recruited participants who interacted with the 8 different system profiles, and then rated the systems on 7 subjective features. Surprisingly, we found that systems which made errors were preferred overall, with the statistical analysis revealing error-prone systems were rated higher than systems which made no errors for all 7 of the subjective features rated. There were also some interesting interaction effects between the 3 conversational traits, such as implicit confirmations being preferred for systems employing a “conversational” Register, while explicit confirmations were preferred for systems employing a “formal” Register, even though there was no overall main effect for Explicitness. This experimental framework offers a fine-grained approach to the evaluation of user satisfaction which looks towards the personalization of communication strategies for spoken dialogue systems.},
  note      = {Series Title: Lecture Notes in Electrical Engineering},
  keywords  = {Dialogue, Natural Language, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Lee, Minha; Lucas, Gale; Gratch, Jonathan
Comparing mind perception in strategic exchanges: human-agent negotiation, dictator and ultimatum games Journal Article
In: Journal on Multimodal User Interfaces, 2021, ISSN: 1783-7677, 1783-8738.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{lee_comparing_2021,
  title     = {Comparing mind perception in strategic exchanges: human-agent negotiation, dictator and ultimatum games},
  author    = {Lee, Minha and Lucas, Gale and Gratch, Jonathan},
  url       = {http://link.springer.com/10.1007/s12193-020-00356-6},
  doi       = {10.1007/s12193-020-00356-6},
  issn      = {1783-7677, 1783-8738},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-04-15},
  journal   = {Journal on Multimodal User Interfaces},
  abstract  = {Recent research shows that how we respond to other social actors depends on what sort of mind we ascribe to them. In a comparative manner, we observed how perceived minds of agents shape people’s behavior in the dictator game, ultimatum game, and negotiation against artificial agents. To do so, we varied agents’ minds on two dimensions of the mind perception theory: agency (cognitive aptitude) and patiency (affective aptitude) via descriptions and dialogs. In our first study, agents with emotional capacity garnered more allocations in the dictator game, but in the ultimatum game, agents’ described agency and affective capacity, both led to greater offers. In the second study on negotiation, agents ascribed with low-agency traits earned more points than those with high-agency traits, though the negotiation tactic was the same for all agents. Although patiency did not impact game points, participants sent more happy and surprise emojis and emotionally valenced messages to agents that demonstrated emotional capacity during negotiations. Further, our exploratory analyses indicate that people related only to agents with perceived affective aptitude across all games. Both perceived agency and affective capacity contributed to moral standing after dictator and ultimatum games. But after negotiations, only agents with perceived affective capacity were granted moral standing. Manipulating mind dimensions of machines has differing effects on how people react to them in dictator and ultimatum games, compared to a more complex economic exchange like negotiation. We discuss these results, which show that agents are perceived not only as social actors, but as intentional actors through negotiations, in contrast with simple economic games.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Gratch, Jonathan
The Promise and Peril of Automated Negotiators Journal Article
In: Negotiation Journal, vol. 37, no. 1, pp. 13–34, 2021, ISSN: 0748-4526, 1571-9979.
Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@article{gratch_promise_2021,
  title     = {The Promise and Peril of Automated Negotiators},
  author    = {Gratch, Jonathan},
  url       = {https://onlinelibrary.wiley.com/doi/10.1111/nejo.12348},
  doi       = {10.1111/nejo.12348},
  issn      = {0748-4526, 1571-9979},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-04-14},
  journal   = {Negotiation Journal},
  volume    = {37},
  number    = {1},
  pages     = {13--34},
  keywords  = {ARO-Coop, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
Risk of Injury in Moral Dilemmas With Autonomous Vehicles Journal Article
In: Frontiers in Robotics and AI, vol. 7, pp. 572529, 2021, ISSN: 2296-9144.
Abstract | Links | BibTeX | Tags: Autonomous Vehicles, UARC, Virtual Humans
@article{de_melo_risk_2021,
  title     = {Risk of Injury in Moral Dilemmas With Autonomous Vehicles},
  author    = {Melo, Celso M. and Marsella, Stacy and Gratch, Jonathan},
  url       = {https://www.frontiersin.org/articles/10.3389/frobt.2020.572529/full},
  doi       = {10.3389/frobt.2020.572529},
  issn      = {2296-9144},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-04-14},
  journal   = {Frontiers in Robotics and AI},
  volume    = {7},
  pages     = {572529},
  abstract  = {As autonomous machines, such as automated vehicles (AVs) and robots, become pervasive in society, they will inevitably face moral dilemmas where they must make decisions that risk injuring humans. However, prior research has framed these dilemmas in starkly simple terms, i.e., framing decisions as life and death and neglecting the influence of risk of injury to the involved parties on the outcome. Here, we focus on this gap and present experimental work that systematically studies the effect of risk of injury on the decisions people make in these dilemmas. In four experiments, participants were asked to program their AVs to either save five pedestrians, which we refer to as the utilitarian choice, or save the driver, which we refer to as the nonutilitarian choice. The results indicate that most participants made the utilitarian choice but that this choice was moderated in important ways by perceived risk to the driver and risk to the pedestrians. As a second contribution, we demonstrate the value of formulating AV moral dilemmas in a game-theoretic framework that considers the possible influence of others’ behavior. In the fourth experiment, we show that participants were more (less) likely to make the utilitarian choice, the more utilitarian (nonutilitarian) other drivers behaved; furthermore, unlike the game-theoretic prediction that decision-makers inevitably converge to nonutilitarianism, we found significant evidence of utilitarianism. We discuss theoretical implications for our understanding of human decision-making in moral dilemmas and practical guidelines for the design of autonomous machines that solve these dilemmas while, at the same time, being likely to be adopted in practice.},
  keywords  = {Autonomous Vehicles, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Mozgai, Sharon; Femminella, Brian; Hartholt, Arno; Rizzo, Skip
User-Centered Design Model for Mobile Health (mHealth) Applications: A Military Case Study in Rapid Assessment Process (RAP) Journal Article
In: pp. 10, 2021.
Abstract | Links | BibTeX | Tags: ARL, MedVR
@article{mozgai_user-centered_2021,
  title         = {User-Centered Design Model for Mobile Health (mHealth) Applications: A Military Case Study in Rapid Assessment Process (RAP)},
  author        = {Mozgai, Sharon and Femminella, Brian and Hartholt, Arno and Rizzo, Skip},
  url           = {https://uploads-ssl.webflow.com/5f11f7e80d5a3b6dfdeeb614/5f9b3284d3d73e1da6a8f848_CHI_2021_Battle%20Buddy.pdf},
  year          = {2021},
  date          = {2021-01-01},
  pages         = {10},
  abstract      = {CCS Concepts: • Human-centered computing → Ubiquitous and mobile computing design and evaluation methods; HCI design and evaluation methods; User centered design; • Applied computing → Military; • Computing methodologies → Intelligent agents.},
  keywords      = {ARL, MedVR},
  pubstate      = {published},
  tppubtype     = {article},
  internal-note = {review: required journal field is missing for this @article; the PDF filename in url references CHI 2021, so this may belong as @inproceedings — verify venue. pages looks like a page count, not a page range}
}
Kawano, Seiya; Yoshino, Koichiro; Traum, David; Nakamura, Satoshi
Dialogue Structure Parsing on Multi-Floor Dialogue Based on Multi-Task Learning Inproceedings
In: 1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction, pp. 21–29, ISCA, 2021.
Abstract | Links | BibTeX | Tags: ARL, Dialogue, Natural Language, Virtual Humans
@inproceedings{kawano_dialogue_2021,
  title     = {Dialogue Structure Parsing on Multi-Floor Dialogue Based on Multi-Task Learning},
  author    = {Kawano, Seiya and Yoshino, Koichiro and Traum, David and Nakamura, Satoshi},
  url       = {http://www.isca-speech.org/archive/RobotDial_2021/abstracts/4.html},
  doi       = {10.21437/RobotDial.2021-4},
  year      = {2021},
  date      = {2021-01-01},
  urldate   = {2021-04-15},
  booktitle = {1st RobotDial Workshop on Dialogue Models for Human-Robot Interaction},
  pages     = {21--29},
  publisher = {ISCA},
  abstract  = {A multi-floor dialogue consists of multiple sets of dialogue participants, each conversing within their own floor, but also at least one multicommunicating member who is a participant of multiple floors and coordinating each to achieve a shared dialogue goal. The structure of such dialogues can be complex, involving intentional structure and relations that are within or across floors. In this study, we propose a neural dialogue structure parser based on multi-task learning and an attention mechanism on multi-floor dialogues in a collaborative robot navigation domain. Our experimental results show that our proposed model improved the dialogue structure parsing performance more than those of single models, which are trained on each dialogue structure parsing task in multi-floor dialogues.},
  keywords  = {ARL, Dialogue, Natural Language, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan
The field of Affective Computing: An interdisciplinary perspective Journal Article
In: Transactions of the Japanese Society for Artificial Intelligence, vol. 36, no. 1, pp. 13, 2021.
Links | BibTeX | Tags: Virtual Humans
@article{gratch_field_2021,
  title     = {The field of Affective Computing: An interdisciplinary perspective},
  author    = {Gratch, Jonathan},
  url       = {https://people.ict.usc.edu/~gratch/CSCI534/Readings/Gratch%20-%20The%20field%20of%20affective%20computing.pdf},
  year      = {2021},
  date      = {2021-01-01},
  journal   = {Transactions of the Japanese Society for Artificial Intelligence},
  volume    = {36},
  number    = {1},
  pages     = {13},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
2020
Chen, Meida; Feng, Andrew; McCullough, Kyle; Prasad, Pratusha Bhuvana; McAlinden, Ryan; Soibelman, Lucio
3D Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework Journal Article
In: Journal of Computing in Civil Engineering, vol. 34, no. 6, 2020, ISSN: 0887-3801, 1943-5487.
Abstract | Links | BibTeX | Tags: Narrative, STG, UARC
@article{chen_3d_2020,
  title     = {3D Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework},
  author    = {Chen, Meida and Feng, Andrew and McCullough, Kyle and Prasad, Pratusha Bhuvana and McAlinden, Ryan and Soibelman, Lucio},
  url       = {http://ascelibrary.org/doi/10.1061/%28ASCE%29CP.1943-5487.0000929},
  doi       = {10.1061/(ASCE)CP.1943-5487.0000929},
  issn      = {0887-3801, 1943-5487},
  year      = {2020},
  date      = {2020-11-01},
  journal   = {Journal of Computing in Civil Engineering},
  volume    = {34},
  number    = {6},
  abstract  = {The US Army is paying increased attention to the development of rapid three-dimensional (3D) reconstruction using photogrammetry and unmanned aerial vehicle (UAV) technologies for creating virtual environments and simulations in areas of interest. The ability of the intelligence community, mission commanders, and front-line soldiers to understand their deployed physical environment in advance is critical in the planning and rehearsal phases of any military operation. In order to achieve various simulation capabilities such as destruction operations, route planning, and explosive-standoff distances computation among others, reconstructed 3D data needs to be properly attributed. In this paper, we introduce a model ensembling framework for segmenting a 3D photogrammetry point cloud into top-level terrain elements (i.e., ground, human-made objects, and vegetation). Preprocessing and postprocessing methods were designed to overcome the data segmentation challenges posed by photogrammetric data-quality issues. A large UAV-based photogrammetric database was created for validation purposes. The designed model ensembling framework was compared with existing point cloud segmentation algorithms, and it outperformed other algorithms and achieved the best F1-score. Because the ultimate goal of segmenting a photogrammetric-generated point cloud is to create realistic virtual environments for simulation. Qualitative results for creating virtual environments using the segmented data are also discussed in this paper. DOI: 10.1061/(ASCE)CP.1943-5487.0000929. © 2020 American Society of Civil Engineers.},
  keywords  = {Narrative, STG, UARC},
  pubstate  = {published},
  tppubtype = {article}
}
Li, Jiaman; Kuang, Zhengfei; Zhao, Yajie; He, Mingming; Bladin, Karl; Li, Hao
Dynamic Facial Asset and Rig Generation from a Single Scan Journal Article
In: ACM Transactions on Graphics, vol. 39, no. 6, 2020.
Abstract | Links | BibTeX | Tags: ARO-Coop, Graphics
@article{li_dynamic_2020,
title = {Dynamic Facial Asset and Rig Generation from a Single Scan},
author = {Jiaman Li and Zhengfei Kuang and Yajie Zhao and Mingming He and Karl Bladin and Hao Li},
url = {https://dl.acm.org/doi/10.1145/3414685.3417817},
doi = {10.1145/3414685.3417817},
year = {2020},
date = {2020-11-01},
journal = {ACM Transactions on Graphics},
volume = {39},
number = {6},
abstract = {The creation of high-fidelity computer-generated (CG) characters for films and games is tied with intensive manual labor, which involves the creation of comprehensive facial assets that are often captured using complex hardware. To simplify and accelerate this digitization process, we propose a framework for the automatic generation of high-quality dynamic facial models, including rigs which can be readily deployed for artists to polish. Our framework takes a single scan as input to generate a set of personalized blendshapes, dynamic textures, as well as secondary facial components (e.g., teeth and eyeballs). Based on a facial database with over 4, 000 scans with pore-level details, varying expressions and identities, we adopt a self-supervised neural network to learn personalized blendshapes from a set of template expressions. We also model the joint distribution between identities and expressions, enabling the inference of a full set of personalized blendshapes with dynamic appearances from a single neutral input scan. Our generated personalized face rig assets are seamlessly compatible with professional production pipelines for facial animation and rendering. We demonstrate a highly robust and effective framework on a wide range of subjects, and showcase high-fidelity facial animations with automatically generated personalized dynamic textures.},
keywords = {ARO-Coop, Graphics},
pubstate = {published},
tppubtype = {article}
}
Reger, Greg M.; Norr, Aaron M.; Rizzo, Albert “Skip”; Sylvers, Patrick; Peltan, Jessica; Fischer, Daniel; Trimmer, Matthew; Porter, Shelan; Gant, Pamela; Baer, John S.
Virtual Standardized Patients vs Academic Training for Learning Motivational Interviewing Skills in the US Department of Veterans Affairs and the US Military: A Randomized Trial Journal Article
In: JAMA Network Open, vol. 3, no. 10, 2020, ISSN: 2574-3805.
Abstract | Links | BibTeX | Tags: MedVR
@article{reger_virtual_2020,
title = {Virtual Standardized Patients vs Academic Training for Learning Motivational Interviewing Skills in the US Department of Veterans Affairs and the US Military: A Randomized Trial},
author = {Greg M. Reger and Aaron M. Norr and Albert “Skip” Rizzo and Patrick Sylvers and Jessica Peltan and Daniel Fischer and Matthew Trimmer and Shelan Porter and Pamela Gant and John S. Baer},
url = {https://jamanetwork.com/journals/jamanetworkopen/fullarticle/2771733},
doi = {10.1001/jamanetworkopen.2020.17348},
issn = {2574-3805},
year = {2020},
date = {2020-10-01},
journal = {JAMA Network Open},
volume = {3},
number = {10},
abstract = {OBJECTIVE To evaluate the efficacy of training with a VSP on the acquisition and maintenance of MI skills compared with traditional academic study. DESIGN, SETTING, AND PARTICIPANTS This study was a 2-group, parallel-training randomized trial of 120 volunteer health care professionals recruited from a Department of Veterans Affairs and Department of Defense medical facility. Motivational interviewing skill was coded by external experts blinded to training group and skill assessment time points. Data were collected from October 17, 2016, to August 12, 2019. INTERVENTIONS After a computer course on MI, participants trained during two 45-minute sessions separated by 3 months. The 2 randomized training conditions included a branching storyline VSP, which provided MI skill rehearsal with immediate and summative feedback, and a control condition, which included academic study of content from the computerized MI course. MAIN OUTCOMES AND MEASURES Measurement of MI skill was based on recorded conversations with human standardized patients, assessed using the Motivational Interviewing Treatment Integrity 4.2.1 coding system, measured at baseline, after training, and after additional training in the randomized condition 3 months later. RESULTS A total of 120 volunteers (83 [69%] women), with a mean (SD) of 13.6 (10.3) years of health care experience, participated in the study; 61 were randomized to receive the intervention, and 59 were randomized to the control group. Those assigned to VSP training had significantly greater posttraining improvement in technical global scores (0.23; 95% CI, 0.03-0.44; P = .02), relational global scores (0.57; 95% CI, 0.33-0.81; P = .001), and the reflection-to-question ratio (0.23; 95% CI, 0.15-0.31; P = .001). 
Differences were maintained after the 3-month additional training session, with more improvements achieved after the 3-month training for the VSP trainees on the reflection-to-question ratio (0.15; 95% CI, 0.07-0.24; P = .001). CONCLUSIONS AND RELEVANCE This randomized trial demonstrated a successful transfer of training from a VSP to human standardized patients. The VSP MI skill outcomes were better than those achieved with academic study and were maintained over time.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Reilly, Adam; Fast, Ed; Mozgai, Sharon
Introducing Canvas: Combining Nonverbal Behavior Generation with User-Generated Content to Rapidly Create Educational Videos Inproceedings
In: Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents, pp. 1–3, ACM, Virtual Event Scotland UK, 2020, ISBN: 978-1-4503-7586-3.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{hartholt_introducing_2020,
title = {Introducing Canvas: Combining Nonverbal Behavior Generation with User-Generated Content to Rapidly Create Educational Videos},
author = {Arno Hartholt and Adam Reilly and Ed Fast and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3383652.3423880},
doi = {10.1145/3383652.3423880},
isbn = {978-1-4503-7586-3},
year = {2020},
date = {2020-10-01},
booktitle = {Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents},
pages = {1--3},
publisher = {ACM},
address = {Virtual Event Scotland UK},
abstract = {Rapidly creating educational content that is effective, engaging, and low-cost is a challenge. We present Canvas, a tool for educators that addresses this challenge by enabling the generation of educational video, led by an intelligent virtual agent, that combines rapid nonverbal behavior generation techniques with end-user facing authoring tools. With Canvas, educators can easily produce compelling educational videos with a minimum of investment by leveraging existing content provided by the tool (e.g., characters and environments) while incorporating their own custom content (e.g., images and video clips). Canvas has been delivered to the Smithsonian Science Education Center and is currently being evaluated internally before wider release. We discuss the system, feature set, design process, and lessons learned.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale M.; Gratch, Jonathan
Varied Magnitude Favor Exchange in Human-Agent Negotiation Inproceedings
In: Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents, pp. 1–8, ACM, Virtual Event Scotland UK, 2020, ISBN: 978-1-4503-7586-3.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@inproceedings{mell_varied_2020,
title = {Varied Magnitude Favor Exchange in Human-Agent Negotiation},
author = {Johnathan Mell and Gale M. Lucas and Jonathan Gratch},
url = {https://dl.acm.org/doi/10.1145/3383652.3423866},
doi = {10.1145/3383652.3423866},
isbn = {978-1-4503-7586-3},
year = {2020},
date = {2020-10-01},
booktitle = {Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents},
pages = {1--8},
publisher = {ACM},
address = {Virtual Event Scotland UK},
abstract = {Agents that interact with humans in complex, social tasks need the ability to comprehend as well as employ common social strategies. In negotiation, there is ample evidence of such techniques being used efficaciously in human interchanges. In this work, we demonstrate a new design for socially-aware agents that employ one such technique—favor exchange—in order to gain value when playing against humans. In an online study of a robust, simulated social negotiation task, we show that these agents are effective against real human participants. In particular, we show that agents that ask for favors during the course of a repeated set of negotiations are more successful than those that do not. Additionally, previous work has demonstrated that humans can detect when agents betray them by failing to return favors that were previously promised. By contrast, this work indicates that these betrayal techniques may go largely undetected in complex scenarios.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Brixey, Jacqueline; Traum, David
Masheli: A Choctaw-English bilingual chatbot Incollection
In: Conversational Dialogue Systems for the Next Decade, pp. 41–50, Springer, Switzerland, 2020.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@incollection{brixey_masheli_2020,
title = {Masheli: A Choctaw-English bilingual chatbot},
author = {Jacqueline Brixey and David Traum},
url = {https://link.springer.com/chapter/10.1007/978-981-15-8395-7_4},
year = {2020},
date = {2020-10-01},
booktitle = {Conversational Dialogue Systems for the Next Decade},
pages = {41--50},
publisher = {Springer},
address = {Switzerland},
abstract = {We present the implementation of an autonomous Choctaw-English bilingual chatbot. Choctaw is an American indigenous language. The intended use of the chatbot is for Choctaw language learners to pratice conversational skills. The system’s backend is NPCEditor, a response selection program that is trained on linked questions and answers. The chatbot’s answers are stories and conversational utterances in both languages. We experiment with the ability of NPCEditor to appropriately respond to language mixed utterances, and describe a pilot study with Choctaw-English speakers.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Yeh, Shih-Ching; Lin, Sheng-Yang; Wu, Eric Hsiao-Kuang; Zhang, Kai-Feng; Xiu, Xu; Rizzo, Albert; Chung, Chia-Ru
A Virtual-Reality System Integrated With Neuro-Behavior Sensing for Attention-Deficit/Hyperactivity Disorder Intelligent Assessment Journal Article
In: IEEE Transactions on Neural Systems and Rehabilitation Engineering, vol. 28, no. 9, pp. 1899–1907, 2020, ISSN: 1534-4320, 1558-0210.
Abstract | Links | BibTeX | Tags: MedVR
@article{yeh_virtual-reality_2020,
title = {A Virtual-Reality System Integrated With Neuro-Behavior Sensing for Attention-Deficit/Hyperactivity Disorder Intelligent Assessment},
author = {Shih-Ching Yeh and Sheng-Yang Lin and Eric Hsiao-Kuang Wu and Kai-Feng Zhang and Xu Xiu and Albert Rizzo and Chia-Ru Chung},
url = {https://ieeexplore.ieee.org/document/9123917/},
doi = {10.1109/TNSRE.2020.3004545},
issn = {1534-4320, 1558-0210},
year = {2020},
date = {2020-09-01},
journal = {IEEE Transactions on Neural Systems and Rehabilitation Engineering},
volume = {28},
number = {9},
pages = {1899--1907},
abstract = {Attention-deficit/Hyperactivity disorder (ADHD) is a common neurodevelopmental disorder among children. Traditional assessment methods generally rely on behavioral rating scales (BRS) performed by clinicians, and sometimes parents or teachers. However, BRS assessment is time consuming, and the subjective ratings may lead to bias for the evaluation. Therefore, the major purpose of this study was to develop a Virtual Reality (VR) classroom associated with an intelligent assessment model to assist clinicians for the diagnosis of ADHD. In this study, an immersive VR classroom embedded with sustained and selective attention tasks was developed in which visual, audio, and visual-audio hybrid distractions, were triggered while attention tasks were conducted. A clinical experiment with 37 ADHD and 31 healthy subjects was performed. Data from BRS was compared with VR task performance and analyzed by rank-sum tests and Pearson Correlation. Results showed that 23 features out of total 28 were related to distinguish the ADHD and non-ADHD children. Several features of task performance and neuro-behavioral measurements were also correlated with features of the BRSs. Additionally, the machine learning models incorporating task performance and neuro-behavior were used to classify ADHD and non-ADHD children. The mean accuracy for the repeated cross-validation reached to 83.2%, which demonstrated a great potential for our system to provide more help for clinicians on assessment of ADHD.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Gordon, Andrew S.; Miller, Rob; Morgenstern, Leora; Turán, György
Preface Journal Article
In: Annals of Mathematics and Artificial Intelligence, 2020, ISSN: 1012-2443, 1573-7470.
Abstract | Links | BibTeX | Tags: Narrative
@article{gordon_preface_2020,
title = {Preface},
author = {Andrew S. Gordon and Rob Miller and Leora Morgenstern and György Turán},
url = {http://link.springer.com/10.1007/s10472-020-09711-5},
doi = {10.1007/s10472-020-09711-5},
issn = {1012-2443, 1573-7470},
year = {2020},
date = {2020-09-01},
journal = {Annals of Mathematics and Artificial Intelligence},
abstract = {A few years after the 1956 Dartmouth Summer Workshop [1, 2], which first established artificial intelligence as a field of research, John McCarthy [3] discussed the importance of explicitly representing and reasoning with commonsense knowledge to the enterprise of creating artificially intelligent robots and agents. McCarthy proposed that commonsense knowledge was best represented using formal logic, which he viewed as a uniquely powerful lingua franca that could be used to express and reason with virtually any sort of information that humans might reason with when problem solving, a stance he further developed and propounded in [4, 5]. This approach, the formalist or logic-based approach to commonsense reasoning, was practiced by an increasing set of adherents over the next several decades [6, 7], and continues to be represented by the Commonsense Symposium Series, first held in 1991 [8] and held biennially, for the most part, after that.},
keywords = {Narrative},
pubstate = {published},
tppubtype = {article}
}
Zhu, Runhe; Lucas, Gale M.; Becerik-Gerber, Burcin; Southers, Erroll G.
Building preparedness in response to active shooter incidents: Results of focus group interviews Journal Article
In: International Journal of Disaster Risk Reduction, vol. 48, pp. 101617, 2020, ISSN: 22124209.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@article{zhu_building_2020,
title = {Building preparedness in response to active shooter incidents: Results of focus group interviews},
author = {Runhe Zhu and Gale M. Lucas and Burcin Becerik-Gerber and Erroll G. Southers},
url = {https://linkinghub.elsevier.com/retrieve/pii/S221242091931427X},
doi = {10.1016/j.ijdrr.2020.101617},
issn = {22124209},
year = {2020},
date = {2020-09-01},
journal = {International Journal of Disaster Risk Reduction},
volume = {48},
pages = {101617},
abstract = {Active shooter incidents present an increasing threat to the American society. Many of these incidents occur in building environments, therefore, it is important to consider design and security elements in buildings to decrease the risk of active shooter incidents. This study aims to assess current security countermeasures and identify varying considerations associated with implementing these countermeasures. Fifteen participants, with expertise and experience in a diverse collection of operational and organizational backgrounds, including security, engineering, law enforcement, emergency management and policy making, participated in three focus group interviews. The participants identified a list of countermeasures that have been used for active shooter incidents. Important determinants for the effectiveness of countermeasures include their influence on occupants’ behavior during active shooter incidents, and occupants’ and administrators’ awareness of how to use them effectively. The nature of incidents (e.g., internal vs. external threats), building type (e.g., office buildings vs. school buildings), and occupants (e.g., students of different ages) were also recognized to affect the selection of appropriate countermeasures. The nexus between emergency preparedness and normal operations, and the importance of tradeoffs such as the ones between cost, aesthetics, maintenance needs and the influence on occupants’ daily activities were also discussed. To ensure the effectiveness of countermeasures and improve safety, the participants highlighted the importance of both training and practice, for occupants and administrators (e.g., first responder teams). The interview results suggested that further study of the relationship between security countermeasures and occupants’ and administrators’ responses, as well as efficient training approaches are needed.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Fast, Ed; Reilly, Adam; Whitcup, Wendy; Liewer, Matt; Mozgai, Sharon
Multi-Platform Expansion of the Virtual Human Toolkit: Ubiquitous Conversational Agents Journal Article
In: International Journal of Semantic Computing, vol. 14, no. 03, pp. 315–332, 2020, ISSN: 1793-351X, 1793-7108.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{hartholt_multi-platform_2020,
title = {Multi-Platform Expansion of the Virtual Human Toolkit: Ubiquitous Conversational Agents},
author = {Arno Hartholt and Ed Fast and Adam Reilly and Wendy Whitcup and Matt Liewer and Sharon Mozgai},
url = {https://www.worldscientific.com/doi/abs/10.1142/S1793351X20400127},
doi = {10.1142/S1793351X20400127},
issn = {1793-351X, 1793-7108},
year = {2020},
date = {2020-09-01},
journal = {International Journal of Semantic Computing},
volume = {14},
number = {03},
pages = {315--332},
abstract = {We present an extension of the Virtual Human Toolkit to include a range of computing platforms, including mobile, web, Virtual Reality (VR) and Augmented Reality (AR). The Toolkit uses a mix of in-house and commodity technologies to support audio-visual sensing, speech recognition, natural language processing, nonverbal behavior generation and realization, text-to-speech generation and rendering. It has been extended to support computing platforms beyond Windows by leveraging microservices. The resulting framework maintains the modularity of the underlying architecture, allows re-use of both logic and content through cloud services, and is extensible by porting lightweight clients. We present the current state of the framework, discuss how we model and animate our characters, and offer lessons learned through several use cases, including expressive character animation in seated VR, shared space and navigation in room-scale VR, autonomous AI in mobile AR, and real-time user performance feedback leveraging mobile sensors in headset AR.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gordon, Carla; Georgila, Kallirroi; Yanov, Volodymyr; Traum, David
Towards Personalization of Spoken Dialogue System Communication Strategies Incollection
In: Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 145–160, Springer Singapore, Singapore, 2020, ISBN: 9789811583940 9789811583957.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@incollection{gordon_towards_2020,
title = {Towards Personalization of Spoken Dialogue System Communication Strategies},
author = {Carla Gordon and Kallirroi Georgila and Volodymyr Yanov and David Traum},
url = {http://link.springer.com/10.1007/978-981-15-8395-7_11},
isbn = {9789811583940 9789811583957},
year = {2020},
date = {2020-09-01},
booktitle = {Conversational Dialogue Systems for the Next Decade},
volume = {704},
pages = {145--160},
publisher = {Springer Singapore},
address = {Singapore},
abstract = {This study examines the effects of 3 conversational traits – Register, Explicitness, and Misunderstandings – on user satisfaction and the perception of specific subjective features for Virtual Home Assistant spoken dialogue systems. Eight different system profiles were created, each representing a different combination of these 3 traits. We then utilized a novel Wizard of Oz data collection tool and recruited participants who interacted with the 8 different system profiles, and then rated the systems on 7 subjective features. Surprisingly, we found that systems which made errors were preferred overall, with the statistical analysis revealing error-prone systems were rated higher than systems which made no errors for all 7 of the subjective features rated. There were also some interesting interaction effects between the 3 conversational traits, such as implicit confirmations being preferred for systems employing a “conversational” Register, while explicit confirmations were preferred for systems employing a “formal” Register, even though there was no overall main effect for Explicitness. This experimental framework offers a fine-grained approach to the evaluation of user satisfaction which looks towards the personalization of communication strategies for spoken dialogue systems.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Luu, Cindy; Talbot, Thomas B.; Fung, Cha Chi; Ben-Isaac, Eyal; Espinoza, Juan; Fischer, Susan; Cho, Christine S.; Sargsyan, Mariam; Korand, Sridevi; Chang, Todd P.
Development and Performance Assessment of a Digital Serious Game to Assess Multi-Patient Care Skills in a Simulated Pediatric Emergency Department Journal Article
In: Simulation & Gaming, vol. 51, no. 4, pp. 550–570, 2020, ISSN: 1046-8781, 1552-826X.
Abstract | Links | BibTeX | Tags: MedVR
@article{luu_development_2020,
title = {Development and Performance Assessment of a Digital Serious Game to Assess Multi-Patient Care Skills in a Simulated Pediatric Emergency Department},
author = {Cindy Luu and Thomas B. Talbot and Cha Chi Fung and Eyal Ben-Isaac and Juan Espinoza and Susan Fischer and Christine S. Cho and Mariam Sargsyan and Sridevi Korand and Todd P. Chang},
url = {http://journals.sagepub.com/doi/10.1177/1046878120904984},
doi = {10.1177/1046878120904984},
issn = {1046-8781, 1552-826X},
year = {2020},
date = {2020-08-01},
journal = {Simulation & Gaming},
volume = {51},
number = {4},
pages = {550--570},
abstract = {Objective. Multi-patient care is important among medical trainees in an emergency department (ED). While resident efficiency is a typically measured metric, multi-patient care involves both efficiency and diagnostic / treatment accuracy. Multi-patient care ability is difficult to assess, though simulation is a potential alternative. Our objective was to generate validity evidence for a serious game in assessing multi-patient care skills among a variety of learners.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Rakofsky, Jeffrey J.; Talbot, Thomas B.; Dunlop, Boadie W.
A Virtual Standardized Patient–Based Assessment Tool to Evaluate Psychiatric Residents’ Psychopharmacology Proficiency Journal Article
In: Academic Psychiatry, 2020, ISSN: 1042-9670, 1545-7230.
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{rakofsky_virtual_2020,
title = {A Virtual Standardized Patient–Based Assessment Tool to Evaluate Psychiatric Residents’ Psychopharmacology Proficiency},
author = {Jeffrey J. Rakofsky and Thomas B. Talbot and Boadie W. Dunlop},
url = {http://link.springer.com/10.1007/s40596-020-01286-x},
doi = {10.1007/s40596-020-01286-x},
issn = {1042-9670, 1545-7230},
year = {2020},
date = {2020-07-01},
journal = {Academic Psychiatry},
abstract = {Objectives A virtual standardized patient-based assessment simulator was developed to address biases and practical limitations in existing methods for evaluating residents’ proficiency in psychopharmacological knowledge and practice. Methods The simulator was designed to replicate an outpatient psychiatric clinic experience. The virtual patient reported symptoms of a treatment-resistant form of major depressive disorder (MDD), requiring the learner to use various antidepressants in order for the patient to fully remit. Test scores were based on the proportion of correct responses to questions asked by the virtual patient about possible side effects, dosing, and titration decisions, which depended upon the patient’s tolerability and response to the learner’s selected medications. The validation paradigm included a novice-expert performance comparison across 4th year medical students, psychiatric residents from all four post-graduate year classes, and psychiatry department faculty, and a correlational analysis of simulator performance with the PRITE Somatic Treatments subscale score. Post-test surveys evaluated the test takers’ subjective impressions of the simulator. Results Forty-three subjects completed the online exam and survey. Total mean scores on the exam differed significantly across all the learner groups in a step-wise manner from students to faculty (F = 6.10},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Malta, Loretta S.; Giosan, Cezar; Szkodny, Lauren E.; Altemus, Margaret M.; Rizzo, Albert A.; Silbersweig, David A.; Difede, JoAnn
Development of a virtual reality laboratory stressor Journal Article
In: Virtual Reality, 2020, ISSN: 1359-4338, 1434-9957.
Abstract | Links | BibTeX | Tags: MedVR
@article{malta_development_2020,
title = {Development of a virtual reality laboratory stressor},
author = {Loretta S. Malta and Cezar Giosan and Lauren E. Szkodny and Margaret M. Altemus and Albert A. Rizzo and David A. Silbersweig and JoAnn Difede},
url = {http://link.springer.com/10.1007/s10055-020-00455-5},
doi = {10.1007/s10055-020-00455-5},
issn = {1359-4338, 1434-9957},
year = {2020},
date = {2020-07-01},
journal = {Virtual Reality},
abstract = {This research report describes the development of a virtual reality (VR) laboratory stressor to study the effects of exposure to stressful events. The aim of the research was to develop a VR simulation that would evoke stressor responses at a level that was tolerable for participants. Veterans with and without warzone-related posttraumatic stress disorder (PTSD) were presented with VR simulations of combat stressors. There was one complaint of feeling hot during simulations but no incidents of simulator sickness. Participants denied experiencing the simulations as overly distressing, and there were no reports of any distress or problems related to study participation when they were contacted two weeks after the VR challenge. Simulations elicited moderate levels of anxiety and mild levels of dissociation that were significantly greater in Veterans with PTSD. Simulations were less successful in eliciting differential heart rate reactivity and stress hormone secretion, though history of civilian trauma exposure was associated with elevated heart rates during the second simulation. The study demonstrated that the VR paradigm was feasible and tolerable and that it holds promise as a new method with which to conduct controlled laboratory research on the effects of exposure to stressful events.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Nye, Benjamin D.; Davis, Dan M.; Rizvi, Sanad Z.; Carr, Kayla; Swartout, William; Thacker, Raj; Shaw, Kenneth
Feasibility and usability of MentorPal, a framework for rapid development of virtual mentors Journal Article
In: Journal of Research on Technology in Education, pp. 1–23, 2020, ISSN: 1539-1523, 1945-0818.
Abstract | Links | BibTeX | Tags: Learning Sciences, Virtual Humans
@article{nye_feasibility_2020,
title = {Feasibility and usability of MentorPal, a framework for rapid development of virtual mentors},
author = {Benjamin D. Nye and Dan M. Davis and Sanad Z. Rizvi and Kayla Carr and William Swartout and Raj Thacker and Kenneth Shaw},
url = {https://www.tandfonline.com/doi/full/10.1080/15391523.2020.1771640},
doi = {10.1080/15391523.2020.1771640},
issn = {1539-1523, 1945-0818},
year = {2020},
date = {2020-07-01},
journal = {Journal of Research on Technology in Education},
pages = {1--23},
abstract = {One-on-one mentoring is an effective method to help novices with career development. However, traditional mentoring scales poorly. To address this problem, MentorPal emulates conversations with a panel of virtual mentors based on recordings of real STEM professionals. Students freely ask questions as they might in a career fair, while machine learning algorithms attempt to provide the best answers. MentorPal has developed strategies for the rapid development of new virtual mentors, where training data will be sparse. In a usability study, 31 high school students self-reported a) increased career knowledge and confidence, b) positive ease-of-use, and that c) mentors were helpful (87%) but often did not cover their preferred career (29%). Results demonstrate the feasibility of scalable virtual mentoring, but efficacy studies are needed to evaluate the impact of virtual mentors, particularly for groups with limited STEM opportunities.},
keywords = {Learning Sciences, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Talbot, Thomas Brett; Thiry, Katherine Elizabeth; Jenkins, Michael
Storyboarding the Virtuality: Methods and Best Practices to Depict Scenes and Interactive Stories in Virtual and Mixed Reality Incollection
In: Advances in Usability, User Experience, Wearable and Assistive Technology, vol. 1217, pp. 129–135, Springer International Publishing, Cham, Switzerland, 2020, ISBN: 978-3-030-51827-1 978-3-030-51828-8.
Abstract | Links | BibTeX | Tags: MedVR
@incollection{talbot_storyboarding_2020,
title = {Storyboarding the Virtuality: Methods and Best Practices to Depict Scenes and Interactive Stories in Virtual and Mixed Reality},
author = {Thomas Brett Talbot and Katherine Elizabeth Thiry and Michael Jenkins},
url = {http://link.springer.com/10.1007/978-3-030-51828-8_17},
doi = {10.1007/978-3-030-51828-8_17},
isbn = {978-3-030-51827-1 978-3-030-51828-8},
year = {2020},
date = {2020-07-01},
booktitle = {Advances in Usability, User Experience, Wearable and Assistive Technology},
volume = {1217},
pages = {129--135},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Storyboarding is a cinematic prototyping technique to visualize settings, event sequences, dialogues & character depictions. Interactive VR/MR experiences benefit from storyboarding as part of the creation process, yet free movement & immersive 3D introduce challenges. Techniques to visualize 3D settings are explored with methods to conduct traditional storyboarding while requiring multiple viewpoints within a single timestep are elaborated. This is possible w/ perspective scene views. Even with 3D prototyping tools, it is important to maintain practices which optimize VR storyboarding and maintain spatial efficiency, allow storyboards to be hand drawn and be intuitive to read. A powerful solution is to bind several perspectives together to represent a specific time while reverting to a traditional single viewpoint when not necessary, therefore balancing three dimensionality, spatial efficiency & ease of creation.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {incollection}
}
Mozgai, Sharon; Hartholt, Arno; Akinyemi, Dayo; Kubicek, Katarina; Rizzo, Albert (Skip); Kipke, Michele
Development and Initial Feasibility Testing of the Virtual Research Navigator (VRN): A Public-Facing Agent-Based Educational System for Clinical Research Participation Incollection
In: HCI International 2020 - Posters, vol. 1225, pp. 304–307, Springer International Publishing, Cham, Switzerland, 2020, ISBN: 978-3-030-50728-2 978-3-030-50729-9.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@incollection{mozgai_development_2020,
title = {Development and Initial Feasibility Testing of the Virtual Research Navigator ({VRN}): A Public-Facing Agent-Based Educational System for Clinical Research Participation},
author = {Sharon Mozgai and Arno Hartholt and Dayo Akinyemi and Katarina Kubicek and Albert (Skip) Rizzo and Michele Kipke},
url = {http://link.springer.com/10.1007/978-3-030-50729-9_43},
doi = {10.1007/978-3-030-50729-9_43},
isbn = {978-3-030-50728-2 978-3-030-50729-9},
year = {2020},
date = {2020-07-01},
booktitle = {HCI International 2020 - Posters},
volume = {1225},
pages = {304--307},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {The overall goal of VRN is to develop a novel technology solution at Children’s Hospital Los Angeles (CHLA) to overcome barriers that prevent the recruitment of diverse patient populations to clinical trials by providing both caregivers and children with an interactive educational experience. This system consists of 1) an intelligent agent called Zippy that users interact with by keyboard or voice input, 2) a series of videos covering topics including Privacy, Consent and Benefits, and 3) a UI that guides users through all available content. Pre- and post-questionnaires assessed willingness to participate in clinical research and found participants either increased or maintained their level of willingness to participate in research studies. Additionally, qualitative analysis of interview data revealed participants rated the overall interaction favorably and believed Zippy to be more fun, less judgmental and less threatening than interacting with a human. Future iterations are in-progress based on the user-feedback},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Li, Ruilong; Xiu, Yuliang; Saito, Shunsuke; Huang, Zeng; Olszewski, Kyle; Li, Hao
Monocular Real-Time Volumetric Performance Capture Journal Article
In: ResearchGate, pp. 30, 2020.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@article{li_monocular_2020,
  title     = {Monocular Real-Time Volumetric Performance Capture},
  author    = {Li, Ruilong and Xiu, Yuliang and Saito, Shunsuke and Huang, Zeng and Olszewski, Kyle and Li, Hao},
  url       = {https://www.researchgate.net/publication/343279742_Monocular_Real-Time_Volumetric_Performance_Capture},
  year      = {2020},
  date      = {2020-07-01},
  journal   = {ResearchGate},
  pages     = {30},
  abstract  = {We present the first approach to volumetric performance capture and novel-view rendering at real-time speed from monocular video, eliminating the need for expensive multi-view systems or cumbersome pre-acquisition of a personalized template model. Our system reconstructs a fully textured 3D human from each frame by leveraging Pixel-Aligned Implicit Function (PIFu). While PIFu achieves high-resolution reconstruction in a memory-efficient manner, its computationally expensive inference prevents us from deploying such a system for real-time applications. To this end, we propose a novel hierarchical surface localization algorithm and a direct rendering method without explicitly extracting surface meshes. By culling unnecessary regions for evaluation in a coarse-to-fine manner, we successfully accelerate the reconstruction by two orders of magnitude from the baseline without compromising the quality. Furthermore, we introduce an Online Hard Example Mining (OHEM) technique that effectively suppresses failure modes due to the rare occurrence of challenging examples. We adaptively update the sampling probability of the training data based on the current reconstruction accuracy, which effectively alleviates reconstruction artifacts. Our experiments and evaluations demonstrate the robustness of our system to various challenging angles, illuminations, poses, and clothing styles. We also show that our approach compares favorably with the state-of-the-art monocular performance capture. Our proposed approach removes the need for multi-view studio settings and enables a consumer-accessible solution for volumetric capture.},
  keywords  = {Graphics, UARC},
  pubstate  = {published},
  tppubtype = {article}
}
Czyzewski, Adam; Dalton, Jeffrey; Leuski, Anton
Agent Dialogue: A Platform for Conversational Information Seeking Experimentation Inproceedings
In: Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 2121–2124, ACM, Virtual Event, China, 2020, ISBN: 978-1-4503-8016-4.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@inproceedings{czyzewski_agent_2020,
title = {Agent {Dialogue}: A Platform for Conversational Information Seeking Experimentation},
author = {Adam Czyzewski and Jeffrey Dalton and Anton Leuski},
url = {https://dl.acm.org/doi/10.1145/3397271.3401397},
doi = {10.1145/3397271.3401397},
isbn = {978-1-4503-8016-4},
year = {2020},
date = {2020-07-01},
booktitle = {Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval},
pages = {2121--2124},
publisher = {ACM},
address = {Virtual Event, China},
abstract = {Conversational Information Seeking (CIS) is an emerging area of Information Retrieval focused on interactive search systems. As a result there is a need for new benchmark datasets and tools to enable their creation. In this demo we present the Agent Dialogue (AD) platform, an open-source system developed for researchers to perform Wizard-of-Oz CIS experiments. AD is a scalable cloud-native platform developed with Docker and Kubernetes with a flexible and modular micro-service architecture built on production-grade stateof-the-art open-source tools (Kubernetes, gRPC streaming, React, and Firebase). It supports varied front-ends and has the ability to interface with multiple existing agent systems, including Google Assistant and open-source search libraries. It includes support for centralized structure logging as well as offline relevance annotation.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Brixey, Jacqueline; Artstein, Ron
ChoCo: a multimodal corpus of the Choctaw language Journal Article
In: Language Resources and Evaluation, 2020, ISSN: 1574-020X, 1574-0218.
Abstract | Links | BibTeX | Tags: ARO-Coop, UARC, Virtual Humans
@article{brixey_choco_2020,
title = {{ChoCo}: a multimodal corpus of the {Choctaw} language},
author = {Jacqueline Brixey and Ron Artstein},
url = {http://link.springer.com/10.1007/s10579-020-09494-5},
doi = {10.1007/s10579-020-09494-5},
issn = {1574-020X, 1574-0218},
year = {2020},
date = {2020-07-01},
journal = {Language Resources and Evaluation},
abstract = {This article presents a general use corpus for Choctaw, an American indigenous language (ISO 639-2: cho, endonym: Chahta). The corpus contains audio, video, and text resources, with many texts also translated in English. The Oklahoma Choctaw and the Mississippi Choctaw variants of the language are represented in the corpus. The data set provides documentation support for this threatened language, and allows researchers and language teachers access to a diverse collection of resources.},
keywords = {ARO-Coop, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hilty, Donald M.; Randhawa, Karan; Maheu, Marlene M.; McKean, Alastair J. S.; Pantera, Richard; Mishkind, Matthew C.; Rizzo, Albert “Skip”
A Review of Telepresence, Virtual Reality, and Augmented Reality Applied to Clinical Care Journal Article
In: Journal of Technology in Behavioral Science, vol. 5, no. 2, pp. 178–205, 2020, ISSN: 2366-5963.
Abstract | Links | BibTeX | Tags: MedVR
@article{hilty_review_2020,
title = {A Review of Telepresence, Virtual Reality, and Augmented Reality Applied to Clinical Care},
author = {Donald M. Hilty and Karan Randhawa and Marlene M. Maheu and Alastair J. S. McKean and Richard Pantera and Matthew C. Mishkind and Albert {``Skip''} Rizzo},
url = {http://link.springer.com/10.1007/s41347-020-00126-x},
doi = {10.1007/s41347-020-00126-x},
issn = {2366-5963},
year = {2020},
date = {2020-06-01},
journal = {Journal of Technology in Behavioral Science},
volume = {5},
number = {2},
pages = {178--205},
abstract = {This scoping review article explores the application of telepresence (TPr), virtual reality (VR), and augmented reality (AR) to clinical care. A literature search of key words was conducted from January 1990 through May 2019 of the following databases: PubMed/ Medline, American Psychological Association PsycNET, Pubmed/Medline, Cochrane, Embase, PsycINFO, Web of Science, Scopus, OTSeeker, ABI/INFORM, computer-mediated communication (CMC), technology-mediated communications, Arts & Humanities Citation Index, Project Muse, ProQuest Research Library Plus, Sociological abstracts, Computers and Applied Sciences Complete and IT Source. It focused on concept areas: (1) TPr related to technologies; (2) virtual, augmented, reality, environment; (3) technology or computer-mediated communication; (4) clinical therapeutic relationship (boundaries, care, communication, connect, engagement, empathy, intimacy, trust); (5) telebehavioral health; (6) psychotherapy via technology; and (7) medicine/health care. Inclusion criteria were concept area 1 in combination with 2–7 and 2 or 3 in combination with any of 4–7. From a total of 5214 potential references, the authors found 512 eligible for full-text review and found 85 papers directly relevant to the concepts. From papers’ references and a review of books and popular literature about TPr, virtual reality (VR), and augmented reality (AR), 13 other sources of information were found. The historical evolution of TPr, VR, and AR shows that definitions, foci of studies (e.g., social neuroscience to business), and applications vary; assessments of TPr also vary widely. Studies discuss VR, AR, and TPr in medicine (e.g., rehabilitation, robotics), experimental psychology (laboratory, field, mixed), and behavioral health. 
Virtual environment (VE) designs aid the study of interpersonal communication and behavior, using standardized social interaction partners, virtual standardized patients, and/or virtual humans—all contingent on the participants’ experience of presence and the ability to engage. Additional research is needed to standardize experimental and clinical interventions, while maintaining ecological validity. Technology can significantly improve quality of care, access to new treatments and training, if ethical and reimbursement issues are better explored.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Saxon, Leslie; DiPaula, Brooks; Fox, Glenn R; Ebert, Rebecca; Duhaime, Josiah; Nocera, Luciano; Tran, Luan; Sobhani, Mona
Continuous Measurement of Reconnaissance Marines in Training With Custom Smartphone App and Watch: Observational Cohort Study Journal Article
In: JMIR mHealth and uHealth, vol. 8, no. 6, pp. e14116, 2020, ISSN: 2291-5222.
Abstract | Links | BibTeX | Tags: CBC
@article{saxon_continuous_2020,
title = {Continuous Measurement of Reconnaissance Marines in Training With Custom Smartphone App and Watch: Observational Cohort Study},
author = {Leslie Saxon and Brooks DiPaula and Glenn R. Fox and Rebecca Ebert and Josiah Duhaime and Luciano Nocera and Luan Tran and Mona Sobhani},
url = {https://mhealth.jmir.org/2020/6/e14116},
doi = {10.2196/14116},
issn = {2291-5222},
year = {2020},
date = {2020-06-01},
journal = {JMIR mHealth and uHealth},
volume = {8},
number = {6},
pages = {e14116},
abstract = {Background: Specialized training for elite US military units is associated with high attrition due to intense psychological and physical demands. The need to graduate more service members without degrading performance standards necessitates the identification of factors to predict success or failure in targeted training interventions. Objective: The aim of this study was to continuously quantify the mental and physical status of trainees of an elite military unit to identify novel predictors of success in training. Methods: A total of 3 consecutive classes of a specialized training course were provided with an Apple iPhone, Watch, and specially designed mobile app. Baseline personality assessments and continuous daily measures of mental status, physical pain, heart rate, activity, sleep, hydration, and nutrition were collected from the app and Watch data. Results: A total of 115 trainees enrolled and completed the study (100% male; age: mean 22 years, SD 4 years) and 64 (55.7%) successfully graduated. Most training withdrawals (27/115, 23.5%) occurred by day 7 (mean 5.5 days, SD 3.4 days; range 1-22 days). Extraversion, positive affect personality traits, and daily psychological profiles were associated with course completion; key psychological factors could predict withdrawals 1-2 days in advance (P=.009). Conclusions: Gathering accurate and continuous mental and physical status data during elite military training is possible with early predictors of withdrawal providing an opportunity for intervention.},
keywords = {CBC},
pubstate = {published},
tppubtype = {article}
}
Huang, Zeng; Xu, Yuanlu; Lassner, Christoph; Li, Hao; Tung, Tony
ARCH: Animatable Reconstruction of Clothed Humans Inproceedings
In: Proceedings of the 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3090–3099, IEEE, Seattle, WA, USA, 2020, ISBN: 978-1-72817-168-5.
Abstract | Links | BibTeX | Tags: ARO-Coop, Graphics
@inproceedings{huang_arch_2020,
title = {{ARCH}: Animatable Reconstruction of Clothed Humans},
author = {Zeng Huang and Yuanlu Xu and Christoph Lassner and Hao Li and Tony Tung},
url = {https://ieeexplore.ieee.org/document/9157750/},
doi = {10.1109/CVPR42600.2020.00316},
isbn = {978-1-72817-168-5},
year = {2020},
date = {2020-06-01},
booktitle = {Proceedings of the 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
pages = {3090--3099},
publisher = {IEEE},
address = {Seattle, WA, USA},
abstract = {In this paper, we propose ARCH (Animatable Reconstruction of Clothed Humans), a novel end-to-end framework for accurate reconstruction of animation-ready 3D clothed humans from a monocular image. Existing approaches to digitize 3D humans struggle to handle pose variations and recover details. Also, they do not produce models that are animation ready. In contrast, ARCH is a learned pose-aware model that produces detailed 3D rigged full-body human avatars from a single unconstrained RGB image. A Semantic Space and a Semantic Deformation Field are created using a parametric 3D body estimator. They allow the transformation of 2D/3D clothed humans into a canonical space, reducing ambiguities in geometry caused by pose variations and occlusions in training data. Detailed surface geometry and appearance are learned using an implicit function representation with spatial local features. Furthermore, we propose additional per-pixel supervision on the 3D reconstruction using opacity-aware differentiable rendering. Our experiments indicate that ARCH increases the fidelity of the reconstructed humans. We obtain more than 50% lower reconstruction errors for standard metrics compared to state-of-the-art methods on public datasets. We also show numerous qualitative examples of animated, high-quality reconstructed avatars unseen in the literature so far.},
keywords = {ARO-Coop, Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Malta, Loretta S.; Giosan, Cezar; Szkodny, Lauren E.; Altemus, Margaret M.; Rizzo, Albert A.; Silbersweig, David A.; Difede, JoAnn
Predictors of involuntary and voluntary emotional episodic memories of virtual reality scenarios in Veterans with and without PTSD Journal Article
In: Memory, pp. 1–17, 2020, ISSN: 0965-8211, 1464-0686.
Abstract | Links | BibTeX | Tags: MedVR
@article{malta_predictors_2020,
title = {Predictors of involuntary and voluntary emotional episodic memories of virtual reality scenarios in {Veterans} with and without {PTSD}},
author = {Loretta S. Malta and Cezar Giosan and Lauren E. Szkodny and Margaret M. Altemus and Albert A. Rizzo and David A. Silbersweig and JoAnn Difede},
url = {https://www.tandfonline.com/doi/full/10.1080/09658211.2020.1770289},
doi = {10.1080/09658211.2020.1770289},
issn = {0965-8211, 1464-0686},
year = {2020},
date = {2020-05-01},
journal = {Memory},
pages = {1--17},
abstract = {This study investigated predictors of involuntary and voluntary memories of stressful virtual reality scenarios. Thirty-two veterans of the two Persian Gulf Wars completed verbal memory tests and diagnostic assessments. They were randomly assigned to a Recounting (16) or a Suppression (16) condition. After immersion in the VR scenarios, the Recounting group described the scenarios and the Suppression group suppressed thoughts of the scenarios. One week later, participants completed surprise voluntary memory tests and another thought suppression task. The best predictors of voluntary memory were verbal memory ability, dissociation, and to a lesser extent, physiological arousal before and after scenarios. Dissociation and physiological stress responses selectively affected memory for neutral elements. Higher distress during scenarios impaired voluntary memory but increased the frequency of involuntary memories. Physiological stress responses promoted more frequent involuntary memories immediately after the scenarios. More frequent initial involuntary memories, tonic physiological arousal, and stronger emotional responses to dangerous events predicted difficulty inhibiting involuntary memories at follow-up. The effects of thought suppression were transient and weaker than those of other variables. The findings suggest that posttraumatic amnesia and involuntary memories of adverse events are more related to memory ability and emotional and physiological stress responses than to postexposure suppression.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Rayatdoost, Soheil; Rudrauf, David; Soleymani, Mohammad
Expression-Guided EEG Representation Learning for Emotion Recognition Inproceedings
In: Proceedings of the ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3222–3226, IEEE, Barcelona, Spain, 2020, ISBN: 978-1-5090-6631-5.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{rayatdoost_expression-guided_2020,
title = {Expression-Guided {EEG} Representation Learning for Emotion Recognition},
author = {Soheil Rayatdoost and David Rudrauf and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9053004/},
doi = {10.1109/ICASSP40776.2020.9053004},
isbn = {978-1-5090-6631-5},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {3222--3226},
publisher = {IEEE},
address = {Barcelona, Spain},
abstract = {Learning a joint and coordinated representation between different modalities can improve multimodal emotion recognition. In this paper, we propose a deep representation learning approach for emotion recognition from electroencephalogram (EEG) signals guided by facial electromyogram (EMG) and electrooculogram (EOG) signals. We recorded EEG, EMG and EOG signals from 60 participants who watched 40 short videos and self-reported their emotions. A cross-modal encoder that jointly learns the features extracted from facial and ocular expressions and EEG responses was designed and evaluated on our recorded data and MAHOB-HCI, a publicly available database. We demonstrate that the proposed representation is able to improve emotion recognition performance. We also show that the learned representation can be transferred to a different database without EMG and EOG and achieve superior performance. Methods that fuse behavioral and neural responses can be deployed in wearable emotion recognition solutions, practical in situations in which computer vision expression recognition is not feasible.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Bonial, Claire; Donatelli, Lucia; Abrams, Mitchell; Lukin, Stephanie M; Tratz, Stephen; Marge, Matthew; Artstein, Ron; Traum, David; Voss, Clare R
Dialogue-AMR: Abstract Meaning Representation for Dialogue Inproceedings
In: Proceedings of the 12th Language Resources and Evaluation Conference, pp. 12, European Language Resources Association, Marseille, France, 2020.
Abstract | Links | BibTeX | Tags: ARL, ARO-Coop, DoD, UARC, Virtual Humans
@inproceedings{bonial_dialogue-amr_2020,
title = {{Dialogue-AMR}: {Abstract Meaning Representation} for Dialogue},
author = {Claire Bonial and Lucia Donatelli and Mitchell Abrams and Stephanie M. Lukin and Stephen Tratz and Matthew Marge and Ron Artstein and David Traum and Clare R. Voss},
url = {https://www.aclweb.org/anthology/2020.lrec-1.86/},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 12th Language Resources and Evaluation Conference},
pages = {12},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {This paper describes a schema that enriches Abstract Meaning Representation (AMR) in order to provide a semantic representation for facilitating Natural Language Understanding (NLU) in dialogue systems. AMR offers a valuable level of abstraction of the propositional content of an utterance; however, it does not capture the illocutionary force or speaker’s intended contribution in the broader dialogue context (e.g., make a request or ask a question), nor does it capture tense or aspect. We explore dialogue in the domain of human-robot interaction, where a conversational robot is engaged in search and navigation tasks with a human partner. To address the limitations of standard AMR, we develop an inventory of speech acts suitable for our domain, and present “Dialogue-AMR”, an enhanced AMR that represents not only the content of an utterance, but the illocutionary force behind it, as well as tense and aspect. To showcase the coverage of the schema, we use both manual and automatic methods to construct the “DialAMR” corpus—a corpus of human-robot dialogue annotated with standard AMR and our enriched Dialogue-AMR schema. Our automated methods can be used to incorporate AMR into a larger NLU pipeline supporting human-robot dialogue.},
keywords = {ARL, ARO-Coop, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Alavi, Seyed Hossein; Leuski, Anton; Traum, David
In: Proceedings of the 12th Language Resources and Evaluation Conference, pp. 735–742, European Language Resources Association, Marseille, France, 2020.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@inproceedings{alavi_which_2020,
title = {Which Model Should We Use for a Real-World Conversational Dialogue System? A Cross-Language Relevance Model or a Deep Neural Net?},
author = {Seyed Hossein Alavi and Anton Leuski and David Traum},
url = {https://www.aclweb.org/anthology/2020.lrec-1.92/},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 12th Language Resources and Evaluation Conference},
pages = {735--742},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {We compare two models for corpus-based selection of dialogue responses: one based on cross-language relevance with a cross-language LSTM model. Each model is tested on multiple corpora, collected from two different types of dialogue source material. Results show that while the LSTM model performs adequately on a very large corpus (millions of utterances), its performance is dominated by the cross-language relevance model for a more moderate-sized corpus (ten thousands of utterances).},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lei, Su; Stefanov, Kalin; Gratch, Jonathan
Emotion or expressivity? An automated analysis of nonverbal perception in a social dilemma Inproceedings
In: Proceedings of the 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020), pp. 8, IEEE, Buenos Aires, Argentina, 2020.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@inproceedings{lei_emotion_2020,
title = {Emotion or expressivity? An automated analysis of nonverbal perception in a social dilemma},
author = {Su Lei and Kalin Stefanov and Jonathan Gratch},
url = {https://www.computer.org/csdl/proceedings-article/fg/2020/307900a770/1kecIWT5wmA},
doi = {10.1109/FG47880.2020.00123},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020)},
pages = {8},
publisher = {IEEE},
address = {Buenos Aires, Argentina},
abstract = {An extensive body of research has examined how specific emotional expressions shape social perceptions and social decisions, yet recent scholarship in emotion research has raised questions about the validity of emotion as a construct. In this article, we contrast the value of measuring emotional expressions with the more general construct of expressivity (in the sense of conveying a thought or emotion through any nonverbal behavior) and develop models that can automatically extract perceived expressivity from videos. Although less extensive, a solid body of research has shown expressivity to be an important element when studying interpersonal perception, particularly in psychiatric contexts. Here we examine the role expressivity plays in predicting social perceptions and decisions in the context of a social dilemma. We show that perceivers use more than facial expressions when making judgments of expressivity and see these expressions as conveying thoughts as well as emotions (although facial expressions and emotional attributions explain most of the variance in these judgments). We next show that expressivity can be predicted with high accuracy using Lasso and random forests. Our analysis shows that features related to motion dynamics are particularly important for modeling these judgments. We also show that learned models of expressivity have value in recognizing important aspects of a social situation. First, we revisit a previously published finding which showed that smile intensity was associated with the unexpectedness of outcomes in social dilemmas; instead, we show that expressivity is a better predictor (and explanation) of this finding. Second, we provide preliminary evidence that expressivity is useful for identifying “moments of interest” in a video sequence.},
keywords = {ARO-Coop, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Gordon, Carla; Yanov, Volodymyr; Traum, David
Predicting Ratings of Real Dialogue Participants from Artificial Data and Ratings of Human Dialogue Observers Inproceedings
In: Proceedings of the 12th Language Resources and Evaluation Conference, pp. 726–734, European Language Resources Association, Marseille, France, 2020.
Abstract | Links | BibTeX | Tags: ARO-Coop, Virtual Humans
@inproceedings{georgila_predicting_2020,
  title     = {Predicting Ratings of Real Dialogue Participants from Artificial Data and Ratings of Human Dialogue Observers},
  author    = {Georgila, Kallirroi and Gordon, Carla and Yanov, Volodymyr and Traum, David},
  url       = {https://www.aclweb.org/anthology/2020.lrec-1.91/},
  year      = {2020},
  date      = {2020-05-01},
  booktitle = {Proceedings of the 12th Language Resources and Evaluation Conference},
  pages     = {726--734},
  publisher = {European Language Resources Association},
  address   = {Marseille, France},
  abstract  = {We collected a corpus of dialogues in a Wizard of Oz (WOz) setting in the Internet of Things (IoT) domain. We asked users participating in these dialogues to rate the system on a number of aspects, namely, intelligence, naturalness, personality, friendliness, their enjoyment, overall quality, and whether they would recommend the system to others. Then we asked dialogue observers, i.e., Amazon Mechanical Turkers (MTurkers), to rate these dialogues on the same aspects. We also generated simulated dialogues between dialogue policies and simulated users and asked MTurkers to rate them again on the same aspects. Using linear regression, we developed dialogue evaluation functions based on features from the simulated dialogues and the MTurkers’ ratings, the WOz dialogues and the MTurkers’ ratings, and the WOz dialogues and the WOz participants’ ratings. We applied all these dialogue evaluation functions to a held-out portion of our WOz dialogues, and we report results on the predictive power of these different types of dialogue evaluation functions. Our results suggest that for three conversational aspects (intelligence, naturalness, overall quality) just training evaluation functions on simulated data could be sufficient.},
  keywords  = {ARO-Coop, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Katz, Andrea C.; Norr, Aaron M.; Buck, Benjamin; Fantelli, Emily; Edwards-Stewart, Amanda; Koenen-Woods, Patricia; Zetocha, Kimberlee; Smolenski, Derek J.; Holloway, Kevin; Rothbaum, Barbara O.; Difede, JoAnn; Rizzo, Albert; Skopp, Nancy; Mishkind, Matt; Gahm, Gregory; Reger, Greg M.; Andrasik, Frank
In: Psychological Trauma: Theory, Research, Practice, and Policy, 2020, ISSN: 1942-969X, 1942-9681.
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{katz_changes_2020,
title = {Changes in physiological reactivity in response to the trauma memory during prolonged exposure and virtual reality exposure therapy for posttraumatic stress disorder},
author = {Andrea C. Katz and Aaron M. Norr and Benjamin Buck and Emily Fantelli and Amanda Edwards-Stewart and Patricia Koenen-Woods and Kimberlee Zetocha and Derek J. Smolenski and Kevin Holloway and Barbara O. Rothbaum and JoAnn Difede and Albert Rizzo and Nancy Skopp and Matt Mishkind and Gregory Gahm and Greg M. Reger and Frank Andrasik},
url = {http://doi.apa.org/getdoi.cfm?doi=10.1037/tra0000567},
doi = {10.1037/tra0000567},
issn = {1942-969X, 1942-9681},
year = {2020},
date = {2020-04-01},
journal = {Psychological Trauma: Theory, Research, Practice, and Policy},
abstract = {This study is among the first to examine how physiological processes change throughout PTSD treatment and the first to compare standard exposure therapy to therapy augmented with virtual reality (VR) in active-duty soldiers with PTSD. Results showed that soldiers in VR therapy had smaller physical reactions to trauma memories compared to those who did not receive treatment, whereas those who got standard treatment did not. These findings provide insight into possible mechanisms of PTSD treatment, point to potential objective indicators of early treatment response in active-duty soldiers, and suggest that VR treatment might lead to earlier symptom reduction.},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Hartholt, Arno; Leeds, Andrew; Rizzo, Albert ‘Skip’
Iterative Participatory Design for VRET Domain Transfer: From Combat Exposure to Military Sexual Trauma Inproceedings
In: Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems, pp. 8, ACM, Honolulu, HI, 2020.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{mozgai_iterative_2020,
title = {Iterative Participatory Design for {VRET} Domain Transfer: From Combat Exposure to {Military Sexual Trauma}},
author = {Sharon Mozgai and Arno Hartholt and Andrew Leeds and Albert {`Skip'} Rizzo},
url = {https://dl.acm.org/doi/abs/10.1145/3334480.3375219},
doi = {10.1145/3334480.3375219},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems},
pages = {8},
publisher = {ACM},
address = {Honolulu, HI},
abstract = {This case study describes the expansion of the BRAVEMIND virtual reality exposure therapy (VRET) system from the domain of combat-related posttraumatic stress disorder (PTSD) to the domain of PTSD due to Military Sexual Trauma (MST). As VRET continues to demonstrate efficacy in treating PTSD across multiple trauma types and anxiety disorders, adapting existing systems and content to new domains while simultaneously maintaining clinical integrity is becoming a high priority. To develop BRAVEMIND-MST we engaged in an iterative participatory design process with psychologists, engineers, and artists. This first-person account of our collaborative development process focuses on three key areas (1) VR Environment, (2) User-Avatar State, and (3) Events, while detailing the challenges we encountered and lessons learned. This process culminated in eight design guidelines as a first-step in defining a VRET domain transfer methodology.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chang, Chen-Wei; Li, Mengtong; Yeh, Shih-Ching; Chen, Yijing; Rizzo, Albert
In: IEEE Access, vol. 8, pp. 69566–69578, 2020, ISSN: 2169-3536.
Abstract | Links | BibTeX | Tags: MedVR
@article{chang_examining_2020,
title = {Examining the Effects of {HMDs/FSDs} and Gender Differences on Cognitive Processing Ability and User Experience of the {Stroop} Task-Embedded Virtual Reality Driving System ({STEVRDS})},
author = {Chen-Wei Chang and Mengtong Li and Shih-Ching Yeh and Yijing Chen and Albert Rizzo},
url = {https://ieeexplore.ieee.org/document/8959194/},
doi = {10.1109/ACCESS.2020.2966564},
issn = {2169-3536},
year = {2020},
date = {2020-04-01},
journal = {IEEE Access},
volume = {8},
pages = {69566--69578},
abstract = {The advent of virtual reality technology has provided a new approach for assessing and training cognitive processing ability, with the design of simulations used to replicate real events in everyday lives. To better understand how head mounted displays/flat screen displays (HMDs/FSDs) and differences in the individuals who use them affect cognitive performance and the use of VR systems, our research group created the Stroop task-embedded virtual reality driving system (STEVRDS) and conducted a 2 × 2 between-group factorial design experiment among college students. The study examined the effects of HMDs and FSDs that differ in monovision/stereovision and field of view, the impact of gender (males vs. females) on users’ performances in virtual driving and Stroop trials, and users’ psychophysiological responses while using the system. The participants’ subjective perceptions toward STEVRDS were also assessed to support the analyses/interpretations of cognitive performance, as well as provide empirical data relating to user experiences. The statistical analyses showed both main and interaction effects of HMDs/FSDs and gender on task performance, psychophysiological responses, and user evaluations of the system. The psychophysiological patterns exhibited during the use of STEVRDS further extended the findings. Overall, our results were comparable with cognitive phenomena reported in other studies/in real-life experiences or explained by logical reasoning, which suggests that the design/development of the STEVRDS is suitable for cognitive assessment/training. Practical implications are discussed for the application of HMDs and FSDs in evaluating and enhancing cognitive processing ability and the need for specific tailoring for male and female users.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Leuski, Anton; Yanov, Volodymyr; Traum, David
Human swarm interaction using plays, audibles, and a virtual spokesperson Inproceedings
In: Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II, pp. 40, SPIE, Online Only, United States, 2020, ISBN: 978-1-5106-3603-3 978-1-5106-3604-0.
Abstract | Links | BibTeX | Tags: ARL, DoD, MxR, UARC, Virtual Humans
@inproceedings{chaffey_human_2020,
  author    = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and Anton Leuski and Volodymyr Yanov and David Traum},
  title     = {Human swarm interaction using plays, audibles, and a virtual spokesperson},
  booktitle = {Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II},
  publisher = {SPIE},
  address   = {Online Only, United States},
  pages     = {40},
  year      = {2020},
  date      = {2020-04-01},
  doi       = {10.1117/12.2557573},
  isbn      = {978-1-5106-3603-3 978-1-5106-3604-0},
  url       = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/11413/2557573/Human-swarm-interaction-using-plays-audibles-and-a-virtual-spokesperson/10.1117/12.2557573.full},
  abstract  = {This study explores two hypotheses about human-agent teaming: 1. Real-time coordination among a large set of autonomous robots can be achieved using predefined “plays” which define how to execute a task, and “audibles” which modify the play on the fly; 2. A spokesperson agent can serve as a representative for a group of robots, relaying information between the robots and human teammates. These hypotheses are tested in a simulated game environment: a human participant leads a search-and-rescue operation to evacuate a town threatened by an approaching wildfire, with the object of saving as many lives as possible. The participant communicates verbally with a virtual agent controlling a team of ten aerial robots and one ground vehicle, while observing a live map display with real-time location of the fire and identified survivors. Since full automation is not currently possible, two human controllers control the agent’s speech and actions, and input parameters to the robots, which then operate autonomously until the parameters are changed. Designated plays include monitoring the spread of fire, searching for survivors, broadcasting warnings, guiding residents to safety, and sending the rescue vehicle. A successful evacuation of all the residents requires personal intervention in some cases (e.g., stubborn residents) while delegating other responsibilities to the spokesperson agent and robots, all in a rapidly changing scene. The study records the participants’ verbal and nonverbal behavior in order to identify strategies people use when communicating with robotic swarms, and to collect data for eventual automation.},
  keywords  = {ARL, DoD, MxR, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Davis, Dan M; Guizani, Skander; Jaksha, Evan
Establishing Metrics and Creating Standards: Quantifying Efficacy of Battlefield Simulations Journal Article
In: SISO Simulation Innovation Workshop, no. 2020_SIW_52, pp. 11, 2020.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC
@article{davis_establishing_2020,
title = {Establishing Metrics and Creating Standards: Quantifying Efficacy of Battlefield Simulations},
author = {Dan M. Davis and Skander Guizani and Evan Jaksha},
url = {https://www.sisostds.org/Default.aspx?tabid=105&EntryId=51197},
year = {2020},
date = {2020-04-01},
journal = {SISO Simulation Innovation Workshop},
number = {2020\_SIW\_52},
pages = {11},
abstract = {This paper asserts that quantification and verification of Battlefield simulations is necessary to assess, verify, and guide the researchers, military commanders, and users in both the simulations’ development and their implementation. The authors present their observations on previous development activities that were hampered by lack of effective metrics and present their arguments that much of this was driven by a lack of standards. Tracing back using commonly accepted System Engineering practices, they show how lack of such standards makes even to the development of effective metrics problematic. The paper documents the experiences and enumerates the potential pitfalls of these shortcomings. Both the authors' experiences in military service and the technical literature supporting their theses are adduced to support their analysis of the current technical research and development environment. Then the paper evaluates several System Engineering tools to further investigate and establish the ultimate goals of these formalized processes. Using their current project in establishing virtual on-line mentors as an exemplar of the way such tools would be effective, the authors make a case for the needs for metrics standards that both are accepted by consensus and are ultimately directed at providing the warfighter with all of the training possible before putting that warfighters in harm's way and imperiling the missions for which they are putting themselves at risk. Examples of the nature and reaction to simulator training, virtual human interaction, computer agent interfaces and implementation issues are given to further illuminate for the reader the possible extensions of these approaches into the reader's own research as well as calling for a more community-wide recognition of the needs for standards both for implementation and for metrics to assess Battlefield Simulation utility to the warfighter. 
Future investigations, analysis and action are considered and evaluated},
keywords = {Learning Sciences, UARC},
pubstate = {published},
tppubtype = {article}
}
Li, Ruilong; Bladin, Karl; Zhao, Yajie; Chinara, Chinmay; Ingraham, Owen; Xiang, Pengda; Ren, Xinglei; Prasad, Pratusha; Kishore, Bipin; Xing, Jun; Li, Hao
Learning Formation of Physically-Based Face Attributes Inproceedings
In: Proceedings of the CVPR 2020, IEEE, Seattle, Washington, 2020.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{li_learning_2020,
title = {Learning Formation of Physically-Based Face Attributes},
author = {Ruilong Li and Karl Bladin and Yajie Zhao and Chinmay Chinara and Owen Ingraham and Pengda Xiang and Xinglei Ren and Pratusha Prasad and Bipin Kishore and Jun Xing and Hao Li},
url = {https://www.computer.org/csdl/proceedings-article/cvpr/2020/716800d407/1m3oiaP9ouQ},
doi = {10.1109/CVPR42600.2020.00347},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of the {CVPR} 2020},
publisher = {IEEE},
address = {Seattle, Washington},
abstract = {Based on a combined data set of 4000 high resolution facial scans, we introduce a non-linear morphable face model, capable of producing multifarious face geometry of pore-level resolution, coupled with material attributes for use in physically-based rendering. We aim to maximize the variety of face identities, while increasing the robustness of correspondence between unique components, including middle-frequency geometry, albedo maps, specular intensity maps and high-frequency displacement details. Our deep learning based generative model learns to correlate albedo and geometry, which ensures the anatomical correctness of the generated assets. We demonstrate potential use of our generative model for novel identity generation, model fitting, interpolation, animation, high fidelity data visualization, and low-to-high resolution data domain transferring. We hope the release of this generative model will encourage further cooperation between all graphics, vision, and data focused professionals, while demonstrating the cumulative value of every individual’s complete biometric profile.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}