Publications
2022
Zhu, Runhe; Lucas, Gale M.; Becerik-Gerber, Burcin; Southers, Erroll G.; Landicho, Earl
The impact of security countermeasures on human behavior during active shooter incidents Journal Article
In: Sci Rep, vol. 12, no. 1, pp. 929, 2022, ISSN: 2045-2322.
Abstract | Links | BibTeX | Tags: DTIC, UARC
@article{zhu_impact_2022,
title = {The impact of security countermeasures on human behavior during active shooter incidents},
author = {Runhe Zhu and Gale M. Lucas and Burcin Becerik-Gerber and Erroll G. Southers and Earl Landicho},
url = {https://www.nature.com/articles/s41598-022-04922-8},
doi = {10.1038/s41598-022-04922-8},
issn = {2045-2322},
year = {2022},
date = {2022-12-01},
urldate = {2022-09-26},
journal = {Sci Rep},
volume = {12},
number = {1},
pages = {929},
abstract = {Active shooter incidents represent an increasing threat to American society, especially in commercial and educational buildings. In recent years, a wide variety of security countermeasures have been recommended by public and governmental agencies. Many of these countermeasures are aimed to increase building security, yet their impact on human behavior when an active shooter incident occurs remains underexplored. To fill this research gap, we conducted virtual experiments to evaluate the impact of countermeasures on human behavior during active shooter incidents. A total of 162 office workers and middle/high school teachers were recruited to respond to an active shooter incident in virtual office and school buildings with or without the implementation of multiple countermeasures. The experiment results showed countermeasures significantly influenced participants’ response time and decisions (e.g., run, hide, fight). Participants’ responses and perceptions of the active shooter incident were also contingent on their daily roles, as well as building and social contexts. Teachers had more concerns for occupants’ safety than office workers. Moreover, teachers had more positive perceptions of occupants in the school, whereas office workers had more positive perceptions of occupants in the office.},
keywords = {DTIC, UARC},
pubstate = {published},
tppubtype = {article}
}
Difede, JoAnn; Rothbaum, Barbara O.; Rizzo, Albert A.; Wyka, Katarzyna; Spielman, Lisa; Reist, Christopher; Roy, Michael J.; Jovanovic, Tanja; Norrholm, Seth D.; Cukor, Judith; Olden, Megan; Glatt, Charles E.; Lee, Francis S.
Enhancing exposure therapy for posttraumatic stress disorder (PTSD): a randomized clinical trial of virtual reality and imaginal exposure with a cognitive enhancer Journal Article
In: Transl Psychiatry, vol. 12, no. 1, pp. 299, 2022, ISSN: 2158-3188.
Abstract | Links | BibTeX | Tags: DTIC, MedVR, Virtual Humans
@article{difede_enhancing_2022,
title = {Enhancing exposure therapy for posttraumatic stress disorder (PTSD): a randomized clinical trial of virtual reality and imaginal exposure with a cognitive enhancer},
author = {JoAnn Difede and Barbara O. Rothbaum and Albert A. Rizzo and Katarzyna Wyka and Lisa Spielman and Christopher Reist and Michael J. Roy and Tanja Jovanovic and Seth D. Norrholm and Judith Cukor and Megan Olden and Charles E. Glatt and Francis S. Lee},
url = {https://www.nature.com/articles/s41398-022-02066-x},
doi = {10.1038/s41398-022-02066-x},
issn = {2158-3188},
year = {2022},
date = {2022-12-01},
urldate = {2022-09-13},
journal = {Transl Psychiatry},
volume = {12},
number = {1},
pages = {299},
abstract = {Posttraumatic stress disorder (PTSD) is a significant public health issue. Yet, there are limited treatment options and no data to suggest which treatment will work for whom. We tested the efficacy of virtual reality exposure (VRE) or prolonged imaginal exposure (PE), augmented with D-cycloserine (DCS) for combat-related PTSD. As an exploratory aim, we examined whether brain-derived neurotrophic factor (BDNF) and fatty acid amide hydrolase (FAAH) moderated treatment response. Military personnel with PTSD (n = 192) were recruited into a multisite double-blind randomized controlled trial to receive nine weeks of VRE or PE, with DCS or placebo. Primary outcome was the improvement in symptom severity. Randomization was stratified by comorbid depression (MDD) and site. Participants in both VRE and PE showed similar meaningful clinical improvement with no difference between the treatment groups. A significant interaction (p = 0.45) suggested VRE was more effective for depressed participants (CAPS difference M = 3.51 [95% CI 1.17–5.86]},
keywords = {DTIC, MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Pauw, Lisanne S.; Sauter, Disa A.; van Kleef, Gerben A.; Lucas, Gale M.; Gratch, Jonathan; Fischer, Agneta H.
The avatar will see you now: Support from a virtual human provides socio-emotional benefits Journal Article
In: Computers in Human Behavior, vol. 136, pp. 107368, 2022, ISSN: 0747-5632.
Links | BibTeX | Tags: DTIC, Virtual Humans
@article{pauw_avatar_2022,
title = {The avatar will see you now: Support from a virtual human provides socio-emotional benefits},
author = {Lisanne S. Pauw and Disa A. Sauter and Gerben A. van Kleef and Gale M. Lucas and Jonathan Gratch and Agneta H. Fischer},
url = {https://linkinghub.elsevier.com/retrieve/pii/S074756322200190X},
doi = {10.1016/j.chb.2022.107368},
issn = {0747-5632},
year = {2022},
date = {2022-11-01},
urldate = {2022-09-28},
journal = {Computers in Human Behavior},
volume = {136},
pages = {107368},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Fast, Nathanael J.
The power to harm: AI assistants pave the way to unethical behavior Journal Article
In: Current Opinion in Psychology, vol. 47, pp. 101382, 2022, ISSN: 2352-250X.
Links | BibTeX | Tags: AI, DTIC, Virtual Humans
@article{gratch_power_2022,
title = {The power to harm: AI assistants pave the way to unethical behavior},
author = {Jonathan Gratch and Nathanael J. Fast},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2352250X22001014},
doi = {10.1016/j.copsyc.2022.101382},
issn = {2352-250X},
year = {2022},
date = {2022-10-01},
urldate = {2022-09-28},
journal = {Current Opinion in Psychology},
volume = {47},
pages = {101382},
keywords = {AI, DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hale, James; Kim, Peter; Gratch, Jonathan
Preference interdependencies in a multi-issue salary negotiation Inproceedings
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{hale_preference_2022,
title = {Preference interdependencies in a multi-issue salary negotiation},
author = {James Hale and Peter Kim and Jonathan Gratch},
url = {https://doi.org/10.1145/3514197.3549681},
doi = {10.1145/3514197.3549681},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-27},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1--8},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '22},
abstract = {Negotiation is an important potential application domain for intelligent virtual agents but, unlike research on agent-agent negotiations, agents that negotiate with people often adopt unrealistic simplifying assumptions. These assumptions not only limit the generality of these agents, but call into question scientific findings about how people negotiate with agents. Here we relax two common assumptions: the use of assigned rather than elicited user preferences, and the use of linear utility functions. Using a simulated salary negotiation, we find that relaxing these assumptions helps reveal interesting individual differences in how people negotiate their salary and allows algorithms to find better win-win solutions.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hale, James; Jalan, Harsh; Saini, Nidhi; Tan, Shao Ling; Woo, Junhyuck; Gratch, Jonathan
Negotiation game to introduce non-linear utility Inproceedings
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
Abstract | Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{hale_negotiation_2022,
title = {Negotiation game to introduce non-linear utility},
author = {James Hale and Harsh Jalan and Nidhi Saini and Shao Ling Tan and Junhyuck Woo and Jonathan Gratch},
url = {https://doi.org/10.1145/3514197.3549678},
doi = {10.1145/3514197.3549678},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-27},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1--3},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '22},
abstract = {Much prior work in automated negotiation makes the simplifying assumption of linear utility functions. As such, we propose a framework for multilateral repeated negotiations in a complex game setting—to introduce non-linearities—where negotiators can choose with whom they negotiate in subsequent games. This game setting not only creates non-linear utility functions, but also motivates the negotiation.},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Eugene; McNulty, Zachary; Gentle, Alex; Pradhan, Prerak Tusharkumar; Gratch, Jonathan
Examining the impact of emotion and agency on negotiator behavior Inproceedings
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–3, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9248-8.
Abstract | Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{lee_examining_2022,
title = {Examining the impact of emotion and agency on negotiator behavior},
author = {Eugene Lee and Zachary McNulty and Alex Gentle and Prerak Tusharkumar Pradhan and Jonathan Gratch},
url = {https://doi.org/10.1145/3514197.3549673},
doi = {10.1145/3514197.3549673},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-27},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1--3},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '22},
abstract = {Virtual human expressions can shape user behavior [1, 2, 3], yet in negotiation, findings have been underwhelming. For example, human negotiators can use anger to claim value (i.e., extract concessions) [4], but anger has no effect when exhibited by a virtual human [5]. Other psychological work suggests that emotions can create value (e.g., happy negotiators can better discover tradeoffs across issues that "grow the pie"), but little research has examined how virtual human expressions shape value creation. Here we present an agent architecture and pilot study that examines differences between how the emotional expressions of human and virtual-human opponents shape value claiming and value creation. We replicate the finding that virtual human anger fails to influence value claiming but discover counter-intuitive findings on value creation. We argue these findings highlight the potential for intelligent virtual humans to yield insight into human psychology.},
keywords = {DTIC, Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Li, Zongjian; Kim, Kevin; Leeds, Andrew; Mozgai, Sharon
Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development Inproceedings
In: Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents, pp. 1–8, ACM, Faro Portugal, 2022, ISBN: 978-1-4503-9248-8.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{hartholt_re-architecting_2022,
title = {Re-architecting the virtual human toolkit: towards an interoperable platform for embodied conversational agent research and development},
author = {Arno Hartholt and Ed Fast and Zongjian Li and Kevin Kim and Andrew Leeds and Sharon Mozgai},
url = {https://dl.acm.org/doi/10.1145/3514197.3549671},
doi = {10.1145/3514197.3549671},
isbn = {978-1-4503-9248-8},
year = {2022},
date = {2022-09-01},
urldate = {2022-09-15},
booktitle = {Proceedings of the 22nd ACM International Conference on Intelligent Virtual Agents},
pages = {1--8},
publisher = {ACM},
address = {Faro Portugal},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Korand, Sridevi; Fung, Cha Chi; Cohen, Sammy; Talbot, Thomas B.; Fischer, Susan; Luu, Cindy; Sargsyan, Mariam; Ben-Isaac, Eyal; Espinoza, Juan; Chang, Todd P.
The Association Between Multitasking and Multi-Patient Care Skills in a Simulated Patient Care Video Game Among Second Year Medical Students Based on Specialty Choice Journal Article
In: Simulation & Gaming, vol. 53, no. 4, pp. 335–352, 2022, ISSN: 1046-8781, 1552-826X.
Abstract | Links | BibTeX | Tags: MedVR
@article{korand_association_2022,
title = {The Association Between Multitasking and Multi-Patient Care Skills in a Simulated Patient Care Video Game Among Second Year Medical Students Based on Specialty Choice},
author = {Sridevi Korand and Cha Chi Fung and Sammy Cohen and Thomas B. Talbot and Susan Fischer and Cindy Luu and Mariam Sargsyan and Eyal Ben-Isaac and Juan Espinoza and Todd P. Chang},
url = {http://journals.sagepub.com/doi/10.1177/10468781221103460},
doi = {10.1177/10468781221103460},
issn = {1046-8781, 1552-826X},
year = {2022},
date = {2022-08-01},
urldate = {2022-09-21},
journal = {Simulation & Gaming},
volume = {53},
number = {4},
pages = {335--352},
abstract = {Background and Objective Healthcare providers require multitasking and multi-patient care skills, and training programs do not formally incorporate curricula specifically for multitasking skills to trainees. The medical education community is in equipoise on whether multitasking ability is a fixed trait. Furthermore, it is unclear whether multitasking ability affects those who gravitate toward careers that demand it, particularly among medical students deciding on a specialty. We sought to define the association between specialty choice, multitasking abilities and multi-patient care delivery among pre-clinical medical students. For this study, we examined both efficiency and accuracy metrics within multitasking and whether they were different between students choosing specialties. Methods This was a planned cross-sectional sub-study focused on 2nd year medical students (MS-IIs) within a parent study evaluating multi-patient care skills using a serious game (VitalSigns:ED TM ) depicting a pediatric emergency department. Subjects completed a Multitasking Ability Test (MTAT) and five VitalSigns:ED gameplays. The predictor variable was specialty choice, categorized into multitasking and non-multitasking groups. Outcome variables measuring efficiency and diagnostic accuracy were obtained from the MTAT and the game. The primary analysis was a Mann–Whitney U test, and secondary analyses employed Spearman Rank correlations. Results Twelve students applied to multitasking specialties and 18 applied to others. Those in the multitasking specialties had faster MTAT completions than the other cohort (29.8 vs. 59.7 sec, 95%CI difference -0.9 to -39.8 sec). Differential diagnoses were higher in multitasking specialties in VitalSigns:ED (2.03 vs. 1.06, 95%CI difference +0.05 to +1.54) but efficiency metrics in the game did not differ. Conclusion Multitasking and multi-patient care performance show some association with preferred specialty choices for MS-IIs prior to clinical exposure.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Barrett, Trevor J.; Sobhani, Mona; Fox, Glenn R.; Files, Benjamin; Patitsas, Nicholas; Duhaime, Josiah; Ebert, Rebecca; Faulk, Rob; Saxon, Leslie
Diverse predictors of early attrition in an elite Marine training school Journal Article
In: Military Psychology, vol. 34, no. 4, pp. 388–397, 2022, ISSN: 0899-5605, 1532-7876.
Links | BibTeX | Tags: CBC, DTIC
@article{barrett_diverse_2022,
title = {Diverse predictors of early attrition in an elite Marine training school},
author = {Trevor J. Barrett and Mona Sobhani and Glenn R. Fox and Benjamin Files and Nicholas Patitsas and Josiah Duhaime and Rebecca Ebert and Rob Faulk and Leslie Saxon},
url = {https://www.tandfonline.com/doi/full/10.1080/08995605.2021.1993721},
doi = {10.1080/08995605.2021.1993721},
issn = {0899-5605, 1532-7876},
year = {2022},
date = {2022-07-01},
urldate = {2022-09-27},
journal = {Military Psychology},
volume = {34},
number = {4},
pages = {388--397},
keywords = {CBC, DTIC},
pubstate = {published},
tppubtype = {article}
}
Speggiorin, Alessandro; Dalton, Jeffrey; Leuski, Anton
TaskMAD: A Platform for Multimodal Task-Centric Knowledge-Grounded Conversational Experimentation Inproceedings
In: Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 3240–3244, ACM, Madrid Spain, 2022, ISBN: 978-1-4503-8732-3.
Links | BibTeX | Tags: Dialogue, DTIC, UARC
@inproceedings{speggiorin_taskmad_2022,
title = {TaskMAD: A Platform for Multimodal Task-Centric Knowledge-Grounded Conversational Experimentation},
author = {Alessandro Speggiorin and Jeffrey Dalton and Anton Leuski},
url = {https://dl.acm.org/doi/10.1145/3477495.3531679},
doi = {10.1145/3477495.3531679},
isbn = {978-1-4503-8732-3},
year = {2022},
date = {2022-07-01},
urldate = {2022-09-22},
booktitle = {Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval},
pages = {3240--3244},
publisher = {ACM},
address = {Madrid Spain},
keywords = {Dialogue, DTIC, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Angelika-Nikita, Maria; Melo, Celso M.; Terada, Kazunori; Lucas, Gale; Gratch, Jonathan
The Impact of Partner Expressions on Felt Emotion in the Iterated Prisoner's Dilemma: An Event-level Analysis Miscellaneous
2022.
Abstract | Links | BibTeX | Tags:
@misc{angelika-nikita_impact_2022,
title = {The Impact of Partner Expressions on Felt Emotion in the Iterated Prisoner's Dilemma: An Event-level Analysis},
author = {Maria Angelika-Nikita and Celso M. Melo and Kazunori Terada and Gale Lucas and Jonathan Gratch},
url = {http://arxiv.org/abs/2207.00925},
doi = {10.48550/arXiv.2207.00925},
year = {2022},
date = {2022-07-01},
urldate = {2022-09-22},
publisher = {arXiv},
abstract = {Social games like the prisoner's dilemma are often used to develop models of the role of emotion in social decision-making. Here we examine an understudied aspect of emotion in such games: how an individual's feelings are shaped by their partner's expressions. Prior research has tended to focus on other aspects of emotion. Research on felt-emotion has focused on how an individual's feelings shape how they treat their partner, or whether these feelings are authentically expressed. Research on expressed-emotion has focused on how an individual's decisions are shaped by their partner's expressions, without regard for whether these expressions actually evoke feelings. Here, we use computer-generated characters to examine how an individual's moment-to-moment feelings are shaped by (1) how they are treated by their partner and (2) what their partner expresses during this treatment. Surprisingly, we find that partner expressions are far more important than actions in determining self-reported feelings. In other words, our partner can behave in a selfish and exploitive way, but if they show a collaborative pattern of expressions, we will feel greater pleasure collaborating with them. These results also emphasize the importance of context in determining how someone will feel in response to an expression (i.e., knowing a partner is happy is insufficient; we must know what they are happy-at). We discuss the implications of this work for cognitive-system design, emotion theory, and methodological practice in affective computing.},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Cleland, John G. F.; Bristow, Michael R.; Freemantle, Nicholas; Olshansky, Brian; Gras, Daniel; Saxon, Leslie; Tavazzi, Luigi; Boehmer, John; Ghio, Stefano; Feldman, Arthur M.; Daubert, Jean‐Claude; Mets, David
The effect of cardiac resynchronization without a defibrillator on morbidity and mortality: an individual patient data meta‐analysis of COMPANION and CARE-HF Journal Article
In: European J of Heart Fail, vol. 24, no. 6, pp. 1080–1090, 2022, ISSN: 1388-9842, 1879-0844.
Links | BibTeX | Tags: CBC
@article{cleland_effect_2022,
title = {The effect of cardiac resynchronization without a defibrillator on morbidity and mortality: an individual patient data meta‐analysis of COMPANION and CARE-HF},
author = {John G. F. Cleland and Michael R. Bristow and Nicholas Freemantle and Brian Olshansky and Daniel Gras and Leslie Saxon and Luigi Tavazzi and John Boehmer and Stefano Ghio and Arthur M. Feldman and Jean‐Claude Daubert and David Mets},
url = {https://onlinelibrary.wiley.com/doi/10.1002/ejhf.2524},
doi = {10.1002/ejhf.2524},
issn = {1388-9842, 1879-0844},
year = {2022},
date = {2022-06-01},
urldate = {2022-09-27},
journal = {European J of Heart Fail},
volume = {24},
number = {6},
pages = {1080--1090},
keywords = {CBC},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Winn, Jade; Kaurloto, Cari; Leeds, Andrew; Heylen, Dirk; Hartholt, Arno
Toward a Semi-Automated Scoping Review of Virtual Human Smiles Inproceedings
In: Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop, 2022.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans
@inproceedings{mozgai_toward_2022,
title = {Toward a Semi-Automated Scoping Review of Virtual Human Smiles},
author = {Sharon Mozgai and Jade Winn and Cari Kaurloto and Andrew Leeds and Dirk Heylen and Arno Hartholt},
url = {http://www.lrec-conf.org/proceedings/lrec2022/workshops/SmiLa/index.html},
year = {2022},
date = {2022-06-01},
booktitle = {Proceedings of the Smiling and Laughter across Contexts and the Life-span Workshop},
keywords = {DTIC, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tran, Minh; Soleymani, Mohammad
A Pre-Trained Audio-Visual Transformer for Emotion Recognition Inproceedings
In: ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 4698–4702, IEEE, Singapore, Singapore, 2022, ISBN: 978-1-66540-540-9.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{tran_pre-trained_2022,
title = {A Pre-Trained Audio-Visual Transformer for Emotion Recognition},
author = {Minh Tran and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9747278/},
doi = {10.1109/ICASSP43922.2022.9747278},
isbn = {978-1-66540-540-9},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-23},
booktitle = {ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {4698--4702},
publisher = {IEEE},
address = {Singapore, Singapore},
keywords = {DTIC, Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhu, Haidong; Zheng, Zhaoheng; Soleymani, Mohammad; Nevatia, Ram
Self-Supervised Learning for Sentiment Analysis via Image-Text Matching Inproceedings
In: ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1710–1714, IEEE, Singapore, Singapore, 2022, ISBN: 978-1-66540-540-9.
Links | BibTeX | Tags: Emotions
@inproceedings{zhu_self-supervised_2022,
title = {Self-Supervised Learning for Sentiment Analysis via Image-Text Matching},
author = {Haidong Zhu and Zhaoheng Zheng and Mohammad Soleymani and Ram Nevatia},
url = {https://ieeexplore.ieee.org/document/9747819/},
doi = {10.1109/ICASSP43922.2022.9747819},
isbn = {978-1-66540-540-9},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-23},
booktitle = {ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {1710--1714},
publisher = {IEEE},
address = {Singapore, Singapore},
keywords = {Emotions},
pubstate = {published},
tppubtype = {inproceedings}
}
Adami, Pooya; Rodrigues, Patrick B.; Woods, Peter J.; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale
Impact of VR-Based Training on Human–Robot Interaction for Remote Operating Construction Robots Journal Article
In: J. Comput. Civ. Eng., vol. 36, no. 3, pp. 04022006, 2022, ISSN: 0887-3801, 1943-5487.
Links | BibTeX | Tags: DTIC, UARC, Virtual Humans, VR
@article{adami_impact_2022,
title = {Impact of VR-Based Training on Human–Robot Interaction for Remote Operating Construction Robots},
author = {Pooya Adami and Patrick B. Rodrigues and Peter J. Woods and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale Lucas},
url = {https://ascelibrary.org/doi/10.1061/%28ASCE%29CP.1943-5487.0001016},
doi = {10.1061/(ASCE)CP.1943-5487.0001016},
issn = {0887-3801, 1943-5487},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-23},
journal = {J. Comput. Civ. Eng.},
volume = {36},
number = {3},
pages = {04022006},
keywords = {DTIC, UARC, Virtual Humans, VR},
pubstate = {published},
tppubtype = {article}
}
Pynadath, David V.; Dilkina, Bistra; Jeong, David C.; John, Richard S.; Marsella, Stacy C.; Merchant, Chirag; Miller, Lynn C.; Read, Stephen J.
Disaster world Journal Article
In: Comput Math Organ Theory, 2022, ISSN: 1572-9346.
Abstract | Links | BibTeX | Tags: DTIC, Social Simulation
@article{pynadath_disaster_2022,
title = {Disaster world},
author = {David V. Pynadath and Bistra Dilkina and David C. Jeong and Richard S. John and Stacy C. Marsella and Chirag Merchant and Lynn C. Miller and Stephen J. Read},
url = {https://doi.org/10.1007/s10588-022-09359-y},
doi = {10.1007/s10588-022-09359-y},
issn = {1572-9346},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-28},
journal = {Comput Math Organ Theory},
abstract = {Artificial intelligence (AI) research provides a rich source of modeling languages capable of generating socially plausible simulations of human behavior, while also providing a transparent ground truth that can support validation of social-science methods applied to that simulation. In this work, we leverage two established AI representations: decision-theoretic planning and recursive modeling. Decision-theoretic planning (specifically Partially Observable Markov Decision Processes) provides agents with quantitative models of their corresponding real-world entities’ subjective (and possibly incorrect) perspectives of ground truth in the form of probabilistic beliefs and utility functions. Recursive modeling gives an agent a theory of mind, which is necessary when a person’s (again, possibly incorrect) subjective perspectives are of another person, rather than of just his/her environment. We used PsychSim, a multiagent social-simulation framework combining these two AI frameworks, to build a general parameterized model of human behavior during disaster response, grounding the model in social-psychological theories to ensure social plausibility. We then instantiated that model into alternate ground truths for simulating population response to a series of natural disasters, namely, hurricanes. The simulations generate data in response to socially plausible instruments (e.g., surveys) that serve as input to the Ground Truth program’s designated research teams for them to conduct simulated social science. The simulation also provides a graphical ground truth and a set of outcomes to be used as the gold standard in evaluating the research teams’ inferences.},
keywords = {DTIC, Social Simulation},
pubstate = {published},
tppubtype = {article}
}
Schweitzer, Julie B.; Rizzo, Albert “Skip”
Virtual Reality and ADHD: Clinical Assessment and Treatment in the Metaverse Journal Article
In: The ADHD Report, vol. 30, no. 3, pp. 1–9, 2022, ISSN: 1065-8025.
Links | BibTeX | Tags: MedVR, VR
@article{schweitzer_virtual_2022,
title = {Virtual Reality and ADHD: Clinical Assessment and Treatment in the Metaverse},
author = {Julie B. Schweitzer and Albert “Skip” Rizzo},
url = {https://guilfordjournals.com/doi/abs/10.1521/adhd.2022.30.3.1},
doi = {10.1521/adhd.2022.30.3.1},
issn = {1065-8025},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-13},
journal = {The ADHD Report},
volume = {30},
number = {3},
pages = {1--9},
keywords = {MedVR, VR},
pubstate = {published},
tppubtype = {article}
}
Aris, Timothy; Ustun, Volkan; Kumar, Rajay
Learning to Take Cover on Geo-Specific Terrains via Reinforcement Learning Journal Article
In: FLAIRS, vol. 35, 2022, ISSN: 2334-0762.
Abstract | Links | BibTeX | Tags: DTIC, Integration Technology
@article{aris_learning_2022,
title = {Learning to Take Cover on Geo-Specific Terrains via Reinforcement Learning},
author = {Timothy Aris and Volkan Ustun and Rajay Kumar},
url = {https://journals.flvc.org/FLAIRS/article/view/130871},
doi = {10.32473/flairs.v35i.130871},
issn = {2334-0762},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-15},
journal = {FLAIRS},
volume = {35},
abstract = {This paper presents a reinforcement learning model designed to learn how to take cover on geo-specific terrains, an essential behavior component for military training simulations. Training of the models is performed on the Rapid Integration and Development Environment (RIDE) leveraging the Unity ML-Agents framework. We show that increasing the number of novel situations the agent is exposed to increases the performance on the test set. In addition, the trained models possess some ability to generalize across terrains, and it can also take less time to retrain an agent to a new terrain, if that terrain has a level of complexity less than or equal to the terrain it was previously trained on.},
keywords = {DTIC, Integration Technology},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Fast, Ed; Leeds, Andrew; Kim, Kevin; Gordon, Andrew; McCullough, Kyle; Ustun, Volkan; Mozgai, Sharon
Demonstrating the Rapid Integration & Development Environment (RIDE): Embodied Conversational Agent (ECA) and Multiagent Capabilities Inproceedings
In: Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems, pp. 1902–1904, International Foundation for Autonomous Agents and Multiagent Systems, Richland, SC, 2022, ISBN: 978-1-4503-9213-6.
Abstract | BibTeX | Tags: AI, DTIC, Integration Technology, Machine Learning, UARC, Virtual Humans
@inproceedings{hartholt_demonstrating_2022,
title = {Demonstrating the Rapid Integration & Development Environment (RIDE): Embodied Conversational Agent (ECA) and Multiagent Capabilities},
author = {Arno Hartholt and Ed Fast and Andrew Leeds and Kevin Kim and Andrew Gordon and Kyle McCullough and Volkan Ustun and Sharon Mozgai},
isbn = {978-1-4503-9213-6},
year = {2022},
date = {2022-05-01},
urldate = {2022-09-20},
booktitle = {Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems},
pages = {1902--1904},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Richland, SC},
series = {AAMAS '22},
abstract = {We demonstrate the Rapid Integration & Development Environment (RIDE), a research and development platform that enables rapid prototyping in support of multiagents and embodied conversational agents. RIDE is based on commodity game engines and includes a flexible architecture, system interoperability, and native support for artificial intelligence and machine learning frameworks.},
keywords = {AI, DTIC, Integration Technology, Machine Learning, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rodrigues, Patrick B.; Xiao, Yijing; Fukumura, Yoko E.; Awada, Mohamad; Aryal, Ashrant; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Ergonomic assessment of office worker postures using 3D automated joint angle assessment Journal Article
In: Advanced Engineering Informatics, vol. 52, pp. 101596, 2022, ISSN: 14740346.
Links | BibTeX | Tags: DTIC, Machine Learning, UARC
@article{rodrigues_ergonomic_2022,
title = {Ergonomic assessment of office worker postures using 3D automated joint angle assessment},
author = {Patrick B. Rodrigues and Yijing Xiao and Yoko E. Fukumura and Mohamad Awada and Ashrant Aryal and Burcin Becerik-Gerber and Gale Lucas and Shawn C. Roll},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1474034622000672},
doi = {10.1016/j.aei.2022.101596},
issn = {14740346},
year = {2022},
date = {2022-04-01},
urldate = {2022-09-26},
journal = {Advanced Engineering Informatics},
volume = {52},
pages = {101596},
keywords = {DTIC, Machine Learning, UARC},
pubstate = {published},
tppubtype = {article}
}
Fujiwara, Ken; Hoegen, Rens; Gratch, Jonathan; Dunbar, Norah E.
Synchrony facilitates altruistic decision making for non-human avatars Journal Article
In: Computers in Human Behavior, vol. 128, pp. 107079, 2022, ISSN: 07475632.
Links | BibTeX | Tags: DTIC, Virtual Humans
@article{fujiwara_synchrony_2022,
title = {Synchrony facilitates altruistic decision making for non-human avatars},
author = {Ken Fujiwara and Rens Hoegen and Jonathan Gratch and Norah E. Dunbar},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0747563221004027},
doi = {10.1016/j.chb.2021.107079},
issn = {07475632},
year = {2022},
date = {2022-03-01},
urldate = {2022-09-28},
journal = {Computers in Human Behavior},
volume = {128},
pages = {107079},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Visser, Ewart J. de; Topoglu, Yigit; Joshi, Shawn; Krueger, Frank; Phillips, Elizabeth; Gratch, Jonathan; Tossell, Chad C.; Ayaz, Hasan
Designing Man’s New Best Friend: Enhancing Human-Robot Dog Interaction through Dog-Like Framing and Appearance Journal Article
In: Sensors, vol. 22, no. 3, pp. 1287, 2022, ISSN: 1424-8220.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@article{de_visser_designing_2022,
title = {Designing Man’s New Best Friend: Enhancing Human-Robot Dog Interaction through Dog-Like Framing and Appearance},
author = {Ewart J. de Visser and Yigit Topoglu and Shawn Joshi and Frank Krueger and Elizabeth Phillips and Jonathan Gratch and Chad C. Tossell and Hasan Ayaz},
url = {https://www.mdpi.com/1424-8220/22/3/1287},
doi = {10.3390/s22031287},
issn = {1424-8220},
year = {2022},
date = {2022-02-01},
urldate = {2022-09-28},
journal = {Sensors},
volume = {22},
number = {3},
pages = {1287},
abstract = {To understand how to improve interactions with dog-like robots, we evaluated the importance of “dog-like” framing and physical appearance on interaction, hypothesizing multiple interactive benefits of each. We assessed whether framing Aibo as a puppy (i.e., in need of development) versus simply a robot would result in more positive responses and interactions. We also predicted that adding fur to Aibo would make it appear more dog-like, likable, and interactive. Twenty-nine participants engaged with Aibo in a 2 × 2 (framing × appearance) design by issuing commands to the robot. Aibo and participant behaviors were monitored per second, and evaluated via an analysis of commands issued, an analysis of command blocks (i.e., chains of commands), and using a T-pattern analysis of participant behavior. Participants were more likely to issue the “Come Here” command than other types of commands. When framed as a puppy, participants used Aibo’s dog name more often, praised it more, and exhibited more unique, interactive, and complex behavior with Aibo. Participants exhibited the most smiling and laughing behaviors with Aibo framed as a puppy without fur. Across conditions, after interacting with Aibo, participants felt Aibo was more trustworthy, intelligent, warm, and connected than at their initial meeting. This study shows the benefits of introducing a socially robotic agent with a particular frame and importance on realism (i.e., introducing the robot dog as a puppy) for more interactive engagement.},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Marge, Matthew; Espy-Wilson, Carol; Ward, Nigel G.; Alwan, Abeer; Artzi, Yoav; Bansal, Mohit; Blankenship, Gil; Chai, Joyce; Daumé, Hal; Dey, Debadeepta; Harper, Mary; Howard, Thomas; Kennington, Casey; Kruijff-Korbayová, Ivana; Manocha, Dinesh; Matuszek, Cynthia; Mead, Ross; Mooney, Raymond; Moore, Roger K.; Ostendorf, Mari; Pon-Barry, Heather; Rudnicky, Alexander I.; Scheutz, Matthias; Amant, Robert St.; Sun, Tong; Tellex, Stefanie; Traum, David; Yu, Zhou
Spoken language interaction with robots: Recommendations for future research Journal Article
In: Computer Speech & Language, vol. 71, pp. 101255, 2022, ISSN: 08852308.
Links | BibTeX | Tags: ARL, Dialogue
@article{marge_spoken_2022,
title = {Spoken language interaction with robots: Recommendations for future research},
author = {Matthew Marge and Carol Espy-Wilson and Nigel G. Ward and Abeer Alwan and Yoav Artzi and Mohit Bansal and Gil Blankenship and Joyce Chai and Hal Daumé and Debadeepta Dey and Mary Harper and Thomas Howard and Casey Kennington and Ivana Kruijff-Korbayová and Dinesh Manocha and Cynthia Matuszek and Ross Mead and Raymond Mooney and Roger K. Moore and Mari Ostendorf and Heather Pon-Barry and Alexander I. Rudnicky and Matthias Scheutz and Robert St. Amant and Tong Sun and Stefanie Tellex and David Traum and Zhou Yu},
url = {https://linkinghub.elsevier.com/retrieve/pii/S0885230821000620},
doi = {10.1016/j.csl.2021.101255},
issn = {08852308},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-23},
journal = {Computer Speech & Language},
volume = {71},
pages = {101255},
keywords = {ARL, Dialogue},
pubstate = {published},
tppubtype = {article}
}
Chawla, Kushal; Lucas, Gale; May, Jonathan; Gratch, Jonathan
Opponent Modeling in Negotiation Dialogues by Related Data Adaptation Inproceedings
In: Findings of the Association for Computational Linguistics: NAACL 2022, pp. 661–674, Association for Computational Linguistics, Seattle, United States, 2022.
Links | BibTeX | Tags: DTIC, Social Simulation, UARC
@inproceedings{chawla_opponent_2022,
title = {Opponent Modeling in Negotiation Dialogues by Related Data Adaptation},
author = {Kushal Chawla and Gale Lucas and Jonathan May and Jonathan Gratch},
url = {https://aclanthology.org/2022.findings-naacl.50},
doi = {10.18653/v1/2022.findings-naacl.50},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-26},
booktitle = {Findings of the Association for Computational Linguistics: NAACL 2022},
pages = {661--674},
publisher = {Association for Computational Linguistics},
address = {Seattle, United States},
keywords = {DTIC, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Baarslag, Tim; Kaisers, Michael; Gerding, Enrico H.; Jonker, Catholijn M.; Gratch, Jonathan
Self-sufficient, Self-directed, and Interdependent Negotiation Systems: A Roadmap Toward Autonomous Negotiation Agents Incollection
In: Karagözoğlu, Emin; Hyndman, Kyle B. (Ed.): Bargaining, pp. 387–406, Springer International Publishing, Cham, 2022, ISBN: 978-3-030-76665-8 978-3-030-76666-5.
Links | BibTeX | Tags: Virtual Humans
@incollection{baarslag_self-sufficient_2022,
title = {Self-sufficient, Self-directed, and Interdependent Negotiation Systems: A Roadmap Toward Autonomous Negotiation Agents},
author = {Tim Baarslag and Michael Kaisers and Enrico H. Gerding and Catholijn M. Jonker and Jonathan Gratch},
editor = {Emin Karagözoğlu and Kyle B. Hyndman},
url = {https://link.springer.com/10.1007/978-3-030-76666-5_18},
doi = {10.1007/978-3-030-76666-5_18},
isbn = {978-3-030-76665-8 978-3-030-76666-5},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-27},
booktitle = {Bargaining},
pages = {387--406},
publisher = {Springer International Publishing},
address = {Cham},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Hou, Yu; Chen, Meida; Volk, Rebekka; Soibelman, Lucio
Investigation on performance of RGB point cloud and thermal information data fusion for 3D building thermal map modeling using aerial images under different experimental conditions Journal Article
In: Journal of Building Engineering, vol. 45, pp. 103380, 2022, ISSN: 23527102.
Links | BibTeX | Tags: Graphics
@article{hou_investigation_2022,
title = {Investigation on performance of RGB point cloud and thermal information data fusion for 3D building thermal map modeling using aerial images under different experimental conditions},
author = {Yu Hou and Meida Chen and Rebekka Volk and Lucio Soibelman},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2352710221012389},
doi = {10.1016/j.jobe.2021.103380},
issn = {23527102},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-28},
journal = {Journal of Building Engineering},
volume = {45},
pages = {103380},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
Paun, Silviu; Artstein, Ron; Poesio, Massimo
Statistical Methods for Annotation Analysis Book
Springer International Publishing, Cham, 2022, ISBN: 978-3-031-03753-5 978-3-031-03763-4.
Links | BibTeX | Tags: AI, Natural Language
@book{paun_statistical_2022,
title = {Statistical Methods for Annotation Analysis},
author = {Silviu Paun and Ron Artstein and Massimo Poesio},
url = {https://link.springer.com/10.1007/978-3-031-03763-4},
doi = {10.1007/978-3-031-03763-4},
isbn = {978-3-031-03753-5 978-3-031-03763-4},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-28},
publisher = {Springer International Publishing},
address = {Cham},
keywords = {AI, Natural Language},
pubstate = {published},
tppubtype = {book}
}
Aster, Hans-Christoph; Romanos, Marcel; Walitza, Susanne; Gerlach, Manfred; Mühlberger, Andreas; Rizzo, Albert; Andreatta, Marta; Hasenauer, Natalie; Hartrampf, Philipp E.; Nerlich, Kai; Reiners, Christoph; Lorenz, Reinhard; Buck, Andreas K.; Deserno, Lorenz
Responsivity of the Striatal Dopamine System to Methylphenidate—A Within-Subject I-123-β-CIT-SPECT Study in Male Children and Adolescents With Attention-Deficit/Hyperactivity Disorder Journal Article
In: Frontiers in Psychiatry, vol. 13, 2022, ISSN: 1664-0640.
Abstract | Links | BibTeX | Tags: MedVR
@article{aster_responsivity_2022,
title = {Responsivity of the Striatal Dopamine System to Methylphenidate—A Within-Subject I-123-β-CIT-SPECT Study in Male Children and Adolescents With Attention-Deficit/Hyperactivity Disorder},
author = {Hans-Christoph Aster and Marcel Romanos and Susanne Walitza and Manfred Gerlach and Andreas Mühlberger and Albert Rizzo and Marta Andreatta and Natalie Hasenauer and Philipp E. Hartrampf and Kai Nerlich and Christoph Reiners and Reinhard Lorenz and Andreas K. Buck and Lorenz Deserno},
url = {https://www.frontiersin.org/articles/10.3389/fpsyt.2022.804730},
issn = {1664-0640},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-13},
journal = {Frontiers in Psychiatry},
volume = {13},
abstract = {Background: Methylphenidate (MPH) is the first-line pharmacological treatment of attention-deficit/hyperactivity disorder (ADHD). MPH binds to the dopamine (DA) transporter (DAT), which has high density in the striatum. Assessments of the striatal dopamine transporter by single positron emission computed tomography (SPECT) in childhood and adolescent patients are rare but can provide insight on how the effects of MPH affect DAT availability. The aim of our within-subject study was to investigate the effect of MPH on DAT availability and how responsivity to MPH in DAT availability is linked to clinical symptoms and cognitive functioning. Methods: Thirteen adolescent male patients (9–16 years) with a diagnosis of ADHD according to the DSM-IV and long-term stimulant medication (for at least 6 months) with MPH were assessed twice within 7 days using SPECT after application of I-123-β-CIT to examine DAT binding potential (DAT BP). SPECT measures took place in an on- and off-MPH status balanced for order across participants. A virtual reality continuous performance test was performed at each time point. Further clinical symptoms were assessed for baseline off-MPH. Results: On-MPH status was associated with a highly significant change (−29.9%) of striatal DAT BP as compared to off-MPH (t = −4.12},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
DiNinni, Richard; Rizzo, Albert
Sensing Human Signals of Motivation Processes During STEM Tasks Inproceedings
In: Rodrigo, Maria Mercedes; Matsuda, Noburu; Cristea, Alexandra I.; Dimitrova, Vania (Ed.): Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium, pp. 163–167, Springer International Publishing, Cham, 2022, ISBN: 978-3-031-11647-6.
Abstract | Links | BibTeX | Tags: DTIC, Learning Sciences
@inproceedings{dininni_sensing_2022,
title = {Sensing Human Signals of Motivation Processes During STEM Tasks},
author = {Richard DiNinni and Albert Rizzo},
editor = {Maria Mercedes Rodrigo and Noburu Matsuda and Alexandra I. Cristea and Vania Dimitrova},
doi = {10.1007/978-3-031-11647-6_28},
isbn = {978-3-031-11647-6},
year = {2022},
date = {2022-01-01},
booktitle = {Artificial Intelligence in Education. Posters and Late Breaking Results, Workshops and Tutorials, Industry and Innovation Tracks, Practitioners’ and Doctoral Consortium},
pages = {163--167},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {This paper outlines the linking of a multi-modal sensing platform with an Intelligent Tutoring System to perceive the motivational state of the learner during STEM tasks. Motivation is a critical element to learning but receives little attention in comparison to strategies related to cognitive processes. The EMPOWER project has developed a novel platform that offers researchers an opportunity to capture a learner’s multi-modal behavioral signals to develop models of motivation problems that can be used to develop best practice strategies for instructional systems.},
keywords = {DTIC, Learning Sciences},
pubstate = {published},
tppubtype = {inproceedings}
}
Stokes, Jared D.; Rizzo, Albert; Geng, Joy J.; Schweitzer, Julie B.
Measuring Attentional Distraction in Children With ADHD Using Virtual Reality Technology With Eye-Tracking Journal Article
In: Frontiers in Virtual Reality, vol. 3, 2022, ISSN: 2673-4192.
Abstract | Links | BibTeX | Tags: MedVR, VR
@article{stokes_measuring_2022,
title = {Measuring Attentional Distraction in Children With ADHD Using Virtual Reality Technology With Eye-Tracking},
author = {Jared D. Stokes and Albert Rizzo and Joy J. Geng and Julie B. Schweitzer},
url = {https://www.frontiersin.org/articles/10.3389/frvir.2022.855895},
issn = {2673-4192},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-13},
journal = {Frontiers in Virtual Reality},
volume = {3},
abstract = {Objective: Distractions inordinately impair attention in children with Attention-Deficit Hyperactivity Disorder (ADHD) but examining this behavior under real-life conditions poses a challenge for researchers and clinicians. Virtual reality (VR) technologies may mitigate the limitations of traditional laboratory methods by providing a more ecologically relevant experience. The use of eye-tracking measures to assess attentional functioning in a VR context in ADHD is novel. In this proof of principle project, we evaluate the temporal dynamics of distraction via eye-tracking measures in a VR classroom setting with 20 children diagnosed with ADHD between 8 and 12 years of age. Method: We recorded continuous eye movements while participants performed math, Stroop, and continuous performance test (CPT) tasks with a series of “real-world” classroom distractors presented. We analyzed the impact of the distractors on rates of on-task performance and on-task eye-gaze (i.e., looking at a classroom whiteboard) versus off-task eye-gaze (i.e., looking away from the whiteboard). Results: We found that while children did not always look at distractors themselves for long periods of time, the presence of a distractor disrupted on-task gaze at task-relevant whiteboard stimuli and lowered rates of task performance. This suggests that children with attention deficits may have a hard time returning to tasks once those tasks are interrupted, even if the distractor itself does not hold attention. Eye-tracking measures within the VR context can reveal rich information about attentional disruption. Conclusions: Leveraging virtual reality technology in combination with eye-tracking measures is well-suited to advance the understanding of mechanisms underlying attentional impairment in naturalistic settings. Assessment within these immersive and well-controlled simulated environments provides new options for increasing our understanding of distractibility and its potential impact on the development of interventions for children with ADHD.},
keywords = {MedVR, VR},
pubstate = {published},
tppubtype = {article}
}
Talbot, Thomas Brett; Chinara, Chinmay
Open Medical Gesture: An Open-Source Experiment in Naturalistic Physical Interactions for Mixed and Virtual Reality Simulations Inproceedings
In: 2022.
Abstract | Links | BibTeX | Tags: DTIC, MedVR, mixed reality, MR, virtual reality, VR
@inproceedings{brett_talbot_open_2022,
title = {Open Medical Gesture: An Open-Source Experiment in Naturalistic Physical Interactions for Mixed and Virtual Reality Simulations},
author = {Thomas Brett Talbot and Chinmay Chinara},
url = {https://openaccess.cms-conferences.org/#/publications/book/978-1-958651-26-1/article/978-1-958651-26-1_0},
doi = {10.54941/ahfe1002054},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-13},
abstract = {Mixed (MR) and Virtual Reality (VR) simulations are hampered by requirements for hand controllers or attempts to perseverate in use of two-dimensional computer interface paradigms from the 1980s. From our efforts to produce more naturalistic interactions for combat medic training for the military, we have developed an open-source toolkit that enables direct hand controlled responsive interactions that is sensor independent and can function with depth sensing cameras, webcams or sensory gloves. From this research and review of current literature, we have discerned several best approaches for hand-based human computer interactions which provide intuitive, responsive, useful, and low frustration experiences for VR users. The center of an effective gesture system is a universal hand model that can map to inputs from several different kinds of sensors rather than depending on a specific commercial product. Parts of the hand are effectors in simulation space with a physics-based model. Therefore, translational and rotational forces from the hands will impact physical objects in VR which varies based on the mass of the virtual objects. We incorporate computer code w/ objects, calling them “Smart Objects”, which allows such objects to have movement properties and collision detection for expected manipulation. Examples of smart objects include scissors, a ball, a turning knob, a moving lever, or a human figure with moving limbs. Articulation points contain collision detectors and code to assist in expected hand actions. We include a library of more than 40 Smart Objects in the toolkit. Thus, is it possible to throw a ball, hit that ball with a bat, cut a bandage, turn on a ventilator or to lift and inspect a human arm.We mediate the interaction of the hands with virtual objects. Hands often violate the rules of a virtual world simply by passing through objects. One must interpret user intent. This can be achieved by introducing stickiness of the hands to objects. If the human’s hands overshoot an object, we place the hand onto that object’s surface unless the hand passes the object by a significant distance. We also make hands and fingers contact an object according to the object’s contours and do not allow fingers to sink into the interior of an object. Haptics, or a sense of physical resistance and tactile sensation from contacting physical objects is a supremely difficult technical challenge and is an expensive pursuit. Our approach ignores true haptics, but we have experimented with an alternative approach, called audio tactile synesthesia where we substitute the sensation of touch for that of sound. The idea is to associate parts of each hand with a tone of a specific frequency upon contacting objects. The attack rate of the sound envelope varies with the velocity of contact and hardness of the object being ‘touched’. Such sounds can feel softer or harder depending on the nature of ‘touch’ being experienced. This substitution technique can provide tactile feedback through indirect, yet still naturalistic means. The artificial intelligence (AI) technique to determine discrete hand gestures and motions within the physical space is a special form of AI called Long Short Term Memory (LSTM). LSTM allows much faster and flexible recognition than other machine learning approaches. LSTM is particularly effective with points in motion. Latency of recognition is very low. 
In addition to LSTM, we employ other synthetic vision & object recognition AI to the discrimination of real-world objects. This allows for methods to conduct virtual simulations. For example, it is possible to pick up a virtual syringe and inject a medication into a virtual patient through hand motions. We track the hand points to contact with the virtual syringe. We also detect when the hand is compressing the syringe plunger. We could also use virtual medications & instruments on human actors or manikins, not just on virtual objects. With object recognition AI, we can place a syringe on a tray in the physical world. The human user can pick up the syringe and use it on a virtual patient. Thus, we are able to blend physical and virtual simulation together seamlessly in a highly intuitive and naturalistic manner.The techniques and technologies explained here represent a baseline capability whereby interacting in mixed and virtual reality can now be much more natural and intuitive than it has ever been. We have now passed a threshold where we can do away with game controllers and magnetic trackers for VR. This advancement will contribute to greater adoption of VR solutions. To foster this, our team has committed to freely sharing these technologies for all purposes and at no cost as an open-source tool. We encourage the scientific, research, educational and medical communities to adopt these resources and determine their effectiveness and utilize these tools and practices to grow the body of useful VR applications.},
keywords = {DTIC, MedVR, mixed reality, MR, virtual reality, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
Davis, Matt; Nye, Benjamin; Sinatra, Gale; Swartout, William; Sjöberg, Molly; Porter, Molly; Nelson, David; Kennedy, Alana; Herrick, Imogen; Weeks, Danaan DeNeve; Lindsey, Emily
Designing scientifically-grounded paleoart for augmented reality at La Brea Tar Pits Journal Article
In: Palaeontol Electron, 2022, ISSN: 19353952, 10948074.
Links | BibTeX | Tags: AR, MxR, VR
@article{davis_designing_2022,
title = {Designing scientifically-grounded paleoart for augmented reality at La Brea Tar Pits},
author = {Matt Davis and Benjamin Nye and Gale Sinatra and William Swartout and Molly Sjöberg and Molly Porter and David Nelson and Alana Kennedy and Imogen Herrick and Danaan DeNeve Weeks and Emily Lindsey},
url = {https://palaeo-electronica.org/content/2022/3524-la-brea-tar-pits-paleoart},
doi = {10.26879/1191},
issn = {19353952, 10948074},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-15},
journal = {Palaeontol Electron},
keywords = {AR, MxR, VR},
pubstate = {published},
tppubtype = {article}
}
Zhou, Jincheng; Ustun, Volkan
PySigma: Towards Enhanced Grand Unification for the Sigma Cognitive Architecture Incollection
In: Goertzel, Ben; Iklé, Matthew; Potapov, Alexey (Ed.): Artificial General Intelligence, vol. 13154, pp. 355–366, Springer International Publishing, Cham, 2022, ISBN: 978-3-030-93757-7 978-3-030-93758-4.
Links | BibTeX | Tags: CogArch, Cognitive Architecture, DTIC, UARC
@incollection{zhou_pysigma_2022,
title = {PySigma: Towards Enhanced Grand Unification for the Sigma Cognitive Architecture},
author = {Jincheng Zhou and Volkan Ustun},
editor = {Ben Goertzel and Matthew Iklé and Alexey Potapov},
url = {https://link.springer.com/10.1007/978-3-030-93758-4_36},
doi = {10.1007/978-3-030-93758-4_36},
isbn = {978-3-030-93757-7 978-3-030-93758-4},
year = {2022},
date = {2022-01-01},
urldate = {2022-09-21},
booktitle = {Artificial General Intelligence},
volume = {13154},
pages = {355--366},
publisher = {Springer International Publishing},
address = {Cham},
keywords = {CogArch, Cognitive Architecture, DTIC, UARC},
pubstate = {published},
tppubtype = {incollection}
}
2021
Tran, Minh; Bradley, Ellen; Matvey, Michelle; Woolley, Joshua; Soleymani, Mohammad
Modeling Dynamics of Facial Behavior for Mental Health Assessment Inproceedings
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–5, IEEE, Jodhpur, India, 2021, ISBN: 978-1-66543-176-7.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{tran_modeling_2021,
title = {Modeling Dynamics of Facial Behavior for Mental Health Assessment},
author = {Minh Tran and Ellen Bradley and Michelle Matvey and Joshua Woolley and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9666955/},
doi = {10.1109/FG52635.2021.9666955},
isbn = {978-1-66543-176-7},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-23},
booktitle = {2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)},
pages = {1--5},
publisher = {IEEE},
address = {Jodhpur, India},
keywords = {DTIC, Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Yin, Yufeng; Lu, Liupei; Wu, Yizhen; Soleymani, Mohammad
Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection Inproceedings
In: 2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), pp. 1–8, IEEE, Jodhpur, India, 2021, ISBN: 978-1-66543-176-7.
Links | BibTeX | Tags: DTIC, Emotions, Virtual Humans
@inproceedings{yin_self-supervised_2021,
title = {Self-Supervised Patch Localization for Cross-Domain Facial Action Unit Detection},
author = {Yufeng Yin and Liupei Lu and Yizhen Wu and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9667048/},
doi = {10.1109/FG52635.2021.9667048},
isbn = {978-1-66543-176-7},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-23},
booktitle = {2021 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021)},
pages = {1--8},
publisher = {IEEE},
address = {Jodhpur, India},
keywords = {DTIC, Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ionescu, Alina; Daele, Tom Van; Rizzo, Albert; Blair, Carolyn; Best, Paul
360° Videos for Immersive Mental Health Interventions: a Systematic Review Journal Article
In: J. technol. behav. sci., vol. 6, no. 4, pp. 631–651, 2021, ISSN: 2366-5963.
Abstract | Links | BibTeX | Tags: MedVR, VR
@article{ionescu_360_2021,
title = {360° Videos for Immersive Mental Health Interventions: a Systematic Review},
author = {Alina Ionescu and Tom Van Daele and Albert Rizzo and Carolyn Blair and Paul Best},
url = {https://doi.org/10.1007/s41347-021-00221-7},
doi = {10.1007/s41347-021-00221-7},
issn = {2366-5963},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-13},
journal = {J. technol. behav. sci.},
volume = {6},
number = {4},
pages = {631--651},
abstract = {Virtual reality is increasingly recognized as a powerful method for clinical interventions in the mental health field, but has yet to achieve mainstream adoption in routine mental healthcare settings. A similar, yet slightly different technology, immersive 360° videos might have the potential to cover this gap, by requiring both lower costs and less technical skills to construct and operate such virtual environments. This systematic review therefore aims to identify, evaluate, and summarize mental health interventions using immersive 360° videos to support an understanding of their implementation in daily clinical practice. The quality of the 14 selected studies was evaluated using a critical appraisal tool, addressing populations with clinical levels of psychopathological symptoms, somatic conditions associated with psychological implications, and other at-risk groups. Immersive 360° videos successfully increased users’ feelings of presence, given their realistic features, and therefore yielded positive outcomes in clinical interventions where presence is considered as an essential precondition. Because the technical skills required to create immersive 360° video footage are fairly limited, most of the interventions using this approach have been created by mental health researchers or clinicians themselves. Immersive 360° videos are still in an early phase of implementation as a tool for clinical interventions for mental health, resulting in high heterogeneity in focus, procedures, and research designs. An important next step for making use of this technology may therefore involve the creation of standardized procedures, as a means to increase the quality of research and evidence-based interventions.},
keywords = {MedVR, VR},
pubstate = {published},
tppubtype = {article}
}
Liu, Lixing; Gurney, Nikolos; McCullough, Kyle; Ustun, Volkan
Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations Inproceedings
In: 2021 Winter Simulation Conference (WSC), pp. 1–12, IEEE, Phoenix, AZ, USA, 2021, ISBN: 978-1-66543-311-2.
Links | BibTeX | Tags: DTIC, Learning Sciences, UARC, Virtual Humans
@inproceedings{liu_graph_2021,
title = {Graph Neural Network Based Behavior Prediction to Support Multi-Agent Reinforcement Learning in Military Training Simulations},
author = {Lixing Liu and Nikolos Gurney and Kyle McCullough and Volkan Ustun},
url = {https://ieeexplore.ieee.org/document/9715433/},
doi = {10.1109/WSC52266.2021.9715433},
isbn = {978-1-66543-311-2},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-21},
booktitle = {2021 Winter Simulation Conference (WSC)},
pages = {1--12},
publisher = {IEEE},
address = {Phoenix, AZ, USA},
keywords = {DTIC, Learning Sciences, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Jiaman; Villegas, Ruben; Ceylan, Duygu; Yang, Jimei; Kuang, Zhengfei; Li, Hao; Zhao, Yajie
Task-Generic Hierarchical Human Motion Prior using VAEs Inproceedings
In: 2021 International Conference on 3D Vision (3DV), pp. 771–781, IEEE, London, United Kingdom, 2021, ISBN: 978-1-66542-688-6.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{li_task-generic_2021,
title = {Task-Generic Hierarchical Human Motion Prior using VAEs},
author = {Jiaman Li and Ruben Villegas and Duygu Ceylan and Jimei Yang and Zhengfei Kuang and Hao Li and Yajie Zhao},
url = {https://ieeexplore.ieee.org/document/9665881/},
doi = {10.1109/3DV53792.2021.00086},
isbn = {978-1-66542-688-6},
year = {2021},
date = {2021-12-01},
urldate = {2022-09-22},
booktitle = {2021 International Conference on 3D Vision (3DV)},
pages = {771--781},
publisher = {IEEE},
address = {London, United Kingdom},
keywords = {DTIC, UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Awada, Mohamad; Becerik-Gerber, Burcin; Lucas, Gale; Roll, Shawn C.
Associations Among Home Indoor Environmental Quality Factors and Worker Health While Working From Home During COVID-19 Pandemic Journal Article
In: ASME Journal of Engineering for Sustainable Buildings and Cities, vol. 2, no. 4, pp. 041001, 2021, ISSN: 2642-6641, 2642-6625.
Abstract | Links | BibTeX | Tags: DTIC, UARC
@article{awada_associations_2021,
title = {Associations Among Home Indoor Environmental Quality Factors and Worker Health While Working From Home During COVID-19 Pandemic},
author = {Mohamad Awada and Burcin Becerik-Gerber and Gale Lucas and Shawn C. Roll},
url = {https://asmedigitalcollection.asme.org/sustainablebuildings/article/2/4/041001/1122847/Associations-Among-Home-Indoor-Environmental},
doi = {10.1115/1.4052822},
issn = {2642-6641, 2642-6625},
year = {2021},
date = {2021-11-01},
urldate = {2022-09-26},
journal = {ASME Journal of Engineering for Sustainable Buildings and Cities},
volume = {2},
number = {4},
pages = {041001},
abstract = {Abstract The outbreak of SARS-CoV-2 virus forced office workers to conduct their daily work activities from home over an extended period. Given this unique situation, an opportunity emerged to study the satisfaction of office workers with indoor environmental quality (IEQ) factors of their houses where work activities took place and associate these factors with mental and physical health. We designed and administered a questionnaire that was open for 45 days during the COVID-19 pandemic and received valid data from 988 respondents. The results show that low satisfaction with natural lighting, glare, and humidity predicted eye-related symptoms, while low satisfaction with noise was a strong predictor of fatigue or tiredness, headaches or migraines, anxiety, and depression or sadness. Nose- and throat-related symptoms and skin-related symptoms were only uniquely predicted by low satisfaction with humidity. Low satisfaction with glare uniquely predicted an increase in musculoskeletal discomfort. Symptoms related to mental stress, rumination, or worry were predicted by low satisfaction with air quality and noise. Finally, low satisfaction with noise and indoor temperature predicted the prevalence of symptoms related to trouble concentrating, maintaining attention, or focus. Workers with higher income were more satisfied with humidity, air quality, and indoor temperature and had better overall mental health. Older individuals had increased satisfaction with natural lighting, humidity, air quality, noise, and indoor temperature. Findings from this study can inform future design practices that focus on hybrid home-work environments by highlighting the impact of IEQ factors on occupant well-being.},
keywords = {DTIC, UARC},
pubstate = {published},
tppubtype = {article}
}
We've Entered a New Era of Streaming Health Care. Now What? Journal Article
In: IEEE Spectrum, 2021.
Abstract | Links | BibTeX | Tags: CBC
@article{noauthor_weve_2021,
title = {We've Entered a New Era of Streaming Health Care. Now What?},
url = {https://spectrum.ieee.org/digital-health},
year = {2021},
date = {2021-11-01},
urldate = {2022-09-29},
journal = {IEEE Spectrum},
abstract = {COVID-19 forced the transition to digital medicine, but there's much still to do.},
keywords = {CBC},
pubstate = {published},
tppubtype = {article}
}
Schuller, Björn W.; Picard, Rosalind; André, Elisabeth; Gratch, Jonathan; Tao, Jianhua
Intelligent Signal Processing for Affective Computing [From the Guest Editors] Journal Article
In: IEEE Signal Process. Mag., vol. 38, no. 6, pp. 9–11, 2021, ISSN: 1053-5888, 1558-0792.
Links | BibTeX | Tags: Emotions, Virtual Humans
@article{schuller_intelligent_2021,
title = {Intelligent Signal Processing for Affective Computing [From the Guest Editors]},
author = {Björn W. Schuller and Rosalind Picard and Elisabeth André and Jonathan Gratch and Jianhua Tao},
url = {https://ieeexplore.ieee.org/document/9591500/},
doi = {10.1109/MSP.2021.3096415},
issn = {1053-5888, 1558-0792},
year = {2021},
date = {2021-11-01},
urldate = {2022-09-29},
journal = {IEEE Signal Process. Mag.},
volume = {38},
number = {6},
pages = {9--11},
keywords = {Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Leeds, Andrew; Kwok, David; Fast, Ed; Rizzo, Albert Skip; Hartholt, Arno
Building BRAVEMIND Vietnam: User-Centered Design for Virtual Reality Exposure Therapy Inproceedings
In: 2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR), pp. 247–250, 2021.
Abstract | Links | BibTeX | Tags: DTIC, MedVR, VR
@inproceedings{mozgai_building_2021,
title = {Building BRAVEMIND Vietnam: User-Centered Design for Virtual Reality Exposure Therapy},
author = {Sharon Mozgai and Andrew Leeds and David Kwok and Ed Fast and Albert Skip Rizzo and Arno Hartholt},
doi = {10.1109/AIVR52153.2021.00056},
year = {2021},
date = {2021-11-01},
booktitle = {2021 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)},
pages = {247--250},
abstract = {There has been a recent resurgence of Vietnam-era Veterans seeking Veteran Affairs (VA) services for post-traumatic stress disorder (PTSD). Multiple studies have reported positive outcomes using Virtual Reality Exposure Therapy (VRET) with Iraq/Afghanistan Veteran PTSD patients, but there have been fewer investigations into the acceptability and feasibility of VRET with older Veterans. We have extended an established VRET system, BRAVEMIND, to include Vietnam relevant content following a user-centered iterative design methodology. This paper will present the BRAVEMIND Vietnam VRET system, including setup, content, and technical architecture along with the findings of an initial focus group with Vietnam Veterans. These Veterans rated this system to be acceptable as a treatment tool for combat-related PTSD and provided valuable first-person accounts of their time in Vietnam to help guide the evolution of the VR content.},
keywords = {DTIC, MedVR, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; McCullough, Kyle; Fast, Ed; Leeds, Andrew; Mozgai, Sharon; Aris, Tim; Ustun, Volkan; Gordon, Andrew; McGroarty, Christopher
Rapid Prototyping for Simulation and Training with the Rapid Integration & Development Environment (RIDE) Inproceedings
In: 2021.
BibTeX | Tags: AI, DTIC, Integration Technology, Machine Learning, Simulation, UARC
@inproceedings{hartholt_rapid_2021,
title = {Rapid Prototyping for Simulation and Training with the Rapid Integration & Development Environment (RIDE)},
author = {Arno Hartholt and Kyle McCullough and Ed Fast and Andrew Leeds and Sharon Mozgai and Tim Aris and Volkan Ustun and Andrew Gordon and Christopher McGroarty},
year = {2021},
date = {2021-11-01},
keywords = {AI, DTIC, Integration Technology, Machine Learning, Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Xiang, Sitao; Gu, Yuming; Xiang, Pengda; Chai, Menglei; Li, Hao; Zhao, Yajie; He, Mingming
DisUnknown: Distilling Unknown Factors for Disentanglement Learning Inproceedings
In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 14790–14799, IEEE, Montreal, QC, Canada, 2021, ISBN: 978-1-66542-812-5.
Links | BibTeX | Tags: DTIC, UARC, VGL
@inproceedings{xiang_disunknown_2021,
title = {DisUnknown: Distilling Unknown Factors for Disentanglement Learning},
author = {Sitao Xiang and Yuming Gu and Pengda Xiang and Menglei Chai and Hao Li and Yajie Zhao and Mingming He},
url = {https://ieeexplore.ieee.org/document/9709965/},
doi = {10.1109/ICCV48922.2021.01454},
isbn = {978-1-66542-812-5},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-23},
booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
pages = {14790--14799},
publisher = {IEEE},
address = {Montreal, QC, Canada},
keywords = {DTIC, UARC, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Kontogiorgos, Dimosthenis; Tran, Minh; Gustafson, Joakim; Soleymani, Mohammad
A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures Inproceedings
In: Proceedings of the 2021 International Conference on Multimodal Interaction, pp. 112–120, ACM, Montréal QC Canada, 2021, ISBN: 978-1-4503-8481-0.
Links | BibTeX | Tags: DTIC, Emotions, UARC, Virtual Humans
@inproceedings{kontogiorgos_systematic_2021,
title = {A Systematic Cross-Corpus Analysis of Human Reactions to Robot Conversational Failures},
author = {Dimosthenis Kontogiorgos and Minh Tran and Joakim Gustafson and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3462244.3479887},
doi = {10.1145/3462244.3479887},
isbn = {978-1-4503-8481-0},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-23},
booktitle = {Proceedings of the 2021 International Conference on Multimodal Interaction},
pages = {112--120},
publisher = {ACM},
address = {Montréal QC Canada},
keywords = {DTIC, Emotions, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Adami, Pooya; Rodrigues, Patrick B.; Woods, Peter J.; Becerik-Gerber, Burcin; Soibelman, Lucio; Copur-Gencturk, Yasemin; Lucas, Gale
Effectiveness of VR-based training on improving construction workers’ knowledge, skills, and safety behavior in robotic teleoperation Journal Article
In: Advanced Engineering Informatics, vol. 50, pp. 101431, 2021, ISSN: 14740346.
Links | BibTeX | Tags: DTIC, Learning Sciences, UARC, VR
@article{adami_effectiveness_2021,
title = {Effectiveness of VR-based training on improving construction workers’ knowledge, skills, and safety behavior in robotic teleoperation},
author = {Pooya Adami and Patrick B. Rodrigues and Peter J. Woods and Burcin Becerik-Gerber and Lucio Soibelman and Yasemin Copur-Gencturk and Gale Lucas},
url = {https://linkinghub.elsevier.com/retrieve/pii/S147403462100183X},
doi = {10.1016/j.aei.2021.101431},
issn = {14740346},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-26},
journal = {Advanced Engineering Informatics},
volume = {50},
pages = {101431},
keywords = {DTIC, Learning Sciences, UARC, VR},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Lucas, Gale M.
Emotionally resonant media Incollection
In: Routledge International Handbook of Emotions and Media, pp. 285–302, Routledge, London, 2021, ISBN: 978-0-429-46575-8.
Links | BibTeX | Tags: Emotions
@incollection{gratch_emotionally_2021,
title = {Emotionally resonant media},
author = {Jonathan Gratch and Gale M. Lucas},
url = {https://www.taylorfrancis.com/books/9780429465758/chapters/10.4324/9780429465758-18},
doi = {10.4324/9780429465758-18},
isbn = {978-0-429-46575-8},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-28},
booktitle = {Routledge International Handbook of Emotions and Media},
pages = {285--302},
publisher = {Routledge},
address = {London},
edition = {2},
keywords = {Emotions},
pubstate = {published},
tppubtype = {incollection}
}
Hou, Yu; Chen, Meida; Volk, Rebekka; Soibelman, Lucio
An Approach to Semantically Segmenting Building Components and Outdoor Scenes Based on Multichannel Aerial Imagery Datasets Journal Article
In: Remote Sensing, vol. 13, no. 21, pp. 4357, 2021, ISSN: 2072-4292.
Abstract | Links | BibTeX | Tags:
@article{hou_approach_2021,
title = {An Approach to Semantically Segmenting Building Components and Outdoor Scenes Based on Multichannel Aerial Imagery Datasets},
author = {Yu Hou and Meida Chen and Rebekka Volk and Lucio Soibelman},
url = {https://www.mdpi.com/2072-4292/13/21/4357},
doi = {10.3390/rs13214357},
issn = {2072-4292},
year = {2021},
date = {2021-10-01},
urldate = {2022-09-28},
journal = {Remote Sensing},
volume = {13},
number = {21},
pages = {4357},
abstract = {As-is building modeling plays an important role in energy audits and retrofits. However, in order to understand the source(s) of energy loss, researchers must know the semantic information of the buildings and outdoor scenes. Thermal information can potentially be used to distinguish objects that have similar surface colors but are composed of different materials. To utilize both the red–green–blue (RGB) color model and thermal information for the semantic segmentation of buildings and outdoor scenes, we deployed and adapted various pioneering deep convolutional neural network (DCNN) tools that combine RGB information with thermal information to improve the semantic and instance segmentation processes. When both types of information are available, the resulting DCNN models allow us to achieve better segmentation performance. By deploying three case studies, we experimented with our proposed DCNN framework, deploying datasets of building components and outdoor scenes, and testing the models to determine whether the segmentation performance had improved or not. In our observation, the fusion of RGB and thermal information can help the segmentation task in specific cases, but it might also make the neural networks hard to train or deteriorate their prediction performance in some cases. Additionally, different algorithms perform differently in semantic and instance segmentation.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}