Publications
Search
Pallavicini, Federica; Giglioli, Irene Alice Chicchi; Kim, Gerard Jounghyun; Alcañiz, Mariano; Rizzo, Albert
Editorial: Virtual Reality, Augmented Reality and Video Games for Addressing the Impact of COVID-19 on Mental Health Journal Article
In: Frontiers in Virtual Reality, vol. 2, 2021, ISSN: 2673-4192.
@article{pallavicini_editorial_2021,
title = {Editorial: Virtual Reality, Augmented Reality and Video Games for Addressing the Impact of {COVID-19} on Mental Health},
author = {Federica Pallavicini and Irene Alice Chicchi Giglioli and Gerard Jounghyun Kim and Mariano Alcañiz and Albert Rizzo},
url = {https://www.frontiersin.org/articles/10.3389/frvir.2021.719358},
doi = {10.3389/frvir.2021.719358},
issn = {2673-4192},
year = {2021},
date = {2021-01-01},
urldate = {2023-03-31},
journal = {Frontiers in Virtual Reality},
volume = {2},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Nye, Benjamin D.; Core, Mark G.; Jaiswa, Shikhar; Ghosal, Aviroop; Auerbach, Daniel
Acting Engaged: Leveraging Play Persona Archetypes for Semi-Supervised Classification of Engagement Technical Report
International Educational Data Mining Society 2021, (Publication Title: International Educational Data Mining Society ERIC Number: ED615498).
@techreport{nye_acting_2021,
  author      = {Benjamin D. Nye and Mark G. Core and Shikhar Jaiswa and Aviroop Ghosal and Daniel Auerbach},
  title       = {Acting Engaged: Leveraging Play Persona Archetypes for Semi-Supervised Classification of Engagement},
  institution = {International Educational Data Mining Society},
  year        = {2021},
  date        = {2021-01-01},
  url         = {https://eric.ed.gov/?id=ED615498},
  urldate     = {2023-03-31},
  abstract    = {Engaged and disengaged behaviors have been studied across a variety of educational contexts. However, tools to analyze engagement typically require custom-coding and calibration for a system. This limits engagement detection to systems where experts are available to study patterns and build detectors. This work studies a new approach to classify engagement patterns without expert input, by using a play persona methodology where labeled archetype data is generated by novice testers acting out different engagement patterns in a system. Domain-agnostic task features (e.g., response time to an activity, scores/correctness, task difficulty) are extracted from standardized data logs for both archetype and authentic user sessions. A semi-supervised methodology was used to label engagement; bottom-up clusters were combined with archetype data to build a classifier. This approach was analyzed with a focus on cold-start performance on small samples, using two metrics: consistency with larger full-sample cluster assignments and stability of points staying in the same cluster once assigned. These were compared against a baseline of clustering without an incrementally trained classifier. Findings on a data set from a branching multiple-choice scenario-based tutoring system indicated that approximately 52 unlabeled samples and 51 play-test labeled samples were sufficient to classify holdout sessions at 85% consistency with a full set of 145 unsupervised samples. Additionally, alignment to play persona samples for the full set matched expert labels for clusters. Use-cases and limitations of this approach are discussed. [For the full proceedings, see ED615472.]},
  note        = {Publication Title: International Educational Data Mining Society
ERIC Number: ED615498},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Nye, Benjamin D.; Core, Mark G.; Ghosal, Aviroop; Walker, Peter B.
Metrics for Engagement in Games and Simulations for Learning Book Section
In: Using Cognitive and Affective Metrics in Educational Simulations and Games, Routledge, 2021, (Num Pages: 24).
@incollection{nye_metrics_2021,
title = {Metrics for Engagement in Games and Simulations for Learning},
author = {Benjamin D. Nye and Mark G. Core and Aviroop Ghosal and Peter B. Walker},
url = {https://www.taylorfrancis.com/chapters/edit/10.4324/9780429282201-5/metrics-engagement-games-simulations-learning-benjamin-nye-mark-core-aviroop-ghosal-peter-walker},
doi = {10.4324/9780429282201-5},
year = {2021},
date = {2021-01-01},
booktitle = {Using Cognitive and Affective Metrics in Educational Simulations and Games},
publisher = {Routledge},
abstract = {Games and simulations can be more engaging than other educational tools (e.g., textbooks, videos, problem sets), and this engagement can lead to improved short- and long-term learning. However, engagement in game-based learning is not automatic, and instead requires iterative design. In this work, we explore and compare metrics from research on learning sciences and from game design, considering different time scales of human action, ranging from biological engagement (e.g., eye gaze) up to lasting social ties (e.g., community building). Certain game-design approaches used for commercial games may be useful for game-based learning, such as establishing bottom-line metrics aligned to why the game was built or analyzing engagement in terms of facets or archetypes rather than on a unidirectional scale. Further research is required to study the interaction between engagement at different time scales, particularly for cases where higher long-term engagement is indicated by lower short-term engagement (e.g., skipping easy content).},
note = {Num Pages: 24},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Chen, Meida; Feng, Andrew; McCullough, Kyle; Prasad, Pratusha Bhuvana; McAlinden, Ryan; Soibelman, Lucio
3D Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework Journal Article
In: Journal of Computing in Civil Engineering, vol. 34, no. 6, 2020, ISSN: 0887-3801, 1943-5487.
@article{chen_3d_2020,
  author    = {Meida Chen and Andrew Feng and Kyle McCullough and Pratusha Bhuvana Prasad and Ryan McAlinden and Lucio Soibelman},
  title     = {3D Photogrammetry Point Cloud Segmentation Using a Model Ensembling Framework},
  journal   = {Journal of Computing in Civil Engineering},
  volume    = {34},
  number    = {6},
  year      = {2020},
  date      = {2020-11-01},
  doi       = {10.1061/(ASCE)CP.1943-5487.0000929},
  url       = {http://ascelibrary.org/doi/10.1061/%28ASCE%29CP.1943-5487.0000929},
  issn      = {0887-3801, 1943-5487},
  abstract  = {The US Army is paying increased attention to the development of rapid three-dimensional (3D) reconstruction using photogrammetry and unmanned aerial vehicle (UAV) technologies for creating virtual environments and simulations in areas of interest. The ability of the intelligence community, mission commanders, and front-line soldiers to understand their deployed physical environment in advance is critical in the planning and rehearsal phases of any military operation. In order to achieve various simulation capabilities such as destruction operations, route planning, and explosive-standoff distances computation among others, reconstructed 3D data needs to be properly attributed. In this paper, we introduce a model ensembling framework for segmenting a 3D photogrammetry point cloud into top-level terrain elements (i.e., ground, human-made objects, and vegetation). Preprocessing and postprocessing methods were designed to overcome the data segmentation challenges posed by photogrammetric data-quality issues. A large UAV-based photogrammetric database was created for validation purposes. The designed model ensembling framework was compared with existing point cloud segmentation algorithms, and it outperformed other algorithms and achieved the best F1-score. Because the ultimate goal of segmenting a photogrammetric-generated point cloud is to create realistic virtual environments for simulation. Qualitative results for creating virtual environments using the segmented data are also discussed in this paper. DOI: 10.1061/(ASCE)CP.1943-5487.0000929. © 2020 American Society of Civil Engineers.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Skoll, Devin; Miller, Jennifer C.; Saxon, Leslie A.
COVID-19 testing and infection surveillance: Is a combined digital contact-tracing and mass-testing solution feasible in the United States? Journal Article
In: Cardiovascular Digital Health Journal, vol. 1, no. 3, pp. 149–159, 2020, ISSN: 2666-6936.
@article{skoll_covid-19_2020,
title = {{COVID-19} testing and infection surveillance: Is a combined digital contact-tracing and mass-testing solution feasible in the {United States}?},
author = {Devin Skoll and Jennifer C. Miller and Leslie A. Saxon},
url = {https://www.sciencedirect.com/science/article/pii/S2666693620300360},
doi = {10.1016/j.cvdhj.2020.09.004},
issn = {2666-6936},
year = {2020},
date = {2020-11-01},
urldate = {2023-03-31},
journal = {Cardiovascular Digital Health Journal},
volume = {1},
number = {3},
pages = {149--159},
abstract = {Background
In December 2019, the novel COVID-19 virus spread from a cluster of pneumonia cases in Wuhan, China, to every corner of the globe, creating a worldwide pandemic pushing hospital systems past capacity and bringing economies worldwide to a halt. The COVID-19 pandemic is unique in comparison to prior coronavirus epidemics in its superior ability to be spread by asymptomatic and presymptomatic patients, allowing the virus to silently evade traditional symptoms-based screening approaches. Countries have implemented cutting-edge digital solutions to enhance traditional contact-tracing methodologies in combination with novel testing strategies to combat the virus, with variable levels of success. Despite having one of the most advanced and expensive health care systems in the world, the United States (U.S.) response is arguably one of the world’s largest failures, as it leads the globe in case number as well as deaths. Until a successful vaccine can be broadly distributed, it is imperative that the U.S. curb the viral spread by rapidly developing a framework implementing both enhanced tracing and testing strategies balancing the needs of public health while respecting individual liberties. This review will explore the role of technology-augmented contact-based surveillance in tracking the outbreak in select countries in comparison to the current U.S. approach. It will evaluate barriers in the U.S. to implementing similar technologies, focusing on privacy concerns and a lack of unified testing and tracing strategy. Finally, it will explore strategies for rapidly scaling testing in a cost-effective manner.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
In December 2019, the novel COVID-19 virus spread from a cluster of pneumonia cases in Wuhan, China, to every corner of the globe, creating a worldwide pandemic pushing hospital systems past capacity and bringing economies worldwide to a halt. The COVID-19 pandemic is unique in comparison to prior coronavirus epidemics in its superior ability to be spread by asymptomatic and presymptomatic patients, allowing the virus to silently evade traditional symptoms-based screening approaches. Countries have implemented cutting-edge digital solutions to enhance traditional contact-tracing methodologies in combination with novel testing strategies to combat the virus, with variable levels of success. Despite having one of the most advanced and expensive health care systems in the world, the United States (U.S.) response is arguably one of the world’s largest failures, as it leads the globe in case number as well as deaths. Until a successful vaccine can be broadly distributed, it is imperative that the U.S. curb the viral spread by rapidly developing a framework implementing both enhanced tracing and testing strategies balancing the needs of public health while respecting individual liberties. This review will explore the role of technology-augmented contact-based surveillance in tracking the outbreak in select countries in comparison to the current U.S. approach. It will evaluate barriers in the U.S. to implementing similar technologies, focusing on privacy concerns and a lack of unified testing and tracing strategy. Finally, it will explore strategies for rapidly scaling testing in a cost-effective manner.
Miller, Jennifer C; Barrett, Trevor; Patel, Neil; Souza, Andrew; Wood, John; Saxon, Leslie A
Heart Heroes: A Gamified Mhealth Platform to Measure Continuous Cardiac Health Data in the Outpatient Setting of Adolescent Patients With Known and Suspected Heart Disease Journal Article
In: Circulation, vol. 142, no. Suppl_3, pp. A15845–A15845, 2020, (Publisher: American Heart Association).
@article{miller_heart_2020,
title = {Heart Heroes: A Gamified Mhealth Platform to Measure Continuous Cardiac Health Data in the Outpatient Setting of Adolescent Patients With Known and Suspected Heart Disease},
author = {Jennifer C Miller and Trevor Barrett and Neil Patel and Andrew Souza and John Wood and Leslie A Saxon},
url = {https://www.ahajournals.org/doi/abs/10.1161/circ.142.suppl_3.15845},
doi = {10.1161/circ.142.suppl_3.15845},
year = {2020},
date = {2020-11-01},
urldate = {2023-03-31},
journal = {Circulation},
volume = {142},
number = {Suppl_3},
pages = {A15845--A15845},
abstract = {Introduction: Adolescents with heart disease report difficulty in communication about their health as a major inhibiting factor in their care. MHealth technologies collect health data in daily life and enable health data sharing between the provider and patient. The adolescent population has a high level of engagement with mobile devices and a willingness to use them for health-related activities.
Hypothesis: We hypothesized that our novel gamified mHealth platform Heart Hero can engage adolescent patients in the collection of cardiac health data in their daily life.
Methods: We designed the research app using ResearchKit to collect continuous physiological data from the Apple Watch and daily survey data on well-being, stress, medical adherence, and cardiac symptoms. Patients were provided the app, iPhone, and Apple Watch and enrolled for 27 days. A final in-app survey was provided to assess feedback. We enrolled 28 patients total who were scheduled for outpatient cardiopulmonary exercise testing.
Results: Mean age was 14.3 years old (SD +/-3.08) with a 1:1 M:F ratio. 61% of patients were ≥ 15 years of age. 94% of patients completed the final survey. Subjects on average completed 64% (SEM +/-5) of the daily quizzes with an average daily adherence of over 50% wearing the watch. 100% reported they liked using the watch and app, and 89% would like to continue wearing the Apple Watch. 53% reported the study encouraged them to exercise more while 21% reported encouragement to walk more. Fig 1 demonstrates A) Apple Watch and b) daily survey data collected from a patient throughout the study.
Conclusions: In conclusion, Heart Hero is a mHealth platform which can successfully be used to collect continuous health data from the adolescent population with high engagement characterized by adherence and positive patient feedback. Adherence to the app was notably superior to the initial 5 ResearchKit applications enrolling adult patients.
Download figure},
note = {Publisher: American Heart Association},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hypothesis: We hypothesized that our novel gamified mHealth platform Heart Hero can engage adolescent patients in the collection of cardiac health data in their daily life.
Methods: We designed the research app using ResearchKit to collect continuous physiological data from the Apple Watch and daily survey data on well-being, stress, medical adherence, and cardiac symptoms. Patients were provided the app, iPhone, and Apple Watch and enrolled for 27 days. A final in-app survey was provided to assess feedback. We enrolled 28 patients total who were scheduled for outpatient cardiopulmonary exercise testing.
Results: Mean age was 14.3 years old (SD +/-3.08) with a 1:1 M:F ratio. 61% of patients were ≥ 15 years of age. 94% of patients completed the final survey. Subjects on average completed 64% (SEM +/-5) of the daily quizzes with an average daily adherence of over 50% wearing the watch. 100% reported they liked using the watch and app, and 89% would like to continue wearing the Apple Watch. 53% reported the study encouraged them to exercise more while 21% reported encouragement to walk more. Fig 1 demonstrates A) Apple Watch and b) daily survey data collected from a patient throughout the study.
Conclusions: In conclusion, Heart Hero is a mHealth platform which can successfully be used to collect continuous health data from the adolescent population with high engagement characterized by adherence and positive patient feedback. Adherence to the app was notably superior to the initial 5 ResearchKit applications enrolling adult patients.
Download figure
Miller, Jennifer C.; Skoll, Devin; Saxon, Leslie A.
Home Monitoring of Cardiac Devices in the Era of COVID-19 Journal Article
In: Curr Cardiol Rep, vol. 23, no. 1, pp. 1, 2020, ISSN: 1534-3170.
@article{miller_home_2020,
title = {Home Monitoring of Cardiac Devices in the Era of {COVID-19}},
author = {Jennifer C. Miller and Devin Skoll and Leslie A. Saxon},
url = {https://doi.org/10.1007/s11886-020-01431-w},
doi = {10.1007/s11886-020-01431-w},
issn = {1534-3170},
year = {2020},
date = {2020-11-01},
urldate = {2023-03-31},
journal = {Current Cardiology Reports},
volume = {23},
number = {1},
pages = {1},
abstract = {Despite the promise of remote patient monitoring (RPM), this technology remained underutilized secondary to a lack of data transparency and systems issues until the COVID-19 pandemic ushered in a new era of telehealth and virtual solutions out of necessity. This review will explore the data supporting the use of RPM via both implantable and wearable devices in the field of cardiology and the role of home monitoring using RPM in the era of COVID-19.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Brixey, Jacqueline; Traum, David
Masheli: A Choctaw-English bilingual chatbot Book Section
In: Conversational Dialogue Systems for the Next Decade, pp. 41–50, Springer, Switzerland, 2020.
@incollection{brixey_masheli_2020,
title = {{Masheli}: A {Choctaw}-{English} bilingual chatbot},
author = {Jacqueline Brixey and David Traum},
url = {https://link.springer.com/chapter/10.1007/978-981-15-8395-7_4},
doi = {10.1007/978-981-15-8395-7_4},
year = {2020},
date = {2020-10-01},
booktitle = {Conversational Dialogue Systems for the Next Decade},
pages = {41--50},
publisher = {Springer Singapore},
address = {Singapore},
abstract = {We present the implementation of an autonomous Choctaw-English bilingual chatbot. Choctaw is an American indigenous language. The intended use of the chatbot is for Choctaw language learners to practice conversational skills. The system’s backend is NPCEditor, a response selection program that is trained on linked questions and answers. The chatbot’s answers are stories and conversational utterances in both languages. We experiment with the ability of NPCEditor to appropriately respond to language mixed utterances, and describe a pilot study with Choctaw-English speakers.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Wang, Ning; Pacheco, Luz; Merchant, Chirag; Skistad, Kristian; Jethwani, Aayushi
The Design of Charismatic Behaviors for Virtual Humans Proceedings Article
In: Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents, pp. 1–8, Association for Computing Machinery, New York, NY, USA, 2020, ISBN: 978-1-4503-7586-3.
@inproceedings{wang_design_2020,
title = {The Design of Charismatic Behaviors for Virtual Humans},
author = {Ning Wang and Luz Pacheco and Chirag Merchant and Kristian Skistad and Aayushi Jethwani},
url = {https://doi.org/10.1145/3383652.3423867},
doi = {10.1145/3383652.3423867},
isbn = {978-1-4503-7586-3},
year = {2020},
date = {2020-10-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 20th ACM International Conference on Intelligent Virtual Agents},
pages = {1--8},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IVA '20},
abstract = {Charisma is a powerful device of communication and persuasion. Researchers have pinpointed specific behaviors that contribute to the perception of charisma. How can we realize such behaviors in a virtual character? In this paper, we discuss our work in the design of charismatic behavior for a virtual human. We developed a series of verbal and nonverbal (with the focus on voice) charismatic strategies based on the analysis of behaviors of charismatic leaders. We developed scripted speech dialogues with the verbal strategies and recorded the speeches with actors using the nonverbal strategies. The dialogue is further implemented in a virtual human, embedded in a virtual classroom, to give a lecture on the human circulatory system. We conducted a study with the virtual human to assess the impact of charismatic verbal and nonverbal behaviors on perceived charisma. The results show the positive impact of the use of verbal strategies and how the use of voice can influence such impact. The results shed light on the next steps needed to automatically generate charismatic speech, voices, and gestures for virtual characters.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Reilly, Adam; Whitcup, Wendy; Liewer, Matt; Mozgai, Sharon
Multi-Platform Expansion of the Virtual Human Toolkit: Ubiquitous Conversational Agents Journal Article
In: International Journal of Semantic Computing, vol. 14, no. 03, pp. 315–332, 2020, ISSN: 1793-351X, 1793-7108.
@article{hartholt_multi-platform_2020,
title = {Multi-Platform Expansion of the {Virtual Human Toolkit}: Ubiquitous Conversational Agents},
author = {Arno Hartholt and Ed Fast and Adam Reilly and Wendy Whitcup and Matt Liewer and Sharon Mozgai},
url = {https://www.worldscientific.com/doi/abs/10.1142/S1793351X20400127},
doi = {10.1142/S1793351X20400127},
issn = {1793-351X, 1793-7108},
year = {2020},
date = {2020-09-01},
journal = {International Journal of Semantic Computing},
volume = {14},
number = {03},
pages = {315--332},
abstract = {We present an extension of the Virtual Human Toolkit to include a range of computing platforms, including mobile, web, Virtual Reality (VR) and Augmented Reality (AR). The Toolkit uses a mix of in-house and commodity technologies to support audio-visual sensing, speech recognition, natural language processing, nonverbal behavior generation and realization, text-to-speech generation and rendering. It has been extended to support computing platforms beyond Windows by leveraging microservices. The resulting framework maintains the modularity of the underlying architecture, allows re-use of both logic and content through cloud services, and is extensible by porting lightweight clients. We present the current state of the framework, discuss how we model and animate our characters, and offer lessons learned through several use cases, including expressive character animation in seated VR, shared space and navigation in room-scale VR, autonomous AI in mobile AR, and real-time user performance feedback leveraging mobile sensors in headset AR.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gordon, Carla; Georgila, Kallirroi; Yanov, Volodymyr; Traum, David
Towards Personalization of Spoken Dialogue System Communication Strategies Book Section
In: Conversational Dialogue Systems for the Next Decade, vol. 704, pp. 145–160, Springer Singapore, Singapore, 2020, ISBN: 9789811583940 9789811583957.
@incollection{gordon_towards_2020,
title = {Towards Personalization of Spoken Dialogue System Communication Strategies},
author = {Carla Gordon and Kallirroi Georgila and Volodymyr Yanov and David Traum},
url = {http://link.springer.com/10.1007/978-981-15-8395-7_11},
doi = {10.1007/978-981-15-8395-7_11},
isbn = {9789811583940, 9789811583957},
year = {2020},
date = {2020-09-01},
booktitle = {Conversational Dialogue Systems for the Next Decade},
volume = {704},
pages = {145--160},
publisher = {Springer Singapore},
address = {Singapore},
abstract = {This study examines the effects of 3 conversational traits – Register, Explicitness, and Misunderstandings – on user satisfaction and the perception of specific subjective features for Virtual Home Assistant spoken dialogue systems. Eight different system profiles were created, each representing a different combination of these 3 traits. We then utilized a novel Wizard of Oz data collection tool and recruited participants who interacted with the 8 different system profiles, and then rated the systems on 7 subjective features. Surprisingly, we found that systems which made errors were preferred overall, with the statistical analysis revealing error-prone systems were rated higher than systems which made no errors for all 7 of the subjective features rated. There were also some interesting interaction effects between the 3 conversational traits, such as implicit confirmations being preferred for systems employing a “conversational” Register, while explicit confirmations were preferred for systems employing a “formal” Register, even though there was no overall main effect for Explicitness. This experimental framework offers a fine-grained approach to the evaluation of user satisfaction which looks towards the personalization of communication strategies for spoken dialogue systems.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Feng, Andrew; Gordon, Andrew S.
Recognizing Multiplayer Behaviors Using Synthetic Training Data Proceedings Article
In: 2020 IEEE Conference on Games (CoG), pp. 463–470, 2020, (ISSN: 2325-4289).
@inproceedings{feng_recognizing_2020,
title = {Recognizing Multiplayer Behaviors Using Synthetic Training Data},
author = {Andrew Feng and Andrew S. Gordon},
doi = {10.1109/CoG47356.2020.9231742},
year = {2020},
date = {2020-08-01},
booktitle = {2020 IEEE Conference on Games (CoG)},
pages = {463--470},
abstract = {Accurate recognition of group behaviors is essential to the design of engaging networked multiplayer games. However, contemporary data-driven machine learning solutions are difficult to apply during the game development process, given that no authentic gameplay data is yet available for use as training data. In this paper, we investigate the use of synthetic training data, i.e., gameplay data that is generated by AI-controlled agent teams programmed to perform each of the behaviors to be recognized in groups of human players. The particular task we focus on is to recognize group movement formations in player-controlled avatars in a realistic virtual world. We choose five typical military team movement patterns for the formation recognition task and train machine learning models using procedurally generated unit trajectories as training data. The experiments were conducted using ResNet and EfficientNet, which are two popular convolutional neural network architectures for image classifications. The synthetic data is augmented by creating variations in image rotation, unit spacing, team size, and positional perturbations to bridge the gap between synthetic and human gameplay data. We demonstrate that high-accuracy behavior recognition can be achieved using deep neural networks by applying the aforementioned data augmentation methods to simulated gameplay data.},
note = {ISSN: 2325-4289},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Saxon, Leslie A.; Varma, Niraj; Epstein, Laurence M.; Ganz, Leonard I.; Epstein, Andrew E.
Rates of Adoption and Outcomes After Firmware Updates for Food and Drug Administration Cybersecurity Safety Advisories Journal Article
In: Circulation: Arrhythmia and Electrophysiology, vol. 13, no. 8, pp. e008364, 2020, (Publisher: American Heart Association).
@article{saxon_rates_2020,
  author    = {Leslie A. Saxon and Niraj Varma and Laurence M. Epstein and Leonard I. Ganz and Andrew E. Epstein},
  title     = {Rates of Adoption and Outcomes After Firmware Updates for Food and Drug Administration Cybersecurity Safety Advisories},
  journal   = {Circulation: Arrhythmia and Electrophysiology},
  volume    = {13},
  number    = {8},
  pages     = {e008364},
  year      = {2020},
  date      = {2020-08-01},
  doi       = {10.1161/CIRCEP.120.008364},
  url       = {https://www.ahajournals.org/doi/full/10.1161/CIRCEP.120.008364},
  urldate   = {2023-03-31},
  note      = {Publisher: American Heart Association},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Mell, Johnathan; Lucas, Gale; Mozgai, Sharon; Gratch, Jonathan
The Effects of Experience on Deception in Human-Agent Negotiation Journal Article
In: Journal of Artificial Intelligence Research, vol. 68, pp. 633–660, 2020, ISSN: 1076-9757.
@article{mell_effects_2020,
title = {The Effects of Experience on Deception in Human-Agent Negotiation},
author = {Johnathan Mell and Gale Lucas and Sharon Mozgai and Jonathan Gratch},
url = {https://www.jair.org/index.php/jair/article/view/11924},
doi = {10.1613/jair.1.11924},
issn = {1076-9757},
year = {2020},
date = {2020-08-01},
urldate = {2023-03-31},
journal = {Journal of Artificial Intelligence Research},
volume = {68},
pages = {633--660},
abstract = {Negotiation is the complex social process by which multiple parties come to mutual agreement over a series of issues. As such, it has proven to be a key challenge problem for designing adequately social AIs that can effectively navigate this space. Artificial AI agents that are capable of negotiating must be capable of realizing policies and strategies that govern offer acceptances, offer generation, preference elicitation, and more. But the next generation of agents must also adapt to reflect their users’ experiences.
The best human negotiators tend to have honed their craft through hours of practice and experience. But, not all negotiators agree on which strategic tactics to use, and endorsement of deceptive tactics in particular is a controversial topic for many negotiators. We examine the ways in which deceptive tactics are used and endorsed in non-repeated human negotiation and show that prior experience plays a key role in governing what tactics are seen as acceptable or useful in negotiation. Previous work has indicated that people that negotiate through artificial agent representatives may be more inclined to fairness than those people that negotiate directly. We present a series of three user studies that challenge this initial assumption and expand on this picture by examining the role of past experience.
This work constructs a new scale for measuring endorsement of manipulative negotiation tactics and introduces its use to artificial intelligence research. It continues by presenting the results of a series of three studies that examine how negotiating experience can change what negotiation tactics and strategies human endorse. Study #1 looks at human endorsement of deceptive techniques based on prior negotiating experience as well as representative effects. Study #2 further characterizes the negativity of prior experience in relation to endorsement of deceptive techniques. Finally, in Study #3, we show that the lessons learned from the empirical observations in Study #1 and #2 can in fact be induced—by designing agents that provide a specific type of negative experience, human endorsement of deception can be predictably manipulated.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
The best human negotiators tend to have honed their craft through hours of practice and experience. But, not all negotiators agree on which strategic tactics to use, and endorsement of deceptive tactics in particular is a controversial topic for many negotiators. We examine the ways in which deceptive tactics are used and endorsed in non-repeated human negotiation and show that prior experience plays a key role in governing what tactics are seen as acceptable or useful in negotiation. Previous work has indicated that people that negotiate through artificial agent representatives may be more inclined to fairness than those people that negotiate directly. We present a series of three user studies that challenge this initial assumption and expand on this picture by examining the role of past experience.
This work constructs a new scale for measuring endorsement of manipulative negotiation tactics and introduces its use to artificial intelligence research. It continues by presenting the results of a series of three studies that examine how negotiating experience can change what negotiation tactics and strategies human endorse. Study #1 looks at human endorsement of deceptive techniques based on prior negotiating experience as well as representative effects. Study #2 further characterizes the negativity of prior experience in relation to endorsement of deceptive techniques. Finally, in Study #3, we show that the lessons learned from the empirical observations in Study #1 and #2 can in fact be induced—by designing agents that provide a specific type of negative experience, human endorsement of deception can be predictably manipulated.
Rakofsky, Jeffrey J.; Talbot, Thomas B.; Dunlop, Boadie W.
A Virtual Standardized Patient–Based Assessment Tool to Evaluate Psychiatric Residents’ Psychopharmacology Proficiency Journal Article
In: Academic Psychiatry, 2020, ISSN: 1042-9670, 1545-7230.
@article{rakofsky_virtual_2020,
title = {A Virtual Standardized Patient–Based Assessment Tool to Evaluate Psychiatric Residents’ Psychopharmacology Proficiency},
author = {Jeffrey J. Rakofsky and Thomas B. Talbot and Boadie W. Dunlop},
url = {http://link.springer.com/10.1007/s40596-020-01286-x},
doi = {10.1007/s40596-020-01286-x},
issn = {1042-9670, 1545-7230},
year = {2020},
date = {2020-07-01},
journal = {Academic Psychiatry},
abstract = {Objectives A virtual standardized patient-based assessment simulator was developed to address biases and practical limitations in existing methods for evaluating residents’ proficiency in psychopharmacological knowledge and practice. Methods The simulator was designed to replicate an outpatient psychiatric clinic experience. The virtual patient reported symptoms of a treatment-resistant form of major depressive disorder (MDD), requiring the learner to use various antidepressants in order for the patient to fully remit. Test scores were based on the proportion of correct responses to questions asked by the virtual patient about possible side effects, dosing, and titration decisions, which depended upon the patient’s tolerability and response to the learner’s selected medications. The validation paradigm included a novice-expert performance comparison across 4th year medical students, psychiatric residents from all four post-graduate year classes, and psychiatry department faculty, and a correlational analysis of simulator performance with the PRITE Somatic Treatments subscale score. Post-test surveys evaluated the test takers’ subjective impressions of the simulator. Results Forty-three subjects completed the online exam and survey. Total mean scores on the exam differed significantly across all the learner groups in a step-wise manner from students to faculty (F = 6.10},
internal-note = {NOTE(review): the abstract is truncated mid-statistic at "(F = 6.10" — presumably clipped by the exporter; restore the full abstract from the publisher record for doi 10.1007/s40596-020-01286-x},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Li, Ruilong; Xiu, Yuliang; Saito, Shunsuke; Huang, Zeng; Olszewski, Kyle; Li, Hao
Monocular Real-Time Volumetric Performance Capture Journal Article
In: ResearchGate, pp. 30, 2020.
@article{li_monocular_2020,
title = {Monocular Real-Time Volumetric Performance Capture},
author = {Ruilong Li and Yuliang Xiu and Shunsuke Saito and Zeng Huang and Kyle Olszewski and Hao Li},
url = {https://www.researchgate.net/publication/343279742_Monocular_Real-Time_Volumetric_Performance_Capture},
year = {2020},
date = {2020-07-01},
journal = {ResearchGate},
pages = {30},
internal-note = {NOTE(review): "ResearchGate" is not a publication venue and pages = {30} looks like a page count rather than a page range — this entry appears to be an auto-export of a preprint hosting page; verify the actual venue of formal publication and correct journal/pages (and entry type if it is a conference paper)},
abstract = {We present the first approach to volumetric performance capture and novel-view rendering at real-time speed from monocular video, eliminating the need for expensive multi-view systems or cumbersome pre-acquisition of a personalized template model. Our system reconstructs a fully textured 3D human from each frame by leveraging Pixel-Aligned Implicit Function (PIFu). While PIFu achieves high-resolution reconstruction in a memory-efficient manner, its computationally expensive inference prevents us from deploying such a system for real-time applications. To this end, we propose a novel hierarchical surface localization algorithm and a direct rendering method without explicitly extracting surface meshes. By culling unnecessary regions for evaluation in a coarse-to-fine manner, we successfully accelerate the reconstruction by two orders of magnitude from the baseline without compromising the quality. Furthermore, we introduce an Online Hard Example Mining (OHEM) technique that effectively suppresses failure modes due to the rare occurrence of challenging examples. We adaptively update the sampling probability of the training data based on the current reconstruction accuracy, which effectively alleviates reconstruction artifacts. Our experiments and evaluations demonstrate the robustness of our system to various challenging angles, illuminations, poses, and clothing styles. We also show that our approach compares favorably with the state-of-the-art monocular performance capture. Our proposed approach removes the need for multi-view studio settings and enables a consumer-accessible solution for volumetric capture.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Brixey, Jacqueline; Artstein, Ron
ChoCo: a multimodal corpus of the Choctaw language Journal Article
In: Language Resources and Evaluation, 2020, ISSN: 1574-020X, 1574-0218.
@article{brixey_choco_2020,
title = {{ChoCo}: a multimodal corpus of the {Choctaw} language},
author = {Jacqueline Brixey and Ron Artstein},
url = {http://link.springer.com/10.1007/s10579-020-09494-5},
doi = {10.1007/s10579-020-09494-5},
issn = {1574-020X, 1574-0218},
year = {2020},
date = {2020-07-01},
journal = {Language Resources and Evaluation},
abstract = {This article presents a general use corpus for Choctaw, an American indigenous language (ISO 639-2: cho, endonym: Chahta). The corpus contains audio, video, and text resources, with many texts also translated in English. The Oklahoma Choctaw and the Mississippi Choctaw variants of the language are represented in the corpus. The data set provides documentation support for this threatened language, and allows researchers and language teachers access to a diverse collection of resources.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rayatdoost, Soheil; Rudrauf, David; Soleymani, Mohammad
Expression-Guided EEG Representation Learning for Emotion Recognition Proceedings Article
In: Proceedings of the ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3222–3226, IEEE, Barcelona, Spain, 2020, ISBN: 978-1-5090-6631-5.
@inproceedings{rayatdoost_expression-guided_2020,
title = {Expression-Guided {EEG} Representation Learning for Emotion Recognition},
author = {Soheil Rayatdoost and David Rudrauf and Mohammad Soleymani},
url = {https://ieeexplore.ieee.org/document/9053004/},
doi = {10.1109/ICASSP40776.2020.9053004},
isbn = {978-1-5090-6631-5},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages = {3222--3226},
publisher = {IEEE},
address = {Barcelona, Spain},
abstract = {Learning a joint and coordinated representation between different modalities can improve multimodal emotion recognition. In this paper, we propose a deep representation learning approach for emotion recognition from electroencephalogram (EEG) signals guided by facial electromyogram (EMG) and electrooculogram (EOG) signals. We recorded EEG, EMG and EOG signals from 60 participants who watched 40 short videos and self-reported their emotions. A cross-modal encoder that jointly learns the features extracted from facial and ocular expressions and EEG responses was designed and evaluated on our recorded data and MAHOB-HCI, a publicly available database. We demonstrate that the proposed representation is able to improve emotion recognition performance. We also show that the learned representation can be transferred to a different database without EMG and EOG and achieve superior performance. Methods that fuse behavioral and neural responses can be deployed in wearable emotion recognition solutions, practical in situations in which computer vision expression recognition is not feasible.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bonial, Claire; Donatelli, Lucia; Abrams, Mitchell; Lukin, Stephanie M; Tratz, Stephen; Marge, Matthew; Artstein, Ron; Traum, David; Voss, Clare R
Dialogue-AMR: Abstract Meaning Representation for Dialogue Proceedings Article
In: Proceedings of the 12th Language Resources and Evaluation Conference, pp. 12, European Language Resources Association, Marseille, France, 2020.
@inproceedings{bonial_dialogue-amr_2020,
title = {{Dialogue-AMR}: Abstract Meaning Representation for Dialogue},
author = {Claire Bonial and Lucia Donatelli and Mitchell Abrams and Stephanie M. Lukin and Stephen Tratz and Matthew Marge and Ron Artstein and David Traum and Clare R. Voss},
url = {https://www.aclweb.org/anthology/2020.lrec-1.86/},
year = {2020},
date = {2020-05-01},
booktitle = {Proceedings of the 12th Language Resources and Evaluation Conference},
pages = {12},
internal-note = {NOTE(review): pages = {12} looks like a page count from the export, not a page range — verify the actual page span against the ACL Anthology record},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {This paper describes a schema that enriches Abstract Meaning Representation (AMR) in order to provide a semantic representation for facilitating Natural Language Understanding (NLU) in dialogue systems. AMR offers a valuable level of abstraction of the propositional content of an utterance; however, it does not capture the illocutionary force or speaker’s intended contribution in the broader dialogue context (e.g., make a request or ask a question), nor does it capture tense or aspect. We explore dialogue in the domain of human-robot interaction, where a conversational robot is engaged in search and navigation tasks with a human partner. To address the limitations of standard AMR, we develop an inventory of speech acts suitable for our domain, and present “Dialogue-AMR”, an enhanced AMR that represents not only the content of an utterance, but the illocutionary force behind it, as well as tense and aspect. To showcase the coverage of the schema, we use both manual and automatic methods to construct the “DialAMR” corpus—a corpus of human-robot dialogue annotated with standard AMR and our enriched Dialogue-AMR schema. Our automated methods can be used to incorporate AMR into a larger NLU pipeline supporting human-robot dialogue.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Gordon, Carla; Yanov, Volodymyr; Traum, David
Predicting Ratings of Real Dialogue Participants from Artificial Data and Ratings of Human Dialogue Observers Proceedings Article
In: Proceedings of the Twelfth Language Resources and Evaluation Conference, pp. 726–734, European Language Resources Association, Marseille, France, 2020, ISBN: 979-10-95546-34-4.
@inproceedings{georgila_predicting_2020,
title = {Predicting Ratings of Real Dialogue Participants from Artificial Data and Ratings of Human Dialogue Observers},
author = {Kallirroi Georgila and Carla Gordon and Volodymyr Yanov and David Traum},
url = {https://aclanthology.org/2020.lrec-1.91},
isbn = {979-10-95546-34-4},
year = {2020},
date = {2020-05-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the Twelfth Language Resources and Evaluation Conference},
pages = {726--734},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {We collected a corpus of dialogues in a Wizard of Oz (WOz) setting in the Internet of Things (IoT) domain. We asked users participating in these dialogues to rate the system on a number of aspects, namely, intelligence, naturalness, personality, friendliness, their enjoyment, overall quality, and whether they would recommend the system to others. Then we asked dialogue observers, i.e., Amazon Mechanical Turkers (MTurkers), to rate these dialogues on the same aspects. We also generated simulated dialogues between dialogue policies and simulated users and asked MTurkers to rate them again on the same aspects. Using linear regression, we developed dialogue evaluation functions based on features from the simulated dialogues and the MTurkers' ratings, the WOz dialogues and the MTurkers' ratings, and the WOz dialogues and the WOz participants' ratings. We applied all these dialogue evaluation functions to a held-out portion of our WOz dialogues, and we report results on the predictive power of these different types of dialogue evaluation functions. Our results suggest that for three conversational aspects (intelligence, naturalness, overall quality) just training evaluation functions on simulated data could be sufficient.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2016
Manuvinakurike, Ramesh; Paetzel, Maike; Qu, Cheng; Schlangen, David; DeVault, David
Toward incremental dialogue act segmentation in fast-paced interactive dialogue systems Proceedings Article
In: Proceedings of the 17th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 252–262, Association for Computational Linguistics, Los Angeles, CA, 2016.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{manuvinakurike_toward_2016,
  author    = {Ramesh Manuvinakurike and Maike Paetzel and Cheng Qu and David Schlangen and David DeVault},
  title     = {Toward incremental dialogue act segmentation in fast-paced interactive dialogue systems},
  booktitle = {Proceedings of the 17th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
  pages     = {252–262},
  publisher = {Association for Computational Linguistics},
  address   = {Los Angeles, CA},
  url       = {http://www.aclweb.org/anthology/W16-3632},
  year      = {2016},
  date      = {2016-09-01},
  abstract  = {In this paper, we present and evaluate an approach to incremental dialogue act (DA) segmentation and classification. Our approach utilizes prosodic, lexico-syntactic and contextual features, and achieves an encouraging level of performance in offline corpus-based evaluation as well as in simulated human-agent dialogues. Our approach uses a pipeline of sequential processing steps, and we investigate the contribution of different processing steps to DA segmentation errors. We present our results using both existing and new metrics for DA segmentation. The incremental DA segmentation capability described here may help future systems to allow more natural speech from users and enable more natural patterns of interaction.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ravi, Satheesh; Artstein, Ron
Language Portability for Dialogue Systems: Translating a Question-Answering System from English into Tamil Proceedings Article
In: Proceedings of the SIGDIAL 2016 Conference, pp. 111–116, Association for Computational Linguistics, Los Angeles, CA, 2016.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{ravi_language_2016,
title = {Language Portability for Dialogue Systems: Translating a Question-Answering System from {English} into {Tamil}},
author = {Satheesh Ravi and Ron Artstein},
url = {http://www.aclweb.org/anthology/W16-3614},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the SIGDIAL 2016 Conference},
pages = {111--116},
publisher = {Association for Computational Linguistics},
address = {Los Angeles, CA},
abstract = {A training and test set for a dialogue system in the form of linked questions and responses is translated from English into Tamil. Accuracy of identifying an appropriate response in Tamil is 79%, compared to the English accuracy of 89%, suggesting that translation can be useful to start up a dialogue system. Machine translation of Tamil inputs into English also results in 79% accuracy. However, machine translation of the English training data into Tamil results in a drop in accuracy to 54% when tested on manually authored Tamil, indicating that there is still a large gap before machine translated dialogue systems can interact with human users.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Freed, Michael C.; Novak, Laura A.; Killgore, William D. S.; Rauch, Sheila A. M.; Koehlmoos, Tracey P.; Ginsberg, J. P.; Krupnick, Janice L.; Rizzo, Albert "Skip"; Andrews, Anne; Engel, Charles C.
IRB and Research Regulatory Delays Within the Military Health System: Do They Really Matter? And If So, Why and for Whom? Journal Article
In: The American Journal of Bioethics, vol. 16, no. 8, pp. 30–37, 2016, ISSN: 1526-5161, 1536-0075.
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{freed_irb_2016,
title = {{IRB} and Research Regulatory Delays Within the {Military Health System}: Do They Really Matter? And If So, Why and for Whom?},
author = {Michael C. Freed and Laura A. Novak and William D. S. Killgore and Sheila A. M. Rauch and Tracey P. Koehlmoos and J. P. Ginsberg and Janice L. Krupnick and Albert ``Skip'' Rizzo and Anne Andrews and Charles C. Engel},
url = {http://www.tandfonline.com/doi/full/10.1080/15265161.2016.1187212},
doi = {10.1080/15265161.2016.1187212},
issn = {1526-5161, 1536-0075},
year = {2016},
date = {2016-08-01},
journal = {The American Journal of Bioethics},
volume = {16},
number = {8},
pages = {30--37},
abstract = {Institutional review board (IRB) delays may hinder the successful completion of federally funded research in the U.S. military. When this happens, time-sensitive, mission-relevant questions go unanswered. Research participants face unnecessary burdens and risks if delays squeeze recruitment timelines, resulting in inadequate sample sizes for definitive analyses. More broadly, military members are exposed to untested or undertested interventions, implemented by well-intentioned leaders who bypass the research process altogether. To illustrate, we offer two case examples. We posit that IRB delays often appear in the service of managing institutional risk, rather than protecting research participants. Regulators may see more risk associated with moving quickly than risk related to delay, choosing to err on the side of bureaucracy. The authors of this article, all of whom are military-funded researchers, government stakeholders, and/or human subject protection experts, offer feasible recommendations to improve the IRB system and, ultimately, research within military, veteran, and civilian populations.},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Washburn, Micki; Bordnick, Patrick; Rizzo, Albert “Skip”
A pilot feasibility study of virtual patient simulation to enhance social work students’ brief mental health assessment skills Journal Article
In: Social Work in Health Care, pp. 1–19, 2016, ISSN: 0098-1389, 1541-034X.
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{washburn_pilot_2016,
title = {A pilot feasibility study of virtual patient simulation to enhance social work students’ brief mental health assessment skills},
author = {Micki Washburn and Patrick Bordnick and Albert ``Skip'' Rizzo},
url = {https://www.tandfonline.com/doi/full/10.1080/00981389.2016.1210715},
doi = {10.1080/00981389.2016.1210715},
issn = {0098-1389, 1541-034X},
year = {2016},
date = {2016-08-01},
journal = {Social Work in Health Care},
pages = {1--19},
abstract = {This study presents preliminary feasibility and acceptability data on the use of virtual patient (VP) simulations to develop brief assessment skills within an interdisciplinary care setting. Results support the acceptability of technology-enhanced simulations and offer preliminary evidence for an association between engagement in VP practice simulations and improvements in diagnostic accuracy and clinical interviewing skills. Recommendations and next steps for research on technology-enhanced simulations within social work are discussed.},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Chen, Chih-Fan; Bolas, Mark; Suma, Evan
Real-time 3D rendering using depth-based geometry reconstruction and view-dependent texture mapping Proceedings Article
In: Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016, pp. 1–2, ACM Press, Anaheim, CA, 2016, ISBN: 978-1-4503-4371-8.
Abstract | Links | BibTeX | Tags: MxR, UARC
@inproceedings{chen_real-time_2016,
title = {Real-time {3D} rendering using depth-based geometry reconstruction and view-dependent texture mapping},
author = {Chih-Fan Chen and Mark Bolas and Evan Suma},
url = {http://dl.acm.org/citation.cfm?id=2945162},
doi = {10.1145/2945078.2945162},
isbn = {978-1-4503-4371-8},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016},
pages = {1--2},
publisher = {ACM Press},
address = {Anaheim, CA},
abstract = {With the recent proliferation of high-fidelity head-mounted displays (HMDs), there is increasing demand for realistic 3D content that can be integrated into virtual reality environments. However, creating photorealistic models is not only difficult but also time consuming. A simpler alternative involves scanning objects in the real world and rendering their digitized counterpart in the virtual world. Capturing objects can be achieved by performing a 3D scan using widely available consumer-grade RGB-D cameras. This process involves reconstructing the geometric model from depth images generated using a structured light or time-of-flight sensor. The colormap is determined by fusing data from multiple color images captured during the scan. Existing methods compute the color of each vertex by averaging the colors from all these images. Blending colors in this manner creates low-fidelity models that appear blurry. (Figure 1 right). Furthermore, this approach also yields textures with fixed lighting that is baked on the model. This limitation becomes more apparent when viewed in head-tracked virtual reality, as the illumination (e.g. specular reflections) does not change appropriately based on the user's viewpoint},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Grechkin, Timofey; Thomas, Jerald; Azmandian, Mahdi; Bolas, Mark; Suma, Evan
Revisiting detection thresholds for redirected walking: combining translation and curvature gains Proceedings Article
In: Proceedings of the ACM Symposium on Applied Perception, pp. 113–120, ACM Press, Anaheim, CA, 2016, ISBN: 978-1-4503-4383-1.
Abstract | Links | BibTeX | Tags: MxR, UARC
@inproceedings{grechkin_revisiting_2016,
title = {Revisiting detection thresholds for redirected walking: combining translation and curvature gains},
author = {Timofey Grechkin and Jerald Thomas and Mahdi Azmandian and Mark Bolas and Evan Suma},
url = {http://dl.acm.org/citation.cfm?id=2931018},
doi = {10.1145/2931002.2931018},
isbn = {978-1-4503-4383-1},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the ACM Symposium on Applied Perception},
pages = {113--120},
publisher = {ACM Press},
address = {Anaheim, CA},
abstract = {Redirected walking enables the exploration of large virtual environments while requiring only a finite amount of physical space. Unfortunately, in living room sized tracked areas the effectiveness of common redirection algorithms such as Steer-to-Center is very limited. A potential solution is to increase redirection effectiveness by applying two types of perceptual manipulations (curvature and translation gains) simultaneously. This paper investigates how such combination may affect detection thresholds for curvature gain. To this end we analyze the estimation methodology and discuss selection process for a suitable estimation method. We then compare curvature detection thresholds obtained under different levels of translation gain using two different estimation methods: method of constant stimuli and Green’s maximum likelihood procedure. The data from both experiments shows no evidence that curvature gain detection thresholds were affected by the presence of translation gain (with test levels spanning previously estimated interval of undetectable translation gain levels). This suggests that in practice currently used levels of translation and curvature gains can be safely applied simultaneously. Furthermore, we present some evidence that curvature detection thresholds may be lower than previously reported. Our estimates indicate that users can be redirected on a circular arc with radius of either 11.6m or 6.4m depending on the estimation method vs. the previously reported value of 22m. These results highlight that the detection threshold estimates vary significantly with the estimation method and suggest the need for further studies to define efficient and reliable estimation methodology},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Barreto, Joseph; Alexander, Oleg; Bolas, Mark; Debevec, Paul; Unger, Jonas
Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array Proceedings Article
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 18–26, Las Vegas, NV, 2016.
Abstract | Links | BibTeX | Tags: Graphics, MxR, UARC
@inproceedings{jones_time-offset_2016,
  author    = {Andrew Jones and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Joseph Barreto and Oleg Alexander and Mark Bolas and Paul Debevec and Jonas Unger},
  title     = {Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array},
  booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops},
  pages     = {18–26},
  address   = {Las Vegas, NV},
  url       = {http://www.cv-foundation.org//openaccess/content_cvpr_2016_workshops/w16/papers/Jones_Time-Offset_Conversations_on_CVPR_2016_paper.pdf},
  year      = {2016},
  date      = {2016-07-01},
  abstract  = {We present a system for creating and displaying interactive life-sized 3D digital humans based on pre-recorded interviews. We use 30 cameras and an extensive list of questions to record a large set of video responses. Users access videos through a natural conversation interface that mimics face-to-face interaction. Recordings of answers, listening and idle behaviors are linked together to create a persistent visual image of the person throughout the interaction. The interview subjects are rendered using flowed light fields and shown life-size on a special rear-projection screen with an array of 216 video projectors. The display allows multiple users to see different 3D perspectives of the subject in proper relation to their viewpoints, without the need for stereo glasses. The display is effective for interactive conversations since it provides 3D cues such as eye gaze and spatial hand gestures.},
  keywords  = {Graphics, MxR, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
LeGendre, Chloe; Yu, Xueming; Liu, Dai; Busch, Jay; Jones, Andrew; Pattanaik, Sumanta; Debevec, Paul
Practical Multispectral Lighting Reproduction Journal Article
In: ACM Transactions on Graphics, vol. 35, no. 4, pp. 1–11, 2016, ISSN: 07300301.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@article{legendre_practical_2016,
title = {Practical Multispectral Lighting Reproduction},
author = {Chloe LeGendre and Xueming Yu and Dai Liu and Jay Busch and Andrew Jones and Sumanta Pattanaik and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?id=2925934},
doi = {10.1145/2897824.2925934},
issn = {0730-0301},
year = {2016},
date = {2016-07-01},
journal = {ACM Transactions on Graphics},
volume = {35},
number = {4},
pages = {1--11},
abstract = {We present a practical framework for reproducing omnidirectional incident illumination conditions with complex spectra using a light stage with multispectral LED lights. For lighting acquisition, we augment standard RGB panoramic photography with one or more observations of a color chart with numerous reflectance spectra. We then solve for how to drive the multispectral light sources so that they best reproduce the appearance of the color charts in the original lighting. Even when solving for non-negative intensities, we show that accurate lighting reproduction is achievable using just four or six distinct LED spectra for a wide range of incident illumination spectra. A significant benefit of our approach is that it does not require the use of specialized equipment (other than the light stage) such as monochromators, spectroradiometers, or explicit knowledge of the LED power spectra, camera spectral response functions, or color chart reflectance spectra. We describe two simple devices for multispectral lighting capture, one for slow measurements of detailed angular spectral detail, and one for fast measurements with coarse angular detail. We validate the approach by realistically compositing real subjects into acquired lighting environments, showing accurate matches to how the subject would actually look within the environments, even for those including complex multispectral illumination. We also demonstrate dynamic lighting capture and playback using the technique.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
Sottilare, Robert A.; Graesser, Arthur C.; Hu, Xiangen; Olney, Andrew; Nye, Benjamin; Sinatra, Anna M.
Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling Book
US Army Research Laboratory, Orlando, FL, 2016.
Abstract | Links | BibTeX | Tags: ARL, DoD, Learning Sciences, UARC
@book{sottilare_design_2016,
  author    = {Robert A. Sottilare and Arthur C. Graesser and Xiangen Hu and Andrew Olney and Benjamin Nye and Anna M. Sinatra},
  title     = {Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling},
  volume    = {4},
  publisher = {US Army Research Laboratory},
  address   = {Orlando, FL},
  url       = {http://books.google.com/books?hl=en&lr=&id=0suvDAAAQBAJ&oi=fnd&pg=PA1&dq=%22Barnes,+Behrooz+Mostafavi,+and+Michael+J.%22+%22A.+Sottilare+and+Joseph%22+%2214+%E2%80%93+Exploring+the+Diversity+of+Domain+Modeling+for+Training%22+%2213+%E2%80%92+Mining+Expertise:+Learning+New+Tricks+from+an+Old%22+&ots=6MJgp2XEWV&sig=7CHZvZIllN3Xk8uFbMHmxN7gfLw},
  year      = {2016},
  date      = {2016-07-01},
  abstract  = {Design Recommendations for Intelligent Tutoring Systems (ITSs) explores the impact of intelligent tutoring system design on education and training. Specifically, this volume examines “Authoring Tools and Expert Modeling Techniques”. The “Design Recommendations book series examines tools and methods to reduce the time and skill required to develop Intelligent Tutoring Systems with the goal of improving the Generalized Intelligent Framework for Tutoring (GIFT). GIFT is a modular, service-oriented architecture developed to capture simplified authoring techniques, promote reuse and standardization of ITSs along with automated instructional techniques and effectiveness evaluation capabilities for adaptive tutoring tools and methods.},
  keywords  = {ARL, DoD, Learning Sciences, UARC},
  pubstate  = {published},
  tppubtype = {book}
}
Nye, Benjamin D.; Boyce, Michael W.; Sottilare, Robert
Defining the Ill-Defined: From Abstract Principles to Applied Pedagogy Book Section
In: Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling, vol. 4, pp. 19–37, US Army Research Laboratory, Orlando, FL, 2016, ISBN: 978-0-9893923-9-6.
Abstract | Links | BibTeX | Tags: ARL, DoD, Learning Sciences, UARC
@incollection{nye_defining_2016,
title = {Defining the Ill-Defined: From Abstract Principles to Applied Pedagogy},
author = {Benjamin D. Nye and Michael W. Boyce and Robert Sottilare},
url = {https://gifttutoring.org/attachments/download/1736/Design%20Recommendations%20for%20ITS_Volume%204%20-%20Domain%20Modeling%20Book_web%20version_final.pdf},
isbn = {978-0-9893923-9-6},
year = {2016},
date = {2016-07-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling},
volume = {4},
pages = {19--37},
publisher = {US Army Research Laboratory},
address = {Orlando, FL},
abstract = {Attempts to define ill-defined domains in intelligent tutoring system (ITS) research has been approached a number of times (Fournier-Viger, Nkambou, \& Nguifo, 2010; Lynch, Ashley, Pinkwart, \& Aleven, 2009; Mitrovic \& Weerasinghe, 2009; Jacovina, Snow, Dai, \& McNamara, 2015; Woods, Stensrud, Wray, Haley, \& Jones, 2015). Related research has tried to determine levels of ill-definedness for a domain (Le, Loll, \& Pinkwart, 2013). Despite such attempts, the field has not yet converged on common guidelines to distinguish between well-defined versus ill-defined domains. We argue that such guidelines struggle to converge because a domain is too large to meaningfully categorize: every domain contains a mixture of well-defined and ill-defined tasks. While the co-existence of well-defined and ill-defined tasks in a single domain is nearly universally-agreed upon by researchers; this key point is often quickly buried by an extensive discussion about what makes certain domain tasks ill-defined (e.g., disagreement about ideal solutions, multiple solution paths). In this chapter, we first take a step back to consider what is meant by a domain in the context of learning. Next, based on this definition for a domain, we map out the components that are in a learning domain, since each component may have ill-defined parts. This leads into a discussion about the strategies that have been used to make ill-defined domains tractable for certain types of pedagogy. Examples of ITS research that applies these strategies are noted. Finally, we conclude with practical how-to considerations and open research questions for approaching ill-defined domains. This chapter should be considered a companion piece to our chapter in the prior volume of this series (Nye, Goldberg, \& Hu, 2015). 
This chapter focuses on how to understand and transform ill-defined parts of domains, while the prior chapter discusses commonly-used learning tasks and authoring approaches for both well-defined and ill-defined tasks. As such, this chapter is intended to help the learner understand if and how different parts of the domain are ill-defined (and what to do about them). The companion piece in the authoring tools volume discusses different categories of well and ill-defined tasks, from the standpoint of attempting to author and maintain an ITS.},
keywords = {ARL, DoD, Learning Sciences, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Nye, Benjamin D.; Hu, Xiangen
Conceptualizing and Representing Domains to Guide Tutoring Book Section
In: Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling, vol. 4, pp. 15–18, US Army Research Laboratory, Orlando, FL, 2016.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC
@incollection{nye_conceptualizing_2016,
title = {Conceptualizing and Representing Domains to Guide Tutoring},
author = {Benjamin D. Nye and Xiangen Hu},
url = {http://books.google.com/books?hl=en&lr=&id=0suvDAAAQBAJ&oi=fnd&pg=PA15&dq=%22data.+This+chapter+presents+an+excellent+overview+of+current+research+on+Q-matrices%22+%22edge+work+on+ensemble+methods+that+achieve+state+of+the+art+performance+by+combining%22+&ots=6MJhm1XHVV&sig=i14eJyin69Cy-jms2lWIFF4K3CU},
year = {2016},
date = {2016-07-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling},
volume = {4},
pages = {15--18},
publisher = {US Army Research Laboratory},
address = {Orlando, FL},
abstract = {Any discussion about how intelligent tutoring system (ITS) domains must begin with considering how ITS conceptualize and represent domains. This process requires building formal, mathematically-specifiable operationalization of the often implicit knowledge about learning domains and their pedagogy. Across different domains and pedagogical approaches, a wide variety of methods have been taken: a scope that would be better-covered by an encyclopedia rather than a single book. Since this section could not possibly cover every possible approach to domain modeling, the chapters within this section were instead chosen to cover a representative range of fundamentally-different approaches to domain modeling.},
keywords = {Learning Sciences, UARC},
pubstate = {published},
tppubtype = {incollection}
}
LeGendre, Chloe; Yu, Xueming; Debevec, Paul
Optimal LED selection for multispectral lighting reproduction Proceedings Article
In: Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016, ACM, New York, NY, 2016, ISBN: 978-1-4503-4371-8.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{legendre_optimal_2016,
title = {Optimal {LED} selection for multispectral lighting reproduction},
author = {Chloe LeGendre and Xueming Yu and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?id=2945150},
doi = {10.1145/2945078.2945150},
isbn = {978-1-4503-4371-8},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016},
publisher = {ACM},
address = {New York, NY},
abstract = {We demonstrate the sufficiency of using as few as five LEDs of distinct spectra for multispectral lighting reproduction and solve for the optimal set of five from 11 such commercially available LEDs. We leverage published spectral reflectance, illuminant, and camera spectral sensitivity datasets to show that two approaches of lighting reproduction, matching illuminant spectra directly and matching material color appearance observed by one or more cameras or a human observer, yield the same LED selections. Our proposed optimal set of five LEDs includes red, green, and blue with narrow emission spectra, along with white and amber with broader spectra.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S.; Demski, Abram; Ustun, Volkan
Rethinking Sigma’s Graphical Architecture: An Extension to Neural Networks Proceedings Article
In: International Conference on Artificial General Intelligence, pp. 84–94, Springer, New York, NY, 2016, ISBN: 978-3-319-41649-6.
Abstract | Links | BibTeX | Tags: CogArch, UARC, Virtual Humans
@inproceedings{rosenbloom_rethinking_2016,
title = {Rethinking {Sigma}'s Graphical Architecture: An Extension to Neural Networks},
author = {Paul S. Rosenbloom and Abram Demski and Volkan Ustun},
url = {http://link.springer.com/chapter/10.1007/978-3-319-41649-6_9},
doi = {10.1007/978-3-319-41649-6_9},
isbn = {978-3-319-41649-6},
year = {2016},
date = {2016-07-01},
booktitle = {International Conference on Artificial General Intelligence},
volume = {9782},
pages = {84--94},
publisher = {Springer},
address = {New York, NY},
abstract = {The status of Sigma’s grounding in graphical models is challenged by the ways in which their semantics has been violated while incorporating rule-based reasoning into them. This has led to a rethinking of what goes on in its graphical architecture, with results that include a straightforward extension to feedforward neural networks (although not yet with learning).},
keywords = {CogArch, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jalal-Kamali, Ali; Pynadath, David V.
Toward a Bayesian Network Model of Events in International Relations Proceedings Article
In: Proceedings of the 2016 International Conference on Social Computing, Behavioral-Cultural Modeling & Prediction and Behavior Representation in Modeling and Simulation, Springer, Washington D.C., 2016.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@inproceedings{jalal-kamali_toward_2016,
title = {Toward a {Bayesian} Network Model of Events in International Relations},
author = {Ali Jalal-Kamali and David V. Pynadath},
url = {https://books.google.com/books?id=_HGADAAAQBAJ&pg=PA321&lpg=PA321&dq=Toward+a+Bayesian+network+model+of+events+in+international+relations&source=bl&ots=JBOYm4KCF2&sig=eqmzgrWXwDroEtoLyxZxSjxDIAs&hl=en&sa=X&ved=0ahUKEwiIgoSS8o_PAhUUzGMKHWnaDlEQ6AEILjAC#v=onepage&q=Toward%20a%20Bayesian%20network%20model%20of%20events%20in%20international%20relations&f=false},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the 2016 International Conference on Social Computing, Behavioral-Cultural Modeling & Prediction and Behavior Representation in Modeling and Simulation},
publisher = {Springer},
address = {Washington D.C.},
abstract = {Formal models of international relations have a long history of exploiting representations and algorithms from artificial intelligence. As more news sources move online, there is an increasing wealth of data that can inform the creation of such models. The Global Database of Events, Language, and Tone (GDELT) extracts events from news articles from around the world, where the events represent actions taken by geopolitical actors, reflecting the actors’ relationships. We can apply existing machine-learning algorithms to automatically construct a Bayesian network that represents the distribution over the actions between actors. Such a network model allows us to analyze the interdependencies among events and generate the relative likelihoods of different events. By examining the accuracy of the learned network over different years and different actor pairs, we are able to identify aspects of international relations from a data-driven approach.We are also able to identify weaknesses in the model that suggest needs for additional domain knowledge.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Nazari, Zahra; Gratch, Jonathan
Predictive Models of Malicious Behavior in Human Negotiations Journal Article
In: Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence, pp. 855–861, 2016.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC, Virtual Humans
@article{nazari_predictive_2016,
title = {Predictive Models of Malicious Behavior in Human Negotiations},
author = {Zahra Nazari and Jonathan Gratch},
url = {http://www.ijcai.org/Proceedings/16/Papers/126.pdf},
year = {2016},
date = {2016-07-01},
journal = {Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence},
pages = {855--861},
abstract = {Human and artificial negotiators must exchange information to find efficient negotiated agreements, but malicious actors could use deception to gain unfair advantage. The misrepresentation game is a game-theoretic formulation of how deceptive actors could gain disproportionate rewards while seeming honest and fair. Previous research proposed a solution to this game but this required restrictive assumptions that might render it inapplicable to realworld settings. Here we evaluate the formalism against a large corpus of human face-to-face negotiations. We confirm that the model captures how dishonest human negotiators win while seeming fair, even in unstructured negotiations. We also show that deceptive negotiators give-off signals of their malicious behavior, providing the opportunity for algorithms to detect and defeat this malicious tactic.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Rosenbloom, Paul S.; Demski, Abram; Ustun, Volkan
The Sigma Cognitive Architecture and System: Towards Functionally Elegant Grand Unification Journal Article
In: Journal of Artificial General Intelligence, 2016, ISSN: 1946-0163.
Abstract | Links | BibTeX | Tags: CogArch, UARC, Virtual Humans
@article{rosenbloom_sigma_2016,
title = {The {Sigma} Cognitive Architecture and System: Towards Functionally Elegant Grand Unification},
author = {Paul S. Rosenbloom and Abram Demski and Volkan Ustun},
url = {http://www.degruyter.com/view/j/jagi.ahead-of-print/jagi-2016-0001/jagi-2016-0001.xml},
doi = {10.1515/jagi-2016-0001},
issn = {1946-0163},
year = {2016},
date = {2016-07-01},
journal = {Journal of Artificial General Intelligence},
abstract = {Sigma (Σ) is a cognitive architecture and system whose development is driven by a combination of four desiderata: grand unification, generic cognition, functional elegance, and sufficient efficiency. Work towards these desiderata is guided by the graphical architecture hypothesis, that key to progress on them is combining what has been learned from over three decades’ worth of separate work on cognitive architectures and graphical models. In this article, these four desiderata are motivated and explained, and then combined with the graphical architecture hypothesis to yield a rationale for the development of Sigma. The current state of the cognitive architecture is then introduced in detail, along with the graphical architecture that sits below it and implements it. Progress in extending Sigma beyond these architectures and towards a full cognitive system is then detailed in terms of both a systematic set of higher level cognitive idioms that have been developed and several virtual humans that are built from combinations of these idioms. Sigma as a whole is then analyzed in terms of how well the progress to date satisfies the desiderata. This article thus provides the first full motivation, presentation and analysis of Sigma, along with a diversity of more specific results that have been generated during its development.},
keywords = {CogArch, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Artstein, Ron; Gainer, Alesia; Georgila, Kallirroi; Leuski, Anton; Shapiro, Ari; Traum, David
New Dimensions in Testimony Demonstration Proceedings Article
In: Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations, pp. 32–36, Association for Computational Linguistics, San Diego, California, 2016.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{artstein_new_2016,
title = {New Dimensions in Testimony Demonstration},
author = {Ron Artstein and Alesia Gainer and Kallirroi Georgila and Anton Leuski and Ari Shapiro and David Traum},
url = {http://www.aclweb.org/anthology/N16-3007},
year = {2016},
date = {2016-06-01},
booktitle = {Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations},
pages = {32--36},
publisher = {Association for Computational Linguistics},
address = {San Diego, California},
abstract = {New Dimensions in Testimony is a prototype dialogue system that allows users to conduct a conversation with a real person who is not available for conversation in real time. Users talk to a persistent representation of Holocaust survivor Pinchas Gutter on a screen, while a dialogue agent selects appropriate responses to user utterances from a set of pre-recorded video statements, simulating a live conversation. The technology is similar to existing conversational agents, but to our knowledge this is the first system to portray a real person. The demonstration will show the system on a range of screens (from mobile phones to large TVs), and allow users to have individual conversations with Mr. Gutter.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Phan, Thai; Bolas, Mark; Krum, David M.
User Perceptions of a Virtual Human Over Mobile Video Chat Interactions Book Section
In: Human-Computer Interaction. Novel User Experiences, vol. 9733, pp. 107–118, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-39512-8 978-3-319-39513-5.
Abstract | Links | BibTeX | Tags: MedVR, MxR, UARC
@incollection{kang_user_2016,
title = {User Perceptions of a Virtual Human Over Mobile Video Chat Interactions},
author = {Sin-Hwa Kang and Thai Phan and Mark Bolas and David M. Krum},
url = {http://link.springer.com/chapter/10.1007/978-3-319-39513-5_10},
doi = {10.1007/978-3-319-39513-5_10},
isbn = {978-3-319-39512-8 978-3-319-39513-5},
year = {2016},
date = {2016-06-01},
booktitle = {Human-Computer Interaction. Novel User Experiences},
volume = {9733},
pages = {107--118},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {We believe that virtual humans, presented over video chat services, such as Skype, and delivered using smartphones, can be an effective way to deliver innovative applications where social interactions are important, such as counseling and coaching. To explore this subject, we have built a hardware and software apparatus that allows virtual humans to initiate, receive, and interact over video calls using Skype or any similar service. With this platform, we conducted two experiments to investigate the applications and characteristics of virtual humans that interact over mobile video. In Experiment 1, we investigated user reactions to the physical realism of the background scene in which a virtual human was displayed. In Experiment 2, we examined how virtual characters can establish and maintain longer term relationships with users, using ideas from Social Exchange Theory to strengthen bonds between interactants. Experiment 2 involved repeated interactions with a virtual human over a period of time. Both studies used counseling-style interactions with users. The results demonstrated that males were more attracted socially to a virtual human that was presented over a realistic background than a featureless background while females were more socially attracted to a virtual human with a less realistic featureless background. The results further revealed that users felt the virtual human was a compassionate partner when they interacted with the virtual human over multiple calls, rather than just a single call.},
keywords = {MedVR, MxR, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Nye, Benjamin D.
ITS, The End of the World as We Know It: Transitioning AIED into a Service-Oriented Ecosystem Journal Article
In: International Journal of Artificial Intelligence in Education, vol. 26, no. 2, pp. 756–770, 2016, ISSN: 1560-4292, 1560-4306.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC
@article{nye_its_2016,
title = {{ITS}, The End of the World as We Know It: Transitioning {AIED} into a Service-Oriented Ecosystem},
author = {Benjamin D. Nye},
url = {http://link.springer.com/10.1007/s40593-016-0098-8},
doi = {10.1007/s40593-016-0098-8},
issn = {1560-4292, 1560-4306},
year = {2016},
date = {2016-06-01},
journal = {International Journal of Artificial Intelligence in Education},
volume = {26},
number = {2},
pages = {756--770},
abstract = {Advanced learning technologies are reaching a new phase of their evolution where they are finally entering mainstream educational contexts, with persistent user bases. However, as AIED scales, it will need to follow recent trends in service-oriented and ubiquitous computing: breaking AIED platforms into distinct services that can be composed for different platforms (web, mobile, etc.) and distributed across multiple systems. This will represent a move from learning platforms to an ecosystem of interacting learning tools. Such tools will enable new opportunities for both user-adaptation and experimentation. Traditional macro-adaptation (problem selection) and step-based adaptation (hints and feedback) will be extended by meta-adaptation (adaptive system selection) and micro-adaptation (event-level optimization). The existence of persistent and widely-used systems will also support new paradigms for experimentation in education, allowing researchers to understand interactions and boundary conditions for learning principles. New central research questions for the field will also need to be answered due to these changes in the AIED landscape.},
keywords = {Learning Sciences, UARC},
pubstate = {published},
tppubtype = {article}
}
Ustun, Volkan; Rosenbloom, Paul
Towards Truly Autonomous Synthetic Characters with the Sigma Cognitive Architecture Book Section
In: Integrating Cognitive Architectures into Virtual Character Design, pp. 213 – 237, IGI Global, Hershey, PA, 2016, ISBN: 978-1-5225-0454-2.
Abstract | Links | BibTeX | Tags: CogArch, UARC, Virtual Humans
@incollection{ustun_towards_2016,
title = {Towards Truly Autonomous Synthetic Characters with the {Sigma} Cognitive Architecture},
author = {Volkan Ustun and Paul Rosenbloom},
url = {http://services.igi-global.com/resolvedoi/resolve.aspx?doi=10.4018/978-1-5225-0454-2},
isbn = {978-1-5225-0454-2},
year = {2016},
date = {2016-06-01},
booktitle = {Integrating Cognitive Architectures into Virtual Character Design},
pages = {213--237},
publisher = {IGI Global},
address = {Hershey, PA},
abstract = {Realism is required not only for how synthetic characters look but also for how they behave. Many applications, such as simulations, virtual worlds, and video games, require computational models of intelligence that generate realistic and credible behavior for the participating synthetic characters. Sigma (Σ) is being built as a computational model of general intelligence with a long-term goal of understanding and replicating the architecture of the mind; i.e., the fixed structure underlying intelligent behavior. Sigma leverages probabilistic graphical models towards a uniform grand unification of not only traditional cognitive capabilities but also key non-cognitive aspects, creating unique opportunities for the construction of new kinds of non-modular behavioral models. These ambitions strive for the complete control of synthetic characters that behave as humanly as possible. In this paper, Sigma is introduced along with two disparate proof-of-concept virtual humans – one conversational and the other a pair of ambulatory agents – that demonstrate its diverse capabilities.},
keywords = {CogArch, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Swartout, William R.
Virtual Humans as Centaurs: Melding Real and Virtual Book Section
In: Virtual, Augmented and Mixed Reality, vol. 9740, pp. 356–359, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-39906-5 978-3-319-39907-2.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@incollection{swartout_virtual_2016,
title = {Virtual Humans as Centaurs: Melding Real and Virtual},
author = {William R. Swartout},
url = {http://link.springer.com/10.1007/978-3-319-39907-2_34},
isbn = {978-3-319-39906-5 978-3-319-39907-2},
year = {2016},
date = {2016-06-01},
booktitle = {Virtual, Augmented and Mixed Reality},
volume = {9740},
pages = {356--359},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Centaurs are man-machine teams that can work together on problems and can out-perform, either people or computers working alone in domains as varied as chess-playing and protein folding. But the centaur of Greek mythology was not a team, but rather a hybrid of man and horse with some of the characteristics of each. In this paper, we outline our efforts to build virtual humans, which might be considered hybrid centaurs, combining features of both people and machines. We discuss experimental evidence that shows that these virtual human hybrids can outperform both people and inanimate processes in some tasks such as medical interviewing.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Ghosh, Sayan; Laksana, Eugene; Morency, Louis-Philippe; Scherer, Stefan
Learning Representations of Affect from Speech Proceedings Article
In: ICLR 2016, ICLR, San Juan, Puerto Rico, 2016.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{ghosh_eugene_laksana_satan_learning_2016,
title = {Learning Representations of Affect from Speech},
author = {Sayan Ghosh and Eugene Laksana and Louis-Philippe Morency and Stefan Scherer},
url = {http://arxiv.org/pdf/1511.04747.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {ICLR 2016},
publisher = {ICLR},
address = {San Juan, Puerto Rico},
abstract = {There has been a lot of prior work on representation learning for speech recognition applications, but not much emphasis has been given to an investigation of effective representations of affect from speech, where the paralinguistic elements of speech are separated out from the verbal content. In this paper, we explore denoising autoencoders for learning paralinguistic attributes, i.e. categorical and dimensional affective traits from speech. We show that the representations learnt by the bottleneck layer of the autoencoder are highly discriminative of activation intensity and at separating out negative valence (sadness and anger) from positive valence (happiness). We experiment with different input speech features (such as FFT and log-mel spectrograms with temporal context windows), and different autoencoder architectures (such as stacked and deep autoencoders). We also learn utterance specific representations by a combination of denoising autoencoders and BLSTM based recurrent autoencoders. Emotion classification is performed with the learnt temporal/dynamic representations to evaluate the quality of the representations. Experiments on a well-established real-life speech dataset (IEMOCAP) show that the learnt representations are comparable to state of the art feature extractors (such as voice quality features and MFCCs) and are competitive with state-of-the-art approaches at emotion and dimensional affect recognition.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Collins, Kathryn J.; Traum, David
Towards A Multi-Dimensional Taxonomy Of Stories In Dialogue Proceedings Article
In: Proceedings of the Language Resources and Evaluation Conference (LREC), pp. 118–124, European Language Resources Association, Portorož, Slovenia, 2016, ISBN: 978-2-9517408-9-1.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{collins_towards_2016,
title = {Towards A Multi-Dimensional Taxonomy Of Stories In Dialogue},
author = {Kathryn J. Collins and David Traum},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/354_Paper.pdf},
isbn = {978-2-9517408-9-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Language Resources and Evaluation Conference (LREC)},
pages = {118--124},
publisher = {European Language Resources Association},
address = {Portorož, Slovenia},
abstract = {In this paper, we present a taxonomy of stories told in dialogue. We based our scheme on prior work analyzing narrative structure and method of telling, relation to storyteller identity, as well as some categories particular to dialogue, such as how the story gets introduced. Our taxonomy currently has 5 major dimensions, with most having sub-dimensions - each dimension has an associated set of dimension-specific labels. We adapted an annotation tool for this taxonomy and have annotated portions of two different dialogue corpora, Switchboard and the Distress Analysis Interview Corpus. We present examples of some of the tags and concepts with stories from Switchboard, and some initial statistics of frequencies of the tags.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Casas, Dan; Feng, Andrew; Alexander, Oleg; Fyffe, Graham; Debevec, Paul; Ichikari, Ryosuke; Li, Hao; Olszewski, Kyle; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshape Modeling from RGB-D Sensors Proceedings Article
In: Proceedings of the 29th International Conference on Computer Animation and Social Agents, pp. 121–129, ACM Press, Geneva, Switzerland, 2016, ISBN: 978-1-4503-4745-7.
Abstract | Links | BibTeX | Tags: Graphics, MxR, UARC, Virtual Humans
@inproceedings{casas_rapid_2016,
title = {Rapid Photorealistic Blendshape Modeling from {RGB-D} Sensors},
author = {Dan Casas and Andrew Feng and Oleg Alexander and Graham Fyffe and Paul Debevec and Ryosuke Ichikari and Hao Li and Kyle Olszewski and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2915926.2915936},
doi = {10.1145/2915926.2915936},
isbn = {978-1-4503-4745-7},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 29th International Conference on Computer Animation and Social Agents},
pages = {121--129},
publisher = {ACM Press},
address = {Geneva, Switzerland},
abstract = {Creating and animating realistic 3D human faces is an important element of virtual reality, video games, and other areas that involve interactive 3D graphics. In this paper, we propose a system to generate photorealistic 3D blendshape-based face models automatically using only a single consumer RGB-D sensor. The capture and processing requires no artistic expertise to operate, takes 15 seconds to capture and generate a single facial expression, and approximately 1 minute of processing time per expression to transform it into a blendshape model. Our main contributions include a complete end-to-end pipeline for capturing and generating photorealistic blendshape models automatically and a registration method that solves dense correspondences between two face scans by utilizing facial landmarks detection and optical flows. We demonstrate the effectiveness of the proposed method by capturing different human subjects with a variety of sensors and puppeteering their 3D faces with real-time facial performance retargeting. The rapid nature of our method allows for just-in-time construction of a digital face. To that end, we also integrated our pipeline with a virtual reality facial performance capture system that allows dynamic embodiment of the generated faces despite partial occlusion of the user’s real face by the head-mounted display.},
keywords = {Graphics, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chollet, Mathieu; Wörtwein, Torsten; Morency, Louis-Philippe; Scherer, Stefan
A Multimodal Corpus for the Assessment of Public Speaking Ability and Anxiety Proceedings Article
In: Proceedings of the LREC 2016, Tenth International Conference on Language Resources and Evaluation, pp. 488–495, European Language Resources Association, Portoroz, Slovenia, 2016, ISBN: 978-2-9517408-9-1.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{chollet_multimodal_2016,
title = {A Multimodal Corpus for the Assessment of Public Speaking Ability and Anxiety},
author = {Mathieu Chollet and Torsten Wörtwein and Louis-Philippe Morency and Stefan Scherer},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/599_Paper.pdf},
isbn = {978-2-9517408-9-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the LREC 2016, Tenth International Conference on Language Resources and Evaluation},
pages = {488--495},
publisher = {European Language Resources Association},
address = {Portorož, Slovenia},
abstract = {The ability to efficiently speak in public is an essential asset for many professions and is used in everyday life. As such, tools enabling the improvement of public speaking performance and the assessment and mitigation of anxiety related to public speaking would be very useful. Multimodal interaction technologies, such as computer vision and embodied conversational agents, have recently been investigated for the training and assessment of interpersonal skills. Once central requirement for these technologies is multimodal corpora for training machine learning models. This paper addresses the need of these technologies by presenting and sharing a multimodal corpus of public speaking presentations. These presentations were collected in an experimental study investigating the potential of interactive virtual audiences for public speaking training. This corpus includes audio-visual data and automatically extracted features, measures of public speaking anxiety and personality, annotations of participants’ behaviors and expert ratings of behavioral aspects and overall performance of the presenters. We hope this corpus will help other research teams in developing tools for supporting public speaking training.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Fyffe, Graham; Graham, Paul; Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Near-Instant Capture of High-Resolution Facial Geometry and Reflectance Journal Article
In: Computer Graphics Forum, 2016, ISSN: 1467-8659.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@article{fyffe_near-instant_2016,
  author   = {Graham Fyffe and Paul Graham and Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
  title    = {Near-Instant Capture of High-Resolution Facial Geometry and Reflectance},
  journal  = {Computer Graphics Forum},
  year     = {2016},
  date     = {2016-05-01},
  doi      = {10.1111/cgf.12837},
  issn     = {1467-8659},
  url      = {http://onlinelibrary.wiley.com/doi/10.1111/cgf.12837/abstract},
  abstract = {We present a near-instant method for acquiring facial geometry and reflectance using a set of commodity DSLR cameras and flashes. Our setup consists of twenty-four cameras and six flashes which are fired in rapid succession with subsets of the cameras. Each camera records only a single photograph and the total capture time is less than the 67ms blink reflex. The cameras and flashes are specially arranged to produce an even distribution of specular highlights on the face. We employ this set of acquired images to estimate diffuse color, specular intensity, specular exponent, and surface orientation at each point on the face. We further refine the facial base geometry obtained from multi-view stereo using estimated diffuse and specular photometric information. This allows final submillimeter surface mesostructure detail to be obtained via shape-from-specularity. The final system uses commodity components and produces models suitable for authoring high-quality digital human characters.},
  keywords = {Graphics, UARC},
  pubstate = {published},
  tppubtype = {article}
}
Pincus, Eli; Traum, David
Towards Automatic Identification of Effective Clues for Team Word-Guessing Games Proceedings Article
In: Proceedings of the Language Resources and Evaluation Conference (LREC), pp. 2741–2747, European Language Resources Association, Portorož, Slovenia, 2016.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{pincus_towards_2016,
title = {Towards Automatic Identification of Effective Clues for Team Word-Guessing Games},
author = {Eli Pincus and David Traum},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/762_Paper.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Language Resources and Evaluation Conference (LREC)},
pages = {2741--2747},
publisher = {European Language Resources Association},
address = {Portorož, Slovenia},
abstract = {Team word-guessing games where one player, the clue-giver, gives clues attempting to elicit a target-word from another player, the receiver, are a popular form of entertainment and also used for educational purposes. Creating an engaging computational agent capable of emulating a talented human clue-giver in a timed word-guessing game depends on the ability to provide effective clues (clues able to elicit a correct guess from a human receiver). There are many available web resources and databases that can be mined for the raw material for clues for target-words; however, a large number of those clues are unlikely to be able to elicit a correct guess from a human guesser. In this paper, we propose a method for automatically filtering a clue corpus for effective clues for an arbitrary target-word from a larger set of potential clues, using machine learning on a set of features of the clues, including point-wise mutual information between a clue’s constituent words and a clue’s target-word. The results of the experiments significantly improve the average clue quality over previous approaches, and bring quality rates in-line with measures of human clue quality derived from a corpus of human-human interactions. The paper also introduces the data used to develop this method; audio recordings of people making guesses after having heard the clues being spoken by a synthesized voice (Pincus and Traum, 2016).},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gilani, Setareh Nasihati; Sheetz, Kraig; Lucas, Gale; Traum, David
What Kind of Stories Should a Virtual Human Swap? Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 1437–1438, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{nasihati_gilani_what_2016,
title = {What Kind of Stories Should a Virtual Human Swap?},
author = {Setareh Nasihati Gilani and Kraig Sheetz and Gale Lucas and David Traum},
url = {http://dl.acm.org/citation.cfm?id=2937198},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {1437--1438},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Stories are pervasive in conversation between people [5]. They are used to establish identity, pass on cultural heritage, and build rapport. Often stories are swapped when one conversational participant will reply to a story with a story. Stories are also told by virtual humans [1, 6, 2]. In creating or mining stories for a virtual human (VH) to tell, there are a number of considerations that come up about what kinds of stories should be told, and how the stories should be related to the virtual human's identity, such as whether the identity should be human or artificial, and whether the stories should be about the virtual human or about someone else. We designed a set of virtual human characters who can engage in a simple form of story-swapping. Each of the characters can engage in simple interactions such as greetings and closings and can respond to a set of ``ice-breaker'' questions, that might be used on a first date or similar ``get to know you'' encounter. For these questions the character's answer includes a story. We created 4 character response sets, to have all combinations of identity (human or artificial) and perspective (first person stories about the narrator, or third person stories about someone else). We also designed an experiment to try to explore the collective impact of above principles on people who interact with the characters. Participants interact with two of the above characters in a ``get to know you'' scenario. We investigate the degree of reciprocity where people respond to the character with their own stories, and also compare rapport of participants with the characters as well as the impressions of the character's personality.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Pynadath, David V.; Hill, Susan G.
The Impact of POMDP-Generated Explanations on Trust and Performance in Human-Robot Teams Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 997–1005, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
Abstract | Links | BibTeX | Tags: ARL, DoD, Social Simulation, UARC
@inproceedings{wang_impact_2016,
title = {The Impact of {POMDP}-Generated Explanations on Trust and Performance in Human-Robot Teams},
author = {Ning Wang and David V. Pynadath and Susan G. Hill},
url = {http://dl.acm.org/citation.cfm?id=2937071},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {997--1005},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Researchers have observed that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies have shown that hand-crafted explanations can help maintain effective team performance even when the system is less than 100% reliable. However, current explanation algorithms are not sufficient for making a robot's quantitative reasoning (in terms of both uncertainty and conflicting goals) transparent to human teammates. In this work, we develop a novel mechanism for robots to automatically generate explanations of reasoning based on Partially Observable Markov Decision Problems (POMDPs). Within this mechanism, we implement alternate natural-language templates and then measure their differential impact on trust and team performance within an agent-based online test-bed that simulates a human-robot team task. The results demonstrate that the added explanation capability leads to improvement in transparency, trust, and team performance. Furthermore, by observing the different outcomes due to variations in the robot's explanation content, we gain valuable insight that can help lead to refinement of explanation algorithms to further improve human-robot interaction.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Gratch, Jonathan
IAGO: Interactive Arbitration Guide Online Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 1510–1512, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{mell_iago_2016,
title = {{IAGO}: Interactive Arbitration Guide Online},
author = {Johnathan Mell and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=2937230},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {1510--1512},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Automated negotiation between two agents has been the subject of much research focused on optimization and efficiency. However, human-agent negotiation represents a field in which real-world considerations can be more fully explored. Furthermore, teaching negotiation and other interpersonal skills requires long periods of practice with open-ended dialogues and partners. The API presented in this paper represents a novel platform on which to conduct human-agent research and facilitate teaching negotiation tactics in a longitudinal way. We present a prototype demonstration that is real-time, rapidly distributable, and allows more actions than current platforms of negotiation.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Rosoff, Heather; John, Richard S.
Semi-Automated Construction of Decision-Theoretic Models of Human Behavior Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 891–899, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@inproceedings{pynadath_semi-automated_2016,
title = {Semi-Automated Construction of Decision-Theoretic Models of Human Behavior},
author = {David V. Pynadath and Heather Rosoff and Richard S. John},
url = {http://dl.acm.org/citation.cfm?id=2937055},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {891--899},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Multiagent social simulation provides a powerful mechanism for policy makers to understand the potential outcomes of their decisions before implementing them. However, the value of such simulations depends on the accuracy of their underlying agent models. In this work, we present a method for automatically exploring a space of decision-theoretic models to arrive at a multiagent social simulation that is consistent with human behavior data. We start with a factored Partially Observable Markov Decision Process (POMDP) whose states, actions, and reward capture the questions asked in a survey from a disaster response scenario. Using input from domain experts, we construct a set of hypothesized dependencies that may or may not exist in the transition probability function. We present an algorithm to search through each of these hypotheses, evaluate their accuracy with respect to the data, and choose the models that best reflect the observed behavior, including individual differences. The result is a mechanism for constructing agent models that are grounded in human behavior data, while still being able to support hypothetical reasoning that is the main advantage of multiagent social simulation.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Nazari, Zahra; Johnson, Emmanuel
The Misrepresentation Game: How to win at negotiation while seeming like a nice guy Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 728–737, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC, Virtual Humans
@inproceedings{gratch_misrepresentation_2016,
title = {The Misrepresentation Game: How to win at negotiation while seeming like a nice guy},
author = {Jonathan Gratch and Zahra Nazari and Emmanuel Johnson},
url = {http://dl.acm.org/citation.cfm?id=2937031},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {728--737},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Recently, interest has grown in agents that negotiate with people: to teach negotiation, to negotiate on behalf of people, and as a challenge problem to advance artificial social intelligence. Humans negotiate differently from algorithmic approaches to negotiation: people are not purely self-interested but place considerable weight on norms like fairness; people exchange information about their mental state and use this to judge the fairness of a social exchange; and people lie. Here, we focus on lying. We present an analysis of how people (or agents interacting with people) might optimally lie (maximally benefit themselves) while maintaining the illusion of fairness towards the other party. In doing so, we build on concepts from game theory and the preference-elicitation literature, but apply these to human, not rational, behavior. Our findings demonstrate clear benefits to lying and provide empirical support for a heuristic – the “fixed-pie lie” – that substantially enhances the efficiency of such deceptive algorithms. We conclude with implications and potential defenses against such manipulative techniques.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Swartout, William; Nye, Benjamin D.; Hartholt, Arno; Reilly, Adam; Graesser, Arthur C.; VanLehn, Kurt; Wetzel, Jon; Liewer, Matt; Morbini, Fabrizio; Morgan, Brent; Wang, Lijia; Benn, Grace; Rosenberg, Milton
Designing a Personal Assistant for Life-Long Learning (PAL3) Proceedings Article
In: Proceedings of The Twenty-Ninth International Flairs Conference, pp. 491–496, AAAI Press, Key Largo, FL, 2016, ISBN: 978-1-57735-756-8.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC, Virtual Humans
@inproceedings{swartout_designing_2016,
title = {Designing a Personal Assistant for Life-Long Learning ({PAL3})},
author = {William Swartout and Benjamin D. Nye and Arno Hartholt and Adam Reilly and Arthur C. Graesser and Kurt VanLehn and Jon Wetzel and Matt Liewer and Fabrizio Morbini and Brent Morgan and Lijia Wang and Grace Benn and Milton Rosenberg},
url = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS16/paper/view/12793},
isbn = {978-1-57735-756-8},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of The Twenty-Ninth International {FLAIRS} Conference},
pages = {491--496},
publisher = {AAAI Press},
address = {Key Largo, FL},
abstract = {Learners’ skills decay during gaps in instruction, since they lack the structure and motivation to continue studying. To meet this challenge, the PAL3 system was designed to accompany a learner throughout their career and mentor them to build and maintain skills through: 1) the use of an embodied pedagogical agent (Pal), 2) a persistent learning record that drives a student model which estimates forgetting, 3) an adaptive recommendation engine linking to both intelligent tutors and traditional learning resources, and 4) game-like mechanisms to promote engagement (e.g., leaderboards, effort-based point rewards, unlocking customizations). The design process for PAL3 is discussed, from the perspective of insights and revisions based on a series of formative feedback and evaluation sessions.},
keywords = {Learning Sciences, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Traum, David
A Semi-automated Evaluation Metric for Dialogue Model Coherence Book Section
In: Situated Dialog in Speech-Based Human-Computer Interaction, pp. 217–225, Springer International Publishing, Cham, 2016, ISBN: 978-3-319-21833-5 978-3-319-21834-2.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@incollection{gandhe_semi-automated_2016,
title = {A Semi-automated Evaluation Metric for Dialogue Model Coherence},
author = {Sudeep Gandhe and David Traum},
url = {http://link.springer.com/10.1007/978-3-319-21834-2_19},
isbn = {978-3-319-21833-5 978-3-319-21834-2},
year = {2016},
date = {2016-04-01},
booktitle = {Situated Dialog in Speech-Based Human-Computer Interaction},
pages = {217--225},
publisher = {Springer International Publishing},
address = {Cham},
abstract = {We propose a new metric, Voted Appropriateness, which can be used to automatically evaluate dialogue policy decisions, once some wizard data has been collected. We show that this metric outperforms a previously proposed metric Weak agreement. We also present a taxonomy for dialogue model evaluation schemas, and orient our new metric within this taxonomy.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
TALBOT, Thomas B.; KALISCH, Nicolai; CHRISTOFFERSEN, Kelly; LUCAS, Gale; FORBELL, Eric
Natural Language Understanding Performance & Use Considerations in Virtual Medical Encounters. Journal Article
In: Medicine Meets Virtual Reality 22: NextMed/MMVR22, vol. 220, pp. 407–413, 2016.
Abstract | Links | BibTeX | Tags: MedVR, UARC
@article{talbot_natural_2016,
title = {Natural Language Understanding Performance \& Use Considerations in Virtual Medical Encounters},
author = {Thomas B. Talbot and Nicolai Kalisch and Kelly Christoffersen and Gale Lucas and Eric Forbell},
url = {http://books.google.com/books?hl=en&lr=&id=sLgtDAAAQBAJ&oi=fnd&pg=PA407&dq=%22through+regular+web+browsers+and+is+capable+of+multiple+types+of%22+%22practice+targeting+diagnostic+interviews.+A+natural+language+interview%22+%22narrative+statement+based+upon+dialog+context.+The+dialog+manager%27s%22+&ots=Ej8L8hxLlb&sig=GMnqEb5n7CB9x1lWE4gfe5_4n8o},
doi = {10.3233/978-1-61499-625-5-407},
year = {2016},
date = {2016-04-01},
journal = {Medicine Meets Virtual Reality 22: NextMed/MMVR22},
volume = {220},
pages = {407--413},
abstract = {A virtual standardized patient (VSP) prototype was tested for natural language understanding (NLU) performance. The conversational VSP was evaluated in a controlled 61 subject study over four repetitions of a patient case. The prototype achieved more than 92% appropriate response rate from naïve users on their first attempt and results were stable by their fourth case repetition. This level of performance exceeds prior efforts and is at a level comparable of accuracy as seen in human conversational patient training, with caveats. This level of performance was possible due to the use of a unified medical taxonomy underpinning that allows virtual patient language training to be applied to all cases in our system as opposed to benefiting a single patient case.},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
RIZZO, Albert; LUCAS, Gale; GRATCH, Jonathan; STRATOU, Giota; MORENCY, Louis-Philippe; CHAVEZ, Kenneth; SHILLING, Russ; SCHERER, Stefan
Automatic Behavior Analysis During a Clinical Interview with a Virtual Human. Journal Article
In: Medicine Meets Virtual Reality 22: NextMed/MMVR22, vol. 220, pp. 316–322, 2016.
Abstract | Links | BibTeX | Tags: MedVR, UARC, Virtual Humans
@article{rizzo_automatic_2016,
title = {Automatic Behavior Analysis During a Clinical Interview with a Virtual Human},
author = {Albert Rizzo and Gale Lucas and Jonathan Gratch and Giota Stratou and Louis-Philippe Morency and Kenneth Chavez and Russ Shilling and Stefan Scherer},
url = {http://books.google.com/books?hl=en&lr=&id=sLgtDAAAQBAJ&oi=fnd&pg=PA316&dq=%22captured+across+a+20+minute+interview.+Results+from+of+sample+of+service%22+%22technology+for+clinical+purposes.+Recent+shifts+in+the+social+and%22+%22needed+to+create+VH+systems+is+now+driving+application+development+across%22+&ots=Ej8M4iuPfb&sig=Ad6Z3DPSwN3qA2gMDKWPe1YTPhg},
year = {2016},
date = {2016-04-01},
journal = {Medicine Meets Virtual Reality 22: NextMed/MMVR22},
volume = {220},
pages = {316--322},
abstract = {SimSensei is a Virtual Human (VH) interviewing platform that uses off-the-shelf sensors (i.e., webcams, Microsoft Kinect and a microphone) to capture and interpret real-time audiovisual behavioral signals from users interacting with the VH system. The system was specifically designed for clinical interviewing and health care support by providing a face-to-face interaction between a user and a VH that can automatically react to the inferred state of the user through analysis of behavioral signals gleaned from the user’s facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human-to-human interaction and communication, SimSensei aims to capture and infer user state from signals generated from user non-verbal communication to improve engagement between a VH and a user and to quantify user state from the data captured across a 20 minute interview. Results from of sample of service members (SMs) who were interviewed before and after a deployment to Afghanistan indicate that SMs reveal more PTSD symptoms to the VH than they report on the Post Deployment Health Assessment. Pre/Post deployment facial expression analysis indicated more sad expressions and few happy expressions at post deployment.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Azmandian, Mahdi; Grechkin, Timofey; Bolas, Mark; Suma, Evan
The Redirected Walking Toolkit: A Unified Development Platform for Exploring Large Virtual Environments Proceedings Article
In: 2nd Workshop on Everyday Virtual Reality, IEEE, Greenville, SC, 2016.
Abstract | Links | BibTeX | Tags: MxR, UARC
@inproceedings{azmandian_redirected_2016,
author = {Mahdi Azmandian and Timofey Grechkin and Mark Bolas and Evan Suma},
title = {The Redirected Walking Toolkit: A Unified Development Platform for Exploring Large Virtual Environments},
booktitle = {2nd Workshop on Everyday Virtual Reality},
publisher = {IEEE},
address = {Greenville, SC},
year = {2016},
date = {2016-03-01},
url = {http://www.adalsimeone.me/papers/WEVR2016/WEVR2016_Azmandian.pdf},
abstract = {With the imminent emergence of low-cost tracking solutions, everyday VR users will soon experience the enhanced immersion of natural walking. Even with consumer-grade room-scale tracking, exploring large virtual environments can be made possible using a software solution known as redirected walking. Wide adoption of this technique has been hindered by the complexity and subtleties involved in successfully deploying redirection. To address this matter, we introduce the Redirected Walking Toolkit, to serve as a unified platform for developing, benchmarking, and deploying redirected walking algorithms. Our design enables seamless integration with standard virtual reality configurations, requiring minimal setup effort for content developers.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Pynadath, David V.
Towards a Computational Model of Human Opinion Dynamics in Response to Real-World Events Proceedings Article
In: Proceedings of The 29th International FLAIRS Conference, pp. 44–49, AAAI Press, Key Largo, FL, 2016.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC, Virtual Humans
@inproceedings{georgila_towards_2016,
title = {Towards a Computational Model of Human Opinion Dynamics in Response to Real-World Events},
author = {Kallirroi Georgila and David V. Pynadath},
url = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS16/paper/view/12960/12539},
year = {2016},
date = {2016-03-01},
booktitle = {Proceedings of The 29th International {FLAIRS} Conference},
pages = {44--49},
publisher = {AAAI Press},
address = {Key Largo, FL},
abstract = {Accurate multiagent social simulation requires a computational model of how people incorporate their observations of real-world events into their beliefs about the state of their world. Current methods for creating such agent-based models typically rely on manual input that can be both burdensome and subjective. In this investigation, we instead pursue automated methods that can translate available data into the desired computational models. For this purpose, we use a corpus of real-world events in combination with longitudinal public opinion polls on a variety of opinion issues. We perform two experiments using automated methods taken from the literature. In our first experiment, we train maximum entropy classifiers to model changes in opinion scores as a function of real-world events. We measure and analyze the accuracy of our learned classifiers by comparing the opinion scores they generate against the opinion scores occurring in a held-out subset of our corpus. In our second experiment, we learn Bayesian networks to capture the same function. We then compare the dependency structures induced by the two methods to identify the event features that have the most significant effect on changes in public opinion.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Azmandian, Mahdi; Grechkin, Timofey; Bolas, Mark; Suma, Evan
Automated Path Prediction for Redirected Walking Using Navigation Meshes Proceedings Article
In: 2016 IEEE Symposium on 3D User Interfaces (3DUI), pp. 63–66, IEEE, Greenville, SC, 2016.
Abstract | Links | BibTeX | Tags: MxR, UARC
@inproceedings{azmandian_automated_2016,
title = {Automated Path Prediction for Redirected Walking Using Navigation Meshes},
author = {Mahdi Azmandian and Timofey Grechkin and Mark Bolas and Evan Suma},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7460032},
doi = {10.1109/3DUI.2016.7460032},
year = {2016},
date = {2016-03-01},
booktitle = {2016 IEEE Symposium on 3D User Interfaces (3DUI)},
pages = {63--66},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {Redirected walking techniques have been introduced to overcome physical space limitations for natural locomotion in virtual reality. These techniques decouple real and virtual user trajectories by subtly steering the user away from the boundaries of the physical space while maintaining the illusion that the user follows the intended virtual path. Effectiveness of redirection algorithms can significantly improve when a reliable prediction of the user's future virtual path is available. In current solutions, the future user trajectory is predicted based on non-standardized manual annotations of the environment structure, which is both tedious and inflexible. We propose a method for automatically generating environment annotation graphs and predicting the user trajectory using navigation meshes. We discuss the integration of this method with existing redirected walking algorithms such as FORCE and MPCRed. Automated annotation of the virtual environment's structure enables simplified deployment of these algorithms in any virtual environment.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Silver, Kenneth
Ethics for a Combined Human-Machine Dialogue Agent Proceedings Article
In: Ethical and Moral Considerations in Non-Human Agents: Papers from the AAAI Spring Symposium, pp. 184–189, AAAI Press, Stanford, California, 2016.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{artstein_ethics_2016,
author = {Ron Artstein and Kenneth Silver},
title = {Ethics for a Combined Human-Machine Dialogue Agent},
booktitle = {Ethical and Moral Considerations in Non-Human Agents: Papers from the AAAI Spring Symposium},
pages = {184–189},
publisher = {AAAI Press},
address = {Stanford, California},
year = {2016},
date = {2016-03-01},
url = {http://www.aaai.org/ocs/index.php/SSS/SSS16/paper/viewFile/12706/11948},
abstract = {We discuss philosophical and ethical issues that arise from a dialogue system intended to portray a real person, using recordings of the person together with a machine agent that selects recordings during a synchronous conversation with a user. System output may count as actions of the speaker if the speaker intends to communicate with users and the outputs represent what the speaker would have chosen to say in context; in such cases the system can justifiably be said to be holding a conversation that is offset in time. The autonomous agent may at times misrepresent the speaker’s intentions, and such failures are analogous to good-faith misunderstandings. The user may or may not need to be informed that the speaker is not organically present, depending on the application.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Nilsson, Niels; Suma, Evan; Nordahl, Rolf; Bolas, Mark; Serafin, Stefania
Estimation of Detection Thresholds for Audiovisual Rotation Gains Proceedings Article
In: IEEE Virtual Reality 2016, pp. ID: A22, IEEE, Greenville, SC, 2016.
Abstract | Links | BibTeX | Tags: MxR, UARC
@inproceedings{nilsson_estimation_2016,
title = {Estimation of Detection Thresholds for Audiovisual Rotation Gains},
author = {Niels Nilsson and Evan Suma and Rolf Nordahl and Mark Bolas and Stefania Serafin},
url = {http://ieeevr.org/2016/posters/},
year = {2016},
date = {2016-03-01},
booktitle = {IEEE Virtual Reality 2016},
note = {Poster ID: A22},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {Redirection techniques allow users to explore large virtual environments on foot while remaining within a limited physical space. However, research has primarily focused on redirection through manipulation of visual stimuli. We describe a within-subjects study (n=31) exploring if participants’ ability to detect differences between real and virtual rotations is influenced by the addition of sound that is spatially aligned with its virtual source. The results revealed similar detection thresholds for conditions involving moving audio, static audio, and no audio. This may be viewed as an indication of visual dominance during scenarios such as the one used for the current study.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Krum, David M.; Kang, Sin-Hwa; Phan, Thai; Dukes, Lauren Cairco; Bolas, Mark
Head Mounted Projection for Enhanced Gaze in Social Interactions Proceedings Article
In: 2016 IEEE Virtual Reality (VR), pp. 209–210, IEEE, Greenville, SC, 2016.
Abstract | Links | BibTeX | Tags: MedVR, MxR, UARC
@inproceedings{krum_head_2016,
author = {David M. Krum and Sin-Hwa Kang and Thai Phan and Lauren Cairco Dukes and Mark Bolas},
title = {Head Mounted Projection for Enhanced Gaze in Social Interactions},
booktitle = {2016 IEEE Virtual Reality (VR)},
pages = {209–210},
publisher = {IEEE},
address = {Greenville, SC},
doi = {10.1109/VR.2016.7504727},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7504727},
year = {2016},
date = {2016-03-01},
abstract = {Projected displays can present life-sized imagery of a virtual human character that can be seen by multiple observers. However, typical projected displays can only render that virtual human from a single viewpoint, regardless of whether head tracking is employed. This results in the virtual human being rendered from an incorrect perspective for most individuals. This could cause perceptual miscues, such as the “Mona Lisa” effect, causing the virtual human to appear as if it is simultaneously gazing and pointing at all observers regardless of their location. This may be detrimental to training scenarios in which all trainees must accurately assess where the virtual human is looking or pointing a weapon. We discuss our investigations into the presentation of eye gaze using REFLCT, a previously introduced head mounted projective display. REFLCT uses head tracked, head mounted projectors and retroreflective screens to present personalized, perspective correct imagery to multiple users without the occlusion of a traditional head mounted display. We examined how head mounted projection for enhanced presentation of eye gaze might facilitate or otherwise affect social interactions during a multi-person guessing game of “Twenty Questions.”},
keywords = {MedVR, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Pynadath, David V.; Hill, Susan G.
Trust Calibration within a Human-Robot Team: Comparing Automatically Generated Explanations Proceedings Article
In: 2016 11th ACM/IEEE International Conference on Human-Robot Interaction (HRI), pp. 109–116, IEEE, New Zealand, 2016.
Abstract | Links | BibTeX | Tags: ARL, DoD, Social Simulation, UARC
@inproceedings{wang_trust_2016,
author = {Ning Wang and David V. Pynadath and Susan G. Hill},
title = {Trust Calibration within a Human-Robot Team: Comparing Automatically Generated Explanations},
booktitle = {2016 11th ACM/IEEE International Conference on Human-Robot Interaction (HRI)},
pages = {109–116},
publisher = {IEEE},
address = {New Zealand},
doi = {10.1109/HRI.2016.7451741},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7451741},
year = {2016},
date = {2016-03-01},
abstract = {Trust is a critical factor for achieving the full potential of human-robot teams. Researchers have theorized that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies have shown that hand-crafted explanations can help maintain trust when the system is less than 100% reliable. In this work, we leverage existing agent algorithms to provide a domain-independent mechanism for robots to automatically generate such explanations. To measure the explanation mechanism's impact on trust, we collected self-reported survey data and behavioral data in an agent-based online testbed that simulates a human-robot team task. The results demonstrate that the added explanation capability led to improvement in transparency, trust, and team performance. Furthermore, by observing the different outcomes due to variations in the robot's explanation content, we gain valuable insight that can help lead to refinement of explanation algorithms to further improve human-robot trust calibration.},
keywords = {ARL, DoD, Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Scherer, Stefan
Multimodal Behavior Analytics for Interactive Technologies Journal Article
In: KI - Künstliche Intelligenz, vol. 30, no. 1, pp. 91–92, 2016, ISSN: 0933-1875, 1610-1987.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{scherer_multimodal_2016,
title = {Multimodal Behavior Analytics for Interactive Technologies},
author = {Stefan Scherer},
url = {https://link.springer.com/article/10.1007/s13218-015-0401-0},
doi = {10.1007/s13218-015-0401-0},
issn = {0933-1875, 1610-1987},
year = {2016},
date = {2016-02-01},
journal = {KI - Künstliche Intelligenz},
volume = {30},
number = {1},
pages = {91--92},
abstract = {Human communication is multifaceted and information between humans is communicated on many channels in parallel. In order for a machine to become an efficient and accepted social companion, it is important that the machine understands interactive cues that not only represent direct communicative information such as spoken words but also nonverbal behavior. Hence, technologies to understand and put nonverbal communication into the context of the present interaction are essential for the advancement of human-machine interfaces [3, 4]. Multimodal behavior analytics—a transdisciplinary field of research—aims to close this gap and enables machines to automatically identify, characterize, model, and synthesize individuals’ multimodal nonverbal behavior within both human-machine as well as machine-mediated humanhuman interaction. The emerging technology of this field is relevant for a wide range of interaction applications, including but not limited to the areas of healthcare and education. Exemplarily, the characterization and association of nonverbal behavior with underlying clinical conditions, such as depression or post-traumatic stress, holds transformative potential and could change treatment and the healthcare systems efficiency significantly [6]. Within the educational context the assessment of proficiency and expertise of individuals’ social skills, in particular for those with learning disabilities or social anxiety, can help create individualized education scenarios [2, 8]. The potential of machine-assisted training for individuals with autism spectrum disorders (ASD) for example could have far reaching impacts on our society. In the following, I highlight two behavior analytics approaches that were investigated in my PhD dissertation [3] and summarized in a multimodal framework for human behavior analysis [4].},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Roemmele, Melissa
Writing Stories with Help from Recurrent Neural Networks Proceedings Article
In: AAAI Conference on Artificial Intelligence; Thirtieth AAAI Conference on Artificial Intelligence, pp. 4311 – 4312, AAAI Press, Phoenix, AZ, 2016.
Abstract | Links | BibTeX | Tags: Narrative, UARC
@inproceedings{roemmele_writing_2016,
title = {Writing Stories with Help from Recurrent Neural Networks},
author = {Melissa Roemmele},
url = {http://www.aaai.org/ocs/index.php/AAAI/AAAI16/paper/view/11966},
year = {2016},
date = {2016-02-01},
booktitle = {AAAI Conference on Artificial Intelligence; Thirtieth AAAI Conference on Artificial Intelligence},
pages = {4311--4312},
publisher = {AAAI Press},
address = {Phoenix, AZ},
abstract = {This thesis explores the use of a recurrent neural network model for a novel story generation task. In this task, the model analyzes an ongoing story and generates a sentence that continues the story.},
keywords = {Narrative, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Scherer, Stefan; Lucas, Gale M.; Gratch, Jonathan; Rizzo, Albert Skip; Morency, Louis-Philippe
Self-reported symptoms of depression and PTSD are associated with reduced vowel space in screening interviews Journal Article
In: IEEE Transactions on Affective Computing, vol. 7, no. 1, pp. 59–73, 2016, ISSN: 1949-3045.
Abstract | Links | BibTeX | Tags: MedVR, UARC, Virtual Humans
@article{scherer_self-reported_2016,
title = {Self-reported symptoms of depression and {PTSD} are associated with reduced vowel space in screening interviews},
author = {Stefan Scherer and Gale M. Lucas and Jonathan Gratch and Albert Skip Rizzo and Louis-Philippe Morency},
url = {https://ieeexplore.ieee.org/document/7117386},
doi = {10.1109/TAFFC.2015.2440264},
issn = {1949-3045},
year = {2016},
date = {2016-01-01},
journal = {IEEE Transactions on Affective Computing},
volume = {7},
number = {1},
pages = {59--73},
abstract = {Reduced frequency range in vowel production is a well documented speech characteristic of individuals’ with psychological and neurological disorders. Affective disorders such as depression and post-traumatic stress disorder (PTSD) are known to influence motor control and in particular speech production. The assessment and documentation of reduced vowel space and reduced expressivity often either rely on subjective assessments or on analysis of speech under constrained laboratory conditions (e.g.sustained vowel production, reading tasks). These constraints render the analysis of such measures expensive and impractical. Within this work, we investigate an automatic unsupervised machine learning based approach to assess a speaker’s vowel space. Our experiments are based on recordings of 253 individuals. Symptoms of depression and PTSD are assessed using standard self-assessment questionnaires and their cut-off scores. The experiments show a significantly reduced vowel space in subjects that scored positively on the questionnaires. We show the measure’s statistical robustness against varying demographics of individuals and articulation rate. The reduced vowel space for subjects with symptoms of depression can be explained by the common condition of psychomotor retardation influencing articulation and motor control. These findings could potentially support treatment of affective disorders, like depression and PTSD in the future.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Graesser, Arthur C; Hu, Xiangen; Nye, Benjamin D.; Sottilare, Robert A.
Intelligent Tutoring Systems, Serious Games, and the Generalized Intelligent Framework for Tutoring (GIFT) Book Section
In: Using Games and Simulations for Teaching and Assessment, pp. 58–79, Routledge, New York, NY, 2016, ISBN: 978-0-415-73787-6.
Abstract | Links | BibTeX | Tags: ARL, DoD, Learning Sciences, UARC
@incollection{graesser_intelligent_2016,
title = {Intelligent Tutoring Systems, Serious Games, and the Generalized Intelligent Framework for Tutoring ({GIFT})},
author = {Arthur C. Graesser and Xiangen Hu and Benjamin D. Nye and Robert A. Sottilare},
url = {https://www.researchgate.net/publication/304013322_Intelligent_Tutoring_Systems_Serious_Games_and_the_Generalized_Intelligent_Framework_for_Tutoring_GIFT},
isbn = {978-0-415-73787-6},
year = {2016},
date = {2016-01-01},
booktitle = {Using Games and Simulations for Teaching and Assessment},
pages = {58--79},
publisher = {Routledge},
address = {New York, NY},
abstract = {This chapter explores the prospects of integrating games with intelligent tutoring systems (ITSs). The hope is that there can be learning environments that optimize both motivation through games and deep learning through ITS technologies. Deep learning refers to the acquisition of knowledge, skills, strategies, and reasoning processes at the higher levels of Bloom’s (1956) taxonomy or the Knowledge-Learning-Instruction (KLI) framework (Koedinger, Corbett, & Perfetti, 2012), such as the application of knowledge to new cases, knowledge analysis and synthesis, problem solving, critical thinking, and other difficult cognitive processes. In contrast, shallow learning involves perceptual learning, memorization of explicit material, and mastery of simple rigid procedures. Shallow knowledge may be adequate for near transfer tests of knowledge/skills but not far transfer tests to new situations that have some modicum of complexity.},
keywords = {ARL, DoD, Learning Sciences, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Venek, Verena; Scherer, Stefan; Morency, Louis-Philippe; Rizzo, Albert; Pestian, John
Adolescent Suicidal Risk Assessment in Clinician-Patient Interaction Journal Article
In: IEEE Transactions on Affective Computing, vol. PP, no. 99, 2016, ISSN: 1949-3045.
Abstract | Links | BibTeX | Tags: MedVR, UARC, Virtual Humans
@article{venek_adolescent_2016,
title = {Adolescent Suicidal Risk Assessment in Clinician-Patient Interaction},
author = {Verena Venek and Stefan Scherer and Louis-Philippe Morency and Albert Rizzo and John Pestian},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7384418},
doi = {10.1109/TAFFC.2016.2518665},
issn = {1949-3045},
year = {2016},
date = {2016-01-01},
journal = {IEEE Transactions on Affective Computing},
volume = {PP},
number = {99},
internal-note = {volume/number are IEEE early-access placeholders (PP/99); update to final volume, issue, and pages once assigned},
abstract = {Youth suicide is a major public health problem. It is the third leading cause of death in the United States for ages 13 through 18. Many adolescents that face suicidal thoughts or make a suicide plan never seek professional care or help. Within this work, we evaluate both verbal and nonverbal responses to a five-item ubiquitous questionnaire to identify and assess suicidal risk of adolescents. We utilize a machine learning approach to identify suicidal from non-suicidal speech as well as characterize adolescents that repeatedly attempted suicide in the past. Our findings investigate both verbal and nonverbal behavior information of the face-to-face clinician-patient interaction. We investigate 60 audio-recorded dyadic clinician-patient interviews of 30 suicidal (13 repeaters and 17 non-repeaters) and 30 non-suicidal adolescents. The interaction between clinician and adolescents is statistically analyzed to reveal differences between suicidal vs. non-suicidal adolescents and to investigate suicidal repeaters’ behaviors in comparison to suicidal non-repeaters. By using a hierarchical classifier we were able to show that the verbal responses to the ubiquitous questions sections of the interviews were useful to discriminate suicidal and non-suicidal patients. However, to additionally classify suicidal repeaters and suicidal non-repeaters more information especially nonverbal information is required.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2015
Khooshabeh, Peter; Scherer, Stefan; Ouimette, Brett; Ryan, William S.; Lance, Brent J.; Gratch, Jonathan
Computational-based behavior analysis and peripheral psychophysiology Journal Article
In: Advances in Computational Psychophysiology, pp. 34–36, 2015.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@article{khooshabeh_computational-based_2015,
title = {Computational-based behavior analysis and peripheral psychophysiology},
author = {Peter Khooshabeh and Stefan Scherer and Brett Ouimette and William S. Ryan and Brent J. Lance and Jonathan Gratch},
url = {http://www.sciencemag.org/sites/default/files/custom-publishing/documents/CP_Supplement_Final_100215.pdf},
year = {2015},
date = {2015-10-01},
journal = {Advances in Computational Psychophysiology},
pages = {34--36},
abstract = {Computational-based behavior analysis aims to automatically identify, characterize, model, and synthesize multimodal nonverbal behavior within both human–machine as well as machine-mediated human–human interaction. It uses state-of-the-art machine learning algorithms to track human nonverbal and verbal information, such as facial expressions, gestures, and posture, as well as what and how a person speaks. The emerging technology from this field of research is relevant for a wide range of interactive and social applications, including health care and education. The characterization and association of nonverbal behavior with underlying clinical conditions, such as depression or posttraumatic stress, could have significant benefits for treatments and the overall efficiency of the health care system.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Lucas, Gale; Malandrakis, Nikolaos; Szablowski, Evan; Fessler, Eli; Nichols, Jeffrey
GOAALLL!: Using Sentiment in the World Cup to Explore Theories of Emotion Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{gratch_goaalll_2015,
title = {{GOAALLL!}: Using Sentiment in the {World Cup} to Explore Theories of Emotion},
author = {Jonathan Gratch and Gale Lucas and Nikolaos Malandrakis and Evan Szablowski and Eli Fessler and Jeffrey Nichols},
url = {http://ict.usc.edu/pubs/GOAALLL!%20Using%20Sentiment%20in%20the%20World%20Cup%20to%20Explore%20Theories%20of%20Emotion.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of ACII 2015},
publisher = {IEEE},
address = {Xi'an, China},
abstract = {Sporting events evoke strong emotions amongst fans and thus act as natural laboratories to explore emotions and how they unfold in the wild. Computational tools, such as sentiment analysis, provide new ways to examine such dynamic emotional processes. In this article we use sentiment analysis to examine tweets posted during 2014 World Cup. Such analysis gives insight into how people respond to highly emotional events, and how these emotions are shaped by contextual factors, such as prior expectations, and how these emotions change as events unfold overtime. Here we report on some preliminary analysis of a World Cup twitter corpus using sentiment analysis techniques. We show these tools can give new insights into existing theories of what makes a sporting match exciting. This analysis seems to suggest that, contrary to assumptions in sports economics, excitement relates to expressions of negative emotion. We also discuss some challenges that such data present for existing sentiment analysis techniques and discuss future analysis.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}