Publications
Search
Katz, Andrea C.; Norr, Aaron M.; Buck, Benjamin; Fantelli, Emily; Edwards-Stewart, Amanda; Koenen-Woods, Patricia; Zetocha, Kimberlee; Smolenski, Derek J.; Holloway, Kevin; Rothbaum, Barbara O.; Difede, JoAnn; Rizzo, Albert; Skopp, Nancy; Mishkind, Matt; Gahm, Gregory; Reger, Greg M.; Andrasik, Frank
Changes in physiological reactivity in response to the trauma memory during prolonged exposure and virtual reality exposure therapy for posttraumatic stress disorder Journal Article
In: Psychological Trauma: Theory, Research, Practice, and Policy, 2020, ISSN: 1942-969X, 1942-9681.
@article{katz_changes_2020,
  title     = {Changes in physiological reactivity in response to the trauma memory during prolonged exposure and virtual reality exposure therapy for posttraumatic stress disorder.},
  author    = {Katz, Andrea C. and Norr, Aaron M. and Buck, Benjamin and Fantelli, Emily and Edwards-Stewart, Amanda and Koenen-Woods, Patricia and Zetocha, Kimberlee and Smolenski, Derek J. and Holloway, Kevin and Rothbaum, Barbara O. and Difede, JoAnn and Rizzo, Albert and Skopp, Nancy and Mishkind, Matt and Gahm, Gregory and Reger, Greg M. and Andrasik, Frank},
  url       = {http://doi.apa.org/getdoi.cfm?doi=10.1037/tra0000567},
  doi       = {10.1037/tra0000567},
  issn      = {1942-969X, 1942-9681},
  year      = {2020},
  date      = {2020-04-01},
  journal   = {Psychological Trauma: Theory, Research, Practice, and Policy},
  abstract  = {This study is among the first to examine how physiological processes change throughout PTSD treatment and the first to compare standard exposure therapy to therapy augmented with virtual reality (VR) in active-duty soldiers with PTSD. Results showed that soldiers in VR therapy had smaller physical reactions to trauma memories compared to those who did not receive treatment, whereas those who got standard treatment did not. These findings provide insight into possible mechanisms of PTSD treatment, point to potential objective indicators of early treatment response in active-duty soldiers, and suggest that VR treatment might lead to earlier symptom reduction.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Chaffey, Patricia; Artstein, Ron; Georgila, Kallirroi; Pollard, Kimberly A.; Gilani, Setareh Nasihati; Krum, David M.; Nelson, David; Huynh, Kevin; Gainer, Alesia; Alavi, Seyed Hossein; Yahata, Rhys; Leuski, Anton; Yanov, Volodymyr; Traum, David
Human swarm interaction using plays, audibles, and a virtual spokesperson Proceedings Article
In: Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II, pp. 40, SPIE, Online Only, United States, 2020, ISBN: 978-1-5106-3603-3 978-1-5106-3604-0.
@inproceedings{chaffey_human_2020,
title = {Human swarm interaction using plays, audibles, and a virtual spokesperson},
author = {Patricia Chaffey and Ron Artstein and Kallirroi Georgila and Kimberly A. Pollard and Setareh Nasihati Gilani and David M. Krum and David Nelson and Kevin Huynh and Alesia Gainer and Seyed Hossein Alavi and Rhys Yahata and Anton Leuski and Volodymyr Yanov and David Traum},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/11413/2557573/Human-swarm-interaction-using-plays-audibles-and-a-virtual-spokesperson/10.1117/12.2557573.full},
doi = {10.1117/12.2557573},
isbn = {978-1-5106-3603-3, 978-1-5106-3604-0},
year = {2020},
date = {2020-04-01},
booktitle = {Proceedings of Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications II},
pages = {40},
publisher = {SPIE},
address = {Online Only, United States},
abstract = {This study explores two hypotheses about human-agent teaming: 1. Real-time coordination among a large set of autonomous robots can be achieved using predefined “plays” which define how to execute a task, and “audibles” which modify the play on the fly; 2. A spokesperson agent can serve as a representative for a group of robots, relaying information between the robots and human teammates. These hypotheses are tested in a simulated game environment: a human participant leads a search-and-rescue operation to evacuate a town threatened by an approaching wildfire, with the object of saving as many lives as possible. The participant communicates verbally with a virtual agent controlling a team of ten aerial robots and one ground vehicle, while observing a live map display with real-time location of the fire and identified survivors. Since full automation is not currently possible, two human controllers control the agent’s speech and actions, and input parameters to the robots, which then operate autonomously until the parameters are changed. Designated plays include monitoring the spread of fire, searching for survivors, broadcasting warnings, guiding residents to safety, and sending the rescue vehicle. A successful evacuation of all the residents requires personal intervention in some cases (e.g., stubborn residents) while delegating other responsibilities to the spokesperson agent and robots, all in a rapidly changing scene. The study records the participants’ verbal and nonverbal behavior in order to identify strategies people use when communicating with robotic swarms, and to collect data for eventual automation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Li, Ruilong; Bladin, Karl; Zhao, Yajie; Chinara, Chinmay; Ingraham, Owen; Xiang, Pengda; Ren, Xinglei; Prasad, Pratusha; Kishore, Bipin; Xing, Jun; Li, Hao
Learning Formation of Physically-Based Face Attributes Proceedings Article
In: Proceedings of the CVPR 2020, IEEE, Seattle, Washington, 2020.
@inproceedings{li_learning_2020,
  title     = {Learning Formation of Physically-Based Face Attributes},
  author    = {Li, Ruilong and Bladin, Karl and Zhao, Yajie and Chinara, Chinmay and Ingraham, Owen and Xiang, Pengda and Ren, Xinglei and Prasad, Pratusha and Kishore, Bipin and Xing, Jun and Li, Hao},
  url       = {https://www.computer.org/csdl/proceedings-article/cvpr/2020/716800d407/1m3oiaP9ouQ},
  doi       = {10.1109/CVPR42600.2020.00347},
  year      = {2020},
  date      = {2020-04-01},
  booktitle = {Proceedings of the CVPR 2020},
  publisher = {IEEE},
  address   = {Seattle, Washington},
  abstract  = {Based on a combined data set of 4000 high resolution facial scans, we introduce a non-linear morphable face model, capable of producing multifarious face geometry of pore-level resolution, coupled with material attributes for use in physically-based rendering. We aim to maximize the variety of face identities, while increasing the robustness of correspondence between unique components, including middle-frequency geometry, albedo maps, specular intensity maps and high-frequency displacement details. Our deep learning based generative model learns to correlate albedo and geometry, which ensures the anatomical correctness of the generated assets. We demonstrate potential use of our generative model for novel identity generation, model fitting, interpolation, animation, high fidelity data visualization, and low-to-high resolution data domain transferring. We hope the release of this generative model will encourage further cooperation between all graphics, vision, and data focused professionals, while demonstrating the cumulative value of every individual’s complete biometric profile.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Olszewski, Kyle; Ceylan, Duygu; Xing, Jun; Echevarria, Jose; Chen, Zhili; Chen, Weikai; Li, Hao
Intuitive, Interactive Beard and Hair Synthesis with Generative Models Proceedings Article
In: Proceedings of the CVPR 2020, IEEE, Seattle, Washington, 2020.
@inproceedings{olszewski_intuitive_2020,
  title     = {Intuitive, Interactive Beard and Hair Synthesis with Generative Models},
  author    = {Olszewski, Kyle and Ceylan, Duygu and Xing, Jun and Echevarria, Jose and Chen, Zhili and Chen, Weikai and Li, Hao},
  url       = {http://arxiv.org/abs/2004.06848},
  doi       = {10.1109/CVPR42600.2020.00747},
  year      = {2020},
  date      = {2020-04-01},
  booktitle = {Proceedings of the CVPR 2020},
  publisher = {IEEE},
  address   = {Seattle, Washington},
  abstract  = {We present an interactive approach to synthesizing realistic variations in facial hair in images, ranging from subtle edits to existing hair to the addition of complex and challenging hair in images of clean-shaven subjects. To circumvent the tedious and computationally expensive tasks of modeling, rendering and compositing the 3D geometry of the target hairstyle using the traditional graphics pipeline, we employ a neural network pipeline that synthesizes realistic and detailed images of facial hair directly in the target image in under one second. The synthesis is controlled by simple and sparse guide strokes from the user defining the general structural and color properties of the target hairstyle. We qualitatively and quantitatively evaluate our chosen method compared to several alternative approaches. We show compelling interactive editing results with a prototype user interface that allows novice users to progressively refine the generated image to match their desired hairstyle, and demonstrate that our approach also allows for flexible and high-fidelity scalp hair synthesis.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Davis, Dan M; Guizani, Skander; Jaksha, Evan
Establishing Metrics and Creating Standards: Quantifying Efficacy of Battlefield Simulations Journal Article
In: SISO Simulation Innovation Workshop, no. 2020_SIW_52, pp. 11, 2020.
@article{davis_establishing_2020,
title = {Establishing Metrics and Creating Standards: Quantifying Efficacy of Battlefield Simulations},
author = {Dan M Davis and Skander Guizani and Evan Jaksha},
url = {https://www.sisostds.org/Default.aspx?tabid=105&EntryId=51197},
internal-note = {NOTE(review): url is byte-identical to the one in davis_proactive_2020 (EntryId=51197); one of the two EntryIds is almost certainly wrong — verify against sisostds.org},
year = {2020},
date = {2020-04-01},
journal = {SISO Simulation Innovation Workshop},
number = {2020_SIW_52},
pages = {11},
abstract = {This paper asserts that quantification and verification of Battlefield simulations is necessary to assess, verify, and guide the researchers, military commanders, and users in both the simulations’ development and their implementation. The authors present their observations on previous development activities that were hampered by lack of effective metrics and present their arguments that much of this was driven by a lack of standards. Tracing back using commonly accepted System Engineering practices, they show how lack of such standards makes even to the development of effective metrics problematic. The paper documents the experiences and enumerates the potential pitfalls of these shortcomings. Both the authors' experiences in military service and the technical literature supporting their theses are adduced to support their analysis of the current technical research and development environment. Then the paper evaluates several System Engineering tools to further investigate and establish the ultimate goals of these formalized processes. Using their current project in establishing virtual on-line mentors as an exemplar of the way such tools would be effective, the authors make a case for the needs for metrics standards that both are accepted by consensus and are ultimately directed at providing the warfighter with all of the training possible before putting that warfighters in harm's way and imperiling the missions for which they are putting themselves at risk. Examples of the nature and reaction to simulator training, virtual human interaction, computer agent interfaces and implementation issues are given to further illuminate for the reader the possible extensions of these approaches into the reader's own research as well as calling for a more community-wide recognition of the needs for standards both for implementation and for metrics to assess Battlefield Simulation utility to the warfighter.
Future investigations, analysis and action are considered and evaluated},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Davis, Dan M; Rosenberg, Milton; Davis, Mark C
Proactive Natural Language Processing: Addressing Terminology Disparity and Team Coalescence Journal Article
In: SISO Simulation Innovation Workshop, no. 2020_SIW_39, pp. 11, 2020.
@article{davis_proactive_2020,
title = {Proactive Natural Language Processing: Addressing Terminology Disparity and Team Coalescence},
author = {Dan M Davis and Milton Rosenberg and Mark C Davis},
url = {https://www.sisostds.org/Default.aspx?tabid=105&EntryId=51197},
internal-note = {NOTE(review): url is byte-identical to the one in davis_establishing_2020, yet number says 2020_SIW_39 vs 2020_SIW_52 — verify the EntryId for this paper},
year = {2020},
date = {2020-04-01},
journal = {SISO Simulation Innovation Workshop},
number = {2020_SIW_39},
pages = {11},
abstract = {There is a continuing need for battlefield simulations and virtual humans. Most recently, the authors have been focused on the creation of virtual conversation environments to leverage the mentoring skills of selected individuals by creating large libraries of short video clips of advice which are then presented to the user in response to their questions. In these endeavors two issues have arisen; the inconsistency of the definitions used and the need to ameliorate the impacts of short-tour intervals on team formation. This paper will address both of these issues, review existing research, document some early research into these impediments, and discuss the similarities of these issues to those faced by the standards community writ large. They will cite and review the work of Professor Bruce Tuckman: Forming, Storming, Norming, and Performing. The benefits of using virtual humans to enhance these processes are outlined. The need for and design of proactive Natural Language Processing-enabled virtual humans and computer agents is set forth and analyzed. The paper will lay out the research goals, identify the semantic differences, and report on the potential impacts of those differences. In its totality, this paper intends to demonstrate that, in addition to the need to evangelize about the necessity of standards, this community has a lot to contribute to researchers, developers, and implementers faced with destructive differences in terminology, understanding and practice. All of this data and analysis will be presented in a way that should make sure that the insights garnered therefrom are accessible by members of this and other communities and they can be implemented and modified, as is most effective. Future advances now in development are discussed, along with the utility of these new capabilities and approaches.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert "Skip"
An Adaptive Agent-Based Interface for Personalized Health Interventions Proceedings Article
In: Proceedings of the 25th International Conference on Intelligent User Interfaces Companion, pp. 118–119, ACM, Cagliari, Italy, 2020, ISBN: 978-1-4503-7513-9.
@inproceedings{mozgai_adaptive_2020,
title = {An Adaptive Agent-Based Interface for Personalized Health Interventions},
author = {Sharon Mozgai and Arno Hartholt and Albert "Skip" Rizzo},
url = {https://dl.acm.org/doi/10.1145/3379336.3381467},
doi = {10.1145/3379336.3381467},
isbn = {978-1-4503-7513-9},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces Companion},
pages = {118--119},
publisher = {ACM},
address = {Cagliari, Italy},
abstract = {This demo introduces a novel mHealth application with an agent-based interface designed to collect multimodal data with passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin) as well as through user self-report. This mHealth application delivers personalized and adaptive multimedia content via smartphone application specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health via novel adaptive logic-based algorithms while employing behavior change techniques (e.g., goal-setting, barrier identification, etc.). A virtual human coach leads all interactions to improve adherence.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert
The Passive Sensing Agent: A Multimodal Adaptive mHealth Application Proceedings Article
In: Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops), pp. 1–3, IEEE, Austin, TX, USA, 2020, ISBN: 978-1-72814-716-1.
@inproceedings{mozgai_passive_2020,
title = {The Passive Sensing Agent: A Multimodal Adaptive mHealth Application},
author = {Sharon Mozgai and Arno Hartholt and Albert Rizzo},
url = {https://ieeexplore.ieee.org/document/9156177/},
doi = {10.1109/PerComWorkshops48775.2020.9156177},
isbn = {978-1-72814-716-1},
year = {2020},
date = {2020-03-01},
booktitle = {Proceedings of the 2020 IEEE International Conference on Pervasive Computing and Communications Workshops (PerCom Workshops)},
pages = {1--3},
publisher = {IEEE},
address = {Austin, TX, USA},
abstract = {We are demoing the Passive Sensing Agent (PSA), an mHealth virtual human coach, that collects multimodal data through passive sensors native to popular wearables (e.g., Apple Watch, FitBit, and Garmin). This virtual human interface delivers adaptive multi-media content via smartphone application that is specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health. Initially developed for the military, the PSA delivers health interventions (e.g., educational exercises, physical challenges, and performance feedback) matched to the individual user via novel adaptive logic-based algorithms while employing various behavior change techniques (e.g., goal-setting, barrier identification, rewards, modeling, etc.). A virtual human coach leads all interactions including the first-time user experience and the brief daily sessions. All interactions were specifically designed to engage and motivate the user while continuously collecting data on their cognitive, emotional, and physical fitness. This multi-component application is integrated and deployed on an iPhone and Apple Watch prototype; a civilian version is currently in-development.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Shmueli-Scheuer, Michal; Artstein, Ron; Khazaeni, Yasaman; Fang, Hao; Liao, Q. Vera
user2agent: 2nd Workshop on User-Aware Conversational Agents Proceedings Article
In: Proceedings of the 25th International Conference on Intelligent User Interfaces Companion, pp. 9–10, Association for Computing Machinery, New York, NY, USA, 2020, ISBN: 978-1-4503-7513-9.
@inproceedings{shmueli-scheuer_user2agent_2020,
title = {{user2agent}: 2nd Workshop on User-Aware Conversational Agents},
author = {Michal Shmueli-Scheuer and Ron Artstein and Yasaman Khazaeni and Hao Fang and Q. Vera Liao},
url = {https://doi.org/10.1145/3379336.3379356},
doi = {10.1145/3379336.3379356},
isbn = {978-1-4503-7513-9},
year = {2020},
date = {2020-03-01},
urldate = {2023-03-31},
booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces Companion},
pages = {9--10},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IUI '20},
abstract = {Conversational agents are becoming increasingly popular. These systems present an extremely rich and challenging research space for addressing many aspects of user awareness and adaptation, such as user profiles, contexts, personalities, emotions, social dynamics, conversational styles, etc. Adaptive interfaces are of long-standing interest for the HCI community. Meanwhile, new machine learning approaches are introduced in the current generation of conversational agents, such as deep learning, reinforcement learning, and active learning. It is imperative to consider how various aspects of user-awareness should be handled by these new techniques. The goal of this workshop is to bring together researchers in HCI, user modeling, and the AI and NLP communities from both industry and academia, who are interested in advancing the state-of-the-art on the topic of user-aware conversational agents. Through a focused and open exchange of ideas and discussions, we will work to identify central research topics in user-aware conversational agents and develop a strong interdisciplinary foundation to address them.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Meida; Feng, Andrew; Prasad, Pratusha Bhuvana; McAlinden, Ryan; Soibelman, Lucio; Enloe, Mike
Fully Automated Photogrammetric Data Segmentation and Object Information Extraction Approach for Creating Simulation Terrain Proceedings Article
In: Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC), pp. 13, ResearchGate, Orlando, FL, 2020.
@inproceedings{chen_fully_2020,
title = {Fully Automated Photogrammetric Data Segmentation and Object Information Extraction Approach for Creating Simulation Terrain},
author = {Meida Chen and Andrew Feng and Pratusha Bhuvana Prasad and Ryan McAlinden and Lucio Soibelman and Mike Enloe},
url = {https://www.researchgate.net/publication/338557943_Fully_Automated_Photogrammetric_Data_Segmentation_and_Object_Information_Extraction_Approach_for_Creating_Simulation_Terrain},
year = {2020},
date = {2020-01-01},
booktitle = {Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC)},
pages = {13},
publisher = {ResearchGate},
internal-note = {NOTE(review): publisher "ResearchGate" looks wrong — that is where the PDF is hosted, not the proceedings publisher; verify (I/ITSEC proceedings are published by NTSA)},
address = {Orlando, FL},
abstract = {Our previous works have demonstrated that visually realistic 3D meshes can be automatically reconstructed with lowcost, off-the-shelf unmanned aerial systems (UAS) equipped with capable cameras, and efficient photogrammetric software techniques (McAlinden, Suma, Grechkin, & Enloe, 2015; Spicer, McAlinden, Conover, & Adelphi, 2016). However, such generated data do not contain semantic information/features of objects (i.e., man-made objects, vegetation, ground, object materials, etc.) and cannot allow the sophisticated user-level and system-level interaction. Considering the use case of the data in creating realistic virtual environments for training and simulations (i.e., mission planning, rehearsal, threat detection, etc.), segmenting the data and extracting object information are essential tasks. Previous studies have focused on and made valuable contributions to segment Light Detection and Ranging (LIDAR) generated 3D point clouds and classifying ground materials from real-world images. However, only a few studies have focused on the data created using the photogrammetric technique.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zhao, Sicheng; Wang, Shangfei; Soleymani, Mohammad; Joshi, Dhiraj; Ji, Qiang
Affective Computing for Large-scale Heterogeneous Multimedia Data: A Survey Journal Article
In: ACM Transactions on Multimedia Computing, Communications, and Applications, vol. 15, no. 3s, pp. 1–32, 2020, ISSN: 1551-6857, 1551-6865.
@article{zhao_affective_2020,
title = {Affective Computing for Large-scale Heterogeneous Multimedia Data: A Survey},
author = {Sicheng Zhao and Shangfei Wang and Mohammad Soleymani and Dhiraj Joshi and Qiang Ji},
url = {https://dl.acm.org/doi/10.1145/3363560},
doi = {10.1145/3363560},
issn = {1551-6857, 1551-6865},
year = {2020},
date = {2020-01-01},
journal = {ACM Transactions on Multimedia Computing, Communications, and Applications},
volume = {15},
number = {3s},
pages = {1--32},
abstract = {The wide popularity of digital photography and social networks has generated a rapidly growing volume of multimedia data (i.e., images, music, and videos), resulting in a great demand for managing, retrieving, and understanding these data. Affective computing (AC) of these data can help to understand human behaviors and enable wide applications. In this article, we survey the state-of-the-art AC technologies comprehensively for large-scale heterogeneous multimedia data. We begin this survey by introducing the typical emotion representation models from psychology that are widely employed in AC. We briefly describe the available datasets for evaluating AC algorithms. We then summarize and compare the representative methods on AC of different multimedia types, i.e., images, music, videos, and multimodal data, with the focus on both handcrafted features-based methods and deep learning methods. Finally, we discuss some challenges and future directions for multimedia affective computing.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Shinagawa, Seitaro; Yoshino, Koichiro; Alavi, Seyed Hossein; Georgila, Kallirroi; Traum, David; Sakti, Sakriani; Nakamura, Satoshi
An Interactive Image Editing System Using an Uncertainty-Based Confirmation Strategy Journal Article
In: IEEE Access, vol. 8, pp. 98471–98480, 2020, ISSN: 2169-3536, (Conference Name: IEEE Access).
@article{shinagawa_interactive_2020,
title = {An Interactive Image Editing System Using an Uncertainty-Based Confirmation Strategy},
author = {Seitaro Shinagawa and Koichiro Yoshino and Seyed Hossein Alavi and Kallirroi Georgila and David Traum and Sakriani Sakti and Satoshi Nakamura},
url = {https://ieeexplore.ieee.org/abstract/document/9099288},
doi = {10.1109/ACCESS.2020.2997012},
issn = {2169-3536},
year = {2020},
date = {2020-01-01},
journal = {IEEE Access},
volume = {8},
pages = {98471--98480},
abstract = {We propose an interactive image editing system that has a confirmation dialogue strategy using an entropy-based uncertainty calculation on its generated images with Deep Convolutional Generative Adversarial Networks (DCGAN). DCGAN is an image generative model that learns an image manifold of a given dataset and enables continuous change of an image. Our proposed image editing system combines DCGAN with a natural language interface that accepts image editing requests in natural language. Although such a system is helpful for human users, it often faces uncertain requests to generate acceptable images. A promising approach to solve this problem is introducing a dialogue process that shows multiple candidates and confirms the user's intention. However, confirming every editing request creates redundant dialogues. To achieve more efficient dialogues, we propose an entropy-based dialogue strategy that decides when the system should confirm, and enables effective image editing through a dialogue that reduces redundant confirmations. We conducted image editing dialogue experiments using an avatar face illustration dataset for editing by natural language requests. Through quantitative and qualitative analysis, our results show that our entropy-based confirmation strategy achieved an effective dialogue by generating images desired by users.},
note = {Conference Name: IEEE Access},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Uryupina, Olga; Artstein, Ron; Bristot, Antonella; Cavicchio, Federica; Delogu, Francesca; Rodriguez, Kepa J.; Poesio, Massimo
Annotating a broad range of anaphoric phenomena, in a variety of genres: the ARRAU Corpus Journal Article
In: Natural Language Engineering, vol. 26, no. 1, pp. 95–128, 2020, ISSN: 1351-3249, 1469-8110, (Publisher: Cambridge University Press).
@article{uryupina_annotating_2020,
title = {Annotating a broad range of anaphoric phenomena, in a variety of genres: the {ARRAU} Corpus},
author = {Olga Uryupina and Ron Artstein and Antonella Bristot and Federica Cavicchio and Francesca Delogu and Kepa J. Rodriguez and Massimo Poesio},
url = {https://www.cambridge.org/core/journals/natural-language-engineering/article/abs/annotating-a-broad-range-of-anaphoric-phenomena-in-a-variety-of-genres-the-arrau-corpus/17E7FA2CB2E36C213E2649479593B6B0},
doi = {10.1017/S1351324919000056},
issn = {1351-3249, 1469-8110},
year = {2020},
date = {2020-01-01},
urldate = {2023-03-31},
journal = {Natural Language Engineering},
volume = {26},
number = {1},
pages = {95--128},
abstract = {This paper presents the second release of arrau, a multigenre corpus of anaphoric information created over 10 years to provide data for the next generation of coreference/anaphora resolution systems combining different types of linguistic and world knowledge with advanced discourse modeling supporting rich linguistic annotations. The distinguishing features of arrau include the following: treating all NPs as markables, including non-referring NPs, and annotating their (non-) referentiality status; distinguishing between several categories of non-referentiality and annotating non-anaphoric mentions; thorough annotation of markable boundaries (minimal/maximal spans, discontinuous markables); annotating a variety of mention attributes, ranging from morphosyntactic parameters to semantic category; annotating the genericity status of mentions; annotating a wide range of anaphoric relations, including bridging relations and discourse deixis; and, finally, annotating anaphoric ambiguity. The current version of the dataset contains 350K tokens and is publicly available from LDC. In this paper, we discuss in detail all the distinguishing features of the corpus, so far only partially presented in a number of conference and workshop papers, and we also discuss the development between the first release of arrau in 2008 and this second one.},
note = {Publisher: Cambridge University Press},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Bell, Benjamin; Kelsey, Elaine; Nye, Benjamin; Bennett, Winston (“Wink”)
Adapting Instruction by Measuring Engagement with Machine Learning in Virtual Reality Training Proceedings Article
In: Sottilare, Robert A.; Schwarz, Jessica (Ed.): Adaptive Instructional Systems, pp. 271–282, Springer International Publishing, Cham, 2020, ISBN: 978-3-030-50788-6.
@inproceedings{bell_adapting_2020,
title = {Adapting Instruction by Measuring Engagement with Machine Learning in Virtual Reality Training},
author = {Benjamin Bell and Elaine Kelsey and Benjamin Nye and Winston (``Wink'') Bennett},
editor = {Robert A. Sottilare and Jessica Schwarz},
url = {https://link.springer.com/chapter/10.1007/978-3-030-50788-6_20},
doi = {10.1007/978-3-030-50788-6_20},
isbn = {978-3-030-50788-6},
year = {2020},
date = {2020-01-01},
booktitle = {Adaptive Instructional Systems},
pages = {271--282},
publisher = {Springer International Publishing},
address = {Cham},
series = {Lecture Notes in Computer Science},
abstract = {The USAF has established a new approach to Specialized Undergraduate Pilot Training (SUPT) called Pilot Training Next (PTN) that integrates traditional flying sorties with VR-enabled ground-based training devices and data-driven proficiency tracking to achieve training efficiencies, improve readiness, and increase throughput. Eduworks and USC’s Institute for Creative Technologies are developing machine learning (ML) models that can measure user engagement during any computer-mediated training (simulation, courseware) and offer recommendations for restoring lapses in engagement. We are currently developing and testing this approach, called the Observational Motivation and Engagement Generalized Appliance (OMEGA) in a PTN context. Two factors motivate this work. First, one goal of PTN is for an instructor pilot (IP) to simultaneously monitor multiple simulator rides. Being alerted to distraction, attention and engagement can help an IP manage multiple students at the same time, with recommendations for restoring engagement providing further instructional support. Second, the virtual environment provides a rich source of raw data that machine learning models can use to associate user activity with user engagement. We have created a testbed for data capture in order to construct the ML models, based on theoretical foundations we developed previously. We are running pilots through multiple PTN scenarios and collecting formative data from instructors to evaluate the utility of the recommendations OMEGA generates regarding how lapsed engagement can be restored. We anticipate findings that validate the use of ML models for learning to detect engagement from the rich data sources characteristic of virtual environments. These findings will be applicable across a broad range of conventional and VR training applications.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S.; Joshi, Himanshu; Ustun, Volkan
(Sub)Symbolic × (a)symmetric × (non)combinatory: A map of AI approaches spanning symbolic/statistical to neural/ML Proceedings Article
In: Proceedings of the 7th Annual Conference on Advances in Cognitive Systems, pp. 113–131, Cognitive Systems Foundation, Cambridge, MA, 2019.
@inproceedings{rosenbloom_subsymbolic_2019,
title = {{(Sub)Symbolic} × {(a)symmetric} × {(non)combinatory}: A map of {AI} approaches spanning symbolic/statistical to neural/{ML}},
author = {Paul S. Rosenbloom and Himanshu Joshi and Volkan Ustun},
url = {https://drive.google.com/file/d/1Ynp75A048Mfuh7e3kf_V7hs5kFD7uHsT/view},
year = {2019},
date = {2019-12-01},
booktitle = {Proceedings of the 7th Annual Conference on Advances in Cognitive Systems},
pages = {113--131},
publisher = {Cognitive Systems Foundation},
address = {Cambridge, MA},
abstract = {The traditional symbolic versus subsymbolic dichotomy can be decomposed into three more basic dichotomies, to yield a 3D (2×2×2) space in which symbolic/statistical and neural/ML approaches to intelligence appear in opposite corners. Filling in all eight resulting cells then yields a map that spans a number of standard AI approaches plus a few that may be less familiar. Based on this map, four hypotheses are articulated, explored, and evaluated concerning its relevance to both a deeper understanding of the field of AI as a whole and the general capabilities required in complete AI/cognitive systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Fast, Ed; Reilly, Adam; Whitcup, Wendy; Liewer, Matt; Mozgai, Sharon
Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied AI Agents in XR Proceedings Article
In: Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR), pp. 308–3084, IEEE, San Diego, CA, USA, 2019, ISBN: 978-1-72815-604-0.
@inproceedings{hartholt_ubiquitous_2019,
title = {Ubiquitous Virtual Humans: A Multi-platform Framework for Embodied {AI} Agents in {XR}},
author = {Arno Hartholt and Ed Fast and Adam Reilly and Wendy Whitcup and Matt Liewer and Sharon Mozgai},
url = {https://ieeexplore.ieee.org/document/8942321/},
doi = {10.1109/AIVR46125.2019.00072},
isbn = {978-1-72815-604-0},
year = {2019},
date = {2019-12-01},
booktitle = {Proceedings of the 2019 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR)},
pages = {308--3084},
publisher = {IEEE},
address = {San Diego, CA, USA},
abstract = {We present an architecture and framework for the development of virtual humans for a range of computing platforms, including mobile, web, Virtual Reality (VR) and Augmented Reality (AR). The framework uses a mix of in-house and commodity technologies to support audio-visual sensing, speech recognition, natural language processing, nonverbal behavior generation and realization, text-to-speech generation, and rendering. This work builds on the Virtual Human Toolkit, which has been extended to support computing platforms beyond Windows. The resulting framework maintains the modularity of the underlying architecture, allows re-use of both logic and content through cloud services, and is extensible by porting lightweight clients. We present the current state of the framework, discuss how we model and animate our characters, and offer lessons learned through several use cases, including expressive character animation in seated VR, shared space and navigation in roomscale VR, autonomous AI in mobile AR, and real-time user performance feedback based on mobile sensors in headset AR.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Meida; Feng, Andrew; McAlinden, Ryan; Soibelman, Lucio
Photogrammetric Point Cloud Segmentation and Object Information Extraction for Creating Virtual Environments and Simulations Journal Article
In: Journal of Management in Engineering, vol. 36, no. 2, pp. 04019046, 2019, ISSN: 0742-597X, 1943-5479.
@article{chen_photogrammetric_2019,
title = {Photogrammetric Point Cloud Segmentation and Object Information Extraction for Creating Virtual Environments and Simulations},
author = {Meida Chen and Andrew Feng and Ryan McAlinden and Lucio Soibelman},
url = {http://ascelibrary.org/doi/10.1061/%28ASCE%29ME.1943-5479.0000737},
doi = {10.1061/(ASCE)ME.1943-5479.0000737},
issn = {0742-597X, 1943-5479},
year = {2019},
date = {2019-11-01},
journal = {Journal of Management in Engineering},
volume = {36},
number = {2},
pages = {04019046},
abstract = {Photogrammetric techniques have dramatically improved over the last few years, enabling the creation of visually compelling three-dimensional (3D) meshes using unmanned aerial vehicle imagery. These high-quality 3D meshes have attracted notice from both academicians and industry practitioners in developing virtual environments and simulations. However, photogrammetric generated point clouds and meshes do not allow both user-level and system-level interaction because they do not contain the semantic information to distinguish between objects. Thus, segmenting generated point clouds and meshes and extracting the associated object information is a necessary step. A framework for point cloud and mesh classification and segmentation is presented in this paper. The proposed framework was designed considering photogrammetric data-quality issues and provides a novel way of extracting object information, including (1) individual tree locations and related features and (2) building footprints. Experiments were conducted to rank different point descriptors and evaluate supervised machine-learning algorithms for segmenting photogrammetric generated point clouds. The proposed framework was validated using data collected at the University of Southern California (USC) and the Muscatatuck Urban Training Center (MUTC). DOI: 10.1061/(ASCE) ME.1943-5479.0000737. © 2019 American Society of Civil Engineers.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Feng, Andrew; Gordon, Andrew S.
Latent Terrain Representations for Trajectory Prediction Proceedings Article
In: Proceedings of the 1st ACM SIGSPATIAL International Workshop on Computing with Multifaceted Movement Data - MOVE'19, pp. 1–4, ACM Press, Chicago, IL, USA, 2019, ISBN: 978-1-4503-6951-0.
@inproceedings{feng_latent_2019,
title = {Latent Terrain Representations for Trajectory Prediction},
author = {Andrew Feng and Andrew S. Gordon},
url = {http://dl.acm.org/citation.cfm?doid=3356392.3365218},
doi = {10.1145/3356392.3365218},
isbn = {978-1-4503-6951-0},
year = {2019},
date = {2019-11-01},
booktitle = {Proceedings of the 1st ACM SIGSPATIAL International Workshop on Computing with Multifaceted Movement Data - MOVE'19},
pages = {1--4},
publisher = {ACM Press},
address = {Chicago, IL, USA},
abstract = {In natural outdoor environments, the shape of the surface terrain is an important factor in selecting a traversal path, both when operating off-road vehicles and maneuvering on foot. With the increased availability of digital elevation models for outdoor terrain, new opportunities exist to exploit this contextual information to improve automated path prediction. In this paper, we investigate predictive neural network models for outdoor trajectories that traverse terrain with known surface topography. We describe a method of encoding digital surface models as vectors in latent space using Wasserstein Autoencoders, and their use in convolutional neural networks that predict future trajectory positions from past trajectory data. We observe gains in predictive performance across three experiments, using both synthetic and recorded trajectories on real-world terrain.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
An, Capt Eric; Nolty, Anne A T; Amano, Stacy S; Rizzo, Albert A; Buckwalter, J Galen; Rensberger, Jared
Heart Rate Variability as an Index of Resilience Journal Article
In: Military Medicine, 2019, ISSN: 0026-4075, 1930-613X.
@article{an_heart_2019,
title = {Heart Rate Variability as an Index of Resilience},
author = {Capt Eric An and Anne A T Nolty and Stacy S Amano and Albert A Rizzo and J Galen Buckwalter and Jared Rensberger},
url = {https://academic.oup.com/milmed/advance-article/doi/10.1093/milmed/usz325/5586497},
doi = {10.1093/milmed/usz325},
issn = {0026-4075, 1930-613X},
year = {2019},
date = {2019-10-01},
journal = {Military Medicine},
abstract = {Introduction: Resilience is the ability to maintain or quickly return to a stable physical and psychological equilibrium despite experiencing stressful events. Flexibility of the autonomic nervous system is particularly important for adaptive stress responses and may contribute to individual differences in resilience. Power spectrum analysis of heart rate variability (HRV) allows measurement of sympathovagal balance, which helps to evaluate autonomic flexibility. The present study investigated HRV as a broad index of resilience. Materials and Methods: Twenty-four male participants from the Army National Guard Special Forces completed psychological measures known to relate to resilience and had HRV measured while undergoing stressful virtual environment scenarios. Pearson product-moment correlations were used to explore the relationships between HRV and resilience factors. All research was conducted with the oversight of the Human Subjects Review Committee of Fuller Theological Seminary. Results: Trends toward significance were reported in order to provide results that would reasonably be expected in a study of higher power. Trends between resilience factors and HRV were found only during specific stress-inducing simulations (see Tables III). Conclusion: Greater resilience to stress was associated with HRV during nonstress periods. Higher levels of resilience to traumatic events were associated with HRV during circumstances that were more stressful and emotionally distressing. Post hoc analysis revealed that specific factors including flexibility, emotional control, and spirituality were driving the relationship between general resilience and HRV following emotionally laden stressors. Less stress vulnerability was associated with HRV following intermittent brief stressors. In sum, HRV appears to represent some aspects of an individual’s overall resilience profile. 
Although resilience remains a complex, multidimensional construct, HRV shows promise as a global psychophysiological index of resilience. This study also offers important perspectives concerning ways to optimize both physical and psychological health.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Soleymani, Mohammad; Stefanov, Kalin; Kang, Sin-Hwa; Ondras, Jan; Gratch, Jonathan
Multimodal Analysis and Estimation of Intimate Self-Disclosure Proceedings Article
In: Proceedings of the 2019 International Conference on Multimodal Interaction on - ICMI '19, pp. 59–68, ACM Press, Suzhou, China, 2019, ISBN: 978-1-4503-6860-5.
@inproceedings{soleymani_multimodal_2019,
title = {Multimodal Analysis and Estimation of Intimate Self-Disclosure},
author = {Mohammad Soleymani and Kalin Stefanov and Sin-Hwa Kang and Jan Ondras and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3340555.3353737},
doi = {10.1145/3340555.3353737},
isbn = {978-1-4503-6860-5},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 International Conference on Multimodal Interaction on - ICMI '19},
pages = {59–68},
publisher = {ACM Press},
address = {Suzhou, China},
abstract = {Self-disclosure to others has a proven benefit for one’s mental health. It is shown that disclosure to computers can be similarly beneficial for emotional and psychological well-being. In this paper, we analyzed verbal and nonverbal behavior associated with self-disclosure in two datasets containing structured human-human and human-agent interviews from more than 200 participants. Correlation analysis of verbal and nonverbal behavior revealed that linguistic features such as affective and cognitive content in verbal behavior, and nonverbal behavior such as head gestures are associated with intimate self-disclosure. A multimodal deep neural network was developed to automatically estimate the level of intimate self-disclosure from verbal and nonverbal behavior. Between modalities, verbal behavior was the best modality for estimating self-disclosure within-corpora achieving r = 0.66. However, the cross-corpus evaluation demonstrated that nonverbal behavior can outperform language modality in cross-corpus evaluation. Such automatic models can be deployed in interactive virtual agents or social robots to evaluate rapport and guide their conversational strategy.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2015
Lucas, Gale; Gratch, Jonathan; Scherer, Stefan; Boberg, Jill; Stratou, Giota
Towards an Affective Interface for Assessment of Psychological Distress Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{lucas_towards_2015,
title = {Towards an Affective Interface for Assessment of Psychological Distress},
author = {Gale Lucas and Jonathan Gratch and Stefan Scherer and Jill Boberg and Giota Stratou},
url = {http://ict.usc.edu/pubs/Towards%20an%20Affective%20Interface%20for%20Assessment%20of%20Psychological%20Distress.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of ACII 2015},
publisher = {IEEE},
address = {Xi'an, China},
abstract = {Even with the rise in use of TeleMedicine for health care and mental health, research suggests that clinicians may have difficulty reading nonverbal cues in computer-mediated situations. However, the recent progress in tracking affective markers (i.e., displays of emotional expressions on face and in voice) has opened the door to new clinical applications that might help health care providers better read nonverbal behaviors when employing TeleMedicine. For example, an interface that automatically quantified affective markers could assist clinicians in their assessment of and treatment for psychological distress (i.e., symptoms of depression and PTSD). To move towards this prospect, we will show that clinicians’ judgments of these nonverbal affective markers (e.g., smile, frown, eye contact, tense voice) could be informed by such technology. The results of our evaluation suggest that clinicians’ ratings of nonverbal affective markers are less predictive of psychological distress than automatically quantified affective markers. Because such quantifications are more strongly associated with psychological distress than clinician ratings of these same nonverbal behaviors, an affective interface providing quantifications of nonverbal affective markers could potentially improve assessment of psychological distress.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chollet, Mathieu; Wortwein, Torsten; Morency, Louis-Philippe; Shapiro, Ari; Scherer, Stefan
Exploring Feedback Strategies to Improve Public Speaking: An Interactive Virtual Audience Framework Proceedings Article
In: Proceedings of UbiComp 2015, ACM, Osaka, Japan, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{chollet_exploring_2015,
title = {Exploring Feedback Strategies to Improve Public Speaking: An Interactive Virtual Audience Framework},
author = {Mathieu Chollet and Torsten Wortwein and Louis-Philippe Morency and Ari Shapiro and Stefan Scherer},
url = {http://ict.usc.edu/pubs/Exploring%20Feedback%20Strategies%20to%20Improve%20Public%20Speaking%20-%20An%20Interactive%20Virtual%20Audience%20Framework.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of UbiComp 2015},
publisher = {ACM},
address = {Osaka, Japan},
abstract = {Good public speaking skills convey strong and effective communication, which is critical in many professions and used in everyday life. The ability to speak publicly requires a lot of training and practice. Recent technological developments enable new approaches for public speaking training that allow users to practice in a safe and engaging environment. We explore feedback strategies for public speaking training that are based on an interactive virtual audience paradigm. We investigate three study conditions: (1) a non-interactive virtual audience (control condition), (2) direct visual feedback, and (3) nonverbal feedback from an interactive virtual audience. We perform a threefold evaluation based on self-assessment questionnaires, expert assessments, and two objectively annotated measures of eye-contact and avoidance of pause fillers. Our experiments show that the interactive virtual audience brings together the best of both worlds: increased engagement and challenge as well as improved public speaking skills as judged by experts.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hiraoka, Takuya; Georgila, Kallirroi; Nouri, Elnaz; Traum, David; Nakamura, Satoshi
Reinforcement Learning in Multi-Party Trading Dialog Proceedings Article
In: Proceeding of SIGDIAL 2015, pp. 32 – 41, Prague, Czech Republic, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{hiraoka_reinforcement_2015,
title = {Reinforcement Learning in Multi-Party Trading Dialog},
author = {Takuya Hiraoka and Kallirroi Georgila and Elnaz Nouri and David Traum and Satoshi Nakamura},
url = {http://ict.usc.edu/pubs/Reinforcement%20Learning%20in%20Multi-Party%20Trading%20Dialog.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of SIGDIAL 2015},
pages = {32--41},
address = {Prague, Czech Republic},
abstract = {In this paper, we apply reinforcement learning (RL) to a multi-party trading scenario where the dialog system (learner) trades with one, two, or three other agents.We experiment with different RL algorithms and reward functions. The negotiation strategy of the learner is learned through simulated dialog with trader simulators. In our experiments, we evaluate how the performance of the learner varies depending on the RL algorithm used and the number of traders. Our results show that (1) even in simple multi-party trading dialog tasks, learning an effective negotiation policy is a very hard problem; and (2) the use of neural fitted Q iteration combined with an incremental reward function produces negotiation policies as effective or even better than the policies of two strong hand-crafted baselines.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Sayan; Laksana, Eugene; Scherer, Stefan; Morency, Louis-Philippe
A Multi-label Convolutional Neural Network Approach to Cross-Domain Action Unit Detection Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{ghosh_multi-label_2015,
title = {A Multi-label Convolutional Neural Network Approach to Cross-Domain Action Unit Detection},
author = {Sayan Ghosh and Eugene Laksana and Stefan Scherer and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/A%20Multi-label%20Convolutional%20Neural%20Network%20Approach%20to%20Cross-Domain%20Action%20Unit%20Detection.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of ACII 2015},
publisher = {IEEE},
address = {Xi'an, China},
abstract = {Action Unit (AU) detection from facial images is an important classification task in affective computing. However most existing approaches use carefully engineered feature extractors along with off-the-shelf classifiers. There has also been less focus on how well classifiers generalize when tested on different datasets. In our paper, we propose a multi-label convolutional neural network approach to learn a shared representation between multiple AUs directly from the input image. Experiments on three AU datasets- CK+, DISFA and BP4D indicate that our approach obtains competitive results on all datasets. Cross-dataset experiments also indicate that the network generalizes well to other datasets, even when under different training and testing conditions.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Alfonso, Bexy; Pynadath, David V.; Lhommet, Margot; Marsella, Stacy
Emotional Perception for Updating Agents’ Beliefs Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@inproceedings{alfonso_emotional_2015,
title = {Emotional Perception for Updating Agents’ Beliefs},
author = {Bexy Alfonso and David V. Pynadath and Margot Lhommet and Stacy Marsella},
url = {http://ict.usc.edu/pubs/Emotional%20Perception%20for%20Updating%20Agents%e2%80%99%20Beliefs.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of ACII 2015},
publisher = {IEEE},
address = {Xi'an, China},
abstract = {The relative influence of perception and situation in emotional judgments has been extensively debated in psychology. A main issue in this debate concerns how these sources of information are integrated. This work proposes a method able to make probabilistic predictions of appraisals of other agents, using mental models of those agents. From these appraisal predictions, predictions about another agent’s expressions are made, integrated with observations of the other agent’s ambiguous emotional expressions using Bayesian techniques, resulting in updates to the agent’s mental models. Our method is inspired by psychological work on human interpretation of emotional expressions. We demonstrate how these appraisals of others’ emotions and observations of their expressions can be an integral part of an agent capable of Theory of Mind reasoning.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale; Gratch, Jonathan; Rosenfeld, Avi
Saying YES! The Cross-cultural Complexities of Favors and Trust in Human-Agent Negotiation Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{mell_saying_2015,
title = {Saying YES! The Cross-cultural Complexities of Favors and Trust in Human-Agent Negotiation},
author = {Johnathan Mell and Gale Lucas and Jonathan Gratch and Avi Rosenfeld},
url = {http://ict.usc.edu/pubs/Saying%20YES!%20The%20Cross-cultural%20Complexities%20of%20Favors%20and%20Trust%20in%20Human-Agent%20Negotiation.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of ACII 2015},
publisher = {IEEE},
address = {Xi'an, China},
abstract = {Negotiation between virtual agents and humans is a complex field that requires designers of systems to be aware not only of the efficient solutions to a given game, but also the mechanisms by which humans create value over multiple negotiations. One way of considering the agent’s impact beyond a single negotiation session is by considering the use of external “ledgers” across multiple sessions. We present results that describe the effects of favor exchange on negotiation outcomes, fairness, and trust for two distinct cross-cultural populations, and illustrate the ramifications of their similarities and differences on virtual agent design.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Stratou, Giota; Hoegen, Rens; Lucas, Gale; Gratch, Jonathan
Emotional Signaling in a Social Dilemma: an Automatic Analysis Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{stratou_emotional_2015,
title = {Emotional Signaling in a Social Dilemma: an Automatic Analysis},
author = {Giota Stratou and Rens Hoegen and Gale Lucas and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Emotional%20Signaling%20in%20a%20Social%20Dilemma-an%20Automatic%20Analysis.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of ACII 2015},
publisher = {IEEE},
address = {Xi'an, China},
abstract = {Emotional signaling plays an important role in negotiations and other social decision-making tasks as it can signal intention and shape joint decisions. Specifically it has been shown to influence cooperation or competition. This has been shown in previous studies for scripted interactions that control emotion signaling and rely on manual coding of affect. In this work we examine face-to-face interactions in an iterative social dilemma task (prisoner’s dilemma) via an automatic framework for facial expression analysis. We explore if automatic analysis of emotion can give insight into the social function of emotion in face-toface interactions. Our analysis suggests that positive and negative displays of emotion are associated with more prosocial and proself game acts respectively. Moreover signaling cooperative intentions to the opponent via positivity can leave participants more open to exploitation, whereas signaling a more tough stance via negativity seems to discourage exploitation. However, the benefit of negative affect is short-term and both players do worse over time if they show negative emotions},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Nazari, Zahra; Lucas, Gale; Gratch, Jonathan
Multimodal Approach for Automatic Recognition of Machiavellianism Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{nazari_multimodal_2015,
title = {Multimodal Approach for Automatic Recognition of Machiavellianism},
author = {Zahra Nazari and Gale Lucas and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Multimodal%20Approach%20for%20Automatic%20Recognition%20of%20Machiavellianism.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of ACII 2015},
publisher = {IEEE},
address = {Xi'an, China},
abstract = {Machiavellianism, by definition, is the tendency to use other people as a tool to achieve one's own goals. Despite the large focus on the Big Five traits of personality, this anti-social trait is relatively unexplored in the computational realm. Automatically recognizing anti-social traits can have important uses across a variety of applications. In this paper, we use negotiation as a setting that provides Machiavellians with the opportunity to reveal their exploitative inclinations. We use textual, visual, acoustic, and behavioral cues to automatically predict High vs. Low Machiavellian personalities. These learned models have good accuracy when compared with other personalityrecognition methods, and we provide evidence that the automatically-learned models are consistent with existing literature on this anti-social trait, giving evidence that these results can generalize to other domains.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Cheng, Lin; Marsella, Stacy
The Appraisal Equivalence Hypothesis: Verifying the domain-independence of a computational model of emotion dynamics Proceedings Article
In: Proceedings of ACII 2015, IEEE, Xi'an, China, 2015.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC, Virtual Humans
@inproceedings{gratch_appraisal_2015,
title = {The Appraisal Equivalence Hypothesis: Verifying the domain-independence of a computational model of emotion dynamics},
author = {Jonathan Gratch and Lin Cheng and Stacy Marsella},
url = {http://ict.usc.edu/pubs/The%20Appraisal%20Equivalence%20Hypothesis-Verifying%20the%20domain-independence%20of%20a%20computational%20model%20of%20emotion%20dynamics.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of ACII 2015},
publisher = {IEEE},
address = {Xi'an, China},
abstract = {Appraisal theory is the most influential theory within affective computing, and serves as the basis for several computational models of emotion. The theory makes strong claims of domain-independence: seemingly different situations, both within and across domains are claimed to produce the identical emotional responses if and only if they are appraised the same way. This article tests this claim, and the predictions of a computational model that embodies it, in two very different interactive games. The results extend prior empirical evidence for appraisal theory to situations where emotions unfold and change over time.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Georgila, Kallirroi; Artstein, Ron; Leuski, Anton
Evaluating Spoken Dialogue Processing for Time-Offset Interaction Proceedings Article
In: Proceedings of 16th Annual SIGdial Meeting on Discourse and Dialogue (SIGDIAL), pp. 199–208, Association for Computational Linguistics, Prague, Czech Republic, 2015, ISBN: 978-1-941643-75-4.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{traum_evaluating_2015,
title = {Evaluating Spoken Dialogue Processing for Time-Offset Interaction},
author = {David Traum and Kallirroi Georgila and Ron Artstein and Anton Leuski},
url = {http://ict.usc.edu/pubs/Evaluating%20Spoken%20Dialogue%20Processing%20for%20Time-Offset%20Interaction.pdf},
isbn = {978-1-941643-75-4},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of 16th Annual SIGdial Meeting on Discourse and Dialogue (SIGDIAL)},
pages = {199--208},
publisher = {Association for Computational Linguistics},
address = {Prague, Czech Republic},
abstract = {This paper presents the first evaluation of a full automated prototype system for time-offset interaction, that is, conversation between a live person and recordings of someone who is not temporally co-present. Speech recognition reaches word error rates as low as 5% with general purpose language models and 19% with domain-specific models, and language understanding can identify appropriate direct responses to 60–66% of user utterances while keeping errors to 10–16% (the remainder being indirect, or off-topic responses). This is sufficient to enable a natural flow and relatively open-ended conversations, with a collection of under 2000 recorded statements.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Spicer, Ryan; Evangelista, Edgar; New, Raymond; Campbell, Julia; Richmond, Todd; McGroarty, Christopher; Vogt, Brian
Innovation and Rapid Evolutionary Design by Virtual Doing: Understanding Early Synthetic Prototyping Proceedings Article
In: Proceeding of 15 Simulation Interoperability Workshop, Orlando, FL, 2015.
Abstract | Links | BibTeX | Tags: ARL, DoD, UARC, Virtual Humans
@inproceedings{spicer_innovation_2015,
title = {Innovation and Rapid Evolutionary Design by Virtual Doing: Understanding Early Synthetic Prototyping},
author = {Ryan Spicer and Edgar Evangelista and Raymond New and Julia Campbell and Todd Richmond and Christopher McGroarty and Brian Vogt},
url = {http://ict.usc.edu/pubs/Innovation%20and%20Rapid%20Evolutionary%20Design%20by%20Virtual%20Doing-Understanding%20Early%20Synthetic.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of 15 Simulation Interoperability Workshop},
address = {Orlando, FL},
abstract = {The proliferation and maturation of tools supporting virtual environments combined with emerging immersive capabilities (e.g. Oculus Rift and other head mounted displays) point towards the ability to take nascent ideas and realize them in engaging ways through an Early Synthetic Prototyping (ESP) system. In effect, “bend electrons before bending metal,” enabling Soldier (end-user) feedback early in the design process, while fostering an atmosphere of collaboration and innovation. Simulation has been used in a variety of ways for concept, design, and testing, but current methods do not put the user into the system in ways that provide deep feedback and enable a dialogue between Warfighter and Engineer (as well as other stakeholders) that can inform design. This paper will discuss how the process of ESP is teased out by using iterative rapid virtual prototyping based on an initial ESP schema, resulting in a rather organic design process – Innovation and Rapid Evolutionary Design by Virtual Doing. By employing canonical use cases, working through the draft schema allows the system to help design itself and inform the process evolution. This type of self-referential meta-design becomes increasingly powerful and relevant given the ability to rapidly create assets, capabilities and environments that immerse developers, stakeholders, and end users early and often in the process. Specific examples of using rapid virtual prototyping for teasing out the design and implications/applications of ESP will be presented, walking through the evolution of both schema and prototypes with specific use cases. In addition, this paper will cover more generalized concepts, approaches, analytics, and lessons-learned as well as implications for innovation throughout research, development, and industry.},
keywords = {ARL, DoD, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Honig, Wolfgang; Milanes, Christina; Scaria, Lisa; Phan, Thai; Bolas, Mark; Ayanian, Nora
Mixed Reality for Robotics Proceedings Article
In: 2015 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 5382 – 5387, IEEE, Hamburg, Germany, 2015.
Abstract | Links | BibTeX | Tags: MxR, UARC
@inproceedings{honig_mixed_2015,
title = {Mixed Reality for Robotics},
author = {Wolfgang Honig and Christina Milanes and Lisa Scaria and Thai Phan and Mark Bolas and Nora Ayanian},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7354138&tag=1},
doi = {10.1109/IROS.2015.7354138},
year = {2015},
date = {2015-09-01},
booktitle = {2015 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
pages = {5382--5387},
publisher = {IEEE},
address = {Hamburg, Germany},
abstract = {Mixed Reality can be a valuable tool for research and development in robotics. In this work, we refine the definition of Mixed Reality to accommodate seamless interaction between physical and virtual objects in any number of physical or virtual environments. In particular, we show that Mixed Reality can reduce the gap between simulation and implementation by enabling the prototyping of algorithms on a combination of physical and virtual objects, including robots, sensors, and humans. Robots can be enhanced with additional virtual capabilities, or can interact with humans without sharing physical space. We demonstrate Mixed Reality with three representative experiments, each of which highlights the advantages of our approach. We also provide a testbed for Mixed Reality with three different virtual robotics environments in combination with the Crazyflie 2.0 quadcopter.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Pincus, Eli; Georgila, Kallirroi; Traum, David
Which Synthetic Voice Should I Choose for an Evocative Task? Proceedings Article
In: Proceedings of SIGDIAL 2015, pp. 105 – 113, Prague, Czech Republic, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{pincus_which_2015,
title = {Which Synthetic Voice Should I Choose for an Evocative Task?},
author = {Eli Pincus and Kallirroi Georgila and David Traum},
url = {http://ict.usc.edu/pubs/Which%20Synthetic%20Voice%20Should%20I%20Choose%20for%20an%20Evocative%20Task.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of SIGDIAL 2015},
pages = {105--113},
address = {Prague, Czech Republic},
abstract = {We explore different evaluation methods for 4 different synthetic voices and 1 human voice. We investigate whether intelligibility, naturalness, or likability of a voice is correlated to the voice’s evocative function potential, a measure of the voice’s ability to evoke an intended reaction from the listener. We also investigate the extent to which naturalness and likability ratings vary depending on whether or not exposure to a voice is extended and continuous vs. short-term and sporadic (interleaved with other voices). Finally, we show that an automatic test can replace the standard intelligibility tests for text-to-speech (TTS) systems, which eliminates the need to hire humans to perform transcription tasks saving both time and money.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Paetzel, Maike; Manuvinakurike, Ramesh; DeVault, David
"So, which one is it?" The effect of alternative incremental architectures in a high-performance game-playing agent Proceedings Article
In: Proceedings of SIGDIAL 2015, pp. 77 – 86, Prague, Czech Republic, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{paetzel_so_2015,
title = {"So, which one is it?" The effect of alternative incremental architectures in a high-performance game-playing agent},
author = {Maike Paetzel and Ramesh Manuvinakurike and David DeVault},
url = {http://ict.usc.edu/pubs/So,%20which%20one%20is%20it%20-%20The%20effect%20of%20alternative%20incremental%20architectures%20in%20a%20high-performance%20game-playing%20agent.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceedings of SIGDIAL 2015},
pages = {77--86},
address = {Prague, Czech Republic},
abstract = {This paper introduces Eve, a high-performance agent that plays a fast-paced image matching game in a spoken dialogue with a human partner. The agent can be optimized and operated in three different modes of incremental speech processing that optionally include incremental speech recognition, language understanding, and dialogue policies. We present our framework for training and evaluating the agent’s dialogue policies. In a user study involving 125 human participants, we evaluate three incremental architectures against each other and also compare their performance to human-human gameplay. Our study reveals that the most fully incremental agent achieves game scores that are comparable to those achieved in human-human gameplay, are higher than those achieved by partially and nonincremental versions, and are accompanied by improved user perceptions of efficiency, understanding of speech, and naturalness of interaction.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Feng, Andrew; Leuski, Anton; Marsella, Stacy; Casas, Dan; Kang, Sin-Hwa; Shapiro, Ari
A Platform for Building Mobile Virtual Humans Proceedings Article
In: Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA), pp. 310–319, Springer, Delft, Netherlands, 2015.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC, Virtual Humans
@inproceedings{feng_platform_2015,
  author    = {Andrew Feng and Anton Leuski and Stacy Marsella and Dan Casas and Sin-Hwa Kang and Ari Shapiro},
  title     = {A Platform for Building Mobile Virtual Humans},
  booktitle = {Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA)},
  pages     = {310--319},
  publisher = {Springer},
  address   = {Delft, Netherlands},
  year      = {2015},
  date      = {2015-08-01},
  url       = {http://ict.usc.edu/pubs/A%20Platform%20for%20Building%20Mobile%20Virtual%20Humans.pdf},
  doi       = {10.1007/978-3-319-21996-7},
  abstract  = {We describe an authoring framework for developing virtual humans on mobile applications. The framework abstracts many elements needed for virtual human generation and interaction, such as the rapid development of nonverbal behavior, lip syncing to speech, dialogue management, access to speech transcription services, and access to mobile sensors such as the microphone, gyroscope and location components.},
  keywords  = {Social Simulation, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Bruijnes, Merijn; Akker, Rieks; Hartholt, Arno; Heylen, Dirk
Virtual Suspect William Proceedings Article
In: Intelligent Virtual Agents, pp. 67–76, Springer, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{bruijnes_virtual_2015,
title = {Virtual Suspect William},
author = {Merijn Bruijnes and Rieks Akker and Arno Hartholt and Dirk Heylen},
url = {http://ict.usc.edu/pubs/Virtual%20Suspect%20William.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {Intelligent Virtual Agents},
pages = {67--76},
publisher = {Springer},
abstract = {We evaluate an algorithm which computes the responses of an agent that plays the role of a suspect in simulations of police interrogations. The algorithm is based on a cognitive model - the response model - that is centred around keeping track of interpersonal relations. The model is parametrized in such a way that different personalities of the virtual suspect can be defined. In the evaluation we defined three different personalities and had participants guess the personality based on the responses the model provided in an interaction with the participant. We investigate what factors contributed to the ability of a virtual agent to show behaviour that was recognized by participants as belonging to a persona.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Paetzel, Maike; DeVault, David
Reducing the Cost of Dialogue System Training and Evaluation with Online, Crowd-Sourced Dialogue Data Collection Proceedings Article
In: Proceedings of SEMDIAL 2015 goDIAL, pp. 113 – 121, Gothenburg, Sweden, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{manuvinakurike_reducing_2015,
title = {Reducing the Cost of Dialogue System Training and Evaluation with Online, Crowd-Sourced Dialogue Data Collection},
author = {Ramesh Manuvinakurike and Maike Paetzel and David DeVault},
url = {http://ict.usc.edu/pubs/Reducing%20the%20Cost%20of%20Dialogue%20System%20Training%20and%20Evaluation%20with%20Online,%20Crowd-Sourced%20Dialogue%20Data%20Collection.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of SEMDIAL 2015 goDIAL},
pages = {113--121},
address = {Gothenburg, Sweden},
abstract = {This paper presents and analyzes an approach to crowd-sourced spoken dialogue data collection. Our approach enables low cost collection of browser-based spoken dialogue interactions between two remote human participants (human-human condition) as well as one remote human participant and an automated dialogue system (human-agent condition). We present a case study in which 200 remote participants were recruited to participate in a fast-paced image matching game, and which included both human-human and human-agent conditions. We discuss several technical challenges encountered in achieving this crowd-sourced data collection, and analyze the costs in time and money of carrying out the study. Our results suggest the potential of crowdsourced spoken dialogue data to lower costs and facilitate a range of research in dialogue modeling, dialogue system design, and system evaluation.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M.; Gratch, Jonathan; Cheng, Lin; Marsella, Stacy
When the going gets tough: Grit predicts costly perseverance Journal Article
In: Journal of Research in Personality, vol. 59, pp. 15–22, 2015, ISSN: 00926566.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC, Virtual Humans
@article{lucas_when_2015,
title = {When the going gets tough: Grit predicts costly perseverance},
author = {Gale M. Lucas and Jonathan Gratch and Lin Cheng and Stacy Marsella},
url = {http://ict.usc.edu/pubs/When%20the%20going%20gets%20tough-Grit%20predicts%20costly%20perseverance.pdf},
doi = {10.1016/j.jrp.2015.08.004},
issn = {0092-6566},
year = {2015},
date = {2015-08-01},
journal = {Journal of Research in Personality},
volume = {59},
pages = {15--22},
abstract = {In this research, we investigate how grittier individuals might incur some costs by persisting when they could move on. Grittier participants were found to be less willing to give up when failing even though they were likely to incur a cost for their persistence. First, grittier participants are more willing to risk failing to complete a task by persisting on individual items. Second, when they are losing, they expend more effort and persist longer in a game rather than quit. Gritty participants have more positive emotions and expectations toward the task, which mediates the relationship between grit and staying to persist when they are losing. Results show gritty individuals are more willing to risk suffering monetary loss to persist.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Kang, Sin-Hwa; Feng, Andrew; Leuski, Anton; Casas, Dan; Shapiro, Ari
Smart Mobile Virtual Humans: “Chat with Me!” Proceedings Article
In: Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA), pp. 475–478, Springer, Delft, Netherlands, 2015.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC, Virtual Humans
@inproceedings{kang_smart_2015,
title = {Smart Mobile Virtual Humans: “Chat with Me!”},
author = {Sin-Hwa Kang and Andrew Feng and Anton Leuski and Dan Casas and Ari Shapiro},
url = {http://ict.usc.edu/pubs/Smart%20Mobile%20Virtual%20Humans%20-%20Chat%20with%20Me.pdf},
doi = {10.1007/978-3-319-21996-7},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA)},
pages = {475--478},
publisher = {Springer},
address = {Delft, Netherlands},
abstract = {In this study, we are interested in exploring whether people would talk with 3D animated virtual humans using a smartphone for a longer amount of time as a sign of feeling rapport [5], compared to non-animated or audio-only characters in everyday life. Based on previous studies [2, 7, 10], users prefer animated characters in emotionally engaged interactions when the characters were displayed on mobile devices, yet in a lab setting. We aimed to reach a broad range of users outside of the lab in natural settings to investigate the potential of our virtual human on smartphones to facilitate casual, yet emotionally engaging conversation. We also found that the literature has not reached a consensus regarding the ideal gaze patterns for a virtual human, one thing researchers agree on is that inappropriate gaze could negatively impact conversations at times, even worse than receiving no visual feedback at all [1, 4]. Everyday life may bring the experience of awkwardness or uncomfortable sentiments in reaction to continuous mutual gaze. On the other hand, gaze aversion could also make a speaker think their partner is not listening. Our work further aims to address this question of what constitutes appropriate eye gaze in emotionally engaged interactions. We developed a 3D animated and chat-based virtual human which presented emotionally expressive nonverbal behaviors such as facial expressions, head gestures, gaze, and other upper body movements (see Figure 1). The virtual human displayed appropriate gaze that was either consisted of constant mutual gaze or gaze aversion based on a statistical model of saccadic eye movement [8] while listening. Both gaze patterns were accompanied by other forms of appropriate nonverbal feedback. To explore the question of optimal communicative medium, we distributed our virtual human application to users via an app store for Android-powered phones (i.e. 
Google Play Store) in order to target users who owned a smartphone and could use our application in various natural settings.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Bolas, Mark; Debevec, Paul
An Automultiscopic Projector Array for Interactive Digital Humans Proceedings Article
In: SIGGRAPH 2015, pp. 1–1, ACM Press, Los Angeles, CA, 2015, ISBN: 978-1-4503-3635-2.
Abstract | Links | BibTeX | Tags: Graphics, MxR, UARC
@inproceedings{jones_automultiscopic_2015,
title = {An Automultiscopic Projector Array for Interactive Digital Humans},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/An%20Automultiscopic%20Projector%20Array%20for%20Interactive%20Digital%20Humans.pdf},
doi = {10.1145/2782782.2792494},
isbn = {978-1-4503-3635-2},
year = {2015},
date = {2015-08-01},
booktitle = {SIGGRAPH 2015},
pages = {1--1},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {Automultiscopic 3D displays allow a large number of viewers to experience 3D content simultaneously without the hassle of special glasses or head gear. Our display uses a dense array of video projectors to generate many images with high-angular density over a wide-field of view. As each user moves around the display, their eyes smoothly transition from one view to the next. The display is ideal for displaying life-size human subjects as it allows for natural personal interactions with 3D cues such as eye gaze and spatial hand gestures. In this installation, we will explore “time-offset” interactions with recorded 3D human subjects.},
keywords = {Graphics, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Yu, Xueming; Wang, Shanhe; Busch, Jay; Phan, Thai; McSheery, Tracy; Bolas, Mark; Debevec, Paul
Virtual Headcam: Pan/tilt Mirror-based Facial Performance Tracking Proceedings Article
In: Proceedings of ACM SIGGRAPH 2015 Posters, pp. 94, ACM, Los Angeles, CA, 2015.
Abstract | Links | BibTeX | Tags: Graphics, MxR, UARC
@inproceedings{yu_virtual_2015,
title = {Virtual Headcam: Pan/tilt Mirror-based Facial Performance Tracking},
author = {Xueming Yu and Shanhe Wang and Jay Busch and Thai Phan and Tracy McSheery and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Virtual%20Headcam%20-%20Pantilt%20Mirror-based%20Facial%20Performance%20Tracking.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of ACM SIGGRAPH 2015 Posters},
pages = {94},
publisher = {ACM},
address = {Los Angeles, CA},
abstract = {High-end facial performance capture solutions typically use head-mounted camera systems which provide one or more close-up video streams of each actor's performance. These provide clear views of each actor's performance, but can be bulky, uncomfortable, get in the way of sight lines, and prevent actors from getting close to each other. To address this, we propose a virtual head-mounted camera system: an array of cameras placed around the performance capture volume which automatically track zoomed-in, sharply focussed, high-resolution views of each actor's face from a multitude of directions. The resulting imagery can be used in conjunction with body motion capture data to derive nuanced facial performances without head-mounted cameras.},
keywords = {Graphics, MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Nagano, Koki; Fyffe, Graham
Skin Stretch - Simulating Dynamic Skin Microgeometry Proceedings Article
In: ACM SIGGRAPH 2015 Computer Animation Festival, pp. 133, Los Angeles, CA, 2015.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{nagano_skin_2015,
  author    = {Koki Nagano and Graham Fyffe},
  title     = {Skin Stretch - Simulating Dynamic Skin Microgeometry},
  booktitle = {ACM SIGGRAPH 2015 Computer Animation Festival},
  pages     = {133},
  address   = {Los Angeles, CA},
  year      = {2015},
  date      = {2015-08-01},
  url       = {http://ict.usc.edu/pubs/Skin%20Stretch%20-%20Simulating%20Dynamic%20Skin%20Microgeometry.pdf},
  abstract  = {This demonstration of the effects of skin microstructure deformation on high-resolution dynamic facial rendering features the state-of-the-art skin in microstructure simulation, facial scanning, and rendering. Facial animations made with the technique show more realistic and expressive skin under facial expression.},
  keywords  = {Graphics, UARC},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nazari, Zahra; Lucas, Gale M.; Gratch, Jonathan
Opponent Modeling for Virtual Human Negotiators Proceedings Article
In: Intelligent Virtual Agents, pp. 39–49, Springer International Publishing, Delft, Netherlands, 2015, ISBN: 978-3-319-21995-0 978-3-319-21996-7.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{nazari_opponent_2015,
title = {Opponent Modeling for Virtual Human Negotiators},
author = {Zahra Nazari and Gale M. Lucas and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Opponent%20Modeling%20for%20Virtual%20Human%20Negotiators.pdf},
doi = {10.1007/978-3-319-21996-7_4},
isbn = {978-3-319-21995-0 978-3-319-21996-7},
year = {2015},
date = {2015-08-01},
booktitle = {Intelligent Virtual Agents},
volume = {9238},
pages = {39--49},
publisher = {Springer International Publishing},
address = {Delft, Netherlands},
abstract = {Negotiation is a challenging domain for virtual human research. One aspect of this problem, known as opponent modeling, is discovering what the other party wants from the negotiation. Research in automated negotiation has yielded a number opponent modeling techniques but we show that these methods do not easily transfer to human-agent settings. We propose a more effective heuristic for inferring preferences both from a negotiator’s pattern of offers and verbal statements about their preferences. This method has the added advantage that it can detect negotiators that lie about their preferences. We discuss several ways the method can enhance the capabilities of a virtual human negotiator.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ustun, Volkan; Rosenbloom, Paul S.
Towards Adaptive, Interactive Virtual Humans in Sigma Proceedings Article
In: Intelligent Virtual Agents, pp. 98 –108, Springer, Delft, Netherlands, 2015, ISBN: 978-3-319-21995-0.
Abstract | Links | BibTeX | Tags: CogArch, UARC, Virtual Humans
@inproceedings{ustun_towards_2015,
title = {Towards Adaptive, Interactive Virtual Humans in Sigma},
author = {Volkan Ustun and Paul S. Rosenbloom},
url = {http://ict.usc.edu/pubs/Towards%20Adaptive,%20Interactive%20Virtual%20Humans%20in%20Sigma.pdf},
doi = {10.1007/978-3-319-21996-7_10},
isbn = {978-3-319-21995-0},
year = {2015},
date = {2015-08-01},
booktitle = {Intelligent Virtual Agents},
volume = {9238},
pages = {98--108},
publisher = {Springer},
address = {Delft, Netherlands},
abstract = {Sigma is a nascent cognitive architecture/system that combines concepts from graphical models with traditional symbolic architectures. Here an initial Sigma-based virtual human (VH) is introduced that combines probabilistic reasoning, rule-based decision-making, Theory of Mind, Simultaneous Localization and Mapping and reinforcement learning in a unified manner. This non-modular unification of diverse cognitive, robotic and VH capabilities provides an important first step towards fully adaptive and interactive VHs in Sigma.},
keywords = {CogArch, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; DeVault, David; Lucas, Gale M.; Marsella, Stacy
Negotiation as a Challenge Problem for Virtual Humans Proceedings Article
In: Brinkman, Willem-Paul; Broekens, Joost; Heylen, Dirk (Ed.): Intelligent Virtual Agents, pp. 201–215, Springer International Publishing, Delft, Netherlands, 2015, ISBN: 978-3-319-21995-0 978-3-319-21996-7.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{gratch_negotiation_2015,
title = {Negotiation as a Challenge Problem for Virtual Humans},
author = {Jonathan Gratch and David DeVault and Gale M. Lucas and Stacy Marsella},
editor = {Willem-Paul Brinkman and Joost Broekens and Dirk Heylen},
url = {http://ict.usc.edu/pubs/Negotiation%20as%20a%20Challenge%20Problem%20for%20Virtual%20Humans.pdf},
doi = {10.1007/978-3-319-21996-7_21},
isbn = {978-3-319-21995-0 978-3-319-21996-7},
year = {2015},
date = {2015-08-01},
booktitle = {Intelligent Virtual Agents},
volume = {9238},
pages = {201--215},
publisher = {Springer International Publishing},
address = {Delft, Netherlands},
abstract = {We argue for the importance of negotiation as a challenge problem for virtual human research, and introduce a virtual conversational agent that allows people to practice a wide range of negotiation skills. We describe the multi-issue bargaining task, which has become a de facto standard for teaching and research on negotiation in both the social and computer sciences. This task is popular as it allows scientists or instructors to create a variety of distinct situations that arise in real-life negotiations, simply by manipulating a small number of mathematical parameters. We describe the development of a virtual human that will allow students to practice the interpersonal skills they need to recognize and navigate these situations. An evaluation of an early wizard-controlled version of the system demonstrates the promise of this technology for teaching negotiation and supporting scientific research on social intelligence.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Hill, Susan; Morency, Louis-Philippe; Pynadath, David; Traum, David
Exploring the Implications of Virtual Human Research for Human-Robot Teams Proceedings Article
In: Virtual, Augmented and Mixed Reality, pp. 186–196, Springer International Publishing, Los Angeles, CA, 2015, ISBN: 978-3-319-21066-7 978-3-319-21067-4.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{gratch_exploring_2015,
title = {Exploring the Implications of Virtual Human Research for Human-Robot Teams},
author = {Jonathan Gratch and Susan Hill and Louis-Philippe Morency and David Pynadath and David Traum},
url = {http://ict.usc.edu/pubs/Exploring%20the%20Implications%20of%20Virtual%20Human%20Research%20for%20Human-Robot%20Teams.pdf},
doi = {10.1007/978-3-319-21067-4_20},
isbn = {978-3-319-21066-7 978-3-319-21067-4},
year = {2015},
date = {2015-08-01},
booktitle = {Virtual, Augmented and Mixed Reality},
volume = {9179},
pages = {186--196},
publisher = {Springer International Publishing},
address = {Los Angeles, CA},
abstract = {This article briefly explores potential synergies between the fields of virtual human and human-robot interaction research. We consider challenges in advancing the effectiveness of human-robot teams makes recommendations for enhancing this by facilitating synergies between robotics and virtual human research.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Hoegen, Rens; Stratou, Giota; Lucas, Gale M.; Gratch, Jonathan
Comparing Behavior Towards Humans and Virtual Humans in a Social Dilemma Proceedings Article
In: Intelligent Virtual Agents, pp. 452–460, Springer International Publishing, Delft, Netherlands, 2015, ISBN: 978-3-319-21995-0 978-3-319-21996-7.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{hoegen_comparing_2015,
title = {Comparing Behavior Towards Humans and Virtual Humans in a Social Dilemma},
author = {Rens Hoegen and Giota Stratou and Gale M. Lucas and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Comparing%20Behavior%20Towards%20Humans%20and%20Virtual%20Humans%20in%20a%20Social%20Dilemma.pdf},
doi = {10.1007/978-3-319-21996-7_48},
isbn = {978-3-319-21995-0 978-3-319-21996-7},
year = {2015},
date = {2015-08-01},
booktitle = {Intelligent Virtual Agents},
volume = {9238},
pages = {452--460},
publisher = {Springer International Publishing},
address = {Delft, Netherlands},
abstract = {The difference of shown social behavior towards virtual humans and real humans has been subject to much research. Many of these studies compare virtual humans (VH) that are presented as either virtual agents controlled by a computer or as avatars controlled by real humans. In this study we directly compare VHs with real humans. Participants played an economic game against a computer-controlled VH or a visible human opponent. Decisions made throughout the game were logged, additionally participants’ faces were filmed during the study and analyzed with expression recognition software. The analysis of choices showed participants are far more willing to violate social norms with VHs: they are more willing to steal and less willing to forgive. Facial expressions show trends that suggest they are treating VHs less socially. The results highlight, that even in impoverished social interactions, VHs have a long way to go before they can evoke truly human-like responses.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Graham, Paul; Fyffe, Graham; Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Near-Instant Capture of High-Resolution Facial Geometry and Reflectance Proceedings Article
In: Proceedings of ACM SIGGRAPH 2015 Talks, pp. 1–1, ACM Press, 2015, ISBN: 978-1-4503-3636-9.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{graham_near-instant_2015,
title = {Near-Instant Capture of High-Resolution Facial Geometry and Reflectance},
author = {Paul Graham and Graham Fyffe and Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Near-Instant%20Capture%20of%20High-Resolution%20Facial%20Geometry%20and%20Reflectance.pdf},
doi = {10.1145/2775280.2792561},
isbn = {978-1-4503-3636-9},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of ACM SIGGRAPH 2015 Talks},
pages = {1--1},
publisher = {ACM Press},
abstract = {Modeling realistic human characters is frequently done using 3D recordings of the shape and appearance of real people, often across a set of different facial expressions to build blendshape facial models. Believable characters that cross the "Uncanny Valley" require high-quality geometry, texture maps, reflectance properties, and surface detail at the level of skin pores and fine wrinkles. Unfortunately, there has not yet been a technique for recording such datasets that is near-instantaneous and low-cost. While some facial capture techniques are instantaneous and inexpensive [Beeler et al. 2010], these do not generally provide lighting-independent texture maps, specular reflectance information, or high-resolution surface normal detail for relighting. In contrast, techniques which use multiple photographs from spherical lighting setups [Ghosh et al. 2011] do capture such reflectance properties, at the expense of longer capture times and complicated custom equipment.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jon; Lucas, Gale; Malandrakis, Nikolaos; Szablowski, Evan; Fessler, Eli
To tweet or not to tweet: The question of emotion and excitement about sporting events Proceedings Article
In: Proceedings of the Bi-Annual Conference of the International Society for Research on Emotion, Geneva, Switzerland, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{gratch_tweet_2015,
  author    = {Jon Gratch and Gale Lucas and Nikolaos Malandrakis and Evan Szablowski and Eli Fessler},
  title     = {To tweet or not to tweet: The question of emotion and excitement about sporting events},
  booktitle = {Proceedings of the Bi-Annual Conference of the International Society for Research on Emotion},
  address   = {Geneva, Switzerland},
  year      = {2015},
  date      = {2015-07-01},
  url       = {http://ict.usc.edu/pubs/To%20tweet%20or%20not%20to%20tweet%20-The%20question%20of%20emotion%20and%20excitement%20about%20sporting%20events.pdf},
  abstract  = {Sporting events can serve as laboratories to explore emotion and computational tools provide new ways to examine emotional processes “in the wild”. Moreover, emotional processes are assumed -but untested- in sports economics. For example, according to the well-studied uncertainty of outcome hypothesis (UOH), “close” games are more exciting and therefore better attended. If one team were certain to win, it would take away a major source of excitement, reducing positive affect, and therefore decreasing attendance. The role of emotion here is assumed but has not been tested; furthermore, the measures used (ticket sales, attendance, TV-viewership) do not allow for such a test because they are devoid of emotional content. To address this problem, we use tweets per minute (specifically, tweets posted during 2014 World Cup with official game hashtags). Sentiment analysis of these tweets can give interesting insights into what emotional processes are involved. Another benefit of tweets is that they are dynamic, and novel results from dynamic analyses (of TV-viewership) suggest that the UOH effect can actually reverse as games unfold (people switch channels away from close games). We therefore also reconsider the UOH, specifically, extending it by both examining sentiment and dynamic changes during the game. To consider such changes, we focus on games that could have been close (high in uncertainty), but ended up being lower in uncertainty. We operationalize such unexpected certainty of outcome as the extent to which games are predicted to be “close” (based on betting odds), but ended up with a bigger difference between the teams’ scores than was expected. Statistical analyses revealed that, contrary to the UOH, games with a bigger difference in score between teams than expected had higher tweets per minute. 
We also performed sentiment analysis, categorizing each tweet as positive, negative or neutral, and found that games with higher tweets per minute also have a higher percentage of negative tweets. Furthermore, games that have a bigger difference than expected have a higher percentage of negative tweets (compared to games closer to what is expected). This analysis seems to suggest that, contrary to assumptions in sports economics, excitement relates to expressions of negative emotion (and not positive emotion). The results are discussed in terms of innovations in methodology and understanding the role of emotion for “tuning in” to real world events. Further research could explore the specific mechanisms that link negative sentiment to excitement, such as worry or out-group derogation.},
  keywords  = {UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Cummins, Nicholas; Scherer, Stefan; Krajewski, Jarek; Schnieder, Sebastian; Epps, Julien; Quatieri, Thomas F.
A Review of Depression and Suicide Risk Assessment Using Speech Analysis Journal Article
In: Speech Communication, vol. 71, pp. 10 – 49, 2015, ISSN: 0167-6393.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{cummins_review_2015,
title = {A Review of Depression and Suicide Risk Assessment Using Speech Analysis},
author = {Nicholas Cummins and Stefan Scherer and Jarek Krajewski and Sebastian Schnieder and Julien Epps and Thomas F. Quatieri},
url = {http://www.sciencedirect.com/science/article/pii/S0167639315000369},
doi = {10.1016/j.specom.2015.03.004},
issn = {0167-6393},
year = {2015},
date = {2015-07-01},
journal = {Speech Communication},
volume = {71},
pages = {10--49},
abstract = {This paper is the first review into the automatic analysis of speech for use as an objective predictor of depression and suicidality. Both conditions are major public health concerns; depression has long been recognised as a prominent cause of disability and burden worldwide, whilst suicide is a misunderstood and complex course of death that strongly impacts the quality of life and mental health of the families and communities left behind. Despite this prevalence the diagnosis of depression and assessment of suicide risk, due to their complex clinical characterisations, are difficult tasks, nominally achieved by the categorical assessment of a set of specific symptoms. However many of the key symptoms of either condition, such as altered mood and motivation, are not physical in nature; therefore assigning a categorical score to them introduces a range of subjective biases to the diagnostic procedure. Due to these difficulties, research into finding a set of biological, physiological and behavioural markers to aid clinical assessment is gaining in popularity. This review starts by building the case for speech to be considered a key objective marker for both conditions; reviewing current diagnostic and assessment methods for depression and suicidality including key non-speech biological, physiological and behavioural markers and highlighting the expected cognitive and physiological changes associated with both conditions which affect speech production. We then review the key characteristics; size, associated clinical scores and collection paradigm, of active depressed and suicidal speech databases. The main focus of this paper is on how common paralinguistic speech characteristics are affected by depression and suicidality and the application of this information in classification and prediction systems. 
The paper concludes with an in-depth discussion on the key challenges – improving the generalisability through greater research collaboration and increased standardisation of data collection, and the mitigating unwanted sources of variability – that will shape the future research directions of this rapidly growing field of speech processing research.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Kommers, Cody; Ustun, Volkan; Demski, Abram; Rosenbloom, Paul
Hierarchical Reasoning with Distributed Vector Representations Proceedings Article
In: Proceedings of 37th Annual Conference of the Cognitive Science Society, Cognitive Science Society, Pasadena, CA, 2015.
Abstract | Links | BibTeX | Tags: CogArch, UARC, Virtual Humans
@inproceedings{kommers_hierarchical_2015,
  author    = {Cody Kommers and Volkan Ustun and Abram Demski and Paul Rosenbloom},
  title     = {Hierarchical Reasoning with Distributed Vector Representations},
  booktitle = {Proceedings of 37th Annual Conference of the Cognitive Science Society},
  publisher = {Cognitive Science Society},
  address   = {Pasadena, CA},
  year      = {2015},
  date      = {2015-07-01},
  url       = {http://ict.usc.edu/pubs/Hierarchical%20Reasoning%20with%20Distributed%20Vector%20Representations.pdf},
  abstract  = {We demonstrate that distributed vector representations are capable of hierarchical reasoning by summing sets of vectors representing hyponyms (subordinate concepts) to yield a vector that resembles the associated hypernym (superordinate concept). These distributed vector representations constitute a potentially neurally plausible model while demonstrating a high level of performance in many different cognitive tasks. Experiments were run using DVRS, a word embedding system designed for the Sigma cognitive architecture, and Word2Vec, a state-of-the-art word embedding system. These results contribute to a growing body of work demonstrating the various tasks on which distributed vector representations perform competently.},
  keywords  = {CogArch, UARC, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Demski, Abram
Expression Graphs Unifying Factor Graphs and Sum-Product Networks Proceedings Article
In: Artificial General Intelligence, pp. 241–250, Springer, Berlin, Germany, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{demski_expression_2015,
title = {Expression Graphs Unifying Factor Graphs and Sum-Product Networks},
author = {Abram Demski},
url = {http://ict.usc.edu/pubs/Expression%20Graphs%20Unifying%20Factor%20Graphs%20and%20Sum-Product%20Networks.pdf},
year = {2015},
date = {2015-07-01},
booktitle = {Artificial General Intelligence},
pages = {241--250},
publisher = {Springer},
address = {Berlin, Germany},
abstract = {Factor graphs are a very general knowledge representation, subsuming many existing formalisms in AI. Sum-product networks are a more recent representation, inspired by studying cases where factor graphs are tractable. Factor graphs emphasize expressive power, while sum-product networks restrict expressiveness to get strong guarantees on speed of inference. A sum-product network is not simply a restricted factor graph, however. Although the inference algorithms for the two structures are very similar, translating a sum-product network into factor graph representation can result in an exponential slowdown. We propose a formalism which generalizes factor graphs and sum-product networks, such that inference is fast in cases whose structure is close to a sum-product network.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Nagano, Koki; Fyffe, Graham; Alexander, Oleg; Barbič, Jernej; Li, Hao; Ghosh, Abhijeet; Debevec, Paul
Skin Microstructure Deformation with Displacement Map Convolution Journal Article
In: ACM Transactions on Graphics, vol. 34, no. 4, pp. 1–10, 2015, ISSN: 0730-0301.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@article{nagano_skin_2015-1,
title = {Skin Microstructure Deformation with Displacement Map Convolution},
author = {Koki Nagano and Graham Fyffe and Oleg Alexander and Jernej Barbič and Hao Li and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Skin%20Microstructure%20Deformation%20with%20Displacement%20Map%20Convolution.pdf},
doi = {10.1145/2766894},
issn = {0730-0301},
year = {2015},
date = {2015-07-01},
journal = {ACM Transactions on Graphics},
volume = {34},
number = {4},
pages = {1--10},
abstract = {We present a technique for synthesizing the effects of skin microstructure deformation by anisotropically convolving a high-resolution displacement map to match normal distribution changes in measured skin samples. We use a 10-micron resolution scanning technique to measure several in vivo skin samples as they are stretched and compressed in different directions, quantifying how stretching smooths the skin and compression makes it rougher. We tabulate the resulting surface normal distributions, and show that convolving a neutral skin microstructure displacement map with blurring and sharpening filters can mimic normal distribution changes and microstructure deformations. We implement the spatially-varying displacement map filtering on the GPU to interactively render the effects of dynamic microgeometry on animated faces obtained from high-resolution facial scans.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
Nouri, Elnaz; Traum, David
Cross cultural report of values and decisions in the multi round ultimatum game and the centipede game Proceedings Article
In: Proceedings of AHFE 2015, Las Vegas, NV, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{nouri_cross_2015,
title = {Cross cultural report of values and decisions in the multi round ultimatum game and the centipede game},
author = {Elnaz Nouri and David Traum},
url = {http://ict.usc.edu/pubs/Cross%20cultural%20report%20of%20values%20and%20decisions%20in%20the%20multi%20round%20ultimatum%20game%20and%20the%20centipede%20game.pdf},
year = {2015},
date = {2015-07-01},
booktitle = {Proceedings of AHFE 2015},
address = {Las Vegas, NV},
abstract = {This paper investigates the cultural differences in decision making behavior of people from the US and India. We study players from these cultures playing the Multi Round Ultimatum Game and the Centipede Game online. In order to study how people from different cultures evaluate decisions we use criteria from the Multi Attribute Relational Values (MARV) survey. Our results confirm the existence of cultural differences in how people from US and India make decisions in the Ultimatum and Centipede games. We also observe differences in responses to survey questions implying differences in the amount of importance that the two cultures assign to the MARV decision making criteria.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S.; Gratch, Jonathan; Ustun, Volkan
Towards Emotion in Sigma: From Appraisal to Attention Proceedings Article
In: Proceedings of AGI 2015, pp. 142 – 151, Springer International Publishing, Berlin, Germany, 2015.
Abstract | Links | BibTeX | Tags: CogArch, UARC, Virtual Humans
@inproceedings{rosenbloom_towards_2015,
title = {Towards Emotion in Sigma: From Appraisal to Attention},
author = {Paul S. Rosenbloom and Jonathan Gratch and Volkan Ustun},
url = {http://ict.usc.edu/pubs/Towards%20Emotion%20in%20Sigma%20-%20From%20Appraisal%20to%20Attention.pdf},
year = {2015},
date = {2015-07-01},
booktitle = {Proceedings of AGI 2015},
volume = {9205},
pages = {142--151},
publisher = {Springer International Publishing},
address = {Berlin, Germany},
abstract = {A first step is taken towards incorporating emotional processing into Sigma, a cognitive architecture that is grounded in graphical models, with the addition of appraisal variables for expectedness and desirability plus their initial implications for attention at two levels of the control hierarchy. The results leverage many of Sigma's existing capabilities but with a few key additions.},
keywords = {CogArch, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Scherer, Stefan; Lucas, Gale; Gratch, Jonathan; Rizzo, Albert; Morency, Louis-Philippe
Self-reported symptoms of depression and PTSD are associated with reduced vowel space in screening interviews Journal Article
In: IEEE Transactions on Affective Computing, no. 99, 2015, ISSN: 1949-3045, (doi: 10.1109/TAFFC.2015.2440264).
Abstract | Links | BibTeX | Tags: MedVR, UARC, Virtual Humans
@article{scherer_self-reported_2015,
title = {Self-reported symptoms of depression and PTSD are associated with reduced vowel space in screening interviews},
author = {Stefan Scherer and Gale Lucas and Jonathan Gratch and Albert Rizzo and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Self-reported%20symptoms%20of%20depression%20and%20PTSD%20are%20associated%20with%20reduced%20vowel%20space%20in%20screening%20interviews.pdf},
doi = {10.1109/TAFFC.2015.2440264},
issn = {1949-3045},
year = {2015},
date = {2015-06-01},
journal = {IEEE Transactions on Affective Computing},
number = {99},
abstract = {Reduced frequency range in vowel production is a well documented speech characteristic of individuals with psychological and neurological disorders. Affective disorders such as depression and post-traumatic stress disorder (PTSD) are known to influence motor control and in particular speech production. The assessment and documentation of reduced vowel space and reduced expressivity often either rely on subjective assessments or on analysis of speech under constrained laboratory conditions (e.g. sustained vowel production, reading tasks). These constraints render the analysis of such measures expensive and impractical. Within this work, we investigate an automatic unsupervised machine learning based approach to assess a speaker’s vowel space. Our experiments are based on recordings of 253 individuals. Symptoms of depression and PTSD are assessed using standard self-assessment questionnaires and their cut-off scores. The experiments show a significantly reduced vowel space in subjects that scored positively on the questionnaires. We show the measure’s statistical robustness against varying demographics of individuals and articulation rate. The reduced vowel space for subjects with symptoms of depression can be explained by the common condition of psychomotor retardation influencing articulation and motor control. These findings could potentially support treatment of affective disorders, like depression and PTSD in the future.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Lane, H. Chad; Core, Mark G.; Goldberg, Benjamin S.
Lowering the Technical Skill Requirements for Building Intelligent Tutors: A Review of Authoring Tools Book Section
In: Design Recommendations for Intelligent Tutoring Systems, vol. 3, pp. 303 – 318, U.S. Army Research Laboratory, 2015.
Abstract | Links | BibTeX | Tags: ARL, DoD, Learning Sciences, UARC
@incollection{lane_lowering_2015,
title = {Lowering the Technical Skill Requirements for Building Intelligent Tutors: A Review of Authoring Tools},
author = {H. Chad Lane and Mark G. Core and Benjamin S. Goldberg},
url = {http://ict.usc.edu/pubs/Lowering%20the%20Technical%20Skill%20Requirements%20for%20Building%20Intelligent%20Tutors-A%20Review%20of%20Authoring%20Tools.pdf},
year = {2015},
date = {2015-06-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems},
volume = {3},
pages = {303--318},
publisher = {U.S. Army Research Laboratory},
abstract = {In this chapter, we focus on intelligent tutoring systems (ITSs), an instance of educational technology that is often criticized for not reaching its full potential (Nye, 2013). Researchers have debated why, given such strong empirical evidence in their favor (Anderson, Corbett, Koedinger & Pelletier, 1995; D’Mello & Graesser, 2012; VanLehn et al., 2005; Woolf, 2009), intelligent tutors are not in every classroom, on every device, providing educators with fine-grained assessment information about their students. Although many factors contribute to a lack of adoption (Nye, 2014), one widely agreed upon reason behind slow adoption and poor scalability of ITSs is that the engineering demands are simply too great. This is no surprise given that the effectiveness of ITSs is often attributable to the use of rich knowledge representations and cognitively plausible models of domain knowledge (Mark & Greer, 1995; Valerie J. Shute & Psotka, 1996; VanLehn, 2006; Woolf, 2009), which are inherently burdensome to build. To put it another way: the features that tend to make ITSs effective are also the hardest to build. The heavy reliance on cognitive scientists and artificial intelligence (AI) software engineers seems to be a bottleneck.},
keywords = {ARL, DoD, Learning Sciences, UARC},
pubstate = {published},
tppubtype = {incollection}
}
Lane, H. Chad; Core, Mark G.; Hays, Matthew J.; Auerbach, Daniel; Rosenberg, Milton
Situated Pedagogical Authoring: Authoring Intelligent Tutors from a Student’s Perspective Proceedings Article
In: Artificial Intelligence in Education, pp. 195–204, Springer International Publishing, Madrid, Spain, 2015, ISBN: 978-3-319-19772-2 978-3-319-19773-9.
Abstract | Links | BibTeX | Tags: Learning Sciences, UARC
@inproceedings{chad_lane_situated_2015,
title = {Situated Pedagogical Authoring: Authoring Intelligent Tutors from a Student’s Perspective},
author = {H. Chad Lane and Mark G. Core and Matthew J. Hays and Daniel Auerbach and Milton Rosenberg},
url = {http://ict.usc.edu/pubs/Situated%20Pedagogical%20Authoring-Authoring%20Intelligent.pdf},
isbn = {978-3-319-19772-2, 978-3-319-19773-9},
year = {2015},
date = {2015-06-01},
booktitle = {Artificial Intelligence in Education},
volume = {9112},
pages = {195--204},
publisher = {Springer International Publishing},
address = {Madrid, Spain},
abstract = {We describe the Situated Pedagogical Authoring (SitPed) system that seeks to allow non-technical authors to create ITS content for soft-skills training, such as counseling skills. SitPed is built on the assertion that authoring tools should use the learner’s perspective to the greatest extent possible. SitPed provides tools for creating tasks lists, authoring assessment knowledge, and creating tutor messages. We present preliminary findings of a two-phase study comparing authoring in SitPed to an ablated version of the same system and a spreadsheet-based control. Findings suggest modest advantages for SitPed in terms of the quality of the authored content and student learning.},
keywords = {Learning Sciences, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Wienberg, Christopher; Gordon, Andrew S.
Insights on Privacy and Ethics from the Web’s Most Prolific Storytellers Proceedings Article
In: Proceedings of WebSci15, pp. 1–10, ACM, Oxford, UK, 2015.
Abstract | Links | BibTeX | Tags: The Narrative Group, UARC
@inproceedings{wienberg_insights_2015,
title = {Insights on Privacy and Ethics from the Web’s Most Prolific Storytellers},
author = {Christopher Wienberg and Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Insights%20on%20Privacy%20and%20Ethics%20from%20the%20Web's%20Most%20Prolific%20Storytellers.pdf},
year = {2015},
date = {2015-06-01},
booktitle = {Proceedings of WebSci15},
pages = {1--10},
publisher = {ACM},
address = {Oxford, UK},
abstract = {An analysis of narratives in English-language weblogs reveals a unique population of individuals who post personal stories with extraordinarily high frequency over extremely long periods of time. This population includes people who have posted personal narratives everyday for more than eight years. In this paper we describe our investigation of this interesting subset of web users, where we conducted ethnographic, face-to-face interviews with a sample of these bloggers (n = 11). Our ndings shed light on a culture of public documentation of private life, and provide insight into these bloggers' motivations, interactions with their readers, honesty, and thoughts on research that utilizes their data. We discuss the ethical implications for researchers working with web data, and speak to the relationship between large datasets and the real people behind them.},
keywords = {The Narrative Group, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Garten, Justin; Sagae, Kenji; Ustun, Volkan; Dehghani, Morteza
Combining Distributed Vector Representations for Words Proceedings Article
In: Proceedings of NAACL-HLT 2015, pp. 95–101, Association for Computational Linguistics, Denver, Colorado, 2015.
Abstract | Links | BibTeX | Tags: The Narrative Group, UARC
@inproceedings{garten_combining_2015,
title = {Combining Distributed Vector Representations for Words},
author = {Justin Garten and Kenji Sagae and Volkan Ustun and Morteza Dehghani},
url = {http://ict.usc.edu/pubs/Combining%20Distributed%20Vector%20Representations%20for%20Words.pdf},
year = {2015},
date = {2015-06-01},
booktitle = {Proceedings of NAACL-HLT 2015},
pages = {95--101},
publisher = {Association for Computational Linguistics},
address = {Denver, Colorado},
abstract = {Recent interest in distributed vector representations for words has resulted in an increased diversity of approaches, each with strengths and weaknesses. We demonstrate how diverse vector representations may be inexpensively composed into hybrid representations, effectively leveraging strengths of individual components, as evidenced by substantial improvements on a standard word analogy task. We further compare these results over different sizes of training sets and find these advantages are more pronounced when training data is limited. Finally, we explore the relative impacts of the differences in the learning methods themselves and the size of the contexts they access.},
keywords = {The Narrative Group, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Chatterjee, Moitreya; Leuski, Anton
CRMActive: An Active Learning Based Approach for Effective Video Annotation and Retrieval Proceedings Article
In: Proceedings of ACM International Conference on Multimedia Retrieval (ICMR), pp. 535–538, ACM, Shanghai, China, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{chatterjee_crmactive_2015,
title = {CRMActive: An Active Learning Based Approach for Effective Video Annotation and Retrieval},
author = {Moitreya Chatterjee and Anton Leuski},
url = {http://ict.usc.edu/pubs/CRMActive%20-%20An%20Active%20Learning%20Based%20Approach%20for%20Effective%20Video%20Annotation%20and%20Retrieval.pdf},
doi = {10.1145/2671188.2749342},
year = {2015},
date = {2015-06-01},
booktitle = {Proceedings of ACM International Conference on Multimedia Retrieval (ICMR)},
pages = {535--538},
publisher = {ACM},
address = {Shanghai, China},
abstract = {Conventional multimedia annotation/retrieval systems such as Normalized Continuous Relevance Model (NormCRM) [7] require a fully labeled training data for a good performance. Active Learning, by determining an order for labeling the training data, allows for a good performance even before the training data is fully annotated. In this work we propose an active learning algorithm, which combines a novel measure of sample uncertainty with a novel clustering-based approach for determining sample density and diversity and integrate it with NormCRM. The clusters are also iteratively refined to ensure both feature and label-level agreement among samples. We show that our approach outperforms multiple baselines both on a new, open dataset and on the popular TRECVID corpus at both the tasks of annotation and text-based retrieval of videos.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Leuski, Anton; Maio, Heather; Mor-Barak, Tomer; Gordon, Carla; Traum, David
How Many Utterances Are Needed to Support Time-Offset Interaction? Proceedings Article
In: Proceedings of FLAIRS 28, pp. 144–149, AAAI Press, Hollywood, FL, 2015, ISBN: 978-1-57735-730-8.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{artstein_how_2015,
title = {How Many Utterances Are Needed to Support Time-Offset Interaction?},
author = {Ron Artstein and Anton Leuski and Heather Maio and Tomer Mor-Barak and Carla Gordon and David Traum},
url = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS15/paper/view/10442},
isbn = {978-1-57735-730-8},
year = {2015},
date = {2015-05-01},
booktitle = {Proceedings of FLAIRS 28},
pages = {144--149},
publisher = {AAAI Press},
address = {Hollywood, FL},
abstract = {A set of several hundred recorded statements by a single speaker is sufficient to address unrestricted questions and sustain short conversations on a circumscribed topic. Statements were recorded by Pinchas Gutter, a Holocaust survivor, talking about his personal experiences before, during and after the Holocaust. These statements were delivered to participants in conversation, using a “Wizard of Oz” system, where live operators select an appropriate reaction to each user utterance in real time. Even though participants were completely unconstrained in the questions they could ask, the recorded statements were able to directly address at least 58% of user questions. The unanswered questions were then analyzed to identify gaps, and additional statements were recorded to fill the gaps. The statements will be put in an automated system using existing language understanding technology, to create the first full working system of time-offset interaction, allowing a live conversation with a real human who is not present for the conversation in real time.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale; Gratch, Jonathan
An Effective Conversation Tactic for Creating Value over Repeated Negotiations Proceedings Article
In: Proceedings of the 2015 International Conference on Autonomous Agents and Multiagent Systems, pp. 1567–1576, International Foundation for Autonomous Agents and Multiagent Systems, Istanbul, Turkey, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{mell_effective_2015,
title = {An Effective Conversation Tactic for Creating Value over Repeated Negotiations},
author = {Johnathan Mell and Gale Lucas and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/An%20Effective%20Conversation%20Tactic%20for%20Creating%20Value%20over%20Repeated%20Negotiations.pdf},
year = {2015},
date = {2015-05-01},
booktitle = {Proceedings of the 2015 International Conference on Autonomous Agents and Multiagent Systems},
pages = {1567--1576},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Istanbul, Turkey},
abstract = {Automated negotiation research focuses on getting the most value from a single negotiation, yet real-world settings often involve repeated serial negotiations between the same parties. Repeated negotiations are interesting because they allow the discovery of mutually beneficial solutions that don’t exist within the confines of a single negotiation. This paper introduces the notion of Pareto efficiency over time to formalize this notion of value-creation through repeated interactions. We review literature from human negotiation research and identify a dialog strategy, favors and ledgers, that facilitates this process. As part of a longer-term effort to build intelligent virtual humans that can train human negotiators, we create a conversational agent that instantiates this strategy, and assess its effectiveness with human users, using the established Colored Trails negotiation testbed. In an empirical study involving a series of repeated negotiations, we show that humans are more likely to discover Pareto optimal solutions overtime when matched with our favor-seeking agent. Further, an agent that asks for favors during early negotiations, regardless of whether these favors are ever repaid, leads participants to discover more joint value in later negotiations, even under the traditional definition of Pareto optimality within a single negotiation. Further, agents that match their words with deeds (repay their favors) create the most value for themselves. We discuss the implications of these findings for agents that engage in long-term interactions with human users.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Saito, Shunsuke; Huang, Zeng; Natsume, Ryota; Morishima, Shigeo; Kanazawa, Angjoo; Li, Hao
PIFu: Pixel-Aligned Implicit Function for High-Resolution Clothed Human Digitization Journal Article
In: arXiv:1905.05172 [cs], 2019.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@article{saito_pifu_2015,
title = {PIFu: Pixel-Aligned Implicit Function for High-Resolution Clothed Human Digitization},
author = {Shunsuke Saito and Zeng Huang and Ryota Natsume and Shigeo Morishima and Angjoo Kanazawa and Hao Li},
url = {http://arxiv.org/abs/1905.05172},
year = {2019},
date = {2019-05-01},
journal = {arXiv:1905.05172 [cs]},
abstract = {We introduce Pixel-aligned Implicit Function (PIFu), a highly effective implicit representation that locally aligns pixels of 2D images with the global context of their corresponding 3D object. Using PIFu, we propose an end-to-end deep learning method for digitizing highly detailed clothed humans that can infer both 3D surface and texture from a single image, and optionally, multiple input images. Highly intricate shapes, such as hairstyles, clothing, as well as their variations and deformations can be digitized in a unified way. Compared to existing representations used for 3D deep learning, PIFu can produce high-resolution surfaces including largely unseen regions such as the back of a person. In particular, it is memory efficient unlike the voxel representation, can handle arbitrary topology, and the resulting surface is spatially aligned with the input image. Furthermore, while previous techniques are designed to process either a single image or multiple views, PIFu extends naturally to arbitrary number of views. We demonstrate high-resolution and robust reconstructions on real world images from the DeepFashion dataset, which contains a variety of challenging clothing types. Our method achieves state-of-the-art performance on a public benchmark and outperforms the prior work for clothed human digitization from a single image.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
Shim, Han Suk; Park, Sunghyun; Chatterjee, Moitreya; Scherer, Stefan; Sagae, Kenji; Morency, Louis-Philippe
ACOUSTIC AND PARA-VERBAL INDICATORS OF PERSUASIVENESS IN SOCIAL MULTIMEDIA Proceedings Article
In: Proceedings of ICASSP 2015, pp. 2239 – 2243, IEEE, Brisbane, Australia, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{shim_acoustic_2015,
title = {Acoustic and Para-Verbal Indicators of Persuasiveness in Social Multimedia},
author = {Han Suk Shim and Sunghyun Park and Moitreya Chatterjee and Stefan Scherer and Kenji Sagae and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/ACOUSTIC%20AND%20PARA-VERBAL%20INDICATORS%20OF%20PERSUASIVENESS%20IN%20SOCIAL%20MULTIMEDIA.pdf},
year = {2015},
date = {2015-04-01},
booktitle = {Proceedings of ICASSP 2015},
pages = {2239--2243},
publisher = {IEEE},
address = {Brisbane, Australia},
abstract = {Persuasive communication and interaction play an important and pervasive role in many aspects of our lives. With the rapid growth of social multimedia websites such as YouTube, it has become more important and useful to understand persuasiveness in the context of online social multimedia content. In this paper, we present our results of conducting various analyses of persuasiveness in speech with our multimedia corpus of 1,000 movie review videos obtained from ExpoTV.com, a popular social multimedia website. Our experiments firstly show that a speaker’s level of persuasiveness can be predicted from acoustic characteristics and para-verbal cues related to speech fluency. Secondly, we show that taking acoustic cues in different time periods of a movie review can improve the performance of predicting a speaker’s level of persuasiveness. Lastly, we show that a speaker’s positive or negative attitude toward a topic influences the prediction performance as well.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Fyffe, Graham; Debevec, Paul
Single-Shot Reflectance Measurement from Polarized Color Gradient Illumination Proceedings Article
In: Proceedings of ICCP 2015, pp. 1–10, IEEE, Houston, Texas, 2015.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{fyffe_single-shot_2015,
title = {Single-Shot Reflectance Measurement from Polarized Color Gradient Illumination},
author = {Graham Fyffe and Paul Debevec},
url = {http://ict.usc.edu/pubs/Single-Shot%20Reflectance%20Measurement%20from%20Polarized%20Color%20Gradient%20Illumination.pdf},
year = {2015},
date = {2015-04-01},
booktitle = {Proceedings of ICCP 2015},
pages = {1--10},
publisher = {IEEE},
address = {Houston, Texas},
abstract = {We present a method for acquiring the per-pixel diffuse albedo, specular albedo, and surface normal maps of a subject at a single instant in time. The method is single shot, requiring no optical flow, and per-pixel, making no assumptions regarding albedo statistics or surface connectivity. We photograph the subject inside a spherical illumination device emitting a static lighting pattern of vertically polarized RGB color gradients aligned with the XYZ axes, and horizontally polarized RGB color gradients in versely aligned with the XYZ axes. We capture simultaneous photographs using one of two possible setups: a single view setup using a coaxially aligned camera pair with a polarizing beam splitter, and a multi-view stereo setup with different orientations of linear polarizing filters placed on the cameras, enabling high-quality geometry reconstruction. From this lighting we derive full-color diffuse albedo, single-channel specular albedo suitable for dielectric materials, and polarization-preserving surface normals which are free of corruption from subsurface scattering. We provide simple formulae to estimate the diffuse albedo, specular albedo, and surface normal maps in the single-view and multi-view cases and show error bounds which are small for many common subjects including faces.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Scherer, Stefan; Morency, Louis-Philippe; Gratch, Jonathan; Pestian, John
REDUCED VOWEL SPACE IS A ROBUST INDICATOR OF PSYCHOLOGICAL DISTRESS: A CROSS-CORPUS ANALYSIS Proceedings Article
In: Proceedings of the IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP), pp. 4789–4793, IEEE, Brisbane, Australia, 2015.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{scherer_reduced_2015,
title = {Reduced Vowel Space Is a Robust Indicator of Psychological Distress: A Cross-Corpus Analysis},
author = {Stefan Scherer and Louis-Philippe Morency and Jonathan Gratch and John Pestian},
url = {http://ict.usc.edu/pubs/REDUCED%20VOWEL%20SPACE%20IS%20A%20ROBUST%20INDICATOR%20OF%20PSYCHOLOGICAL%20DISTRESS-A%20CROSS-CORPUS%20ANALYSIS.pdf},
year = {2015},
date = {2015-04-01},
booktitle = {Proceedings of the IEEE International Conference on Acoustics, Speech, and Signal Processing (ICASSP)},
pages = {4789--4793},
publisher = {IEEE},
address = {Brisbane, Australia},
abstract = {Reduced frequency range in vowel production is a well documented speech characteristic of individuals with psychological and neurological disorders. Depression is known to influence motor control and in particular speech production. The assessment and documentation of reduced vowel space and associated perceived hypoarticulation and reduced expressivity often rely on subjective assessments. Within this work, we investigate an automatic unsupervised machine learning approach to assess a speaker’s vowel space within three distinct speech corpora and compare observed vowel space measures of subjects with and without psychological conditions associated with psychological distress, namely depression, post-traumatic stress disorder (PTSD), and suicidality. Our experiments are based on recordings of over 300 individuals. The experiments show a significantly reduced vowel space in conversational speech for depression, PTSD, and suicidality. We further observe a similar trend of reduced vowel space for read speech. A possible explanation for a reduced vowel space is psychomotor retardation, a common symptom of depression that influences motor control and speech production.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Andreatta, Pamela; Klotz, Jessica J.; Madsen, James M.; Hurst, Charles G.; Talbot, Thomas B.
Outcomes From Two Forms of Training for First-Responder Competency in Cholinergic Crisis Management Journal Article
In: Military Medicine, vol. 180, no. 4, pp. 468–474, 2015, ISSN: 0026-4075, 1930-613X.
Abstract | Links | BibTeX | Tags: DoD, MedVR, UARC
@article{andreatta_outcomes_2015,
title = {Outcomes From Two Forms of Training for First-Responder Competency in Cholinergic Crisis Management},
author = {Pamela Andreatta and Jessica J. Klotz and James M. Madsen and Charles G. Hurst and Thomas B. Talbot},
url = {http://ict.usc.edu/pubs/Outcomes%20From%20Two%20Forms%20of%20Training%20for%20First-Responder%20Competency%20in%20Cholinergic%20Crisis%20Management.pdf},
doi = {10.7205/MILMED-D-14-00290},
issn = {0026-4075, 1930-613X},
year = {2015},
date = {2015-04-01},
journal = {Military Medicine},
volume = {180},
number = {4},
pages = {468--474},
abstract = {Military and civilian first responders must be able to recognize and effectively manage mass disaster casualties. Clinical management of injuries resulting from nerve agents provides different challenges for first responders than those of conventional weapons. We evaluated the impact of a mixed-methods training program on competency acquisition in cholinergic crisis clinical management using multimedia with either live animal or patient actor examples, and hands-on practice using SimMan3G mannequin simulators. A purposively selected sample of 204 civilian and military first responders who had not previously completed nerve agent training were assessed pre- and post-training for knowledge, performance, self-efficacy, and affective state. We conducted analysis of variance with repeated measures; statistical significance p $<$ 0.05. Both groups had significant performance improvement across all assessment dimensions: knowledge $>$ 20\%, performance $>$ 50\%, self-efficacy $>$ 34\%, and affective state $>$ 15\%. There were no significant differences between the live animal and patient actor groups. These findings could aid in the specification of training for first-responder personnel in military and civilian service. Although less comprehensive than U.S. Army Medical Research Institute of Chemical Defense courses, the training outcomes associated with this easily distributed program demonstrate its value in increasing the competency of first responders in recognizing and managing a mass casualty cholinergic event.},
keywords = {DoD, MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Pynadath, David V.; Marsella, Stacy C.
Subjective Perceptions in Wartime Negotiation Journal Article
In: IEEE Transactions on Affective Computing, vol. 6, no. 2, pp. 118–126, 2015, ISSN: 1949-3045.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC
@article{wang_subjective_2015,
title = {Subjective Perceptions in Wartime Negotiation},
author = {Ning Wang and David V. Pynadath and Stacy C. Marsella},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6975149},
doi = {10.1109/TAFFC.2014.2378312},
issn = {1949-3045},
year = {2015},
date = {2015-04-01},
journal = {IEEE Transactions on Affective Computing},
volume = {6},
number = {2},
pages = {118--126},
abstract = {The prevalence of negotiation in social interaction has motivated researchers to develop virtual agents that can understand, facilitate, teach and even carry out negotiations. While much of this research has analyzed how to maximize the objective outcome, there is a growing body of work demonstrating that subjective perceptions of the outcome also play a critical role in human negotiation behavior. People derive subjective value from not only the outcome, but also from the process by which they achieve that outcome, from their relationship with their negotiation partner, etc. The affective responses evoked by these subjective valuations can be very different from what would be evoked by the objective outcome alone. We investigate such subjective valuations within human-agent negotiation in four variations of a wartime negotiation game. We observe that the objective outcomes of these negotiations are not strongly correlated with the human negotiators’ subjective perceptions, as measured by the Subjective Value Index. We examine the game dynamics and agent behaviors to identify features that induce different subjective values in the participants. We thus are able to identify characteristics of the negotiation process and the agents’ behavior that most impact people’s subjective valuations in our wartime negotiation games.},
keywords = {Social Simulation, UARC},
pubstate = {published},
tppubtype = {article}
}
Rosenbloom, Paul
Supraarchitectural Capability Integration: From Soar to Sigma Proceedings Article
In: Proceedings of the 13th International Conference on Cognitive Modeling, Groningen, The Netherlands, 2015.
Abstract | Links | BibTeX | Tags: CogArch, UARC, Virtual Humans
@inproceedings{rosenbloom_supraarchitectural_2015,
title = {Supraarchitectural Capability Integration: From {Soar} to {Sigma}},
author = {Paul Rosenbloom},
url = {http://ict.usc.edu/pubs/Supraarchitectural%20Capability%20Integration%20-%20From%20Soar%20to%20Sigma.pdf},
year = {2015},
date = {2015-04-01},
booktitle = {Proceedings of the 13th International Conference on Cognitive Modeling},
address = {Groningen, The Netherlands},
abstract = {Integration across capabilities, both architectural and supraarchitectural, is critical for cognitive architectures. Here we revisit a classic failure of supraarchitectural capability integration in Soar, involving data chunking, to understand better both its source and how it and related integration issues can be overcome via three general extensions in Sigma.},
keywords = {CogArch, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}