Publications
Search
DeVault, David; Mell, Jonathan; Gratch, Jonathan
Toward Natural Turn-Taking in a Virtual Human Negotiation Agent Proceedings Article
In: AAAI Spring Symposium on Turn-taking and Coordination in Human-Machine Interaction, pp. 2–9, AAAI Press, Palo Alto, California, 2015.
@inproceedings{devault_toward_2015,
title = {Toward Natural Turn-Taking in a Virtual Human Negotiation Agent},
author = {David DeVault and Jonathan Mell and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Toward%20Natural%20Turn-Taking%20in%20a%20Virtual%20Human%20Negotiation%20Agent.pdf},
year = {2015},
date = {2015-03-01},
booktitle = {AAAI Spring Symposium on Turn-taking and Coordination in Human-Machine Interaction},
pages = {2--9},
publisher = {AAAI Press},
address = {Palo Alto, California},
abstract = {In this paper we assess our progress toward creating a virtual human negotiation agent with fluid turn-taking skills. To facilitate the design of this agent, we have collected a corpus of human-human negotiation roleplays as well as a corpus of Wizard-controlled human-agent negotiations in the same roleplay scenario. We compare the natural turn-taking behavior in our human-human corpus with that achieved in our Wizard-of-Oz corpus, and quantify our virtual human’s turn-taking skills using a combination of subjective and objective metrics. We also discuss our design for a Wizard user interface to support real-time control of the virtual human’s turn-taking and dialogue behavior, and analyze our wizard’s usage of this interface.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Casas, Dan; Alexander, Oleg; Feng, Andrew W.; Fyffe, Graham; Ichikari, Ryosuke; Debevec, Paul; Wang, Ruizhe; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshapes from Commodity RGB-D Sensors Proceedings Article
In: Proceedings of the 19th Symposium on Interactive 3D Graphics and Games, p. 134, ACM Press, San Francisco, CA, 2015, ISBN: 978-1-4503-3392-4.
@inproceedings{casas_rapid_2015,
title = {Rapid Photorealistic Blendshapes from Commodity {RGB-D} Sensors},
author = {Dan Casas and Oleg Alexander and Andrew W. Feng and Graham Fyffe and Ryosuke Ichikari and Paul Debevec and Ruizhe Wang and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2699276.2721398},
doi = {10.1145/2699276.2721398},
isbn = {978-1-4503-3392-4},
year = {2015},
date = {2015-02-01},
booktitle = {Proceedings of the 19th Symposium on Interactive 3D Graphics and Games},
pages = {134},
publisher = {ACM Press},
address = {San Francisco, CA},
abstract = {Creating and animating a realistic 3D human face has been an important task in computer graphics. The capability of capturing the 3D face of a human subject and reanimate it quickly will find many applications in games, training simulations, and interactive 3D graphics. In this paper, we propose a system to capture photorealistic 3D faces and generate the blendshape models automatically using only a single commodity RGB-D sensor. Our method can rapidly generate a set of expressive facial poses from a single Microsoft Kinect and requires no artistic expertise on the part of the capture subject. The system takes only a matter of seconds to capture and produce a 3D facial pose and only requires 4 minutes of processing time to transform it into a blendshape model. Our main contributions include an end-to-end pipeline for capturing and generating face blendshape models automatically, and a registration method that solves dense correspondences between two face scans by utilizing facial landmark detection and optical flow. We demonstrate the effectiveness of the proposed method by capturing 3D facial models of different human subjects and puppeteering their models in an animation system with real-time facial performance retargeting.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Choi, Ahyoung; Melo, Celso M.; Khooshabeh, Peter; Woo, Woontack; Gratch, Jonathan
Physiological evidence for a dual process model of the social effects of emotion in computers Journal Article
In: International Journal of Human-Computer Studies, vol. 74, pp. 41–53, 2015, ISSN: 1071-5819.
@article{choi_physiological_2015,
title = {Physiological evidence for a dual process model of the social effects of emotion in computers},
author = {Ahyoung Choi and Celso M. Melo and Peter Khooshabeh and Woontack Woo and Jonathan Gratch},
url = {http://linkinghub.elsevier.com/retrieve/pii/S1071581914001414},
doi = {10.1016/j.ijhcs.2014.10.006},
issn = {1071-5819},
year = {2015},
date = {2015-02-01},
journal = {International Journal of Human-Computer Studies},
volume = {74},
pages = {41--53},
abstract = {There has been recent interest on the impact of emotional expressions of computers on people's decision making. However, despite a growing body of empirical work, the mechanism underlying such effects is still not clearly understood. To address this issue the paper explores two kinds of processes studied by emotion theorists in human-human interaction: inferential processes, whereby people retrieve information from emotion expressions about other's beliefs, desires, and intentions; affective processes, whereby emotion expressions evoke emotions in others, which then influence their decisions. To tease apart these two processes as they occur in human-computer interaction, we looked at physiological measures (electrodermal activity and heart rate deceleration). We present two experiments where participants engaged in social dilemmas with embodied agents that expressed emotion. Our results show, first, that people's decisions were influenced by affective and cognitive processes and, according to the prevailing process, people behaved differently and formed contrasting subjective ratings of the agents; second we show that an individual trait known as electrodermal lability, which measures people's physiological sensitivity, predicted the extent to which affective or inferential processes dominated the interaction. We discuss implications for the design of embodied agents and decision making systems that use emotion expression to enhance interaction between humans and computers.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Manuvinakurike, Ramesh; DeVault, David
Pair Me Up: A Web Framework for Crowd-Sourced Spoken Dialogue Collection Proceedings Article
In: Proceedings of IWSDS 2015, pp. 1–12, Busan, South Korea, 2015.
@inproceedings{manuvinakurike_pair_2015,
title = {{Pair Me Up}: A Web Framework for Crowd-Sourced Spoken Dialogue Collection},
author = {Ramesh Manuvinakurike and David DeVault},
url = {http://ict.usc.edu/pubs/Pair%20Me%20Up-%20A%20Web%20Framework%20for%20Crowd-Sourced%20Spoken%20Dialogue%20Collection.pdf},
year = {2015},
date = {2015-01-01},
booktitle = {Proceedings of IWSDS 2015},
pages = {1--12},
address = {Busan, South Korea},
abstract = {We describe and analyze a new web-based spoken dialogue data collection framework. The framework enables the capture of conversational speech from two remote users who converse with each other and play a dialogue game entirely through their web browsers. We report on the substantial improvements in the speed and cost of data capture we have observed with this crowd-sourced paradigm. We also analyze a range of data quality factors by comparing a crowd-sourced data set involving 196 remote users to a smaller but more quality controlled lab-based data set. We focus our comparison on aspects that are especially important in our spoken dialogue research, including audio quality, the effect of communication latency on the interaction, our ability to synchronize the collected data, our ability to collect examples of excellent game play, and the naturalness of the resulting interactions. This analysis illustrates some of the current trade-offs between lab-based and crowd-sourced spoken dialogue data.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Park, Sunghyun; Scherer, Stefan; Gratch, Jonathan; Carnevale, Peter; Morency, Louis-Philippe
I Can Already Guess Your Answer: Predicting Respondent Reactions During Dyadic Negotiation Journal Article
In: IEEE Transactions on Affective Computing, vol. 6, no. 2, pp. 86–96, 2015, ISSN: 1949-3045.
@article{park_i_2015,
title = {I Can Already Guess Your Answer: Predicting Respondent Reactions During Dyadic Negotiation},
author = {Sunghyun Park and Stefan Scherer and Jonathan Gratch and Peter Carnevale and Louis-Philippe Morency},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=7024926},
doi = {10.1109/TAFFC.2015.2396079},
issn = {1949-3045},
year = {2015},
date = {2015-01-01},
journal = {IEEE Transactions on Affective Computing},
volume = {6},
number = {2},
pages = {86--96},
abstract = {Negotiation is a component deeply ingrained in our daily lives, and it can be challenging for a person to predict the respondent’s reaction (acceptance or rejection) to a negotiation offer. In this work, we focus on finding acoustic and visual behavioral cues that are predictive of the respondent’s immediate reactions using a face-to-face negotiation dataset, which consists of 42 dyadic interactions in a simulated negotiation setting. We show our results of exploring 4 different sources of information, namely nonverbal behavior of the proposer, that of the respondent, mutual behavior between the interactants related to behavioral symmetry and asymmetry, and past negotiation history between the interactants. Firstly, we show that considering other sources of information (other than the nonverbal behavior of the respondent) can also have comparable performance in predicting respondent reactions. Secondly, we show that automatically extracted mutual behavioral cues of symmetry and asymmetry are predictive partially due to their capturing information of the nature of the interaction itself, whether it is cooperative or competitive. Lastly, we identify audio-visual behavioral cues that are most predictive of the respondent’s immediate reactions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rosenbloom, Paul S.; Demski, Abram; Ustun, Volkan
Efficient message computation in Sigma’s graphical architecture Journal Article
In: Biologically Inspired Cognitive Architectures, vol. 11, pp. 1–9, 2015, ISSN: 2212-683X.
@article{rosenbloom_efficient_2015,
title = {Efficient message computation in {Sigma}’s graphical architecture},
author = {Paul S. Rosenbloom and Abram Demski and Volkan Ustun},
url = {http://linkinghub.elsevier.com/retrieve/pii/S2212683X14000723},
doi = {10.1016/j.bica.2014.11.009},
issn = {2212-683X},
year = {2015},
date = {2015-01-01},
journal = {Biologically Inspired Cognitive Architectures},
volume = {11},
pages = {1--9},
abstract = {Human cognition runs at $\sim$50 ms per cognitive cycle, implying that any biologically inspired cognitive architecture that strives for real-time performance needs to be able to run at this speed. Sigma is a cognitive architecture built upon graphical models – a broadly applicable state-of-the-art formalism for implementing cognitive capabilities – that are solved via message passing (with complex messages based on n-dimensional piecewise-linear functions). Earlier work explored optimizations to Sigma that reduced by an order of magnitude the number of messages sent per cycle. Here, optimizations are introduced that reduce by an order of magnitude the average time required per message sent.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Talbot, Thomas
COMRADE: Methods for Adaptive Competency Management and Just-in-Time Clinical Acumen Augmentation Journal Article
In: MedSim Magazine, pp. 26–28, 2015.
@article{talbot_comrade_2015,
title = {{COMRADE}: Methods for Adaptive Competency Management and Just-in-Time Clinical Acumen Augmentation},
author = {Thomas Talbot},
url = {http://ict.usc.edu/pubs/COMRADE%20-%20Methods%20for%20Adaptive%20Competency%20Management%20and%20Just-in-Time%20Clinical%20Acumen%20Augmentation.pdf},
year = {2015},
date = {2015-01-01},
journal = {MedSim Magazine},
pages = {26--28},
abstract = {Dr. Thomas Talbot shares ideas for enhancing the electronic medical Record to act as a didactic tool to support physician competency.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Maslan, Nicole; Roemmele, Melissa; Gordon, Andrew S.
An Integrated Evaluation of Perception, Interpretation, and Narration Proceedings Article
In: Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence, Austin, TX, 2015.
@inproceedings{maslan_integrated_2015,
  author    = {Maslan, Nicole and Roemmele, Melissa and Gordon, Andrew S.},
  title     = {An Integrated Evaluation of Perception, Interpretation, and Narration},
  booktitle = {Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence},
  address   = {Austin, TX},
  year      = {2015},
  date      = {2015-01-01},
  url       = {http://ict.usc.edu/pubs/An%20Integrated%20Evaluation%20of%20Perception,%20Interpretation,%20and%20Narration.PDF},
  abstract  = {In this paper, we describe our efforts to create an evaluation tool to aid in the development of artificial intelligence systems that integrate perception, reasoning, and language abilities. Based on an early and influential study by social psychologists Fritz Heider and Marianne Simmel, we created 100 short movies depicting the motions of two triangles and a circle around a box with a hinged opening. For each movie, we provide quantitative information about each object's trajectory, a formal description of the actions that can be perceived in each object's behavior, a formal interpretation of the social situation that is depicted, and a short English narration of the interpreted events.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Corbin, Carina; Morbini, Fabrizio; Traum, David
Creating a Virtual Neighbor Proceedings Article
In: Proceedings of International Workshop on Spoken Dialogue Systems, Busan, South Korea, 2015.
@inproceedings{corbin_creating_2015,
  author    = {Corbin, Carina and Morbini, Fabrizio and Traum, David},
  title     = {Creating a Virtual Neighbor},
  booktitle = {Proceedings of International Workshop on Spoken Dialogue Systems},
  address   = {Busan, South Korea},
  year      = {2015},
  date      = {2015-01-01},
  url       = {http://ict.usc.edu/pubs/Creating%20a%20Virtual%20Neighbor.pdf},
  abstract  = {We present the first version of our Virtual Neighbor, who can talk with users about people employed in the same institution. The Virtual Neighbor can discuss information about employees in a medium sized company or institute with users. The system acquires information from three sources: a personnel directory database, public web pages, and through dialogue interaction. Users can interact through face to face spoken dialogue, using components from the ICT Virtual human toolkit, or via a chat interface.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Morency, Louis-Philippe; Stratou, Giota; DeVault, David; Hartholt, Arno; Lhommet, Margaux; Lucas, Gale; Morbini, Fabrizio; Georgila, Kallirroi; Scherer, Stefan; Gratch, Jonathan; Marsella, Stacy; Traum, David; Rizzo, Albert "Skip"
SimSensei Demonstration: A Perceptive Virtual Human Interviewer for Healthcare Applications Proceedings Article
In: Proceedings of the 29th AAAI Conference on Artificial Intelligence (AAAI), Austin, Texas, 2015.
@inproceedings{morency_simsensei_2015,
title = {{SimSensei} Demonstration: A Perceptive Virtual Human Interviewer for Healthcare Applications},
author = {Louis-Philippe Morency and Giota Stratou and David DeVault and Arno Hartholt and Margaux Lhommet and Gale Lucas and Fabrizio Morbini and Kallirroi Georgila and Stefan Scherer and Jonathan Gratch and Stacy Marsella and David Traum and Albert "Skip" Rizzo},
url = {http://ict.usc.edu/pubs/SimSensei%20Demonstration%20A%20Perceptive%20Virtual%20Human%20Interviewer%20for%20Healthcare%20Applications.pdf},
year = {2015},
date = {2015-01-01},
booktitle = {Proceedings of the 29th AAAI Conference on Artificial Intelligence (AAAI)},
address = {Austin, Texas},
abstract = {We present the SimSensei system, a fully automatic virtual agent that conducts interviews to assess indicators of psychological distress. We emphasize on the perception part of the system, a multimodal framework which captures and analyzes user state for both behavioral understanding and interactional purposes.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Andreatta, Pamela; Klotz, Jessica; Madsen, James M.; Hurst, Charles G.; Talbot, Thomas
Assessment instrument validation for critical clinical competencies - pediatric-neonatal intubation and cholinergic crisis management Proceedings Article
In: Proceedings of Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2014, Orlando, FL, 2014.
@inproceedings{andreatta_assessment_2014,
title = {Assessment instrument validation for critical clinical competencies - pediatric-neonatal intubation and cholinergic crisis management},
author = {Pamela Andreatta and Jessica Klotz and James M. Madsen and Charles G. Hurst and Thomas Talbot},
url = {http://ict.usc.edu/pubs/Assessment%20instrument%20validation%20for%20critical%20clinical%20competencies%20-%20pediatricneonatal%20intubation%20and%20cholinergic%20crisis%20management.pdf},
year = {2014},
date = {2014-12-01},
booktitle = {Proceedings of Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2014},
address = {Orlando, FL},
abstract = {Military and civilian first-responders must be able to recognize and effectively manage casualties that necessitate immediate application of critical clinical competencies. Two examples of these critical competencies are the clinical management of injuries resulting from nerve agents and difficult intubation, especially for pediatric or neonatal patients. The opportunity to learn and practice the necessary skills for these rare, but urgent, situations is complicated by the limited ability to replicate essential situational factors that influence performance in the applied clinical environment. Simulation-based training may resolve some of these challenges, however it is imperative that evidence be captured to document the achievement of performance competencies in the training environment that transfer to applied clinical care. The purpose of this study was to establish psychometric characteristics for competency assessment instruments associated with two such critical competencies: management of cholinergic crisis and pediatric-neonatal intubation. Methods: To inform the development of assessment instruments, we conducted comprehensive task analyses across each performance domain (knowledge, performance). Expert review confirmed content validity. Construct validity was established using the instruments to differentiate between the performance abilities of practitioners with variable experience (novice through expert). Purposively selected first-responder subjects for pediatric-neonatal intubation (N=214) and cholinergic crisis management (N=123) were stratified by level of experience performing the requisite clinical competencies. All subjects completed knowledge and performance assessments. Reliability was established using test-retest (Pearson correlation) and internal consistency (Cronbach’s alpha) for knowledge and performance assessments. Results: Significantly higher scores for subjects with greater levels of experience, compared to those with less experience established construct validity for each assessment instrument (p \textless .01). Significant correlations between test-retest outcomes indicated measurement reliability p \textless .01. Cronbach’s alpha for knowledge and performance scores demonstrated excellent internal consistency. Conclusions: Psychometric evidence establishes the value of assessment for identifying and remedying critical competency performance gaps.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy; Gratch, Jonathan
Computationally Modeling Human Emotion Journal Article
In: Communications of the ACM, vol. 57, no. 12, pp. 56–67, 2014.
@article{marsella_computationally_2014,
title = {Computationally Modeling Human Emotion},
author = {Stacy Marsella and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=2631912},
doi = {10.1145/2631912},
year = {2014},
date = {2014-12-01},
journal = {Communications of the ACM},
volume = {57},
number = {12},
pages = {56--67},
abstract = {EMOTION’S ROLE IN human behavior is an old debate that has become increasingly relevant to the computational sciences. Two-and-a-half millennia ago, Aristotle espoused a view of emotion at times remarkably similar to modern psychological theories, arguing that emotions (such as anger), in moderation, play a useful role, especially in interactions with others. Those who express anger at appropriate times are praiseworthy, while those lacking in anger at appropriate times are treated as a fool. The Stoics took a different view; four centuries after Aristotle, Seneca considered emotions (such as anger) as a threat to reason, arguing, “reason … is only powerful so long as it remains isolated from emotions.” In the 18th century, David Hume radically departed from the Stoic perspective, arguing for the key motivating role of emotions, saying, “Reason is, and ought only to be the slave of the passions.” A similar dichotomy of views can be seen in the history of artificial intelligence (AI) and agent research.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Klotz, Jessica; Madsen, James M.; Hurst, Charles G.; Talbot, Thomas
Training Effects for First-responder Competency in Cholinergic Crisis Management Proceedings Article
In: Proceedings of Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2014, Orlando, FL, 2014.
@inproceedings{klotz_training_2014,
title = {Training Effects for First-responder Competency in Cholinergic Crisis Management},
author = {Jessica Klotz and James M. Madsen and Charles G. Hurst and Thomas Talbot},
url = {http://ict.usc.edu/pubs/Training%20Effects%20for%20First-responder%20Competency%20in%20Cholinergic%20Crisis%20Management.pdf},
year = {2014},
date = {2014-12-01},
booktitle = {Proceedings of Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2014},
address = {Orlando, FL},
abstract = {Military and civilian first-responders must be able to recognize and effectively manage mass disaster casualties. Clinical management of injuries resulting from nerve agents provides different challenges for first responders than those of conventional weapons. We evaluated the impact of a mixed-methods training program on competency acquisition in cholinergic crisis clinical management. Methods: We developed a multimedia and simulation-based training program based on the more comprehensive USAMRICD courses. The training program was designed to provide first-responders with the necessary abilities to recognize and manage a mass casualty cholinergic crisis event. Training included a learner controlled multimedia iPad app and hands-on instruction using SimMan3G™ mannequin simulators. We evaluated the impact of the training through a purposively selected sample of 204 civilian and military first responders who had not previously completed either of the referenced USAMRICD courses. We assessed knowledge, performance, affect, and self-efficacy measures pre- and post-training using previously validated assessment instruments. We calculated results using analysis of variance with repeated measures, and with statistical significance set at p \textless .05. Results: Analyses demonstrated a significant improvement (p = .000) across all domains (knowledge, performance, self-efficacy, and affect). Knowledge scores increased from 60% to 81% correct. Performance scores increased from 16% to 68% correct. Self-efficacy scores increased from 51% to 87% confidence in ability to effectively manage a cholinergic crisis event. Affect scores increased from 75% to 81% personal comfort during procedures. Conclusions: These findings could aid in the selection of instructional methodologies available to a broad community of first-responder personnel in military and civilian service. Although less comprehensive than the USAMRICD courses, training outcomes associated with this easily distributed instruction set demonstrated its value in increasing the competency of first responders in recognizing and managing a mass casualty cholinergic event. Retention outcomes are in process.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jon
Virtual Humans for Interpersonal Processes and Skills Training Journal Article
In: AI Matters, vol. 1, no. 2, pp. 24–25, 2014, ISSN: 2372-3483.
@article{gratch_virtual_2014,
title = {Virtual Humans for Interpersonal Processes and Skills Training},
author = {Jon Gratch},
url = {http://dl.acm.org/citation.cfm?doid=2685328.2685336},
doi = {10.1145/2685328.2685336},
issn = {2372-3483},
year = {2014},
date = {2014-12-01},
journal = {AI Matters},
volume = {1},
number = {2},
pages = {24--25},
abstract = {Ellie is an interactive virtual human that performs mental health screens via natural language. This kiosk-based system is aimed at clients resistant to seeking traditional care. Research shows that, when such virtual humans interview people about their mental health, they are able to reduce impression management and fear of negative evaluation compared to interviews conducted with a human present. As both impression management and fear of negative evaluation inhibit people from opening up and disclosing personal information, this research also finds that people are more willing to disclose personal information to a virtual human interviewer than human interviewers. These results suggest that automated virtual humans can help overcome significant barriers to obtaining truthful client information during clinical interviews.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rizzo, Albert; Scherer, Stefan; DeVault, David; Gratch, Jonathan; Artstein, Ron; Hartholt, Arno; Lucas, Gale; Marsella, Stacy; Morbini, Fabrizio; Nazarian, Angela; Stratou, Giota; Traum, David; Wood, Rachel; Boberg, Jill; Morency, Louis-Philippe
Detection and Computational Analysis of Psychological Signals Using a Virtual Human Interviewing Agent Proceedings Article
In: Proceedings of ICDVRAT 2014, International Journal of Disability and Human Development, Gothenburg, Sweden, 2014.
@inproceedings{rizzo_detection_2014,
  author    = {Rizzo, Albert and Scherer, Stefan and DeVault, David and Gratch, Jonathan and Artstein, Ron and Hartholt, Arno and Lucas, Gale and Marsella, Stacy and Morbini, Fabrizio and Nazarian, Angela and Stratou, Giota and Traum, David and Wood, Rachel and Boberg, Jill and Morency, Louis-Philippe},
  title     = {Detection and Computational Analysis of Psychological Signals Using a Virtual Human Interviewing Agent},
  booktitle = {Proceedings of ICDVRAT 2014},
  publisher = {International Journal of Disability and Human Development},
  address   = {Gothenburg, Sweden},
  year      = {2014},
  date      = {2014-12-01},
  url       = {http://ict.usc.edu/pubs/Detection%20and%20Computational%20Analysis%20of%20Psychological%20Signals%20Using%20a%20Virtual%20Human%20Interviewing%20Agent.pdf},
  abstract  = {It has long been recognized that facial expressions, body posture/gestures and vocal parameters play an important role in human communication and the implicit signalling of emotion. Recent advances in low cost computer vision and behavioral sensing technologies can now be applied to the process of making meaningful inferences as to user state when a person interacts with a computational device. Effective use of this additive information could serve to promote human interaction with virtual human (VH) agents that may enhance diagnostic assessment. This paper will focus on our current research in these areas within the DARPA-funded “Detection and Computational Analysis of Psychological Signals” project, with specific attention to the SimSensei application use case. SimSensei is a virtual human interaction platform that is able to sense and interpret real-time audiovisual behavioral signals from users interacting with the system. It is specifically designed for health care support and leverages years of virtual human research and development at USC-ICT. The platform enables an engaging face-to-face interaction where the virtual human automatically reacts to the state and inferred intent of the user through analysis of behavioral signals gleaned from facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human to human interaction and communication, SimSensei aims to capture and infer from user non-verbal communication to improve engagement between a VH and a user. The system can also quantify and interpret sensed behavioral signals.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Calvo, Rafael A.; D'Mello, Sidney; Gratch, Jonathan; Kappas, Arvid (Ed.)
The Oxford Handbook of Affective Computing Book
Oxford University Press, Oxford; New York, 2014, ISBN: 978-0-19-994223-7.
@book{calvo_oxford_2014,
title = {The Oxford Handbook of Affective Computing},
editor = {Rafael A. Calvo and Sidney D'Mello and Jonathan Gratch and Arvid Kappas},
url = {https://global.oup.com/academic/product/the-oxford-handbook-of-affective-computing-9780199942237?cc=us&lang=en&},
isbn = {978-0-19-994223-7},
year = {2014},
date = {2014-12-01},
publisher = {Oxford University Press},
address = {Oxford; New York},
abstract = {The Oxford Handbook of Affective Computing aims to be the definite reference for research in the burgeoning field of affective computing—a field that turns 18 at the time of writing. This introductory chapter is intended to convey the motivations of the editors and content of the chapters in order to orient the readers to the handbook. It begins with a very high overview of the field of affective computing along with a bit of reminiscence about its formation, short history, and major accomplishments. The five main sections of the handbook—history and theory, detection, generation, methodologies, and applications—are then discussed, along with a bird’s eye view of the 41 chapters covered in the book. The introduction is devoted to short descriptions of the chapters featured in the handbook. A brief description of the Glossary concludes the Introduction.},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
Beidel, Deborah; Neer, Sandra; Bowers, Clint; Frueh, B; Rizzo, Albert
Using Virtual Reality as Part of an Intensive Treatment Program for PTSD Proceedings Article
In: Proceedings of I/ITSEC 2014, pp. 1–10, Orlando, Florida, 2014.
@inproceedings{beidel_using_2014,
title = {Using Virtual Reality as Part of an Intensive Treatment Program for {PTSD}},
author = {Deborah Beidel and Sandra Neer and Clint Bowers and B. Frueh and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Using%20Virtual%20Reality%20as%20Part%20of%20an%20Intensive%20Treatment%20Program%20for%20PTSD.pdf},
year = {2014},
date = {2014-12-01},
booktitle = {Proceedings of I/ITSEC 2014},
pages = {1--10},
address = {Orlando, Florida},
abstract = {Up to 18.5% of veterans returning from OIF/OEF are diagnosed with posttraumatic stress disorder (PTSD). In addition to symptoms of anxiety (intrusive thoughts, re-experiencing, hyperarousal, and avoidance), PTSD can result in social maladjustment, poor quality of life, and medical problems. Other emotional problems include guilt, anger, and unemployment, impulsive or violent behavior, and family discord. Many veterans seeking treatment for PTSD also seek disability compensation for debilitating occupational impairment. There are few administrative or research data to indicate veterans are recovering from PTSD. Exposure therapy, a form of behavior therapy, alleviates anxiety symptoms, but may not address the anger, depression and social impairment that accompanies this disorder. In this presentation, we will discuss an intensive treatment program, known as Trauma Management Therapy (TMT), which combines individual virtual reality (VR) assisted exposure therapy with group social and emotional rehabilitation skills training, delivered in a 3 week format. The presentation will demonstrate the VR environment (Virtual Iraq), will discuss how often/successfully various VR elements are integrated into a comprehensive treatment program, and the adaptability of the program for active duty military personnel, as well as veterans. We will discuss the format of the intensive program as well as factors such as compliance and drop-out rates, comparing these important clinical variables to more traditional outpatient treatment programs. Additionally, we will address common clinical concerns regarding the use of VR exposure therapy for individuals suffering from PTSD.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Venek, Verena; Scherer, Stefan; Morency, Louis-Philippe; Rizzo, Albert; Pestian, John
Adolescent Suicidal Risk Assessment in Clinician-Patient Interaction: A Study of Verbal and Acoustic Behaviors Proceedings Article
In: Spoken Language Technology Workshop (SLT), 2014 IEEE, pp. 277–282, IEEE, South Lake Tahoe, NV, 2014, ISBN: 978-1-4799-7129-9.
@inproceedings{venek_adolescent_2014,
title = {Adolescent Suicidal Risk Assessment in Clinician-Patient Interaction: A Study of Verbal and Acoustic Behaviors},
author = {Verena Venek and Stefan Scherer and Louis-Philippe Morency and Albert Rizzo and John Pestian},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=7078587},
doi = {10.1109/SLT.2014.7078587},
isbn = {978-1-4799-7129-9},
year = {2014},
date = {2014-12-01},
booktitle = {Spoken Language Technology Workshop (SLT), 2014 IEEE},
pages = {277–282},
publisher = {IEEE},
address = {South Lake Tahoe, NV},
abstract = {Suicide among adolescents is a major public health problem: it is the third leading cause of death in the US for ages 13-18. Up to now, there is no objective ways to assess the suicidal risk, i.e. whether a patient is non-suicidal, suicidal re-attempter (i.e. repeater) or suicidal non-repeater (i.e. individuals with one suicide attempt or showing signs of suicidal gestures or ideation). Therefore, features of the conversation including verbal information and nonverbal acoustic information were investigated from 60 audio-recorded interviews of 30 suicidal (13 repeaters and 17 non-repeaters) and 30 non-suicidal adolescents interviewed by a social worker. The interaction between clinician and patients was statistically analyzed to reveal differences between suicidal vs. non-suicidal adolescents and to investigate suicidal repeaters' behaviors in comparison to suicidal non-repeaters. By using a hierarchical ensemble classifier we were able to successfully discriminate non-suicidal patients, suicidal repeaters and suicidal non-repeaters.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Trentin, Edmondo; Scherer, Stefan; Schwenker, Friedhelm
Emotion recognition from speech signals via a probabilistic echo-state network Journal Article
In: Pattern Recognition Letters, vol. 66, pp. 4–12, 2014.
@article{trentin_emotion_2014,
title = {Emotion recognition from speech signals via a probabilistic echo-state network},
author = {Edmondo Trentin and Stefan Scherer and Friedhelm Schwenker},
url = {http://www.sciencedirect.com/science/article/pii/S0167865514003328},
doi = {10.1016/j.patrec.2014.10.015},
year = {2014},
date = {2014-11-01},
journal = {Pattern Recognition Letters},
volume = {66},
pages = {4–12},
abstract = {The paper presents a probabilistic echo-state network (π -ESN) for density estimation over variable-length sequences of multivariate random vectors. The π -ESN stems from the combination of the reservoir of an ESN and a parametric density model based on radial basis functions. A constrained maximum likelihood training algorithm is introduced, suitable for sequence classification. Extensions of the algorithm to unsupervised clustering and semi-supervised learning (SSL) of sequences are proposed. Experiments in emotion recognition from speech signals are conducted on the WaSeP© dataset. Compared with established techniques, the π -ESN yields the highest recognition accuracies, and shows interesting clustering and SSL capabilities.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gordon, Andrew S.; Roemmele, Melissa
An Authoring Tool for Movies in the Style of Heider and Simmel Proceedings Article
In: Mitchell, Alex; Fernández-Vara, Clara; Thue, David (Ed.): Interactive Storytelling, pp. 49–60, Springer International Publishing, Singapore, 2014, ISBN: 978-3-319-12336-3 978-3-319-12337-0.
@inproceedings{gordon_authoring_2014,
title = {An Authoring Tool for Movies in the Style of {Heider} and {Simmel}},
author = {Andrew S. Gordon and Melissa Roemmele},
editor = {Alex Mitchell and Clara Fernández-Vara and David Thue},
url = {http://link.springer.com/10.1007/978-3-319-12337-0_5},
isbn = {978-3-319-12336-3 978-3-319-12337-0},
year = {2014},
date = {2014-11-01},
booktitle = {Interactive Storytelling},
series = {Lecture Notes in Computer Science},
volume = {8832},
pages = {49–60},
publisher = {Springer International Publishing},
address = {Singapore},
abstract = {Seventy years ago, psychologists Fritz Heider and Marianne Simmel described an influential study of the perception of intention, where a simple movie of animated geometric shapes evoked in their subjects rich narrative interpretations involving their psychology and social relationships. In this paper, we describe the Heider-Simmel Interactive Theater, a web application that allows authors to create their own movies in the style of Heider and Simmel’s original film, and associate with them a textual description of their narrative intentions. We describe an evaluation of our authoring tool in a classroom of 10th grade students, and an analysis of the movies and textual narratives that they created. Our results provide strong evidence that the authors of these films, as well as Heider and Simmel by extension, intended to convey narratives that are rich with social, cognitive, and emotional concerns.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
Sorry, no publications matched your criteria.