Publications
de Kok, Iwan; Heylen, Dirk; Morency, Louis-Philippe
Speaker-Adaptive Multimodal Prediction Model for Listener Responses Proceedings Article
In: Proceedings of the 15th ACM on International Conference on Multimodal Interaction, pp. 51–58, ACM Press, Sydney, Australia, 2013, ISBN: 978-1-4503-2129-7.
@inproceedings{de_kok_speaker-adaptive_2013,
title = {Speaker-Adaptive Multimodal Prediction Model for Listener Responses},
author = {Iwan de Kok and Dirk Heylen and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Speaker-adaptive%20multimodal%20prediction%20model%20for%20listener%20responses.pdf},
doi = {10.1145/2522848.2522866},
isbn = {978-1-4503-2129-7},
year = {2013},
date = {2013-12-01},
booktitle = {Proceedings of the 15th ACM on International Conference on Multimodal Interaction},
pages = {51–58},
publisher = {ACM Press},
address = {Sydney, Australia},
abstract = {The goal of this paper is to acknowledge and model the variability in speaking styles in dyadic interactions and build a predictive algorithm for listener responses that is able to adapt to these different styles. The end result of this research will be a virtual human able to automatically respond to a human speaker with proper listener responses (e.g., head nods). Our novel speaker-adaptive prediction model is created from a corpus of dyadic interactions where speaker variability is analyzed to identify a subset of prototypical speaker styles. During a live interaction our prediction model automatically identifies the closest prototypical speaker style and predicts listener responses based on this communicative style. Central to our approach is the idea of a "speaker profile" which uniquely identifies each speaker and enables the matching between prototypical speakers and new speakers. The paper shows the merits of our speaker-adaptive listener response prediction model by demonstrating improvement over a state-of-the-art approach which does not adapt to the speaker. Besides the merits of speaker adaptation, our experiments highlight the importance of using multimodal features when comparing speakers to select the closest prototypical speaker style.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mahmoud, Marwa; Morency, Louis-Philippe; Robinson, Peter
Automatic Multimodal Descriptors of Rhythmic Body Movement Proceedings Article
In: Proceedings of the 15th ACM on International Conference on Multimodal Interaction, pp. 429–436, ACM, 2013.
@inproceedings{mahmoud_automatic_2013,
title = {Automatic Multimodal Descriptors of Rhythmic Body Movement},
author = {Marwa Mahmoud and Louis-Philippe Morency and Peter Robinson},
url = {http://ict.usc.edu/pubs/Automatic%20multimodal%20descriptors%20of%20rhythmic%20body%20movement.pdf},
year = {2013},
date = {2013-12-01},
booktitle = {Proceedings of the 15th ACM on International Conference on Multimodal Interaction},
pages = {429–436},
publisher = {ACM},
abstract = {Prolonged durations of rhythmic body gestures have been shown to correlate with different types of psychological disorders. To date, there is no automatic descriptor that can robustly detect those behaviours. In this paper, we propose a cyclic gestures descriptor that can detect and localise rhythmic body movements by taking advantage of both colour and depth modalities. We show experimentally how our rhythmic descriptor successfully localises rhythmic gestures such as hands fidgeting, legs fidgeting or rocking, performing significantly better than the majority-vote classification baseline. Our experiments also demonstrate the importance of fusing both modalities, with a significant increase in performance when compared to individual modalities.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mohammadi, Gelareh; Park, Sunghyun; Sagae, Kenji; Vinciarelli, Alessandro; Morency, Louis-Philippe
Who Is Persuasive? The Role of Perceived Personality and Communication Modality in Social Multimedia Proceedings Article
In: Proceedings of the 15th ACM on International Conference on Multimodal Interaction, pp. 19–26, ACM Press, New York, NY, 2013, ISBN: 978-1-4503-2129-7.
@inproceedings{mohammadi_who_2013,
title = {Who Is Persuasive? The Role of Perceived Personality and Communication Modality in Social Multimedia},
author = {Gelareh Mohammadi and Sunghyun Park and Kenji Sagae and Alessandro Vinciarelli and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Who%20is%20persuasive%20-%20the%20role%20of%20perceived%20personality%20and%20communication%20modality%20in%20social%20multimedia.pdf},
doi = {10.1145/2522848.2522857},
isbn = {978-1-4503-2129-7},
year = {2013},
date = {2013-12-01},
booktitle = {Proceedings of the 15th ACM on International Conference on Multimodal Interaction},
pages = {19–26},
publisher = {ACM Press},
address = {New York, NY},
abstract = {Persuasive communication is part of everyone's daily life. With the emergence of social websites like YouTube, Facebook and Twitter, persuasive communication is now seen online on a daily basis. This paper explores the effect of multi-modality and perceived personality on the persuasiveness of social multimedia content. The experiments are performed over a large corpus of movie review clips from YouTube which is presented to online annotators in three different modalities: text only, audio only, and video. The annotators evaluated the persuasiveness of each review across the different modalities and judged the personality of the speaker. Our detailed analysis confirmed several research hypotheses designed to study the relationships between persuasion, perceived personality and communicative channel, namely modality. Three hypotheses are examined: the first studies the effect of communication modality on persuasion, the second examines the correlation between persuasion and personality perception, and the third, derived from the first two, explores how communication modality influences personality perception.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Miller, Chreston; Quek, Francis; Morency, Louis-Philippe
Interactive Relevance Search and Modeling: Support for Expert-Driven Analysis of Multimodal Data Proceedings Article
In: Proceedings of the 15th ACM on International Conference on Multimodal Interaction, pp. 149–156, ACM, Sydney, Australia, 2013.
@inproceedings{miller_interactive_2013,
title = {Interactive Relevance Search and Modeling: Support for Expert-Driven Analysis of Multimodal Data},
author = {Chreston Miller and Francis Quek and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Interactive%20relevance%20search%20and%20modeling%20-%20Support%20for%20expert-driven%20analysis%20of%20multimodal%20data.pdf},
year = {2013},
date = {2013-12-01},
booktitle = {Proceedings of the 15th ACM on International Conference on Multimodal Interaction},
pages = {149–156},
publisher = {ACM},
address = {Sydney, Australia},
abstract = {In this paper we present the findings of three longitudinal case studies in which a new method for conducting multimodal analysis of human behavior is tested. The focus of this new method is to engage a researcher integrally in the analysis process and allow them to guide the identification and discovery of relevant behavior instances within multimodal data. The case studies resulted in the creation of two analysis strategies: Single-Focus Hypothesis Testing and Multi-Focus Hypothesis Testing. Each was shown to be beneficial to multimodal analysis by supporting either a single focused deep analysis or analysis across multiple angles in unison. These strategies exemplified how challenging questions can be answered for multimodal datasets. The new method is described and the case studies’ findings are presented, detailing how the new method supports multimodal analysis and opens the door for a new breed of analysis methods. Two of the three case studies resulted in publishable results for the respective participants.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Scherer, Stefan; Stratou, Giota; Morency, Louis-Philippe
Audiovisual Behavior Descriptors for Depression Assessment Proceedings Article
In: Proceedings of ICMI'13, pp. 135–140, ACM Press, Sydney, Australia, 2013, ISBN: 978-1-4503-2129-7.
@inproceedings{scherer_audiovisual_2013,
title = {Audiovisual Behavior Descriptors for Depression Assessment},
author = {Stefan Scherer and Giota Stratou and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Audiovisual%20behavior%20descriptors%20for%20depression%20assessment.pdf},
doi = {10.1145/2522848.2522886},
isbn = {978-1-4503-2129-7},
year = {2013},
date = {2013-12-01},
booktitle = {Proceedings of ICMI'13},
pages = {135–140},
publisher = {ACM Press},
address = {Sydney, Australia},
abstract = {We investigate audiovisual indicators, in particular measures of reduced emotional expressivity and psycho-motor retardation, for depression within semi-structured virtual human interviews. Based on a standard self-assessment depression scale we investigate the statistical discriminative strength of the audiovisual features on a depression/no-depression basis. Within subject-independent unimodal and multimodal classification experiments we find that early feature-level fusion yields promising results and confirms the statistical findings. We further correlate the behavior descriptors with the assessed depression severity and find considerable correlation. Lastly, a joint multimodal factor analysis reveals two prominent factors within the data that show both statistical discriminative power as well as strong linear correlation with the depression severity score. These preliminary results based on a standard factor analysis are promising and motivate us to investigate this approach further in the future, while incorporating additional modalities.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Park, Sunghyun
Persuasiveness in Social Multimedia: The Role of Communication Modality and the Challenge of Crowdsourcing Annotations Proceedings Article
In: Proceedings of the 15th ACM on International Conference on Multimodal Interaction, pp. 321–324, ACM Press, New York, NY, 2013, ISBN: 978-1-4503-2129-7.
@inproceedings{park_persuasiveness_2013,
title = {Persuasiveness in Social Multimedia: The Role of Communication Modality and the Challenge of Crowdsourcing Annotations},
author = {Sunghyun Park},
url = {http://ict.usc.edu/pubs/Persuasiveness%20in%20social%20multimedia%20-%20the%20role%20of%20communication%20modality%20and%20the%20challenge%20of%20crowdsourcing%20annotations.pdf},
doi = {10.1145/2522848.2532198},
isbn = {978-1-4503-2129-7},
year = {2013},
date = {2013-12-01},
booktitle = {Proceedings of the 15th ACM on International Conference on Multimodal Interaction},
pages = {321–324},
publisher = {ACM Press},
address = {New York, NY},
abstract = {With the exponential growth of social multimedia content online, it is increasingly important to understand why and how some content is perceived as persuasive while other content is ignored. This paper outlines my research goals in understanding human perception of persuasiveness in social multimedia content, which involve studying how different communication modalities influence our perception and identifying key verbal and nonverbal behaviors that eventually lead us to believe someone is convincing and influential. For any research involving in-depth human behavior analysis, it is imperative to obtain accurate annotations of human behaviors at the micro-level. In addition to investigating persuasiveness, this work will also provide to the research community convenient web-based annotation tools, effective procedures for obtaining high-quality annotations with crowdsourcing, and evaluation metrics to fairly and accurately measure the quality and agreement of micro-level behavior annotations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Song, Yale; Morency, Louis-Philippe; Davis, Randall
Learning a Sparse Codebook of Facial and Body Microexpressions for Emotion Recognition Proceedings Article
In: Proceedings of the 15th ACM on International Conference on Multimodal Interaction, pp. 237–244, ACM Press, 2013, ISBN: 978-1-4503-2129-7.
@inproceedings{song_learning_2013,
title = {Learning a Sparse Codebook of Facial and Body Microexpressions for Emotion Recognition},
author = {Yale Song and Louis-Philippe Morency and Randall Davis},
url = {http://ict.usc.edu/pubs/Learning%20a%20sparse%20codebook%20of%20facial%20and%20body%20microexpressions%20for%20emotion%20recognition.pdf},
doi = {10.1145/2522848.2522851},
isbn = {978-1-4503-2129-7},
year = {2013},
date = {2013-12-01},
booktitle = {Proceedings of the 15th ACM on International Conference on Multimodal Interaction},
pages = {237–244},
publisher = {ACM Press},
abstract = {Obtaining a compact and discriminative representation of facial and body expressions is a difficult problem in emotion recognition. Part of the difficulty is capturing microexpressions, i.e., short, involuntary expressions that last for only a fraction of a second: at a micro-temporal scale, there are so many other subtle face and body movements that do not convey semantically meaningful information. We present a novel approach to this problem by exploiting the sparsity of the frequent micro-temporal motion patterns. Local space-time features are extracted over the face and body region for a very short time period, e.g., few milliseconds. A codebook of microexpressions is learned from the data and used to encode the features in a sparse manner. This allows us to obtain a representation that captures the most salient motion patterns of the face and body at a micro-temporal scale. Experiments performed on the AVEC 2012 dataset show our approach achieving the best published performance on the expectation dimension based solely on visual features. We also report experimental results on audio-visual emotion recognition, comparing early and late data fusion techniques.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Faust, Lauren; Artstein, Ron
People hesitate more, talk less to virtual interviewers than to human interviewers Proceedings Article
In: Proceedings of the 17th Workshop on the Semantics and Pragmatics of Dialogue, Amsterdam, 2013.
@inproceedings{faust_people_2013,
title = {People hesitate more, talk less to virtual interviewers than to human interviewers},
author = {Lauren Faust and Ron Artstein},
url = {http://ict.usc.edu/pubs/People%20hesitate%20more,%20talk%20less%20to%20virtual%20interviewers%20than%20to%20human%20interviewers.pdf},
year = {2013},
date = {2013-12-01},
booktitle = {Proceedings of the 17th Workshop on the Semantics and Pragmatics of Dialogue},
address = {Amsterdam},
abstract = {In a series of screening interviews for psychological distress, conducted separately by a human interviewer and by an animated virtual character controlled by a human, participants talked substantially less and produced twice as many filled pauses when talking to the virtual character. This contrasts with earlier findings, where people were less disfluent when talking to a computer dialogue system. The results suggest that the characteristics of computer-directed speech vary depending on the type of dialogue system used.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Yu, Zhou; Scherer, Stefan; DeVault, David; Gratch, Jonathan; Stratou, Giota; Morency, Louis-Philippe; Cassell, Justine
Multimodal Prediction of Psychological Disorders: Learning Verbal and Nonverbal Commonalities in Adjacency Pairs Proceedings Article
In: Semdial 2013 DialDam: Proceedings of the 17th Workshop on the Semantics and Pragmatics of Dialogue, pp. 160–169, Amsterdam, The Netherlands, 2013.
@inproceedings{yu_multimodal_2013,
title = {Multimodal Prediction of Psychological Disorders: Learning Verbal and Nonverbal Commonalities in Adjacency Pairs},
author = {Zhou Yu and Stefan Scherer and David DeVault and Jonathan Gratch and Giota Stratou and Louis-Philippe Morency and Justine Cassell},
url = {http://www.cs.cmu.edu/afs/cs/user/zhouyu/www/semdial_2013_zhou.pdf},
year = {2013},
date = {2013-12-01},
booktitle = {Semdial 2013 DialDam: Proceedings of the 17th Workshop on the Semantics and Pragmatics of Dialogue},
pages = {160–169},
address = {Amsterdam, The Netherlands},
abstract = {Semi-structured interviews are widely used in medical settings to gather information from individuals about psychological disorders, such as depression or anxiety. These interviews typically consist of a series of question and response pairs, which we refer to as adjacency pairs. We propose a computational model, the Multi-modal HCRF, that considers the commonalities among adjacency pairs and information from multiple modalities to infer the psychological states of the interviewees. We collect data and perform experiments on a human to virtual human interaction data set. Our multimodal approach gives a significant advantage over conventional holistic approaches which ignore the adjacency pair context in predicting depression from semi-structured interviews.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nouri, Elnaz
Does History Help? An Experiment on How Context Affects Crowdsourcing Dialogue Annotation Proceedings Article
In: Proceedings of the Human Computation Workshop on Scaling Speech, Language Understanding and Dialogue through Crowdsourcing, Palm Springs, CA, 2013.
@inproceedings{nouri_does_2013,
title = {Does History Help? An Experiment on How Context Affects Crowdsourcing Dialogue Annotation},
author = {Elnaz Nouri},
url = {http://ict.usc.edu/pubs/Does%20History%20Help%20-%20An%20Experiment%20on%20How%20Context%20Affects%20Crowdsourcing%20Dialogue%20Annotation.pdf},
year = {2013},
date = {2013-11-01},
booktitle = {Proceedings of the Human Computation Workshop on Scaling Speech, Language Understanding and Dialogue through Crowdsourcing},
address = {Palm Springs, CA},
abstract = {Crowds of people can potentially solve some problems faster than individuals. Crowdsourced data can be leveraged to benefit the crowd by providing information or solutions faster than traditional means. Many tasks needed for developing dialogue systems, such as annotation, can benefit from crowdsourcing as well. We investigate how to outsource dialogue data annotation through Amazon Mechanical Turk. We are in particular interested in empirically analyzing how much context from previous parts of the dialogue (e.g. previous dialogue turns) needs to be provided before the target part (dialogue turn) is presented to the annotator. The answer to this question is essential for leveraging crowdsourced data for appropriate and efficient response and coordination. We study the effect of presenting different amounts of preceding context (turns) to the Turkers when annotating sentiments of dyadic negotiation dialogs, measured by inter-annotator reliability and comparison to the gold standard.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ovchinnikova, Ekaterina; Gordon, Andrew S.; Hobbs, Jerry R.
Abduction for Discourse Interpretation: A Probabilistic Framework Proceedings Article
In: Proceedings of the Joint Symposium on Semantic Processing, pp. 42–50, Trento, Italy, 2013.
@inproceedings{ovchinnikova_abduction_2013,
title = {Abduction for Discourse Interpretation: A Probabilistic Framework},
author = {Ekaterina Ovchinnikova and Andrew S. Gordon and Jerry R. Hobbs},
url = {http://ict.usc.edu/pubs/Abduction%20for%20Discourse%20Interpretation%20-%20A%20Probabilistic%20Framework.%20Joint%20Symposium%20on%20Semantic%20Processing.PDF},
year = {2013},
date = {2013-11-01},
booktitle = {Proceedings of the Joint Symposium on Semantic Processing},
pages = {42–50},
address = {Trento, Italy},
abstract = {Abduction allows us to model interpretation of discourse as the explanation of observables, given additional knowledge about the world. In an abductive framework, many explanations can be constructed for the same observation, requiring an approach to estimate the likelihood of these alternative explanations. We show that, for discourse interpretation, weighted abduction has advantages over alternative approaches to estimating the likelihood of hypotheses. However, weighted abduction has no probabilistic interpretation, which makes the estimation and learning of weights difficult. To address this, we propose a formal probabilistic abductive framework that captures the advantages of weighted abduction when applied to discourse interpretation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Xu, Yuyu; Feng, Andrew W.; Marsella, Stacy C.; Shapiro, Ari
A Practical and Configurable Lip Sync Method for Games Proceedings Article
In: ACM SIGGRAPH Motion in Games, Dublin, Ireland, 2013.
@inproceedings{xu_practical_2013,
title = {A Practical and Configurable Lip Sync Method for Games},
author = {Yuyu Xu and Andrew W. Feng and Stacy C. Marsella and Ari Shapiro},
url = {http://ict.usc.edu/pubs/A%20Practical%20and%20Configurable%20Lip%20Sync%20Method%20for%20Games.pdf},
year = {2013},
date = {2013-11-01},
booktitle = {ACM SIGGRAPH Motion in Games},
address = {Dublin, Ireland},
abstract = {We demonstrate a lip animation (lip sync) algorithm for real-time applications that can be used to generate synchronized facial movements with audio generated from natural speech or a text-to-speech engine. Our method requires an animator to construct animations using a canonical set of visemes for all pairwise combinations of a reduced phoneme set (phone bigrams). These animations are then stitched together to construct the final animation, adding velocity and lip-pose constraints. This method can be applied to any character that uses the same, small set of visemes. Our method can operate efficiently in multiple languages by reusing phone bigram animations that are shared among languages, and specific word sounds can be identified and changed on a per-character basis. Our method uses no machine learning, which offers two advantages over techniques that do: 1) data can be generated for non-human characters whose faces cannot be easily retargeted from a human speaker’s face, and 2) the specific facial poses or shapes used for animation can be specified during the setup and rigging stage, before the lip animation stage, thus making it suitable for game pipelines or circumstances where the speech target poses are predetermined, such as after acquisition from an online 3D marketplace.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; Huangfu, Luwen; Sagae, Kenji; Mao, Wenji; Chen, Wen
Identifying Personal Narratives in Chinese Weblog Posts Proceedings Article
In: Intelligent Narrative Technologies Workshop, Boston, MA, 2013.
@inproceedings{gordon_identifying_2013,
title = {Identifying Personal Narratives in Chinese Weblog Posts},
author = {Andrew S. Gordon and Luwen Huangfu and Kenji Sagae and Wenji Mao and Wen Chen},
url = {http://ict.usc.edu/pubs/Identifying%20Personal%20Narratives%20in%20Chinese%20Weblog%20Posts.PDF},
year = {2013},
date = {2013-10-01},
booktitle = {Intelligent Narrative Technologies Workshop},
address = {Boston, MA},
abstract = {Automated text classification technologies have enabled researchers to amass enormous collections of personal narratives posted to English-language weblogs. In this paper, we explore analogous approaches to identify personal narratives in Chinese weblog posts as a precursor to future empirical studies of cross-cultural differences in narrative structure. We describe the collection of over half a million posts from a popular Chinese weblog hosting service, and the manual annotation of story and nonstory content in sampled posts. Using supervised machine learning methods, we developed an automated text classifier for personal narratives in Chinese posts, achieving classification accuracy comparable to previous work in English. Using this classifier, we automatically identify over sixty-four thousand personal narratives for use in future cross-cultural analyses and Chinese-language applications of narrative corpora.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Pynadath, David V.; Marsella, Stacy C.
Subjective Perceptions in Wartime Negotiation Proceedings Article
In: International Conference on Affective Computing and Intelligent Interaction, pp. 540–545, Geneva, Switzerland, 2013.
@inproceedings{wang_subjective_2013,
title = {Subjective Perceptions in Wartime Negotiation},
author = {Ning Wang and David V. Pynadath and Stacy C. Marsella},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6681486&tag=1},
doi = {10.1109/ACII.2013.95},
year = {2013},
date = {2013-09-01},
booktitle = {International Conference on Affective Computing and Intelligent Interaction},
pages = {540–545},
address = {Geneva, Switzerland},
abstract = {The prevalence of negotiation in social interaction has motivated researchers to develop virtual agents that can understand, facilitate, teach and even carry out negotiations. While much of this research has analyzed how to maximize the objective outcome, there is a growing body of work demonstrating that subjective perceptions of the outcome also play a critical role in human negotiation behavior. People derive subjective value from not only the outcome, but also from the process by which they achieve that outcome, from their relationship with their negotiation partner, etc. The affective responses evoked by these subjective valuations can be very different from what would be evoked by the objective outcome alone. We investigate such subjective valuations within human-agent negotiation in four variations of a wartime negotiation game. We observe that the objective outcomes of these negotiations are not strongly correlated with the human negotiators’ subjective perceptions, as measured by the Subjective Value Index. We examine the game dynamics and agent behaviors to identify features that induce different subjective values in the participants. We thus are able to identify characteristics of the negotiation process and the agents’ behavior that most impact people’s subjective valuations in our wartime negotiation games.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ito, Jonathan Y.; Marsella, Stacy C.
Modeling Framing Effects Comparing an Appraisal-Based Model with Existing Models Proceedings Article
In: ACII 2013, pp. 381–386, IEEE Computer Society, 2013.
@inproceedings{ito_modeling_2013,
title = {Modeling Framing Effects Comparing an Appraisal-Based Model with Existing Models},
author = {Jonathan Y. Ito and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Modeling%20Framing%20Effects%20Comparing%20an%20Appraisal-Based%20Model%20with%20Existing%20Models.pdf},
year = {2013},
date = {2013-09-01},
booktitle = {ACII 2013},
pages = {381–386},
publisher = {IEEE Computer Society},
abstract = {One significant challenge in creating accurate models of human decision behavior is accounting for the effects of context. Research shows that seemingly minor changes in the presentation of a decision can lead to shifts in behavior; phenomena collectively referred to as framing effects. This work presents a computational modeling analysis comparing the effectiveness of Context Dependent Utility, an appraisal-based approach to modeling the multi-dimensional effects of context on decision behavior, against Cumulative Prospect Theory, Security-Potential/Aspiration Theory, the Transfer of Attention Exchange model, and a power-based utility function. To contrast model performance, a non-linear least-squares analysis and subsequent calculation of Akaike Information Criterion scores, which take into account goodness of fit while penalizing for model complexity, are employed. Results suggest that multi-dimensional models of context and framing, such as Context Dependent Utility, can be much more accurate in modeling decisions which similarly involve multi-dimensional considerations of context. Furthermore, this work demonstrates the effectiveness of employing affective constructs, such as appraisal, for encoding and evaluation of context within decision-theoretic frameworks to better model and predict human decision behavior.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bousmalis, Konstantinos; Zafeiriou, Stefanos; Morency, Louis-Philippe; Pantic, Maja; Ghahramani, Zoubin
Variational Hidden Conditional Random Fields with Coupled Dirichlet Process Mixtures Proceedings Article
In: Blockeel, Hendrik; Kersting, Kristian; Nijssen, Siegfried; Železný, Filip (Ed.): Machine Learning and Knowledge Discovery in Databases, pp. 531–547, Springer Berlin Heidelberg, Prague, Czech Republic, 2013, ISBN: 978-3-642-40990-5, 978-3-642-40991-2.
@inproceedings{bousmalis_variational_2013,
title = {Variational Hidden Conditional Random Fields with Coupled Dirichlet Process Mixtures},
author = {Konstantinos Bousmalis and Stefanos Zafeiriou and Louis-Philippe Morency and Maja Pantic and Zoubin Ghahramani},
editor = {Hendrik Blockeel and Kristian Kersting and Siegfried Nijssen and Filip Železný},
url = {http://link.springer.com/10.1007/978-3-642-40991-2_34},
doi = {10.1007/978-3-642-40991-2_34},
isbn = {978-3-642-40990-5, 978-3-642-40991-2},
year = {2013},
date = {2013-09-01},
booktitle = {Machine Learning and Knowledge Discovery in Databases},
volume = {8189},
pages = {531–547},
publisher = {Springer Berlin Heidelberg},
address = {Prague, Czech Republic},
abstract = {Hidden Conditional Random Fields (HCRFs) are discriminative latent variable models which have been shown to successfully learn the hidden structure of a given classification problem. An infinite HCRF is an HCRF with a countably infinite number of hidden states, which rids us not only of the necessity to specify a priori a fixed number of available hidden states but also of the problem of overfitting. Markov chain Monte Carlo (MCMC) sampling algorithms are often employed for inference in such models. However, convergence of such algorithms is rather difficult to verify, and as the complexity of the task at hand increases, the computational cost of such algorithms often becomes prohibitive. These limitations can be overcome by variational techniques. In this paper, we present a generalized framework for infinite HCRF models, and a novel variational inference approach on a model based on coupled Dirichlet Process Mixtures, the HCRF–DPM. We show that the variational HCRF–DPM is able to converge to a correct number of represented hidden states, and performs as well as the best parametric HCRFs, chosen via cross-validation, for the difficult tasks of recognizing instances of agreement, disagreement, and pain in audiovisual sequences.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
de Melo, Celso M.; Gratch, Jonathan; Carnevale, Peter
The Effect of Agency on the Impact of Emotion Expressions on People’s Decision Making Proceedings Article
In: 2013 Humaine Association Conference on Affective Computing and Intelligent Interaction, pp. 546–551, Geneva, Switzerland, 2013, ISBN: 978-0-7695-5048-0.
@inproceedings{de_melo_effect_2013,
title = {The Effect of Agency on the Impact of Emotion Expressions on People’s Decision Making},
author = {Celso M. de Melo and Jonathan Gratch and Peter Carnevale},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6681487},
doi = {10.1109/ACII.2013.96},
isbn = {978-0-7695-5048-0},
year = {2013},
date = {2013-09-01},
booktitle = {2013 Humaine Association Conference on Affective Computing and Intelligent Interaction},
pages = {546–551},
address = {Geneva, Switzerland},
abstract = {Recent research in neuroeconomics reveals that people show different behavior and lower activation of brain regions associated with mentalizing (i.e., the inference of others’ mental states) when engaged in decision making tasks with a computer, compared to a human. These findings are important for affective computing because they suggest people’s decision making might be influenced differently according to whether they believe the emotional expressions shown by a computer are being generated by a computer algorithm or a human. To test this, we had people engage in a social dilemma (Experiment 1) or a negotiation (Experiment 2) with virtual humans that were either agents (i.e., controlled by computers) or avatars (i.e., controlled by humans). The results show a clear agency effect: in Experiment 1, people cooperated more with virtual humans that showed cooperative facial displays (e.g., joy after mutual cooperation) rather than competitive displays (e.g., joy when the participant was exploited), but the effect was only significant with avatars; in Experiment 2, people conceded more to an angry than to a neutral virtual human but, once again, the effect was only significant with avatars.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Park, Sunghyun; Scherer, Stefan; Gratch, Jonathan; Carnevale, Peter; Morency, Louis-Philippe
Mutual Behaviors during Dyadic Negotiation: Automatic Prediction of Respondent Reactions Proceedings Article
In: Affective Computing and Intelligent Interaction, pp. 423–428, Geneva, Switzerland, 2013, ISBN: 978-0-7695-5048-0.
@inproceedings{park_mutual_2013,
title = {Mutual Behaviors during Dyadic Negotiation: Automatic Prediction of Respondent Reactions},
author = {Sunghyun Park and Stefan Scherer and Jonathan Gratch and Peter Carnevale and Louis-Philippe Morency},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6681467},
doi = {10.1109/ACII.2013.76},
isbn = {978-0-7695-5048-0},
year = {2013},
date = {2013-09-01},
booktitle = {Affective Computing and Intelligent Interaction},
pages = {423–428},
address = {Geneva, Switzerland},
abstract = {In this paper, we analyze face-to-face negotiation interactions with the goal of predicting the respondent’s immediate reaction (i.e., accept or reject) to a negotiation offer. Supported by the theory of social rapport, we focus on mutual behaviors, which are defined as nonverbal characteristics that occur due to interactional influence. These patterns include behavioral symmetry (e.g., synchronized smiles) as well as asymmetry (e.g., opposite postures) between the two negotiators. In addition, we put emphasis on finding audio-visual mutual behaviors that can be extracted automatically, with the vision of a real-time decision support tool. We introduce a dyadic negotiation dataset consisting of 42 face-to-face interactions and show experiments confirming the importance of multimodal and mutual behaviors.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Stratou, Giota; Scherer, Stefan; Gratch, Jonathan; Morency, Louis-Philippe
Automatic Nonverbal Behavior Indicators of Depression and PTSD: Exploring Gender Differences Proceedings Article
In: Affective Computing and Intelligent Interaction, pp. 147–152, IEEE, Geneva, Switzerland, 2013, ISBN: 978-0-7695-5048-0.
@inproceedings{stratou_automatic_2013,
title = {Automatic Nonverbal Behavior Indicators of Depression and PTSD: Exploring Gender Differences},
author = {Giota Stratou and Stefan Scherer and Jonathan Gratch and Louis-Philippe Morency},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6681422},
doi = {10.1109/ACII.2013.31},
isbn = {978-0-7695-5048-0},
year = {2013},
date = {2013-09-01},
booktitle = {Affective Computing and Intelligent Interaction},
pages = {147–152},
publisher = {IEEE},
address = {Geneva, Switzerland},
abstract = {In this paper, we show that gender plays an important role in the automatic assessment of psychological conditions such as depression and post-traumatic stress disorder (PTSD). We identify a directly interpretable and intuitive set of predictive indicators, selected from three general categories of nonverbal behaviors: affect, expression variability and motor variability. For the analysis, we introduce a semi-structured virtual human interview dataset which includes 53 video recorded interactions. Our experiments on automatic classification of psychological conditions show that a gender-dependent approach significantly improves the performance over a gender-agnostic one.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Scherer, Stefan; Stratou, Giota; Gratch, Jonathan; Morency, Louis-Philippe
Investigating Voice Quality as a Speaker-Independent Indicator of Depression and PTSD Proceedings Article
In: Annual Conference of the International Speech Communication Association (INTERSPEECH), Lyon, France, 2013.
@inproceedings{scherer_investigating_2013-1,
title = {Investigating Voice Quality as a Speaker-Independent Indicator of Depression and PTSD},
author = {Stefan Scherer and Giota Stratou and Jonathan Gratch and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Investigating%20Voice%20Quality%20as%20a%20Speaker-Independent%20Indicator%20of%20Depression%20and%20PTSD.pdf},
year = {2013},
date = {2013-08-01},
booktitle = {Annual Conference of the International Speech Communication Association (INTERSPEECH)},
address = {Lyon, France},
abstract = {We seek to investigate voice quality characteristics, in particular on a breathy to tense dimension, as an indicator for psychological distress, i.e. depression and post-traumatic stress disorder (PTSD), within semi-structured virtual human interviews. Our evaluation identifies significant differences between the voice quality of psychologically distressed participants and not-distressed participants within this limited corpus. We investigate the capability of automatic algorithms to classify psychologically distressed speech in speaker-independent experiments. Additionally, we examine the impact of the posed questions’ affective polarity, as motivated by findings in the literature on positive stimulus attenuation and negative stimulus potentiation in emotional reactivity of psychologically distressed participants. The experiments yield promising results using standard machine learning algorithms and solely four distinct features capturing the tenseness of the speaker’s voice.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}