Publications
Search
Park, Sunghyun; Mohammadi, Gelareh; Artstein, Ron; Morency, Louis-Philippe
Crowdsourcing Micro-Level Multimedia Annotations: The Challenges of Elevation and Interface Proceedings Article
In: International ACM Workshop on Crowdsourcing for Multimedia (CrowdMM), Nara, Japan, 2012.
@inproceedings{park_crowdsourcing_2012,
  title     = {Crowdsourcing Micro-Level Multimedia Annotations: The Challenges of Elevation and Interface},
  author    = {Sunghyun Park and Gelareh Mohammadi and Ron Artstein and Louis-Philippe Morency},
  url       = {http://ict.usc.edu/pubs/Crowdsourcing%20Micro-Level%20Multimedia%20Annotations-%20The%20Challenges%20of%20Elevation%20and%20Interface.pdf},
  year      = {2012},
  date      = {2012-10-01},
  booktitle = {International ACM Workshop on Crowdsourcing for Multimedia (CrowdMM)},
  address   = {Nara, Japan},
  abstract  = {This paper presents a new evaluation procedure and tool for crowdsourcing micro-level multimedia annotations and shows that such annotations can achieve a quality comparable to that of expert annotations. We propose a new evaluation procedure, called MM-Eval (Micro-level Multimedia Evaluation), which compares fine time-aligned annotations using Krippendorff's alpha metric and introduce two new metrics to evaluate the types of disagreement between coders. We also introduce OCTAB (Online Crowdsourcing Tool for Annotations of Behaviors), a web-based annotation tool that allows precise and convenient multimedia behavior annotations, directly from Amazon Mechanical Turk interface. With an experiment using the above tool and evaluation procedure, we show that a majority vote among annotations from 3 crowdsource workers leads to a quality comparable to that of local expert annotations.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Scherer, Stefan; Weibel, Nadir; Oviatt, Sharon; Morency, Louis-Philippe
Multimodal Prediction of Expertise and Leadership in Learning Groups Proceedings Article
In: ACM International Conference on Multimodal Interaction (ICMI), Santa Monica, CA, 2012.
@inproceedings{scherer_multimodal_2012,
  title     = {Multimodal Prediction of Expertise and Leadership in Learning Groups},
  author    = {Stefan Scherer and Nadir Weibel and Sharon Oviatt and Louis-Philippe Morency},
  url       = {http://ict.usc.edu/pubs/Multimodal%20Prediction%20of%20Expertise%20and%20Leadership%20in%20Learning%20Groups.pdf},
  year      = {2012},
  date      = {2012-10-01},
  booktitle = {ACM International Conference on Multimodal Interaction (ICMI)},
  address   = {Santa Monica, CA},
  abstract  = {In this study, we investigate low level predictors from audio and writing modalities for the separation and identification of socially dominant leaders and experts within a study group. We use a multimodal dataset of situated computer assisted group learning tasks: Groups of three high-school students solve a number of mathematical problems in two separate sessions. In order to automatically identify the socially dominant student and expert in the group we analyze a number of prosodic and voice quality features as well as writing-based features. In this preliminary study we identify a number of promising acoustic and writing predictors for the disambiguation of leaders, experts and other students. We believe that this exploratory study reveals key opportunities for future analysis of multimodal learning analytics based on a combination of audio and writing signals.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rosenbloom, Paul S.
Deconstructing Reinforcement Learning in Sigma Proceedings Article
In: Conference on Artificial General Intelligence, Oxford, UK, 2012.
@inproceedings{rosenbloom_deconstructing_2012,
  title     = {Deconstructing Reinforcement Learning in Sigma},
  author    = {Paul S. Rosenbloom},
  url       = {http://ict.usc.edu/pubs/Deconstructing%20Reinforcement%20Learning%20in%20Sigma.pdf},
  year      = {2012},
  date      = {2012-10-01},
  booktitle = {Conference on Artificial General Intelligence},
  address   = {Oxford, UK},
  abstract  = {This article describes the development of reinforcement learning within the Sigma graphical cognitive architecture. Reinforcement learning has been deconstructed in terms of the interactions among more basic mechanisms and knowledge in Sigma, making it a derived capability rather than a de novo mechanism. Basic reinforcement learning -- both model-based and model-free -- are demonstrated, along with the intertwining of model learning.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Park, Sunghyun; Gratch, Jonathan; Morency, Louis-Philippe
I Already Know Your Answer: Using Nonverbal Behaviors to Predict Immediate Outcomes in a Dyadic Negotiation Proceedings Article
In: 14th ACM International Conference on Multimodal Interaction (ICMI), Santa Monica, CA, 2012.
@inproceedings{park_i_2012,
  title     = {I Already Know Your Answer: Using Nonverbal Behaviors to Predict Immediate Outcomes in a Dyadic Negotiation},
  author    = {Sunghyun Park and Jonathan Gratch and Louis-Philippe Morency},
  url       = {http://ict.usc.edu/pubs/I%20Already%20Know%20Your%20Answer-%20Using%20Nonverbal%20Behaviors%20to%20Predict%20Immediate%20Outcomes%20in%20a%20Dyadic%20Negotiation.pdf},
  year      = {2012},
  date      = {2012-10-01},
  booktitle = {14th ACM International Conference on Multimodal Interaction (ICMI)},
  address   = {Santa Monica, CA},
  abstract  = {Be it in our workplace or with our family or friends, negotiation comprises a fundamental fabric of our everyday life, and it is apparent that a system that can automatically predict negotiation outcomes will have substantial implications. In this paper, we focus on finding nonverbal behaviors that are predictive of immediate outcomes (acceptances or rejections of proposals) in a dyadic negotiation. Looking at the nonverbal behaviors of the respondent alone would be inadequate since ample predictive information could also reside in the behaviors of the proposer, as well as the past history between the two parties. With this intuition in mind, we show that a more accurate prediction can be achieved by considering all the three sources (multimodal) of information together. We evaluate our approach on a face-to-face negotiation dataset consisting of 42 dyadic interactions and show that integrating all three sources of information outperforms each individual predictor.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Graham, Paul; Tunwattanapong, Borom; Busch, Jay; Yu, Xueming; Jones, Andrew; Debevec, Paul; Ghosh, Abhijeet
Measurement-based Synthesis of Facial Microgeometry Proceedings Article
In: ACM (SIGGRAPH), 2012 Talks (SIGGRAPH '12), Los Angeles, CA, 2012.
@inproceedings{graham_measurement-based_2012-1,
title = {Measurement-based Synthesis of Facial Microgeometry},
author = {Paul Graham and Borom Tunwattanapong and Jay Busch and Xueming Yu and Andrew Jones and Paul Debevec and Abhijeet Ghosh},
url = {http://ict.usc.edu/pubs/A%20Measurement-based%20Synthesis%20of%20Facial%20Microgeometry.pdf},
year = {2012},
date = {2012-08-01},
booktitle = {ACM (SIGGRAPH), 2012 Talks (SIGGRAPH '12)},
address = {Los Angeles, CA},
abstract = {Current scanning techniques record facial mesostructure with submillimeter precision showing pores, wrinkles, and creases. However, surface roughness continues to shape specular reflection at the level of microstructure: micron scale structures. Here, we present an approach to increase the resolution of mesostructure-level facial scans using microstructure examples digitized about the face. We digitize the skin patches using polarized gradient illumination and 10 mm resolution macro photography, and observe point-source reflectance measurements to characterize the specular reflectance lobe at this smaller scale. We then perform constrained texture synthesis to create appropriate surface microstructure per facial region, blending the regions to cover the whole entire face. We show that renderings of microstructure-augmented facial models preserve the original scanned mesostructure and exhibit surface reflections which are qualitatively more consistent with real photographs.},
internal-note = {NOTE(review): abstract looks PDF-extraction damaged -- "10 mm resolution macro photography" alongside "micron scale structures" is likely "10 micron", and "whole entire face" reads like duplicated text; verify against the published SIGGRAPH 2012 talk abstract before correcting},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gervits, Felix; Leuski, Anton; Bonial, Claire; Gordon, Carla; Traum, David
A Classification-Based Approach to Automating Human-Robot Dialogue Journal Article
In: pp. 13, 0000.
@article{gervits_classication-based_nodate,
  title    = {A Classification-Based Approach to Automating Human-Robot Dialogue},
  author   = {Felix Gervits and Anton Leuski and Claire Bonial and Carla Gordon and David Traum},
  url      = {https://link.springer.com/chapter/10.1007/978-981-15-9323-9_10},
  doi      = {10.1007/978-981-15-9323-9_10},
  pages    = {13},
  abstract = {We present a dialogue system based on statistical classification which was used to automate human-robot dialogue in a collaborative navigation domain. The classifier was trained on a small corpus of multi-floor Wizard-of-Oz dialogue including two wizards: one standing in for dialogue capabilities and another for navigation. Below, we describe the implementation details of the classifier and show how it was used to automate the dialogue wizard. We evaluate our system on several sets of source data from the corpus and find that response accuracy is generally high, even with very limited training data. Another contribution of this work is the novel demonstration of a dialogue manager that uses the classifier to engage in multifloor dialogue with two different human roles. Overall, this approach is useful for enabling spoken dialogue systems to produce robust and accurate responses to natural language input, and for robots that need to interact with humans in a team setting.},
  internal-note = {NOTE(review): @article is missing its required journal and year fields (listing renders "0000"); the DOI resolves to a Springer book chapter, so this is likely better typed as @incollection with booktitle and year filled from the publisher record -- confirm before changing, and note pages={13} appears to be a page count, not a page range},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Filter
Sorry, no publications matched your criteria.