Publications
Feng, Andrew; Lucas, Gale; Marsella, Stacy; Suma, Evan; Chiu, Chung-Cheng; Casas, Dan; Shapiro, Ari
Acting the Part: The Role of Gesture on Avatar Identity Proceedings Article
In: Proceedings of the Seventh International Conference on Motion in Games (MIG 2014), pp. 49–54, ACM Press, Playa Vista, CA, 2014, ISBN: 978-1-4503-2623-0.
@inproceedings{feng_acting_2014,
title = {Acting the Part: The Role of Gesture on Avatar Identity},
author = {Andrew Feng and Gale Lucas and Stacy Marsella and Evan Suma and Chung-Cheng Chiu and Dan Casas and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2668064.2668102},
doi = {10.1145/2668064.2668102},
isbn = {978-1-4503-2623-0},
year = {2014},
date = {2014-11-01},
booktitle = {Proceedings of the Seventh International Conference on Motion in Games (MIG 2014)},
pages = {49–54},
publisher = {ACM Press},
address = {Playa Vista, CA},
abstract = {Recent advances in scanning technology have enabled the widespread capture of 3D character models based on human subjects. However, in order to generate a recognizable 3D avatar, the movement and behavior of the human subject should be captured and replicated as well. We present a method of generating a 3D model from a scan, as well as a method to incorporate a subject’s style of gesturing into a 3D character. We present a study which shows that 3D characters that used the gestural style of their original human subjects were more recognizable as the original subject than those that did not.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Scherer, Stefan; Stratou, Giota; Lucas, Gale; Mahmoud, Marwa; Boberg, Jill; Gratch, Jonathan; Rizzo, Albert (Skip); Morency, Louis-Philippe
Automatic audiovisual behavior descriptors for psychological disorder analysis Journal Article
In: Image and Vision Computing Journal, vol. 32, no. 10, pp. 648–658, 2014, ISSN: 02628856.
@article{scherer_automatic_2014,
title = {Automatic audiovisual behavior descriptors for psychological disorder analysis},
author = {Stefan Scherer and Giota Stratou and Gale Lucas and Marwa Mahmoud and Jill Boberg and Jonathan Gratch and Albert (Skip) Rizzo and Louis-Philippe Morency},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0262885614001000},
doi = {10.1016/j.imavis.2014.06.001},
issn = {02628856},
year = {2014},
date = {2014-10-01},
journal = {Image and Vision Computing Journal},
volume = {32},
number = {10},
pages = {648–658},
abstract = {We investigate the capabilities of automatic audiovisual nonverbal behavior descriptors to identify indicators of psychological disorders such as depression, anxiety, and post-traumatic stress disorder. Due to strong correlations between these disorders as measured with standard self-assessment questionnaires in this study, we focus our investigations in particular on a generic distress measure as identified using factor analysis. Within this work, we seek to confirm and enrich the present state of the art, predominantly based on qualitative manual annotations, with automatic quantitative behavior descriptors. We propose a number of nonverbal behavior descriptors that can be automatically estimated from audiovisual signals. Such automatic behavior descriptors could be used to support healthcare providers with quantified and objective observations that could ultimately improve clinical assessment. We evaluate our work on the dataset called the Distress Assessment Interview Corpus (DAIC) which comprises dyadic interactions between a confederate interviewer and a paid participant. Our evaluation on this dataset shows correlation of our automatic behavior descriptors with the derived general distress measure. Our analysis also includes a deeper study of self-adaptor and fidgeting behaviors based on detailed annotations of where these behaviors occur.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Joshi, Himanshu; Rosenbloom, Paul S.; Ustun, Volkan
Isolated word recognition in the Sigma cognitive architecture Journal Article
In: Biologically Inspired Cognitive Architectures, vol. 10, pp. 1–9, 2014, ISSN: 2212683X.
@article{joshi_isolated_2014,
title = {Isolated word recognition in the Sigma cognitive architecture},
author = {Himanshu Joshi and Paul S. Rosenbloom and Volkan Ustun},
url = {http://linkinghub.elsevier.com/retrieve/pii/S2212683X14000644},
doi = {10.1016/j.bica.2014.11.001},
issn = {2212683X},
year = {2014},
date = {2014-10-01},
journal = {Biologically Inspired Cognitive Architectures},
volume = {10},
pages = {1–9},
abstract = {Symbolic architectures are effective at complex cognitive reasoning, but typically are incapable of important forms of sub-cognitive processing – such as perception – without distinct modules connected to them via low-bandwidth interfaces. Neural architectures, in contrast, may be quite effective at the latter, but typically struggle with the former. Sigma has been designed to leverage the state-of-the-art hybrid (discrete + continuous) mixed (symbolic + probabilistic) capability of graphical models to provide in a uniform non-modular fashion effective forms of, and integration across, both cognitive and sub-cognitive behavior. Here it is shown that Sigma is not only capable of performing a simple variant of speech recognition via the same knowledge structures and reasoning algorithm used for cognitive processing, but also of leveraging its existing knowledge templates and learning algorithm to acquire automatically most of the structures and parameters needed for this recognition activity.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Nazarian, Angela; Nouri, Elnaz; Traum, David
Initiative Patterns in Dialogue Genres Proceedings Article
In: Proceedings of Semdial 2014, Edinburgh, UK, 2014.
@inproceedings{nazarian_initiative_2014,
title = {Initiative Patterns in Dialogue Genres},
author = {Angela Nazarian and Elnaz Nouri and David Traum},
url = {http://ict.usc.edu/pubs/Initiative%20Patterns%20in%20Dialogue%20Genres.pdf},
year = {2014},
date = {2014-09-01},
booktitle = {Proceedings of Semdial 2014},
address = {Edinburgh, UK},
abstract = {One of the ways of distinguishing different dialogue genres is the differences in patterns of interactions between the participants. Morbini et al. (2013) informally define dialogue genres on the basis of features like user vs. system initiative, amongst other criteria. In this paper, we apply the multi-label initiative annotation scheme and related features from (Nouri and Traum, 2014) to a set of dialogue corpora from different domains. In our initial study, we examine two question-answering domains, a “slot-filling” service application domain, and several human-human negotiation domains.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Baltrušaitis, Tadas; Robinson, Peter; Morency, Louis-Philippe
Continuous Conditional Neural Fields for Structured Regression Book Section
In: Computer Vision–ECCV 2014, pp. 593–608, Springer, 2014.
@incollection{baltrusaitis_continuous_2014,
title = {Continuous Conditional Neural Fields for Structured Regression},
author = {Tadas Baltrušaitis and Peter Robinson and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Continuous%20Conditional%20Neural%20Fields%20for%20Structured%20Regression.pdf},
year = {2014},
date = {2014-09-01},
booktitle = {Computer Vision–ECCV 2014},
pages = {593–608},
publisher = {Springer},
abstract = {An increasing number of computer vision and pattern recognition problems require structured regression techniques. Problems like human pose estimation, unsegmented action recognition, emotion prediction and facial landmark detection have temporal or spatial output dependencies that regular regression techniques do not capture. In this paper we present continuous conditional neural fields (CCNF), a novel structured regression model that can learn non-linear input-output dependencies, and model temporal and spatial output relationships of varying length sequences. We propose two instances of our CCNF framework: Chain-CCNF for time series modelling, and Grid-CCNF for spatial relationship modelling. We evaluate our model on five public datasets spanning three different regression problems: facial landmark detection in the wild, emotion prediction in music and facial action unit recognition. Our CCNF model demonstrates state-of-the-art performance on all of the datasets used.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Feng, Andrew; Shapiro, Ari; Lhommet, Margaux; Marsella, Stacy
Embodied Autonomous Agents Book Section
In: Handbook of Virtual Environments: Design, Implementation, and Applications, pp. 335–352, 2014.
@incollection{feng_embodied_2014,
title = {Embodied Autonomous Agents},
author = {Andrew Feng and Ari Shapiro and Margaux Lhommet and Stacy Marsella},
url = {http://books.google.com/books?hl=en&lr=&id=7zzSBQAAQBAJ&oi=fnd&pg=PP1&dq=+Handbook+of+Virtual+Environments&ots=Vx3ia0S2Uu&sig=LaVbSdoG3FahlbVYbuCxLmKgFIA#v=onepage&q=Handbook%20of%20Virtual%20Environments&f=false},
year = {2014},
date = {2014-09-01},
booktitle = {Handbook of Virtual Environments: Design, Implementation, and Applications},
pages = {335–352},
abstract = {Over the last decade, virtual environments have been extensively used for a wide range of applications, from training systems to video games. Virtual humans are animated characters that are designed to populate these environments and to interact with the objects of the world as well as with the user. A virtual agent must perceive the world in which it exists, reason about those perceptions, and decide how to act on them in pursuit of its own agenda.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Shivakumar, Prashanth Gurunath; Potamianos, Alexandros; Lee, Sungbok; Narayanan, Shrikanth
Improving Speech Recognition for Children using Acoustic Adaptation and Pronunciation Modeling Journal Article
In: Proceedings of Workshop on Child Computer Interaction, 2014.
@article{shivakumar_improving_2014,
title = {Improving Speech Recognition for Children using Acoustic Adaptation and Pronunciation Modeling},
author = {Prashanth Gurunath Shivakumar and Alexandros Potamianos and Sungbok Lee and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Improving%20Speech%20Recognition%20for%20Children%20using%20Acoustic%20Adaptation%20and%20Pronunciation%20Modeling.pdf},
year = {2014},
date = {2014-09-01},
journal = {Proceedings of Workshop on Child Computer Interaction},
abstract = {Developing a robust Automatic Speech Recognition (ASR) system for children is a challenging task because of increased variability in acoustic and linguistic correlates as a function of young age. The acoustic variability is mainly due to the developmental changes associated with vocal tract growth. On the linguistic side, the variability is associated with limited knowledge of vocabulary, pronunciations and other linguistic constructs. This paper presents a preliminary study towards better acoustic modeling, pronunciation modeling and front-end processing for children’s speech. Results are presented as a function of age. Speaker adaptation significantly reduces mismatch and variability, improving recognition results across age groups. In addition, the introduction of pronunciation modeling shows promising performance improvements.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Huang, Che-Wei; Xiao, Bo; Georgiou, Panayiotis G.; Narayanan, Shrikanth S.
Unsupervised Speaker Diarization Using Riemannian Manifold Clustering Proceedings Article
In: Fifteenth Annual Conference of the International Speech Communication Association, Singapore, 2014.
@inproceedings{huang_unsupervised_2014,
title = {Unsupervised Speaker Diarization Using Riemannian Manifold Clustering},
author = {Che-Wei Huang and Bo Xiao and Panayiotis G. Georgiou and Shrikanth S. Narayanan},
url = {http://ict.usc.edu/pubs/Unsupervised%20Speaker%20Diarization%20Using%20Riemannian%20Manifold%20Clustering.pdf},
year = {2014},
date = {2014-09-01},
booktitle = {Fifteenth Annual Conference of the International Speech Communication Association},
address = {Singapore},
abstract = {We address the problem of speaker clustering for robust unsupervised speaker diarization. We model each speaker-homogeneous segment as one single full multivariate Gaussian probability density function (pdf) and take into consideration the Riemannian property of Gaussian pdfs. By assuming that segments from different speakers lie on different (possibly intersected) sub-manifolds of the manifold of Gaussian pdfs, we formulate the original problem as a Riemannian manifold clustering problem. To apply the computationally simple Riemannian locally linear embedding (LLE) algorithm, we impose a constraint on the length of each segment so as to ensure the fitness of single-Gaussian modeling and to increase the chance that all k-nearest neighbors of a pdf are from the same submanifold (speaker). Experiments on the microphone-recorded conversational interviews from the NIST 2010 speaker recognition evaluation set demonstrate promising results of less than 1% DER.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zadeh, AmirAli B.; Sagae, Kenji; Morency, Louis-Philippe
Towards Learning Nonverbal Identities from the Web: Automatically Identifying Visually Accentuated Words Proceedings Article
In: Intelligent Virtual Agents, pp. 496–503, Springer, Boston, MA, 2014.
@inproceedings{zadeh_towards_2014,
title = {Towards Learning Nonverbal Identities from the Web: Automatically Identifying Visually Accentuated Words},
author = {AmirAli B. Zadeh and Kenji Sagae and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Towards%20Learning%20Nonverbal%20Identities%20from%20the%20Web%20-%20Automatically%20Identifying%20Visually-Accentuated%20Words.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Intelligent Virtual Agents},
pages = {496–503},
publisher = {Springer},
address = {Boston, MA},
abstract = {This paper presents a novel long-term idea to learn automatically from online multimedia content, such as videos from YouTube channels, a portfolio of nonverbal identities in the form of computational representations of prototypical gestures of a speaker. As a first step towards this vision, this paper presents proof-of-concept experiments to automatically identify visually accentuated words from a collection of online videos of the same person. The experimental results are promising, with many accentuated words automatically identified and specific head motion patterns associated with these words.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Xu, Yuyu; Pelachaud, Catherine; Marsella, Stacy
Compound Gesture Generation: A Model Based on Ideational Units Proceedings Article
In: Intelligent Virtual Agents, pp. 477–491, Springer, Boston, MA, 2014.
@inproceedings{xu_compound_2014,
title = {Compound Gesture Generation: A Model Based on Ideational Units},
author = {Yuyu Xu and Catherine Pelachaud and Stacy Marsella},
url = {http://ict.usc.edu/pubs/Compound%20Gesture%20Generation%20-%20A%20Model%20Based%20on%20Ideational%20Units.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Intelligent Virtual Agents},
pages = {477–491},
publisher = {Springer},
address = {Boston, MA},
abstract = {This work presents a hierarchical framework that generates continuous gesture animation performance for virtual characters. As opposed to approaches that focus more on realizing individual gesture, the focus of this work is on the relation between gestures as part of an overall gesture performance. Following Calbris’ work [3], our approach is to structure the performance around ideational units and determine gestural features within and across these ideational units. Furthermore, we use Calbris’ work on the relation between form and meaning in gesture to help inform how individual gesture’s expressivity is manipulated. Our framework takes in high level communicative function descriptions, generates behavior descriptions and realizes them using our character animation engine. We define the specifications for these different levels of descriptions. Finally, we show the general results as well as experiments illustrating the impacts of the key features.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lhommet, Margot; Marsella, Stacy
Metaphoric Gestures: Towards Grounded Mental Spaces Proceedings Article
In: Intelligent Virtual Agents, pp. 264–274, Springer, Boston, MA, 2014.
@inproceedings{lhommet_metaphoric_2014,
title = {Metaphoric Gestures: Towards Grounded Mental Spaces},
author = {Margot Lhommet and Stacy Marsella},
url = {http://ict.usc.edu/pubs/Metaphoric%20Gestures%20-%20Towards%20Grounded%20Mental%20Spaces.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Intelligent Virtual Agents},
pages = {264–274},
publisher = {Springer},
address = {Boston, MA},
abstract = {Gestures are related to the mental states and unfolding processes of thought, reasoning and verbal language production. This is especially apparent in the case of metaphors and metaphoric gestures. For example, talking about the importance of an idea by calling it a big idea and gesturing to indicate large size is a manifestation of the use of metaphors in language and gesture. We propose a computational model of the influence of conceptual metaphors on gestures that maps from mental state representations of ideas to their expression in concrete, physical metaphoric gestures. This model relies on conceptual primary metaphors to map the abstract elements of the mental space to concrete physical elements that can be conveyed with gestures.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M.; Gratch, Jonathan; King, Aisha; Morency, Louis-Philippe
It’s only a computer: Virtual humans increase willingness to disclose Journal Article
In: Computers in Human Behavior, vol. 37, pp. 94–100, 2014, ISSN: 07475632.
@article{lucas_its_2014,
title = {It’s only a computer: Virtual humans increase willingness to disclose},
author = {Gale M. Lucas and Jonathan Gratch and Aisha King and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/It%27s%20Only%20a%20Computer%20-%20Virtual%20Humans%20Increase%20Willingness%20to%20Disclose.pdf},
doi = {10.1016/j.chb.2014.04.043},
issn = {07475632},
year = {2014},
date = {2014-08-01},
journal = {Computers in Human Behavior},
volume = {37},
pages = {94–100},
abstract = {Research has begun to explore the use of virtual humans (VHs) in clinical interviews (Bickmore, Gruber, & Picard, 2005). When designed as supportive and “safe” interaction partners, VHs may improve such screenings by increasing willingness to disclose information (Gratch, Wang, Gerten, & Fast, 2007). In health and mental health contexts, patients are often reluctant to respond honestly. In the context of health-screening interviews, we report a study in which participants interacted with a VH interviewer and were led to believe that the VH was controlled by either humans or automation. As predicted, compared to those who believed they were interacting with a human operator, participants who believed they were interacting with a computer reported lower fear of self-disclosure, lower impression management, displayed their sadness more intensely, and were rated by observers as more willing to disclose. These results suggest that automated VHs can help overcome a significant barrier to obtaining truthful patient information.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Azmandian, Mahdi; Bolas, Mark; Suma, Evan
Countering User Deviation During Redirected Walking Proceedings Article
In: Proceedings of the ACM Symposium on Applied Perception, Vancouver, British Columbia, Canada, 2014.
@inproceedings{azmandian_countering_2014,
title = {Countering User Deviation During Redirected Walking},
author = {Mahdi Azmandian and Mark Bolas and Evan Suma},
url = {http://ict.usc.edu/pubs/Countering%20User%20Deviation%20During%20Redirected%20Walking.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of the ACM Symposium on Applied Perception},
address = {Vancouver, British Columbia, Canada},
abstract = {Redirected Walking is a technique that leverages human perception characteristics to allow locomotion in virtual environments larger than the tracking area. Among the many redirection techniques, some strictly depend on the user’s current position and orientation, while more recent algorithms also depend on the user’s predicted behavior. This prediction serves as an input to a computationally expensive search to determine an optimal path. The search output is formulated as a series of gains to be applied at different stages along the path. For example, if a user is walking down a corridor, a natural prediction would be that the user will walk along a straight line down the corridor and will choose one of the possible directions with equal probability. In practice, deviations from the expected virtual path are inevitable, and as a result, the real-world path traversed will differ from the original prediction. These deviations can not only force the search to select a less optimal path in the next iteration, but in some cases also cause the user to go out of bounds, requiring resets that create a jarring experience for the user. We propose a method to account for these deviations by modifying the redirection gains per update frame, aiming to keep the user on the intended predicted physical path.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lubetich, Shannon; Sagae, Kenji
Data-driven Measurement of Child Language Development with Simple Syntactic Templates Proceedings Article
In: Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers, pp. 2151 – 2160, Dublin, Ireland, 2014.
@inproceedings{lubetich_data-driven_2014,
title = {Data-driven Measurement of Child Language Development with Simple Syntactic Templates},
author = {Shannon Lubetich and Kenji Sagae},
url = {http://ict.usc.edu/pubs/Data-driven%20Measurement%20of%20Child%20Language%20Development%20with%20Simple%20Syntactic%20Templates.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers},
pages = {2151 – 2160},
address = {Dublin, Ireland},
abstract = {When assessing child language development, researchers have traditionally had to choose between easily computable metrics focused on superficial aspects of language, and more expressive metrics that are carefully designed to cover specific syntactic structures and require substantial and tedious labor. Recent work has shown that existing expressive metrics for child language development can be automated and produce accurate results. We go a step further and propose that measurement of syntactic development can be performed automatically in a completely data-driven way without the need for definition of language-specific inventories of grammatical structures. As a crucial step in that direction, we show that four simple feature templates are as expressive of language development as a carefully crafted standard inventory of grammatical structures that is commonly used and has been validated empirically.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ustun, Volkan; Rosenbloom, Paul S.; Sagae, Kenji; Demski, Abram
Distributed Vector Representations of Words in the Sigma Cognitive Architecture Proceedings Article
In: Proceedings of the 7th Conference on Artificial General Intelligence 2014, Québec City, Canada, 2014.
@inproceedings{ustun_distributed_2014,
title = {Distributed Vector Representations of Words in the Sigma Cognitive Architecture},
author = {Volkan Ustun and Paul S. Rosenbloom and Kenji Sagae and Abram Demski},
url = {http://ict.usc.edu/pubs/Distributed%20Vector%20Representations%20of%20Words%20in%20the%20Sigma%20Cognitive%20Architecture.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of the 7th Conference on Artificial General Intelligence 2014},
address = {Québec City, Canada},
abstract = {Recently reported results with distributed-vector word representations in natural language processing make them appealing for incorporation into a general cognitive architecture like Sigma. This paper describes a new algorithm for learning such word representations from large, shallow information resources, and how this algorithm can be implemented via small modifications to Sigma. The effectiveness and speed of the algorithm are evaluated via a comparison of an external simulation of it with state-of-the-art algorithms. The results from more limited experiments with Sigma are also promising, but more work is required for it to reach the effectiveness and speed of the simulation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
von der Pahlen, Javier; Jimenez, Jorge; Danvoye, Etienne; Debevec, Paul; Fyffe, Graham; Alexander, Oleg
Digital Ira and Beyond: Creating Photoreal Real-Time Digital Characters Proceedings Article
In: SIGGRAPH '14 ACM SIGGRAPH 2014 Courses, pp. 1–384, ACM Press, Vancouver, British Columbia, Canada, 2014, ISBN: 978-1-4503-2962-0.
@inproceedings{von_der_pahlen_digital_2014,
title = {Digital Ira and Beyond: Creating Photoreal Real-Time Digital Characters},
author = {Javier von der Pahlen and Jorge Jimenez and Etienne Danvoye and Paul Debevec and Graham Fyffe and Oleg Alexander},
url = {http://ict.usc.edu/pubs/Digial%20Ira%20and%20Beyond%20-%20Creating%20Photoreal%20Real-Time%20Digital%20Characters%20(course%20notes).pdf},
doi = {10.1145/2614028.2615407},
isbn = {978-1-4503-2962-0},
year = {2014},
date = {2014-08-01},
booktitle = {SIGGRAPH '14 ACM SIGGRAPH 2014 Courses},
pages = {1–384},
publisher = {ACM Press},
address = {Vancouver, British Columbia, Canada},
abstract = {This course explains a complete process for creating next-generation real-time digital human characters, using the Digital Ira collaboration between USC ICT and Activision as an example, covering high-resolution facial scanning, blendshape rigging, video-based performance capture, animation compression, real-time skin and eye shading, hair, latest results, and future directions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Traum, David; Marsella, Stacy; Morency, Louis-Philippe; Shapiro, Ari; Gratch, Jonathan
A Shared, Modular Architecture for Developing Virtual Humans Proceedings Article
In: Proceedings of the Workshop on Architectures and Standards for Intelligent Virtual Agents at IVA 2014, pp. 4–7, Boston, MA, 2014.
@inproceedings{hartholt_shared_2014,
title = {A Shared, Modular Architecture for Developing Virtual Humans},
author = {Arno Hartholt and David Traum and Stacy Marsella and Louis-Philippe Morency and Ari Shapiro and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/A%20Shared%20Modular%20Architecture%20for%20Developing%20Virtual%20Humans.pdf},
doi = {10.2390/biecoll-wasiva2014-02},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of the Workshop on Architectures and Standards for Intelligent Virtual Agents at IVA 2014},
pages = {4–7},
address = {Boston, MA},
abstract = {Realizing the full potential of intelligent virtual agents requires compelling characters that can engage users in meaningful and realistic social interactions, and an ability to develop these characters effectively and efficiently. Advances are needed in individual capabilities, but perhaps more importantly, fundamental questions remain as to how best to integrate these capabilities into a single framework that allows us to efficiently create characters that can engage users in meaningful and realistic social interactions. This integration requires in-depth, inter-disciplinary understanding few individuals, or even teams of individuals, possess.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chatterjee, Moitreya; Park, Sunghyun; Shim, Han Suk; Sagae, Kenji; Morency, Louis-Philippe
Verbal Behaviors and Persuasiveness in Online Multimedia Content Proceedings Article
In: Proceedings of the Second Workshop on Natural Language Processing for Social Media (SocialNLP), pp. 50, Dublin, Ireland, 2014.
@inproceedings{chatterjee_verbal_2014,
title = {Verbal Behaviors and Persuasiveness in Online Multimedia Content},
author = {Moitreya Chatterjee and Sunghyun Park and Han Suk Shim and Kenji Sagae and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Verbal%20Behaviors%20and%20Persuasiveness%20in%20Online%20Multimedia%20Content.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of the Second Workshop on Natural Language Processing for Social Media (SocialNLP)},
pages = {50},
address = {Dublin, Ireland},
abstract = {Persuasive communication is an essential component of our daily lives, whether it is negotiating, reviewing a product, or campaigning for the acceptance of a point of view. With the rapid expansion of social media websites such as YouTube, Vimeo and ExpoTV, it is becoming ever more important and useful to understand persuasiveness in social multimedia content. In this paper we present a novel analysis of verbal behavior, based on lexical usage and paraverbal markers of hesitation, in the context of predicting persuasiveness in online multi-media content. Toward the end goal of predicting perceived persuasion, this work also explores the potential differences in verbal behavior of people expressing a positive opinion (e.g., a positive movie review) versus a negative one. The analysis is performed on a multimedia corpus of 1,000 movie review videos annotated for persuasiveness. Our results show that verbal behavior can be a significant predictor of persuasiveness in such online multimedia content.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Debevec, Paul
Creating a life-sized automultiscopic Morgan Spurlock for CNN’s “Inside Man” Proceedings Article
In: SIGGRAPH 2014 The 41st International Conference and Exhibition on Computer Graphics and Interactive Techniques, Vancouver, Canada, 2014.
@inproceedings{jones_creating_2014,
title = {Creating a life-sized automultiscopic Morgan Spurlock for CNN’s “Inside Man”},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Paul Debevec},
url = {http://ict.usc.edu/pubs/Creating%20a%20life-sized%20automulitscopic%20Morgan%20Spurlock%20for%20CNNs%20%e2%80%9cInside%20Man%e2%80%9d%20(abstract).pdf},
year = {2014},
date = {2014-08-01},
booktitle = {SIGGRAPH 2014 The 41st International Conference and Exhibition on Computer Graphics and Interactive Techniques},
address = {Vancouver, Canada},
abstract = {We present a system for capturing and rendering life-size 3D human subjects on an automultiscopic display. Automultiscopic 3D displays allow a large number of viewers to experience 3D content simultaneously without the hassle of special glasses or head gear.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Malandrakis, Nikolaos; Falcone, Michael; Vaz, Colin; Bisogni, Jesse; Potamianos, Alexandros; Narayanan, Shrikanth
SAIL: Sentiment analysis using semantic similarity and contrast features Proceedings Article
In: Proceedings of SemEval 2014, pp. 512–516, Dublin, Ireland, 2014.
@inproceedings{malandrakis_sail_2014,
title = {SAIL: Sentiment analysis using semantic similarity and contrast features},
author = {Nikolaos Malandrakis and Michael Falcone and Colin Vaz and Jesse Bisogni and Alexandros Potamianos and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/SAIL%20-%20Sentiment%20Analysis%20using%20Semantic%20Similarity%20and%20Contrast%20Features.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of SemEval 2014},
pages = {512–516},
address = {Dublin, Ireland},
abstract = {This paper describes our submission to SemEval-2014 Task 9: Sentiment Analysis in Twitter. Our model is primarily a lexicon-based one, augmented by some preprocessing, including detection of Multi-Word Expressions, negation propagation and hashtag expansion, and by the use of pairwise semantic similarity at the tweet level. Feature extraction is repeated for sub-strings, and contrasting sub-string features are used to better capture complex phenomena like sarcasm. The resulting supervised system, using a Naive Bayes model, achieved high performance in classifying entire tweets, ranking 7th on the main set and 2nd when applied to sarcastic tweets.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}