Publications
Miguel, Eder; Feng, Andrew; Xu, Yuyu; Shapiro, Ari
Towards Cloth-Manipulating Characters Proceedings Article
In: CASA 2014, Houston, Texas, 2014.
@inproceedings{miguel_towards_2014,
title = {Towards Cloth-Manipulating Characters},
author = {Eder Miguel and Andrew Feng and Yuyu Xu and Ari Shapiro},
url = {http://ict.usc.edu/pubs/Towards%20Cloth-Manipulating%20Characters.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {CASA 2014},
address = {Houston, Texas},
abstract = {Cloth manipulation is a common action in humans that current animated virtual characters are not able to perform due to its complexity. In this paper we focus on dressing-up, which is probably the most common action involving cloth. We identify the steps required to perform the task and describe the systems responsible for each of them. Our results show a character that is able to put on a scarf and react to cloth collision and over-stretching events.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chatterjee, Moitreya; Stratou, Giota; Scherer, Stefan; Morency, Louis-Philippe
Context-Based Signal Descriptors of Heart-Rate Variability for Anxiety Assessment Proceedings Article
In: Acoustics, Speech and Signal Processing (ICASSP), 2014 IEEE International Conference on, pp. 3631–3635, IEEE, Florence, Italy, 2014.
@inproceedings{chatterjee_context-based_2014,
title = {Context-Based Signal Descriptors of Heart-Rate Variability for Anxiety Assessment},
author = {Moitreya Chatterjee and Giota Stratou and Stefan Scherer and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Context-based%20signal%20descriptors%20of%20heart-rate%20variability%20for%20anxiety%20assessment.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Acoustics, Speech and Signal Processing (ICASSP), 2014 IEEE International Conference on},
pages = {3631–3635},
publisher = {IEEE},
address = {Florence, Italy},
abstract = {In this paper, we investigate the role of multiple context-based heart-rate variability descriptors for evaluating a person’s psychological health, specifically anxiety disorders. The descriptors are extracted from visually sensed heart-rate signals obtained during the course of a semi-structured interview with a virtual human and can potentially integrate question context as well. The proposed descriptors are motivated by prior related work and are constructed using histogram-based approaches and time- and frequency-domain analysis of heart-rate variability. In order to contextualize our descriptors, we use information about the polarity and intimacy levels of the questions asked. Our experiments reveal that the descriptors, both with and without context, perform far better than chance in predicting anxiety. Furthermore, we perform on par with the state of the art in predicting anxiety and other psychological disorders when we integrate the question context information into the descriptors.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chiu, Chung-Cheng; Marsella, Stacy C.
Gesture Generation with Low-Dimensional Embeddings Proceedings Article
In: Proceedings of the 13th International Conference on Autonomous Agents and Multiagent Systems, pp. 781–788, Paris, France, 2014.
@inproceedings{chiu_gesture_2014,
title = {Gesture Generation with Low-Dimensional Embeddings},
author = {Chung-Cheng Chiu and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Gesture%20generation%20with%20low-dimensional%20embeddings.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Proceedings of the 13th International Conference on Autonomous Agents and Multiagent Systems},
pages = {781–788},
address = {Paris, France},
abstract = {There is a growing demand for embodied agents capable of engaging in face-to-face dialog using the same verbal and nonverbal behavior that people use. The focus of our work is generating coverbal hand gestures for these agents, gestures coupled to the content and timing of speech. A common approach to achieve this is to use motion capture of an actor or hand-crafted animations for each utterance. An alternative machine learning approach that saves development effort is to learn a general gesture controller that can generate behavior for novel utterances. However, learning a direct mapping from speech to gesture movement faces the complexity of inferring the relation between the two time series of speech and gesture motion. We present a novel machine learning approach that decomposes the overall learning problem into learning two mappings: from speech to a gestural annotation and from gestural annotation to gesture motion. The combined model learns to synthesize natural gesture animation from speech audio. We assess the quality of generated animations by comparing them with the result generated by a previous approach that learns a direct mapping. Results from a human subject study show that our framework is perceived to be significantly better.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Lucas, Gale M.; King, Aisha; Morency, Louis-Philippe
It’s Only a Computer: The Impact of Human-agent Interaction in Clinical Interviews Proceedings Article
In: Proceedings of the 13th International Conference on Autonomous Agents and Multiagent Systems, pp. 85–92, International Foundation for Autonomous Agents and Multiagent Systems, Paris, France, 2014.
@inproceedings{gratch_its_2014,
title = {It’s Only a Computer: The Impact of Human-agent Interaction in Clinical Interviews},
author = {Jonathan Gratch and Gale M. Lucas and Aisha King and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/It%E2%80%99s%20only%20a%20computer%20-%20The%20impact%20of%20human-agent%20interaction%20in%20clinical%20interviews.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Proceedings of the 13th International Conference on Autonomous Agents and Multiagent Systems},
pages = {85–92},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Paris, France},
abstract = {Research has begun to explore the use of virtual humans (VHs) in medical interviews [1]. When designed as supportive and “safe” interaction partners, VHs may improve such screenings by encouraging patients to disclose more personal information [2-3]. In medical contexts, patients often feel resistance to self-disclosure and engage in impression management to be viewed more positively by healthcare providers. This paper provides the first empirical evidence that VHs can reduce such resistance and impression management. In the context of health-screening interviews, we report a study in which participants interacted with a VH that was either teleoperated by humans (Wizard-of-Oz) or fully-automated (AI). Independently, we manipulated whether participants believed the VH was controlled by humans or automation. As predicted, participants who believed they were interacting with a computer reported lower resistance to self-disclosure, lower impression management and higher system usability than those who believed they were interacting with a human operator. Whether the virtual human was actually operated by a human or AI only affected ratings of the system’s usability. These results suggest that automated VHs can help overcome a significant barrier to obtaining truthful patient information in medical domains.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Malandrakis, Nikolaos; Potamianos, Alexandros; Hsu, Kean J.; Babeva, Kalina N.; Feng, Michelle C.; Davison, Gerald C.; Narayanan, Shrikanth
Affective Language Model Adaptation via Corpus Selection Proceedings Article
In: Proceedings of ICASSP, Florence, Italy, 2014.
@inproceedings{malandrakis_affective_2014,
title = {Affective Language Model Adaptation via Corpus Selection},
author = {Nikolaos Malandrakis and Alexandros Potamianos and Kean J. Hsu and Kalina N. Babeva and Michelle C. Feng and Gerald C. Davison and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Affective%20language%20model%20adaptation%20via%20corpus%20selection.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Proceedings of ICASSP},
address = {Florence, Italy},
abstract = {Motivated by methods used in language modeling and grammar induction, we propose the use of pragmatic constraints and perplexity as criteria to filter the unlabeled data used to generate the semantic similarity model. We investigate unsupervised adaptation algorithms of the semantic-affective models proposed in [1, 2]. Affective ratings at the utterance level are generated based on an emotional lexicon, which in turn is created using a semantic (similarity) model estimated over raw, unlabeled text. The proposed adaptation method creates task-dependent semantic similarity models and task-dependent word/term affective ratings. The proposed adaptation algorithms are tested on anger/distress detection of transcribed speech data and sentiment analysis in tweets showing significant relative classification error reduction of up to 10%.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Vaz, Colin; Tsiartas, Andreas; Narayanan, Shrikanth
Energy-Constrained Minimum Variance Response Filter for Robust Vowel Spectral Estimation Proceedings Article
In: Acoustics, Speech and Signal Processing (ICASSP), 2014 IEEE International Conference on, pp. 6275–6279, IEEE, 2014.
@inproceedings{vaz_energy-constrained_2014,
title = {Energy-Constrained Minimum Variance Response Filter for Robust Vowel Spectral Estimation},
author = {Colin Vaz and Andreas Tsiartas and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Energy-Constrained%20Minimum%20Variance%20Response%20Filter%20for%20Robust%20Vowel%20Spectral%20Estimation.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Acoustics, Speech and Signal Processing (ICASSP), 2014 IEEE International Conference on},
pages = {6275–6279},
publisher = {IEEE},
abstract = {We propose the energy-constrained minimum-variance response (ECMVR) filter to perform robust spectral estimation of vowels. We modify the distortionless constraint of the minimum-variance distortionless response (MVDR) filter and add an energy constraint to its formulation to mitigate the influence of noise on the speech spectrum. We test our ECMVR filter on a vowel classification task with different background noises at various SNR levels. Results show that vowels are classified more accurately in certain noises using MFCC and PLP features extracted from the ECMVR spectrum compared to using features extracted from the FFT and MVDR spectra.},
keywords = {frequency estimation, MVDR, robust signal processing, spectral estimation},
pubstate = {published},
tppubtype = {inproceedings}
}
Miller, Chreston; Quek, Francis; Morency, Louis-Philippe
Search Strategies for Pattern Identification in Multimodal Data: Three Case Studies Proceedings Article
In: pp. 273–280, ACM Press, 2014, ISBN: 978-1-4503-2782-4.
@inproceedings{miller_search_2014,
title = {Search Strategies for Pattern Identification in Multimodal Data: Three Case Studies},
author = {Chreston Miller and Francis Quek and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Search%20Strategies%20for%20Pattern%20Identification%20in%20Multimodal%20Data%20Three%20Case%20Studies.pdf},
doi = {10.1145/2578726.2578761},
isbn = {978-1-4503-2782-4},
year = {2014},
date = {2014-04-01},
pages = {273–280},
publisher = {ACM Press},
abstract = {The analysis of multimodal data benefits from meaningful search and retrieval. This paper investigates strategies of searching multimodal data for event patterns. Through three longitudinal case studies, we observed researchers exploring and identifying event patterns in multimodal data. The events were extracted from different multimedia signal sources ranging from annotated video transcripts to interaction logs. Each researcher’s data has varying temporal characteristics (e.g., sparse, dense, or clustered) that posed several challenges for identifying relevant patterns. We identify unique search strategies and better understand the aspects that contributed to each.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
McAlinden, Ryan; Pynadath, David V.; Hill, Randall W.
UrbanSim: Using Social Simulation to Train for Stability Operations Book Section
In: Understanding Megacities with the Reconnaissance, Surveillance, and Intelligence Paradigm, 2014.
@incollection{mcalinden_urbansim_2014,
title = {UrbanSim: Using Social Simulation to Train for Stability Operations},
author = {Ryan McAlinden and David V. Pynadath and Randall W. Hill},
url = {http://ict.usc.edu/pubs/UrbanSim%20-%20Using%20Social%20Simulation%20to%20Train%20for%20Stability%20Operations.pdf},
year = {2014},
date = {2014-04-01},
booktitle = {Understanding Megacities with the Reconnaissance, Surveillance, and Intelligence Paradigm},
abstract = {As the United States reorients itself toward a period of reduced military capacity and away from large-footprint military engagements, there is an imperative to keep commanders and decision-makers mentally sharp and prepared for the next ‘hot spot.’ One potential hot spot, megacities, presents a unique set of challenges due to their expansive, often interwoven ethnographic landscapes, and their overall lack of understanding by many western experts. Social simulation using agent-based models is one approach for furthering our understanding of distant societies and their security implications, and for preparing leaders to engage these populations if and when the need arises. Over the past ten years, the field of social simulation has become decidedly cross-disciplinary, including academics and practitioners from the fields of sociology, anthropology, psychology, artificial intelligence and engineering. This has led to an unparalleled advancement in social simulation theory and practice, and as new threats evolve to operate within dense but expansive urban environments, social simulation has a unique opportunity to shape our perspectives and develop knowledge that may otherwise be difficult to obtain. This article presents a social simulation-based training application (UrbanSim) developed by the University of Southern California’s Institute for Creative Technologies (USC-ICT) in partnership with the US Army’s School for Command Preparation (SCP). UrbanSim has been in use since 2009 to help Army commanders understand and train for missions in complex, uncertain environments. The discussion describes how the social simulation-based training application was designed to develop and hone commanders' skills for conducting missions in environs with multifaceted social, ethnic and political fabrics. We present a few considerations when attempting to recreate dense, rapidly growing population centers, and how the integration of real-world data into social simulation frameworks can add a level of realism and understanding not possible even a few years ago.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
de Melo, Celso M.; Paiva, Ana; Gratch, Jonathan
Emotion in Games Book Section
In: Agius, Harry; Angelides, Marios (Ed.): Handbook of Digital Games, Wiley-IEEE Press, New Jersey, 2014, ISBN: 978-1-118-32803-3.
@incollection{melo_emotion_2014,
title = {Emotion in Games},
author = {Celso M. de Melo and Ana Paiva and Jonathan Gratch},
editor = {Harry Agius and Marios Angelides},
url = {http://www.amazon.com/Handbook-Digital-Games-Marios-Angelides/dp/1118328035},
isbn = {978-1-118-32803-3},
year = {2014},
date = {2014-03-01},
booktitle = {Handbook of Digital Games},
publisher = {Wiley-IEEE Press},
address = {New Jersey},
abstract = {Growing interest in the study of emotion in the behavioral sciences has led to the development of several psychological theories of human emotion. These theories, in turn, inspired computer scientists to propose computational models that synthesize, express, recognize and interpret emotion. This cross-disciplinary research on emotion introduces new possibilities for digital games. Complementing techniques from the arts for drama and storytelling, these models can be used to drive believable non-player characters that experience properly-motivated emotions and express them appropriately at the right time; these theories can also help interpret the emotions the human player is experiencing and suggest adequate reactions in the game. This chapter reviews relevant psychological theories of emotion as well as computational models of emotion and discusses implications for games. We give special emphasis to appraisal theories of emotion, undeniably one of the most influential theoretical perspectives within computational research. In appraisal theories, emotions arise from cognitive appraisal of events (e.g., is this event conducive to my goals? Who is responsible for this event? Can I cope with this event?). According to the pattern of appraisals that occur, different emotions are experienced and expressed. Appraisal theories can, therefore, be used to synthesize emotions in games, which are then expressed in different ways. Complementarily, reverse appraisal has recently been proposed as a theory for the interpretation of emotion. Accordingly, people are argued to retrieve, from emotion displays, information about how others are appraising the ongoing interaction, which then leads to inferences about the others’ intentions. Reverse appraisal can, thus, be used to infer how human players, from their emotion displays, are appraising the game experience and, from this information, what their intentions in the game are. This information can then be used to adjust game parameters or have non-player characters react to the player’s intentions and, thus, contribute to improve the player’s overall experience.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Azmandian, Mahdi; Yahata, Rhys; Bolas, Mark; Suma, Evan
An Enhanced Steering Algorithm for Redirected Walking in Virtual Environments Proceedings Article
In: IEEE Virtual Reality 2014, pp. 65–66, 2014.
@inproceedings{azmandian_enhanced_2014,
title = {An Enhanced Steering Algorithm for Redirected Walking in Virtual Environments},
author = {Mahdi Azmandian and Rhys Yahata and Mark Bolas and Evan Suma},
url = {http://ict.usc.edu/pubs/An%20Enhanced%20Steering%20Algorithm%20for%20Redirected%20Walking%20in%20Virtual%20Environments.pdf},
year = {2014},
date = {2014-03-01},
booktitle = {IEEE Virtual Reality 2014},
pages = {65–66},
abstract = {Redirected walking techniques enable natural locomotion through immersive virtual environments that are considerably larger than the available real world walking space. However, the most effective strategy for steering the user remains an open question, as most previously presented algorithms simply redirect toward the center of the physical space. In this work, we present a theoretical framework that plans a walking path through a virtual environment and calculates the parameters for combining translation, rotation, and curvature gains such that the user can traverse a series of defined waypoints efficiently based on a utility function. This function minimizes the number of overt reorientations to avoid introducing potential breaks in presence. A notable advantage of this approach is that it leverages knowledge of the layout of both the physical and virtual environments to enhance the steering strategy.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Sukthankar, Gita; Goldman, Robert P.; Geib, Christopher; Pynadath, David V.; Bui, Hung
Plan, Activity, and Intent Recognition: Theory and Practice Book
Morgan Kaufmann, 2014, ISBN: 0-12-398532-3.
@book{sukthankar_plan_2014,
title = {Plan, Activity, and Intent Recognition: Theory and Practice},
author = {Gita Sukthankar and Robert P. Goldman and Christopher Geib and David V. Pynadath and Hung Bui},
url = {http://www.amazon.com/Plan-Activity-Intent-Recognition-Practice/dp/0123985323/ref=sr_1_1?s=books&ie=UTF8&qid=1408747877&sr=1-1&keywords=Plan%2C+Activity%2C+and+Intent+Recognition%3A+Theory+and+Practice},
isbn = {0-12-398532-3},
year = {2014},
date = {2014-03-01},
publisher = {Morgan Kaufmann},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
Visser, Thomas; Traum, David; DeVault, David; Akker, Rieks
A model for incremental grounding in spoken dialogue systems Journal Article
In: Journal on Multimodal User Interfaces, vol. 8, no. 1, pp. 61–73, 2014, ISSN: 1783-7677, 1783-8738.
@article{visser_model_2014,
title = {A model for incremental grounding in spoken dialogue systems},
author = {Thomas Visser and David Traum and David DeVault and Rieks Akker},
url = {http://ict.usc.edu/pubs/A%20Model%20for%20Incremental%20Grounding%20in%20Spoken%20Dialogue%20Systems.pdf},
doi = {10.1007/s12193-013-0147-7},
issn = {1783-7677, 1783-8738},
year = {2014},
date = {2014-03-01},
journal = {Journal on Multimodal User Interfaces},
volume = {8},
number = {1},
pages = {61–73},
abstract = {We present a computational model of incremental grounding, including state updates and action selection. The model is inspired by corpus-based examples of overlapping utterances of several sorts, including backchannels and completions. The model has also been partially implemented within a virtual human system that includes incremental understanding, and can be used to track grounding and provide overlapping verbal and non-verbal behaviors from a listener, before a speaker has completed her utterance.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Koh, Sukjin; Gordon, Andrew S; Wienberg, Christopher; Sood, Sara O; Morley, Stephanie; Burke, Deborah M
Stroke Experiences in Weblogs: A Feasibility Study of Sex Differences Journal Article
In: Journal of Medical Internet Research, vol. 16, no. 3, pp. e84, 2014, ISSN: 1438-8871.
@article{koh_stroke_2014,
title = {Stroke Experiences in Weblogs: A Feasibility Study of Sex Differences},
author = {Sukjin Koh and Andrew S Gordon and Christopher Wienberg and Sara O Sood and Stephanie Morley and Deborah M Burke},
url = {http://ict.usc.edu/pubs/Stroke%20Experiences%20in%20Weblogs%20-%20A%20Feasibility%20Study%20of%20Sex%20Differences.pdf},
doi = {10.2196/jmir.2838},
issn = {1438-8871},
year = {2014},
date = {2014-03-01},
journal = {Journal of Medical Internet Research},
volume = {16},
number = {3},
pages = {e84},
abstract = {Research on cerebral stroke symptoms using hospital records has reported that women experience more nontraditional symptoms of stroke (eg, mental status change, pain) than men do. This is an important issue because nontraditional symptoms may delay the decision to get medical assistance and increase the difficulty of correct diagnosis. In the present study, we investigate sex differences in the stroke experience as described in stories on weblogs.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Khademi, Mahmoud; Morency, Louis-Philippe
Relative Facial Action Unit Detection Proceedings Article
In: Proceedings of the Winter Conference on Applications of Computer Vision, 2014.
@inproceedings{khademi_relative_2014,
title = {Relative Facial Action Unit Detection},
author = {Mahmoud Khademi and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Relative%20Facial%20Action%20Unit%20Detection.pdf},
year = {2014},
date = {2014-03-01},
booktitle = {Proceedings of the Winter Conference on Applications of Computer Vision},
abstract = {This paper presents a subject-independent facial action unit (AU) detection method by introducing the concept of relative AU detection, for scenarios where the neutral face is not provided. We propose a new classification objective function which analyzes the temporal neighborhood of the current frame to decide if the expression recently increased, decreased or showed no change. This approach is a significant change from the conventional absolute method which decides about AU classification using the current frame, without an explicit comparison with its neighboring frames. Our proposed method improves robustness to individual differences such as face scale and shape, age-related wrinkles, and transitions among expressions (e.g., lower intensity of expressions). Our experiments on three publicly available datasets (Extended Cohn-Kanade (CK+), Bosphorus, and DISFA databases) show significant improvement of our approach over conventional absolute techniques.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Nagano, Koki; Liu, Jing; Busch, Jay; Yu, Xueming; Bolas, Mark; Debevec, Paul
Interpolating vertical parallax for an autostereoscopic three-dimensional projector array Journal Article
In: Journal of Electronic Imaging, vol. 23, no. 1, 2014, ISSN: 1017-9909.
@article{jones_interpolating_2014,
title = {Interpolating vertical parallax for an autostereoscopic three-dimensional projector array},
author = {Andrew Jones and Koki Nagano and Jing Liu and Jay Busch and Xueming Yu and Mark Bolas and Paul Debevec},
url = {http://electronicimaging.spiedigitallibrary.org/article.aspx?doi=10.1117/1.JEI.23.1.011005},
doi = {10.1117/1.JEI.23.1.011005},
issn = {1017-9909},
year = {2014},
date = {2014-03-01},
journal = {Journal of Electronic Imaging},
volume = {23},
number = {1},
abstract = {We present a technique for achieving tracked vertical parallax for multiple users using a variety of autostereoscopic projector array setups, including front- and rear-projection and curved display surfaces. This hybrid parallax approach allows for immediate horizontal parallax as viewers move left and right and tracked parallax as they move up and down, allowing cues such as three-dimensional (3-D) perspective and eye contact to be conveyed faithfully. We use a low-cost RGB-depth sensor to simultaneously track multiple viewer head positions in 3-D space, and we interactively update the imagery sent to the array so that imagery directed to each viewer appears from a consistent and correct vertical perspective. Unlike previous work, we do not assume that the imagery sent to each projector in the array is rendered from a single vertical perspective. This lets us apply hybrid parallax to displays where a single projector forms parts of multiple viewers’ imagery. Thus, each individual projected image is rendered with multiple centers of projection, and might show an object from above on the left and from below on the right. We demonstrate this technique using a dense horizontal array of pico-projectors aimed into an anisotropic vertical diffusion screen, yielding 1.5 deg angular resolution over 110 deg field of view. To create a seamless viewing experience for multiple viewers, we smoothly interpolate the set of viewer heights and distances on a per-vertex basis across the array’s field of view, reducing image distortion, cross talk, and artifacts from tracking errors.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hill, Randall W.
Virtual Reality and Leadership Development Book Section
In: Using Experience to Develop Leadership Talent: How Organizations Leverage On-The-Job Development, pp. 286–312, John Wiley & Sons, Inc., 2014, ISBN: 978-1-118-76783-2.
@incollection{hill_virtual_2014,
title = {Virtual Reality and Leadership Development},
author = {Randall W. Hill},
url = {http://www.amazon.com/dp/1118767837/ref=cm_sw_su_dp},
isbn = {978-1-118-76783-2},
year = {2014},
date = {2014-03-01},
booktitle = {Using Experience to Develop Leadership Talent: How Organizations Leverage On-The-Job Development},
pages = {286–312},
publisher = {John Wiley & Sons, Inc.},
series = {J-B SIOP Professional Practice Series (Book 1)},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Shapiro, Ari; Feng, Andrew; Wang, Ruizhe; Medioni, Gerard; Bolas, Mark; Suma, Evan A.
Automatic Acquisition and Animation of Virtual Avatars Proceedings Article
In: Virtual Reality (VR), 2014 IEEE, pp. 185–186, IEEE, Minneapolis, Minnesota, 2014, ISBN: 978-1-4799-2871-2.
@inproceedings{shapiro_automatic_2014,
title = {Automatic Acquisition and Animation of Virtual Avatars},
author = {Ari Shapiro and Andrew Feng and Ruizhe Wang and Gerard Medioni and Mark Bolas and Evan A. Suma},
url = {http://ict.usc.edu/pubs/Automatic%20acquisition%20and%20animation%20of%20virtual%20avatars.pdf},
doi = {10.1109/VR.2014.6802113},
isbn = {978-1-4799-2871-2},
year = {2014},
date = {2014-03-01},
booktitle = {Virtual Reality (VR), 2014 IEEE},
pages = {185–186},
publisher = {IEEE},
address = {Minneapolis, Minnesota},
abstract = {The USC Institute for Creative Technologies will demonstrate a pipeline for automatic reconstruction and animation of lifelike 3D avatars acquired by rotating the user's body in front of a single Microsoft Kinect sensor. Based on a fusion of state-of-the-art techniques in computer vision, graphics, and animation, this approach can produce a fully rigged character model suitable for real-time virtual environments in less than four minutes.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Park, Sunghyun; Shoemark, Philippa; Morency, Louis-Philippe
Toward Crowdsourcing Micro-Level Behavior Annotations - The Challenges of Interface, Training, and Generalization Proceedings Article
In: Proceedings of the 19th International Conference on Intelligent User Interfaces, ACM, Haifa, Israel, 2014.
@inproceedings{park_toward_2014,
title = {Toward Crowdsourcing Micro-Level Behavior Annotations - The Challenges of Interface, Training, and Generalization},
author = {Sunghyun Park and Philippa Shoemark and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Toward%20Crowdsourcing%20Micro-Level%20Behavior%20Annotations%20-%20The%20Challenges%20of%20Interface,%20Training,%20and%20Generalization.pdf},
year = {2014},
date = {2014-02-01},
booktitle = {Proceedings of the 19th International Conference on Intelligent User Interfaces},
publisher = {ACM},
address = {Haifa, Israel},
abstract = {Research that involves human behavior analysis usually requires laborious and costly efforts for obtaining micro-level behavior annotations on a large video corpus. With the emerging paradigm of crowdsourcing however, these efforts can be considerably reduced. We first present OCTAB (Online Crowdsourcing Tool for Annotations of Behaviors), a web-based annotation tool that allows precise and convenient behavior annotations in videos, directly portable to popular crowdsourcing platforms. As part of OCTAB, we introduce a training module with specialized visualizations. The training module’s design was inspired by an observational study of local experienced coders, and it enables an iterative procedure for effectively training crowd workers online. Finally, we present an extensive set of experiments that evaluates the feasibility of our crowdsourcing approach for obtaining micro-level behavior annotations in videos, showing the reliability improvement in annotation accuracy when properly training online crowd workers. We also show the generalization of our training approach to a new independent video corpus.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Leuski, Anton; Gowrisankar, Rasiga; Richmond, Todd; Shapiro, Ari; Xu, Yuyu; Feng, Andrew
Mobile Personal Healthcare Mediated by Virtual Humans Proceedings Article
In: Proceedings of the Companion Publication of the 19th International Conference on Intelligent User Interfaces, pp. 21–24, ACM Press, Haifa, Israel, 2014, ISBN: 978-1-4503-2729-9.
@inproceedings{leuski_mobile_2014,
title = {Mobile Personal Healthcare Mediated by Virtual Humans},
author = {Anton Leuski and Rasiga Gowrisankar and Todd Richmond and Ari Shapiro and Yuyu Xu and Andrew Feng},
url = {http://dl.acm.org/citation.cfm?doid=2559184.2559200},
doi = {10.1145/2559184.2559200},
isbn = {978-1-4503-2729-9},
year = {2014},
date = {2014-02-01},
booktitle = {Proceedings of the Companion Publication of the 19th International Conference on Intelligent User Interfaces},
pages = {21–24},
publisher = {ACM Press},
address = {Haifa, Israel},
abstract = {We demonstrate Ally, a prototype interface for a consumer-level medical diagnostic device. It is an interactive virtual character, a Virtual Human (VH), that listens to the user's concerns, collects and processes sensor data, offers advice, guides the user through self-administered medical tests, and answers the user's questions. The primary focus of this demo is on the VH; we describe and demonstrate the technologies for language analysis, dialogue management, response generation and presentation. The sensing and medical decision making components are simulated in the current system, but possible applications and extensions are discussed.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Traum, David
A semi-automated evaluation metric for dialogue model coherence Proceedings Article
In: Fifth International Workshop on Spoken Dialogue Systems, pp. 141–150, 2014.
@inproceedings{gandhe_semi-automated_2014,
title = {A semi-automated evaluation metric for dialogue model coherence},
author = {Sudeep Gandhe and David Traum},
url = {http://ict.usc.edu/pubs/A%20semi-automated%20evaluation%20metric%20for%20dialogue%20model%20coherence.pdf},
year = {2014},
date = {2014-01-01},
booktitle = {Fifth International Workshop on Spoken Dialogue Systems},
pages = {141–150},
abstract = {We propose a new metric, Voted Appropriateness, which can be used to automatically evaluate dialogue policy decisions, once some wizard data has been collected. We show that this metric outperforms a previously proposed metric, Weak Agreement. We also present a taxonomy for dialogue model evaluation schemas, and orient our new metric within this taxonomy.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}