Publications
Nazarian, Angela; Nouri, Elnaz; Traum, David
Initiative Patterns in Dialogue Genres Inproceedings
In: Proceedings of Semdial 2014, Edinburgh, UK, 2014.
@inproceedings{nazarian_initiative_2014,
title = {Initiative Patterns in Dialogue Genres},
author = {Angela Nazarian and Elnaz Nouri and David Traum},
url = {http://ict.usc.edu/pubs/Initiative%20Patterns%20in%20Dialogue%20Genres.pdf},
year = {2014},
date = {2014-09-01},
booktitle = {Proceedings of Semdial 2014},
address = {Edinburgh, UK},
abstract = {One of the ways of distinguishing different dialogue genres is the differences in patterns of interaction between the participants. Morbini et al. (2013) informally define dialogue genres on the basis of features like user vs. system initiative, amongst other criteria. In this paper, we apply the multi-label initiative annotation scheme and related features from (Nouri and Traum, 2014) to a set of dialogue corpora from different domains. In our initial study, we examine two question-answering domains, a “slot-filling” service application domain, and several human-human negotiation domains.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Baltrušaitis, Tadas; Robinson, Peter; Morency, Louis-Philippe
Continuous Conditional Neural Fields for Structured Regression Incollection
In: Computer Vision–ECCV 2014, pp. 593–608, Springer, 2014.
@incollection{baltrusaitis_continuous_2014,
title = {Continuous Conditional Neural Fields for Structured Regression},
author = {Tadas Baltrušaitis and Peter Robinson and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Continuous%20Conditional%20Neural%20Fields%20for%20Structured%20Regression.pdf},
year = {2014},
date = {2014-09-01},
booktitle = {Computer Vision–ECCV 2014},
pages = {593--608},
publisher = {Springer},
abstract = {An increasing number of computer vision and pattern recognition problems require structured regression techniques. Problems like human pose estimation, unsegmented action recognition, emotion prediction and facial landmark detection have temporal or spatial output dependencies that regular regression techniques do not capture. In this paper we present continuous conditional neural fields (CCNF), a novel structured regression model that can learn non-linear input-output dependencies, and model temporal and spatial output relationships of varying length sequences. We propose two instances of our CCNF framework: Chain-CCNF for time series modelling, and Grid-CCNF for spatial relationship modelling. We evaluate our model on five public datasets spanning three different regression problems: facial landmark detection in the wild, emotion prediction in music and facial action unit recognition. Our CCNF model demonstrates state-of-the-art performance on all of the datasets used.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
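As context for the CCNF abstract above: the model family defines a conditional density over a continuous output sequence y given inputs x through neural vertex features and smoothness-enforcing edge features. A sketch of that published formulation, reproduced from the CCRF/CCNF literature and not a substitute for the paper's full definition:

\[
P(\mathbf{y}\mid\mathbf{x}) = \frac{\exp\Psi}{\int_{\mathbf{y}}\exp\Psi\,d\mathbf{y}},\qquad
\Psi = \sum_{i}\sum_{k}\alpha_k f_k(y_i,\mathbf{x}) + \sum_{i,j}\sum_{l}\beta_l g_l(y_i,y_j),
\]
\[
f_k(y_i,\mathbf{x}) = -\bigl(y_i - h(\boldsymbol{\theta}_k,\mathbf{x}_i)\bigr)^2,\qquad
h(\boldsymbol{\theta},\mathbf{x}) = \frac{1}{1+e^{-\boldsymbol{\theta}^{\mathsf{T}}\mathbf{x}}},\qquad
g_l(y_i,y_j) = -\tfrac{1}{2}\,S^{(l)}_{ij}\,(y_i-y_j)^2,
\]

with α_k, β_l ≥ 0 so the partition function stays finite; the neighborhood matrices S^{(l)} encode a chain (Chain-CCNF) or a grid (Grid-CCNF).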
Feng, Andrew; Shapiro, Ari; Lhommet, Margot; Marsella, Stacy
Embodied Autonomous Agents Incollection
In: Handbook of Virtual Environments: Design, Implementation, and Applications, pp. 335–352, 2014.
@incollection{feng_embodied_2014,
title = {Embodied Autonomous Agents},
author = {Andrew Feng and Ari Shapiro and Margot Lhommet and Stacy Marsella},
url = {http://books.google.com/books?hl=en&lr=&id=7zzSBQAAQBAJ&oi=fnd&pg=PP1&dq=+Handbook+of+Virtual+Environments&ots=Vx3ia0S2Uu&sig=LaVbSdoG3FahlbVYbuCxLmKgFIA#v=onepage&q=Handbook%20of%20Virtual%20Environments&f=false},
year = {2014},
date = {2014-09-01},
booktitle = {Handbook of Virtual Environments: Design, Implementation, and Applications},
pages = {335--352},
abstract = {Over the last decade, virtual environments have been used extensively for a wide range of applications, from training systems to video games. Virtual humans are animated characters that are designed to populate these environments and to interact with the objects of the world as well as with the user. A virtual agent must perceive the world in which it exists, reason about those perceptions, and decide how to act on them in pursuit of its own agenda.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Yang, Zhaojun; Narayanan, Shrikanth
Analysis of Emotional Effect on Speech-Body Gesture Interplay Inproceedings
In: Proceedings of the Fifteenth Annual Conference of the International Speech Communication Association, Singapore, 2014.
@inproceedings{yang_analysis_2014,
title = {Analysis of Emotional Effect on Speech-Body Gesture Interplay},
author = {Zhaojun Yang and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Analysis%20of%20Emotional%20Effect%20on%20Speech-Body%20Gesture%20Interplay.pdf},
year = {2014},
date = {2014-09-01},
booktitle = {Proceedings of the Fifteenth Annual Conference of the International Speech Communication Association},
address = {Singapore},
abstract = {In interpersonal interactions, speech and body gesture channels are internally coordinated towards conveying communicative intentions. The speech-gesture relationship is influenced by the internal emotion state underlying the communication. In this paper, we focus on uncovering the emotional effect on the interrelation between speech and body gestures. We investigate acoustic features describing speech prosody (pitch and energy) and vocal tract configuration (MFCCs), as well as three types of body gestures, viz., head motion, lower and upper body motions. We employ mutual information to measure the coordination between the two communicative channels, and analyze the quantified speech-gesture link with respect to distinct levels of emotion attributes, i.e., activation and valence. The results reveal that the speech-gesture coupling is generally tighter for low-level activation and high-level valence, compared to high-level activation and low-level valence. We further propose a framework for modeling the dynamics of speech-gesture interaction. Experimental studies suggest that such quantified coupling representations can well discriminate different levels of activation and valence, reinforcing that emotions are encoded in the dynamics of the multimodal link. We also verify that the structures of the coupling representations are emotion-dependent using subspace-based analysis.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
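The coordination measure used in the abstract above, mutual information between a speech feature track and a gesture feature track, can be estimated from a joint histogram. A minimal sketch in Python; the variable names (pitch, head_motion) are illustrative placeholders, not the paper's feature definitions:

import numpy as np

def mutual_information(x, y, bins=16):
    """Histogram-based estimate (in nats) of MI between two synchronized 1-D tracks."""
    pxy, _, _ = np.histogram2d(x, y, bins=bins)
    pxy = pxy / pxy.sum()                      # joint distribution
    px = pxy.sum(axis=1, keepdims=True)        # marginal of x
    py = pxy.sum(axis=0, keepdims=True)        # marginal of y
    nz = pxy > 0                               # avoid log(0)
    return float(np.sum(pxy[nz] * np.log(pxy[nz] / (px * py)[nz])))

# Toy usage: a correlated pair yields higher MI than an independent pair.
rng = np.random.default_rng(0)
pitch = rng.standard_normal(2000)
head_motion = 0.6 * pitch + 0.8 * rng.standard_normal(2000)
print(mutual_information(pitch, head_motion))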
Van Segbroeck, Maarten; Travadi, Ruchir; Vaz, Colin; Kim, Jangwon; Black, Matthew P.; Potamianos, Alexandros; Narayanan, Shrikanth S.
Classification of Cognitive Load from Speech using an i-vector Framework Inproceedings
In: Proceedings of the Fifteenth Annual Conference of the International Speech Communication Association, Singapore, 2014.
@inproceedings{van_segbroeck_classification_2014,
title = {Classification of Cognitive Load from Speech using an i-vector Framework},
author = {Maarten Van Segbroeck and Ruchir Travadi and Colin Vaz and Jangwon Kim and Matthew P. Black and Alexandros Potamianos and Shrikanth S. Narayanan},
url = {http://ict.usc.edu/pubs/Classification%20of%20Cognitive%20Load%20from%20Speech%20using%20an%20i-vector%20Framework.pdf},
year = {2014},
date = {2014-09-01},
booktitle = {Proceedings of the Fifteenth Annual Conference of the International Speech Communication Association},
address = {Singapore},
abstract = {The goal in this work is to automatically classify speakers’ level of cognitive load (low, medium, high) from a standard battery of reading tasks requiring varying levels of working memory. This is a challenging machine learning problem because of the inherent difficulty in defining/measuring cognitive load and due to intra-/inter-speaker differences in how their effects are manifested in behavioral cues. We experimented with a number of static and dynamic features extracted directly from the audio signal (prosodic, spectral, voice quality) and from automatic speech recognition hypotheses (lexical information, speaking rate). Our approach to classification addressed the wide variability and heterogeneity through speaker normalization and by adopting an i-vector framework that affords a systematic way to factorize the multiple sources of variability.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Shivakumar, Prashanth Gurunath; Potamianos, Alexandros; Lee, Sungbok; Narayanan, Shrikanth
Improving Speech Recognition for Children using Acoustic Adaptation and Pronunciation Modeling Journal Article
In: Proceedings of Workshop on Child Computer Interaction, 2014.
@article{shivakumar_improving_2014,
title = {Improving Speech Recognition for Children using Acoustic Adaptation and Pronunciation Modeling},
author = {Prashanth Gurunath Shivakumar and Alexandros Potamianos and Sungbok Lee and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Improving%20Speech%20Recognition%20for%20Children%20using%20Acoustic%20Adaptation%20and%20Pronunciation%20Modeling.pdf},
year = {2014},
date = {2014-09-01},
journal = {Proceedings of Workshop on Child Computer Interaction},
abstract = {Developing a robust Automatic Speech Recognition (ASR) system for children is a challenging task because of increased variability in acoustic and linguistic correlates as a function of young age. The acoustic variability is mainly due to the developmental changes associated with vocal tract growth. On the linguistic side, the variability is associated with limited knowledge of vocabulary, pronunciations and other linguistic constructs. This paper presents a preliminary study towards better acoustic modeling, pronunciation modeling and front-end processing for children’s speech. Results are presented as a function of age. Speaker adaptation significantly reduces mismatch and variability, improving recognition results across age groups. In addition, introduction of pronunciation modeling shows promising performance improvements.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Vaz, Colin; Ramanarayanan, Vikram; Narayanan, Shrikanth
Joint Filtering and Factorization for Recovering Latent Structure from Noisy Speech Data Inproceedings
In: Proceedings of the Fifteenth Annual Conference of the International Speech Communication Association, Singapore, 2014.
@inproceedings{vaz_joint_2014,
title = {Joint Filtering and Factorization for Recovering Latent Structure from Noisy Speech Data},
author = {Colin Vaz and Vikram Ramanarayanan and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Joint%20Filtering%20and%20Factorization%20for%20Recovering%20Latent%20Structure%20from%20Noisy%20Speech%20Data.pdf},
year = {2014},
date = {2014-09-01},
booktitle = {Proceedings of the Fifteenth Annual Conference of the International Speech Communication Association},
address = {Singapore},
abstract = {We propose a joint filtering and factorization algorithm to recover latent structure from noisy speech. We incorporate the minimum variance distortionless response (MVDR) formulation within the non-negative matrix factorization (NMF) framework to derive a single, unified cost function for both filtering and factorization. Minimizing this cost function jointly optimizes three quantities – a filter that removes noise, a basis matrix that captures latent structure in the data, and an activation matrix that captures how the elements in the basis matrix can be linearly combined to reconstruct input data. Results show that the proposed algorithm recovers the speech basis matrix from noisy speech significantly better than NMF alone or Wiener filtering followed by NMF. Furthermore, PESQ scores show that our algorithm is a viable choice for speech denoising.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
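The abstract above does not reproduce the unified cost function, so the following is only a plausible shape under stated assumptions (a per-frequency filter g applied to the noisy magnitude spectrogram Y, a divergence D between the filtered spectrogram and its factorization, and an MVDR-style distortionless constraint with steering vector d); the paper's exact formulation may differ:

\[
\min_{\mathbf{g},\,W\ge 0,\,H\ge 0}\; D\bigl(\operatorname{diag}(\mathbf{g})\,Y \,\big\Vert\, WH\bigr)
\quad\text{subject to}\quad \mathbf{g}^{\mathsf{H}}\mathbf{d} = 1,
\]

where W is the basis matrix capturing latent structure and H the activation matrix, matching the three jointly optimized quantities named in the abstract.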
Huang, Che-Wei; Xiao, Bo; Georgiou, Panayiotis G.; Narayanan, Shrikanth S.
Unsupervised Speaker Diarization Using Riemannian Manifold Clustering Inproceedings
In: Fifteenth Annual Conference of the International Speech Communication Association, Singapore, 2014.
@inproceedings{huang_unsupervised_2014,
title = {Unsupervised Speaker Diarization Using Riemannian Manifold Clustering},
author = {Che-Wei Huang and Bo Xiao and Panayiotis G. Georgiou and Shrikanth S. Narayanan},
url = {http://ict.usc.edu/pubs/Unsupervised%20Speaker%20Diarization%20Using%20Riemannian%20Manifold%20Clustering.pdf},
year = {2014},
date = {2014-09-01},
booktitle = {Fifteenth Annual Conference of the International Speech Communication Association},
address = {Singapore},
abstract = {We address the problem of speaker clustering for robust unsupervised speaker diarization. We model each speaker-homogeneous segment as one single full multivariate Gaussian probability density function (pdf) and take into consideration the Riemannian property of Gaussian pdfs. By assuming that segments from different speakers lie on different (possibly intersected) sub-manifolds of the manifold of Gaussian pdfs, we formulate the original problem as a Riemannian manifold clustering problem. To apply the computationally simple Riemannian locally linear embedding (LLE) algorithm, we impose a constraint on the length of each segment so as to ensure the fitness of single-Gaussian modeling and to increase the chance that all k-nearest neighbors of a pdf are from the same sub-manifold (speaker). Experiments on the microphone-recorded conversational interviews from the NIST 2010 speaker recognition evaluation set demonstrate promising results of less than 1% DER.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
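As a rough illustration of clustering segments modeled as full Gaussians, the sketch below builds a symmetrized-KL affinity between per-segment Gaussians and applies spectral clustering. This is a simplified stand-in for, not a reproduction of, the paper's Riemannian LLE; the toy segments and the known number of speakers are assumptions:

import numpy as np
from sklearn.cluster import SpectralClustering

def symmetrized_kl(m1, S1, m2, S2):
    """Symmetrized KL divergence between two multivariate Gaussians (mean, covariance)."""
    def kl(ma, Sa, mb, Sb):
        d = ma.size
        Sb_inv = np.linalg.inv(Sb)
        diff = mb - ma
        return 0.5 * (np.trace(Sb_inv @ Sa) + diff @ Sb_inv @ diff - d
                      + np.log(np.linalg.det(Sb) / np.linalg.det(Sa)))
    return kl(m1, S1, m2, S2) + kl(m2, S2, m1, S1)

# Toy "segments": MFCC-like frames from two synthetic speakers (alternating mean shift).
rng = np.random.default_rng(0)
segments = [rng.standard_normal((200, 13)) + (i % 2) for i in range(10)]
params = [(seg.mean(axis=0), np.cov(seg, rowvar=False)) for seg in segments]

n = len(params)
D = np.zeros((n, n))
for i in range(n):
    for j in range(i + 1, n):
        D[i, j] = D[j, i] = symmetrized_kl(*params[i], *params[j])

affinity = np.exp(-D / D[D > 0].mean())        # turn distances into similarities
labels = SpectralClustering(n_clusters=2, affinity='precomputed').fit_predict(affinity)
print(labels)                                   # should separate even/odd segments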
Gibson, James; Van Segbroeck, Maarten; Narayanan, Shrikanth
Comparing Time-Frequency Representations for Directional Derivative Features Inproceedings
In: Fifteenth Annual Conference of the International Speech Communication Association, Singapore, 2014.
@inproceedings{gibson_comparing_2014,
title = {Comparing Time-Frequency Representations for Directional Derivative Features},
author = {James Gibson and Maarten Van Segbroeck and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Comparing%20Time-Frequency%20Representations%20for%20Directional%20Derivative%20Features.pdf},
year = {2014},
date = {2014-09-01},
booktitle = {Fifteenth Annual Conference of the International Speech Communication Association},
address = {Singapore},
abstract = {We compare the performance of Directional Derivatives features for automatic speech recognition when extracted from different time-frequency representations. Specifically, we use the short-time Fourier transform, Mel-frequency, and Gammatone spectrograms as a base from which we extract spectrotemporal modulations. We then assess the noise robustness of each representation with varied numbers of frequency bins and dynamic range compression schemes for both word and phone recognition. We find that the choice of dynamic range compression approach has the most significant impact on recognition performance, whereas the performance differences between perceptually motivated filter-banks are minimal in the proposed framework. Furthermore, this work presents significant gains in speech recognition accuracy for low SNRs over MFCCs, GFCCs, and Directional Derivatives extracted from the log-Mel spectrogram.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
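A directional derivative of a time-frequency representation along an angle theta in the time-frequency plane can be built from the two axis gradients. A minimal sketch; the paper's exact filter design, bin counts, and compression schemes are not reproduced here:

import numpy as np

def directional_derivative(log_spec, theta):
    """log_spec: 2-D array (frequency x time); theta: orientation in radians."""
    d_freq, d_time = np.gradient(log_spec)     # derivative along each axis
    return np.cos(theta) * d_time + np.sin(theta) * d_freq

# Usage: a small bank of orientations over a toy log-spectrogram-like array.
spec = np.log(np.abs(np.random.default_rng(0).standard_normal((40, 300))) + 1e-6)
features = [directional_derivative(spec, t)
            for t in np.linspace(0, np.pi, 8, endpoint=False)]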
Travadi, Ruchir; Van Segbroeck, Maarten; Narayanan, Shrikanth
Modified-prior i-Vector Estimation for Language Identification of Short Duration Utterances Inproceedings
In: Proceedings of the Fifteenth Annual Conference of the International Speech Communication Association, pp. 3037–3041, Singapore, 2014.
@inproceedings{travadi_modified-prior_2014,
title = {Modified-prior i-Vector Estimation for Language Identification of Short Duration Utterances},
author = {Ruchir Travadi and Maarten Van Segbroeck and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Modified-prior%20i-Vector%20Estimation%20for%20Language%20Identification%20of%20Short%20Duration%20Utterances.pdf},
year = {2014},
date = {2014-09-01},
booktitle = {Proceedings of the Fifteenth Annual Conference of the International Speech Communication Association},
pages = {3037--3041},
address = {Singapore},
abstract = {In this paper, we address the problem of Language Identification (LID) on short duration segments. Current state-of-the-art LID systems typically employ total variability i-Vector modeling for obtaining fixed length representation of utterances. However, when the utterances are short, only a small amount of data is available, and the estimated i-Vector representation will consequently exhibit significant variability, making the identification problem challenging. In this paper, we propose novel techniques to modify the standard normal prior distribution of the i-Vectors, to obtain a more discriminative i-Vector extraction given the small amount of available utterance data. Improved performance was observed by using the proposed i-Vector estimation techniques on short segments of the DARPA RATS corpora, with lengths as small as 3 seconds.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
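To see where a modified prior enters: the standard i-vector is the posterior mean of a latent variable w under a N(0, I) prior in a linear-Gaussian model. Replacing that prior with a general Gaussian N(μ0, P0) gives, by standard Bayesian algebra (the paper's specific choices of modified prior are not reproduced here):

\[
\hat{\mathbf{w}} = \bigl(P_0^{-1} + T^{\mathsf{T}}\Sigma^{-1} N\,T\bigr)^{-1}
\bigl(P_0^{-1}\boldsymbol{\mu}_0 + T^{\mathsf{T}}\Sigma^{-1}\tilde{F}\bigr),
\]

where T is the total variability matrix, N and F̃ are the zero-order and centered first-order Baum-Welch statistics, and Σ is the UBM covariance; μ0 = 0, P0 = I recovers the standard estimate. For short utterances N is small, so the prior term dominates, which is why reshaping the prior can make the extracted i-vectors more discriminative.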
Dehghani, M.; Khooshabeh, P.; Nazarian, A.; Gratch, J.
The Subtlety of Sound: Accent as a Marker for Culture Journal Article
In: Journal of Language and Social Psychology, 2014, ISSN: 0261-927X, 1552-6526.
@article{dehghani_subtlety_2014,
title = {The Subtlety of Sound: Accent as a Marker for Culture},
author = {M. Dehghani and P. Khooshabeh and A. Nazarian and J. Gratch},
url = {http://jls.sagepub.com/cgi/doi/10.1177/0261927X14551095},
doi = {10.1177/0261927X14551095},
issn = {0261-927X, 1552-6526},
year = {2014},
date = {2014-09-01},
journal = {Journal of Language and Social Psychology},
abstract = {Aspects of language, such as accent, play a crucial role in the formation and categorization of one’s cultural identity. Recent work on accent emphasizes the role of accent in person perception and social categorization, demonstrating that accent also serves as a meaningful indicator of an ethnic category. In this article, we investigate whether the accent of an interaction partner, as a marker for culture, can induce cultural frame-shifts in biculturals. We report the results of three experiments, performed among bicultural and monocultural individuals, in which we test the above hypothesis. Our results demonstrate that accent alone can affect people’s cognition.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Van Segbroeck, Maarten; Travadi, Ruchir; Narayanan, Shrikanth S.
UBM Fused Total Variability Modeling for Language Identification Inproceedings
In: Proceedings of the Fifteenth Annual Conference of the International Speech Communication Association, INTERSPEECH, Singapore, 2014.
@inproceedings{van_segbroeck_ubm_2014,
title = {UBM Fused Total Variability Modeling for Language Identification},
author = {Maarten Van Segbroeck and Ruchir Travadi and Shrikanth S. Narayanan},
url = {http://ict.usc.edu/pubs/UBM%20Fused%20Total%20Variability%20Modeling%20for%20Language%20Identification.pdf},
year = {2014},
date = {2014-09-01},
booktitle = {Proceedings of the Fifteenth Annual Conference of the International Speech Communication Association},
publisher = {INTERSPEECH},
address = {Singapore},
abstract = {This paper proposes Universal Background Model (UBM) fusion in the framework of total variability or i-vector modeling, with application to language identification (LID). The total variability subspace, which is typically exploited to discriminate between the language classes of different speech recordings, is trained by combining the normalized Baum-Welch statistics of multiple UBMs. When the UBMs model a diverse set of feature representations, the method yields an i-vector representation which is more discriminant between the classes of interest. This approach is particularly useful when applied to short-duration utterances, and is a computationally less complex alternative to system-level fusion for boosting performance. We assess the performance of UBM fused total variability modeling on the task of robust language identification on short-duration utterances, as part of Phase-III of the DARPA RATS (Robust Automatic Transcription of Speech) program.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
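The fusion step described above combines normalized Baum-Welch statistics from several UBMs before training a single total variability subspace. A minimal sketch of collecting centered statistics from two GMM-UBMs over two hypothetical feature streams and stacking them; illustrative only, as the paper's normalization details may differ:

import numpy as np
from sklearn.mixture import GaussianMixture

def centered_stats(ubm, X):
    gamma = ubm.predict_proba(X)                       # (T, C) frame posteriors
    N = gamma.sum(axis=0)                              # zero-order stats, (C,)
    F = gamma.T @ X                                    # first-order stats, (C, D)
    return N, (F - N[:, None] * ubm.means_).ravel()    # center by UBM means, flatten

# Two UBMs over two hypothetical feature streams (e.g., spectral and prosodic).
rng = np.random.default_rng(0)
X_spec, X_pros = rng.standard_normal((500, 20)), rng.standard_normal((500, 5))
ubm_spec = GaussianMixture(n_components=8, covariance_type='diag').fit(X_spec)
ubm_pros = GaussianMixture(n_components=8, covariance_type='diag').fit(X_pros)

_, f_spec = centered_stats(ubm_spec, X_spec)
_, f_pros = centered_stats(ubm_pros, X_pros)
fused = np.concatenate([f_spec, f_pros])               # stacked stats feed one T-matrix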
The Context of Military Environments: Social and Organizational Factors Technical Report
National Academies Press, Washington, DC, 2014.
@techreport{noauthor_context_2014,
title = {The Context of Military Environments: Social and Organizational Factors},
url = {http://sites.nationalacademies.org/DBASSE/BBCSS/CurrentProjects/DBASSE_080746},
year = {2014},
date = {2014-09-01},
address = {Washington, DC},
institution = {National Academies Press},
abstract = {The U.S. Army faces a variety of challenges to maintain a ready and capable force into the future. Its missions are diverse, following a continuum from peace to war that includes combat and counterinsurgency operations as well as negotiation, reconstruction, and stability operations that require a variety of personnel and skill sets to execute. Missions often demand rapid decision making and coordination with others in novel ways, so that personnel are not simply following a specific set of tactical orders but, rather, carrying out mission command through an understanding of broader strategic goals in order to develop and choose among courses of action. Like any workforce, the Army is diverse in terms of demographic characteristics, such as gender and race, with a commitment of its leadership to ensure equal opportunities across all demographic parties. With these challenges comes the urgent need to better understand how contextual factors influence soldier and small unit behavior and mission performance.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Zadeh, AmirAli B.; Sagae, Kenji; Morency, Louis-Philippe
Towards Learning Nonverbal Identities from the Web: Automatically Identifying Visually Accentuated Words Inproceedings
In: Intelligent Virtual Agents, pp. 496–503, Springer, Boston, MA, 2014.
@inproceedings{zadeh_towards_2014,
title = {Towards Learning Nonverbal Identities from the Web: Automatically Identifying Visually Accentuated Words},
author = {AmirAli B. Zadeh and Kenji Sagae and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Towards%20Learning%20Nonverbal%20Identities%20from%20the%20Web%20-%20Automatically%20Identifying%20Visually-Accentuated%20Words.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Intelligent Virtual Agents},
pages = {496--503},
publisher = {Springer},
address = {Boston, MA},
abstract = {This paper presents a novel long-term idea to learn automatically from online multimedia content, such as videos from YouTube channels, a portfolio of nonverbal identities in the form of computational representations of prototypical gestures of a speaker. As a first step towards this vision, this paper presents proof-of-concept experiments to automatically identify visually accentuated words from a collection of online videos of the same person. The experimental results are promising, with many accentuated words automatically identified and specific head motion patterns associated with these words.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Xu, Yuyu; Pelachaud, Catherine; Marsella, Stacy
Compound Gesture Generation: A Model Based on Ideational Units Inproceedings
In: Intelligent Virtual Agents, pp. 477–491, Springer, Boston, MA, 2014.
@inproceedings{xu_compound_2014,
title = {Compound Gesture Generation: A Model Based on Ideational Units},
author = {Yuyu Xu and Catherine Pelachaud and Stacy Marsella},
url = {http://ict.usc.edu/pubs/Compound%20Gesture%20Generation%20-%20A%20Model%20Based%20on%20Ideational%20Units.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Intelligent Virtual Agents},
pages = {477--491},
publisher = {Springer},
address = {Boston, MA},
abstract = {This work presents a hierarchical framework that generates continuous gesture animation performances for virtual characters. As opposed to approaches that focus on realizing individual gestures, the focus of this work is on the relation between gestures as part of an overall gesture performance. Following Calbris’ work [3], our approach is to structure the performance around ideational units and determine gestural features within and across these ideational units. Furthermore, we use Calbris’ work on the relation between form and meaning in gesture to help inform how individual gestures’ expressivity is manipulated. Our framework takes in high-level communicative function descriptions, generates behavior descriptions and realizes them using our character animation engine. We define the specifications for these different levels of descriptions. Finally, we show the general results as well as experiments illustrating the impact of the key features.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lhommet, Margot; Marsella, Stacy
Metaphoric Gestures: Towards Grounded Mental Spaces Inproceedings
In: Intelligent Virtual Agents, pp. 264–274, Springer, Boston, MA, 2014.
@inproceedings{lhommet_metaphoric_2014,
title = {Metaphoric Gestures: Towards Grounded Mental Spaces},
author = {Margot Lhommet and Stacy Marsella},
url = {http://ict.usc.edu/pubs/Metaphoric%20Gestures%20-%20Towards%20Grounded%20Mental%20Spaces.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Intelligent Virtual Agents},
pages = {264--274},
publisher = {Springer},
address = {Boston, MA},
abstract = {Gestures are related to the mental states and unfolding processes of thought, reasoning and verbal language production. This is especially apparent in the case of metaphors and metaphoric gestures. For example, talking about the importance of an idea by calling it a big idea and gesturing to indicate a large size is a manifestation of the use of metaphors in language and gesture. We propose a computational model of the influence of conceptual metaphors on gestures that maps from mental state representations of ideas to their expression in concrete, physical metaphoric gestures. This model relies on conceptual primary metaphors to map the abstract elements of the mental space to concrete physical elements that can be conveyed with gestures.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M.; Gratch, Jonathan; King, Aisha; Morency, Louis-Philippe
It’s only a computer: Virtual humans increase willingness to disclose Journal Article
In: Computers in Human Behavior, vol. 37, pp. 94–100, 2014, ISSN: 07475632.
@article{lucas_its_2014,
title = {It’s only a computer: Virtual humans increase willingness to disclose},
author = {Gale M. Lucas and Jonathan Gratch and Aisha King and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/It%27s%20Only%20a%20Computer%20-%20Virtual%20Humans%20Increase%20Willingness%20to%20Disclose.pdf},
doi = {10.1016/j.chb.2014.04.043},
issn = {07475632},
year = {2014},
date = {2014-08-01},
journal = {Computers in Human Behavior},
volume = {37},
pages = {94--100},
abstract = {Research has begun to explore the use of virtual humans (VHs) in clinical interviews (Bickmore, Gruber, & Picard, 2005). When designed as supportive and “safe” interaction partners, VHs may improve such screenings by increasing willingness to disclose information (Gratch, Wang, Gerten, & Fast, 2007). In health and mental health contexts, patients are often reluctant to respond honestly. In the context of health-screening interviews, we report a study in which participants interacted with a VH interviewer and were led to believe that the VH was controlled by either humans or automation. As predicted, compared to those who believed they were interacting with a human operator, participants who believed they were interacting with a computer reported lower fear of self-disclosure, lower impression management, displayed their sadness more intensely, and were rated by observers as more willing to disclose. These results suggest that automated VHs can help overcome a significant barrier to obtaining truthful patient information.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lubetich, Shannon; Sagae, Kenji
Data-driven Measurement of Child Language Development with Simple Syntactic Templates Inproceedings
In: Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers, pp. 2151 – 2160, Dublin, Ireland, 2014.
@inproceedings{lubetich_data-driven_2014,
title = {Data-driven Measurement of Child Language Development with Simple Syntactic Templates},
author = {Shannon Lubetich and Kenji Sagae},
url = {http://ict.usc.edu/pubs/Data-driven%20Measurement%20of%20Child%20Language%20Development%20with%20Simple%20Syntactic%20Templates.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers},
pages = {2151 -- 2160},
address = {Dublin, Ireland},
abstract = {When assessing child language development, researchers have traditionally had to choose between easily computable metrics focused on superficial aspects of language, and more expressive metrics that are carefully designed to cover specific syntactic structures and require substantial and tedious labor. Recent work has shown that existing expressive metrics for child language development can be automated and produce accurate results. We go a step further and propose that measurement of syntactic development can be performed automatically in a completely data-driven way without the need for definition of language-specific inventories of grammatical structures. As a crucial step in that direction, we show that four simple feature templates are as expressive of language development as a carefully crafted standard inventory of grammatical structures that is commonly used and has been validated empirically.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ustun, Volkan; Rosenbloom, Paul S.; Sagae, Kenji; Demski, Abram
Distributed Vector Representations of Words in the Sigma Cognitive Architecture Inproceedings
In: Proceedings of the 7th Conference on Artificial General Intelligence 2014, Québec City, Canada, 2014.
@inproceedings{ustun_distributed_2014,
title = {Distributed Vector Representations of Words in the Sigma Cognitive Architecture},
author = {Volkan Ustun and Paul S. Rosenbloom and Kenji Sagae and Abram Demski},
url = {http://ict.usc.edu/pubs/Distributed%20Vector%20Representations%20of%20Words%20in%20the%20Sigma%20Cognitive%20Architecture.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of the 7th Conference on Artificial General Intelligence 2014},
address = {Québec City, Canada},
abstract = {Recently reported results with distributed-vector word representations in natural language processing make them appealing for incorporation into a general cognitive architecture like Sigma. This paper describes a new algorithm for learning such word representations from large, shallow information resources, and how this algorithm can be implemented via small modifications to Sigma. The effectiveness and speed of the algorithm are evaluated via a comparison of an external simulation of it with state-of-the-art algorithms. The results from more limited experiments with Sigma are also promising, but more work is required for it to reach the effectiveness and speed of the simulation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Traum, David; Marsella, Stacy; Morency, Louis-Philippe; Shapiro, Ari; Gratch, Jonathan
A Shared, Modular Architecture for Developing Virtual Humans Inproceedings
In: Proceedings of the Workshop on Architectures and Standards for Intelligent Virtual Agents at IVA 2014, pp. 4–7, Boston, MA, 2014.
@inproceedings{hartholt_shared_2014,
title = {A Shared, Modular Architecture for Developing Virtual Humans},
author = {Arno Hartholt and David Traum and Stacy Marsella and Louis-Philippe Morency and Ari Shapiro and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/A%20Shared%20Modular%20Architecture%20for%20Developing%20Virtual%20Humans.pdf},
doi = {10.2390/biecoll-wasiva2014-02},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of the Workshop on Architectures and Standards for Intelligent Virtual Agents at IVA 2014},
pages = {4--7},
address = {Boston, MA},
abstract = {Realizing the full potential of intelligent virtual agents requires compelling characters that can engage users in meaningful and realistic social interactions, and an ability to develop these characters effectively and efficiently. Advances are needed in individual capabilities, but perhaps more importantly, fundamental questions remain as to how best to integrate these capabilities into a single framework that allows us to efficiently create characters that can engage users in meaningful and realistic social interactions. This integration requires in-depth, inter-disciplinary understanding few individuals, or even teams of individuals, possess.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}