Publications
Search
Jan, Dusan; Herrera, David; Martinovski, Bilyana; Novick, David; Traum, David
A Computational Model of Culture-Specific Conversational Behavior Proceedings Article
In: Proceedings of the Intelligent Virtual Humans Conference, Paris, France, 2007.
@inproceedings{jan_computational_2007,
  title     = {A Computational Model of Culture-Specific Conversational Behavior},
  author    = {Dusan Jan and David Herrera and Bilyana Martinovski and David Novick and David Traum},
  url       = {http://ict.usc.edu/pubs/A%20Computational%20Model%20of%20Culture-Specific%20Conversational%20Behavior.pdf},
  year      = {2007},
  date      = {2007-01-01},
  booktitle = {Proceedings of the Intelligent Virtual Humans Conference},
  address   = {Paris, France},
  abstract  = {This paper presents a model for simulating cultural differences in the conversational behavior of virtual agents. The model provides parameters for differences in proxemics, gaze and overlap in turn taking. We present a review of literature on these factors and show results of a study where native speakers of North American English, Mexican Spanish and Arabic were asked to rate the realism of the simulations generated based on different cultural parameters with respect to their culture.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Yeh, Shih-Ching; Stewart, Jill; McLaughlin, Margaret; Parsons, Thomas D.; Winstein, Carolee J.; Rizzo, Albert
Evaluation Approach for Post-stroke Rehabilitation Via Virtual Reality Aided Motor Training Proceedings Article
In: Lecture Notes in Computer Science, pp. 378–387, 2007.
@inproceedings{yeh_evaluation_2007,
  title     = {Evaluation Approach for Post-stroke Rehabilitation Via Virtual Reality Aided Motor Training},
  author    = {Shih-Ching Yeh and Jill Stewart and Margaret McLaughlin and Thomas D. Parsons and Carolee J. Winstein and Albert Rizzo},
  url       = {http://ict.usc.edu/pubs/Evaluation%20Approach%20for%20Post-stroke%20Rehabilitation%20Via%20Virtual%20Reality%20Aided%20Motor%20Training.pdf},
  year      = {2007},
  date      = {2007-01-01},
  booktitle = {Lecture Notes in Computer Science},
  pages     = {378--387},
  abstract  = {This paper introduces an evaluation approach that was applied to clinical data collected from a virtual reality aided motor training program for post-stroke rehabilitation. The goal of the proposed evaluation approach is to diagnose the patient's current status (performance) and detect change in status over time (progression). Three measures, performance time, movement efficiency, and movement speed, were defined to represent kinematic features of reaching. 3-D performance maps and progression maps were generated based on each kinematic measure to visualize a single patient's behavior. The case study revealed the patient's current status as to direction and range of upper extremity reach ability, composed of pitch, yaw and arm length. Further, progression was found and visualized quantitatively over a series of practice sessions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Robertson, R. Kevin; Parsons, Thomas D.; Rogers, Steven A.; Braaten, Alyssa J.; Robertson, Wendy T.; Wilson, Susan; Hall, Colin D.
Assessing health-related quality of life in NeuroAIDS: some psychometric properties of the Neurological Quality of Life Questionnaire (NeuroQOL) Journal Article
In: Journal of Clinical Neuroscience, vol. 14, pp. 416–423, 2007.
@article{robertson_assessing_2007,
  title     = {Assessing health-related quality of life in NeuroAIDS: some psychometric properties of the Neurological Quality of Life Questionnaire (NeuroQOL)},
  author    = {R. Kevin Robertson and Thomas D. Parsons and Steven A. Rogers and Alyssa J. Braaten and Wendy T. Robertson and Susan Wilson and Colin D. Hall},
  url       = {http://ict.usc.edu/pubs/Assessing%20health-related%20quality%20of%20life%20in%20NeuroAIDS-%20some%20psychometric%20properties%20of%20the%20Neurological%20Quality%20of%20Life%20Questionnaire%20(NeuroQOL).pdf},
  year      = {2007},
  date      = {2007-01-01},
  journal   = {Journal of Clinical Neuroscience},
  volume    = {14},
  pages     = {416--423},
  abstract  = {Several studies were undertaken to assess the psychometric properties (reliability and initial convergent and discriminant construct validity) of the Neurological Quality of Life Questionnaire (NeuroQOL). The NeuroQOL contains 114 items answered in self report Likert format, with higher scores reflecting better quality of life. Study one compared the questionnaire with existing quality of life measures (Symptom Distress Scale, Sickness Impact Profile) and disease stage, psychological, neuropsychological and neurological measures, and a significant correlation was also fount with each domain. The internal consistency reliability (alpha = 0.96), split half reliability (r12 = 0.97), and test-retest reliability (coefficients were 0.78 for 6 months and 0.67 for one year intervals between test and retest) were all found to high and adequately stable. Overall, these results indicate acceptable reliability and initial construct valididty for the NeuroQOL.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Macedonio, Mary F.; Parsons, Thomas D.; Digiuseppe, Raymond A.; Wiederhold, Brenda K.; Rizzo, Albert
Immersiveness and Physiological Arousal within Panoramic Video-Based Virtual Reality Journal Article
In: CyberPsychology and Behavior, vol. 10, no. 4, pp. 508–515, 2007.
@article{macedonio_immersiveness_2007,
  title     = {Immersiveness and Physiological Arousal within Panoramic Video-Based Virtual Reality},
  author    = {Mary F. Macedonio and Thomas D. Parsons and Raymond A. Digiuseppe and Brenda K. Wiederhold and Albert Rizzo},
  url       = {http://ict.usc.edu/pubs/Immersiveness%20and%20Physiological%20Arousal%20within%20Panoramic%20Video-Based%20Virtual%20Reality.pdf},
  year      = {2007},
  date      = {2007-01-01},
  journal   = {CyberPsychology and Behavior},
  volume    = {10},
  number    = {4},
  pages     = {508--515},
  abstract  = {In this paper, we discuss findings from a study that used panoramic video-based virtual environments (PVVEs) to induce self-reported anger. The study assessed "immersiveness" and physiological correlates of anger arousal (i.e., heart rate, blood pressure, galvanic skin response [GSR], respiration, and skin temperature). Results indicate that over time, panoramic video-based virtual scenarios can be, at the very least, physiologically arousing. Further, it can be affirmed from the results that hypnotizability, as defined by the applied measures, interacts with group on physiological arousal measures. Hence, physiological arousal appeared to be moderated by participant hypnotizability and absorption levels.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Busso, Carlos; Narayanan, Shrikanth
Interplay between linguistic and affective goals in facial expression during emotional utterances Proceedings Article
In: Proceedings of the 7th International Seminar on Speech Production, pp. 549–556, Ubatuba, Brazil, 2006.
@inproceedings{busso_interplay_2006,
  title     = {Interplay between linguistic and affective goals in facial expression during emotional utterances},
  author    = {Carlos Busso and Shrikanth Narayanan},
  url       = {http://ict.usc.edu/pubs/Interplay%20between%20linguistic%20and%20affective%20goals%20in%20facial%20expression%20during%20emotional%20utterances.pdf},
  year      = {2006},
  date      = {2006-12-01},
  booktitle = {Proceedings of the 7th International Seminar on Speech Production},
  pages     = {549--556},
  address   = {Ubatuba, Brazil},
  abstract  = {Communicative goals are simultaneously expressed through gestures and speech to convey messages enriched with valuable verbal and non-verbal clues. This paper analyzes and quantifies how linguistic and affective goals are reflected in facial expressions. Using a database recorded from an actress with markers attached to her face, the facial features during emotional speech were compared with the ones expressed during neutral speech. The results show that the facial activeness is mainly driven by articulatory processes. However, clear spatial-temporal patterns are observed during emotional speech, which indicate that emotional goals enhance and modulate facial expressions. The results also show that the upper face region has more degrees of freedom to convey non-verbal information than the lower face region, which is highly constrained by the underlying articulatory processes. These results are important toward understanding how humans communicate and interact.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hill, Randall W.; Belanich, James; Lane, H. Chad; Core, Mark; Dixon, Melissa; Forbell, Eric; Kim, Julia; Hart, John
Pedagogically Structured Game-Based Training: Development of the ELECT BiLat Simulation Proceedings Article
In: Proceedings of the 25th Army Science Conference, 2006.
@inproceedings{hill_pedagogically_2006,
  title     = {Pedagogically Structured Game-Based Training: Development of the ELECT BiLat Simulation},
  author    = {Randall W. Hill and James Belanich and H. Chad Lane and Mark Core and Melissa Dixon and Eric Forbell and Julia Kim and John Hart},
  url       = {http://ict.usc.edu/pubs/PEDAGOGICALLY%20STRUCTURED%20GAME-BASED%20TRAINING-%20DEVELOPMENT%20OF%20THE%20ELECT%20BILAT%20SIMULATION.pdf},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Proceedings of the 25th Army Science Conference},
  abstract  = {ELECT BiLAT is a prototype game-based simulation for Soldiers to practice conducting bilateral engagements in a cultural context. The prototype provides students with the experience of preparing for a meeting including familiarization with the cultural context, gathering intelligence, conducting a meeting and negotiating when possible, and following up on meeting agreements as appropriate. The ELECT BiLAT architecture is based on a commercial game engine that is integrated with research technologies to enable the use of virtual human characters, scenario customization, as well as coaching, feedback and tutoring. Because the prototype application is intended to be a learning environment, pedagogy has been central throughout development. The project followed a five-phase process: (1) analyze the training domain; (2) develop a story board prototype; (3) implement a computer version of the training prototype; (4) refine training objectives and link their conditions and standards to game activities; and (5) develop training support content for students, instructors, and training developers. The goal is an authorable game-based environment that uses the pedagogy of guided discovery for training Soldiers in the conduct of bilateral engagements within a specific cultural context.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Leuski, Anton; Kennedy, Brandon; Patel, Ronakkumar; Traum, David
Asking Questions to Limited Domain Virtual Characters: How Good Does Speech Recognition Have to Be? Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
@inproceedings{leuski_asking_2006,
  title     = {Asking Questions to Limited Domain Virtual Characters: How Good Does Speech Recognition Have to Be?},
  author    = {Anton Leuski and Brandon Kennedy and Ronakkumar Patel and David Traum},
  url       = {http://ict.usc.edu/pubs/Asking%20Questions%20to%20Limited%20Domain%20Virtual%20Characters.pdf},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Proceedings of the 25th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {In this paper, we describe the evaluation of a limited domain question-answering characters, particularly as to the effect of non-optimal speech recognition, and the ability to appropriately answer novel questions. Results show that answering ability is robust until speech recognition reaches over 60% Word error rate.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Leuski, Anton; Lavrenko, Victor
Tracking Dragon-Hunters with Language Models Proceedings Article
In: Conference on Information and Knowledge Management, Arlington, VA, 2006.
@inproceedings{leuski_tracking_2006,
  title     = {Tracking Dragon-Hunters with Language Models},
  author    = {Anton Leuski and Victor Lavrenko},
  url       = {http://ict.usc.edu/pubs/Tracking%20Dragon-Hunters%20with%20Language%20Models.pdf},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Conference on Information and Knowledge Management},
  address   = {Arlington, VA},
  abstract  = {We are interested in the problem of understanding the connections between human activities and the content of textual information generated in regard to those activities. Massive online collaborative environments, specifically online virtual worlds, where people meet, exchange messages, and perform actions can be a rich source for such an analysis. In this paper we study one of such virtual worlds and the activities of its inhabitants. We explore the existing dependencies between the activities and the content of the chat messages the world's inhabitants exchange with each other. We outline three experimental tasks and show how language modeling and text clustering techniques allow us to explore those dependencies successfully.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hill, Randall W.; Kim, Julia; Zbylut, MIchelle L.; Gordon, Andrew S.; Traum, David; Gandhe, Sudeep; King, Stewart; Lavis, Salvo; Rocher, Scott
AXL.Net: Web-enabled Case Method Instruction for Accelerating Tacit Knowledge Acquisition in Leaders Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
@inproceedings{hill_axlnet_2006,
  title     = {AXL.Net: Web-enabled Case Method Instruction for Accelerating Tacit Knowledge Acquisition in Leaders},
  author    = {Randall W. Hill and Julia Kim and Michelle L. Zbylut and Andrew S. Gordon and David Traum and Sudeep Gandhe and Stewart King and Salvo Lavis and Scott Rocher},
  url       = {http://ict.usc.edu/pubs/AXLNet-%20Web-enabled%20Case%20Method%20Instruction%20for%20Accelerating%20Tacit%20Knowledge%20Acquisition%20in%20Leaders.PDF},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Proceedings of the 25th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {AXL.Net is a prototype web-based immersive technology solution that supports case method teaching for U.S. Army leader development. The AXL.Net system addresses three challenges: (1) designing a pedagogicallysound research prototype for leader development, (2) integrating research technologies with the best of Web 2.0 innovations to enhance case method teaching, and (3) providing an easy to use system. Initial evaluations show that the prototype application and framework is effective for leader development.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Debevec, Paul; Bolas, Mark; McDowall, Ian
Concave Surround Optics for Rapid Multi-View Imaging Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
@inproceedings{debevec_concave_2006,
  title     = {Concave Surround Optics for Rapid Multi-View Imaging},
  author    = {Paul Debevec and Mark Bolas and Ian McDowall},
  url       = {http://ict.usc.edu/pubs/ConcaveSurroundOptics_ASC2006.pdf},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Proceedings of the 25th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {Many image-based modeling and rendering techniques involve photographing a scene from an array of different viewpoints. Usually, this is achieved by moving the camera or the subject to successive positions, or by photographing the scene with an array of cameras. In this work, we present a system of mirrors to simulate the appearance of camera movement around a scene while the physical camera remains stationary. The system thus is amenable to capturing dynamic events avoiding the need to construct and calibrate an array of cameras. We demonstrate the system with a high speed video of a dynamic scene. We show smooth camera motion rotating 360 degrees around the scene. We discuss the optical performance of our system and compare with alternate setups.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Core, Mark; Traum, David; Lane, H. Chad; Swartout, William; Marsella, Stacy C.; Gratch, Jonathan; Lent, Michael
Teaching Negotiation Skills through Practice and Reflection with Virtual Humans Journal Article
In: Simulation: Transactions of the Society for Modeling and Simulation, vol. 82, no. 11, pp. 685–701, 2006.
@article{core_teaching_2006,
  title     = {Teaching Negotiation Skills through Practice and Reflection with Virtual Humans},
  author    = {Mark Core and David Traum and H. Chad Lane and William Swartout and Stacy C. Marsella and Jonathan Gratch and Michael Lent},
  url       = {http://ict.usc.edu/pubs/Teaching%20Negotiation%20Skills.pdf},
  year      = {2006},
  date      = {2006-11-01},
  journal   = {Simulation: Transactions of the Society for Modeling and Simulation},
  volume    = {82},
  number    = {11},
  pages     = {685--701},
  abstract  = {Although the representation of physical environments and behaviors will continue to play an important role in simulation-based training, an emerging challenge is the representation of virtual humans with rich mental models (e.g., including emotions, trust) that interact through conversational as well as physical behaviors. The motivation for such simulations is training soft skills such as leadership, cultural awareness, and negotiation, where the majority of actions are conversational, and the problem solving involves consideration of the emotions, attitudes, and desires of others.The educational power of such simulations can be enhanced by the integration of an intelligent tutoring system to support learners' understanding of the effect of their actions on virtual humans and how they might improve their performance. In this paper, we discuss our efforts to build such virtual humans, along with an accompanying intelligent tutor, for the domain of negotiation and cultural awareness.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Gratch, Jonathan; Okhmatovskaia, Anna; Duncan, Susan
Virtual Humans for the Study of Rapport in Cross Cultural Settings Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
@inproceedings{gratch_virtual_2006,
  title     = {Virtual Humans for the Study of Rapport in Cross Cultural Settings},
  author    = {Jonathan Gratch and Anna Okhmatovskaia and Susan Duncan},
  url       = {http://ict.usc.edu/pubs/VIRTUAL%20HUMANS%20FOR%20THE%20STUDY%20OF%20RAPPORT%20IN%20CROSS%20CULTURAL%20SETTINGS.pdf},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Proceedings of the 25th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {As an increasing part of the Army's mission involves establishing rapport with diverse populations, training interpersonal skills becomes critically important. Here we describe a "Rapport Agent" that senses and responds to a speaker's nonverbal behavior and provide empirical evidence that it increases speaker fluency and engagement. We argue such agent technology has potential, both as a training system to enhance communication skills, and to assess the key factors that influence rapport in face-to-face interactions. We conclude by discussing ways the nonverbal correlates of rapport vary between Arabic and English speakers and discuss the potential of such technology to advance research and training into rapport in cross-cultural settings.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Treskunov, Anton; Pair, Jarrell
Projector-Camera Systems for Immersive Training Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
@inproceedings{treskunov_projector-camera_2006,
  title     = {Projector-Camera Systems for Immersive Training},
  author    = {Anton Treskunov and Jarrell Pair},
  url       = {http://ict.usc.edu/pubs/PROJECTOR-CAMERA%20SYSTEMS%20FOR%20IMMERSIVE%20TRAINING.pdf},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Proceedings of the 25th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {Real time computer graphics are limited in that they can only be displayed on projection screens and monitors. Monitors and projection screens cannot be used in live fire training or scenarios in which the displays could be physically damaged by trainees. To address this issue, we have developed projection systems using computer vision based color correction and image processing to project onto non-ideal surfaces such as painted walls, cinder blocks, and concrete floors. These projector-camera systems effectively paint the real world with digital light. Any surface can become an interactive projection screen allowing unprepared spaces to be transformed into an immersive environment. Virtual bullet holes, charring, and cracks can be added to real doors, walls, tables, chairs, cabinets, and windows. Distortion correction algorithms allow positioning of projection devices out of the field of view of trainees and their weapons. This paper describes our motivation and approach for implementing projector-camera systems for use within the FlatWorld wide area mixed reality system.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Pair, Jarrell; Parsons, Thomas D.; Liewer, Matt; Graap, Ken; Difede, JoAnn; Rothbaum, Barbara O.; Reger, Greg; Roy, Michael
A Virtual Reality Therapy Application for OEF/OIF Combat-related Post Traumatic Stress Disorder Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
@inproceedings{rizzo_virtual_2006-1,
  title     = {A Virtual Reality Therapy Application for OEF/OIF Combat-related Post Traumatic Stress Disorder},
  author    = {Albert Rizzo and Jarrell Pair and Thomas D. Parsons and Matt Liewer and Ken Graap and JoAnn Difede and Barbara O. Rothbaum and Greg Reger and Michael Roy},
  url       = {http://ict.usc.edu/pubs/A%20VIRTUAL%20REALITY%20THERAPY%20APPLICATION%20FOR%20OEF%20OIF%20COMBAT-RELATED%20POST%20TRAUMATIC%20STRESS%20DISORDER.pdf},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Proceedings of the 25th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been used with reports of positive outcomes. The aim of the current paper is to present the rationale, technical specifications, application features and user-centered design process for the development of a Virtual Iraq PTSD VR therapy application. The VR treatment environment is being created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far we have created a series of customizable virtual scenarios designed to represent relevant contexts for exposure therapy to be conducted in VR, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system in Iraq tested by an Army Combat Stress Control Team. Clinical trials are currently underway at Camp Pendleton and at the San Diego Naval Medical Center. Other sites are preparing to use the application for a variety of PTSD and VR research purposes.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
Fourth Frame Forums: Interactive Comics for Collaborative Learning Proceedings Article
In: Proceedings of the 14th Annual ACM International Conference on Multimedia (MM 2006), Santa Barbara, CA, 2006.
@inproceedings{gordon_fourth_2006,
  title     = {Fourth Frame Forums: Interactive Comics for Collaborative Learning},
  author    = {Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/Fourth%20Frame%20Forums-%20Interactive%20Comics%20for%20Collaborative%20Learning.pdf},
  year      = {2006},
  date      = {2006-10-01},
  booktitle = {Proceedings of the 14th Annual ACM International Conference on Multimedia (MM 2006)},
  address   = {Santa Barbara, CA},
  abstract  = {In this paper, we describe Fourth Frame Forums, an application that combines traditional four-frame comic strips with online web-based discussion forums. In this application, users are presented with a four-frame comic strip where the last dialogue balloon of the fourth frame is left blank. By typing a statement into this dialogue balloon, the user creates a new discussion thread in the forum, where the user's dialogue choice can be critiqued and discussed by other users of the forum. We argue that Fourth Frame Forums provide an elegant and cost-effective solution for online education and training environments for communities of learners. We provide examples from the domain of US Army leadership development, and compare Fourth Frame Forums to alternative methods of story-directed simulation and training.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Swanson, Reid
Integrating logical inference into statistical text classification applications Proceedings Article
In: Proceedings of AAAI Fall Symposium on Integrating Logical Reasoning into Everyday Applications, Washington D.C., 2006.
@inproceedings{gordon_integrating_2006,
  title     = {Integrating logical inference into statistical text classification applications},
  author    = {Andrew S. Gordon and Reid Swanson},
  url       = {http://ict.usc.edu/pubs/Integrating%20Logical%20Inference%20Into%20Statistical%20Text%20Classification%20Applications.pdf},
  year      = {2006},
  date      = {2006-10-01},
  booktitle = {Proceedings of AAAI Fall Symposium on Integrating Logical Reasoning into Everyday Applications},
  address   = {Washington D.C.},
  abstract  = {Contemporary statistical text classification is becoming increasingly common across a wide range of everyday applications. Typically, the bottlenecks in performance are the availability and consistency of large amounts of training data. We argue that these techniques could be improved by seamlessly integrating logical inference into the text encoding pipeline, making it possible to utilize large-scale commonsense and special-purpose knowledge bases to aid in the interpretation and encoding of documents.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
Language evidence for changes in a Theory of Mind Book Section
In: Arbib, Michael A. (Ed.): Action to Language via the Mirror Neuron System, University of Cambridge Press, Cambridge, UK, 2006, ISBN: 978-0-521-84755-1.
@incollection{gordon_language_2006,
  title     = {Language evidence for changes in a Theory of Mind},
  author    = {Andrew S. Gordon},
  editor    = {Michael A. Arbib},
  url       = {http://www.cambridge.org/gb/knowledge/isbn/item1172518/?site_locale=en_GB},
  isbn      = {978-0-521-84755-1},
  year      = {2006},
  date      = {2006-09-01},
  booktitle = {Action to Language via the Mirror Neuron System},
  publisher = {University of Cambridge Press},
  address   = {Cambridge, UK},
  abstract  = {Mirror neurons may hold the brain's key to social interaction - each coding not only a particular action or emotion but also the recognition of that action or emotion in others. The Mirror System Hypothesis adds an evolutionary arrow to the story - from the mirror system for hand actions, shared with monkeys and chimpanzees, to the uniquely human mirror system for language. In this accessible 2006 volume, experts from child development, computer science, linguistics, neuroscience, primatology and robotics present and analyse the mirror system and show how studies of action and language can illuminate each other. Topics discussed in the fifteen chapters include: what do chimpanzees and humans have in common? Does the human capability for language rest on brain mechanisms shared with other animals? How do human infants acquire language? What can be learned from imaging the human brain? How are sign- and spoken-language related? Will robots learn to act and speak like humans?},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Tepperman, Joseph; Traum, David; Narayanan, Shrikanth
"Yeah Right": Sarcasm Recognition for Spoken Dialogue Systems Proceedings Article
In: Interspeech 2006, Pittsburgh, PA, 2006.
@inproceedings{tepperman_yeah_2006,
  title     = {"Yeah Right": Sarcasm Recognition for Spoken Dialogue Systems},
  author    = {Joseph Tepperman and David Traum and Shrikanth Narayanan},
  url       = {http://ict.usc.edu/pubs/Yeah%20Right-%20Sarcasm%20Recognition%20for%20Spoken%20Dialogue%20Systems.pdf},
  year      = {2006},
  date      = {2006-09-01},
  booktitle = {Interspeech 2006},
  address   = {Pittsburgh, PA},
  abstract  = {The robust understanding of sarcasm in a spoken dialogue system requires a reformulation of the dialogue manager's basic assumptions behind, for example, user behavior and grounding strategies. But automatically detecting a sarcastic tone of voice is not a simple matter. This paper presents some experiments toward sarcasm recognition using prosodic, spectral, and contextual cues. Our results demonstrate that spectral and contextual features can be used to detect sarcasm as well as a human annotator would, and confirm a long-held claim in the field of psychology — that prosody alone is not sufficient to discern whether a speaker is being sarcastic.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Jones, Andrew; Gardner, Andrew; Bolas, Mark; McDowall, Ian; Debevec, Paul
Simulating Spatially Varying Lighting on a Live Performance Proceedings Article
In: 3rd European Conference on Visual Media Production (CVMP 2006), London, UK, 2006.
@inproceedings{jones_simulating_2006,
  title     = {Simulating Spatially Varying Lighting on a Live Performance},
  author    = {Andrew Jones and Andrew Gardner and Mark Bolas and Ian McDowall and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Simulating%20Spatially%20Varying%20Lighting%20on%20a%20Live%20Performance.pdf},
  year      = {2006},
  date      = {2006-09-01},
  booktitle = {3rd European Conference on Visual Media Production (CVMP 2006)},
  address   = {London, UK},
  abstract  = {We present an image-based technique for relighting dynamic human performances under spatially varying illumination. Our system generates a time-multiplexed LED basis and a geometric model recovered from high-speed structured light patterns. The geometric model is used to scale the intensity of each pixel differently according to its 3D position within the spatially varying illumination volume. This yields a first-order approximation of the correct appearance under the spatially varying illumination. A global illumination process removes indirect illumination from the original lighting basis and simulates spatially varying indirect illumination. We demonstrate this technique for a human performance under several spatially varying lighting environments.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Roque, Antonio; Leuski, Anton; Rangarajan, Vivek; Robinson, Susan; Vaswani, Ashish; Narayanan, Shrikanth; Traum, David
Radiobot-CFF: A Spoken Dialogue System for Military Training Proceedings Article
In: Interspeech 2006, Pittsburgh, PA, 2006.
@inproceedings{roque_radiobot-cff_2006,
  title     = {Radiobot-CFF: A Spoken Dialogue System for Military Training},
  author    = {Antonio Roque and Anton Leuski and Vivek Rangarajan and Susan Robinson and Ashish Vaswani and Shrikanth Narayanan and David Traum},
  url       = {http://ict.usc.edu/pubs/Radiobot-CFF-%20A%20Spoken%20Dialogue%20System%20for%20Military%20Training.pdf},
  year      = {2006},
  date      = {2006-09-01},
  booktitle = {Interspeech 2006},
  address   = {Pittsburgh, PA},
  abstract  = {We describe a spoken dialogue system which can engage in Call For Fire (CFF) radio dialogues to help train soldiers in proper procedures for requesting artillery fire missions. We describe the domain, an information-state dialogue manager with a novel system of interactive information components, and provide evaluation results.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Filter
Sorry, no publications matched your criteria.