Publications
Ard, Tyler; Krum, David M.; Phan, Thai; Duncan, Dominique; Essex, Ryan; Bolas, Mark; Toga, Arthur
NIVR: Neuro Imaging in Virtual Reality Proceedings Article
In: Proceedings of Virtual Reality (VR), 2017 IEEE, pp. 465–466, IEEE, Los Angeles, CA, 2017, ISBN: 978-1-5090-6647-6.
@inproceedings{ard_nivr_2017,
title = {NIVR: Neuro Imaging in Virtual Reality},
author = {Tyler Ard and David M. Krum and Thai Phan and Dominique Duncan and Ryan Essex and Mark Bolas and Arthur Toga},
url = {http://ieeexplore.ieee.org/abstract/document/7892381/},
doi = {10.1109/VR.2017.7892381},
isbn = {978-1-5090-6647-6},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of Virtual Reality (VR), 2017 IEEE},
pages = {465–466},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {Visualization is a critical component of neuroimaging, and how best to view data that is naturally three-dimensional is a long-standing question in neuroscience. Many approaches, programs, and techniques have been developed specifically for neuroimaging. However, exploration of 3D information through a 2D screen is inherently limited. Many neuroscientific researchers hope that with the recent commercialization and popularization of VR, it can offer the next step in data visualization and exploration. Neuro Imaging in Virtual Reality (NIVR) is a visualization suite that employs various immersive visualizations to represent neuroimaging information in VR. Some established techniques, such as raymarching volume visualization, are paired with newer techniques, such as near-field rendering, to provide a broad basis for how we can leverage VR to improve visualization and navigation of neuroimaging data. Several of the neuroscientific visualization approaches presented are, to our knowledge, the first of their kind. NIVR offers not only an exploration of neuroscientific data visualization, but also a tool to expose and educate the public regarding recent advancements in the field of neuroimaging. By providing an engaging experience to explore new techniques and discoveries in neuroimaging, we hope to spark scientific interest in a broad audience. Furthermore, neuroimaging offers deep and expansive datasets; a single scan can involve several gigabytes of information. Visualization and exploration of this type of information can be challenging, and real-time exploration of this information in VR even more so. NIVR explores pathways which make this possible, and offers preliminary stereo visualizations of these types of massive data.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
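The raymarching volume visualization that the NIVR abstract pairs with near-field rendering can be illustrated as a front-to-back compositing loop along each view ray. The sketch below is illustrative only: the volume, step count, and transfer function are hypothetical stand-ins, not NIVR's implementation.

import numpy as np

def raymarch(volume, origin, direction, n_steps=128, step_size=0.01):
    # Front-to-back alpha compositing of a scalar volume sampled on the unit cube.
    color, alpha = 0.0, 0.0
    pos = np.asarray(origin, dtype=float)
    step = np.asarray(direction, dtype=float) * step_size
    dims = np.array(volume.shape)
    for _ in range(n_steps):
        idx = np.clip((pos * dims).astype(int), 0, dims - 1)  # nearest-neighbor sample
        density = volume[tuple(idx)]
        a = density * 0.1                    # toy transfer function: density -> opacity
        color += (1.0 - alpha) * a * density
        alpha += (1.0 - alpha) * a
        if alpha > 0.99:                     # early ray termination
            break
        pos += step
    return color, alpha

# Toy volume: a fuzzy ball of density centered in the unit cube.
axes = np.meshgrid(*[np.linspace(-1, 1, 64)] * 3)
vol = np.clip(1.0 - np.linalg.norm(np.stack(axes), axis=0), 0.0, 1.0)
print(raymarch(vol, origin=[0.0, 0.5, 0.5], direction=[1.0, 0.0, 0.0]))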
Chen, Chih-Fan; Bolas, Mark; Rosenberg, Evan Suma
Rapid Creation of Photorealistic Virtual Reality Content with Consumer Depth Cameras Proceedings Article
In: Proceedings of Virtual Reality (VR), 2017 IEEE, pp. 473–474, IEEE, Los Angeles, CA, 2017, ISBN: 978-1-5090-6647-6.
@inproceedings{chen_rapid_2017,
title = {Rapid Creation of Photorealistic Virtual Reality Content with Consumer Depth Cameras},
author = {Chih-Fan Chen and Mark Bolas and Evan Suma Rosenberg},
url = {http://ieeexplore.ieee.org/abstract/document/7892385/},
doi = {10.1109/VR.2017.7892385},
isbn = {978-1-5090-6647-6},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of Virtual Reality (VR), 2017 IEEE},
pages = {473–474},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {Virtual objects are essential for building environments in virtual reality (VR) applications. However, creating photorealistic 3D models is not easy, and handcrafting a detailed 3D model from a real object can be time- and labor-intensive. An alternative is to build a structured camera array such as a light stage to reconstruct the model from a real object. However, these technologies are very expensive and not practical for most users. In this work, we demonstrate a complete end-to-end pipeline for the capture, processing, and rendering of view-dependent 3D models in virtual reality from a single consumer-grade RGB-D camera. The geometry model and the camera trajectories are automatically reconstructed from an RGB-D image sequence captured offline. Based on the HMD position, selected images are used for real-time model rendering. The result of this pipeline is a 3D mesh with view-dependent textures suitable for real-time rendering in virtual reality. Specular reflections and light-burst effects are especially noticeable when users view the objects from different perspectives in a head-tracked environment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
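The view-dependent rendering step Chen et al. describe, where captured images are selected based on the HMD position, reduces at its core to ranking capture viewpoints against the current view direction. A minimal sketch follows; the cosine-similarity criterion is our assumption, and the paper's actual selection and blending are more involved.

import numpy as np

def select_view_images(capture_dirs, view_dir, k=3):
    # Rank captured camera viewing directions by cosine similarity to the
    # current HMD view direction and return the indices of the k best matches.
    # (The similarity measure is an assumption, not the paper's exact scheme.)
    capture_dirs = capture_dirs / np.linalg.norm(capture_dirs, axis=1, keepdims=True)
    view_dir = np.asarray(view_dir, dtype=float)
    view_dir = view_dir / np.linalg.norm(view_dir)
    return np.argsort(-(capture_dirs @ view_dir))[:k]

# 120 hypothetical capture viewpoints from an offline RGB-D sweep.
dirs = np.random.default_rng(6).normal(size=(120, 3))
print(select_view_images(dirs, view_dir=[0.0, 0.0, 1.0]))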
Parra, Federico; Miljkovitch, Raphaële; Persiaux, Gwenaelle; Morales, Michelle; Scherer, Stefan
The Multimodal Assessment of Adult Attachment Security: Developing the Biometric Attachment Test Journal Article
In: Journal of Medical Internet Research, vol. 19, no. 4, pp. e100, 2017, ISSN: 1438-8871.
@article{parra_multimodal_2017,
title = {The Multimodal Assessment of Adult Attachment Security: Developing the Biometric Attachment Test},
author = {Federico Parra and Raphaële Miljkovitch and Gwenaelle Persiaux and Michelle Morales and Stefan Scherer},
url = {http://www.jmir.org/2017/4/e100/},
doi = {10.2196/jmir.6898},
issn = {1438-8871},
year = {2017},
date = {2017-03-01},
journal = {Journal of Medical Internet Research},
volume = {19},
number = {4},
pages = {e100},
abstract = {Background: Attachment theory has been proven essential for mental health, including psychopathology, development, and interpersonal relationships. Validated psychometric instruments to measure attachment abound but suffer from shortcomings common to traditional psychometrics. Recent developments in multimodal fusion and machine learning pave the way for new automated and objective psychometric instruments for adult attachment that combine psychophysiological, linguistic, and behavioral analyses in the assessment of the construct.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Nouri, Elnaz; Georgila, Kallirroi; Traum, David
Culture-specific models of negotiation for virtual characters: multi-attribute decision-making based on culture-specific values Journal Article
In: AI & SOCIETY, vol. 32, no. 1, pp. 51–63, 2017, ISSN: 0951-5666, 1435-5655.
@article{nouri_culture-specific_2017,
title = {Culture-specific models of negotiation for virtual characters: multi-attribute decision-making based on culture-specific values},
author = {Elnaz Nouri and Kallirroi Georgila and David Traum},
url = {http://link.springer.com/10.1007/s00146-014-0570-7},
doi = {10.1007/s00146-014-0570-7},
issn = {0951-5666, 1435-5655},
year = {2017},
date = {2017-02-01},
journal = {AI & SOCIETY},
volume = {32},
number = {1},
pages = {51–63},
abstract = {We posit that observed differences in negotiation performance across cultures can be explained by participants trying to optimize across multiple values, where the relative importance of values differs across cultures. We look at two ways for specifying weights on values for different cultures: one in which the weights of the model are hand-crafted, based on intuition interpreting Hofstede dimensions for the cultures, and one in which the weights of the model are learned from data using inverse reinforcement learning (IRL). We apply this model to the Ultimatum Game and integrate it into a virtual human dialog system. We show that weights learned from IRL surpass both a weak baseline with random weights and a strong baseline considering only one factor for maximizing gain in own wealth in accounting for the behavior of human players from four different cultures. We also show that the weights learned with our model for one culture outperform weights learned for other cultures when playing against opponents of the first culture.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
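The multi-attribute decision model in Nouri et al. scores an action as a weighted sum of culture-specific values. A minimal sketch for the Ultimatum Game follows; the three value features and both weight vectors below are invented for illustration and are not the weights their IRL procedure learned.

def offer_utility(own_share, total, weights):
    # Score a proposed split as a weighted combination of value features.
    other_share = total - own_share
    features = {
        "own_gain": own_share / total,                           # own wealth
        "other_gain": other_share / total,                       # opponent's wealth
        "fairness": 1.0 - abs(own_share - other_share) / total,  # equality of the split
    }
    return sum(weights[k] * v for k, v in features.items())

# Hypothetical weight vectors for two cultures (not learned values).
culture_a = {"own_gain": 0.7, "other_gain": 0.1, "fairness": 0.2}
culture_b = {"own_gain": 0.4, "other_gain": 0.2, "fairness": 0.4}
for share in (5, 7, 9):  # proposer keeps `share` of 10 units
    print(share, offer_utility(share, 10, culture_a), offer_utility(share, 10, culture_b))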
Talbot, Thomas B
Making Lifelike Medical Games in the Age of Virtual Reality An Update on “Playing Games with Biology” from 2013 Book Section
In: Transforming Gaming and Computer Simulation Technologies across Industries, pp. 103–119, IGI Global, Hershey, PA, 2017, ISBN: 978-1-5225-1817-4 978-1-5225-1818-1.
@incollection{talbot_making_2017,
title = {Making Lifelike Medical Games in the Age of Virtual Reality An Update on “Playing Games with Biology” from 2013},
author = {Thomas B Talbot},
url = {http://services.igi-global.com/resolvedoi/resolve.aspx?doi=10.4018/978-1-5225-1817-4},
doi = {10.4018/978-1-5225-1817-4},
isbn = {978-1-5225-1817-4 978-1-5225-1818-1},
year = {2017},
date = {2017-01-01},
booktitle = {Transforming Gaming and Computer Simulation Technologies across Industries},
pages = {103–119},
publisher = {IGI Global},
address = {Hershey, PA},
abstract = {Medical simulations differ from other training modalities in that life processes must be simulated as part of the experience. Biological fidelity is the degree to which character anatomical appearance and physiology behavior are represented within a game or simulation. Methods to achieve physiological fidelity include physiology engines, complex state machines, simple state machines and kinetic models. Games health scores that can be used in medical sims. Selection of technique depends upon the goals of the simulation, expected user inputs, development budget and level of fidelity required. Trends include greater availability of physiology engines rapid advances in virtual reality (VR). In VR, the expectation for a naturalistic interface is much greater, resulting in technical challenges regarding natural language and gesture-based interaction. Regardless of the technical approach, the user’s perception of biological fidelity, responsiveness to user inputs and the ability to correct mistakes is often more important than the underlying biological fidelity of the model.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Lucas, Gale M.; Gratch, Jonathan; Malandrakis, Nikolaos; Szablowski, Evan; Fessler, Eli; Nichols, Jeffrey
GOAALLL!: Using Sentiment in the World Cup to Explore Theories of Emotion Journal Article
In: Image and Vision Computing, 2017, ISSN: 0262-8856.
@article{lucas_goaalll_2017,
title = {GOAALLL!: Using Sentiment in the World Cup to Explore Theories of Emotion},
author = {Gale M. Lucas and Jonathan Gratch and Nikolaos Malandrakis and Evan Szablowski and Eli Fessler and Jeffrey Nichols},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0262885617300148},
doi = {10.1016/j.imavis.2017.01.006},
issn = {0262-8856},
year = {2017},
date = {2017-01-01},
journal = {Image and Vision Computing},
abstract = {Sporting events evoke strong emotions amongst fans and thus act as natural laboratories to explore emotions and how they unfold in the wild. Computational tools, such as sentiment analysis, provide new ways to examine such dynamic emotional processes. In this article we use sentiment analysis to examine tweets posted during the 2014 World Cup. Such analysis gives insight into how people respond to highly emotional events, how these emotions are shaped by contextual factors such as prior expectations, and how they change as events unfold over time. Here we report on some preliminary analysis of a World Cup Twitter corpus using sentiment analysis techniques. After performing initial tests of validation for sentiment analysis on data in this corpus, we show these tools can give new insights into existing theories of what makes a sporting match exciting. This analysis seems to suggest that, contrary to assumptions in sports economics, excitement relates to expressions of negative emotion. The results are discussed in terms of innovations in methodology and understanding the role of emotion for “tuning in” to real-world events. We also discuss some challenges that such data present for existing sentiment analysis techniques and discuss future analysis.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
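The sentiment analysis applied to the World Cup corpus assigns valence scores to tweets. A lexicon-based scorer of the kind commonly used as a baseline is sketched below; the tiny lexicon is invented, and the paper's actual pipeline is more sophisticated.

LEXICON = {"goal": 2.0, "win": 1.5, "amazing": 2.0, "lost": -1.5,
           "awful": -2.0, "robbed": -1.0}  # invented toy lexicon

def tweet_sentiment(tweet):
    # Average the valence of every lexicon word found in the tweet.
    words = tweet.lower().split()
    scores = [LEXICON[w] for w in words if w in LEXICON]
    return sum(scores) / len(scores) if scores else 0.0

print(tweet_sentiment("GOAL what an amazing finish"))     # positive
print(tweet_sentiment("we got robbed awful refereeing"))  # negative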
Bautista, Merrick; Leeds, Andrew; Tokel, Tugba; Talbot, Thomas B.
Spoken vs. typed questioning in a conversational medical interview with virtual standardized patients Proceedings Article
In: Proceedings of the International Meeting on Simulation in Healthcare 2017, Orlando, FL, 2017.
@inproceedings{bautista_spoken_2017-1,
title = {Spoken vs. typed questioning in a conversational medical interview with virtual standardized patients},
author = {Merrick Bautista and Andrew Leeds and Tugba Tokel and Thomas B. Talbot},
url = {http://ict.usc.edu/pubs/SPS_IMSH%202017_Final_Talbot%20Tokel%20Leeds%20Bautista.pdf},
year = {2017},
date = {2017-01-01},
booktitle = {Proceedings of the International Meeting on Simulation in Healthcare 2017},
address = {Orlando, FL},
abstract = {There have been numerous attempts to replicate the experience of a human standardized patient (Barrows & Anderson, 1964) on a computer for anytime-anywhere access to the experience. USC Standard Patient seeks to: • improve clinic-based medical encounter simulation with the goal to create engaging virtual standardized patient (VSP) encounters, • enable objective and meaningful assessment of learner interview performance and mature physician interviewing & diagnostic skills. Virtual standardized patients (VSP): • A conversational simulated patient used for medical training and capable of natural language interaction with verbal and nonverbal behavior responses • Offers consistent, objective experience and detailed user feedback to learners},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Eini, Dalit Shefer; Ratzon, Navah Z.; Rizzo, Albert A.; Yeh, Shih-Ching; Lange, Belinda; Yaffe, Batia; Daich, Alexander; Weiss, Patrice L.; Kizony, Rachel
Camera-tracking gaming control device for evaluation of active wrist flexion and extension Journal Article
In: Journal of Hand Therapy, vol. 30, no. 1, pp. 89–96, 2017, ISSN: 0894-1130.
@article{shefer_eini_camera-tracking_2017,
title = {Camera-tracking gaming control device for evaluation of active wrist flexion and extension},
author = {Dalit Shefer Eini and Navah Z. Ratzon and Albert A. Rizzo and Shih-Ching Yeh and Belinda Lange and Batia Yaffe and Alexander Daich and Patrice L. Weiss and Rachel Kizony},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0894113016301132},
doi = {10.1016/j.jht.2016.07.002},
issn = {0894-1130},
year = {2017},
date = {2017-01-01},
journal = {Journal of Hand Therapy},
volume = {30},
number = {1},
pages = {89–96},
abstract = {Study Design: Cross sectional. Introduction: Measuring wrist range of motion (ROM) is an essential procedure in hand therapy clinics. Purpose of the Study: To test the reliability and validity of a dynamic ROM assessment, the Camera WristTracker (CWT). Methods: Wrist flexion and extension ROM of 15 patients with distal radius fractures and 15 matched controls were assessed with the CWT and with a universal goniometer. Results: One-way model intraclass correlation coefficient analysis indicated high test-retest reliability for extension (ICC = 0.92) and moderate reliability for flexion (ICC = 0.49). Standard error for extension was 2.45 and for flexion was 4.07. Repeated-measures analysis revealed a significant main effect for group; ROM was greater in the control group (F[1, 28] = 47.35; P < .001). The concurrent validity of the CWT was partially supported. Conclusion: The results indicate that the CWT may provide highly reliable scores for dynamic wrist extension ROM, and moderately reliable scores for flexion, in people recovering from a distal radius fracture. Level of Evidence: N/A.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
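The one-way model intraclass correlation coefficients reported for the Camera WristTracker (ICC = 0.92 for extension, 0.49 for flexion) follow the standard one-way random-effects formulation. A minimal sketch, assuming the Shrout & Fleiss ICC(1,1) variant and synthetic test-retest data:

import numpy as np

def icc_one_way(scores):
    # ICC(1,1): one-way random-effects intraclass correlation.
    # scores: (n_subjects, k_sessions) array of repeated measurements.
    # Whether this exact variant matches the paper's analysis is an assumption.
    n, k = scores.shape
    subj_means = scores.mean(axis=1)
    msb = k * ((subj_means - scores.mean()) ** 2).sum() / (n - 1)      # between subjects
    msw = ((scores - subj_means[:, None]) ** 2).sum() / (n * (k - 1))  # within subjects
    return (msb - msw) / (msb + (k - 1) * msw)

# Synthetic data: 15 subjects, 2 sessions of wrist extension ROM in degrees.
rng = np.random.default_rng(0)
truth = rng.normal(55, 10, size=(15, 1))
print(round(icc_one_way(truth + rng.normal(0, 3, size=(15, 2))), 2))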
Rizzo, Albert "Skip"
The Ultimate Skinner Box: Clinical Virtual Reality 1990-2016 Journal Article
In: Engadget, 2017.
@article{rizzo_ultimate_2017,
title = {The Ultimate Skinner Box: Clinical Virtual Reality 1990-2016},
author = {Albert "Skip" Rizzo},
url = {https://www.engadget.com/2017/01/10/the-ultimate-skinner-box-clinical-virtual-reality-1990-2016/},
year = {2017},
date = {2017-01-01},
journal = {Engadget},
abstract = {The last decade has given rise to a dramatic increase in the global adoption of innovative digital technologies. This can be seen in the rapid acceptance and growing demand for mobile devices, high-speed network access, smart televisions, social media, hyper-realistic digital games, behavioral sensing devices, and now the 2nd coming of Virtual Reality! Such consumer-driven technologies that were considered to be visionary just 10 years ago have now become common and increasingly essential fixtures in the current digital landscape.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Woolley, J. D.; Chuang, B.; Fussell, C.; Scherer, S.; Biagianti, B.; Fulford, D.; Mathalon, D. H.; Vinogradov, S.
Intranasal oxytocin increases facial expressivity, but not ratings of trustworthiness, in patients with schizophrenia and healthy controls Journal Article
In: Psychological Medicine, pp. 1–12, 2017, ISSN: 0033-2917, 1469-8978.
@article{woolley_intranasal_2017,
title = {Intranasal oxytocin increases facial expressivity, but not ratings of trustworthiness, in patients with schizophrenia and healthy controls},
author = {J. D. Woolley and B. Chuang and C. Fussell and S. Scherer and B. Biagianti and D. Fulford and D. H. Mathalon and S. Vinogradov},
url = {https://www.cambridge.org/core/product/identifier/S0033291716003433/type/journal_article},
doi = {10.1017/S0033291716003433},
issn = {0033-2917, 1469-8978},
year = {2017},
date = {2017-01-01},
journal = {Psychological Medicine},
pages = {1–12},
abstract = {Blunted facial affect is a common negative symptom of schizophrenia. Additionally, assessing the trustworthiness of faces is a social cognitive ability that is impaired in schizophrenia. Currently available pharmacological agents are ineffective at improving either of these symptoms, despite their clinical significance. The hypothalamic neuropeptide oxytocin has multiple prosocial effects when administered intranasally to healthy individuals and shows promise in decreasing negative symptoms and enhancing social cognition in schizophrenia. Although two small studies have investigated oxytocin's effects on ratings of facial trustworthiness in schizophrenia, its effects on facial expressivity have not been investigated in any population. We investigated the effects of oxytocin on facial emotional expressivity while participants performed a facial trustworthiness rating task in 33 individuals with schizophrenia and 35 age-matched healthy controls using a double-blind, placebo-controlled, cross-over design. Participants rated the trustworthiness of presented faces interspersed with emotionally evocative photographs while being video-recorded. Participants’ facial expressivity in these videos was quantified by blind raters using a well-validated manualized approach (i.e. the Facial Expression Coding System; FACES). While oxytocin administration did not affect ratings of facial trustworthiness, it significantly increased facial expressivity in individuals with schizophrenia (Z = −2.33},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Woo, Simon; Kaiser, Elsi; Artstein, Ron; Mirkovic, Jelena
Life-experience passwords (LEPs) Proceedings Article
In: Proceedings of the 32nd Annual Conference on Computer Security Applications, pp. 113–126, ACM Press, Los Angeles, CA, 2016, ISBN: 978-1-4503-4771-6.
@inproceedings{woo_life-experience_2016,
title = {Life-experience passwords (LEPs)},
author = {Simon Woo and Elsi Kaiser and Ron Artstein and Jelena Mirkovic},
url = {http://dl.acm.org/citation.cfm?doid=2991079.2991107},
doi = {10.1145/2991079.2991107},
isbn = {978-1-4503-4771-6},
year = {2016},
date = {2016-12-01},
booktitle = {Proceedings of the 32nd Annual Conference on Computer Security Applications},
pages = {113–126},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {Passwords are widely used for user authentication, but they are often difficult for a user to recall, easily cracked by automated programs and heavily reused. Security questions are also used for secondary authentication. They are more memorable than passwords, but are very easily guessed. We propose a new authentication mechanism, called "life-experience passwords (LEPs)," which outperforms passwords and security questions, both at recall and at security. Each LEP consists of several facts about a user-chosen past experience, such as a trip, a graduation, a wedding, etc. At LEP creation, the system extracts these facts from the user's input and transforms them into questions and answers. At authentication, the system prompts the user with questions and matches her answers with the stored ones. In this paper we propose two LEP designs, and evaluate them via user studies. We further compare LEPs to passwords, and find that: (1) LEPs are 30–47 bits stronger than an ideal, randomized, 8-character password, (2) LEPs are up to 3x more memorable, and (3) LEPs are reused half as often as passwords. While both LEPs and security questions use personal experiences for authentication, LEPs use several questions, which are closely tailored to each user. This increases LEP security against guessing attacks. In our evaluation, only 0.7% of LEPs were guessed by friends, while prior research found that friends could guess 17–25% of security questions. LEPs also contained a very small amount of sensitive or fake information. All these qualities make LEPs a promising, new authentication approach.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
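The strength claim in the LEP paper, 30–47 bits above an ideal randomized 8-character password, can be sanity-checked with a back-of-the-envelope entropy calculation: log2(95^8) ≈ 52.6 bits. The 95-printable-ASCII alphabet below is our assumption, not a figure taken from the paper.

import math

# Ideal random 8-char password over printable ASCII (95 symbols is our assumption).
baseline_bits = 8 * math.log2(95)
print(f"ideal 8-char password: {baseline_bits:.1f} bits")  # ~52.6 bits
for gain in (30, 47):  # the paper's reported range of improvement
    print(f"LEP (+{gain} bits): {baseline_bits + gain:.1f} bits")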
Talbot, Thomas B.; Lyon, Thomas D.; Rizzo, Albert; John, Bruce
Virtual Child Witness-Effects of single and multiple use on performance with Novice and Expert cohorts in a structured virtual human interview Proceedings Article
In: Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2016, Orlando, Florida, 2016.
@inproceedings{talbot_virtual_2016,
title = {Virtual Child Witness-Effects of single and multiple use on performance with Novice and Expert cohorts in a structured virtual human interview},
author = {Thomas B. Talbot and Thomas D. Lyon and Albert Rizzo and Bruce John},
url = {http://ict.usc.edu/pubs/Virtual%20Child%20Witness%20Effects%20of%20single%20and%20multiple%20use%20on%20performance%20with%20Novice%20and%20Expert%20cohorts%20in%20a%20structured%20virtual%20human%20interview.pdf},
year = {2016},
date = {2016-12-01},
booktitle = {Proceedings of the Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2016},
address = {Orlando, Florida},
abstract = {Virtual human avatars can be used to train and assess a myriad of complex skills, such as interviewing, interpersonal, and clinical skills, in a safe environment that provides consistency, reduced cost, greater accessibility, and objective feedback. We created a structured virtual human interview which consisted of a conversational avatar that interacts verbally in response to on screen question choices. Our prototype was a forensic interview simulation called Virtual Child Witness (VCW). VCW provides a content-rich interview in response to open-ended questions and is designed to assess user’s interviewing strategy. In a quasi-experimental design, we evaluated 222 subjects to determine if the system could discriminate between Experts (M = .713},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Saito, Shunsuke; Wei, Lingyu; Hu, Liwen; Nagano, Koki; Li, Hao
Photorealistic Facial Texture Inference Using Deep Neural Networks Journal Article
In: arXiv preprint arXiv:1612.00523, 2016.
@article{saito_photorealistic_2016,
title = {Photorealistic Facial Texture Inference Using Deep Neural Networks},
author = {Shunsuke Saito and Lingyu Wei and Liwen Hu and Koki Nagano and Hao Li},
url = {https://arxiv.org/abs/1612.00523},
year = {2016},
date = {2016-12-01},
journal = {arXiv preprint arXiv:1612.00523},
abstract = {We present a data-driven inference method that can synthesize a photorealistic texture map of a complete 3D face model given a partial 2D view of a person in the wild. After an initial estimation of shape and low-frequency albedo, we compute a high-frequency partial texture map, without the shading component, of the visible face area. To extract the fine appearance details from this incomplete input, we introduce a multi-scale detail analysis technique based on midlayer feature correlations extracted from a deep convolutional neural network. We demonstrate that fitting a convex combination of feature correlations from a high-resolution face database can yield a semantically plausible facial detail description of the entire face. A complete and photorealistic texture map can then be synthesized by iteratively optimizing for the reconstructed feature correlations. Using these high-resolution textures and a commercial rendering framework, we can produce high-fidelity 3D renderings that are visually comparable to those obtained with state-of-the-art multi-view face capture systems. We demonstrate successful face reconstructions from a wide range of low resolution input images, including those of historical figures. In addition to extensive evaluations, we validate the realism of our results using a crowdsourced user study.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
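The "midlayer feature correlations" in Saito et al. are, in style-transfer-like methods, Gram matrices of CNN activations. A minimal sketch with random activations standing in for a real network's output; the normalization below is our assumption:

import numpy as np

def feature_correlations(feature_map):
    # Gram matrix of one mid-layer feature map: (channels, height, width)
    # activations -> (channels, channels) correlation matrix.
    # (Normalizing by h*w is a common convention, assumed here.)
    c, h, w = feature_map.shape
    flat = feature_map.reshape(c, h * w)
    return flat @ flat.T / (h * w)

acts = np.random.default_rng(1).normal(size=(64, 32, 32))  # stand-in activations
print(feature_correlations(acts).shape)  # (64, 64)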
McAlinden, Ryan; Kang, Sin-Hwa; Nye, Benjamin; Phillips, Artemisa; Campbell, Julia; Goldberg, Stephan L.
Cost-Effective Strategies for Producing Engaging Online Courseware Proceedings Article
In: Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016, National Training and Simulation Association, Orlando, FL, 2016.
@inproceedings{mcalinden_cost-effective_2016,
title = {Cost-Effective Strategies for Producing Engaging Online Courseware},
author = {Ryan McAlinden and Sin-Hwa Kang and Benjamin Nye and Artemisa Phillips and Julia Campbell and Stephan L. Goldberg},
url = {http://www.iitsecdocs.com/search},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016},
publisher = {National Training and Simulation Association},
address = {Orlando, FL},
abstract = {As distributed learning (dL) and computer-based training (CBT) continue to proliferate, the methods of delivery often remain unengaging and bland for participants. Though many of the leaders in commercial online learning have improved their delivery style and quality in recent years, they continue to fall short in terms of user engagement and satisfaction. PowerPoint regurgitation and video lectures are commonplace and leave end users uninspired and wanting more. This paper discusses results from an ongoing research project, Captivating Virtual Instruction for Training (CVIT), which is aimed at understanding and improving dL through a series of recommendations and best practices for promoting and enhancing student engagement online. Though the central focus is on engagement, and how that translates to learning potential, a third variable (cost) has been examined to understand the financial and resource impacts on making content more interesting (i.e. the return on investment, or ROI). The paper presents findings from a 3-year-long experiment comparing existing dL methods and techniques both within and outside of the Army. The project developed two dL versions of an existing Army course (Advanced Situational Awareness-Basic (ASA-B)) – the first was designed around producing material that was as engaging and as immersive as possible within a target budget; the second was a scaled-down version using more traditional, yet contemporary dL techniques (PowerPoint recital, video lectures). The two were then compared along three dimensions – engagement, learning and cost. The findings show that improved engagement in distributed courseware is possible without breaking the bank, though the returns on learning with these progressive approaches remain inconclusive. More importantly, it was determined that the quality and experience of the designers, production staff, writers, animators, programmers, and others cannot be underestimated, and that the familiar phrase – ‘you get what you pay for’ is as true with online learning as it is with other areas of content design and software development.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nack, Frank; Gordon, Andrew S. (Ed.)
Interactive Storytelling Book
Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-48278-1 978-3-319-48279-8.
@book{nack_interactive_2016,
title = {Interactive Storytelling},
editor = {Frank Nack and Andrew S. Gordon},
url = {http://link.springer.com/10.1007/978-3-319-48279-8},
doi = {10.1007/978-3-319-48279-8},
isbn = {978-3-319-48278-1 978-3-319-48279-8},
year = {2016},
date = {2016-11-01},
volume = {10045},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
series = {Lecture Notes in Computer Science},
abstract = {This book constitutes the refereed proceedings of the 9th International Conference on Interactive Digital Storytelling, ICIDS 2016, held in Los Angeles, CA, USA, in November 2016. The 26 revised full papers and 8 short papers presented together with 9 posters, 4 workshop, and 3 demonstration papers were carefully reviewed and selected from 88 submissions. The papers are organized in topical sections on analyses and evaluation systems; brave new ideas; intelligent narrative technologies; theoretical foundations; and usage scenarios and applications.},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
Narang, Sahil; Best, Andrew; Randhavane, Tanmay; Shapiro, Ari; Manocha, Dinesh
PedVR: simulating gaze-based interactions between a real user and virtual crowds Proceedings Article
In: Proceedings of the 22nd ACM Conference on Virtual Reality Software and Technology, pp. 91–100, ACM Press, Munich, Germany, 2016, ISBN: 978-1-4503-4491-3.
@inproceedings{narang_pedvr_2016,
title = {PedVR: simulating gaze-based interactions between a real user and virtual crowds},
author = {Sahil Narang and Andrew Best and Tanmay Randhavane and Ari Shapiro and Dinesh Manocha},
url = {http://dl.acm.org/citation.cfm?id=2993378},
doi = {10.1145/2993369.2993378},
isbn = {978-1-4503-4491-3},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings of the 22nd ACM Conference on Virtual Reality Software and Technology},
pages = {91–100},
publisher = {ACM Press},
address = {Munich, Germany},
abstract = {We present a novel interactive approach, PedVR, to generate plausible behaviors for a large number of virtual humans, and to enable natural interaction between the real user and virtual agents. Our formulation is based on a coupled approach that combines a 2D multi-agent navigation algorithm with 3D human motion synthesis. The coupling can result in plausible movement of virtual agents and can generate gazing behaviors, which can considerably increase the believability. We have integrated our formulation with the DK-2 HMD and demonstrate the benefits of our crowd simulation algorithm over prior decoupled approaches. Our user evaluation suggests that the combination of coupled methods and gazing behavior can considerably increase the behavioral plausibility.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zadeh, Amir; Zellers, Rowan; Pincus, Eli; Morency, Louis-Philippe
Multimodal sentiment intensity analysis in videos: Facial gestures and verbal messages Journal Article
In: IEEE Intelligent Systems, vol. 31, no. 6, pp. 82–88, 2016, ISSN: 1541-1672.
@article{zadeh_multimodal_2016,
title = {Multimodal sentiment intensity analysis in videos: Facial gestures and verbal messages},
author = {Amir Zadeh and Rowan Zellers and Eli Pincus and Louis-Philippe Morency},
url = {http://ieeexplore.ieee.org/abstract/document/7742221/},
doi = {10.1109/MIS.2016.94},
issn = {1541-1672},
year = {2016},
date = {2016-11-01},
journal = {IEEE Intelligent Systems},
volume = {31},
number = {6},
pages = {82–88},
abstract = {People share their opinions, stories, and reviews through online video sharing websites every day. The automatic analysis of these online opinion videos is bringing new or understudied research challenges to the field of computational linguistics and multimodal analysis. Among these challenges is the fundamental question of exploiting the dynamics between visual gestures and verbal messages to be able to better model sentiment. This article addresses this question in four ways: introducing the first multimodal dataset with opinion-level sentiment intensity annotations; studying the prototypical interaction patterns between facial gestures and spoken words when inferring sentiment intensity; proposing a new computational representation, called multimodal dictionary, based on a language-gesture study; and evaluating the authors' proposed approach in a speaker-independent paradigm for sentiment intensity prediction. The authors' study identifies four interaction types between facial gestures and verbal content: neutral, emphasizer, positive, and negative interactions. Experiments show statistically significant improvement when using multimodal dictionary representation over the conventional early fusion representation (that is, feature concatenation).},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
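The conventional early fusion baseline the article compares against is, as it notes, plain feature concatenation across modalities. A one-line sketch with invented feature sizes:

import numpy as np

rng = np.random.default_rng(4)
visual = rng.normal(size=(100, 35))    # per-segment facial gesture features (sizes invented)
verbal = rng.normal(size=(100, 300))   # per-segment verbal (word) features
early_fused = np.concatenate([visual, verbal], axis=1)  # early fusion = concatenation
print(early_fused.shape)  # (100, 335)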
Spicer, Ryan; McAlinden, Ryan; Conover, Damon
Producing Usable Simulation Terrain Data from UAS-Collected Imagery Proceedings Article
In: Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016, National Training and Simulation Association, Orlando, FL, 2016.
@inproceedings{spicer_producing_2016,
title = {Producing Usable Simulation Terrain Data from UAS-Collected Imagery},
author = {Ryan Spicer and Ryan McAlinden and Damon Conover},
url = {http://ict.usc.edu/pubs/Producing%20Usable%20Simulation%20Terrain%20Data%20from%20UAS-Collected%20Imagery.pdf},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016},
publisher = {National Training and Simulation Association},
address = {Orlando, FL},
abstract = {At I/ITSEC 2015, we presented an approach to produce geo-referenced, highly-detailed (10cm or better) 3D models for an area of interest using imagery collected from cheap, commercial-off-the-shelf, multirotor Unmanned Aerial Systems (UAS). This paper discusses the next steps in making this data usable for modern-day game and simulation engines, specifically how it may be visually rendered, used and reasoned with by the physics system, the artificial intelligence (AI), the simulation entities, and other components. The pipeline begins by segmenting the georeferenced point cloud created by the UAS imagery into terrain (elevation data) and structures or objects, including vegetation, structures, roads and other surface features. Attributes such as slope, edge detection, and color matching are used to perform segmentation and clustering. After the terrain and objects are segmented, they are exported into engine-agnostic formats (georeferenced GeoTIFF digital elevation model (DEM) and ground textures, OBJ/FBX mesh files and JPG textures), which serve as the basis for their representation in-engine. The data is then attributed with metadata used in reasoning – collision surfaces, navigation meshes/networks, apertures, physics attributes (line-of-sight, ray-tracing), material surfaces, and others. Finally, it is loaded into the engine for real-time processing during runtime. The pipeline has been tested with several engines, including Unity, VBS, Unreal and TitanIM. The paper discusses the pipeline from collection to rendering, as well as how other market/commercially-derived data can serve as the foundation for M&S terrain in the future. Examples of the output of this research are available online (McAlinden, 2016).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
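One of the attributes driving the terrain segmentation described above is per-cell slope from the digital elevation model. A minimal sketch; the gradient-based slope definition is our assumption about how such an attribute would be computed, not the paper's exact method.

import numpy as np

def slope_degrees(dem, cell_size):
    # Slope at each DEM cell from finite-difference gradients.
    # dem: 2D heights in meters; cell_size: ground sample distance in meters.
    dzdy, dzdx = np.gradient(dem, cell_size)
    return np.degrees(np.arctan(np.hypot(dzdx, dzdy)))

dem = np.random.default_rng(7).uniform(0.0, 2.0, size=(100, 100))  # toy DEM
print(slope_degrees(dem, cell_size=0.10).max())  # 10 cm GSD, as in the paper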
LeGendre, Chloe; Yu, Xueming; Debevec, Paul
Efficient Multispectral Reflectance Function Capture for Image-Based Relighting Proceedings Article
In: Proceedings of the Color and Imaging Conference, pp. 47–58, Society for Imaging Science and Technology, San Diego, CA, 2016.
@inproceedings{legendre_efficient_2016,
title = {Efficient Multispectral Reflectance Function Capture for Image-Based Relighting},
author = {Chloe LeGendre and Xueming Yu and Paul Debevec},
url = {http://www.ingentaconnect.com/contentone/ist/cic/2016/00002016/00000001/art00008},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings of the Color and Imaging Conference},
pages = {47–58},
publisher = {Society for Imaging Science and Technology},
address = {San Diego, CA},
abstract = {Image-based relighting (IBRL) renders the appearance of a subject in a novel lighting environment as a linear combination of the images of its reflectance field, the appearance of the subject lit by each incident lighting direction. Traditionally, a tristimulus color camera records the reflectance field as the subject is sequentially illuminated by broad-spectrum white light sources from each direction. Using a multispectral LED sphere and either a tristimulus (RGB) or monochrome camera, we photograph a still life scene to acquire its multispectral reflectance field – its appearance for every lighting direction for multiple incident illumination spectra. For the tristimulus camera, we demonstrate improved color rendition for IBRL when using the multispectral reflectance field, producing a closer match to the scene's actual appearance in a real-world illumination environment. For the monochrome camera, we also show close visual matches. We additionally propose an efficient method for acquiring such multispectral reflectance fields, augmenting the traditional broad-spectrum lighting basis capture with only a few additional images equal to the desired number of spectral channels. In these additional images, we illuminate the subject by a complete sphere of each available narrow-band LED light source, in our case: red, amber, green, cyan, and blue. From the full-sphere illumination images, we promote the white-light reflectance functions for every direction to multispectral, effectively hallucinating the appearance of the subject under each LED spectrum for each lighting direction. We also use polarization imaging to separate the diffuse and specular components of the reflectance functions, spectrally promoting these components according to different models. We validate that the approximated multispectral reflectance functions closely match those generated by a fully multispectral omnidirectional lighting basis, suggesting a rapid multispectral reflectance field capture method which could be applied for live subjects.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
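Image-based relighting as defined in the abstract, the relit image as a linear combination of reflectance-field basis images, is a single weighted sum. A minimal sketch with toy dimensions:

import numpy as np

def relight(reflectance_field, light_weights):
    # reflectance_field: (n_directions, H, W, 3) images, one per lighting direction.
    # light_weights: (n_directions,) novel-environment intensities at those directions.
    return np.tensordot(light_weights, reflectance_field, axes=1)

rng = np.random.default_rng(2)
field = rng.uniform(size=(156, 4, 4, 3))  # toy reflectance field (156 directions)
env = rng.uniform(size=156)               # toy lighting environment samples
print(relight(field, env).shape)          # (4, 4, 3)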
2007
Jan, Dusan; Herrera, David; Martinovski, Bilyana; Novick, David; Traum, David
A Computational Model of Culture-Specific Conversational Behavior Proceedings Article
In: Proceedings of the Intelligent Virtual Humans Conference, Paris, France, 2007.
@inproceedings{jan_computational_2007,
title = {A Computational Model of Culture-Specific Conversational Behavior},
author = {Dusan Jan and David Herrera and Bilyana Martinovski and David Novick and David Traum},
url = {http://ict.usc.edu/pubs/A%20Computational%20Model%20of%20Culture-Specific%20Conversational%20Behavior.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Proceedings of the Intelligent Virtual Humans Conference},
address = {Paris, France},
abstract = {This paper presents a model for simulating cultural differences in the conversational behavior of virtual agents. The model provides parameters for differences in proxemics, gaze and overlap in turn taking. We present a review of literature on these factors and show results of a study where native speakers of North American English, Mexican Spanish and Arabic were asked to rate the realism of the simulations generated based on different cultural parameters with respect to their culture.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Yeh, Shih-Ching; Stewart, Jill; McLaughlin, Margaret; Parsons, Thomas D.; Winstein, Carolee J.; Rizzo, Albert
Evaluation Approach for Post-stroke Rehabilitation Via Virtual Reality Aided Motor Training Proceedings Article
In: Lecture Notes in Computer Science, pp. 378–387, 2007.
@inproceedings{yeh_evaluation_2007,
title = {Evaluation Approach for Post-stroke Rehabilitation Via Virtual Reality Aided Motor Training},
author = {Shih-Ching Yeh and Jill Stewart and Margaret McLaughlin and Thomas D. Parsons and Carolee J. Winstein and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Evaluation%20Approach%20for%20Post-stroke%20Rehabilitation%20Via%20Virtual%20Reality%20Aided%20Motor%20Training.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Lecture Notes in Computer Science},
pages = {378–387},
abstract = {This paper introduces an evaluation approach that was applied to clinical data collected from a virtual reality aided motor training program for post-stroke rehabilitation. The goal of the proposed evaluation approach is to diagnose the patient's current status (performance) and detect change in status over time (progression). Three measures, performance time, movement efficiency, and movement speed, were defined to represent kinematic features of reaching. 3-D performance maps and progression maps were generated based on each kinematic measure to visualize a single patient's behavior. The case study revealed the patient's current status as to direction and range of upper extremity reach ability, composed of pitch, yaw and arm length. Further, progression was found and visualized quantitatively over a series of practice sessions.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {inproceedings}
}
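The three kinematic measures named in the abstract can be computed from a single reaching trajectory. The sketch below uses common definitions (path-ratio efficiency, average speed) that are our assumptions, not necessarily the paper's exact formulas.

import numpy as np

def reach_kinematics(positions, timestamps):
    # positions: (n, 3) hand positions over one trial; timestamps: (n,) seconds.
    path_length = np.linalg.norm(np.diff(positions, axis=0), axis=1).sum()
    straight = np.linalg.norm(positions[-1] - positions[0])
    duration = timestamps[-1] - timestamps[0]
    return {"performance_time": duration,
            "movement_efficiency": straight / path_length,  # 1.0 = perfectly straight
            "movement_speed": path_length / duration}

t = np.linspace(0.0, 1.2, 50)
curve = np.stack([t, 0.1 * np.sin(4 * t), np.zeros_like(t)], axis=1)  # toy wavy reach
print(reach_kinematics(curve, t))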
Robertson, R. Kevin; Parsons, Thomas D.; Rogers, Steven A.; Braaten, Alyssa J.; Robertson, Wendy T.; Wilson, Susan; Hall, Colin D.
Assessing health-related quality of life in NeuroAIDS: some psychometric properties of the Neurological Quality of Life Questionnaire (NeuroQOL) Journal Article
In: Journal of Clinical Neuroscience, vol. 14, pp. 416–423, 2007.
@article{robertson_assessing_2007,
title = {Assessing health-related quality of life in NeuroAIDS: some psychometric properties of the Neurological Quality of Life Questionnaire (NeuroQOL)},
author = {R. Kevin Robertson and Thomas D. Parsons and Steven A. Rogers and Alyssa J. Braaten and Wendy T. Robertson and Susan Wilson and Colin D. Hall},
url = {http://ict.usc.edu/pubs/Assessing%20health-related%20quality%20of%20life%20in%20NeuroAIDS-%20some%20psychometric%20properties%20of%20the%20Neurological%20Quality%20of%20Life%20Questionnaire%20(NeuroQOL).pdf},
year = {2007},
date = {2007-01-01},
journal = {Journal of Clinical Neuroscience},
volume = {14},
pages = {416–423},
abstract = {Several studies were undertaken to assess the psychometric properties (reliability and initial convergent and discriminant construct validity) of the Neurological Quality of Life Questionnaire (NeuroQOL). The NeuroQOL contains 114 items answered in self-report Likert format, with higher scores reflecting better quality of life. Study one compared the questionnaire with existing quality of life measures (Symptom Distress Scale, Sickness Impact Profile) and disease stage, psychological, neuropsychological and neurological measures, and a significant correlation was found with each domain. The internal consistency reliability (alpha = 0.96), split-half reliability (r12 = 0.97), and test-retest reliability (coefficients were 0.78 for 6-month and 0.67 for one-year intervals between test and retest) were all found to be high and adequately stable. Overall, these results indicate acceptable reliability and initial construct validity for the NeuroQOL.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
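The internal consistency figure reported for the NeuroQOL (alpha = 0.96) is Cronbach's alpha. A minimal sketch over synthetic Likert responses:

import numpy as np

def cronbach_alpha(items):
    # items: (n_respondents, k_items).
    # alpha = k/(k-1) * (1 - sum of item variances / variance of the total score).
    n, k = items.shape
    return k / (k - 1) * (1 - items.var(axis=0, ddof=1).sum()
                          / items.sum(axis=1).var(ddof=1))

# Synthetic 5-point Likert data: a shared trait plus item noise gives high alpha.
rng = np.random.default_rng(3)
trait = rng.normal(size=(200, 1))
responses = np.clip(np.round(3 + trait + 0.5 * rng.normal(size=(200, 20))), 1, 5)
print(round(cronbach_alpha(responses), 2))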
Macedonio, Mary F.; Parsons, Thomas D.; Digiuseppe, Raymond A.; Wiederhold, Brenda K.; Rizzo, Albert
Immersiveness and Physiological Arousal within Panoramic Video-Based Virtual Reality Journal Article
In: CyberPsychology and Behavior, vol. 10, no. 4, pp. 508–515, 2007.
@article{macedonio_immersiveness_2007,
title = {Immersiveness and Physiological Arousal within Panoramic Video-Based Virtual Reality},
author = {Mary F. Macedonio and Thomas D. Parsons and Raymond A. Digiuseppe and Brenda K. Wiederhold and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Immersiveness%20and%20Physiological%20Arousal%20within%20Panoramic%20Video-Based%20Virtual%20Reality.pdf},
year = {2007},
date = {2007-01-01},
journal = {CyberPsychology and Behavior},
volume = {10},
number = {4},
pages = {508–515},
abstract = {In this paper, we discuss findings from a study that used panoramic video-based virtual environments (PVVEs) to induce self-reported anger. The study assessed "immersiveness" and physiological correlates of anger arousal (i.e., heart rate, blood pressure, galvanic skin response [GSR], respiration, and skin temperature). Results indicate that over time, panoramic video-based virtual scenarios can be, at the very least, physiologically arousing. Further, it can be affirmed from the results that hypnotizability, as defined by the applied measures, interacts with group on physiological arousal measures. Hence, physiological arousal appeared to be moderated by participant hypnotizability and absorption levels.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
2006
Busso, Carlos; Narayanan, Shrikanth
Interplay between linguistic and affective goals in facial expression during emotional utterances Proceedings Article
In: Proceedings of the 7th International Seminar on Speech Production, pp. 549–556, Ubatuba, Brazil, 2006.
@inproceedings{busso_interplay_2006,
title = {Interplay between linguistic and affective goals in facial expression during emotional utterances},
author = {Carlos Busso and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Interplay%20between%20linguistic%20and%20affective%20goals%20in%20facial%20expression%20during%20emotional%20utterances.pdf},
year = {2006},
date = {2006-12-01},
booktitle = {Proceedings of the 7th International Seminar on Speech Production},
pages = {549–556},
address = {Ubatuba, Brazil},
abstract = {Communicative goals are simultaneously expressed through gestures and speech to convey messages enriched with valuable verbal and non-verbal clues. This paper analyzes and quantifies how linguistic and affective goals are reflected in facial expressions. Using a database recorded from an actress with markers attached to her face, the facial features during emotional speech were compared with the ones expressed during neutral speech. The results show that the facial activeness is mainly driven by articulatory processes. However, clear spatial-temporal patterns are observed during emotional speech, which indicate that emotional goals enhance and modulate facial expressions. The results also show that the upper face region has more degrees of freedom to convey non-verbal information than the lower face region, which is highly constrained by the underlying articulatory processes. These results are important toward understanding how humans communicate and interact.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hill, Randall W.; Belanich, James; Lane, H. Chad; Core, Mark; Dixon, Melissa; Forbell, Eric; Kim, Julia; Hart, John
Pedagogically Structured Game-Based Training: Development of the ELECT BiLat Simulation Proceedings Article
In: Proceedings of the 25th Army Science Conference, 2006.
@inproceedings{hill_pedagogically_2006,
title = {Pedagogically Structured Game-Based Training: Development of the ELECT BiLat Simulation},
author = {Randall W. Hill and James Belanich and H. Chad Lane and Mark Core and Melissa Dixon and Eric Forbell and Julia Kim and John Hart},
url = {http://ict.usc.edu/pubs/PEDAGOGICALLY%20STRUCTURED%20GAME-BASED%20TRAINING-%20DEVELOPMENT%20OF%20THE%20ELECT%20BILAT%20SIMULATION.pdf},
year = {2006},
date = {2006-11-01},
booktitle = {Proceedings of the 25th Army Science Conference},
abstract = {ELECT BiLAT is a prototype game-based simulation for Soldiers to practice conducting bilateral engagements in a cultural context. The prototype provides students with the experience of preparing for a meeting including familiarization with the cultural context, gathering intelligence, conducting a meeting and negotiating when possible, and following up on meeting agreements as appropriate. The ELECT BiLAT architecture is based on a commercial game engine that is integrated with research technologies to enable the use of virtual human characters, scenario customization, as well as coaching, feedback and tutoring. Because the prototype application is intended to be a learning environment, pedagogy has been central throughout development. The project followed a five-phase process: (1) analyze the training domain; (2) develop a story board prototype; (3) implement a computer version of the training prototype; (4) refine training objectives and link their conditions and standards to game activities; and (5) develop training support content for students, instructors, and training developers. The goal is an authorable game-based environment that uses the pedagogy of guided discovery for training Soldiers in the conduct of bilateral engagements within a specific cultural context.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Leuski, Anton; Kennedy, Brandon; Patel, Ronakkumar; Traum, David
Asking Questions to Limited Domain Virtual Characters: How Good Does Speech Recognition Have to Be? Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
@inproceedings{leuski_asking_2006,
title = {Asking Questions to Limited Domain Virtual Characters: How Good Does Speech Recognition Have to Be?},
author = {Anton Leuski and Brandon Kennedy and Ronakkumar Patel and David Traum},
url = {http://ict.usc.edu/pubs/Asking%20Questions%20to%20Limited%20Domain%20Virtual%20Characters.pdf},
year = {2006},
date = {2006-11-01},
booktitle = {Proceedings of the 25th Army Science Conference},
address = {Orlando, FL},
abstract = {In this paper, we describe the evaluation of limited domain question-answering characters, particularly as to the effect of non-optimal speech recognition, and the ability to appropriately answer novel questions. Results show that answering ability is robust until speech recognition exceeds a 60% word error rate.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
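The 60% threshold in the abstract refers to word error rate, the standard Levenshtein-alignment metric for speech recognition output. A minimal sketch of the standard definition:

def word_error_rate(reference, hypothesis):
    # WER = (substitutions + insertions + deletions) / reference word count,
    # computed by dynamic-programming edit distance over words.
    ref, hyp = reference.split(), hypothesis.split()
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i
    for j in range(len(hyp) + 1):
        d[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            d[i][j] = min(d[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1]),  # substitute/match
                          d[i - 1][j] + 1,                               # delete
                          d[i][j - 1] + 1)                               # insert
    return d[-1][-1] / len(ref)

print(word_error_rate("where is the enemy camp", "where is enemy camps"))  # 0.4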
Leuski, Anton; Lavrenko, Victor
Tracking Dragon-Hunters with Language Models Proceedings Article
In: Conference on Information and Knowledge Management, Arlington, VA, 2006.
@inproceedings{leuski_tracking_2006,
title = {Tracking Dragon-Hunters with Language Models},
author = {Anton Leuski and Victor Lavrenko},
url = {http://ict.usc.edu/pubs/Tracking%20Dragon-Hunters%20with%20Language%20Models.pdf},
year = {2006},
date = {2006-11-01},
booktitle = {Conference on Information and Knowledge Management},
address = {Arlington, VA},
abstract = {We are interested in the problem of understanding the connections between human activities and the content of textual information generated in regard to those activities. Massive online collaborative environments, specifically online virtual worlds, where people meet, exchange messages, and perform actions, can be a rich source for such an analysis. In this paper we study one such virtual world and the activities of its inhabitants. We explore the existing dependencies between the activities and the content of the chat messages the world's inhabitants exchange with each other. We outline three experimental tasks and show how language modeling and text clustering techniques allow us to explore those dependencies successfully.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hill, Randall W.; Kim, Julia; Zbylut, MIchelle L.; Gordon, Andrew S.; Traum, David; Gandhe, Sudeep; King, Stewart; Lavis, Salvo; Rocher, Scott
AXL.Net: Web-enabled Case Method Instruction for Accelerating Tacit Knowledge Acquisition in Leaders Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
@inproceedings{hill_axlnet_2006,
title = {AXL.Net: Web-enabled Case Method Instruction for Accelerating Tacit Knowledge Acquisition in Leaders},
author = {Randall W. Hill and Julia Kim and MIchelle L. Zbylut and Andrew S. Gordon and David Traum and Sudeep Gandhe and Stewart King and Salvo Lavis and Scott Rocher},
url = {http://ict.usc.edu/pubs/AXLNet-%20Web-enabled%20Case%20Method%20Instruction%20for%20Accelerating%20Tacit%20Knowledge%20Acquisition%20in%20Leaders.PDF},
year = {2006},
date = {2006-11-01},
booktitle = {Proceedings of the 25th Army Science Conference},
address = {Orlando, FL},
abstract = {AXL.Net is a prototype web-based immersive technology solution that supports case method teaching for U.S. Army leader development. The AXL.Net system addresses three challenges: (1) designing a pedagogically sound research prototype for leader development, (2) integrating research technologies with the best of Web 2.0 innovations to enhance case method teaching, and (3) providing an easy-to-use system. Initial evaluations show that the prototype application and framework are effective for leader development.},
keywords = {The Narrative Group, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul; Bolas, Mark; McDowall, Ian
Concave Surround Optics for Rapid Multi-View Imaging Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
@inproceedings{debevec_concave_2006,
title = {Concave Surround Optics for Rapid Multi-View Imaging},
author = {Paul Debevec and Mark Bolas and Ian McDowall},
url = {http://ict.usc.edu/pubs/ConcaveSurroundOptics_ASC2006.pdf},
year = {2006},
date = {2006-11-01},
booktitle = {Proceedings of the 25th Army Science Conference},
address = {Orlando, FL},
abstract = {Many image-based modeling and rendering techniques involve photographing a scene from an array of different viewpoints. Usually, this is achieved by moving the camera or the subject to successive positions, or by photographing the scene with an array of cameras. In this work, we present a system of mirrors to simulate the appearance of camera movement around a scene while the physical camera remains stationary. The system is thus amenable to capturing dynamic events, avoiding the need to construct and calibrate an array of cameras. We demonstrate the system with a high-speed video of a dynamic scene. We show smooth camera motion rotating 360 degrees around the scene. We discuss the optical performance of our system and compare it with alternate setups.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Core, Mark; Traum, David; Lane, H. Chad; Swartout, William; Marsella, Stacy C.; Gratch, Jonathan; Lent, Michael
Teaching Negotiation Skills through Practice and Reflection with Virtual Humans Journal Article
In: Simulation: Transactions of the Society for Modeling and Simulation, vol. 82, no. 11, pp. 685–701, 2006.
@article{core_teaching_2006,
title = {Teaching Negotiation Skills through Practice and Reflection with Virtual Humans},
author = {Mark Core and David Traum and H. Chad Lane and William Swartout and Stacy C. Marsella and Jonathan Gratch and Michael Lent},
url = {http://ict.usc.edu/pubs/Teaching%20Negotiation%20Skills.pdf},
year = {2006},
date = {2006-11-01},
journal = {Simulation: Transactions of the Society for Modeling and Simulation},
volume = {82},
number = {11},
pages = {685–701},
abstract = {Although the representation of physical environments and behaviors will continue to play an important role in simulation-based training, an emerging challenge is the representation of virtual humans with rich mental models (e.g., including emotions, trust) that interact through conversational as well as physical behaviors. The motivation for such simulations is training soft skills such as leadership, cultural awareness, and negotiation, where the majority of actions are conversational, and the problem solving involves consideration of the emotions, attitudes, and desires of others. The educational power of such simulations can be enhanced by the integration of an intelligent tutoring system to support learners' understanding of the effect of their actions on virtual humans and how they might improve their performance. In this paper, we discuss our efforts to build such virtual humans, along with an accompanying intelligent tutor, for the domain of negotiation and cultural awareness.},
keywords = {Learning Sciences, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Okhmatovskaia, Anna; Duncan, Susan
Virtual Humans for the Study of Rapport in Cross Cultural Settings Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
@inproceedings{gratch_virtual_2006,
title = {Virtual Humans for the Study of Rapport in Cross Cultural Settings},
author = {Jonathan Gratch and Anna Okhmatovskaia and Susan Duncan},
url = {http://ict.usc.edu/pubs/VIRTUAL%20HUMANS%20FOR%20THE%20STUDY%20OF%20RAPPORT%20IN%20CROSS%20CULTURAL%20SETTINGS.pdf},
year = {2006},
date = {2006-11-01},
booktitle = {Proceedings of the 25th Army Science Conference},
address = {Orlando, FL},
abstract = {As an increasing part of the Army's mission involves establishing rapport with diverse populations, training interpersonal skills becomes critically important. Here we describe a "Rapport Agent" that senses and responds to a speaker's nonverbal behavior and provide empirical evidence that it increases speaker fluency and engagement. We argue such agent technology has potential, both as a training system to enhance communication skills, and to assess the key factors that influence rapport in face-to-face interactions. We conclude by discussing ways the nonverbal correlates of rapport vary between Arabic and English speakers and discuss the potential of such technology to advance research and training into rapport in cross-cultural settings.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Treskunov, Anton; Pair, Jarrell
Projector-Camera Systems for Immersive Training Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
@inproceedings{treskunov_projector-camera_2006,
title = {Projector-Camera Systems for Immersive Training},
author = {Anton Treskunov and Jarrell Pair},
url = {http://ict.usc.edu/pubs/PROJECTOR-CAMERA%20SYSTEMS%20FOR%20IMMERSIVE%20TRAINING.pdf},
year = {2006},
date = {2006-11-01},
booktitle = {Proceedings of the 25th Army Science Conference},
address = {Orlando, FL},
abstract = {Real time computer graphics are limited in that they can only be displayed on projection screens and monitors. Monitors and projection screens cannot be used in live fire training or scenarios in which the displays could be physically damaged by trainees. To address this issue, we have developed projection systems using computer vision based color correction and image processing to project onto non-ideal surfaces such as painted walls, cinder blocks, and concrete floors. These projector-camera systems effectively paint the real world with digital light. Any surface can become an interactive projection screen allowing unprepared spaces to be transformed into an immersive environment. Virtual bullet holes, charring, and cracks can be added to real doors, walls, tables, chairs, cabinets, and windows. Distortion correction algorithms allow positioning of projection devices out of the field of view of trainees and their weapons. This paper describes our motivation and approach for implementing projector-camera systems for use within the FlatWorld wide area mixed reality system.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Pair, Jarrell; Parsons, Thomas D.; Liewer, Matt; Graap, Ken; Difede, JoAnn; Rothbaum, Barbara O.; Reger, Greg; Roy, Michael
A Virtual Reality Therapy Application for OEF/OIF Combat-related Post Traumatic Stress Disorder Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
@inproceedings{rizzo_virtual_2006-1,
title = {A Virtual Reality Therapy Application for OEF/OIF Combat-related Post Traumatic Stress Disorder},
author = {Albert Rizzo and Jarrell Pair and Thomas D. Parsons and Matt Liewer and Ken Graap and JoAnn Difede and Barbara O. Rothbaum and Greg Reger and Michael Roy},
url = {http://ict.usc.edu/pubs/A%20VIRTUAL%20REALITY%20THERAPY%20APPLICATION%20FOR%20OEF%20OIF%20COMBAT-RELATED%20POST%20TRAUMATIC%20STRESS%20DISORDER.pdf},
year = {2006},
date = {2006-11-01},
booktitle = {Proceedings of the 25th Army Science Conference},
address = {Orlando, FL},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been used with reports of positive outcomes. The aim of the current paper is to present the rationale, technical specifications, application features and user-centered design process for the development of a Virtual Iraq PTSD VR therapy application. The VR treatment environment is being created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far we have created a series of customizable virtual scenarios designed to represent relevant contexts for exposure therapy to be conducted in VR, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system in Iraq tested by an Army Combat Stress Control Team. Clinical trials are currently underway at Camp Pendleton and at the San Diego Naval Medical Center. Other sites are preparing to use the application for a variety of PTSD and VR research purposes.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
Fourth Frame Forums: Interactive Comics for Collaborative Learning Proceedings Article
In: Proceedings of the 14th Annual ACM International Conference on Multimedia (MM 2006), Santa Barbara, CA, 2006.
@inproceedings{gordon_fourth_2006,
title = {Fourth Frame Forums: Interactive Comics for Collaborative Learning},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Fourth%20Frame%20Forums-%20Interactive%20Comics%20for%20Collaborative%20Learning.pdf},
year = {2006},
date = {2006-10-01},
booktitle = {Proceedings of the 14th Annual ACM International Conference on Multimedia (MM 2006)},
address = {Santa Barbara, CA},
abstract = {In this paper, we describe Fourth Frame Forums, an application that combines traditional four-frame comic strips with online web-based discussion forums. In this application, users are presented with a four-frame comic strip where the last dialogue balloon of the fourth frame is left blank. By typing a statement into this dialogue balloon, the user creates a new discussion thread in the forum, where the user's dialogue choice can be critiqued and discussed by other users of the forum. We argue that Fourth Frame Forums provide an elegant and cost-effective solution for online education and training environments for communities of learners. We provide examples from the domain of US Army leadership development, and compare Fourth Frame Forums to alternative methods of story-directed simulation and training.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; Swanson, Reid
Integrating logical inference into statistical text classification applications Proceedings Article
In: Proceedings of AAAI Fall Symposium on Integrating Logical Reasoning into Everyday Applications, Washington D.C., 2006.
@inproceedings{gordon_integrating_2006,
title = {Integrating logical inference into statistical text classification applications},
author = {Andrew S. Gordon and Reid Swanson},
url = {http://ict.usc.edu/pubs/Integrating%20Logical%20Inference%20Into%20Statistical%20Text%20Classification%20Applications.pdf},
year = {2006},
date = {2006-10-01},
booktitle = {Proceedings of AAAI Fall Symposium on Integrating Logical Reasoning into Everyday Applications},
address = {Washington D.C.},
abstract = {Contemporary statistical text classification is becoming increasingly common across a wide range of everyday applications. Typically, the bottlenecks in performance are the availability and consistency of large amounts of training data. We argue that these techniques could be improved by seamlessly integrating logical inference into the text encoding pipeline, making it possible to utilize large-scale commonsense and special-purpose knowledge bases to aid in the interpretation and encoding of documents.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
Language evidence for changes in a Theory of Mind Book Section
In: Arbib, Michael A. (Ed.): Action to Language via the Mirror Neuron System, Cambridge University Press, Cambridge, UK, 2006, ISBN: 978-0-521-84755-1.
@incollection{gordon_language_2006,
title = {Language evidence for changes in a Theory of Mind},
author = {Andrew S. Gordon},
editor = {Michael A. Arbib},
url = {http://www.cambridge.org/gb/knowledge/isbn/item1172518/?site_locale=en_GB},
isbn = {978-0-521-84755-1},
year = {2006},
date = {2006-09-01},
booktitle = {Action to Language via the Mirror Neuron System},
publisher = {Cambridge University Press},
address = {Cambridge, UK},
abstract = {Mirror neurons may hold the brain's key to social interaction - each coding not only a particular action or emotion but also the recognition of that action or emotion in others. The Mirror System Hypothesis adds an evolutionary arrow to the story - from the mirror system for hand actions, shared with monkeys and chimpanzees, to the uniquely human mirror system for language. In this accessible 2006 volume, experts from child development, computer science, linguistics, neuroscience, primatology and robotics present and analyse the mirror system and show how studies of action and language can illuminate each other. Topics discussed in the fifteen chapters include: what do chimpanzees and humans have in common? Does the human capability for language rest on brain mechanisms shared with other animals? How do human infants acquire language? What can be learned from imaging the human brain? How are sign- and spoken-language related? Will robots learn to act and speak like humans?},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {incollection}
}
Tepperman, Joseph; Traum, David; Narayanan, Shrikanth
"Yeah Right": Sarcasm Recognition for Spoken Dialogue Systems Proceedings Article
In: Interspeech 2006, Pittsburgh, PA, 2006.
@inproceedings{tepperman_yeah_2006,
title = {"Yeah Right": Sarcasm Recognition for Spoken Dialogue Systems},
author = {Joseph Tepperman and David Traum and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Yeah%20Right-%20Sarcasm%20Recognition%20for%20Spoken%20Dialogue%20Systems.pdf},
year = {2006},
date = {2006-09-01},
booktitle = {Interspeech 2006},
address = {Pittsburgh, PA},
abstract = {The robust understanding of sarcasm in a spoken dialogue system requires a reformulation of the dialogue manager's basic assumptions behind, for example, user behavior and grounding strategies. But automatically detecting a sarcastic tone of voice is not a simple matter. This paper presents some experiments toward sarcasm recognition using prosodic, spectral, and contextual cues. Our results demonstrate that spectral and contextual features can be used to detect sarcasm as well as a human annotator would, and confirm a long-held claim in the field of psychology — that prosody alone is not sufficient to discern whether a speaker is being sarcastic.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Gardner, Andrew; Bolas, Mark; McDowall, Ian; Debevec, Paul
Simulating Spatially Varying Lighting on a Live Performance Proceedings Article
In: 3rd European Conference on Visual Media Production (CVMP 2006), London, UK, 2006.
@inproceedings{jones_simulating_2006,
title = {Simulating Spatially Varying Lighting on a Live Performance},
author = {Andrew Jones and Andrew Gardner and Mark Bolas and Ian McDowall and Paul Debevec},
url = {http://ict.usc.edu/pubs/Simulating%20Spatially%20Varying%20Lighting%20on%20a%20Live%20Performance.pdf},
year = {2006},
date = {2006-09-01},
booktitle = {3rd European Conference on Visual Media Production (CVMP 2006)},
address = {London, UK},
abstract = {We present an image-based technique for relighting dynamic human performances under spatially varying illumination. Our system generates a time-multiplexed LED basis and a geometric model recovered from high-speed structured light patterns. The geometric model is used to scale the intensity of each pixel differently according to its 3D position within the spatially varying illumination volume. This yields a first-order approximation of the correct appearance under the spatially varying illumination. A global illumination process removes indirect illumination from the original lighting basis and simulates spatially varying indirect illumination. We demonstrate this technique for a human performance under several spatially varying lighting environments.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Roque, Antonio; Leuski, Anton; Rangarajan, Vivek; Robinson, Susan; Vaswani, Ashish; Narayanan, Shrikanth; Traum, David
Radiobot-CFF: A Spoken Dialogue System for Military Training Proceedings Article
In: Interspeech 2006, Pittsburgh, PA, 2006.
@inproceedings{roque_radiobot-cff_2006,
title = {Radiobot-CFF: A Spoken Dialogue System for Military Training},
author = {Antonio Roque and Anton Leuski and Vivek Rangarajan and Susan Robinson and Ashish Vaswani and Shrikanth Narayanan and David Traum},
url = {http://ict.usc.edu/pubs/Radiobot-CFF-%20A%20Spoken%20Dialogue%20System%20for%20Military%20Training.pdf},
year = {2006},
date = {2006-09-01},
booktitle = {Interspeech 2006},
address = {Pittsburgh, PA},
abstract = {We describe a spoken dialogue system which can engage in Call For Fire (CFF) radio dialogues to help train soldiers in proper procedures for requesting artillery fire missions. We describe the domain, an information-state dialogue manager with a novel system of interactive information components, and provide evaluation results.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Carnicke, Sharon Marie; Gratch, Jonathan; Okhmatovskaia, Anna; Rizzo, Albert
An Exploration of Delsarte's Structural Acting System Proceedings Article
In: Proceedings of the 6th International Conference on Intelligent Virtual Agents (IVA), pp. 80–92, Marina del Rey, CA, 2006.
@inproceedings{marsella_exploration_2006,
title = {An Exploration of Delsarte's Structural Acting System},
author = {Stacy C. Marsella and Sharon Marie Carnicke and Jonathan Gratch and Anna Okhmatovskaia and Albert Rizzo},
url = {http://ict.usc.edu/pubs/An%20Exploration%20of%20Delsarte%E2%80%99s%20Structural%20Acting%20System.pdf},
year = {2006},
date = {2006-08-01},
booktitle = {Proceedings of the 6th International Conference on Intelligent Virtual Agents (IVA)},
pages = {80–92},
address = {Marina del Rey, CA},
abstract = {The designers of virtual agents often draw on a large research literature in psychology, linguistics and human ethology to design embodied agents that can interact with people. In this paper, we consider a structural acting system developed by Francois Delsarte as a possible resource in designing the nonverbal behavior of embodied agents. Using human subjects, we evaluate one component of the system, Delsarte's Cube, that addresses the meaning of differing attitudes of the hand in gestures.},
keywords = {MedVR, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul
Virtual Cinematography: Relighting through Computation Journal Article
In: IEEE Computer Magazine, vol. 39, pp. 57–65, 2006.
@article{debevec_virtual_2006,
title = {Virtual Cinematography: Relighting through Computation},
author = {Paul Debevec},
url = {http://ict.usc.edu/pubs/Virtual%20Cinematography-%20Relighting%20through%20Computation.pdf},
year = {2006},
date = {2006-08-01},
journal = {IEEE Computer Magazine},
volume = {39},
pages = {57–65},
abstract = {Recording how scenes transform incident illumination into radiant light is an active topic in computational photography. Such techniques make it possible to create virtual images of a person or place from new viewpoints and in any form of illumination.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
Lee, Jina; Marsella, Stacy C.
Nonverbal Behavior Generator for Embodied Conversational Agents Proceedings Article
In: 6th International Conference on Intelligent Virtual Agents, Marina del Rey, CA, 2006.
@inproceedings{lee_nonverbal_2006,
title = {Nonverbal Behavior Generator for Embodied Conversational Agents},
author = {Jina Lee and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Nonverbal%20Behavior%20Generator%20for%20Embodied%20Conversational%20Agents.pdf},
year = {2006},
date = {2006-08-01},
booktitle = {6th International Conference on Intelligent Virtual Agents},
address = {Marina del Rey, CA},
abstract = {Believable nonverbal behaviors for embodied conversational agents (ECA) can create a more immersive experience for users and improve the effectiveness of communication. This paper describes a nonverbal behavior generator that analyzes the syntactic and semantic structure of the surface text as well as the affective state of the ECA and annotates the surface text with appropriate nonverbal behaviors. A number of video clips of people conversing were analyzed to extract the nonverbal behavior generation rules. The system works in real-time and is user-extensible so that users can easily modify or extend the current behavior generation rules.},
keywords = {Social Simulation},
pubstate = {published},
tppubtype = {inproceedings}
}
Gluck, Kevin A.; Gunzelmann, Glenn; Gratch, Jonathan; Hudlicka, Eva; Ritter, Frank E.
Modeling the Impact of Cognitive Moderators on Human Cognition and Performance Proceedings Article
In: Proceedings of the 2006 Conference of the Cognitive Science Society, pp. 2658, Vancouver, CA, 2006.
@inproceedings{gluck_modeling_2006,
title = {Modeling the Impact of Cognitive Moderators on Human Cognition and Performance},
author = {Kevin A. Gluck and Glenn Gunzelmann and Jonathan Gratch and Eva Hudlicka and Frank E. Ritter},
url = {http://ict.usc.edu/pubs/Modeling%20the%20Impact%20of%20Cognitive%20Moderators%20on%20Human%20Cognition%20and%20Performance.pdf},
year = {2006},
date = {2006-08-01},
booktitle = {Proceedings of the 2006 Conference of the Cognitive Science Society},
pages = {2658},
address = {Vancouver, CA},
abstract = {Cognitive moderators, such as emotions, personality, stress, and fatigue, represent an emerging area of research within the cognitive science community and are increasingly acknowledged as important and ubiquitous influences on cognitive processes. This symposium brings together scientists engaged in research to develop models that help us better understand the mechanisms through which these factors impact human cognition and performance. There are two unifying themes across the presentations. One theme is a commitment to developing computational models useful for simulating the processes that produce the effects and phenomena of interest. The second theme is a commitment to assessing the validity of the models by comparing their performance against empirical human data.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Okhmatovskaia, Anna; Lamothe, Francois; Marsella, Stacy C.; Morales, Mathieu; Werf, R. J.; Morency, Louis-Philippe
Virtual Rapport Proceedings Article
In: Lecture Notes in Computer Science, vol. 4311, pp. 14–27, Marina del Rey, CA, 2006.
@inproceedings{gratch_virtual_2006-1,
title = {Virtual Rapport},
author = {Jonathan Gratch and Anna Okhmatovskaia and Francois Lamothe and Stacy C. Marsella and Mathieu Morales and R. J. Werf and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Virtual%20Rapport.pdf},
year = {2006},
date = {2006-08-01},
booktitle = {Lecture Notes in Computer Science},
volume = {4311},
pages = {14–27},
address = {Marina del Rey, CA},
abstract = {Effective face-to-face conversations are highly interactive. Participants respond to each other, engaging in nonconscious behavioral mimicry and backchanneling feedback. Such behaviors produce a subjective sense of rapport and are correlated with effective communication, greater liking and trust, and greater influence between participants. Creating rapport requires a tight sense-act loop that has been traditionally lacking in embodied conversational agents. Here we describe a system, based on psycholinguistic theory, designed to create a sense of rapport between a human speaker and virtual human listener. We provide empirical evidence that it increases speaker fluency and engagement.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kopp, Stefan; Krenn, Brigitte; Marsella, Stacy C.; Marshall, Andrew; Pelachaud, Catherine; Pirker, Hannes; Thórisson, Kristinn R.; Vilhjálmsson, Hannes
Towards a Common Framework for Multimodal Generation: The Behavior Markup Language Proceedings Article
In: Proceedings of the Intelligent Virtual Humans Conference, Marina del Rey, CA, 2006.
@inproceedings{kopp_towards_2006,
title = {Towards a Common Framework for Multimodal Generation: The Behavior Markup Language},
author = {Stefan Kopp and Brigitte Krenn and Stacy C. Marsella and Andrew Marshall and Catherine Pelachaud and Hannes Pirker and Kristinn R. Thórisson and Hannes Vilhjálmsson},
url = {http://ict.usc.edu/pubs/Towards%20a%20Common%20Framework%20for%20Multimodal%20Generation-%20The%20Behavior%20Markup%20Language.pdf},
year = {2006},
date = {2006-08-01},
booktitle = {Proceedings of the Intelligent Virtual Humans Conference},
address = {Marina del Rey, CA},
abstract = {This paper describes an international effort to unify a multimodal behavior generation framework for Embodied Conversational Agents (ECAs). We propose a three-stage model we call SAIBA, where the stages represent intent planning, behavior planning and behavior realization. A Function Markup Language (FML), describing intent without referring to physical behavior, mediates between the first two stages, and a Behavior Markup Language (BML), describing desired physical realization, mediates between the last two stages. In this paper we will focus on BML. The hope is that this abstraction and modularization will help ECA researchers pool their resources to build more sophisticated virtual humans.},
keywords = {Social Simulation},
pubstate = {published},
tppubtype = {inproceedings}
}
Patel, Ronakkumar; Leuski, Anton; Traum, David
Dealing with Out of Domain Questions in Virtual Characters Proceedings Article
In: Proceedings of the 6th International Conference on Intelligent Virtual Agents, Marina del Rey, CA, 2006.
@inproceedings{patel_dealing_2006,
title = {Dealing with Out of Domain Questions in Virtual Characters},
author = {Ronakkumar Patel and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/Dealing%20with%20Out%20of%20Domain%20Questions%20in%20Virtual%20Characters.pdf},
year = {2006},
date = {2006-08-01},
booktitle = {Proceedings of the 6th International Conference on Intelligent Virtual Agents},
address = {Marina del Rey, CA},
abstract = {We consider the problem of designing virtual characters that support speech-based interactions in a limited domain. Previously we have shown that classification can be an effective and robust tool for selecting appropriate in-domain responses. In this paper, we consider the problem of dealing with out-of-domain user questions. We introduce a taxonomy of out-of-domain response types. We consider three classification architectures for selecting the most appropriate out-of-domain responses. We evaluate these architectures and show that they significantly improve the quality of the response selection making the user's interaction with the virtual character more natural and engaging.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Roque, Antonio; Traum, David
An Information State-Based Dialogue Manager for Call for Fire Dialogues Proceedings Article
In: 7th SIGdial Workshop on Discourse and Dialogue, Sydney, Australia, 2006.
@inproceedings{roque_information_2006,
title = {An Information State-Based Dialogue Manager for Call for Fire Dialogues},
author = {Antonio Roque and David Traum},
url = {http://ict.usc.edu/pubs/An%20Information%20State-Based%20Dialogue%20Manager%20for%20Call%20for%20Fire%20Dialogues.pdf},
year = {2006},
date = {2006-07-01},
booktitle = {7th SIGdial Workshop on Discourse and Dialogue},
address = {Sydney, Australia},
abstract = {We present a dialogue manager for "Call for Fire" training dialogues. We describe the training environment, the domain, the features of its novel information state-based dialogue manager, the system it is a part of, and preliminary evaluation results.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Swanson, Reid; Gordon, Andrew S.
A Comparison of Alternative Parse Tree Paths for Labeling Semantic Roles Proceedings Article
In: Proceedings of the Joint Conference of the International Committee on Computational Linguistics and the Association for Computational Linguistics (COLING/ACL), Sydney, Australia, 2006.
@inproceedings{swanson_comparison_2006,
title = {A Comparison of Alternative Parse Tree Paths for Labeling Semantic Roles},
author = {Reid Swanson and Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/A%20Comparison%20of%20Alternative%20Parse%20Tree%20Paths%20for%20Labeling%20Semantic%20Roles.pdf},
year = {2006},
date = {2006-07-01},
booktitle = {Proceedings of the Joint Conference of the International Committee on Computational Linguistics and the Association for Computational Linguistics (COLING/ACL)},
address = {Sydney, Australia},
abstract = {The integration of sophisticated inference-based techniques into natural language processing applications first requires a reliable method of encoding the predicate-argument structure of the propositional content of text. Recent statistical approaches to automated predicate-argument annotation have utilized parse tree paths as predictive features, which encode the path between a verb predicate and a node in the parse tree that governs its argument. In this paper, we explore a number of alternatives for how these parse tree paths are encoded, focusing on the difference between automatically generated constituency parses and dependency parses. After describing five alternatives for encoding parse tree paths, we investigate how well each can be aligned with the argument substrings in annotated text corpora, their relative precision and recall performance, and their comparative learning curves. Results indicate that constituency parsers produce parse tree paths that can more easily be aligned to argument substrings, perform better in precision and recall, and have more favorable learning curves than those produced by a dependency parser.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Leuski, Anton; Patel, Ronakkumar; Traum, David; Kennedy, Brandon
Building Effective Question Answering Characters Proceedings Article
In: 7th SIGdial Workshop on Discourse and Dialogue, Sydney, Australia, 2006.
@inproceedings{leuski_building_2006,
title = {Building Effective Question Answering Characters},
author = {Anton Leuski and Ronakkumar Patel and David Traum and Brandon Kennedy},
url = {http://ict.usc.edu/pubs/Building%20Effective%20Question%20Answering%20Characters.pdf},
year = {2006},
date = {2006-07-01},
booktitle = {7th SIGdial Workshop on Discourse and Dialogue},
address = {Sydney, Australia},
abstract = {In this paper, we describe methods for building and evaluation of limited domain question-answering characters. Several classification techniques are tested, including text classification using support vector machines, language-model based retrieval, and cross-language information retrieval techniques, with the latter having the highest success rate. We also evaluated the effect of speech recognition errors on performance with users, finding that retrieval is robust until recognition reaches over 50% WER.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Martinovski, Bilyana
Cognitive and Emotive Empathy in Discourse: Towards an Integrated Theory of Mind Proceedings Article
In: Proceedings of the 28th Annual Conference of the Cognitive Science Society, Vancouver, CA, 2006.
@inproceedings{martinovski_cognitive_2006,
title = {Cognitive and Emotive Empathy in Discourse: Towards an Integrated Theory of Mind},
author = {Bilyana Martinovski},
url = {http://ict.usc.edu/pubs/Cognitive%20and%20Emotive%20Empathy%20in%20Discourse-%20Towards%20an%20Integrated%20Theory%20of%20Mind.pdf},
year = {2006},
date = {2006-07-01},
booktitle = {Proceedings of the 28th Annual Conference of the Cognitive Science Society},
address = {Vancouver, CA},
abstract = {This paper presents an empirical qualitative analysis of eliciting, giving and receiving empathy in discourse. The study identifies discursive and linguistic features, which realize cognitive, emotive, parallel and reactive empathy and suggests that imitation, simulation and representation could be non-exclusive processes in Theory of Mind reasoning.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Core, Mark; Lane, H. Chad; Lent, Michael; Gomboc, Dave; Solomon, Steve; Rosenberg, Milton
Building Explainable Artificial Intelligence Systems Proceedings Article
In: Proceedings of the 18th Innovative Applications of Artificial Intelligence Conference, Boston, MA, 2006.
@inproceedings{core_building_2006,
title = {Building Explainable Artificial Intelligence Systems},
author = {Mark Core and H. Chad Lane and Michael Lent and Dave Gomboc and Steve Solomon and Milton Rosenberg},
url = {http://ict.usc.edu/pubs/Building%20Explainable%20Artificial%20Intelligence%20Systems.pdf},
year = {2006},
date = {2006-07-01},
booktitle = {Proceedings of the 18th Innovative Applications of Artificial Intelligence Conference},
address = {Boston, MA},
abstract = {As artificial intelligence (AI) systems and behavior models in military simulations become increasingly complex, it has been difficult for users to understand the activities of computer-controlled entities. Prototype explanation systems have been added to simulators, but designers have not heeded the lessons learned from work in explaining expert system behavior. These new explanation systems are not modular and not portable; they are tied to a particular AI system. In this paper, we present a modular and generic architecture for explaining the behavior of simulated entities. We describe its application to the Virtual Humans, a simulation designed to teach soft skills such as negotiation and cultural awareness.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Riedl, Mark O.; Young, R. Michael
From Linear Story Generation to Branching Story Graphs Journal Article
In: IEEE Computer Graphics and Applications, vol. 26, no. 3, pp. 23–31, 2006.
@article{riedl_linear_2006,
title = {From Linear Story Generation to Branching Story Graphs},
author = {Mark O. Riedl and R. Michael Young},
url = {http://ict.usc.edu/pubs/From%20Linear%20Story%20Generation%20to%20Branching%20Story%20Graphs.pdf},
year = {2006},
date = {2006-06-01},
journal = {IEEE Computer Graphics and Applications},
volume = {26},
number = {3},
pages = {23–31},
abstract = {Interactive narrative systems are storytelling systems in which the user can influence the content or ordering of story world events. Conceptually, an interactive narrative can be represented as a branching graph of narrative elements, implying points at which an interactive user's decisions influence the content or ordering of the remaining elements. Generative approaches to interactive narrative construct narrative at runtime or pre-construct, on a per-session basis, highly interactive branching narrative structures. One generative approach, narrative mediation, represents story as a linear progression of events with anticipated user actions and system-controlled agent actions together in a partially-ordered plan. For every possible way the user can violate the story plan, an alternative story plan is generated. If narrative mediation is powerful enough to express the same interactive stories as systems that use branching narrative structures, then linear narrative generation techniques can be applied to interactive narrative generation. This paper lays out this argument and sketches a proof that narrative mediation is at least as powerful as acyclic branching story structures.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
McAlinden, Ryan; Lent, Michael; Clevenger, William; Tien, Wen C.
Using Environmental Annotations & Affordances to Model Culture Proceedings Article
In: Artificial Intelligence and Interactive Digital Entertainment Conference Demonstrations, Marina del Rey, CA, 2006.
@inproceedings{mcalinden_using_2006,
title = {Using Environmental Annotations & Affordances to Model Culture},
author = {Ryan McAlinden and Michael Lent and William Clevenger and Wen C. Tien},
url = {http://ict.usc.edu/pubs/Using%20Environmental%20Annotations%20&%20Affordances%20to%20Model%20Culture.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {Artificial Intelligence and Interactive Digital Entertainment Conference Demonstrations},
address = {Marina del Rey, CA},
abstract = {This paper details the demonstration of an annotation and affordance-based software model intended to introduce cultural and social influences into a non-player character's (NPC) decision-making process. We describe how recent research has supported the need to begin incorporating the effects of culture into the interactive digital domain. The technical approach is presented that describes the software techniques for embedding and utilizing culturally-specific information inside of a virtual environment, as well as the design and implementation of a deterministic Markov Decision Process (MDP) to model the effects of culture on the AI.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Callieri, Marco; Debevec, Paul; Scopigno, Roberto
A realtime immersive application with realistic lighting: The Parthenon Journal Article
In: Computers & Graphics, vol. 30, no. 3, pp. 368–376, 2006.
@article{callieri_realtime_2006,
title = {A realtime immersive application with realistic lighting: The Parthenon},
author = {Marco Callieri and Paul Debevec and Roberto Scopigno},
url = {http://ict.usc.edu/pubs/A%20realtime%20immersive%20application%20with%20realistic%20lighting-%20The%20Parthenon.pdf},
year = {2006},
date = {2006-06-01},
journal = {Computers & Graphics},
volume = {30},
number = {3},
pages = {368–376},
abstract = {Offline rendering techniques have nowadays reached an astonishing level of realism but pay the cost of long computational times. The new generation of programmable graphic hardware, on the other hand, gives the possibility to implement in realtime some of the visual effects previously available only for cinematographic production. We describe the design and implementation of an interactive system which is able to reproduce in realtime one of the crucial sequences from the short movie “The Parthenon” presented at Siggraph 2004. The application is designed to run on a specific immersive reality system, making it possible for a user to perceive the virtual environment with nearly cinematographic visual quality.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
Parsons, Thomas D.; Rogers, Steven A.; Braaten, Alyssa J.; Woods, Steven Paul; Tröster, Alexander I.
Cognitive sequelae of subthalamic nucleus deep brain stimulation in Parkinson's disease: a meta-analysis Journal Article
In: Lancet Neurology, vol. 5, pp. 578–588, 2006.
@article{parsons_cognitive_2006,
title = {Cognitive sequelae of subthalamic nucleus deep brain stimulation in Parkinson's disease: a meta-analysis},
author = {Thomas D. Parsons and Steven A. Rogers and Alyssa J. Braaten and Steven Paul Woods and Alexander I. Tröster},
url = {http://ict.usc.edu/pubs/Cognitive%20sequelae%20of%20subthalamic%20nucleus%20deep%20brain%20stimulation%20in%20Parkinson%E2%80%99s%20disease-%20a%20meta-analysis.pdf},
year = {2006},
date = {2006-06-01},
journal = {Lancet Neurology},
volume = {5},
pages = {578–588},
abstract = {Summary: Background Deep brain stimulation of the subthalamic nucleus (STN DBS) is an increasingly common treatment for Parkinson's disease. Qualitative reviews have concluded that diminished verbal fluency is common after STN DBS, but that changes in global cognitive abilities, attention, executive functions, and memory are only inconsistently observed and, when present, often nominal or transient. We did a quantitative meta-analysis to improve understanding of the variability and clinical significance of cognitive dysfunction after STN DBS. Methods: We searched MedLine, PsycLIT, and ISI Web of Science electronic databases for articles published between 1990 and 2006, and extracted information about number of patients, exclusion criteria, confirmation of target by microelectrode recording, verification of electrode placement via radiographic means, stimulation parameters, assessment time points, assessment measures, whether patients were on levodopa or dopaminomimetics, and summary statistics needed for computation of effect sizes. We used the random-effects meta-analytical model to assess continuous outcomes before and after STN DBS. Findings: Of 40 neuropsychological studies identified, 28 cohort studies (including 612 patients) were eligible for inclusion in the meta-analysis. After adjusting for heterogeneity of variance in study effect sizes, the random effects meta-analysis revealed significant, albeit small, declines in executive functions and verbal learning and memory. Moderate declines were only reported in semantic (Cohen's d 0.73) and phonemic verbal fluency (0.51). Changes in verbal fluency were not related to patient age, disease duration, stimulation parameters, or change in dopaminomimetic dose after surgery. Interpretation: STN DBS, in selected patients, seems relatively safe from a cognitive standpoint. However, difficulty in identification of factors underlying changes in verbal fluency draws attention to the need for uniform and detailed reporting of patient selection, demographic, disease, treatment, surgical, stimulation, and clinical outcome parameters.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Dini, Don M.; Lent, Michael; Carpenter, Paul; Iyer, Kumar
Building Robust Planning and Execution Systems for Virtual Worlds Proceedings Article
In: Proceedings of Artificial Intelligence and Interactive Digital Entertainment, Marina del Rey, CA, 2006.
@inproceedings{dini_building_2006,
title = {Building Robust Planning and Execution Systems for Virtual Worlds},
author = {Don M. Dini and Michael Lent and Paul Carpenter and Kumar Iyer},
url = {http://ict.usc.edu/pubs/Building%20Robust%20Planning%20and%20Execution%20Systems%20for%20Virtual%20Worlds.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {Proceedings of Artificial Intelligence and Interactive Digital Entertainment},
address = {Marina del Rey, CA},
abstract = {Planning and execution systems have been used in a wide variety of systems to create practical and successful automation. They have been used for everything from performing scientific research on the surface of Mars to controlling enemy characters in video games to performing military air campaign planning. After reviewing past work on these various planning and execution systems, we believe that most lack one or more key components contained in another system. To enable future researchers to build more complete systems, and avoid possible serious system failure, we identify the major technical problems any implementer of such a system would have to face. In addition we cite recent solutions to each of these technical problems. We limit our focus to planning and execution for virtual worlds and the unique problems faced therein.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Riedl, Mark O.; Stern, Andrew; Dini, Don M.
Mixing Story and Simulation in Interactive Narrative Proceedings Article
In: 2nd Conference on Artificial Intelligence and Interactive Entertainment (AIIDE), Marina del Rey, CA, 2006.
@inproceedings{riedl_mixing_2006,
title = {Mixing Story and Simulation in Interactive Narrative},
author = {Mark O. Riedl and Andrew Stern and Don M. Dini},
url = {http://ict.usc.edu/pubs/Mixing%20Story%20and%20Simulation%20in%20Interactive%20Narrative.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {2nd Conference on Artificial Intelligence and Interactive Entertainment (AIIDE)},
address = {Marina del Rey, CA},
abstract = {Simulation is a common feature in computer entertainment. However, in computer games simulation and story are often kept distinct by interleaving interactive play and cut scenes. We describe a technique for an interactive narrative system that more closely integrates simulation and storyline. The technique uses a combination of semi-autonomous character agents and high-level story direction. The storyline is decomposed into directives to character agents to achieve particular world states. Otherwise, character agents are allowed to behave autonomously. When the player's actions create inconsistency between the simulation state and storyline, the storyline is dynamically adapted and repaired to resolve any inconsistencies.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Einarsson, Per; Chabert, Charles-Felix; Jones, Andrew; Ma, Wan-Chun; Lamond, Bruce; Hawkins, Tim; Bolas, Mark; Sylwan, Sebastian; Debevec, Paul
Relighting Human Locomotion with Flowed Reflectance Fields Proceedings Article
In: Eurographics Symposium on Rendering (2006), 2006.
@inproceedings{einarsson_relighting_2006,
title = {Relighting Human Locomotion with Flowed Reflectance Fields},
author = {Per Einarsson and Charles-Felix Chabert and Andrew Jones and Wan-Chun Ma and Bruce Lamond and Tim Hawkins and Mark Bolas and Sebastian Sylwan and Paul Debevec},
url = {http://ict.usc.edu/pubs/Relighting%20Human%20Locomotion%20with%20Flowed%20Reflectance%20Fields.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {Eurographics Symposium on Rendering (2006)},
abstract = {We present an image-based approach for capturing the appearance of a walking or running person so they can be rendered realistically under variable viewpoint and illumination. In our approach, a person walks on a treadmill at a regular rate as a turntable slowly rotates the person's direction. As this happens, the person is filmed with a vertical array of high-speed cameras under a time-multiplexed lighting basis, acquiring a seven-dimensional dataset of the person under variable time, illumination, and viewing direction in approximately forty seconds. We process this data into a flowed reflectance field using an optical flow algorithm to correspond pixels in neighboring camera views and time samples to each other, and we use image compression to reduce the size of this data. We then use image-based relighting and a hardware-accelerated combination of view morphing and light field rendering to render the subject under user-specified viewpoint and lighting conditions. To composite the person into a scene, we use an alpha channel derived from back lighting and a retroreflective treadmill surface and a visual hull process to render the shadows the person would cast onto the ground. We demonstrate realistic composites of several subjects into real and virtual environments using our technique.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Tariq, Sarah; Gardner, Andrew; Llamas, Ignacio; Jones, Andrew; Debevec, Paul; Turk, Greg
Efficient Estimation of Spatially Varying Subsurface Scattering Parameters Proceedings Article
In: 11th International Fall Workshop on Vision, Modeling and Visualization, Aachen, Germany, 2006.
@inproceedings{tariq_efficient_2006-1,
title = {Efficient Estimation of Spatially Varying Subsurface Scattering Parameters},
author = {Sarah Tariq and Andrew Gardner and Ignacio Llamas and Andrew Jones and Paul Debevec and Greg Turk},
url = {http://ict.usc.edu/pubs/Efficient%20Estimation%20of%20Spatially%20Varying%20Subsurface%20Scattering%20Parameters.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {11th International Fall Workshop on Vision, Modeling and Visualization},
address = {Aachen, Germany},
abstract = {We present an image-based technique to efficiently acquire spatially varying subsurface reflectance properties of a human face. The estimated properties can be used directly to render faces with spatially varying scattering, or can be used to estimate a robust average across the face. We demonstrate our technique with renderings of people's faces under novel, spatially-varying illumination and provide comparisons with current techniques. Our captured data consists of images of the face from a single viewpoint under two small sets of projected images. The first set, a sequence of phase-shifted periodic stripe patterns, provides a per-pixel profile of how light scatters from adjacent locations. The second set of structured light patterns is used to obtain face geometry. We subtract the minimum of each profile to remove the contribution of interreflected light from the rest of the face, and then match the observed reflectance profiles to scattering properties predicted by a scattering model using a lookup table. From these properties we can generate images of the subsurface reflectance of the face under any incident illumination, including local lighting. The rendered images exhibit realistic subsurface transport, including light bleeding across shadow edges. Our method works more than an order of magnitude faster than current techniques for capturing subsurface scattering information, and makes it possible for the first time to capture these properties over an entire face.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.; Egges, Arjan; Eliëns, Anton; Isbister, Katherine; Paiva, Ana; Rist, Thomas; Hagen, Paul
Design criteria, techniques and case studies for creating and evaluating interactive experiences for virtual humans Proceedings Article
In: Dagstuhl Seminar Proceedings, 2006.
@inproceedings{gratch_design_2006,
title = {Design criteria, techniques and case studies for creating and evaluating interactive experiences for virtual humans},
author = {Jonathan Gratch and Stacy C. Marsella and Arjan Egges and Anton Eliëns and Katherine Isbister and Ana Paiva and Thomas Rist and Paul Hagen},
url = {http://ict.usc.edu/pubs/Design%20criteria%20techniques%20and%20case%20studies%20for%20creating%20and%20evaluating%20interactive%20experiences%20for%20virtual%20humans.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {Dagstuhl Seminar Proceedings},
abstract = {How does one go about designing a human? With the rise in recent years of virtual humans this is no longer purely a philosophical question. Virtual humans are intelligent agents with a body, often a human-like graphical body, that interact verbally and non-verbally with human users on a variety of tasks and applications. At a recent meeting on this subject, the above authors participated in a several day discussion on the question of virtual human design. Our working group approached this question from the perspective of interactivity. Specifically, how can one design effective interactive experiences involving a virtual human, and what constraints does this goal place on the form and function of an embodied conversational agent. Our group grappled with several related questions: What ideals should designers aspire to, what sources of theory and data will best lead to this goal and what methodologies can inform and validate the design process? This article summarizes our output and suggests a specific framework, borrowed from interactive media design, as a vehicle for advancing the state of interactive experiences with virtual humans.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Parsons, Thomas D.; Tucker, Karen A.; Hall, Colin D.; Robertson, Wendy T.; Eron, Joseph J.; Fried, Michael W.; Robertson, R. Kevin
Neurocognitive functioning and HAART in HIV and hepatitis C virus co-infection Journal Article
In: AIDS, vol. 20, pp. 1591–1595, 2006.
@article{parsons_neurocognitive_2006,
title = {Neurocognitive functioning and HAART in HIV and hepatitis C virus co-infection},
author = {Thomas D. Parsons and Karen A. Tucker and Colin D. Hall and Wendy T. Robertson and Joseph J. Eron and Michael W. Fried and R. Kevin Robertson},
url = {http://ict.usc.edu/pubs/Neurocognitive%20functioning%20and%20HAART%20in%20HIV%20and%20hepatitis%20C%20virus%20co-infection.pdf},
year = {2006},
date = {2006-05-01},
journal = {AIDS},
volume = {20},
pages = {1591–1595},
abstract = {Objectives: This study examined the effects of HAART on neurocognitive functioning in persons with hepatitis C virus (HCV) and HIV co-infection. Design: A prospective study examining neurocognitive performance before and after HAART initiation. Method: Participant groups included a mono-infected group (45 HIV/HCV-participants) and a co-infected group (20 HIV/HCV participants). A neuropsychological battery (attention/concentration, psychomotor speed, executive functioning, verbal memory, visual memory, fine motor, and gross motor functioning) was used to evaluate all participants. After 6 months of HAART, 31 HIV mono-infected and 13 HCV/HIV co-infected participants were reevaluated. Results: Neurocognitive functioning by domain revealed significantly worse performance in the co-infected group when compared to the monoinfected group on domains of visual memory and fine motor functioning. Assessment of neurocognitive functioning after antiretroviral therapy revealed that the co-infected group was no longer performing worse than the monoinfected group. Conclusions: The findings of the current study suggest that persons with HCV/HIV co-infection may have greater neurocognitive declines than persons with HIV infection alone. HCV/HIV co-infection may accelerate the progression of HIV related neurocognitive decline.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Riedl, Mark O.; Stern, Andrew
Believable Agents and Intelligent Scenario Direction for Social and Cultural Leadership Training Proceedings Article
In: 15th Conference on Behavior Representation in Modeling and Simulation (BRIMS), Baltimore, MD, 2006.
@inproceedings{riedl_believable_2006,
title = {Believable Agents and Intelligent Scenario Direction for Social and Cultural Leadership Training},
author = {Mark O. Riedl and Andrew Stern},
url = {http://ict.usc.edu/pubs/Believable%20Agents%20and%20Intelligent%20Scenario%20Direction%20for%20Social%20and%20Cultural%20Leadership%20Training.pdf},
year = {2006},
date = {2006-05-01},
booktitle = {15th Conference on Behavior Representation in Modeling and Simulation (BRIMS)},
address = {Baltimore, MD},
abstract = {Simulation provides an opportunity for a trainee to practice skills in an interactive and reactive virtual environment. We present a technique for social and cultural leader training through simulation, based on a combination of interactive synthetic agents and intelligent scenario direction and adaptation. Social simulation through synthetic characters provides an engaging and believable experience for the trainee. In addition, the trainee is exposed to a sequence of relevant learning situations in which the trainee can practice problem-solving under particular conditions. An Automated Scenario Director provides high-level guidance to semi-autonomous character agents to coerce the trainee's experience to conform to a given scenario. When the trainee performs actions in the virtual world that cause the simulation state to deviate from the scenario, the Automated Scenario Director adapts the scenario to resolve any unexpected inconsistencies, thereby preserving the trainee's perception of self-control while still retaining any relevant learning situations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
EMA: A computational model of appraisal dynamics Proceedings Article
In: Agent Construction and Emotions: Modeling the Cognitive Antecedents and Consequences of Emotion, Vienna, Austria, 2006.
@inproceedings{marsella_ema_2006,
title = {EMA: A computational model of appraisal dynamics},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/EMA-%20A%20computational%20model%20of%20appraisal%20dynamics.pdf},
year = {2006},
date = {2006-04-01},
booktitle = {Agent Construction and Emotions: Modeling the Cognitive Antecedents and Consequences of Emotion},
address = {Vienna, Austria},
abstract = {A computational model of emotion must explain both the rapid dynamics of some emotional reactions and the slower responses that follow deliberation. This is often addressed by positing multiple appraisal processes, such as fast pattern-directed vs. slower deliberative appraisals. In our view, this confuses appraisal with inference. Rather, we argue for a single and automatic appraisal process that operates over a person's interpretation of their relationship to the environment. Dynamics arise from perceptual and inferential processes operating on this interpretation (including deliberative and reactive processes). We illustrate this perspective through the computational modeling of a naturalistic emotional situation.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David
Talking to Virtual Humans: Dialogue Models and Methodologies for Embodied Conversational Agents Book Section
In: Modeling Communication with Robots and Virtual Humans, pp. 296–309, 2006.
@incollection{traum_talking_2006,
title = {Talking to Virtual Humans: Dialogue Models and Methodologies for Embodied Conversational Agents},
author = {David Traum},
url = {http://ict.usc.edu/pubs/Talking%20to%20Virtual%20Humans.pdf},
year = {2006},
date = {2006-04-01},
booktitle = {Modeling Communication with Robots and Virtual Humans},
pages = {296–309},
abstract = {Virtual Humans are artificial characters who look and act like humans but inhabit a simulated environment. One important aspect of many virtual humans is their communicative dialogue ability. In this paper we outline a methodology for the study of dialogue behavior and the construction of virtual humans. We also consider three architectures for different types of virtual humans that have been built at the Institute for Creative Technologies.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Bolas, Mark; Pair, Jarrell; Haynes, Kip; McDowall, Ian
Display Research at the University of Southern California Proceedings Article
In: IEEE Emerging Displays Workshop, Alexandria, VA, 2006.
@inproceedings{bolas_display_2006,
title = {Display Research at the University of Southern California},
author = {Mark Bolas and Jarrell Pair and Kip Haynes and Ian McDowall},
url = {http://ict.usc.edu/pubs/Display%20Research%20at%20the%20University%20of%20Southern%20California.pdf},
year = {2006},
date = {2006-03-01},
booktitle = {IEEE Emerging Displays Workshop},
address = {Alexandria, VA},
abstract = {The University of Southern California and its collaborative research partner, Fakespace Labs, are participating in a number of research programs to invent and implement new forms of display technology for immersive and semi-immersive applications. This paper briefly describes three of these technologies and highlights a few emerging results from those efforts. The first system is a rear-projected, 300-degree field-of-view cylindrical display. It is driven by 11 projectors with geometry correction and edge blending hardware. A full-scale prototype will be completed in March 2006. The second system is a 14-screen projected panoramic room environment used as an advanced teaching and meeting space. It can be driven by a cluster of personal computers, by low-cost DVD players, or by a single personal computer. The third is a prototype stereoscopic head mounted display that can be worn in a fashion similar to standard dust protection goggles. It provides a field of view in excess of 150 degrees.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Pair, Jarrell; Graap, Ken; Manson, Brian; McNerney, Peter J.; Wiederhold, Brenda K.; Wiederhold, Mark; Spira, James
A Virtual Reality Exposure Therapy Application for Iraq War Military Personnel with Post Traumatic Stress Disorder: From Training to Toy to Treatment Proceedings Article
In: NATO Advanced Research Workshop on Novel Approaches to the Diagnosis and Treatment of Posttraumatic Stress Disorder, 2006.
@inproceedings{rizzo_virtual_2006,
title = {A Virtual Reality Exposure Therapy Application for Iraq War Military Personnel with Post Traumatic Stress Disorder: From Training to Toy to Treatment},
author = {Albert Rizzo and Jarrell Pair and Ken Graap and Brian Manson and Peter J. McNerney and Brenda K. Wiederhold and Mark Wiederhold and James Spira},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Reality%20Exposure%20Therapy%20Application%20for%20Iraq%20War%20Military%20Personnel%20with%20Post%20Traumatic%20Stress%20Disorder-%20From%20Training%20to%20Toy%20to%20Treatment.pdf},
year = {2006},
date = {2006-03-01},
booktitle = {NATO Advanced Research Workshop on Novel Approaches to the Diagnosis and Treatment of Posttraumatic Stress Disorder},
abstract = {Post Traumatic Stress Disorder is reported to be caused by traumatic events that are outside the range of usual human experiences, including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage, and terrorist attacks. Initial data suggest that 1 out of 6 Iraq War veterans is exhibiting symptoms of depression, anxiety, and PTSD. Virtual Reality (VR) exposure treatment has been used in previous treatments of PTSD patients with reports of positive outcomes. The aim of the current paper is to specify the rationale, design, and development of a Virtual Iraq PTSD VR application created from the virtual assets that were initially developed for a combat tactical training simulation, which then served as the inspiration for the X-Box game entitled Full Spectrum Warrior.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pair, Jarrell; Allen, Brian; Dautricourt, Matthieu; Treskunov, Anton; Liewer, Matt; Graap, Ken; Reger, Greg; Rizzo, Albert
A Virtual Reality Exposure Therapy Application for Iraq War Post Traumatic Stress Disorder Proceedings Article
In: Proceedings of the IEEE VR 2006 Conference, pp. 64–71, Alexandria, VA, 2006.
@inproceedings{pair_virtual_2006,
title = {A Virtual Reality Exposure Therapy Application for Iraq War Post Traumatic Stress Disorder},
author = {Jarrell Pair and Brian Allen and Matthieu Dautricourt and Anton Treskunov and Matt Liewer and Ken Graap and Greg Reger and Albert Rizzo},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Reality%20Exposure%20Therapy%20Application%20for%20Iraq%20War%20Post%20Traumatic%20Stress%20Disorder.pdf},
year = {2006},
date = {2006-03-01},
booktitle = {Proceedings of the IEEE VR 2006 Conference},
pages = {64–71},
address = {Alexandria, VA},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experiences, including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage, and terrorist attacks. Initial data suggest that 1 out of 6 Iraq War veterans is exhibiting symptoms of depression, anxiety, and PTSD. Virtual Reality (VR) exposure treatment has been used in previous treatments of PTSD patients with reports of positive outcomes. The aim of the current paper is to present the rationale, technical specifications, application features, and user-centered design process for the development of a Virtual Iraq PTSD VR therapy application. The VR treatment environment is being created by recycling virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game Full Spectrum Warrior, in addition to other available and newly created assets. Thus far we have created a series of customizable virtual scenarios designed to represent relevant contexts for exposure therapy to be conducted in VR, including a city and a desert road convoy environment. User-centered tests with the application are currently underway at the Naval Medical Center–San Diego and within an Army Combat Stress Control Team in Iraq, with clinical trials scheduled to commence in February 2006.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Miller, Karen J.; Parsons, Thomas D.; Whybrow, Peter C.; Van Herle, Katja; Rasgon, Natalie; Van Herle, Andre; Martinez, Dorothy; Silverman, Dan H.; Bauer, Michael
Memory Improvement with Treatment of Hypothyroidism Journal Article
In: International Journal of Neuroscience, vol. 116, no. 8, pp. 895–906, 2006.
@article{miller_memory_2006,
title = {Memory Improvement with Treatment of Hypothyroidism},
author = {Karen J. Miller and Thomas D. Parsons and Peter C. Whybrow and Katja Van Herle and Natalie Rasgon and Andre Van Herle and Dorothy Martinez and Dan H. Silverman and Michael Bauer},
url = {http://ict.usc.edu/pubs/Memory%20Improvement%20with%20Treatment%20of%20Hypothyroidism.pdf},
year = {2006},
date = {2006-01-01},
journal = {International Journal of Neuroscience},
volume = {116},
number = {8},
pages = {895–906},
abstract = {The consequences of inadequate thyroid hormone availability to the brain and the effects of levothyroxine treatment are still poorly understood. This study prospectively assessed the effects of thyroid replacement therapy on cognitive function in patients suffering from biochemically evidenced, untreated hypothyroidism. Significant effects between the untreated hypothyroid group and the control group were limited to verbal memory retrieval. When assessing the effects of 3 months of treatment, results revealed that the treated hypothyroid group showed significantly increased verbal memory retrieval. Results suggest that specific memory retrieval deficits associated with hypothyroidism can resolve after replacement therapy with levothyroxine.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Swartout, William; Gratch, Jonathan; Hill, Randall W.; Hovy, Eduard; Marsella, Stacy C.; Rickel, Jeff; Traum, David
Toward Virtual Humans Journal Article
In: AI Magazine, 2006.
@article{swartout_toward_2006,
title = {Toward Virtual Humans},
author = {William Swartout and Jonathan Gratch and Randall W. Hill and Eduard Hovy and Stacy C. Marsella and Jeff Rickel and David Traum},
url = {http://ict.usc.edu/pubs/Toward%20Virtual%20Humans.pdf},
year = {2006},
date = {2006-01-01},
journal = {AI Magazine},
abstract = {This paper describes the virtual humans developed as part of the Mission Rehearsal Exercise project, a virtual reality-based training system. This project is an ambitious exercise in integration, both in the sense of integrating technology with entertainment industry content and in that we have joined a number of component technologies that have not been integrated before. This integration has not only raised new research issues, but has also suggested some new approaches to difficult problems. We describe the key capabilities of the virtual humans, including task representation and reasoning, natural language dialogue, and emotion reasoning, and show how these capabilities are integrated to provide more human-level intelligence than would otherwise be possible.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}