Publications
Search
Dennison, Mark; Neubauer, Cathy; Passaro, Tony; Harrison, Andre; Scherer, Stefan; Khooshabeh, Pete
Using cardiovascular features to classify state changes during cooperation in a simulated bomb defusal task Proceedings Article
In: Proceedings of the 16th International Conference on Intelligent Virtual Agents, Physiologically Aware Virtual Agent’s (PAVA) Workshop, Los Angeles, CA, 2016.
@inproceedings{dennison_using_2016,
title = {Using cardiovascular features to classify state changes during cooperation in a simulated bomb defusal task},
author = {Mark Dennison and Cathy Neubauer and Tony Passaro and Andre Harrison and Stefan Scherer and Pete Khooshabeh},
url = {http://marksdennison.com/s/DennisonPAVA2016.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 16th International Conference on Intelligent Virtual Agents, Physiologically Aware Virtual Agent’s (PAVA) Workshop},
address = {Los Angeles, CA},
abstract = {Teams of two individuals worked together in a high-intensity simulated bomb diffusing task. Half the teams were given icebreaker social time to increase comfort and familiarity with each other and the remaining half of the teams served as controls and did not meet until the task began. Electrocardiography and impedance cardiography were recorded to examine cardiac changes during task cooperation. Changes in ventricular contractility showed that individuals who had taken part in the icebreaker showed increased task engagement over time whereas controls showed the opposite. Data also trended to show that icebreaker participants were in a challenge state and controls were in a threat state during the final thirty seconds of bomb defusal. Finally, we show that a set of cardiac features can be used to classify participant data as belonging to the icebreaker or control groups with an accuracy as high as 88%.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ahn, Emily; Morbini, Fabrizio; Gordon, Andrew S.
Improving Fluency in Narrative Text Generation With Grammatical Transformations and Probabilistic Parsing Proceedings Article
In: Proceedings of the 9th International Natural Language Generation Conference (INLG-2016), Edinburgh, UK, 2016.
@inproceedings{ahn_improving_2016,
  title     = {Improving Fluency in Narrative Text Generation With Grammatical Transformations and Probabilistic Parsing},
  author    = {Emily Ahn and Fabrizio Morbini and Andrew S. Gordon},
  url       = {https://www.researchgate.net/publication/307512031_Improving_Fluency_in_Narrative_Text_Generation_With_Grammatical_Transformations_and_Probabilistic_Parsing},
  year      = {2016},
  date      = {2016-09-01},
  booktitle = {Proceedings of the 9th International Natural Language Generation Conference (INLG-2016)},
  address   = {Edinburgh, UK},
  abstract  = {In research on automatic generation of narrative text, story events are often formally represented as a causal graph. When serializing and realizing this causal graph as natural language text, simple approaches produce cumbersome sentences with repetitive syntactic structure, e.g. long chains of “because” clauses. In our research, we show that the fluency of narrative text generated from causal graphs can be improved by applying rule-based grammatical transformations to generate many sentence variations with equivalent semantics, then selecting the variation that has the highest probability using a probabilistic syntactic parser. We evaluate our approach by generating narrative text from causal graphs that encode 100 brief stories involving the same three characters, based on a classic film of experimental social psychology. Crowdsourced workers judged the writing quality of texts generated with ranked transformations as significantly higher than those without, and not significantly lower than human-authored narratives of the same situations.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Ghosh, Sayan; Laksana, Eugene; Morency, Louis-Philippe; Scherer, Stefan
Representation Learning for Speech Emotion Recognition Journal Article
In: Interspeech 2016, pp. 3603–3607, 2016.
@article{ghosh_representation_2016,
title = {Representation Learning for Speech Emotion Recognition},
author = {Sayan Ghosh and Eugene Laksana and Louis-Philippe Morency and Stefan Scherer},
url = {https://www.researchgate.net/publication/307889274_Representation_Learning_for_Speech_Emotion_Recognition},
doi = {10.21437},
internal-note = {NOTE(review): doi is truncated to the registrant prefix; Interspeech 2016 DOIs have the form 10.21437/Interspeech.2016-NNN -- recover the full suffix from the ISCA archive},
year = {2016},
date = {2016-09-01},
journal = {Interspeech 2016},
pages = {3603--3607},
abstract = {Speech emotion recognition is an important problem with applications as varied as human-computer interfaces and affective computing. Previous approaches to emotion recognition have mostly focused on extraction of carefully engineered features and have trained simple classifiers for the emotion task. There has been limited effort at representation learning for affect recognition, where features are learnt directly from the signal waveform or spectrum. Prior work also does not investigate the effect of transfer learning from affective attributes such as valence and activation to categorical emotions. In this paper, we investigate emotion recognition from spectrogram features extracted from the speech and glottal flow signals; spectrogram encoding is performed by a stacked autoencoder and an RNN (Recurrent Neural Network) is used for classification of four primary emotions. We perform two experiments to improve RNN training : (1) Representation Learning - Model training on the glottal flow signal to investigate the effect of speaker and phonetic invariant features on classification performance (2) Transfer Learning - RNN training on valence and activation, which is adapted to a four emotion classification task. On the USC-IEMOCAP dataset, our proposed approach achieves a performance comparable to the state of the art speech emotion recognition systems.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ghosh, Sayan; Laksana, Eugene; Morency, Louis-Philippe; Scherer, Stefan
An Unsupervised Approach to Glottal Inverse Filtering Proceedings Article
In: Proceedings of the 2016 24th European Signal Processing Conference (EUSIPCO), Budapest, Hungary, 2016.
@inproceedings{ghosh_unsupervised_2016,
title = {An Unsupervised Approach to Glottal Inverse Filtering},
author = {Sayan Ghosh and Eugene Laksana and Louis-Philippe Morency and Stefan Scherer},
url = {http://www.eurasip.org/Proceedings/Eusipco/Eusipco2016/papers/1570252319.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 2016 24th European Signal Processing Conference (EUSIPCO)},
address = {Budapest, Hungary},
abstract = {The extraction of the glottal volume velocity waveform from voiced speech is a well-known example of a sparse signal recovery problem. Prior approaches have mostly used well-engineered speech processing or convex L1-optimization methods to solve the inverse filtering problem. In this paper, we describe a novel approach to modeling the human vocal tract using an unsupervised dictionary learning framework. We make the assumption of an all-pole model of the vocal tract, and derive an L1 regularized least squares loss function for the all-pole approximation. To evaluate the quality of the extracted glottal volume velocity waveform, we conduct experiments on real-life speech datasets, which include vowels and multi-speaker phonetically balanced utterances. We find that the unsupervised model learns meaningful dictionaries of vocal tracts, and the proposed data-driven unsupervised framework achieves a performance comparable to the IAIF (Iterative Adaptive Inverse Filtering) glottal flow extraction approach.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Buckwalter, J. Galen; Castellani, Brian; McEwen, Bruce; Karlamangla, Arun S.; Rizzo, Albert A.; John, Bruce; O'Donnell, Kyle; Seeman, Teresa
Allostatic Load as a Complex Clinical Construct: A Case-Based Computational Modeling Approach Journal Article
In: Complexity, vol. 21, no. S1, pp. 291–306, 2016, ISSN: 1076-2787.
@article{galen_buckwalter_allostatic_2016,
title = {Allostatic Load as a Complex Clinical Construct: A Case-Based Computational Modeling Approach},
author = {J. Galen Buckwalter and Brian Castellani and Bruce McEwen and Arun S. Karlamangla and Albert A. Rizzo and Bruce John and Kyle O'Donnell and Teresa Seeman},
url = {http://doi.wiley.com/10.1002/cplx.21743},
doi = {10.1002/cplx.21743},
issn = {1076-2787},
year = {2016},
date = {2016-09-01},
journal = {Complexity},
volume = {21},
number = {S1},
pages = {291--306},
abstract = {Allostatic load (AL) is a complex clinical construct, providing a unique window into the cumulative impact of stress. However, due to its inherent complexity, AL presents two major measurement challenges to conventional statistical modeling (the field’s dominant methodology): it is comprised of a complex causal network of bioallostatic systems, represented by an even larger set of dynamic biomarkers; and, it is situated within a web of antecedent socioecological systems, linking AL to differences in health outcomes and disparities. To address these challenges, we employed casebased computational modeling (CBM), which allowed us to make four advances: (1) we developed a multisystem, 7-factor (20 biomarker) model of AL’s network of allostatic systems; (2) used it to create a catalog of nine different clinical AL profiles (causal pathways); (3) linked each clinical profile to a typology of 23 health outcomes; and (4) explored our results (post hoc) as a function of gender, a key socioecological factor. In terms of highlights, (a) the Healthy clinical profile had few health risks; (b) the pro-inflammatory profile linked to high blood pressure and diabetes; (c) Low Stress Hormones linked to heart disease, TIA/Stroke, diabetes, and circulation problems; and (d) high stress hormones linked to heart disease and high blood pressure. Post hoc analyses also found that males were overrepresented on the High Blood Pressure (61.2%), Metabolic Syndrome (63.2%), High Stress Hormones (66.4%), and High Blood Sugar (57.1%); while females were overrepresented on the Healthy (81.9%), Low Stress Hormones (66.3%), and Low Stress Antagonists (stress buffers) (95.4%) profiles.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ravi, Satheesh; Artstein, Ron
Language Portability for Dialogue Systems: Translating a Question-Answering System from English into Tamil Proceedings Article
In: Proceedings of the SIGDIAL 2016 Conference, pp. 111–116, Association for Computational Linguistics, Los Angeles, CA, 2016.
@inproceedings{ravi_language_2016,
title = {Language Portability for Dialogue Systems: Translating a Question-Answering System from English into Tamil},
author = {Satheesh Ravi and Ron Artstein},
url = {http://www.aclweb.org/anthology/W16-3614},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the SIGDIAL 2016 Conference},
pages = {111--116},
publisher = {Association for Computational Linguistics},
address = {Los Angeles, CA},
abstract = {A training and test set for a dialogue system in the form of linked questions and responses is translated from English into Tamil. Accuracy of identifying an appropriate response in Tamil is 79%, compared to the English accuracy of 89%, suggesting that translation can be useful to start up a dialogue system. Machine translation of Tamil inputs into English also results in 79% accuracy. However, machine translation of the English training data into Tamil results in a drop in accuracy to 54% when tested on manually authored Tamil, indicating that there is still a large gap before machine translated dialogue systems can interact with human users.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Paetzel, Maike; Qu, Cheng; Schlangen, David; DeVault, David
Toward incremental dialogue act segmentation in fast-paced interactive dialogue systems Proceedings Article
In: Proceedings of the 17th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 252–262, Association for Computational Linguistics, Los Angeles, CA, 2016.
@inproceedings{manuvinakurike_toward_2016,
title = {Toward incremental dialogue act segmentation in fast-paced interactive dialogue systems},
author = {Ramesh Manuvinakurike and Maike Paetzel and Cheng Qu and David Schlangen and David DeVault},
url = {http://www.aclweb.org/anthology/W16-3632},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 17th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
pages = {252--262},
publisher = {Association for Computational Linguistics},
address = {Los Angeles, CA},
abstract = {In this paper, we present and evaluate an approach to incremental dialogue act (DA) segmentation and classification. Our approach utilizes prosodic, lexico-syntactic and contextual features, and achieves an encouraging level of performance in offline corpus-based evaluation as well as in simulated human-agent dialogues. Our approach uses a pipeline of sequential processing steps, and we investigate the contribution of different processing steps to DA segmentation errors. We present our results using both existing and new metrics for DA segmentation. The incremental DA segmentation capability described here may help future systems to allow more natural speech from users and enable more natural patterns of interaction.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Kennington, Casey; DeVault, David; Schlangen, David
Real-Time Understanding of Complex Discriminative Scene Descriptions Proceedings Article
In: Proceedings of the 17th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 232–241, Association for Computational Linguistics, Los Angeles, CA, 2016.
@inproceedings{manuvinakurike_real-time_2016,
title = {Real-Time Understanding of Complex Discriminative Scene Descriptions},
author = {Ramesh Manuvinakurike and Casey Kennington and David DeVault and David Schlangen},
url = {http://www.aclweb.org/anthology/W16-3630},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 17th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
pages = {232--241},
publisher = {Association for Computational Linguistics},
address = {Los Angeles, CA},
abstract = {Real-world scenes typically have complex structure, and utterances about them consequently do as well. We devise and evaluate a model that processes descriptions of complex configurations of geometric shapes and can identify the described scenes among a set of candidates, including similar distractors. The model works with raw images of scenes, and by design can work word-by-word incrementally. Hence, it can be used in highly-responsive interactive and situated settings. Using a corpus of descriptions from game-play between human subjects (who found this to be a challenging task), we show that reconstruction of description structure in our system contributes to task success and supports the performance of the word-based model of grounded semantics that we use.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Konovalov, Vasily; Melamud, Oren; Artstein, Ron; Dagan, Ido
Collecting Better Training Data using Biased Agent Policies in Negotiation Dialogues Proceedings Article
In: Proceedings of WOCHAT, the Second Workshop on Chatbots and Conversational Agent Technologies, Zerotype, Los Angeles, 2016.
@inproceedings{konovalov_collecting_2016,
  title     = {Collecting Better Training Data using Biased Agent Policies in Negotiation Dialogues},
  author    = {Vasily Konovalov and Oren Melamud and Ron Artstein and Ido Dagan},
  url       = {http://workshop.colips.org/wochat/documents/RP-270.pdf},
  year      = {2016},
  date      = {2016-09-01},
  booktitle = {Proceedings of WOCHAT, the Second Workshop on Chatbots and Conversational Agent Technologies},
  publisher = {Zerotype},
  address   = {Los Angeles},
  abstract  = {When naturally occurring data is characterized by a highly skewed class distribution, supervised learning often benefits from reducing this skew. Human-agent dialogue data is commonly highly skewed when using standard agent policies. Hence, we suggest that agent policies need to be reconsidered in the context of training data collection. Specifically, in this work we implemented biased agent policies that are optimized for data collection in the negotiation domain. Empirical evaluations show that our method is successful in collecting a reasonably balanced corpus in the highly skewed Job-Candidate domain. Furthermore, using this balanced corpus to train a negotiation intent classifier yields notable performance improvements relative to naturally distributed data.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Gratch, Jonathan; DeVault, David; Lucas, Gale
The Benefits of Virtual Humans for Teaching Negotiation Proceedings Article
In: Proceedings of the 16th International Conference on Intelligent Virtual Agents (IVA), 2016, Springer, Los Angeles, CA, 2016.
@inproceedings{gratch_benefits_2016,
title = {The Benefits of Virtual Humans for Teaching Negotiation},
author = {Jonathan Gratch and David DeVault and Gale Lucas},
url = {http://iva2016.ict.usc.edu/wp-content/uploads/Papers/100110276.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 16th International Conference on Intelligent Virtual Agents (IVA), 2016},
publisher = {Springer},
address = {Los Angeles, CA},
abstract = {This article examines the potential for teaching negotiation with virtual humans. Many people find negotiations to be aversive. We conjecture that students may be more comfortable practicing negotiation skills with an agent than with another person. We test this using the Conflict Resolution Agent, a semi-automated virtual human that negotiates with people via natural language. In a between-participants design, we independently manipulated two pedagogically-relevant factors while participants engaged in repeated negotiations with the agent: perceived agency (participants either believed they were negotiating with a computer program or another person) and pedagogical feedback (participants received instructional advice or no advice between negotiations). Findings indicate that novice negotiators were more comfortable negotiating with a computer program (they self-reported more comfort and punished their opponent less often) and expended more effort on the exercise following instructional feedback (both in time spent and in self-reported effort). These findings lend support to the notion of using virtual humans to teach interpersonal skills.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, AA; Lucas, G; Gratch, J; Stratou, G; Morency, L-P; Shilling, R; Hartholt, A; Scherer, S
Clinical interviewing by a virtual human agent with automatic behavior analysis Proceedings Article
In: Proceedings of the 2016 International Conference on Disability, Virtual Reality and Associated Technologies, pp. 57–64, ICDVRAT and the University of Reading, Los Angeles, CA, 2016, ISBN: 978-0-7049-1547-3.
@inproceedings{rizzo_clinical_2016,
title = {Clinical interviewing by a virtual human agent with automatic behavior analysis},
author = {AA Rizzo and G Lucas and J Gratch and G Stratou and L-P Morency and R Shilling and A Hartholt and S Scherer},
url = {http://centaur.reading.ac.uk/66645/8/ICDVRAT2016_Full_Proceedings_11th%20_Conf.pdf},
isbn = {978-0-7049-1547-3},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 2016 International Conference on Disability, Virtual Reality and Associated Technologies},
internal-note = {NOTE(review): booktitle de-duplicated from the exported "Proceedings of The 2016 Proceedings of the ..." -- verify against the published proceedings},
pages = {57--64},
publisher = {ICDVRAT and the University of Reading},
address = {Los Angeles, CA},
abstract = {SimSensei is a Virtual Human (VH) interviewing platform that uses off-the-shelf sensors (i.e., webcams, Microsoft Kinect and a microphone) to capture and interpret real-time audiovisual behavioral signals from users interacting with the VH system. The system was specifically designed for clinical interviewing and health care support by providing a face-to-face interaction between a user and a VH that can automatically react to the inferred state of the user through analysis of behavioral signals gleaned from the user’s facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human-to-human interaction and communication, SimSensei aims to capture and infer user state from signals generated from user non-verbal communication to improve engagement between a VH and a user and to quantify user state from the data captured across a 20 minute interview. As well, previous research with SimSensei indicates that users engaging with this automated system, have less fear of evaluation and self-disclose more personal information compare to when they believe the VH agent is actually an avatar being operated by a “wizard of oz” human-in-the-loop (Lucas et al., 2014). The current study presents results from a sample of military service members (SMs) who were interviewed within the SimSensei system before and after a deployment to Afghanistan. Results indicate that SMs reveal more PTSD symptoms to the SimSensei VH agent than they self-report on the Post Deployment Health Assessment. Pre/Post deployment facial expression analysis indicated more sad expressions and fewer happy expressions at post deployment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marge, Matthew; Bonial, Claire; Pollard, Kimberly A.; Artstein, Ron; Byrne, Brendan; Hill, Susan G.; Voss, Clare; Traum, David
Assessing Agreement in Human-Robot Dialogue Strategies: A Tale of Two Wizards Proceedings Article
In: Proceedings of The Sixteenth International Conference on Intelligent Virtual Agents (IVA 2016), Springer, Los Angeles, CA, 2016.
@inproceedings{marge_assessing_2016,
title = {Assessing Agreement in Human-Robot Dialogue Strategies: A Tale of Two Wizards},
author = {Matthew Marge and Claire Bonial and Kimberly A. Pollard and Ron Artstein and Brendan Byrne and Susan G. Hill and Clare Voss and David Traum},
url = {http://iva2016.ict.usc.edu/wp-content/uploads/Papers/100110460.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of The Sixteenth International Conference on Intelligent Virtual Agents (IVA 2016)},
publisher = {Springer},
address = {Los Angeles, CA},
abstract = {The Wizard-of-Oz (WOz) method is a common experimental technique in virtual agent and human-robot dialogue research for eliciting natural communicative behavior from human partners when full autonomy is not yet possible. For the first phase of our research reported here, wizards play the role of dialogue manager, acting as a robot’s dialogue processing. We describe a novel step within WOz methodology that incorporates two wizards and control sessions: the wizards function much like corpus annotators, being asked to make independent judgments on how the robot should respond when receiving the same verbal commands in separate trials. We show that inter-wizard discussion after the control sessions and the resolution with a reconciled protocol for the follow-on pilot sessions successfully impacts wizard behaviors and significantly aligns their strategies. We conclude that, without control sessions, we would have been unlikely to achieve both the natural diversity of expression that comes with multiple wizards and a better protocol for modeling an automated system.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chollet, Mathieu; Chandrashekhar, Nithin; Shapiro, Ari; Morency, Louis-Philippe; Scherer, Stefan
Manipulating the Perception of Virtual Audiences using Crowdsourced Behaviors Proceedings Article
In: Proceedings of the IVA 2016 : Intelligent Virtual Agents Conference, Springer, Los Angeles, CA, 2016.
@inproceedings{chollet_manipulating_2016,
title = {Manipulating the Perception of Virtual Audiences using Crowdsourced Behaviors},
author = {Mathieu Chollet and Nithin Chandrashekhar and Ari Shapiro and Louis-Philippe Morency and Stefan Scherer},
url = {http://iva2016.ict.usc.edu/wp-content/uploads/Papers/100110162.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the IVA 2016: Intelligent Virtual Agents Conference},
publisher = {Springer},
address = {Los Angeles, CA},
abstract = {Virtual audiences are used for training public speaking and mitigating anxiety related to it. However, research has been scarce on studying how virtual audiences are perceived and which non-verbal behaviors should be used to make such an audience appear in particular states, such as boredom or engagement. Recently, crowdsourcing methods have been proposed for collecting data for building virtual agents' behavior models. In this paper, we use crowdsourcing for creating and evaluating a nonverbal behaviors generation model for virtual audiences. We show that our model successfully expresses relevant audience states (i.e. low to high arousal, negative to positive valence), and that the overall impression exhibited by the virtual audience can be controlled by manipulating the amount of individual audience members that display a congruent state.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Reger, Greg M.; Koenen-Woods, Patricia; Zetocha, Kimberlee; Smolenski, Derek J.; Holloway, Kevin M.; Rothbaum, Barbara O.; Difede, JoAnn; Rizzo, Albert A.; Edwards-Stewart, Amanda; Skopp, Nancy A.; Mishkind, Matthew; Reger, Mark A.; Gahm, Gregory A.
Randomized Controlled Trial of Prolonged Exposure Using Imaginal Exposure vs. Virtual Reality Exposure in Active Duty Soldiers With Deployment-Related Posttraumatic Stress Disorder (PTSD) Journal Article
In: Journal of Consulting and Clinical Psychology, 2016, ISSN: 1939-2117, 0022-006X.
@article{reger_randomized_2016,
title = {Randomized Controlled Trial of Prolonged Exposure Using Imaginal Exposure vs. Virtual Reality Exposure in Active Duty Soldiers With Deployment-Related Posttraumatic Stress Disorder (PTSD)},
author = {Greg M. Reger and Patricia Koenen-Woods and Kimberlee Zetocha and Derek J. Smolenski and Kevin M. Holloway and Barbara O. Rothbaum and JoAnn Difede and Albert A. Rizzo and Amanda Edwards-Stewart and Nancy A. Skopp and Matthew Mishkind and Mark A. Reger and Gregory A. Gahm},
url = {https://www.researchgate.net/profile/Amanda_Edwards-Stewart/publication/307950241_Randomized_Controlled_Trial_of_Prolonged_Exposure_Using_Imaginal_Exposure_vs_Virtual_Reality_Exposure_in_Active_Duty_Soldiers_With_Deployment-Related_Posttraumatic_Stress_Disorder_PTSD/links/57d6f13f08ae601b39ac25d9.pdf},
doi = {10.1037/ccp0000134},
issn = {1939-2117, 0022-006X},
year = {2016},
date = {2016-09-01},
journal = {Journal of Consulting and Clinical Psychology},
abstract = {Prolonged exposure (PE) is an evidence-based psychotherapy for posttraumatic stress disorder (PTSD) but there is limited research with active-duty military populations. Virtual reality exposure (VRE) has shown promise but randomized trials are needed to evaluate efficacy relative to existing standards of care. This study evaluated the efficacy of VRE and PE for active duty soldiers with PTSD from deployments to Iraq and Afghanistan. Active-duty soldiers (N = 162) were randomized to 10-sessions of PE, VRE, or a minimal attention waitlist (WL). Blinded assessors evaluated symptoms at baseline, halfway through treatment, at posttreatment, and at 3- and 6-month follow-ups using the Clinician Administered PTSD Scale (CAPS). Intent-to-treat analyses found that both PE and VRE resulted in significant reductions in PTSD symptoms relative to those in the WL. The majority of patients demonstrated reliable change in PTSD symptoms. There was no difference between PE and VRE regarding treatment drop out before completing 10 sessions (44 and 41% for VRE and PE, respectively). Contrary to hypotheses, analyses at posttreatment did not show that VRE was superior to PE. Post hoc analyses found that PE resulted in significantly greater symptom reductions than VRE at 3- and 6-month follow-up. Both treatments significantly reduced self-reported stigma. PE is an efficacious treatment for active-duty Army soldiers with PTSD from deployments to Iraq or Afghanistan. Results extend previous evidence supporting the efficacy of PE to active-duty military personnel and raise important questions for future research on VRE.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hiraoka, Takuya; Georgila, Kallirroi; Nouri, Elnaz; Traum, David; Nakamura, Satoshi
Reinforcement Learning of Multi-Party Trading Dialog Policies Journal Article
In: Transactions of the Japanese Society for Artificial Intelligence, vol. 31, 2016, ISSN: 1346-8030.
@article{hiraoka_reinforcement_2016,
  title     = {Reinforcement Learning of Multi-Party Trading Dialog Policies},
  author    = {Takuya Hiraoka and Kallirroi Georgila and Elnaz Nouri and David Traum and Satoshi Nakamura},
  url       = {https://www.jstage.jst.go.jp/article/tjsai/31/4/31_B-FC1/_pdf},
  issn      = {1346-8030},
  year      = {2016},
  date      = {2016-09-01},
  journal   = {Transactions of the Japanese Society for Artificial Intelligence},
  volume    = {31},
  abstract  = {Trading dialogs are a kind of negotiation in which an exchange of ownership of items is discussed, and these kinds of dialogs are pervasive in many situations. Recently, there has been an increasing amount of research on applying reinforcement learning (RL) to negotiation dialog domains. However, in previous research, the focus was on negotiation dialog between two participants only, ignoring cases where negotiation takes place between more than two interlocutors. In this paper, as a first study on multi-party negotiation, we apply RL to a multi-party trading scenario where the dialog system (learner) trades with one, two, or three other agents. We experiment with different RL algorithms and reward functions. We use Q-learning with linear function approximation, least-squares policy iteration, and neural fitted Q iteration. In addition, to make the learning process more efficient, we introduce an incremental reward function. The negotiation strategy of the learner is learned through simulated dialog with trader simulators. In our experiments, we evaluate how the performance of the learner varies depending on the RL algorithm used and the number of traders. Furthermore, we compare the learned dialog policies with two strong hand-crafted baseline dialog policies. Our results show that (1) even in simple multi-party trading dialog tasks, learning an effective negotiation policy is not a straightforward task and requires a lot of experimentation; and (2) the use of neural fitted Q iteration combined with an incremental reward function produces negotiation policies as effective or even better than the policies of the two strong hand-crafted baselines.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article},
}
Bresnahan, T.; Rizzo, A.; Burke, S. L.; Partin, M.; Ahlness, R. M.; Trimmer, M.
Using Virtual Interactive Training Agents (VITA) with Adults with Autism and other Developmental Disabilities Proceedings Article
In: Proceedings of the 2016 International Conference on Disability, Virtual Reality, and Associated Technology, pp. 49–56, ICDVRAT and the University of Reading, Los Angeles, CA, 2016, ISBN: 978-0-7049-1547-3.
@inproceedings{bresnahan_using_2016,
title = {Using Virtual Interactive Training Agents (VITA) with Adults with Autism and other Developmental Disabilities},
author = {T. Bresnahan and A. Rizzo and S. L. Burke and M. Partin and R. M. Ahlness and M. Trimmer},
url = {http://www.icdvrat.org/2016/papers/ICDVRAT2016_S02N2_Bresnahan_etal.pdf},
isbn = {978-0-7049-1547-3},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of the 2016 International Conference on Disability, Virtual Reality, and Associated Technology},
pages = {49--56},
publisher = {ICDVRAT and the University of Reading},
address = {Los Angeles, CA},
abstract = {Conversational Virtual Human (VH) agents are increasingly being used to support role-play experiential learning across a range of use-cases and populations. This project examined whether use of the Virtual Interactive Training Agent (VITA) system would improve job interviewing skills in a sample of persons with autism or other developmental disability. The study examined performance differences between baseline and final interviews in face-to-face and virtual reality conditions, and whether statistically significant increases were demonstrated between interviewing conditions. Paired samples t-tests were utilized to examine mean changes in performance by interview stage and in the overall difference between baseline and final interview stages. The preliminary results indicated that VITA is a positive factor when preparing young adults with autism or other developmental disability for employment interviews. Statistically significant results were demonstrated across all pilot conditions and in all but one post-assessment condition.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mizukami, Masahiro; Yoshino, Koichiro; Neubig, Graham; Traum, David; Nakamura, Satoshi
Analyzing the Effect of Entrainment on Dialogue Acts Proceedings Article
In: Proceedings of the SIGDIAL 2016 Conference, pp. 310–318, Association for Computational Linguistics, Los Angeles, CA, 2016.
@inproceedings{mizukami_analyzing_2016,
  author    = {Masahiro Mizukami and Koichiro Yoshino and Graham Neubig and David Traum and Satoshi Nakamura},
  title     = {Analyzing the Effect of Entrainment on Dialogue Acts},
  booktitle = {Proceedings of the SIGDIAL 2016 Conference},
  publisher = {Association for Computational Linguistics},
  address   = {Los Angeles, CA},
  pages     = {310–318},
  year      = {2016},
  date      = {2016-09-01},
  url       = {http://www.sigdial.org/workshops/conference17/proceedings/pdf/SIGDIAL40.pdf},
  abstract  = {Entrainment is a factor in dialogue that affects not only human-human but also human-machine interaction. While entrainment on the lexical level is well documented, less is known about how entrainment affects dialogue on a more abstract, structural level. In this paper, we investigate the effect of entrainment on dialogue acts and on lexical choice given dialogue acts, as well as how entrainment changes during a dialogue. We also define a novel measure of entrainment to measure these various types of entrainment. These results may serve as guidelines for dialogue systems that would like to entrain with users in a similar manner.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Krämer, Nicole C.; Karacora, Bilge; Lucas, Gale; Dehghani, Morteza; Rüther, Gina; Gratch, Jonathan
Closing the gender gap in STEM with friendly male instructors? On the effects of rapport behavior and gender of a virtual agent in an instructional interaction Journal Article
In: Computers & Education, vol. 99, pp. 1–13, 2016, ISSN: 03601315.
@article{kramer_closing_2016,
title = {Closing the gender gap in {STEM} with friendly male instructors? On the effects of rapport behavior and gender of a virtual agent in an instructional interaction},
author = {Nicole C. Krämer and Bilge Karacora and Gale Lucas and Morteza Dehghani and Gina Rüther and Jonathan Gratch},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0360131516300835},
doi = {10.1016/j.compedu.2016.04.002},
issn = {03601315},
year = {2016},
date = {2016-08-01},
journal = {Computers & Education},
volume = {99},
pages = {1–13},
abstract = {While numerous research endeavors address the effects of pedagogical agents, the role of the agent’s gender and its rapport behavior has been neglected. We hypothesize that a minimal amount of behavioral realism induced by display of rapport is necessary for any social effects to occur in human-computer interaction. Further, in line with results from STEM research on female role models, we assume that especially for female learners a same sex agent will be beneficial. In a 2(student gender)x2(agent gender)x2(rapport behavior yes/no) between subjects design, we investigate whether virtual agents can help enhance participants’ performance, effort and motivation in mathematics. Female and male participants (N = 128) interacted with a male or female virtual agent that either displayed rapport or no rapport. Our results confirm the expected main effect of rapport. However, against expectations, our results do not support the assumption that a same sex agent is beneficial for female learners. Participants’ performance and effort were significantly enhanced when interacting with an agent of opposite gender that displayed rapport. Our results have implications on designing agents for education and training purposes.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Freed, Michael C.; Novak, Laura A.; Killgore, William D. S.; Rauch, Sheila A. M.; Koehlmoos, Tracey P.; Ginsberg, J. P.; Krupnick, Janice L.; Rizzo, Albert "Skip"; Andrews, Anne; Engel, Charles C.
IRB and Research Regulatory Delays Within the Military Health System: Do They Really Matter? And If So, Why and for Whom? Journal Article
In: The American Journal of Bioethics, vol. 16, no. 8, pp. 30–37, 2016, ISSN: 1526-5161, 1536-0075.
@article{freed_irb_2016,
title = {{IRB} and Research Regulatory Delays Within the Military Health System: Do They Really Matter? And If So, Why and for Whom?},
author = {Michael C. Freed and Laura A. Novak and William D. S. Killgore and Sheila A. M. Rauch and Tracey P. Koehlmoos and J. P. Ginsberg and Janice L. Krupnick and Albert “Skip” Rizzo and Anne Andrews and Charles C. Engel},
url = {http://www.tandfonline.com/doi/full/10.1080/15265161.2016.1187212},
doi = {10.1080/15265161.2016.1187212},
issn = {1526-5161, 1536-0075},
year = {2016},
date = {2016-08-01},
journal = {The American Journal of Bioethics},
volume = {16},
number = {8},
pages = {30–37},
abstract = {Institutional review board (IRB) delays may hinder the successful completion of federally funded research in the U.S. military. When this happens, time-sensitive, mission-relevant questions go unanswered. Research participants face unnecessary burdens and risks if delays squeeze recruitment timelines, resulting in inadequate sample sizes for definitive analyses. More broadly, military members are exposed to untested or undertested interventions, implemented by well-intentioned leaders who bypass the research process altogether. To illustrate, we offer two case examples. We posit that IRB delays often appear in the service of managing institutional risk, rather than protecting research participants. Regulators may see more risk associated with moving quickly than risk related to delay, choosing to err on the side of bureaucracy. The authors of this article, all of whom are military-funded researchers, government stakeholders, and/or human subject protection experts, offer feasible recommendations to improve the IRB system and, ultimately, research within military, veteran, and civilian populations.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Washburn, Micki; Bordnick, Patrick; Rizzo, Albert “Skip”
A pilot feasibility study of virtual patient simulation to enhance social work students’ brief mental health assessment skills Journal Article
In: Social Work in Health Care, pp. 1–19, 2016, ISSN: 0098-1389, 1541-034X.
@article{washburn_pilot_2016,
title = {A pilot feasibility study of virtual patient simulation to enhance social work students’ brief mental health assessment skills},
author = {Micki Washburn and Patrick Bordnick and Albert “Skip” Rizzo},
url = {https://www.tandfonline.com/doi/full/10.1080/00981389.2016.1210715},
doi = {10.1080/00981389.2016.1210715},
issn = {0098-1389, 1541-034X},
year = {2016},
date = {2016-08-01},
journal = {Social Work in Health Care},
pages = {1–19},
abstract = {This study presents preliminary feasibility and acceptability data on the use of virtual patient (VP) simulations to develop brief assessment skills within an interdisciplinary care setting. Results support the acceptability of technology-enhanced simulations and offer preliminary evidence for an association between engagement in VP practice simulations and improvements in diagnostic accuracy and clinical interviewing skills. Recommendations and next steps for research on technology-enhanced simulations within social work are discussed.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Filter
2005
Kim, Youngjun; Velson, Martin; Hill, Randall W.
Modeling Dynamic Perceptual Attention in Complex Virtual Environments Proceedings Article
In: Conference on Behavior Representation in Modeling and Simulation (BRIMS), Universal City, CA, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{kim_modeling_2005,
  author    = {Youngjun Kim and Martin Velson and Randall W. Hill},
  title     = {Modeling Dynamic Perceptual Attention in Complex Virtual Environments},
  booktitle = {Conference on Behavior Representation in Modeling and Simulation (BRIMS)},
  address   = {Universal City, CA},
  year      = {2005},
  date      = {2005-05-01},
  url       = {http://ict.usc.edu/pubs/Modeling%20Dynamic%20Perceptual%20Attention%20in%20Complex%20Virtual%20Environments.pdf},
  abstract  = {An important characteristic of a virtual human is the ability to direct its perceptual attention to entities and areas in a virtual environment in a manner that appears believable and serves a functional purpose. In this paper, we describe a perceptual attention model that integrates perceptual attention that mediates top-down and bottom-up attention processes of virtual humans within complex virtual environments.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hobbs, Jerry R.; Gordon, Andrew S.
Encoding Knowledge of Commonsense Psychology Proceedings Article
In: 7th International Symposium on Logical Formalizations of Commonsense Reasoning, Corfu, Greece, 2005.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{hobbs_encoding_2005,
title = {Encoding Knowledge of Commonsense Psychology},
author = {Jerry R. Hobbs and Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Encoding%20Knowledge%20of%20Commonsense%20Psychology.pdf},
year = {2005},
date = {2005-05-01},
booktitle = {7th International Symposium on Logical Formalizations of Commonsense Reasoning},
address = {Corfu, Greece},
abstract = {Introduction: In previous papers (Gordon and Hobbs, 2003, 2004) we have described a methodology for determining what knowledge should be included in the knowledge base for an intelligent agent, capable of constructing and executing plans to achieve its goals. An intelligent agent is at least a planning mechanism, so Gordon (2004) asked what concepts are necessary for the common strategies that people use in achieving their goals. He investigated ten different domains, including politics, personal relationships, artistic performance, and warfare, and collected 372 strategies. He authored representations of these strategies in order to identify a controlled vocabulary involving of concepts. These concepts were categorized into 48 different representational areas, such as sets, space, and time. Thirty of the representational areas, involving 635 concepts, were concerned with commonsense psychology; among these are memory, knowledge management, planning, and so on. This result by itself demonstrates the very great importance of commonsense psychology in the construction of intelligent agents. Gordon et al. (2003) then, to define further each of the representational areas, augmented the list of concepts by investigating the English language expressions for concepts in each area. The result was a list of 528 concepts, a set that identifies the target coverage of a formal theory of commonsense psychology. The authors began the development of formal theories that would encompass this list of concepts. In our earlier work (Gordon and Hobbs, 2003), we described the first theory we constructed, memory, as an illustration of the method. We have now completed 14 of the 30 theories, and this paper provides an overview of this work as we close in on the halfway mark.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Kim, Youngjun; Hill, Randall W.; Traum, David
A Computational Model of Dynamic Perceptual Attention for Virtual Humans Proceedings Article
In: Proceedings of the 14th Conference on Behavior Representation in Modeling and Simulation, Universal City, CA, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kim_computational_2005,
  author    = {Youngjun Kim and Randall W. Hill and David Traum},
  title     = {A Computational Model of Dynamic Perceptual Attention for Virtual Humans},
  booktitle = {Proceedings of the 14th Conference on Behavior Representation in Modeling and Simulation},
  address   = {Universal City, CA},
  year      = {2005},
  date      = {2005-05-01},
  url       = {http://ict.usc.edu/pubs/A%20Computational%20Model%20of%20Dynamic%20Perceptual%20Attention%20for%20Virtual%20Humans.pdf},
  abstract  = {An important characteristic of a virtual human is the ability to direct its perceptual attention to objects and locations in a virtual environment in a manner that looks believable and serves a functional purpose. We have developed a computational model of perceptual attention that mediates top-down and bottom-up attention processes of virtual humans in virtual environments. In this paper, we propose a perceptual attention model that will integrate perceptual attention toward objects and locations in the environment with the need to look at other parties in a social context.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gomboc, Dave; Solomon, Steve; Core, Mark; Lane, H. Chad; Lent, Michael
Design Recommendations to Support Automated Explanation and Tutoring Proceedings Article
In: Conference on Behavior Representation in Modeling and Simulation (BRIMS), Universal City, CA, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{gomboc_design_2005,
title = {Design Recommendations to Support Automated Explanation and Tutoring},
author = {Dave Gomboc and Steve Solomon and Mark Core and H. Chad Lane and Michael van Lent},
url = {http://ict.usc.edu/pubs/Design%20Recommendations%20to%20Support%20Automated%20Explanation%20and%20Tutoring.pdf},
year = {2005},
date = {2005-05-01},
booktitle = {Conference on Behavior Representation in Modeling and Simulation (BRIMS)},
address = {Universal City, CA},
abstract = {The after-action review is an essential component of military training exercises. The use of constructive simulations for training poses a challenge when conducting such reviews, because behavior models are typically designed to simulate satisfactorily, without explicit concern for the interrogation of synthetic entities afterward. Ideally, users could obtain knowledge about not only the choices made by a simulator’s behavior models, but also the rationale for those choices. This requires a rich representation of behavioral knowledge within the software system. We have integrated our explainable AI system with behavior models and log information from two simulation systems. Selecting examples from these simulators, we identify areas for improvement to facilitate the automation of explanation and tutoring.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Feng, Donghui; Hovy, Eduard
MRE: A Study on Evolutionary Language Understanding Proceedings Article
In: Second International Workshop on Natural Language Understanding and Cognitive Science (NLUCS), Miami, Florida, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{feng_mre_2005,
  author    = {Donghui Feng and Eduard Hovy},
  title     = {MRE: A Study on Evolutionary Language Understanding},
  booktitle = {Second International Workshop on Natural Language Understanding and Cognitive Science (NLUCS)},
  address   = {Miami, Florida},
  year      = {2005},
  date      = {2005-05-01},
  url       = {http://ict.usc.edu/pubs/MRE-%20A%20Study%20on%20Evolutionary%20Language%20Understanding.pdf},
  abstract  = {The lack of well-annotated data is always one of the biggest problems for most training-based dialogue systems. Without enough training data, it's almost impossible for a trainable system to work. In this paper, we explore the evolutionary language understanding approach to build a natural language understanding machine in a virtual human training project. We build the initial training data with a finite state machine. The language understanding system is trained based on the automated data first and is improved as more and more real data come in, which is proved by the experimental results.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nijholt, Anton; Traum, David
The Virtuality Continuum Revisited Proceedings Article
In: CHI 2005 Workshop on the Virtuality Continuum Revisited, Portland, OR, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{nijholt_virtuality_2005,
  author    = {Anton Nijholt and David Traum},
  title     = {The Virtuality Continuum Revisited},
  booktitle = {CHI 2005 Workshop on the Virtuality Continuum Revisited},
  address   = {Portland, OR},
  year      = {2005},
  date      = {2005-04-01},
  url       = {http://ict.usc.edu/pubs/The%20Virtuality%20Continuum%20Revisited.pdf},
  abstract  = {We survey the themes and the aims of a workshop devoted to the state-of-the-art virtuality continuum. In this continuum, ranging from fully virtual to real physical environments, allowing for mixed, augmented and desktop virtual reality, several perspectives can be taken. Originally, the emphasis was on display technologies. Here we take the perspective of the inhabited environment, that is, environments positioned somewhere on this continuum that are inhabited by virtual (embodied) agents, that interact with each other and with their human partners. Hence, we look at it from the multi-party interaction perspective. In this workshop we will investigate the current state of the art, its shortcomings and a future research agenda.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Peterson, Michael J.; Kyriakakis, Chris
Choosing Candidate Locations for Source Localization Proceedings Article
In: International Workshop on Hands Free Communication and Microphone Arrays, Rutgers, NY, 2005.
@inproceedings{peterson_choosing_2005,
title = {Choosing Candidate Locations for Source Localization},
author = {Michael J. Peterson and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/CHOOSING%20CANDIDATE%20LOCATIONS%20FOR%20SOURCE%20LOCALIZATION.pdf},
year = {2005},
date = {2005-03-01},
booktitle = {International Workshop on Hands Free Communication and Microphone Arrays},
address = {Piscataway, NJ},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Swanson, Reid; Gordon, Andrew S.
Automated Commonsense Reasoning About Human Memory Proceedings Article
In: AAAI Spring Symposium on Metacognitive Computing, Stanford, CA, 2005.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{swanson_automated_2005,
  author    = {Reid Swanson and Andrew S. Gordon},
  title     = {Automated Commonsense Reasoning About Human Memory},
  booktitle = {AAAI Spring Symposium on Metacognitive Computing},
  address   = {Stanford, CA},
  year      = {2005},
  date      = {2005-03-01},
  url       = {http://ict.usc.edu/pubs/Automated%20Commonsense%20Reasoning%20About%20Human%20Memory.pdf},
  abstract  = {Metacognitive reasoning in computational systems will be enabled by the development of formal theories that have broad coverage over mental states and processes as well as inferential competency. In this paper we evaluate the inferential competency of an existing formal theory of commonsense human memory by attempting to use it to validate the appropriateness of a commonsense memory strategy. We formulate a particular memory strategy (to create an associated obstacle) as a theorem in first-order predicate calculus. We then attempt to validate this strategy by showing that it is entailed by the axioms of the theory we evaluated. These axioms were encoded into the syntax of an automated reasoning system, which was used to automatically generate inferences and search for formal proofs.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Busso, Carlos; Hernanz, Sergi; Chu, Chi-Wei; Kwon, Soon-il; Lee, Sung; Georgiou, Panayiotis G.; Cohen, Isaac; Narayanan, Shrikanth
Smart Room: Participant and Speaker Localization and Identification Proceedings Article
In: Proceedings of the IEEE Conference on Acoustics, Speech and Signal Processing, Philadelphia, PA, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{busso_smart_2005,
title = {Smart Room: Participant and Speaker Localization and Identification},
author = {Carlos Busso and Sergi Hernanz and Chi-Wei Chu and Soon-il Kwon and Sung Lee and Panayiotis G. Georgiou and Isaac Cohen and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/SMART%20ROOM-%20PARTICIPANT%20AND%20SPEAKER%20LOCALIZATION%20AND%20IDENTIFICATION.pdf},
year = {2005},
date = {2005-03-01},
booktitle = {Proceedings of the IEEE Conference on Acoustics, Speech and Signal Processing},
address = {Philadelphia, PA},
abstract = {Our long-term objective is to create Smart Room Technologies that are aware of the users’ presence and their behavior and can become an active, but not an intrusive, part of the interaction. In this work, we present a multimodal approach for estimating and tracking the location and identity of the participants including the active speaker. Our smart room design contains three user-monitoring systems: four CCD cameras, an omnidirectional camera and a 16 channel microphone array. The various sensory modalities are processed both individually and jointly and it is shown that the multimodal approach results in significantly improved performance in spatial localization, identification and speech activity detection of the participants.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pair, Jarrell; Treskunov, Anton; Piepol, Diane
Leveraging Hollywood Set Design Techniques to Enhance Ad Hoc Immersive Display Systems Proceedings Article
In: IEEE VR Emerging Displays Workshop, Bonn, Germany, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{pair_leveraging_2005,
  author    = {Jarrell Pair and Anton Treskunov and Diane Piepol},
  title     = {Leveraging Hollywood Set Design Techniques to Enhance Ad Hoc Immersive Display Systems},
  booktitle = {IEEE VR Emerging Displays Workshop},
  address   = {Bonn, Germany},
  year      = {2005},
  date      = {2005-03-01},
  url       = {http://ict.usc.edu/pubs/Leveraging%20Hollywood%20Set%20Design%20Techniques%20to%20Enhance%20Ad%20Hoc%20Immersive%20Display%20Systems.pdf},
  abstract  = {Over the past four years, the FlatWorld project [1] at the University of Southern California Institute for Creative Technologies has exploited ad hoc immersive display techniques to prototype virtual reality education and training applications. While our approach is related to traditional immersive projection systems such as the CAVE [2], our work draws extensively upon techniques widely used in Hollywood sets and theme parks. Our first display system, initially prototyped in 2001, enables wide area virtual environments in which participants can maneuver through simulated rooms, buildings, or streets. In 2004, we expanded our work by experimenting with transparent projection screens. To date, we have used this display technique for presenting life size interactive characters with a pseudo-holographic appearance.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Wang, Dagen; Narayanan, Shrikanth
Speech Rate Estimation Via Temporal Correlation and Selected Sub-band Correlation Proceedings Article
In: Proceedings of the International Conference on Acoustics, Speech, and Signal Processing (ICASSP), pp. 413–416, Philadelphia, PA, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{wang_speech_2005,
title = {Speech Rate Estimation Via Temporal Correlation and Selected Sub-band Correlation},
author = {Dagen Wang and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/SPEECH%20RATE%20ESTIMATION%20VIA%20TEMPORAL%20CORRELATION%20AND%20SELECTED%20SUB-BAND%20CORRELATION.pdf},
year = {2005},
date = {2005-03-01},
booktitle = {Proceedings of the International Conference on Acoustics, Speech, and Signal Processing (ICASSP)},
pages = {413–416},
address = {Philadelphia, PA},
abstract = {In this paper, we propose a novel method for speech rate estimation without requiring automatic speech recognition. It extends the methods of spectral subband correlation by including temporal correlation and the use of selecting prominent spectral subbands for correlation. Furthermore, to address some of the practical issues in previously published methods, we introduce some novel components into the algorithm such as the use of pitch confidence, magnifying window, relative peak measure and relative threshold. By selecting the parameters and thresholds from realistic development sets, this method achieves a 0.972 correlation coefficient on syllable number estimation and a 0.706 correlation on speech rate estimation. This result is about 6.9% improvement than current best single estimator and 3.5% improvement than current multi-estimator evaluated on the same switchboard database.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Peterson, Michael J.; Kyriakakis, Chris
Hybrid Algorithm for Robust, Real-time Source Localization in the Reverberant Environments Proceedings Article
In: International Conference on Acoustics, Speech and Signal Processing, Philadelphia, PA, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{peterson_hybrid_2005,
title = {Hybrid Algorithm for Robust, Real-time Source Localization in the Reverberant Environments},
author = {Michael J. Peterson and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/HYBRID%20ALGORITHM%20FOR%20ROBUST,%20REAL-TIME%20SOURCE%20LOCALIZATION%20IN%20REVERBERANT%20ENVIRONMENTS.pdf},
year = {2005},
date = {2005-03-01},
booktitle = {International Conference on Acoustics, Speech and Signal Processing},
address = {Philadelphia, PA},
abstract = {The location of an acoustical source can be found robustly using the Steered Response Pattern - Phase Transform (SRP-PHAT) algorithm. However SRP-PHAT can be computationally expensive, requiring a search of a large number of candidate locations. The required spacing between these locations is dependent on sampling rate, microphone array geometry, and source location. In this work, a novel method will be presented that calculates a smaller number of test points using an efficient closed-form localization algorithm. This method significantly reduces the number of calculations, while still remaining robust in acoustical environments.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, C. M.; Narayanan, Shrikanth
Toward Detecting Emotions in Spoken Dialogs Proceedings Article
In: IEEE Transactions on Speech and Audio Processing, pp. 293–303, 2005.
Abstract | Links | BibTeX | Tags:
@article{lee_toward_2005,
title = {Toward Detecting Emotions in Spoken Dialogs},
author = {C. M. Lee and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Toward%20Detecting%20Emotions%20in%20Spoken%20Dialogs.pdf},
year = {2005},
date = {2005-03-01},
journal = {IEEE Transactions on Speech and Audio Processing},
volume = {12},
pages = {293–303},
abstract = {The importance of automatically recognizing emotions from human speech has grown with the increasing role of spoken language interfaces in human-computer interaction applications. This paper explores the detection of domain-specific emotions using language and discourse information in conjunction with acoustic correlates of emotion in speech signals. The specific focus is on a case study of detecting negative and non-negative emotions using spoken language data obtained from a call center application. Most previous studies in emotion recognition have used only the acoustic information contained in speech. In this paper, a combination of three sources of information-acoustic, lexical, and discourse-is used for emotion recognition. To capture emotion information at the language level, an information-theoretic notion of emotional salience is introduced. Optimization of the acoustic correlates of emotion with respect to classification error was accomplished by investigating different feature sets obtained from feature selection, followed by principal component analysis. Experimental results on our call center data show that the best results are obtained when acoustic and language information are combined. Results show that combining all the information, rather than using only acoustic information, improves emotion classification by 40.7% for males and 36.4% for females (linear discriminant classifier used for acoustic information).},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ananthakrishnan, S.; Narayanan, Shrikanth
An Automatic Prosody Recognizer Using a Coupled Multi-stream Acoustic Model and a Syntactic-Prosodic Language Model Proceedings Article
In: Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing, Philadelphia, PA, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{ananthakrishnan_automatic_2005,
  author    = {S. Ananthakrishnan and Shrikanth Narayanan},
  title     = {An Automatic Prosody Recognizer Using a Coupled Multi-stream Acoustic Model and a Syntactic-Prosodic Language Model},
  booktitle = {Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing},
  address   = {Philadelphia, PA},
  year      = {2005},
  date      = {2005-03-01},
  url       = {http://ict.usc.edu/pubs/AN%20AUTOMATIC%20PROSODY%20RECOGNIZER%20USING%20A%20COUPLED%20MULTI-STREAM%20ACOUSTIC%20MODEL%20AND%20A%20SYNTACTIC-PROSODIC%20LANGUAGE%20MODEL.pdf},
  abstract  = {Automatic detection and labeling of prosodic events in speech has received much attention from speech technologists and linguists ever since the introduction of annotation standards such as ToBI. Since prosody is intricately bound to the semantics of the utterance, recognition of prosodic events is important for spoken language applications such as automatic understanding and translation of speech. Moreover, corpora labeled with prosodic markers are essential for building speech synthesizers that use data-driven approaches to generate natural speech. In this paper, we build a prosody recognition system that detects stress and prosodic boundaries at the word and syllable level in American English using a coupled Hidden Markov Model (CHMM) to model multiple, asynchronous acoustic feature streams and a syntactic-prosodic model that captures the relationship between the syntax of the utterance and its prosodic structure. Experiments show that the recognizer achieves about 75% agreement on stress labeling and 88% agreement on boundary labeling at the syllable level.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hobbs, Jerry R.; Gordon, Andrew S.
Toward a Large-scale Formal Theory of Commonsense Psychology for Metacognition Proceedings Article
In: American Association of Artificial Intelligence Spring Symposium on Metacognitive Computing, Stanford, CA, 2005.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{hobbs_toward_2005,
  author    = {Jerry R. Hobbs and Andrew S. Gordon},
  title     = {Toward a Large-scale Formal Theory of Commonsense Psychology for Metacognition},
  booktitle = {American Association of Artificial Intelligence Spring Symposium on Metacognitive Computing},
  address   = {Stanford, CA},
  year      = {2005},
  date      = {2005-03-01},
  url       = {http://ict.usc.edu/pubs/Toward%20a%20Large-scale%20Formal%20Theory%20of%20Commonsense%20Psychology%20for%20Metacognition.pdf},
  abstract  = {Robust intelligent systems will require a capacity for metacognitive reasoning, where intelligent systems monitor and reflect on their own reasoning processes. A large-scale study of human strategic reasoning indicates that rich representational models of commonsense psychology are available to enable human metacognition. In this paper, we argue that large-scale formalizations of commonsense psychology enable metacognitive reasoning in intelligent systems. We describe our progress toward developing 30 integrated axiomatic theories of commonsense psychology, and discuss the central representational challenges that have arisen in this work to date.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Grote, Christopher L.; Parsons, Thomas D.
Threats to the Livelihood of the Forensic Neuropsychological Practice: Avoiding Ethical Misconduct Journal Article
In: Journal of Forensic Neuropsychology, vol. 4, no. 3, pp. 79–93, 2005.
Abstract | Links | BibTeX | Tags: MedVR
@article{grote_threats_2005,
  author    = {Christopher L. Grote and Thomas D. Parsons},
  title     = {Threats to the Livelihood of the Forensic Neuropsychological Practice: Avoiding Ethical Misconduct},
  journal   = {Journal of Forensic Neuropsychology},
  volume    = {4},
  number    = {3},
  pages     = {79–93},
  year      = {2005},
  date      = {2005-01-01},
  doi       = {10.1300/J151v04n03_06},
  url       = {http://ict.usc.edu/pubs/Threats%20to%20the%20Livelihood%20of%20the%20Forensic%20Neuropsychological%20Practice-%20Avoiding%20Ethical%20Misconduct.pdf},
  abstract  = {We review six diverse issues that have the potential of devaluing our profession, in that ethical missteps could lead to the perception or reality that the work of forensic neuropsychologists is "for sale." By resisting temptations or overtures to engage in inappropriate conduct, such as attacking colleagues or failing to recognize how our own biases might influence our behavior or opinions, neuropsychologists strive to create a work product that enhances the reputation of our profession and makes a positive contribution to the public-at-large.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Martinovski, Bilyana; Traum, David; Marsella, Stacy C.
Rejection of empathy and its linguistic manifestations Proceedings Article
In: Proceedings of Conference on Formal and Informal Negotiation (FINEXIN), Ottawa, Canada, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{martinovski_rejection_2005,
title = {Rejection of empathy and its linguistic manifestations},
author = {Bilyana Martinovski and David Traum and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Rejection%20of%20empathy%20and%20its%20linguistic%20manifestations.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Proceedings of Conference on Formal and Informal Negotiation (FINEXIN)},
address = {Ottawa, Canada},
abstract = {Trust is a crucial quality in the development of individuals and societies and empathy plays a key role in the formation of trust. Trust and empathy have growing importance in studies of negotiation. However, empathy can be rejected which complicates its role in negotiation. This paper presents a linguistic analysis of empathy by focusing on rejection of empathy in negotiation. Some of the rejections are due to failed recognition of the rejector's needs and desires whereas others have mainly strategic functions gaining momentum in the negotiation. In both cases, rejection of empathy is a phase in the negotiation not a breakdown.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Parsons, Thomas D.; Rizzo, Albert; Zaag, Cheryl; McGee, Jocelyn; Buckwalter, John Galen
Gender Differences and Cognition Among Older Adults Journal Article
In: Aging, Neuropsychology, and Cognition, vol. 12, pp. 78–88, 2005.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_gender_2005,
title = {Gender Differences and Cognition Among Older Adults},
author = {Thomas D. Parsons and Albert Rizzo and Cheryl Zaag and Jocelyn McGee and John Galen Buckwalter},
url = {http://ict.usc.edu/pubs/Gender%20Differences%20and%20Cognition%20Among%20Older%20Adults.pdf},
year = {2005},
date = {2005-01-01},
journal = {Aging, Neuropsychology, and Cognition},
volume = {12},
pages = {78–88},
abstract = {The more replicated findings about gender difference in cognitive performance suggest female superiority on visuomotor speed and language ability and male superiority on mechanical and visuospatial tasks. Generally, group strengths found in the early school years become more established at adolescence and remain stable through adulthood. The current study tested whether the patterns established in the early years remained among 30 adult subjects. We also utilized a series of exploratory analyses to determine if observed gender differences were impacted by the covariance present between all cognitive tests. Results suggest that although the patterns established in the early years remain stable through time for males, the established patterns for females are altered with age. Our findings are compelling in supporting a male advantage on visuospatial tasks among older adults. These findings are discussed in terms of common variance between test instruments as a possible source of difference. Our finding that the gender effect tended to increase when common variance was controlled argues that this methodology may enhance the ability to detect domain specific effects.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Robertson, R. Kevin; Mielke, Jens; Appiah, Kuku; Hall, Colin D.; Price, Richard W.; Kumwenda, Johnstone; Kanyama, Cecelia; Amod, Farida; Marra, Christina; Taylor, Terrie; Lalloo, Umesh; Jelsma, Jennifer; Holding, Penny; Boivin, Michael; Birbeck, Gretchen; Nakasujja, Noeline; Sanne, Ian; Parsons, Thomas D.; Parente, Amanda; Tucker, Karen A.
Assessment of neuroAIDS in Africa Journal Article
In: Journal of NeuroVirology, vol. 11, no. S1, pp. 7–16, 2005.
Abstract | Links | BibTeX | Tags: MedVR
@article{robertson_assessment_2005,
title = {Assessment of neuroAIDS in Africa},
author = {R. Kevin Robertson and Jens Mielke and Kuku Appiah and Colin D. Hall and Richard W. Price and Johnstone Kumwenda and Cecelia Kanyama and Farida Amod and Christina Marra and Terrie Taylor and Umesh Lalloo and Jennifer Jelsma and Penny Holding and Michael Boivin and Gretchen Birbeck and Noeline Nakasujja and Ian Sanne and Thomas D. Parsons and Amanda Parente and Karen A. Tucker},
url = {http://ict.usc.edu/pubs/Assessment%20of%20neuroAIDS%20in%20Africa.pdf},
year = {2005},
date = {2005-01-01},
journal = {Journal of NeuroVirology},
volume = {11},
number = {S1},
pages = {7–16},
abstract = {In June of 2004, the Center for AIDS Mental Health Research, National Institute of Mental Health sponsored a conference on the Assessment of NeuroAIDS in Africa, which was held in Blantrye, Malawai. The conference presentations summarized here highlight the need for research on NeuroAIDS in Africa and methods for assessing HIV-related neurological diseases (Robertson, 2004).},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Marsella, Stacy C.
Lessons from Emotion Psychology for the Design of Lifelike Characters Journal Article
In: Applied Artificial Intelligence Journal, vol. 19, pp. 215–233, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{gratch_lessons_2005,
title = {Lessons from Emotion Psychology for the Design of Lifelike Characters},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Lessons%20from%20Emotion%20Psychology%20for%20the%20Design%20of%20Lifelike%20Characters.pdf},
year = {2005},
date = {2005-01-01},
journal = {Applied Artificial Intelligence Journal},
volume = {19},
pages = {215–233},
abstract = {This special issue describes a number of applications that utilize lifelike characters that teach indirectly, by playing some role in a social interaction with a user. The design of such systems reflects a compromise between competing, sometimes unarticulated de- mands: they must realistically exhibit the behaviors and characteristics of their role, they must facilitate the desired learning, and they must work within the limitations of current technology, and there is little theoretical or empirical guidance on the impact of these compromises on learning. Our perspective on this problem is shaped by our interest in the role of emotion and emotional behaviors in such forms of learning. In recent years, there has been an explosion of interest in the role of emotion in the design of virtual hu- mans. The techniques and motivations underlying these various efforts can seem, from an outsider's perspective, as bewildering and multifaceted as the concept of emotion itself is generally accused of being. Drawing on insights from emotion psychology, this article attempts to clarify for the designers of educational agents the various theoretical perspec- tives on the concept of emotion with the aim of giving guidance to designers of educa- tional agents.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hawkins, Tim; Einarsson, Per; Debevec, Paul
A Dual Light Stage Proceedings Article
In: Dutré, Philip; Bala, Kavita (Ed.): Eurographics Symposium on Rendering, Konstanz, Germany, 2005.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{hawkins_dual_2005,
title = {A Dual Light Stage},
author = {Tim Hawkins and Per Einarsson and Paul Debevec},
editor = {Philip Dutré and Kavita Bala},
url = {http://ict.usc.edu/pubs/A%20Dual%20Light%20Stage.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Eurographics Symposium on Rendering},
address = {Konstanz, Germany},
abstract = {We present a technique for capturing high-resolution 4D reflectance fields using the reciprocity property of light transport. In our technique we place the object inside a diffuse spherical shell and scan a laser across its surface. For each incident ray, the object scatters a pattern of light onto the inner surface of the sphere, and we photograph the resulting radiance from the sphere's interior using a camera with a fisheye lens. Because of reciprocity, the image of the inside of the sphere corresponds to the reflectance function of the surface point illuminated by the laser, that is, the color that point would appear to a camera along the laser ray when the object is lit from each direction on the surface of the sphere. The measured reflectance functions allow the object to be photorealistically rendered from the laser's viewpoint under arbitrary directional illumination conditions. Since each captured reflectance function is a high-resolution image, our data reproduces sharp specular reflections and self-shadowing more accurately than previous approaches. We demonstrate our technique by scanning objects with a wide range of reflectance properties and show accurate renderings of the objects under novel illumination conditions.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Kallmann, Marcelo
Scalable Solutions for Interactive Virtual Humans that can Manipulate Objects Proceedings Article
In: First Annual Artificial Intelligence and Interactive Entertainment Conference, Marina del Rey, CA, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{kallmann_scalable_2005,
title = {Scalable Solutions for Interactive Virtual Humans that can Manipulate Objects},
author = {Marcelo Kallmann},
url = {http://ict.usc.edu/pubs/Scalable%20Solutions%20for%20Interactive%20Virtual%20Humans%20that%20can%20Manipulate%20Objects.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {First Annual Artificial Intelligence and Interactive Entertainment Conference},
address = {Marina del Rey, CA},
abstract = {This paper presents scalable solutions for achieving virtual humans able to manipulate objects in interactive virtual environments. The scalability trades computational time with the ability of addressing increasingly difficult constraints. In time-critical environments, arm motions are computed in few milliseconds using fast analytical Inverse Kinematics. For other types of applications where collision-free motions are required, a randomized motion planner capable of generating motions of average complexity in about a second of computation time is employed. The steps required for defining and computing different types of manipulations are described in this paper.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kock, Arien; Gratch, Jonathan
An Evaluation of Automatic Lip-syncing Methods for Game Environments Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 01 2005, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{kock_evaluation_2005,
title = {An Evaluation of Automatic Lip-syncing Methods for Game Environments},
author = {Arien Kock and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/ICT-TR.01.2005.pdf},
year = {2005},
date = {2005-01-01},
number = {ICT TR 01 2005},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {Lip-synching is the production of articulator motion corresponding to a given audible utterance. The Mission Rehearsal Exercise training system requires lip-synching to increase the believability of its virtual agents. In this report I document the selection, exploration, evaluation and comparison of several candidate lip-synching systems, ending with a recommendation. The evaluation focuses on the believability of articulators' expression, the foreseeable difficulty of integration into MRE’s architecture, the support for facial expressions related to semantics and prosodic features as well as the scalability of each system.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Pighin, Frédéric; Patel, Sanjit; Cohen, Jonathan; Chu, Anson
Oriented Particle Level Set for Fluid Simulation Book
2005.
Abstract | Links | BibTeX | Tags:
@book{pighin_oriented_2005,
title = {Oriented Particle Level Set for Fluid Simulation},
author = {Frédéric Pighin and Sanjit Patel and Jonathan Cohen and Anson Chu},
url = {http://ict.usc.edu/pubs/Oriented%20Particle%20Level%20Set%20for%20Fluid%20Simulation.pdf},
year = {2005},
date = {2005-01-01},
abstract = {The particle level set technique has been adopted in computer graphics as the method of choice for tracking the surface of simulated liquids. In this poster, we describe a novel technique for modeling such an interface. Our technique is based on a set of oriented particles that provides a piecewise linear approximation to the interface. Using this improved model, we obtain a more accurate representation of the water surface and reduced mass loss during simulation.},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
Kim, Youngjun; Hill, Randall W.; Traum, David
Controlling the Focus of Perceptual Attention in Embodied Conversational Agents Proceedings Article
In: Proceedings of the 4th International Joint Conference on Autonomous Agents and Multiagent Systems, 2005, ISBN: 1-59593-093-0.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kim_controlling_2005,
title = {Controlling the Focus of Perceptual Attention in Embodied Conversational Agents},
author = {Youngjun Kim and Randall W. Hill and David Traum},
url = {http://ict.usc.edu/pubs/Controlling%20the%20Focus%20of%20Perceptual%20Attention%20in%20Embodied%20Conversational%20Agents.pdf},
doi = {10.1145/1082473.1082641},
isbn = {1-59593-093-0},
year = {2005},
date = {2005-01-01},
booktitle = {Proceedings of the 4th International Joint Conference on Autonomous Agents and Multiagent Systems},
abstract = {In this paper, we present a computational model of dynamic perceptual attention for virtual humans. The computational models of perceptual attention that we surveyed fell into one of two camps: top-down and bottom-up. Biologically inspired computational models [2] typically focus on the bottom-up aspects of attention, while most virtual humans [1,3,7] implement a top-down form of attention. Bottom-up attention models only consider the sensory information without taking into consideration the saliency based on tasks or goals. As a result, the outcome of a purely bottom-up model will not consistently match the behavior of real humans in certain situations. Modeling perceptual attention as a purely top-down process, however, is also not sufficient for implementing a virtual human. A purely top-down model does not take into account the fact that virtual humans need to react to perceptual stimuli vying for attention. Top-down systems typically handle this in an ad hoc manner by encoding special rules to catch certain conditions in the environment. The problem with this approach is that it does not provide a principled way of integrating the ever-present bottom-up perceptual stimuli with top-down control of attention. This model extends the prior model [7] with perceptual resolution based on psychological theories of human perception [4]. This model allows virtual humans to dynamically interact with objects and other individuals, balancing the demands of goal-directed behavior with those of attending to novel stimuli. This model has been implemented and tested with the MRE Project [5].},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
The Fictionalization of Lessons Learned Journal Article
In: IEEE Multimedia, vol. 12, no. 4, pp. 12–14, 2005.
Links | BibTeX | Tags: The Narrative Group
@article{gordon_fictionalization_2005,
title = {The Fictionalization of Lessons Learned},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/The%20Fictionalization%20of%20Lessons%20Learned.pdf},
year = {2005},
date = {2005-01-01},
journal = {IEEE Multimedia},
volume = {12},
number = {4},
pages = {12–14},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {article}
}
Maatman, R. M.; Gratch, Jonathan; Marsella, Stacy C.
Responsive Behavior of a Listening Agent Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 02 2005, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@techreport{maatman_responsive_2005,
title = {Responsive Behavior of a Listening Agent},
author = {R. M. Maatman and Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/ICT-TR.02.2005.pdf},
year = {2005},
date = {2005-01-01},
number = {ICT TR 02 2005},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {The purpose of this assignment is twofold. First the possibility of generating real time responsive behavior is evaluated in order to create a more human-like agent. Second, the effect of the behavior of the agent on the human interactor is evaluated. The main motivation for the focus on responsive gestures is because much research has been done already on gestures that accompany the speaker, and nothing on gesture that accompany the listener, although responsiveness is a crucial part of a conversation. The responsive behavior of a virtual agent consists of performing gestures during the time a human is speaking to the agent. To generate the correct gestures, first a literature research is carried out, from which is concluded that with the current state of Natural Language Understanding technology, it is not possible to extract semantic features of the human speech in real time. Thus, other features have to be considered. The result of the literature research is a basic mapping between real time obtainable features and their correct responsive behavior: - if the speech contains a relatively long period of low pitch then perform a head nod. - if the speech contains relatively high intensity then perform a head nod - if the speech contains disfluency then perform a posture shift, gazing behavior or a frown - if the human performs a posture shift then mirror this posture shift - if the human performs a head shake then mirror this head shake - if the human performs major gazing behavior then mimic this behavior A design has been made to implement this mapping into the behavior of a virtual agent and this design has been implemented which results in two programs. One to mirror the physical features of the human and one to extract the speech features from the voice of the human. The two programs are combined and the effect of the resulting behavior on the human interactor has been tested. 
The results of these tests are that the performing of responsive behavior has a positive effect on the natural behavior of a virtual agent and thus looks promising for future research. However, the gestures proposed by this mapping are not always context-independent. Thus, much refinement is still to be done and more functionality can be added to improve the responsive behavior. The conclusion of this research is twofold. First the performing of responsive behaviors in real time is possible with the presented mapping and this results in a more natural behaving agent. Second, some responsive behavior is still dependent on semantic information. This leaves open the further enhancement of the presented mapping in order to increase the responsive behavior.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Alpaslan, Z. Y.; Yeh, S. -C.; Rizzo, Albert; Sawchuk, Alexander A.
Quantitative Comparison of Interaction with Shutter Glasses and Autostereoscopic Displays Proceedings Article
In: Stereoscopic Displays and Virtual Reality Systems XII Symposium, San Jose, CA, 2005.
Abstract | Links | BibTeX | Tags: MedVR
@inproceedings{alpaslan_quantitative_2005,
title = {Quantitative Comparison of Interaction with Shutter Glasses and Autostereoscopic Displays},
author = {Z. Y. Alpaslan and S. -C. Yeh and Albert Rizzo and Alexander A. Sawchuk},
url = {http://ict.usc.edu/pubs/Quantitative%20Comparison%20of%20Interaction%20with%20Shutter%20Glasses%20and%20Autostereoscopic%20Displays.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Stereoscopic Displays and Virtual Reality Systems XII Symposium},
address = {San Jose, CA},
abstract = {In this paper we describe experimental measurements and comparison of human interaction with three different types of stereo computer displays. We compare traditional shutter glasses-based viewing with three-dimensional (3D) autostereoscopic viewing on displays such as the Sharp LL-151-3D display and StereoGraphics SG 202 display. The method of interaction is a sphere-shaped "cyberprop" containing an Ascension Flock-of-Birds tracker that allows a user to manipulate objects by imparting the motion of the sphere to the virtual object. The tracking data is processed with OpenGL to manipulate objects in virtual 3D space, from which we synthesize two or more images as seen by virtual cameras observing them. We concentrate on the quantitative measurement and analysis of human performance for interactive object selection and manipulation tasks using standardized and scalable configurations of 3D block objects. The experiments use a series of progressively more complex block configurations that are rendered in stereo on various 3D displays. In general, performing the tasks using shutter glasses required less time as compared to using the autostereoscopic displays. While both male and female subjects performed almost equally fast with shutter glasses, male subjects performed better with the LL-151-3D display, while female subjects performed better with the SG202 display. Interestingly, users generally had a slightly higher efficiency in completing a task set using the two autostereoscopic displays as compared to the shutter glasses, although the differences for all users among the displays was relatively small. There was a preference for shutter glasses compared to autostereoscopic displays in the ease of performing tasks, and glasses were slightly preferred for overall image quality and stereo image quality. However, there was little difference in display preference in physical comfort and overall preference. 
We present some possible explanations of these results and point out the importance of the autostereoscopic "sweet spot" in relation to the user's head and body position.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {inproceedings}
}
Martinovski, Bilyana; Mao, Wenji; Gratch, Jonathan; Marsella, Stacy C.
Mitigation Theory: An Integrated Approach Proceedings Article
In: Proceedings of the 27th Annual Conference of the Cognitive Science Society (CogSci), Stresa, Italy, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{martinovski_mitigation_2005,
title = {Mitigation Theory: An Integrated Approach},
author = {Bilyana Martinovski and Wenji Mao and Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Mitigation%20Theory-%20An%20Integrated%20Approach.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Proceedings of the 27th Annual Conference of the Cognitive Science Society (CogSci)},
address = {Stresa, Italy},
abstract = {The purpose of this paper is to develop a theoretical model of mitigation by integrating cognitive and discourse approaches to appraisal and coping. Mitigation involves strategic, emotional, linguistic, and Theory of Mind processes on different levels of consciousness. We emphasize that discourse analysis can assist our understanding of these processes.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
Evaluating Social Causality and Responsibility Models: An Initial Report Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 03 2005, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{mao_evaluating_2005,
title = {Evaluating Social Causality and Responsibility Models: An Initial Report},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/ICT-TR-03-2005.pdf},
year = {2005},
date = {2005-01-01},
number = {ICT TR 03 2005},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {Intelligent virtual agents are typically embedded in a social environment and must reason about social cause and effect. Social causal reasoning is qualitatively different from physical causal reasoning that underlies most current intelligent sys- tems. Besides physical causality, the assessments of social cause emphasize epistemic variables including intentions, foreknowledge and perceived coercion. Modeling the process and inferences of social causality can enrich believability and cognitive capabili- ties of social intelligent agents. In this report, we present a general computational model of social causality and responsibility, and empirical results of a preliminary evaluation of the model in comparison with several other approaches.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Maatman, R. M.; Gratch, Jonathan; Marsella, Stacy C.
Natural Behavior of a Listening Agent Proceedings Article
In: Lecture Notes in Computer Science; Proceedings of the 5th International Working Conference on Intelligent Virtual Agents (IVA), pp. 25–36, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{maatman_natural_2005,
title = {Natural Behavior of a Listening Agent},
author = {R. M. Maatman and Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Natural%20Behavior%20of%20a%20Listening%20Agent.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Lecture Notes in Computer Science; Proceedings of the 5th International Working Conference on Intelligent Virtual Agents (IVA)},
pages = {25–36},
address = {Kos, Greece},
abstract = {In contrast to the variety of listening behaviors produced in human-to-human interaction, most virtual agents sit or stand passively when a user speaks. This is a reflection of the fact that although the correct responsive behavior of a listener during a conversation is often related to the semantics, the state of current speech understanding technology is such that semantic information is unavailable until after an utterance is complete. This paper will illustrate that appropriate listening behavior can also be generated by other features of a speaker's behavior that are available in real time such as speech quality, posture shifts and head movements. This paper presents a mapping from these real-time obtainable features of a human speaker to agent listening behaviors.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Pair, Jarrell; McNerney, Peter J.; Eastlund, Ernie; Manson, Brian; Gratch, Jonathan; Hill, Randall W.; Swartout, William
Development of a VR Therapy Application for Iraq War Military Personnel with PTSD Book Section
In: Studies in Health Technology and Informatics, vol. 111, no. 13, pp. 407–413, 13th Annual Medicine Meets Virtual Reality Conference, Long Beach, CA, 2005.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@incollection{rizzo_development_2005-1,
title = {Development of a VR Therapy Application for Iraq War Military Personnel with PTSD},
author = {Albert Rizzo and Jarrell Pair and Peter J. McNerney and Ernie Eastlund and Brian Manson and Jonathan Gratch and Randall W. Hill and William Swartout},
url = {http://ict.usc.edu/pubs/Development%20of%20a%20VR%20Therapy%20Application%20for%20Iraq%20War%20Veterans%20with%20PTSD.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Studies in Health Technology and Informatics},
volume = {111},
number = {13},
pages = {407–413},
address = {13th Annual Medicine Meets Virtual Reality Conference, Long Beach, CA},
series = {Medicine Meets Virtual Reality},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experiences including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that 1 out of 6 returning Iraq War military personnel are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) exposure therapy has been used in previous treatments of PTSD patients with reports of positive outcomes. The aim of the current paper is to specify the rationale, design and development of an Iraq War PTSD VR application that is being created from the virtual assets that were initially developed for the X-Box game entitled Full Spectrum Warrior which was inspired by a combat tactical training simulation, Full Spectrum Command.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Rizzo, Albert; Morie, Jacquelyn; Williams, Josh; Pair, Jarrell; Buckwalter, John Galen
Human Emotional State and its Relevance for Military VR Training Proceedings Article
In: Proceedings of the 11th International Conference on Human-Computer Interaction, Las Vegas, NV, 2005.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans, Virtual Worlds
@inproceedings{rizzo_human_2005,
title = {Human Emotional State and its Relevance for Military VR Training},
author = {Albert Rizzo and Jacquelyn Morie and Josh Williams and Jarrell Pair and John Galen Buckwalter},
url = {http://ict.usc.edu/pubs/Human%20Emotional%20State%20and%20its%20Relevance%20for%20Military%20VR%20Training.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Proceedings of the 11th International Conference on Human-Computer Interaction},
address = {Las Vegas, NV},
abstract = {Combat environments by their nature can produce a dramatic range of emotional responses in military personnel. When immersed in the emotional "fog of war," the potential exists for optimal human decision-making and performance of goal-directed activities to be seriously compromised. This may be especially true when combat training is conducted under conditions that lack emotional engagement by the soldier. Real world military training often naturally includes stress induction that aims to promote a similarity of internal emotional stimulus cues with what is expected to be present on the battlefield. This approach to facilitating optimal training effectiveness is supported by a long history of learning theory research. Current Virtual Reality military training approaches are noteworthy in their emphasis on creating hi-fidelity graphic and audio realism with the aim to foster better transfer of training. However, less emphasis is typically placed on the creation of emotionally evocative virtual training scenarios that can induce emotional stress in a manner similar to what is typically experienced under real world training conditions. As well, emotional issues in the post-combat aftermath need to be addressed, as can be seen in the devastating emotional difficulties that occur in some military personnel following combat. This is evidenced by the number of recent medical reports that suggest the incidence of "Vietnam-levels" of combat-related Post Traumatic Stress Disorder symptomatology in returning military personnel from the Iraq conflict. In view of these issues, the USC Institute for Creative Technologies (ICT) has initiated a research program to study emotional issues that are relevant to VR military applications. This paper will present the rationale and status of two ongoing VR research programs at the ICT that address sharply contrasting ends of the emotional spectrum relevant to the military: 1. 
The Sensory Environments Evaluation (SEE) Project is examining basic factors that underlie emotion as it occurs within VR training environments and how this could impact transfer of training, and 2. The Full Spectrum Warrior (FSW) Post Traumatic Stress Disorder Project which is currently in the process of converting the existing FSW combat tactical simulation training scenario (and X-Box game) into a VR treatment system for the conduct of graduated exposure therapy in Iraq war military personnel with Post Traumatic Stress Disorder.},
keywords = {MedVR, Virtual Humans, Virtual Worlds},
pubstate = {published},
tppubtype = {inproceedings}
}
Hawkins, Tim; Einarsson, Per; Debevec, Paul
Acquisition of Time-Varying Participating Media Proceedings Article
In: SIGGRAPH, Los Angeles, CA, 2005.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{hawkins_acquisition_2005,
  title     = {Acquisition of Time-Varying Participating Media},
  author    = {Tim Hawkins and Per Einarsson and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Acquisition%20of%20Time-Varying%20Participating%20Media.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {SIGGRAPH},
  address   = {Los Angeles, CA},
  abstract  = {We present a technique for capturing time-varying volumetric data of participating media. A laser sheet is swept repeatedly through the volume, and the scattered light is imaged using a high-speed camera. Each sweep of the laser provides a near-simultaneous volume of density values. We demonstrate rendered animations under changing viewpoint and illumination, making use of measured values for the scattering phase function and albedo.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Klimchuck, Dean; Mitura, Roman; Bowerly, Todd; Buckwalter, John Galen; Kerns, Kim; Randall, Katherine; Adams, Rebecca; Finn, Paul; Tarnanas, Ioannis; Sirbu, Cristian; Ollendick, Thomas H.; Yeh, Shih-Ching
A Virtual Reality Scenario for All Seasons: The Virtual Classroom Proceedings Article
In: Proceedings of the 11th International Conference on Human Computer Interaction, Las Vegas, NV, 2005.
Abstract | Links | BibTeX | Tags: MedVR
@inproceedings{rizzo_virtual_2005,
title = {A Virtual Reality Scenario for All Seasons: The Virtual Classroom},
author = {Albert Rizzo and Dean Klimchuck and Roman Mitura and Todd Bowerly and John Galen Buckwalter and Kim Kerns and Katherine Randall and Rebecca Adams and Paul Finn and Ioannis Tarnanas and Cristian Sirbu and Thomas H. Ollendick and Shih-Ching Yeh},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Reality%20Scenario%20for%20All%20Seasons-%20The%20Virtual%20Classroom%20(HCI).pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Proceedings of the 11th International Conference on Human Computer Interaction},
address = {Las Vegas, NV},
abstract = {Rather than relying on costly physical mock-ups of functional assessment and rehabilitation environments, VR offers the option to produce and distribute identical "standard" environments. Within such digital assessment and rehabilitation scenarios, normative data can be accumulated for performance comparisons needed for assessment, diagnosis and for training purposes. As well, in this manner, reusable archetypic virtual environments constructed for one purpose, could also be applied for clinical applications addressing other purposes. This has now been done with the Virtual Classroom scenario. While originally developed as a controlled stimulus environment in which attention processes could be systematically assessed in children while in the presence of varying levels of distraction, the system is now finding use for other clinical targets. Such applications that are being developed and tested using the Virtual Classroom for other purposes include: 1. Expansion of the range of attention assessment tests (i.e., a `Stroop' Interference testing system for all ages). 2. A wide field of view system to study eye tracking under distracting conditions with ADHD children using an Elumens VisionStation®. 3. Development of the Virtual Classroom as a tool for anxiety assessment and graduated exposure therapy for children with Social Anxiety Disorder. 4. An extension to the class to include a maze of halls leading out of the school for an earthquake safety training application with persons with developmental and learning disabilities.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {inproceedings}
}
Core, Mark; Lane, H. Chad; Lent, Michael; Solomon, Steve; Gomboc, Dave; Carpenter, Paul
Toward Question Answering for Simulations Proceedings Article
In: International Joint Conference on Artificial Intelligence (IJCAI) Workshop on Knowledge and Reasoning for Answering Questions, Edinburgh, Scotland, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{core_toward_2005,
title = {Toward Question Answering for Simulations},
author = {Mark Core and H. Chad Lane and Michael Lent and Steve Solomon and Dave Gomboc and Paul Carpenter},
url = {http://ict.usc.edu/pubs/Toward%20Question%20Answering%20for%20Simulations.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {International Joint Conference on Artificial Intelligence (IJCAI) Workshop on Knowledge and Reasoning for Answering Questions},
address = {Edinburgh, Scotland},
abstract = {The new research area of explainable artificial intelligence (XAI) allows users to question simulated entities whose motivations would otherwise be hidden. Here, we focus on the knowledge representation issues involved in building such systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
Social Causality and Responsibility: Modeling and Evaluation Proceedings Article
In: Lecture Notes in Computer Science; Proceedings of the 5th International Workshop on Intelligent Virtual Agents (IVA), pp. 191–204, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_social_2005,
title = {Social Causality and Responsibility: Modeling and Evaluation},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Social%20Causality%20and%20Responsibility-%20Modeling%20and%20Evaluation.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Lecture Notes in Computer Science; Proceedings of the 5th International Workshop on Intelligent Virtual Agents (IVA)},
pages = {191--204},
address = {Kos, Greece},
abstract = {Intelligent virtual agents are typically embedded in a social environment and must reason about social cause and effect. Social causal reasoning is qualitatively different from physical causal reasoning that underlies most current intelligent systems. Besides physical causality, the assessments of social cause emphasize epistemic variables including intentions, foreknowledge and perceived coercion. Modeling the process and inferences of social causality can enrich the believability and the cognitive capabilities of social intelligent agents. In this paper, we present a general computational model of social causality and responsibility, and empirically evaluate and compare the model with several other approaches.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jan, Dusan; Traum, David
Dialog Simulation for Background Characters Proceedings Article
In: 5th International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{jan_dialog_2005,
title = {Dialog Simulation for Background Characters},
author = {Dusan Jan and David Traum},
url = {http://ict.usc.edu/pubs/Dialog%20Simulation%20for%20Background%20Characters.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {5th International Working Conference on Intelligent Virtual Agents},
address = {Kos, Greece},
abstract = {Background characters in virtual environments do not require the same amount of processing that is usually required by main characters, however we want simulation that is more believable than random behavior. We describe an algorithm that generates behavior for background characters involved in conversation that supports dynamic changes to conversation group structure. We present an evaluation of this algorithm and make suggestions on how to further improve believability of the simulation.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2004
Hill, Randall W.; Gordon, Andrew S.; Kim, Julia
Learning the Lessons of Leadership Experience: Tools for Interactive Case Method Analysis Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{hill_learning_2004,
  title     = {Learning the Lessons of Leadership Experience: Tools for Interactive Case Method Analysis},
  author    = {Randall W. Hill and Andrew S. Gordon and Julia Kim},
  url       = {http://ict.usc.edu/pubs/LEARNING%20THE%20LESSONS%20OF%20LEADERSHIP%20EXPERIENCE-%20TOOLS%20FOR%20INTERACTIVE%20CASE%20METHOD%20ANALYSIS.pdf},
  year      = {2004},
  date      = {2004-12-01},
  booktitle = {Proceedings of the 24th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {The Army Excellence in Leadership (AXL) project at the University of Southern California's Institute for Creative Technologies is aimed at supporting the acquisition of tacit knowledge of military leadership through the development of compelling filmed narratives of leadership scenarios and interactive training technologies. The approach taken in the AXL project is to leverage the best practices of case-method teaching and use Hollywood storytelling techniques to create fictional case studies (as filmed media) addressing specific leadership issues. In addition to authoring compelling cases for analysis, we have developed software prototypes that instantiate the case-method teaching approach. These systems engage individual trainees in human-computer dialogues that are focused on the leadership issues that have been embedded in the fictional cases.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Korris, James H.
Full Spectrum Warrior: How the Institute for Creative Technologies Built a Cognitive Training Tool for the XBox Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{korris_full_2004,
title = {Full Spectrum Warrior: How the Institute for Creative Technologies Built a Cognitive Training Tool for the {XBox}},
author = {James H. Korris},
url = {http://ict.usc.edu/pubs/FULL%20SPECTRUM%20WARRIOR-%20HOW%20THE%20INSTITUTE%20FOR%20CREATIVE%20TECHNOLOGIES%20BUILT%20A%20COGNITIVE%20TRAINING%20TOOL%20FOR%20THE%20XBOX.pdf},
year = {2004},
date = {2004-12-01},
booktitle = {Proceedings of the 24th Army Science Conference},
address = {Orlando, FL},
abstract = {Microsoft's popular game console, the Xbox, combined the possibility of compelling training efficiencies with formidable obstacles to development, both in terms of the business model, the limitation of the Windows 2000 computer inside it and the system's standard human-machine interface. In its mission to leverage the capabilities of the entertainment industry to develop next-generation simulation tools, the Institute for Creative Technologies turned to this inexpensive, powerful platform for its Squad level cognitive tactical trainer. This paper will describe the pedagogical and technological challenges and unique processes that translated Squad level command doctrine to a commercial game interface and a cost-effective, universally-accessible computational medium.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Towards a Validated Model of the Influence of Emotion on Human Performance Proceedings Article
In: Proceedings of the 24th Army Science Conference, 2004.
Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_towards_2004,
  title     = {Towards a Validated Model of the Influence of Emotion on Human Performance},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/TOWARDS%20A%20VALIDATED%20MODEL%20OF%20THE%20INFLUENCE%20OF%20EMOTION%20ON%20HUMAN%20PERFORMANCE.pdf},
  year      = {2004},
  date      = {2004-12-01},
  booktitle = {Proceedings of the 24th Army Science Conference},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Poullis, Charalambos; Gardner, Andrew; Debevec, Paul
Photogrammetric Modeling and Image-based Rendering for Rapid Virtual Environment Creation Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{poullis_photogrammetric_2004,
title = {Photogrammetric Modeling and Image-based Rendering for Rapid Virtual Environment Creation},
author = {Charalambos Poullis and Andrew Gardner and Paul Debevec},
url = {http://ict.usc.edu/pubs/PHOTOGRAMMETRIC%20MODELING%20AND%20IMAGE-BASED%20RENDERING%20FOR%20RAPID%20VIRTUAL%20ENVIRONMENT%20CREATION.pdf},
year = {2004},
date = {2004-12-01},
booktitle = {Proceedings of the 24th Army Science Conference},
address = {Orlando, FL},
abstract = {For realistic simulations, architecture is one of the most important elements to model and render photorealistically. Current techniques of converting architectural plans or survey data to CAD models are labor intensive, and methods for rendering such models are generally not photorealistic. In this work, we present a new approach for modeling and rendering existing architectural scenes from a sparse set of still photographs. For modeling, we use photogrammetric modeling techniques to recover the geometric representation of the architecture. The photogrammetric modeling approach presented in this paper is effective, robust and powerful because it fully exploits structural symmetries and constraints which are characteristic of architectural scenes. For rendering, we use view-dependent texture mapping, a method for compositing multiple images of a scene to create renderings from novel views. Lastly, we present a software package, named Façade, which uses the techniques described to recover the geometry and appearance of architectural scenes directly from a sparse set of photographs.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Gordon, Andrew S.; Leuski, Anton; Traum, David
First Steps Toward Linking Dialogues: Mediating Between Free-text Questions and Pre-recorded Video Answers Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group, Virtual Humans
@inproceedings{gandhe_first_2004,
title = {First Steps Toward Linking Dialogues: Mediating Between Free-text Questions and Pre-recorded Video Answers},
author = {Sudeep Gandhe and Andrew S. Gordon and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/First%20Steps%20Toward%20Linking%20Dialogues-%20Mediating%20Between%20Free-text%20Questions%20and%20Pre-recorded%20Video%20Answers.pdf},
year = {2004},
date = {2004-12-01},
booktitle = {Proceedings of the 24th Army Science Conference},
address = {Orlando, FL},
abstract = {Pre-recorded video segments can be very compelling for a variety of immersive training purposes, including providing answers to questions in after-action reviews. Answering questions fluently using pre-recorded video poses challenges, however. When humans interact, answers are constructed after questions are posed. When answers are pre-recorded, even if a correct answer exists in a library of video segments, the answer may be phrased in a way that is not coherent with the question. This paper reports on basic research experiments with short "linking dialogues" that mediate between the question and answer to reduce (or eliminate) the incoherence, resulting in more natural human-system interaction. A set of experiments were performed in which links were elicited to bridge between questions from users of an existing training application and selected answers from the system, and then comparisons made with unlinked answers. The results show that a linking dialogue can significantly increase the perceived relevance of the system's answers.},
keywords = {The Narrative Group, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Patel, Jigish; Parker, Robert; Traum, David
Simulation of Small Group Discussions for Middle Level of Detail Crowds Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{patel_simulation_2004,
  title     = {Simulation of Small Group Discussions for Middle Level of Detail Crowds},
  author    = {Jigish Patel and Robert Parker and David Traum},
  url       = {http://ict.usc.edu/pubs/Simulation%20of%20Small%20Group%20Discussions%20for%20Middle%20Level%20of%20Detail%20Crowds.pdf},
  year      = {2004},
  date      = {2004-12-01},
  booktitle = {Proceedings of the 24th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {We present an algorithm for animating middle level of detail crowds engaged in conversation. Based on previous work from Padilha and Carletta, this algorithm is used to provide gestures for group characters in an embedded virtual world. The algorithm is implemented and used within the Mission Rehearsal Exercise project at ICT to control Bosnian crowd members.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Stumpfel, Jessi; Jones, Andrew; Wenger, Andreas; Tchou, Chris; Hawkins, Tim; Debevec, Paul
Direct HDR Capture of the Sun and Sky Proceedings Article
In: Proceedings of the 3rd International Conference on Computer Graphics, Virtual Reality, Visualisation and Interaction in Africa, Stellenbosch, South Africa, 2004.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{stumpfel_direct_2004,
title = {Direct {HDR} Capture of the Sun and Sky},
author = {Jessi Stumpfel and Andrew Jones and Andreas Wenger and Chris Tchou and Tim Hawkins and Paul Debevec},
url = {http://ict.usc.edu/pubs/Direct%20HDR%20Capture%20of%20the%20Sun%20and%20Sky.pdf},
year = {2004},
date = {2004-11-01},
booktitle = {Proceedings of the 3rd International Conference on Computer Graphics, Virtual Reality, Visualisation and Interaction in Africa},
address = {Stellenbosch, South Africa},
abstract = {We present a technique for capturing the extreme dynamic range of natural illumination environments that include the sun and sky, which has presented a challenge for traditional high dynamic range photography processes. We find that through careful selection of exposure times, aperture, and neutral density filters that this full range can be covered in seven exposures with a standard digital camera. We discuss the particular calibration issues such as lens vignetting, infrared sensitivity, and spectral transmission of neutral density filters which must be addressed. We present an adaptive exposure range adjustment technique for minimizing the number of exposures necessary. We demonstrate our results by showing time-lapse renderings of a complex scene illuminated by high-resolution, high dynamic range natural illumination environments.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Sadek, Ramy
A Host-Based Real-Time Multichannel Immersive Sound Playback and Processing System Proceedings Article
In: Proceedings of the 117th Audio Engineering Society Convention, San Francisco, CA, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{sadek_host-based_2004,
  title     = {A Host-Based Real-Time Multichannel Immersive Sound Playback and Processing System},
  author    = {Ramy Sadek},
  url       = {http://ict.usc.edu/pubs/A%20Host-Based%20Real-Time%20Multichannel%20Immersive%20Sound%20Playback%20and%20Processing%20System.pdf},
  year      = {2004},
  date      = {2004-10-01},
  booktitle = {Proceedings of the 117th Audio Engineering Society Convention},
  address   = {San Francisco, CA},
  abstract  = {This paper presents ARIA (Application Rendering Immersive Audio). This system provides a means for the research community to easily test and integrate algorithms into a multichannel playback/recording system. ARIA uses a host-based architecture, meaning that programs can be developed and debugged in standard C++ without the need for expensive, specialized DSP programming and testing tools. ARIA allows developers to exploit the speed and low cost of modern CPUs, provides cross-platform portability, and simplifies the modification and sharing of codes. This system is designed for real-time playback and processing, thus closing the gap between research testbed and delivery systems.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pighin, Frédéric; Cohen, Jonathan; Shah, Maurya
Modeling and Editing Flows Using Advected Radial Basis Functions Proceedings Article
In: Proceedings of ACM SIGGRAPH/Eurographics Symposium on Computer Animation, Grenoble, France, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{pighin_modeling_2004,
title = {Modeling and Editing Flows Using Advected Radial Basis Functions},
author = {Frédéric Pighin and Jonathan Cohen and Maurya Shah},
url = {http://ict.usc.edu/pubs/Modeling%20and%20Editing%20Flows%20Using%20Advected%20Radial%20Basis%20Functions.pdf},
year = {2004},
date = {2004-08-01},
booktitle = {Proceedings of ACM SIGGRAPH/Eurographics Symposium on Computer Animation},
address = {Grenoble, France},
abstract = {Fluid simulations are notoriously difficult to predict and control. As a result, authoring fluid flows often involves a tedious trial and error process. There is to date no convenient way of editing a fluid after it has been simulated. In particular, the Eulerian approach to fluid simulation is not suitable for flow editing since it does not provide a convenient spatio-temporal parameterization of the simulated flows. In this research, we develop a new technique to learn such parameterization. This technique is based on a new representation, the Advected Radial Basis Function. It is a time-varying kernel that models the local properties of the fluid. We describe this representation and demonstrate its use for interactive three-dimensional flow editing.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Muller, T. J.; Hartholt, Arno; Marsella, Stacy C.; Gratch, Jonathan; Traum, David
Do You Want To Talk About It? A First Step Towards Emotion Integrated Dialogue Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Kloster Irsee, Germany, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{muller_you_2004,
title = {Do You Want To Talk About It? A First Step Towards Emotion Integrated Dialogue},
author = {T. J. Muller and Arno Hartholt and Stacy C. Marsella and Jonathan Gratch and David Traum},
url = {http://ict.usc.edu/pubs/Do%20you%20want%20to%20talk%20about%20it.pdf},
year = {2004},
date = {2004-08-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Kloster Irsee, Germany},
abstract = {In this paper, we describe an implemented system for emotion-referring dialogue. An agent can engage in emotion-referring dialogue if it first has a model of its own emotions, and secondly has a way of talking about them. We create this facility in MRE Project's virtual humans, building upon the existing emotion and dialogue facilities of these agents.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Iuppa, Nicholas; Weltman, Gershon; Gordon, Andrew S.
Bringing Hollywood Storytelling Techniques to Branching Storylines for Training Applications Proceedings Article
In: Proceedings of the Third International Conference for Narrative and Interactive Learning Environments, Edinburgh, Scotland, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{iuppa_bringing_2004,
  title     = {Bringing Hollywood Storytelling Techniques to Branching Storylines for Training Applications},
  author    = {Nicholas Iuppa and Gershon Weltman and Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/Bringing%20Hollywood%20Storytelling%20Techniques%20to%20Branching%20Storylines%20for%20Training%20Applications.PDF},
  year      = {2004},
  date      = {2004-08-01},
  booktitle = {Proceedings of the Third International Conference for Narrative and Interactive Learning Environments},
  address   = {Edinburgh, Scotland},
  abstract  = {This paper describes the value of capitalizing on Hollywood storytelling techniques in the design of story-based training applications built around branching storylines. After reviewing the design of Outcome-Driven Simulations and the technical aspects of our application prototype, we describe storytelling techniques that greatly improve the level of user engagement in training simulations based on this design. These techniques concern the overall development of the story, the use of a story arc, the critical decisions in a story, notions of pay off and climax, dramatic sequences, character bibles, characters as a Greek chorus, and the significance of consequences and outcomes. Examples of each of these storytelling techniques are given in the context of the ICT Leaders Project, a prototype leadership development application for the US Army.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Evaluating the modeling and use of emotion in virtual humans Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_evaluating_2004,
  title     = {Evaluating the modeling and use of emotion in virtual humans},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Evaluating%20the%20modeling%20and%20use%20of%20emotion%20in%20virtual%20humans.pdf},
  year      = {2004},
  date      = {2004-08-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  address   = {New York, NY},
  abstract  = {Spurred by a range of potential applications, there has been a growing body of research in computational models of human emotion. To advance the development of these models, it is critical that we begin to evaluate them against the phenomena they purport to model. In this paper, we present one methodology to evaluate an emotion model. The methodology is based on comparing the behavior of the computational model against human behavior, using a standard clinical instrument for assessing human emotion and coping. We use this methodology to evaluate the EMA model of emotion. The model did quite well. And, as expected, the comparison helped identify where the model needs further development.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}