Publications
Search
Uryupina, Olga; Artstein, Ron; Bristot, Antonella; Cavicchio, Federica; Rodriguez, Kepa; Poesio, Massimo
ARRAU: Linguistically-Motivated Annotation of Anaphoric Descriptions Proceedings Article
In: Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016), pp. 2058–2062, European Language Resources Association (ELRA), Portorož, Slovenia, 2016.
@inproceedings{uryupina_arrau_2016,
title = {{ARRAU}: Linguistically-Motivated Annotation of Anaphoric Descriptions},
author = {Olga Uryupina and Ron Artstein and Antonella Bristot and Federica Cavicchio and Kepa Rodriguez and Massimo Poesio},
url = {http://www.lrec-conf.org/proceedings/lrec2016/summaries/1121.html},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)},
pages = {2058--2062},
publisher = {European Language Resources Association (ELRA)},
address = {Portorož, Slovenia},
abstract = {This paper presents a second release of the ARRAU dataset: a multi-domain corpus with thorough linguistically motivated annotation of anaphora and related phenomena. Building upon the first release almost a decade ago, a considerable effort had been invested in improving the data both quantitatively and qualitatively. Thus, we have doubled the corpus size, expanded the selection of covered phenomena to include referentiality and genericity and designed and implemented a methodology for enforcing the consistency of the manual annotation. We believe that the new release of ARRAU provides a valuable material for ongoing research in complex cases of coreference as well as for a variety of related tasks. The corpus is publicly available through LDC.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zarrieß, Sina; Hough, Julian; Kennington, Casey; Manuvinakurike, Ramesh; DeVault, David; Fernández, Raquel; Schlangen, David
PentoRef: A Corpus of Spoken References in Task-oriented Dialogues Proceedings Article
In: 10th edition of the Language Resources and Evaluation Conference, ELRA, Portorož, Slovenia, 2016.
@inproceedings{zarrieb_pentoref_2016,
title = {{PentoRef}: A Corpus of Spoken References in Task-oriented Dialogues},
author = {Sina Zarrieß and Julian Hough and Casey Kennington and Ramesh Manuvinakurike and David DeVault and Raquel Fernández and David Schlangen},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/563_Paper.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {10th edition of the Language Resources and Evaluation Conference},
publisher = {ELRA},
address = {Portorož, Slovenia},
abstract = {PentoRef is a corpus of task-oriented dialogues collected in systematically manipulated settings. The corpus is multilingual, with English and German sections, and overall comprises more than 20000 utterances. The dialogues are fully transcribed and annotated with referring expressions mapped to objects in corresponding visual scenes, which makes the corpus a rich resource for research on spoken referring expressions in generation and resolution. The corpus includes several sub-corpora that correspond to different dialogue situations where parameters related to interactivity, visual access, and verbal channel have been manipulated in systematic ways. The corpus thus lends itself to very targeted studies of reference in spontaneous dialogue.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Sayan; Laksana, Eugene; Morency, Louis-Philippe; Scherer, Stefan
Learning Representations of Affect from Speech Proceedings Article
In: ICLR 2016, ICLR, San Juan, Puerto Rico, 2016.
@inproceedings{ghosh_eugene_laksana_satan_learning_2016,
title = {Learning Representations of Affect from Speech},
author = {Sayan Ghosh and Eugene Laksana and Louis-Philippe Morency and Stefan Scherer},
url = {http://arxiv.org/pdf/1511.04747.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {ICLR 2016},
publisher = {ICLR},
address = {San Juan, Puerto Rico},
abstract = {There has been a lot of prior work on representation learning for speech recognition applications, but not much emphasis has been given to an investigation of effective representations of affect from speech, where the paralinguistic elements of speech are separated out from the verbal content. In this paper, we explore denoising autoencoders for learning paralinguistic attributes, i.e. categorical and dimensional affective traits from speech. We show that the representations learnt by the bottleneck layer of the autoencoder are highly discriminative of activation intensity and at separating out negative valence (sadness and anger) from positive valence (happiness). We experiment with different input speech features (such as FFT and log-mel spectrograms with temporal context windows), and different autoencoder architectures (such as stacked and deep autoencoders). We also learn utterance specific representations by a combination of denoising autoencoders and BLSTM based recurrent autoencoders. Emotion classification is performed with the learnt temporal/dynamic representations to evaluate the quality of the representations. Experiments on a well-established real-life speech dataset (IEMOCAP) show that the learnt representations are comparable to state of the art feature extractors (such as voice quality features and MFCCs) and are competitive with state-of-the-art approaches at emotion and dimensional affect recognition.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Konovalov, Vasily; Artstein, Ron; Melamud, Oren; Dagan, Ido
The Negochat Corpus of Human-agent Negotiation Dialogues Proceedings Article
In: Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016), pp. 3141–3145, European Language Resources Association (ELRA), Portorož, Slovenia, 2016.
@inproceedings{konovalov_negochat_2016,
title = {The {Negochat} Corpus of Human-agent Negotiation Dialogues},
author = {Vasily Konovalov and Ron Artstein and Oren Melamud and Ido Dagan},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/240_Paper.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)},
pages = {3141--3145},
publisher = {European Language Resources Association (ELRA)},
address = {Portorož, Slovenia},
abstract = {Annotated in-domain corpora are crucial to the successful development of dialogue systems of automated agents, and in particular for developing natural language understanding (NLU) components of such systems. Unfortunately, such important resources are scarce. In this work, we introduce an annotated natural language human-agent dialogue corpus in the negotiation domain. The corpus was collected using Amazon Mechanical Turk following the ‘Wizard-Of-Oz’ approach, where a ‘wizard’ human translates the participants’ natural language utterances in real time into a semantic language. Once dialogue collection was completed, utterances were annotated with intent labels by two independent annotators, achieving high inter-annotator agreement. Our initial experiments with an SVM classifier show that automatically inferring such labels from the utterances is far from trivial. We make our corpus publicly available to serve as an aid in the development of dialogue systems for negotiation agents, and suggest that analogous corpora can be created following our methodology and using our available source code. To the best of our knowledge this is the first publicly available negotiation dialogue corpus.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Melo, Celso De; Marsella, Stacy; Gratch, Jonathan
People Don’t Feel Guilty About Exploiting Machines Journal Article
In: ACM Transactions on Computer-Human Interaction (TOCHI), vol. 23, no. 2, pp. 1–17, 2016, ISSN: 1073-0516.
@article{melo_people_2016,
title = {People Don’t Feel Guilty About Exploiting Machines},
author = {Celso De Melo and Stacy Marsella and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=2890495},
doi = {10.1145/2890495},
issn = {1073-0516},
year = {2016},
date = {2016-05-01},
journal = {ACM Transactions on Computer-Human Interaction (TOCHI)},
volume = {23},
number = {2},
pages = {1--17},
abstract = {Guilt and envy play an important role in social interaction. Guilt occurs when individuals cause harm to others or break social norms. Envy occurs when individuals compare themselves unfavorably to others and desire to benefit from the others’ advantage. In both cases, these emotions motivate people to act and change the status quo: following guilt, people try to make amends for the perceived transgression and, following envy, people try to harm envied others. In this paper, we present two experiments that study participants' experience of guilt and envy when engaging in social decision making with machines and humans. The results showed that, though experiencing the same level of envy, people felt considerably less guilt with machines than with humans. These effects occurred both with subjective and behavioral measures of guilt and envy, and in three different economic games: public goods, ultimatum, and dictator game. This poses an important challenge for human-computer interaction because, as shown here, it leads people to systematically exploit machines, when compared to humans. We discuss theoretical and practical implications for the design of human-machine interaction systems that hope to achieve the kind of efficiency – cooperation, fairness, reciprocity, etc. – we see in human-human interaction.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Pincus, Eli; Traum, David
Towards Automatic Identification of Effective Clues for Team Word-Guessing Games Proceedings Article
In: Proceedings of the Language Resources and Evaluation Conference (LREC), pp. 2741–2747, European Language Resources Association, Portorož, Slovenia, 2016.
@inproceedings{pincus_towards_2016,
title = {Towards Automatic Identification of Effective Clues for Team Word-Guessing Games},
author = {Eli Pincus and David Traum},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/762_Paper.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Language Resources and Evaluation Conference (LREC)},
pages = {2741--2747},
publisher = {European Language Resources Association},
address = {Portorož, Slovenia},
abstract = {Team word-guessing games where one player, the clue-giver, gives clues attempting to elicit a target-word from another player, the receiver, are a popular form of entertainment and also used for educational purposes. Creating an engaging computational agent capable of emulating a talented human clue-giver in a timed word-guessing game depends on the ability to provide effective clues (clues able to elicit a correct guess from a human receiver). There are many available web resources and databases that can be mined for the raw material for clues for target-words; however, a large number of those clues are unlikely to be able to elicit a correct guess from a human guesser. In this paper, we propose a method for automatically filtering a clue corpus for effective clues for an arbitrary target-word from a larger set of potential clues, using machine learning on a set of features of the clues, including point-wise mutual information between a clue’s constituent words and a clue’s target-word. The results of the experiments significantly improve the average clue quality over previous approaches, and bring quality rates in-line with measures of human clue quality derived from a corpus of human-human interactions. The paper also introduces the data used to develop this method; audio recordings of people making guesses after having heard the clues being spoken by a synthesized voice (Pincus and Traum, 2016).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gilani, Setareh Nasihati; Sheetz, Kraig; Lucas, Gale; Traum, David
What Kind of Stories Should a Virtual Human Swap? Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 1437–1438, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{nasihati_gilani_what_2016,
title = {What Kind of Stories Should a Virtual Human Swap?},
author = {Setareh Nasihati Gilani and Kraig Sheetz and Gale Lucas and David Traum},
url = {http://dl.acm.org/citation.cfm?id=2937198},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {1437--1438},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Stories are pervasive in conversation between people [5]. They are used to establish identity, pass on cultural heritage, and build rapport. Often stories are swapped when one conversational participant will reply to a story with a story. Stories are also told by virtual humans [1, 6, 2]. In creating or mining stories for a virtual human (VH) to tell, there are a number of considerations that come up about what kinds of stories should be told, and how the stories should be related to the virtual human's identity, such as whether the identity should be human or artificial, and whether the stories should be about the virtual human or about someone else. We designed a set of virtual human characters who can engage in a simple form of story-swapping. Each of the characters can engage in simple interactions such as greetings and closings and can respond to a set of “ice-breaker” questions, that might be used on a first date or similar “get to know you” encounter. For these questions the character's answer includes a story. We created 4 character response sets, to have all combinations of identity (human or artificial) and perspective (first person stories about the narrator, or third person stories about someone else). We also designed an experiment to try to explore the collective impact of above principles on people who interact with the characters. Participants interact with two of the above characters in a "get to know you" scenario. We investigate the degree of reciprocity where people respond to the character with their own stories, and also compare rapport of participants with the characters as well as the impressions of the character's personality.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Collins, Kathryn J.; Traum, David
Towards A Multi-Dimensional Taxonomy Of Stories In Dialogue Proceedings Article
In: Proceedings of the Language Resources and Evaluation Conference (LREC), pp. 118–124, European Language Resources Association, Portorož, Slovenia, 2016, ISBN: 978-2-9517408-9-1.
@inproceedings{collins_towards_2016,
title = {Towards A Multi-Dimensional Taxonomy Of Stories In Dialogue},
author = {Kathryn J. Collins and David Traum},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/354_Paper.pdf},
isbn = {978-2-9517408-9-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Language Resources and Evaluation Conference (LREC)},
pages = {118--124},
publisher = {European Language Resources Association},
address = {Portorož, Slovenia},
abstract = {In this paper, we present a taxonomy of stories told in dialogue. We based our scheme on prior work analyzing narrative structure and method of telling, relation to storyteller identity, as well as some categories particular to dialogue, such as how the story gets introduced. Our taxonomy currently has 5 major dimensions, with most having sub-dimensions - each dimension has an associated set of dimension-specific labels. We adapted an annotation tool for this taxonomy and have annotated portions of two different dialogue corpora, Switchboard and the Distress Analysis Interview Corpus. We present examples of some of the tags and concepts with stories from Switchboard, and some initial statistics of frequencies of the tags.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Lucas, Gale; Gratch, Jonathan; Stratou, Giota; Morency, Louis-Philippe; Chavez, Kenneth; Shilling, Russ; Scherer, Stefan
Automatic Behavior Analysis During a Clinical Interview with a Virtual Human. Journal Article
In: Medicine Meets Virtual Reality 22: NextMed/MMVR22, vol. 220, pp. 316–322, 2016.
@article{rizzo_automatic_2016,
title = {Automatic Behavior Analysis During a Clinical Interview with a Virtual Human},
author = {Albert Rizzo and Gale Lucas and Jonathan Gratch and Giota Stratou and Louis-Philippe Morency and Kenneth Chavez and Russ Shilling and Stefan Scherer},
url = {http://books.google.com/books?hl=en&lr=&id=sLgtDAAAQBAJ&oi=fnd&pg=PA316&dq=%22captured+across+a+20+minute+interview.+Results+from+of+sample+of+service%22+%22technology+for+clinical+purposes.+Recent+shifts+in+the+social+and%22+%22needed+to+create+VH+systems+is+now+driving+application+development+across%22+&ots=Ej8M4iuPfb&sig=Ad6Z3DPSwN3qA2gMDKWPe1YTPhg},
year = {2016},
date = {2016-04-01},
journal = {Medicine Meets Virtual Reality 22: NextMed/MMVR22},
volume = {220},
pages = {316--322},
abstract = {SimSensei is a Virtual Human (VH) interviewing platform that uses off-the-shelf sensors (i.e., webcams, Microsoft Kinect and a microphone) to capture and interpret real-time audiovisual behavioral signals from users interacting with the VH system. The system was specifically designed for clinical interviewing and health care support by providing a face-to-face interaction between a user and a VH that can automatically react to the inferred state of the user through analysis of behavioral signals gleaned from the user’s facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human-to-human interaction and communication, SimSensei aims to capture and infer user state from signals generated from user non-verbal communication to improve engagement between a VH and a user and to quantify user state from the data captured across a 20 minute interview. Results from of sample of service members (SMs) who were interviewed before and after a deployment to Afghanistan indicate that SMs reveal more PTSD symptoms to the VH than they report on the Post Deployment Health Assessment. Pre/Post deployment facial expression analysis indicated more sad expressions and few happy expressions at post deployment.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Talbot, Thomas B.; Kalisch, Nicolai; Christoffersen, Kelly; Lucas, Gale; Forbell, Eric
Natural Language Understanding Performance & Use Considerations in Virtual Medical Encounters. Journal Article
In: Medicine Meets Virtual Reality 22: NextMed/MMVR22, vol. 220, pp. 407–413, 2016.
@article{talbot_natural_2016,
title = {Natural Language Understanding Performance \& Use Considerations in Virtual Medical Encounters},
author = {Thomas B. Talbot and Nicolai Kalisch and Kelly Christoffersen and Gale Lucas and Eric Forbell},
url = {http://books.google.com/books?hl=en&lr=&id=sLgtDAAAQBAJ&oi=fnd&pg=PA407&dq=%22through+regular+web+browsers+and+is+capable+of+multiple+types+of%22+%22practice+targeting+diagnostic+interviews.+A+natural+language+interview%22+%22narrative+statement+based+upon+dialog+context.+The+dialog+manager%27s%22+&ots=Ej8L8hxLlb&sig=GMnqEb5n7CB9x1lWE4gfe5_4n8o},
doi = {10.3233/978-1-61499-625-5-407},
year = {2016},
date = {2016-04-01},
journal = {Medicine Meets Virtual Reality 22: NextMed/MMVR22},
volume = {220},
pages = {407--413},
abstract = {A virtual standardized patient (VSP) prototype was tested for natural language understanding (NLU) performance. The conversational VSP was evaluated in a controlled 61 subject study over four repetitions of a patient case. The prototype achieved more than 92% appropriate response rate from naïve users on their first attempt and results were stable by their fourth case repetition. This level of performance exceeds prior efforts and is at a level comparable of accuracy as seen in human conversational patient training, with caveats. This level of performance was possible due to the use of a unified medical taxonomy underpinning that allows virtual patient language training to be applied to all cases in our system as opposed to benefiting a single patient case.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gandhe, Sudeep; Traum, David
A Semi-automated Evaluation Metric for Dialogue Model Coherence Book Section
In: Situated Dialog in Speech-Based Human-Computer Interaction, pp. 217–225, Springer International Publishing, Cham, 2016, ISBN: 978-3-319-21833-5 978-3-319-21834-2.
@incollection{gandhe_semi-automated_2016,
title = {A Semi-automated Evaluation Metric for Dialogue Model Coherence},
author = {Sudeep Gandhe and David Traum},
url = {http://link.springer.com/10.1007/978-3-319-21834-2_19},
isbn = {978-3-319-21833-5 978-3-319-21834-2},
year = {2016},
date = {2016-04-01},
booktitle = {Situated Dialog in Speech-Based Human-Computer Interaction},
pages = {217--225},
publisher = {Springer International Publishing},
address = {Cham},
abstract = {We propose a new metric, Voted Appropriateness, which can be used to automatically evaluate dialogue policy decisions, once some wizard data has been collected. We show that this metric outperforms a previously proposed metric Weak agreement.We also present a taxonomy for dialogue model evaluation schemas, and orient our new metric within this taxonomy.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Phan, Thai; Krum, David M.; Bolas, Mark
ShodanVR: Immersive Visualization of Text Records from the Shodan Database Proceedings Article
In: Proceedings of the 2016 Workshop on Immersive Analytics (IA), IEEE, Greenville, SC, 2016, ISBN: 978-1-5090-0834-6.
@inproceedings{phan_shodanvr_2016,
title = {{ShodanVR}: Immersive Visualization of Text Records from the {Shodan} Database},
author = {Thai Phan and David M. Krum and Mark Bolas},
url = {http://ieeexplore.ieee.org/document/7932379/?part=1},
doi = {10.1109/IMMERSIVE.2016.7932379},
isbn = {978-1-5090-0834-6},
year = {2016},
date = {2016-03-01},
booktitle = {Proceedings of the 2016 Workshop on Immersive Analytics (IA)},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {ShodanVR is an immersive visualization for querying and displaying text records from the Shodan database of Internet connected devices. Shodan provides port connection data retrieved from servers, routers, and other networked devices [2]. Cybersecurity professionals can glean this data for device populations, software versions, and potential security vulnerabilities [1].},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Azmandian, Mahdi; Grechkin, Timofey; Bolas, Mark; Suma, Evan
Automated Path Prediction for Redirected Walking Using Navigation Meshes Proceedings Article
In: 2016 IEEE Symposium on 3D User Interfaces (3DUI), pp. 63–66, IEEE, Greenville, SC, 2016.
@inproceedings{azmandian_automated_2016,
title = {Automated Path Prediction for Redirected Walking Using Navigation Meshes},
author = {Mahdi Azmandian and Timofey Grechkin and Mark Bolas and Evan Suma},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7460032},
doi = {10.1109/3DUI.2016.7460032},
year = {2016},
date = {2016-03-01},
booktitle = {2016 IEEE Symposium on 3D User Interfaces (3DUI)},
pages = {63--66},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {Redirected walking techniques have been introduced to overcome physical space limitations for natural locomotion in virtual reality. These techniques decouple real and virtual user trajectories by subtly steering the user away from the boundaries of the physical space while maintaining the illusion that the user follows the intended virtual path. Effectiveness of redirection algorithms can significantly improve when a reliable prediction of the users future virtual path is available. In current solutions, the future user trajectory is predicted based on non-standardized manual annotations of the environment structure, which is both tedious and inflexible. We propose a method for automatically generating environment annotation graphs and predicting the user trajectory using navigation meshes. We discuss the integration of this method with existing redirected walking algorithms such as FORCE and MPCRed. Automated annotation of the virtual environments structure enables simplified deployment of these algorithms in any virtual environment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Pynadath, David V.; Hill, Susan G.
Trust Calibration within a Human-Robot Team: Comparing Automatically Generated Explanations Proceedings Article
In: 2016 11th ACM/IEEE International Conference on Human-Robot Interaction (HRI), pp. 109–116, IEEE, New Zealand, 2016.
@inproceedings{wang_trust_2016,
title = {Trust Calibration within a Human-Robot Team: Comparing Automatically Generated Explanations},
author = {Ning Wang and David V. Pynadath and Susan G. Hill},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7451741},
doi = {10.1109/HRI.2016.7451741},
year = {2016},
date = {2016-03-01},
booktitle = {2016 11th ACM/IEEE International Conference on Human-Robot Interaction (HRI)},
pages = {109--116},
publisher = {IEEE},
address = {New Zealand},
abstract = {Trust is a critical factor for achieving the full potential of human-robot teams. Researchers have theorized that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies have shown that hand-crafted explanations can help maintain trust when the system is less than 100% reliable. In this work, we leverage existing agent algorithms to provide a domain-independent mechanism for robots to automatically generate such explanations. To measure the explanation mechanism's impact on trust, we collected self-reported survey data and behavioral data in an agent-based online testbed that simulates a human-robot team task. The results demonstrate that the added explanation capability led to improvement in transparency, trust, and team performance. Furthermore, by observing the different outcomes due to variations in the robot's explanation content, we gain valuable insight that can help lead to refinement of explanation algorithms to further improve human-robot trust calibration.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hutton, Courtney; Suma, Evan
A Realistic Walking Model for Enhancing Redirection in Virtual Reality Proceedings Article
In: 2016 IEEE Virtual Reality (VR), pp. 183–184, IEEE, Greenville, SC, 2016.
@inproceedings{hutton_realistic_2016,
title = {A Realistic Walking Model for Enhancing Redirection in Virtual Reality},
author = {Courtney Hutton and Evan Suma},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7504714},
doi = {10.1109/VR.2016.7504714},
year = {2016},
date = {2016-03-01},
booktitle = {2016 IEEE Virtual Reality (VR)},
pages = {183--184},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {Redirected walking algorithms require the prediction of human motion in order to effectively steer users away from the boundaries of the physical space. While a virtual walking trajectory may be represented using straight lines connecting waypoints of interest, this simple model does not accurately represent typical user behavior. In this poster we present a more realistic walking model for use in real-time virtual environments that employ redirection techniques. We implemented the model within a framework that can be used for simulation of redirected walking within different virtual and physical environments. Such simulations are useful for the evaluation of redirected walking algorithms and the tuning of parameters under varying conditions. Additionally, the model can also be used to animate an artificial humanoid “ghost walker” to provide a visual demonstration of redirected walking in virtual reality.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Krum, David M.; Kang, Sin-Hwa; Phan, Thai; Dukes, Lauren Cairco; Bolas, Mark
Head Mounted Projection for Enhanced Gaze in Social Interactions Proceedings Article
In: 2016 IEEE Virtual Reality (VR), pp. 209–210, IEEE, Greenville, SC, 2016.
@inproceedings{krum_head_2016,
title = {Head Mounted Projection for Enhanced Gaze in Social Interactions},
author = {David M. Krum and Sin-Hwa Kang and Thai Phan and Lauren Cairco Dukes and Mark Bolas},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7504727},
doi = {10.1109/VR.2016.7504727},
year = {2016},
date = {2016-03-01},
booktitle = {2016 IEEE Virtual Reality (VR)},
pages = {209--210},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {Projected displays can present life-sized imagery of a virtual human character that can be seen by multiple observers. However, typical projected displays can only render that virtual human from a single viewpoint, regardless of whether head tracking is employed. This results in the virtual human being rendered from an incorrect perspective for most individuals. This could cause perceptual miscues, such as the “Mona Lisa” effect, causing the virtual human to appear as if it is simultaneously gazing and pointing at all observers regardless of their location. This may be detrimental to training scenarios in which all trainees must accurately assess where the virtual human is looking or pointing a weapon. We discuss our investigations into the presentation of eye gaze using REFLCT, a previously introduced head mounted projective display. REFLCT uses head tracked, head mounted projectors and retroreflective screens to present personalized, perspective correct imagery to multiple users without the occlusion of a traditional head mounted display. We examined how head mounted projection for enhanced presentation of eye gaze might facilitate or otherwise affect social interactions during a multi-person guessing game of “Twenty Questions.”},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nilsson, Niels; Suma, Evan; Nordahl, Rolf; Bolas, Mark; Serafin, Stefania
Estimation of Detection Thresholds for Audiovisual Rotation Gains Proceedings Article
In: IEEE Virtual Reality 2016, pp. ID: A22, IEEE, Greenville, SC, 2016.
@inproceedings{nilsson_estimation_2016,
  title     = {Estimation of Detection Thresholds for Audiovisual Rotation Gains},
  author    = {Niels Nilsson and Evan Suma and Rolf Nordahl and Mark Bolas and Stefania Serafin},
  url       = {http://ieeevr.org/2016/posters/},
  year      = {2016},
  date      = {2016-03-01},
  booktitle = {IEEE Virtual Reality 2016},
  pages     = {ID: A22},
  publisher = {IEEE},
  address   = {Greenville, SC},
  abstract  = {Redirection techniques allow users to explore large virtual environments on foot while remaining within a limited physical space. However, research has primarily focused on redirection through manipulation of visual stimuli. We describe a within-subjects study (n=31) exploring if participants’ ability to detect differences between real and virtual rotations is influenced by the addition of sound that is spatially aligned with its virtual source. The results revealed similar detection thresholds for conditions involving moving audio, static audio, and no audio. This may be viewed as an indication of visual dominance during scenarios such as the one used for the current study.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Artstein, Ron; Silver, Kenneth
Ethics for a Combined Human-Machine Dialogue Agent Proceedings Article
In: Ethical and Moral Considerations in Non-Human Agents: Papers from the AAAI Spring Symposium, pp. 184–189, AAAI Press, Stanford, California, 2016.
@inproceedings{artstein_ethics_2016,
  title     = {Ethics for a Combined Human-Machine Dialogue Agent},
  author    = {Ron Artstein and Kenneth Silver},
  url       = {http://www.aaai.org/ocs/index.php/SSS/SSS16/paper/viewFile/12706/11948},
  year      = {2016},
  date      = {2016-03-01},
  booktitle = {Ethical and Moral Considerations in Non-Human Agents: Papers from the AAAI Spring Symposium},
  pages     = {184--189},
  publisher = {AAAI Press},
  address   = {Stanford, California},
  abstract  = {We discuss philosophical and ethical issues that arise from a dialogue system intended to portray a real person, using recordings of the person together with a machine agent that selects recordings during a synchronous conversation with a user. System output may count as actions of the speaker if the speaker intends to communicate with users and the outputs represent what the speaker would have chosen to say in context; in such cases the system can justifiably be said to be holding a conversation that is offset in time. The autonomous agent may at times misrepresent the speaker’s intentions, and such failures are analogous to good-faith misunderstandings. The user may or may not need to be informed that the speaker is not organically present, depending on the application.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Azmandian, Mahdi; Grechkin, Timofey; Bolas, Mark; Suma, Evan
The Redirected Walking Toolkit: A Unified Development Platform for Exploring Large Virtual Environments Proceedings Article
In: 2nd Workshop on Everyday Virtual Reality, IEEE, Greenville, SC, 2016.
@inproceedings{azmandian_redirected_2016,
  title     = {The Redirected Walking Toolkit: A Unified Development Platform for Exploring Large Virtual Environments},
  author    = {Mahdi Azmandian and Timofey Grechkin and Mark Bolas and Evan Suma},
  url       = {http://www.adalsimeone.me/papers/WEVR2016/WEVR2016_Azmandian.pdf},
  year      = {2016},
  date      = {2016-03-01},
  booktitle = {2nd Workshop on Everyday Virtual Reality},
  publisher = {IEEE},
  address   = {Greenville, SC},
  abstract  = {With the imminent emergence of low-cost tracking solutions, everyday VR users will soon experience the enhanced immersion of natural walking. Even with consumer-grade room-scale tracking, exploring large virtual environments can be made possible using a software solution known as redirected walking. Wide adoption of this technique has been hindered by the complexity and subtleties involved in successfully deploying redirection. To address this matter, we introduce the Redirected Walking Toolkit, to serve as a unified platform for developing, benchmarking, and deploying redirected walking algorithms. Our design enables seamless integration with standard virtual reality configurations, requiring minimal setup effort for content developers.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Roemmele, Melissa; Morgens, Soja-Marie; Gordon, Andrew S.; Morency, Louis-Philippe
Recognizing Human Actions in the Motion Trajectories of Shapes Proceedings Article
In: Proceedings of ACM Intelligent User Interfaces, pp. 271–281, ACM Press, Sonoma, CA, 2016, ISBN: 978-1-4503-4137-0.
@inproceedings{roemmele_recognizing_2016,
  title     = {Recognizing Human Actions in the Motion Trajectories of Shapes},
  author    = {Melissa Roemmele and Soja-Marie Morgens and Andrew S. Gordon and Louis-Philippe Morency},
  url       = {http://dl.acm.org/citation.cfm?id=2856793},
  doi       = {10.1145/2856767.2856793},
  isbn      = {978-1-4503-4137-0},
  year      = {2016},
  date      = {2016-03-01},
  booktitle = {Proceedings of ACM Intelligent User Interfaces},
  pages     = {271--281},
  publisher = {ACM Press},
  address   = {Sonoma, CA},
  abstract  = {People naturally anthropomorphize the movement of nonliving objects, as social psychologists Fritz Heider and Marianne Simmel demonstrated in their influential 1944 research study. When they asked participants to narrate an animated film of two triangles and a circle moving in and around a box, participants described the shapes' movement in terms of human actions. Using a framework for authoring and annotating animations in the style of Heider and Simmel, we established new crowdsourced datasets where the motion trajectories of animated shapes are labeled according to the actions they depict. We applied two machine learning approaches, a spatial-temporal bag-of-words model and a recurrent neural network, to the task of automatically recognizing actions in these datasets. Our best results outperformed a majority baseline and showed similarity to human performance, which encourages further use of these datasets for modeling perception from motion trajectories. Future progress on simulating human-like motion perception will require models that integrate motion information with top-down contextual knowledge.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Filter
2007
Jan, Dusan; Herrera, David; Martinovski, Bilyana; Novick, David; Traum, David
A Computational Model of Culture-Specific Conversational Behavior Proceedings Article
In: Proceedings of the Intelligent Virtual Humans Conference, Paris, France, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{jan_computational_2007,
  title     = {A Computational Model of Culture-Specific Conversational Behavior},
  author    = {Dusan Jan and David Herrera and Bilyana Martinovski and David Novick and David Traum},
  url       = {http://ict.usc.edu/pubs/A%20Computational%20Model%20of%20Culture-Specific%20Conversational%20Behavior.pdf},
  year      = {2007},
  date      = {2007-01-01},
  booktitle = {Proceedings of the Intelligent Virtual Humans Conference},
  address   = {Paris, France},
  abstract  = {This paper presents a model for simulating cultural differences in the conversational behavior of virtual agents. The model provides parameters for differences in proxemics, gaze and overlap in turn taking. We present a review of literature on these factors and show results of a study where native speakers of North American English, Mexican Spanish and Arabic were asked to rate the realism of the simulations generated based on different cultural parameters with respect to their culture.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Yeh, Shih-Ching; Stewart, Jill; McLaughlin, Margaret; Parsons, Thomas D.; Winstein, Carolee J.; Rizzo, Albert
Evaluation Approach for Post-stroke Rehabilitation Via Virtual Reality Aided Motor Training Proceedings Article
In: Lecture Notes in Computer Science, pp. 378–387, 2007.
Abstract | Links | BibTeX | Tags: MedVR
@inproceedings{yeh_evaluation_2007,
  title     = {Evaluation Approach for Post-stroke Rehabilitation Via Virtual Reality Aided Motor Training},
  author    = {Shih-Ching Yeh and Jill Stewart and Margaret McLaughlin and Thomas D. Parsons and Carolee J. Winstein and Albert Rizzo},
  url       = {http://ict.usc.edu/pubs/Evaluation%20Approach%20for%20Post-stroke%20Rehabilitation%20Via%20Virtual%20Reality%20Aided%20Motor%20Training.pdf},
  year      = {2007},
  date      = {2007-01-01},
  booktitle = {Lecture Notes in Computer Science},
  pages     = {378--387},
  internal-note = {address field removed: it contained "2007", a duplicate of the year, not a location},
  abstract  = {This paper introduces an evaluation approach that was applied to clinical data collected from a virtual reality aided motor training program for post-stroke rehabilitation. The goal of the proposed evaluation approach is to diagnose the patient's current status (performance) and detect change in status over time (progression). Three measures, performance time, movement efficiency, and movement speed, were defined to represent kinematic features of reaching. 3-D performance maps and progression maps were generated based on each kinematic measure to visualize a single patient's behavior. The case study revealed the patient's current status as to direction and range of upper extremity reach ability, composed of pitch, yaw and arm length. Further, progression was found and visualized quantitatively over a series of practice sessions.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Robertson, R. Kevin; Parsons, Thomas D.; Rogers, Steven A.; Braaten, Alyssa J.; Robertson, Wendy T.; Wilson, Susan; Hall, Colin D.
Assessing health-related quality of life in NeuroAIDS: some psychometric properties of the Neurological Quality of Life Questionnaire (NeuroQOL) Journal Article
In: Journal of Clinical Neuroscience, vol. 14, pp. 416–423, 2007.
Abstract | Links | BibTeX | Tags: MedVR
@article{robertson_assessing_2007,
  title     = {Assessing health-related quality of life in NeuroAIDS: some psychometric properties of the Neurological Quality of Life Questionnaire (NeuroQOL)},
  author    = {R. Kevin Robertson and Thomas D. Parsons and Steven A. Rogers and Alyssa J. Braaten and Wendy T. Robertson and Susan Wilson and Colin D. Hall},
  url       = {http://ict.usc.edu/pubs/Assessing%20health-related%20quality%20of%20life%20in%20NeuroAIDS-%20some%20psychometric%20properties%20of%20the%20Neurological%20Quality%20of%20Life%20Questionnaire%20(NeuroQOL).pdf},
  year      = {2007},
  date      = {2007-01-01},
  journal   = {Journal of Clinical Neuroscience},
  volume    = {14},
  pages     = {416--423},
  abstract  = {Several studies were undertaken to assess the psychometric properties (reliability and initial convergent and discriminant construct validity) of the Neurological Quality of Life Questionnaire (NeuroQOL). The NeuroQOL contains 114 items answered in self report Likert format, with higher scores reflecting better quality of life. Study one compared the questionnaire with existing quality of life measures (Symptom Distress Scale, Sickness Impact Profile) and disease stage, psychological, neuropsychological and neurological measures, and a significant correlation was also found with each domain. The internal consistency reliability (alpha = 0.96), split half reliability (r12 = 0.97), and test-retest reliability (coefficients were 0.78 for 6 months and 0.67 for one year intervals between test and retest) were all found to be high and adequately stable. Overall, these results indicate acceptable reliability and initial construct validity for the NeuroQOL.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Macedonio, Mary F.; Parsons, Thomas D.; Digiuseppe, Raymond A.; Wiederhold, Brenda K.; Rizzo, Albert
Immersiveness and Physiological Arousal within Panoramic Video-Based Virtual Reality Journal Article
In: CyberPsychology and Behavior, vol. 10, no. 4, pp. 508–515, 2007.
Abstract | Links | BibTeX | Tags: MedVR
@article{macedonio_immersiveness_2007,
  title     = {Immersiveness and Physiological Arousal within Panoramic Video-Based Virtual Reality},
  author    = {Mary F. Macedonio and Thomas D. Parsons and Raymond A. Digiuseppe and Brenda K. Wiederhold and Albert Rizzo},
  url       = {http://ict.usc.edu/pubs/Immersiveness%20and%20Physiological%20Arousal%20within%20Panoramic%20Video-Based%20Virtual%20Reality.pdf},
  year      = {2007},
  date      = {2007-01-01},
  journal   = {CyberPsychology and Behavior},
  volume    = {10},
  number    = {4},
  pages     = {508--515},
  abstract  = {In this paper, we discuss findings from a study that used panoramic video-based virtual environments (PVVEs) to induce self-reported anger. The study assessed "immersiveness" and physiological correlates of anger arousal (i.e., heart rate, blood pressure, galvanic skin response [GSR], respiration, and skin temperature). Results indicate that over time, panoramic video-based virtual scenarios can be, at the very least, physiologically arousing. Further, it can be affirmed from the results that hypnotizability, as defined by the applied measures, interacts with group on physiological arousal measures. Hence, physiological arousal appeared to be moderated by participant hypnotizability and absorption levels.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
2006
Busso, Carlos; Narayanan, Shrikanth
Interplay between linguistic and affective goals in facial expression during emotional utterances Proceedings Article
In: Proceedings of the 7th International Seminar on Speech Production, pp. 549–556, Ubatuba, Brazil, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{busso_interplay_2006,
  title     = {Interplay between linguistic and affective goals in facial expression during emotional utterances},
  author    = {Carlos Busso and Shrikanth Narayanan},
  url       = {http://ict.usc.edu/pubs/Interplay%20between%20linguistic%20and%20affective%20goals%20in%20facial%20expression%20during%20emotional%20utterances.pdf},
  year      = {2006},
  date      = {2006-12-01},
  booktitle = {Proceedings of the 7th International Seminar on Speech Production},
  pages     = {549--556},
  address   = {Ubatuba, Brazil},
  abstract  = {Communicative goals are simultaneously expressed through gestures and speech to convey messages enriched with valuable verbal and non-verbal clues. This paper analyzes and quantifies how linguistic and affective goals are reflected in facial expressions. Using a database recorded from an actress with markers attached to her face, the facial features during emotional speech were compared with the ones expressed during neutral speech. The results show that the facial activeness is mainly driven by articulatory processes. However, clear spatial-temporal patterns are observed during emotional speech, which indicate that emotional goals enhance and modulate facial expressions. The results also show that the upper face region has more degrees of freedom to convey non-verbal information than the lower face region, which is highly constrained by the underlying articulatory processes. These results are important toward understanding how humans communicate and interact.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hill, Randall W.; Belanich, James; Lane, H. Chad; Core, Mark; Dixon, Melissa; Forbell, Eric; Kim, Julia; Hart, John
Pedagogically Structured Game-Based Training: Development of the ELECT BiLat Simulation Proceedings Article
In: Proceedings of the 25th Army Science Conference, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{hill_pedagogically_2006,
  title     = {Pedagogically Structured Game-Based Training: Development of the ELECT BiLat Simulation},
  author    = {Randall W. Hill and James Belanich and H. Chad Lane and Mark Core and Melissa Dixon and Eric Forbell and Julia Kim and John Hart},
  url       = {http://ict.usc.edu/pubs/PEDAGOGICALLY%20STRUCTURED%20GAME-BASED%20TRAINING-%20DEVELOPMENT%20OF%20THE%20ELECT%20BILAT%20SIMULATION.pdf},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Proceedings of the 25th Army Science Conference},
  abstract  = {ELECT BiLAT is a prototype game-based simulation for Soldiers to practice conducting bilateral engagements in a cultural context. The prototype provides students with the experience of preparing for a meeting including familiarization with the cultural context, gathering intelligence, conducting a meeting and negotiating when possible, and following up on meeting agreements as appropriate. The ELECT BiLAT architecture is based on a commercial game engine that is integrated with research technologies to enable the use of virtual human characters, scenario customization, as well as coaching, feedback and tutoring. Because the prototype application is intended to be a learning environment, pedagogy has been central throughout development. The project followed a five-phase process: (1) analyze the training domain; (2) develop a story board prototype; (3) implement a computer version of the training prototype; (4) refine training objectives and link their conditions and standards to game activities; and (5) develop training support content for students, instructors, and training developers. The goal is an authorable game-based environment that uses the pedagogy of guided discovery for training Soldiers in the conduct of bilateral engagements within a specific cultural context.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Leuski, Anton; Kennedy, Brandon; Patel, Ronakkumar; Traum, David
Asking Questions to Limited Domain Virtual Characters: How Good Does Speech Recognition Have to Be? Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{leuski_asking_2006,
  title     = {Asking Questions to Limited Domain Virtual Characters: How Good Does Speech Recognition Have to Be?},
  author    = {Anton Leuski and Brandon Kennedy and Ronakkumar Patel and David Traum},
  url       = {http://ict.usc.edu/pubs/Asking%20Questions%20to%20Limited%20Domain%20Virtual%20Characters.pdf},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Proceedings of the 25th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {In this paper, we describe the evaluation of a limited domain question-answering characters, particularly as to the effect of non-optimal speech recognition, and the ability to appropriately answer novel questions. Results show that answering ability is robust until speech recognition reaches over 60% Word error rate.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Leuski, Anton; Lavrenko, Victor
Tracking Dragon-Hunters with Language Models Proceedings Article
In: Conference on Information and Knowledge Management, Arlington, VA, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{leuski_tracking_2006,
  title     = {Tracking Dragon-Hunters with Language Models},
  author    = {Anton Leuski and Victor Lavrenko},
  url       = {http://ict.usc.edu/pubs/Tracking%20Dragon-Hunters%20with%20Language%20Models.pdf},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Conference on Information and Knowledge Management},
  address   = {Arlington, VA},
  abstract  = {We are interested in the problem of understanding the connections between human activities and the content of textual information generated in regard to those activities. Massive online collaborative environments, specifically online virtual worlds, where people meet, exchange messages, and perform actions can be a rich source for such an analysis. In this paper we study one of such virtual worlds and the activities of its inhabitants. We explore the existing dependencies between the activities and the content of the chat messages the world's inhabitants exchange with each other. We outline three experimental tasks and show how language modeling and text clustering techniques allow us to explore those dependencies successfully.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hill, Randall W.; Kim, Julia; Zbylut, Michelle L.; Gordon, Andrew S.; Traum, David; Gandhe, Sudeep; King, Stewart; Lavis, Salvo; Rocher, Scott
AXL.Net: Web-enabled Case Method Instruction for Accelerating Tacit Knowledge Acquisition in Leaders Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
Abstract | Links | BibTeX | Tags: The Narrative Group, Virtual Humans
@inproceedings{hill_axlnet_2006,
  title     = {AXL.Net: Web-enabled Case Method Instruction for Accelerating Tacit Knowledge Acquisition in Leaders},
  author    = {Randall W. Hill and Julia Kim and Michelle L. Zbylut and Andrew S. Gordon and David Traum and Sudeep Gandhe and Stewart King and Salvo Lavis and Scott Rocher},
  url       = {http://ict.usc.edu/pubs/AXLNet-%20Web-enabled%20Case%20Method%20Instruction%20for%20Accelerating%20Tacit%20Knowledge%20Acquisition%20in%20Leaders.PDF},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Proceedings of the 25th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {AXL.Net is a prototype web-based immersive technology solution that supports case method teaching for U.S. Army leader development. The AXL.Net system addresses three challenges: (1) designing a pedagogicallysound research prototype for leader development, (2) integrating research technologies with the best of Web 2.0 innovations to enhance case method teaching, and (3) providing an easy to use system. Initial evaluations show that the prototype application and framework is effective for leader development.},
  keywords  = {The Narrative Group, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Debevec, Paul; Bolas, Mark; McDowall, Ian
Concave Surround Optics for Rapid Multi-View Imaging Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
Abstract | Links | BibTeX | Tags: Graphics, MxR
@inproceedings{debevec_concave_2006,
  title     = {Concave Surround Optics for Rapid Multi-View Imaging},
  author    = {Paul Debevec and Mark Bolas and Ian McDowall},
  url       = {http://ict.usc.edu/pubs/ConcaveSurroundOptics_ASC2006.pdf},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Proceedings of the 25th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {Many image-based modeling and rendering techniques involve photographing a scene from an array of different viewpoints. Usually, this is achieved by moving the camera or the subject to successive positions, or by photographing the scene with an array of cameras. In this work, we present a system of mirrors to simulate the appearance of camera movement around a scene while the physical camera remains stationary. The system thus is amenable to capturing dynamic events avoiding the need to construct and calibrate an array of cameras. We demonstrate the system with a high speed video of a dynamic scene. We show smooth camera motion rotating 360 degrees around the scene. We discuss the optical performance of our system and compare with alternate setups.},
  keywords  = {Graphics, MxR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Core, Mark; Traum, David; Lane, H. Chad; Swartout, William; Marsella, Stacy C.; Gratch, Jonathan; Lent, Michael
Teaching Negotiation Skills through Practice and Reflection with Virtual Humans Journal Article
In: Simulation: Transactions of the Society for Modeling and Simulation, vol. 82, no. 11, pp. 685–701, 2006.
Abstract | Links | BibTeX | Tags: Learning Sciences, Social Simulation, Virtual Humans
@article{core_teaching_2006,
  title     = {Teaching Negotiation Skills through Practice and Reflection with Virtual Humans},
  author    = {Mark Core and David Traum and H. Chad Lane and William Swartout and Stacy C. Marsella and Jonathan Gratch and Michael Lent},
  url       = {http://ict.usc.edu/pubs/Teaching%20Negotiation%20Skills.pdf},
  year      = {2006},
  date      = {2006-11-01},
  journal   = {Simulation: Transactions of the Society for Modeling and Simulation},
  volume    = {82},
  number    = {11},
  pages     = {685--701},
  abstract  = {Although the representation of physical environments and behaviors will continue to play an important role in simulation-based training, an emerging challenge is the representation of virtual humans with rich mental models (e.g., including emotions, trust) that interact through conversational as well as physical behaviors. The motivation for such simulations is training soft skills such as leadership, cultural awareness, and negotiation, where the majority of actions are conversational, and the problem solving involves consideration of the emotions, attitudes, and desires of others. The educational power of such simulations can be enhanced by the integration of an intelligent tutoring system to support learners’ understanding of the effect of their actions on virtual humans and how they might improve their performance. In this paper, we discuss our efforts to build such virtual humans, along with an accompanying intelligent tutor, for the domain of negotiation and cultural awareness.},
  keywords  = {Learning Sciences, Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Gratch, Jonathan; Okhmatovskaia, Anna; Duncan, Susan
Virtual Humans for the Study of Rapport in Cross Cultural Settings Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_virtual_2006,
  title     = {Virtual Humans for the Study of Rapport in Cross Cultural Settings},
  author    = {Jonathan Gratch and Anna Okhmatovskaia and Susan Duncan},
  url       = {http://ict.usc.edu/pubs/VIRTUAL%20HUMANS%20FOR%20THE%20STUDY%20OF%20RAPPORT%20IN%20CROSS%20CULTURAL%20SETTINGS.pdf},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Proceedings of the 25th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {As an increasing part of the Army's mission involves establishing rapport with diverse populations, training interpersonal skills becomes critically important. Here we describe a "Rapport Agent" that senses and responds to a speaker’s nonverbal behavior and provide empirical evidence that it increases speaker fluency and engagement. We argue such agent technology has potential, both as a training system to enhance communication skills, and to assess the key factors that influence rapport in face-to-face interactions. We conclude by discussing ways the nonverbal correlates of rapport vary between Arabic and English speakers and discuss the potential of such technology to advance research and training into rapport in cross-cultural settings.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Treskunov, Anton; Pair, Jarrell
Projector-Camera Systems for Immersive Training Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{treskunov_projector-camera_2006,
  title     = {Projector-Camera Systems for Immersive Training},
  author    = {Anton Treskunov and Jarrell Pair},
  url       = {http://ict.usc.edu/pubs/PROJECTOR-CAMERA%20SYSTEMS%20FOR%20IMMERSIVE%20TRAINING.pdf},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Proceedings of the 25th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {Real time computer graphics are limited in that they can only be displayed on projection screens and monitors. Monitors and projection screens cannot be used in live fire training or scenarios in which the displays could be physically damaged by trainees. To address this issue, we have developed projection systems using computer vision based color correction and image processing to project onto non-ideal surfaces such as painted walls, cinder blocks, and concrete floors. These projector-camera systems effectively paint the real world with digital light. Any surface can become an interactive projection screen allowing unprepared spaces to be transformed into an immersive environment. Virtual bullet holes, charring, and cracks can be added to real doors, walls, tables, chairs, cabinets, and windows. Distortion correction algorithms allow positioning of projection devices out of the field of view of trainees and their weapons. This paper describes our motivation and approach for implementing projector-camera systems for use within the FlatWorld wide area mixed reality system.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Pair, Jarrell; Parsons, Thomas D.; Liewer, Matt; Graap, Ken; Difede, JoAnn; Rothbaum, Barbara O.; Reger, Greg; Roy, Michael
A Virtual Reality Therapy Application for OEF/OIF Combat-related Post Traumatic Stress Disorder Proceedings Article
In: Proceedings of the 25th Army Science Conference, Orlando, FL, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{rizzo_virtual_2006-1,
  title     = {A Virtual Reality Therapy Application for OEF/OIF Combat-related Post Traumatic Stress Disorder},
  author    = {Albert Rizzo and Jarrell Pair and Thomas D. Parsons and Matt Liewer and Ken Graap and JoAnn Difede and Barbara O. Rothbaum and Greg Reger and Michael Roy},
  url       = {http://ict.usc.edu/pubs/A%20VIRTUAL%20REALITY%20THERAPY%20APPLICATION%20FOR%20OEF%20OIF%20COMBAT-RELATED%20POST%20TRAUMATIC%20STRESS%20DISORDER.pdf},
  year      = {2006},
  date      = {2006-11-01},
  booktitle = {Proceedings of the 25th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been used with reports of positive outcomes. The aim of the current paper is to present the rationale, technical specifications, application features and user-centered design process for the development of a Virtual Iraq PTSD VR therapy application. The VR treatment environment is being created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far we have created a series of customizable virtual scenarios designed to represent relevant contexts for exposure therapy to be conducted in VR, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system in Iraq tested by an Army Combat Stress Control Team. Clinical trials are currently underway at Camp Pendleton and at the San Diego Naval Medical Center. Other sites are preparing to use the application for a variety of PTSD and VR research purposes.},
  keywords  = {MedVR, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
Fourth Frame Forums: Interactive Comics for Collaborative Learning Proceedings Article
In: Proceedings of the 14th Annual ACM International Conference on Multimedia (MM 2006), Santa Barbara, CA, 2006.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_fourth_2006,
  title     = {Fourth Frame Forums: Interactive Comics for Collaborative Learning},
  author    = {Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/Fourth%20Frame%20Forums-%20Interactive%20Comics%20for%20Collaborative%20Learning.pdf},
  year      = {2006},
  date      = {2006-10-01},
  booktitle = {Proceedings of the 14th Annual ACM International Conference on Multimedia (MM 2006)},
  address   = {Santa Barbara, CA},
  abstract  = {In this paper, we describe Fourth Frame Forums, an application that combines traditional four-frame comic strips with online web-based discussion forums. In this application, users are presented with a four-frame comic strip where the last dialogue balloon of the fourth frame is left blank. By typing a statement into this dialogue balloon, the user creates a new discussion thread in the forum, where the user’s dialogue choice can be critiqued and discussed by other users of the forum. We argue that Fourth Frame Forums provide an elegant and cost-effective solution for online education and training environments for communities of learners. We provide examples from the domain of US Army leadership development, and compare Fourth Frame Forums to alternative methods of story-directed simulation and training.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Swanson, Reid
Integrating logical inference into statistical text classification applications Proceedings Article
In: Proceedings of AAAI Fall Symposium on Integrating Logical Reasoning into Everyday Applications, Washington D.C., 2006.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_integrating_2006,
  title     = {Integrating logical inference into statistical text classification applications},
  author    = {Andrew S. Gordon and Reid Swanson},
  url       = {http://ict.usc.edu/pubs/Integrating%20Logical%20Inference%20Into%20Statistical%20Text%20Classification%20Applications.pdf},
  year      = {2006},
  date      = {2006-10-01},
  booktitle = {Proceedings of AAAI Fall Symposium on Integrating Logical Reasoning into Everyday Applications},
  address   = {Washington D.C.},
  abstract  = {Contemporary statistical text classification is becoming increasingly common across a wide range of everyday applications. Typically, the bottlenecks in performance are the availability and consistency of large amounts of training data. We argue that these techniques could be improved by seamlessly integrating logical inference into the text encoding pipeline, making it possible to utilize large-scale commonsense and special-purpose knowledge bases to aid in the interpretation and encoding of documents.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
Language evidence for changes in a Theory of Mind Book Section
In: Arbib, Michael A. (Ed.): Action to Language via the Mirror Neuron System, University of Cambridge Press, Cambridge, UK, 2006, ISBN: 978-0-521-84755-1.
Abstract | Links | BibTeX | Tags: The Narrative Group
@incollection{gordon_language_2006,
  title     = {Language evidence for changes in a Theory of Mind},
  author    = {Andrew S. Gordon},
  editor    = {Michael A. Arbib},
  url       = {http://www.cambridge.org/gb/knowledge/isbn/item1172518/?site_locale=en_GB},
  isbn      = {978-0-521-84755-1},
  year      = {2006},
  date      = {2006-09-01},
  booktitle = {Action to Language via the Mirror Neuron System},
  publisher = {Cambridge University Press},
  address   = {Cambridge, UK},
  abstract  = {Mirror neurons may hold the brain's key to social interaction - each coding not only a particular action or emotion but also the recognition of that action or emotion in others. The Mirror System Hypothesis adds an evolutionary arrow to the story - from the mirror system for hand actions, shared with monkeys and chimpanzees, to the uniquely human mirror system for language. In this accessible 2006 volume, experts from child development, computer science, linguistics, neuroscience, primatology and robotics present and analyse the mirror system and show how studies of action and language can illuminate each other. Topics discussed in the fifteen chapters include: what do chimpanzees and humans have in common? Does the human capability for language rest on brain mechanisms shared with other animals? How do human infants acquire language? What can be learned from imaging the human brain? How are sign- and spoken-language related? Will robots learn to act and speak like humans?},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {incollection}
}
Tepperman, Joseph; Traum, David; Narayanan, Shrikanth
"Yeah Right": Sarcasm Recognition for Spoken Dialogue Systems Proceedings Article
In: Interspeech 2006, Pittsburgh, PA, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{tepperman_yeah_2006,
  title     = {"Yeah Right": Sarcasm Recognition for Spoken Dialogue Systems},
  author    = {Joseph Tepperman and David Traum and Shrikanth Narayanan},
  url       = {http://ict.usc.edu/pubs/Yeah%20Right-%20Sarcasm%20Recognition%20for%20Spoken%20Dialogue%20Systems.pdf},
  year      = {2006},
  date      = {2006-09-01},
  booktitle = {Interspeech 2006},
  address   = {Pittsburgh, PA},
  abstract  = {The robust understanding of sarcasm in a spoken dialogue system requires a reformulation of the dialogue manager's basic assumptions behind, for example, user behavior and grounding strategies. But automatically detecting a sarcastic tone of voice is not a simple matter. This paper presents some experiments toward sarcasm recognition using prosodic, spectral, and contextual cues. Our results demonstrate that spectral and contextual features can be used to detect sarcasm as well as a human annotator would, and confirm a long-held claim in the field of psychology — that prosody alone is not sufficient to discern whether a speaker is being sarcastic.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Jones, Andrew; Gardner, Andrew; Bolas, Mark; McDowall, Ian; Debevec, Paul
Simulating Spatially Varying Lighting on a Live Performance Proceedings Article
In: 3rd European Conference on Visual Media Production (CVMP 2006), London, UK, 2006.
Abstract | Links | BibTeX | Tags: Graphics, MxR
@inproceedings{jones_simulating_2006,
  title     = {Simulating Spatially Varying Lighting on a Live Performance},
  author    = {Andrew Jones and Andrew Gardner and Mark Bolas and Ian McDowall and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Simulating%20Spatially%20Varying%20Lighting%20on%20a%20Live%20Performance.pdf},
  year      = {2006},
  date      = {2006-09-01},
  booktitle = {3rd European Conference on Visual Media Production (CVMP 2006)},
  address   = {London, UK},
  abstract  = {We present an image-based technique for relighting dynamic human performances under spatially varying illumination. Our system generates a time-multiplexed LED basis and a geometric model recovered from high-speed structured light patterns. The geometric model is used to scale the intensity of each pixel differently according to its 3D position within the spatially varying illumination volume. This yields a first-order approximation of the correct appearance under the spatially varying illumination. A global illumination process removes indirect illumination from the original lighting basis and simulates spatially varying indirect illumination. We demonstrate this technique for a human performance under several spatially varying lighting environments.},
  keywords  = {Graphics, MxR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Roque, Antonio; Leuski, Anton; Rangarajan, Vivek; Robinson, Susan; Vaswani, Ashish; Narayanan, Shrikanth; Traum, David
Radiobot-CFF: A Spoken Dialogue System for Military Training Proceedings Article
In: Interspeech 2006, Pittsburgh, PA, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_radiobot-cff_2006,
  title     = {Radiobot-CFF: A Spoken Dialogue System for Military Training},
  author    = {Antonio Roque and Anton Leuski and Vivek Rangarajan and Susan Robinson and Ashish Vaswani and Shrikanth Narayanan and David Traum},
  url       = {http://ict.usc.edu/pubs/Radiobot-CFF-%20A%20Spoken%20Dialogue%20System%20for%20Military%20Training.pdf},
  year      = {2006},
  date      = {2006-09-01},
  booktitle = {Interspeech 2006},
  address   = {Pittsburgh, PA},
  abstract  = {We describe a spoken dialogue system which can engage in Call For Fire (CFF) radio dialogues to help train soldiers in proper procedures for requesting artillery fire missions. We describe the domain, an information-state dialogue manager with a novel system of interactive information components, and provide evaluation results.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Marsella, Stacy C.; Carnicke, Sharon Marie; Gratch, Jonathan; Okhmatovskaia, Anna; Rizzo, Albert
An Exploration of Delsarte's Structural Acting System Proceedings Article
In: Proceedings of the 6th International Conference on Intelligent Virtual Agents (IVA), pp. 80–92, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Social Simulation, Virtual Humans
@inproceedings{marsella_exploration_2006,
  title     = {An Exploration of {Delsarte's} Structural Acting System},
  author    = {Stacy C. Marsella and Sharon Marie Carnicke and Jonathan Gratch and Anna Okhmatovskaia and Albert Rizzo},
  url       = {http://ict.usc.edu/pubs/An%20Exploration%20of%20Delsarte%E2%80%99s%20Structural%20Acting%20System.pdf},
  year      = {2006},
  date      = {2006-08-01},
  booktitle = {Proceedings of the 6th International Conference on Intelligent Virtual Agents (IVA)},
  pages     = {80--92},
  address   = {Marina del Rey, CA},
  abstract  = {The designers of virtual agents often draw on a large research literature in psychology, linguistics and human ethology to design embodied agents that can interact with people. In this paper, we consider a structural acting system developed by Francois Delsarte as a possible resource in designing the nonverbal behavior of embodied agents. Using human subjects, we evaluate one component of the system, Delsarte's Cube, that addresses the meaning of differing attitudes of the hand in gestures.},
  keywords  = {MedVR, Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Debevec, Paul
Virtual Cinematography: Relighting through Computation Journal Article
In: IEEE Computer Magazine, vol. 39, pp. 57–65, 2006.
Abstract | Links | BibTeX | Tags: Graphics
@article{debevec_virtual_2006,
  title     = {Virtual Cinematography: Relighting through Computation},
  author    = {Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Virtual%20Cinematography-%20Relighting%20through%20Computation.pdf},
  year      = {2006},
  date      = {2006-08-01},
  journal   = {IEEE Computer Magazine},
  volume    = {39},
  pages     = {57--65},
  abstract  = {Recording how scenes transform incident illumination into radiant light is an active topic in computational photography. Such techniques make it possible to create virtual images of a person or place from new viewpoints and in any form of illumination.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {article}
}
Lee, Jina; Marsella, Stacy C.
Nonverbal Behavior Generator for Embodied Conversational Agents Proceedings Article
In: 6th International Conference on Intelligent Virtual Agents, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation
@inproceedings{lee_nonverbal_2006,
  title     = {Nonverbal Behavior Generator for Embodied Conversational Agents},
  author    = {Jina Lee and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Nonverbal%20Behavior%20Generator%20for%20Embodied%20Conversational%20Agents.pdf},
  year      = {2006},
  date      = {2006-08-01},
  booktitle = {6th International Conference on Intelligent Virtual Agents},
  address   = {Marina del Rey, CA},
  abstract  = {Believable nonverbal behaviors for embodied conversational agents (ECA) can create a more immersive experience for users and improve the effectiveness of communication. This paper describes a nonverbal behavior generator that analyzes the syntactic and semantic structure of the surface text as well as the affective state of the ECA and annotates the surface text with appropriate nonverbal behaviors. A number of video clips of people conversing were analyzed to extract the nonverbal behavior generation rules. The system works in real-time and is user-extensible so that users can easily modify or extend the current behavior generation rules.},
  keywords  = {Social Simulation},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gluck, Kevin A.; Gunzelmann, Glenn; Gratch, Jonathan; Hudlicka, Eva; Ritter, Frank E.
Modeling the Impact of Cognitive Moderators on Human Cognition and Performance Proceedings Article
In: Proceedings of the 2006 Conference of the Cognitive Society, pp. 2658, Vancouver, CA, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gluck_modeling_2006,
  title     = {Modeling the Impact of Cognitive Moderators on Human Cognition and Performance},
  author    = {Kevin A. Gluck and Glenn Gunzelmann and Jonathan Gratch and Eva Hudlicka and Frank E. Ritter},
  url       = {http://ict.usc.edu/pubs/Modeling%20the%20Impact%20of%20Cognitive%20Moderators%20on%20Human%20Cognition%20and%20Performance.pdf},
  year      = {2006},
  date      = {2006-08-01},
  booktitle = {Proceedings of the 2006 Conference of the Cognitive Society},
  pages     = {2658},
  address   = {Vancouver, CA},
  abstract  = {Cognitive moderators, such as emotions, personality, stress, and fatigue, represent an emerging area of research within the cognitive science community and are increasingly acknowledged as important and ubiquitous influences on cognitive processes. This symposium brings together scientists engaged in research to develop models that help us better understand the mechanisms through which these factors impact human cognition and performance. There are two unifying themes across the presentations. One theme is a commitment to developing computational models useful for simulating the processes that produce the effects and phenomena of interest. The second theme is a commitment to assessing the validity of the models by comparing their performance against empirical human data.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Okhmatovskaia, Anna; Lamothe, Francois; Marsella, Stacy C.; Morales, Mathieu; Werf, R. J.; Morency, Louis-Philippe
Virtual Rapport Proceedings Article
In: Lecture Notes in Computer Science, pp. 14–27, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_virtual_2006-1,
  title     = {Virtual Rapport},
  author    = {Jonathan Gratch and Anna Okhmatovskaia and Francois Lamothe and Stacy C. Marsella and Mathieu Morales and R. J. Werf and Louis-Philippe Morency},
  url       = {http://ict.usc.edu/pubs/Virtual%20Rapport.pdf},
  year      = {2006},
  date      = {2006-08-01},
  booktitle = {Lecture Notes in Computer Science},
  volume    = {4311},
  pages     = {14--27},
  address   = {Marina del Rey, CA},
  abstract  = {Effective face-to-face conversations are highly interactive. Participants respond to each other, engaging in nonconscious behavioral mimicry and backchanneling feedback. Such behaviors produce a subjective sense of rapport and are correlated with effective communication, greater liking and trust, and greater influence between participants. Creating rapport requires a tight sense-act loop that has been traditionally lacking in embodied conversational agents. Here we describe a system, based on psycholinguistic theory, designed to create a sense of rapport between a human speaker and virtual human listener. We provide empirical evidence that it increases speaker fluency and engagement.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kopp, Stefan; Krenn, Brigitte; Marsella, Stacy C.; Marshall, Andrew; Pelachaud, Catherine; Pirker, Hannes; Thórisson, Kristinn R.; Vilhjálmsson, Hannes
Towards a Common Framework for Multimodal Generation: The Behavior Markup Language Proceedings Article
In: Proceedings of the Intelligent Virtual Humans Conference, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation
@inproceedings{kopp_towards_2006,
  title     = {Towards a Common Framework for Multimodal Generation: The Behavior Markup Language},
  author    = {Stefan Kopp and Brigitte Krenn and Stacy C. Marsella and Andrew Marshall and Catherine Pelachaud and Hannes Pirker and Kristinn R. Thórisson and Hannes Vilhjálmsson},
  url       = {http://ict.usc.edu/pubs/Towards%20a%20Common%20Framework%20for%20Multimodal%20Generation-%20The%20Behavior%20Markup%20Language.pdf},
  year      = {2006},
  date      = {2006-08-01},
  booktitle = {Proceedings of the Intelligent Virtual Humans Conference},
  address   = {Marina del Rey, CA},
  abstract  = {This paper describes an international effort to unify a multimodal behavior generation framework for Embodied Conversational Agents (ECAs). We propose a three stage model we call SAIBA where the stages represent intent planning, behavior planning and behavior realization. A Function Markup Language (FML), describing intent without referring to physical behavior, mediates between the first two stages and a Behavior Markup Language (BML), describing desired physical realization, mediates between the last two stages. In this paper we will focus on BML. The hope is that this abstraction and modularization will help ECA researchers pool their resources to build more sophisticated virtual humans.},
  keywords  = {Social Simulation},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Patel, Ronakkumar; Leuski, Anton; Traum, David
Dealing with Out of Domain Questions in Virtual Characters Proceedings Article
In: Proceedings of the 6th International Conference on Intelligent Virtual Agents, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{patel_dealing_2006,
  title     = {Dealing with Out of Domain Questions in Virtual Characters},
  author    = {Ronakkumar Patel and Anton Leuski and David Traum},
  url       = {http://ict.usc.edu/pubs/Dealing%20with%20Out%20of%20Domain%20Questions%20in%20Virtual%20Characters.pdf},
  year      = {2006},
  date      = {2006-08-01},
  booktitle = {Proceedings of the 6th International Conference on Intelligent Virtual Agents},
  address   = {Marina del Rey, CA},
  abstract  = {We consider the problem of designing virtual characters that support speech-based interactions in a limited domain. Previously we have shown that classification can be an effective and robust tool for selecting appropriate in-domain responses. In this paper, we consider the problem of dealing with out-of-domain user questions. We introduce a taxonomy of out-of-domain response types. We consider three classification architectures for selecting the most appropriate out-of-domain responses. We evaluate these architectures and show that they significantly improve the quality of the response selection making the user's interaction with the virtual character more natural and engaging.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Roque, Antonio; Traum, David
An Information State-Based Dialogue Manager for Call for Fire Dialogues Proceedings Article
In: 7th SIGdial Workshop on Discourse and Dialogue, Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_information_2006,
  title     = {An Information State-Based Dialogue Manager for Call for Fire Dialogues},
  author    = {Antonio Roque and David Traum},
  url       = {http://ict.usc.edu/pubs/An%20Information%20State-Based%20Dialogue%20Manager%20for%20Call%20for%20Fire%20Dialogues.pdf},
  year      = {2006},
  date      = {2006-07-01},
  booktitle = {7th SIGdial Workshop on Discourse and Dialogue},
  address   = {Sydney, Australia},
  abstract  = {We present a dialogue manager for "Call for Fire" training dialogues. We describe the training environment, the domain, the features of its novel information state-based dialogue manager, the system it is a part of, and preliminary evaluation results.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Swanson, Reid; Gordon, Andrew S.
A Comparison of Alternative Parse Tree Paths for Labeling Semantic Roles Proceedings Article
In: Proceedings of the Joint Conference of the International Committee on Computational Linguistics and the Association for Computational Linguistics (COLING/ACL), Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{swanson_comparison_2006,
  title     = {A Comparison of Alternative Parse Tree Paths for Labeling Semantic Roles},
  author    = {Reid Swanson and Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/A%20Comparison%20of%20Alternative%20Parse%20Tree%20Paths%20for%20Labeling%20Semantic%20Roles.pdf},
  year      = {2006},
  date      = {2006-07-01},
  booktitle = {Proceedings of the Joint Conference of the International Committee on Computational Linguistics and the Association for Computational Linguistics (COLING/ACL)},
  address   = {Sydney, Australia},
  abstract  = {The integration of sophisticated inference-based techniques into natural language processing applications first requires a reliable method of encoding the predicate-argument structure of the propositional context of text. Recent statistical approaches to automated predicate-argument annotation have utilized parse tree paths as predictive features, which encode the path between a verb predicate and a node in the parse tree that governs its argument. In this paper, we explore a number of alternatives for how these parse tree paths are encoded, focusing on the difference between automatically generated constituency parses and dependency parses. After describing five alternatives for encoding parse tree paths, we investigate how well each can be aligned with the argument substrings in annotated text corpora, their relative precision and recall performance, and their comparative learning curves. Results indicate that constituency parsers produce parse tree paths that can more easily be aligned to argument substrings, perform better in precision and recall, and have more favorable learning curves than those produced by a dependency parser.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Leuski, Anton; Patel, Ronakkumar; Traum, David; Kennedy, Brandon
Building Effective Question Answering Characters Proceedings Article
In: 7th SIGdial Workshop on Discourse and Dialogue, Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{leuski_building_2006,
  title     = {Building Effective Question Answering Characters},
  author    = {Anton Leuski and Ronakkumar Patel and David Traum and Brandon Kennedy},
  url       = {http://ict.usc.edu/pubs/Building%20Effective%20Question%20Answering%20Characters.pdf},
  year      = {2006},
  date      = {2006-07-01},
  booktitle = {7th SIGdial Workshop on Discourse and Dialogue},
  address   = {Sydney, Australia},
  abstract  = {In this paper, we describe methods for building and evaluation of limited domain question-answering characters. Several classification techniques are tested, including text classification using support vector machines, language-model based retrieval, and cross-language information retrieval techniques, with the latter having the highest success rate. We also evaluated the effect of speech recognition errors on performance with users, finding that retrieval is robust until recognition reaches over 50% WER.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Martinovski, Bilyana
Cognitive and Emotive Empathy in Discourse: Towards an Integrated Theory of Mind Proceedings Article
In: Proceedings of the 28th Annual Conference of the Cognitive Society, Vancouver, CA, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{martinovski_cognitive_2006,
  title     = {Cognitive and Emotive Empathy in Discourse: Towards an Integrated Theory of Mind},
  author    = {Bilyana Martinovski},
  url       = {http://ict.usc.edu/pubs/Cognitive%20and%20Emotive%20Empathy%20in%20Discourse-%20Towards%20an%20Integrated%20Theory%20of%20Mind.pdf},
  year      = {2006},
  date      = {2006-07-01},
  booktitle = {Proceedings of the 28th Annual Conference of the Cognitive Society},
  address   = {Vancouver, CA},
  abstract  = {This paper presents an empirical qualitative analysis of eliciting, giving and receiving empathy in discourse. The study identifies discursive and linguistic features, which realize cognitive, emotive, parallel and reactive empathy and suggests that imitation, simulation and representation could be non-exclusive processes in Theory of Mind reasoning.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Core, Mark; Lane, H. Chad; Lent, Michael; Gomboc, Dave; Solomon, Steve; Rosenberg, Milton
Building Explainable Artificial Intelligence Systems Proceedings Article
In: Proceedings of the 18th Innovative Applications of Artificial Intelligence Conference, Boston, MA, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{core_building_2006,
  title     = {Building Explainable Artificial Intelligence Systems},
  author    = {Mark Core and H. Chad Lane and Michael Lent and Dave Gomboc and Steve Solomon and Milton Rosenberg},
  url       = {http://ict.usc.edu/pubs/Building%20Explainable%20Artificial%20Intelligence%20Systems.pdf},
  year      = {2006},
  date      = {2006-07-01},
  booktitle = {Proceedings of the 18th Innovative Applications of Artificial Intelligence Conference},
  address   = {Boston, MA},
  abstract  = {As artificial intelligence (AI) systems and behavior models in military simulations become increasingly complex, it has been difficult for users to understand the activities of computer-controlled entities. Prototype explanation systems have been added to simulators, but designers have not heeded the lessons learned from work in explaining expert system behavior. These new explanation systems are not modular and not portable; they are tied to a particular AI system. In this paper, we present a modular and generic architecture for explaining the behavior of simulated entities. We describe its application to the Virtual Humans, a simulation designed to teach soft skills such as negotiation and cultural awareness.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Riedl, Mark O.; Young, R. Michael
From Linear Story Generation to Branching Story Graphs Journal Article
In: IEEE Computer Graphics and Applications, vol. 26, no. 3, pp. 23–31, 2006.
Abstract | Links | BibTeX | Tags:
@article{riedl_linear_2006,
  title     = {From Linear Story Generation to Branching Story Graphs},
  author    = {Mark O. Riedl and R. Michael Young},
  url       = {http://ict.usc.edu/pubs/From%20Linear%20Story%20Generation%20to%20Branching%20Story%20Graphs.pdf},
  year      = {2006},
  date      = {2006-06-01},
  journal   = {IEEE Computer Graphics and Applications},
  volume    = {26},
  number    = {3},
  pages     = {23--31},
  abstract  = {Interactive narrative systems are storytelling systems in which the user can influence the content or ordering of story world events. Conceptually, an interactive narrative can be represented as a branching graph of narrative elements, implying points at which an interactive user's decisions influence the content or ordering of the remaining elements. Generative approaches to interactive narrative construct narrative at runtime or pre-construct on a per-session basis highly interactive branching narrative structures. One generative approach — narrative mediation — represents story as a linear progression of events with anticipated user actions and system-controlled agent actions together in a partially-ordered plan. For every possible way the user can violate the story plan, an alternative story plan is generated. If narrative mediation is powerful enough to express the same interactive stories as systems that use branching narrative structures, then linear narrative generation techniques can be applied to interactive narrative generation. This paper lays out this argument and sketches a proof that narrative mediation is at least as powerful as acyclic branching story structures.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
McAlinden, Ryan; Lent, Michael; Clevenger, William; Tien, Wen C.
Using Environmental Annotations & Affordances to Model Culture Proceedings Article
In: Artificial Intelligence and Interactive Digital Entertainment Conference Demonstrations, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{mcalinden_using_2006,
  title     = {Using Environmental Annotations \& Affordances to Model Culture},
  author    = {Ryan McAlinden and Michael Lent and William Clevenger and Wen C. Tien},
  url       = {http://ict.usc.edu/pubs/Using%20Environmental%20Annotations%20&%20Affordances%20to%20Model%20Culture.pdf},
  year      = {2006},
  date      = {2006-06-01},
  booktitle = {Artificial Intelligence and Interactive Digital Entertainment Conference Demonstrations},
  address   = {Marina del Rey, CA},
  abstract  = {This paper details the demonstration of an annotation and affordance-based software model intended to introduce cultural and social influences into a non-player character's (NPC) decision-making process. We describe how recent research has supported the need to begin incorporating the effects of culture into the interactive digital domain. The technical approach is presented that describes the software techniques for embedding and utilizing culturally-specific information inside of a virtual environment, as well as the design and implementation of a deterministic Markov Decision Process (MDP) to model the effects of culture on the AI.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Callieri, Marco; Debevec, Paul; Scopigno, Roberto
A realtime immersive application with realistic lighting: The Parthenon Journal Article
In: Computers & Graphics, vol. 30, no. 3, pp. 368–376, 2006.
Abstract | Links | BibTeX | Tags: Graphics
@article{callieri_realtime_2006,
  title     = {A realtime immersive application with realistic lighting: The Parthenon},
  author    = {Marco Callieri and Paul Debevec and Roberto Scopigno},
  url       = {http://ict.usc.edu/pubs/A%20realtime%20immersive%20application%20with%20realistic%20lighting-%20The%20Parthenon.pdf},
  year      = {2006},
  date      = {2006-06-01},
  journal   = {Computers \& Graphics},
  volume    = {30},
  number    = {3},
  pages     = {368--376},
  abstract  = {Offline rendering techniques have nowadays reached an astonishing level of realism but pay the cost of long computational times. The new generation of programmable graphic hardware, on the other hand, gives the possibility to implement in realtime some of the visual effects previously available only for cinematographic production. We describe the design and implementation of an interactive system which is able to reproduce in realtime one of the crucial sequences from the short movie “The Parthenon” presented at Siggraph 2004. The application is designed to run on a specific immersive reality system, making possible for a user to perceive the virtual environment with nearly cinematographic visual quality.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {article}
}
Parsons, Thomas D.; Rogers, Steven A.; Braaten, Alyssa J.; Woods, Steven Paul; Tröster, Alexander I.
Cognitive sequelae of subthalamic nucleus deep brain stimulation in Parkinson's disease: a meta-analysis Journal Article
In: Lancet Neurology, vol. 5, pp. 578–588, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_cognitive_2006,
  title     = {Cognitive sequelae of subthalamic nucleus deep brain stimulation in Parkinson's disease: a meta-analysis},
  author    = {Thomas D. Parsons and Steven A. Rogers and Alyssa J. Braaten and Steven Paul Woods and Alexander I. Tröster},
  url       = {http://ict.usc.edu/pubs/Cognitive%20sequelae%20of%20subthalamic%20nucleus%20deep%20brain%20stimulation%20in%20Parkinson%E2%80%99s%20disease-%20a%20meta-analysis.pdf},
  year      = {2006},
  date      = {2006-06-01},
  journal   = {Lancet Neurology},
  volume    = {5},
  pages     = {578--588},
  abstract  = {Summary: Background Deep brain stimulation of the subthalamic nucleus (STN DBS) is an increasingly common treatment for Parkinson's disease. Qualitative reviews have concluded that diminished verbal fluency is common after STN DBS, but that changes in global cognitive abilities, attention, executive functions, and memory are only inconsistently observed and, when present, often nominal or transient. We did a quantitative meta-analysis to improve understanding of the variability and clinical significance of cognitive dysfunction after STN DBS. Methods: We searched MedLine, PsycLIT, and ISI Web of Science electronic databases for articles published between 1990 and 2006, and extracted information about number of patients, exclusion criteria, confirmation of target by microelectrode recording, verification of electrode placement via radiographic means, stimulation parameters, assessment time points, assessment measures, whether patients were on levodopa or dopaminomimetics, and summary statistics needed for computation of effect sizes. We used the random-effects meta-analytical model to assess continuous outcomes before and after STN DBS. Findings: Of 40 neuropsychological studies identified, 28 cohort studies (including 612 patients) were eligible for inclusion in the meta-analysis. After adjusting for heterogeneity of variance in study effect sizes, the random effects meta-analysis revealed significant, albeit small, declines in executive functions and verbal learning and memory. Moderate declines were only reported in semantic (Cohen's d 0·73) and phonemic verbal fluency (0·51). Changes in verbal fluency were not related to patient age, disease duration, stimulation parameters, or change in dopaminomimetic dose after surgery. Interpretation: STN DBS, in selected patients, seems relatively safe from a cognitive standpoint. However, difficulty in identification of factors underlying changes in verbal fluency draws attention to the need for uniform and detailed reporting of patient selection, demographic, disease, treatment, surgical, stimulation, and clinical outcome parameters.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Dini, Don M.; Lent, Michael; Carpenter, Paul; Iyer, Kumar
Building Robust Planning and Execution Systems for Virtual Worlds Proceedings Article
In: Proceedings of Artificial Intelligence and Interactive Digital Entertainment, Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{dini_building_2006,
  title     = {Building Robust Planning and Execution Systems for Virtual Worlds},
  author    = {Don M. Dini and Michael Lent and Paul Carpenter and Kumar Iyer},
  url       = {http://ict.usc.edu/pubs/Building%20Robust%20Planning%20and%20Execution%20Systems%20for%20Virtual%20Worlds.pdf},
  year      = {2006},
  date      = {2006-06-01},
  booktitle = {Proceedings of Artificial Intelligence and Interactive Digital Entertainment},
  address   = {Marina del Rey, CA},
  abstract  = {Planning and execution systems have been used in a wide variety of systems to create practical and successful automation. They have been used for everything from performing scientific research on the surface of Mars to controlling enemy characters in video games to performing military air campaign planning. After reviewing past work on these various planning and execution systems, we believe that most lack one or more key components contained in another system. To enable future researchers to build more complete systems, and avoid possible serious system failure, we identify the major technical problems any implementer of such a system would have to face. In addition we cite recent solutions to each of these technical problems. We limit our focus to planning and execution for virtual worlds and the unique problems faced therein.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Riedl, Mark O.; Stern, Andrew; Dini, Don M.
Mixing Story and Simulation in Interactive Narrative Proceedings Article
In: 2nd Conference on Artificial Intelligence and Interactive Entertainment (AIIDE), Marina del Rey, CA, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{riedl_mixing_2006,
title = {Mixing Story and Simulation in Interactive Narrative},
author = {Mark O. Riedl and Andrew Stern and Don M. Dini},
url = {http://ict.usc.edu/pubs/Mixing%20Story%20and%20Simulation%20in%20Interactive%20Narrative.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {2nd Conference on Artificial Intelligence and Interactive Entertainment (AIIDE)},
address = {Marina del Rey, CA},
abstract = {Simulation is a common feature in computer entertainment. However, in computer games simulation and story are often kept distinct by interleaving interactive play and cut scenes. We describe a technique for an interactive narrative system that more closely integrates simulation and storyline. The technique uses a combination of semi-autonomous character agents and high-level story direction. The storyline is decomposed into directives to character agents to achieve particular world states. Otherwise, character agents are allowed to behave autonomously. When the player's actions create inconsistency between the simulation state and storyline, the storyline is dynamically adapted and repaired to resolve any inconsistencies.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Einarsson, Per; Chabert, Charles-Felix; Jones, Andrew; Ma, Wan-Chun; Lamond, Bruce; Hawkins, Tim; Bolas, Mark; Sylwan, Sebastian; Debevec, Paul
Relighting Human Locomotion with Flowed Reflectance Fields Proceedings Article
In: Eurographics Symposium on Rendering (2006), 2006.
Abstract | Links | BibTeX | Tags: Graphics, MxR
@inproceedings{einarsson_relighting_2006,
title = {Relighting Human Locomotion with Flowed Reflectance Fields},
author = {Per Einarsson and Charles-Felix Chabert and Andrew Jones and Wan-Chun Ma and Bruce Lamond and Tim Hawkins and Mark Bolas and Sebastian Sylwan and Paul Debevec},
url = {http://ict.usc.edu/pubs/Relighting%20Human%20Locomotion%20with%20Flowed%20Reflectance%20Fields.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {Eurographics Symposium on Rendering (2006)},
abstract = {We present an image-based approach for capturing the appearance of a walking or running person so they can be rendered realistically under variable viewpoint and illumination. In our approach, a person walks on a treadmill at a regular rate as a turntable slowly rotates the person's direction. As this happens, the person is filmed with a vertical array of high-speed cameras under a time-multiplexed lighting basis, acquiring a seven-dimensional dataset of the person under variable time, illumination, and viewing direction in approximately forty seconds. We process this data into a flowed reflectance field using an optical flow algorithm to correspond pixels in neighboring camera views and time samples to each other, and we use image compression to reduce the size of this data. We then use image-based relighting and a hardware-accelerated combination of view morphing and light field rendering to render the subject under user-specified viewpoint and lighting conditions. To composite the person into a scene, we use an alpha channel derived from back lighting and a retroreflective treadmill surface and a visual hull process to render the shadows the person would cast onto the ground. We demonstrate realistic composites of several subjects into real and virtual environments using our technique.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Tariq, Sarah; Gardner, Andrew; Llamas, Ignacio; Jones, Andrew; Debevec, Paul; Turk, Greg
Efficient Estimation of Spatially Varying Subsurface Scattering Parameters Proceedings Article
In: 11th International Fall Workshop on Vision, Modeling and Visualization, Aachen, Germany, 2006.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{tariq_efficient_2006-1,
title = {Efficient Estimation of Spatially Varying Subsurface Scattering Parameters},
author = {Sarah Tariq and Andrew Gardner and Ignacio Llamas and Andrew Jones and Paul Debevec and Greg Turk},
url = {http://ict.usc.edu/pubs/Efficient%20Estimation%20of%20Spatially%20Varying%20Subsurface%20Scattering%20Parameters.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {11th International Fall Workshop on Vision, Modeling and Visualization},
address = {Aachen, Germany},
abstract = {We present an image-based technique to efficiently acquire spatially varying subsurface reflectance properties of a human face. The estimated properties can be used directly to render faces with spatially varying scattering, or can be used to estimate a robust average across the face. We demonstrate our technique with renderings of peoples' faces under novel, spatially-varying illumination and provide comparisons with current techniques. Our captured data consists of images of the face from a single viewpoint under two small sets of projected images. The first set, a sequence of phase-shifted periodic stripe patterns, provides a per-pixel profile of how light scatters from adjacent locations. The second set of structured light patterns is used to obtain face geometry. We subtract the minimum of each profile to remove the contribution of interreflected light from the rest of the face, and then match the observed reflectance profiles to scattering properties predicted by a scattering model using a lookup table. From these properties we can generate images of the subsurface reflectance of the face under any incident illumination, including local lighting. The rendered images exhibit realistic subsurface transport, including light bleeding across shadow edges. Our method works more than an order of magnitude faster than current techniques for capturing subsurface scattering information, and makes it possible for the first time to capture these properties over an entire face.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.; Egges, Arjan; Eliëns, Anton; Isbister, Katherine; Paiva, Ana; Rist, Thomas; Hagen, Paul
Design criteria, techniques and case studies for creating and evaluating interactive experiences for virtual humans Proceedings Article
In: Dagstuhl Seminar Proceedings, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_design_2006,
title = {Design criteria, techniques and case studies for creating and evaluating interactive experiences for virtual humans},
author = {Jonathan Gratch and Stacy C. Marsella and Arjan Egges and Anton Eliëns and Katherine Isbister and Ana Paiva and Thomas Rist and Paul Hagen},
url = {http://ict.usc.edu/pubs/Design%20criteria%20techniques%20and%20case%20studies%20for%20creating%20and%20evaluating%20interactive%20experiences%20for%20virtual%20humans.pdf},
year = {2006},
date = {2006-06-01},
booktitle = {Dagstuhl Seminar Proceedings},
abstract = {How does one go about designing a human? With the rise in recent years of virtual humans this is no longer purely a philosophical question. Virtual humans are intelligent agents with a body, often a human-like graphical body, that interact verbally and non-verbally with human users on a variety of tasks and applications. At a recent meeting on this subject, the above authors participated in a several day discussion on the question of virtual human design. Our working group approached this question from the perspective of interactivity. Specifically, how can one design effective interactive experiences involving a virtual human, and what constraints does this goal place on the form and function of an embodied conversational agent. Our group grappled with several related questions: What ideals should designers aspire to, what sources of theory and data will best lead to this goal and what methodologies can inform and validate the design process? This article summarizes our output and suggests a specific framework, borrowed from interactive media design, as a vehicle for advancing the state of interactive experiences with virtual humans.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Parsons, Thomas D.; Tucker, Karen A.; Hall, Colin D.; Robertson, Wendy T.; Eron, Joseph J.; Fried, Michael W.; Robertson, R. Kevin
Neurocognitive functioning and HAART in HIV and hepatitis C virus co-infection Journal Article
In: AIDS, vol. 20, pp. 1591–1595, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_neurocognitive_2006,
title = {Neurocognitive functioning and HAART in HIV and hepatitis C virus co-infection},
author = {Thomas D. Parsons and Karen A. Tucker and Colin D. Hall and Wendy T. Robertson and Joseph J. Eron and Michael W. Fried and R. Kevin Robertson},
url = {http://ict.usc.edu/pubs/Neurocognitive%20functioning%20and%20HAART%20in%20HIV%20and%20hepatitis%20C%20virus%20co-infection.pdf},
year = {2006},
date = {2006-05-01},
journal = {AIDS},
volume = {20},
pages = {1591–1595},
abstract = {Objectives: This study examined the effects of HAART on neurocognitive functioning in persons with hepatitis C virus (HCV) and HIV co-infection. Design: A prospective study examining neurocognitive performance before and after HAART initiation. Method: Participant groups included a mono-infected group (45 HIV/HCV-participants) and a co-infected group (20 HIV/HCV participants). A neuropsychological battery (attention/concentration, psychomotor speed, executive functioning, verbal memory, visual memory, fine motor, and gross motor functioning) was used to evaluate all participants. After 6 months of HAART, 31 HIV mono-infected and 13 HCV/HIV co-infected participants were reevaluated. Results: Neurocognitive functioning by domain revealed significantly worse performance in the co-infected group when compared to the monoinfected group on domains of visual memory and fine motor functioning. Assessment of neurocognitive functioning after antiretroviral therapy revealed that the co-infected group was no longer performing worse than the monoinfected group. Conclusions: The findings of the current study suggest that persons with HCV/HIV co-infection may have greater neurocognitive declines than persons with HIV infection alone. HCV/HIV co-infection may accelerate the progression of HIV related neurocognitive decline.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Riedl, Mark O.; Stern, Andrew
Believable Agents and Intelligent Scenario Direction for Social and Cultural Leadership Training Proceedings Article
In: 15th Conference on Behavior Representation in Modeling and Simulation (BRIMS), Baltimore, MD, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{riedl_believable_2006,
title = {Believable Agents and Intelligent Scenario Direction for Social and Cultural Leadership Training},
author = {Mark O. Riedl and Andrew Stern},
url = {http://ict.usc.edu/pubs/Believable%20Agents%20and%20Intelligent%20Scenario%20Direction%20for%20Social%20and%20Cultural%20Leadership%20Training.pdf},
year = {2006},
date = {2006-05-01},
booktitle = {15th Conference on Behavior Representation in Modeling and Simulation (BRIMS)},
address = {Baltimore, MD},
abstract = {Simulation provides an opportunity for a trainee to practice skills in an interactive and reactive virtual environment. We present a technique for social and cultural leader training through simulation based on a combination of interactive synthetic agents and intelligent scenario direction and adaptation. Social simulation through synthetic characters provides an engaging and believable experience for the trainee. In addition, the trainee is exposed to a sequence of relevant learning situations where the trainee can practice problem-solving under particular conditions. An Automated Scenario Director provides high-level guidance to semi-autonomous character agents to coerce the trainee's experience to conform to a given scenario. When the trainee performs actions in the virtual world that cause the simulation state to deviate from the scenario, the Automated Scenario Director adapts the scenario to resolve any unexpected inconsistencies, thereby preserving the trainee's perception of self control while still retaining any relevant learning situations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
EMA: A computational model of appraisal dynamics Proceedings Article
In: Agent Construction and Emotions: Modeling the Cognitive Antecedents and Consequences of Emotion, Vienna, Austria, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{marsella_ema_2006,
title = {EMA: A computational model of appraisal dynamics},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/EMA-%20A%20computational%20model%20of%20appraisal%20dynamics.pdf},
year = {2006},
date = {2006-04-01},
booktitle = {Agent Construction and Emotions: Modeling the Cognitive Antecedents and Consequences of Emotion},
address = {Vienna, Austria},
abstract = {A computational model of emotion must explain both the rapid dynamics of some emotional reactions as well as the slower responses that follow deliberation. This is often addressed by positing multiple appraisal processes such as fast pattern directed vs. slower deliberative appraisals. In our view, this confuses appraisal with inference. Rather, we argue for a single and automatic appraisal process that operates over a person’s interpretation of their relationship to the environment. Dynamics arise from perceptual and inferential processes operating on this interpretation (including deliberative and reactive processes). We illustrate this perspective through the computational modeling of a naturalistic emotional situation.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David
Talking to Virtual Humans: Dialogue Models and Methodologies for Embodied Conversational Agents Book Section
In: Modeling Communication with Robots and Virtual Humans, pp. 296–309, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@incollection{traum_talking_2006,
title = {Talking to Virtual Humans: Dialogue Models and Methodologies for Embodied Conversational Agents},
author = {David Traum},
url = {http://ict.usc.edu/pubs/Talking%20to%20Virtual%20Humans.pdf},
year = {2006},
date = {2006-04-01},
booktitle = {Modeling Communication with Robots and Virtual Humans},
pages = {296–309},
abstract = {Virtual Humans are artificial characters who look and act like humans, but inhabit a simulated environment. One important aspect of many virtual humans is their communicative dialogue ability. In this paper we outline a methodology for study of dialogue behavior and construction of virtual humans. We also consider three architectures for different types of virtual humans that have been built at the Institute for Creative Technologies.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Bolas, Mark; Pair, Jarrell; Haynes, Kip; McDowall, Ian
Display Research at the University of Southern California Proceedings Article
In: IEEE Emerging Displays Workshop, Alexandria, VA, 2006.
Abstract | Links | BibTeX | Tags: MxR
@inproceedings{bolas_display_2006,
title = {Display Research at the University of Southern California},
author = {Mark Bolas and Jarrell Pair and Kip Haynes and Ian McDowall},
url = {http://ict.usc.edu/pubs/Display%20Research%20at%20the%20University%20of%20Southern%20California.pdf},
year = {2006},
date = {2006-03-01},
booktitle = {IEEE Emerging Displays Workshop},
address = {Alexandria, VA},
abstract = {The University of Southern California and its collaborative research partner, Fakespace Labs, are participating in a number of research programs to invent and implement new forms of display technologies for immersive and semi-immersive applications. This paper briefly describes three of these technologies and highlights a few emerging results from those efforts. The first system is a rear projected 300 degree field of view cylindrical display. It is driven by 11 projectors with geometry correction and edge blending hardware. A full scale prototype will be completed in March 2006. The second system is a 14 screen projected panoramic room environment used as an advanced teaching and meeting space. It can be driven by a cluster of personal computers or low-cost DVD players, or driven by a single personal computer. The third is a prototype stereoscopic head mounted display that can be worn in a fashion similar to standard dust protection goggles. It provides a field of view in excess of 150 degrees.},
keywords = {MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Pair, Jarrell; Graap, Ken; Manson, Brian; McNerney, Peter J.; Wiederhold, Brenda K.; Wiederhold, Mark; Spira, James
A Virtual Reality Exposure Therapy Application for Iraq War Military Personnel with Post Traumatic Stress Disorder: From Training to Toy to Treatment Proceedings Article
In: NATO Advanced Research Workshop on Novel Approaches to the Diagnosis and Treatment of Posttraumatic Stress Disorder, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{rizzo_virtual_2006,
title = {A Virtual Reality Exposure Therapy Application for Iraq War Military Personnel with Post Traumatic Stress Disorder: From Training to Toy to Treatment},
author = {Albert Rizzo and Jarrell Pair and Ken Graap and Brian Manson and Peter J. McNerney and Brenda K. Wiederhold and Mark Wiederhold and James Spira},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Reality%20Exposure%20Therapy%20Application%20for%20Iraq%20War%20Military%20Personnel%20with%20Post%20Traumatic%20Stress%20Disorder-%20From%20Training%20to%20Toy%20to%20Treatment.pdf},
year = {2006},
date = {2006-03-01},
booktitle = {NATO Advanced Research Workshop on Novel Approaches to the Diagnosis and Treatment of Posttraumatic Stress Disorder},
abstract = {Post Traumatic Stress Disorder is reported to be caused by traumatic events that are outside the range of usual human experiences including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) exposure treatment has been used in previous treatments of PTSD patients with reports of positive outcomes. The aim of the current paper is to specify the rationale, design and development of a Virtual Iraq PTSD VR application that has been created from the virtual assets that were initially developed for a combat tactical training simulation, which then served as the inspiration for the X-Box game entitled Full Spectrum Warrior.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pair, Jarrell; Allen, Brian; Dautricourt, Matthieu; Treskunov, Anton; Liewer, Matt; Graap, Ken; Reger, Greg; Rizzo, Albert
A Virtual Reality Exposure Therapy Application for Iraq War Post Traumatic Stress Disorder Proceedings Article
In: Proceedings of the IEEE VR 2006 Conference, pp. 64–71, Alexandria, VA, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{pair_virtual_2006,
title = {A Virtual Reality Exposure Therapy Application for Iraq War Post Traumatic Stress Disorder},
author = {Jarrell Pair and Brian Allen and Matthieu Dautricourt and Anton Treskunov and Matt Liewer and Ken Graap and Greg Reger and Albert Rizzo},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Reality%20Exposure%20Therapy%20Application%20for%20Iraq%20War%20Post%20Traumatic%20Stress%20Disorder.pdf},
year = {2006},
date = {2006-03-01},
booktitle = {Proceedings of the IEEE VR 2006 Conference},
pages = {64–71},
address = {Alexandria, VA},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experiences including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) exposure treatment has been used in previous treatments of PTSD patients with reports of positive outcomes. The aim of the current paper is to present the rationale, technical specifications, application features and user-centered design process for the development of a Virtual Iraq PTSD VR therapy application. The VR treatment environment is being created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far we have created a series of customizable virtual scenarios designed to represent relevant contexts for exposure therapy to be conducted in VR, including a city and desert road convoy environment. User-Centered tests with the application are currently underway at the Naval Medical Center–San Diego and within an Army Combat Stress Control Team in Iraq with clinical trials scheduled to commence in February 2006.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Miller, Karen J.; Parsons, Thomas D.; Whybrow, Peter C.; Herle, Katja; Rasgon, Natalie; Herle, Andre; Martinez, Dorothy; Silverman, Dan H.; Bauer, Michael
Memory Improvement with Treatment of Hypothyroidism Journal Article
In: International Journal of Neuroscience, vol. 116, no. 8, pp. 895–906, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{miller_memory_2006,
title = {Memory Improvement with Treatment of Hypothyroidism},
author = {Karen J. Miller and Thomas D. Parsons and Peter C. Whybrow and Katja Herle and Natalie Rasgon and Andre Herle and Dorothy Martinez and Dan H. Silverman and Michael Bauer},
url = {http://ict.usc.edu/pubs/Memory%20Improvement%20with%20Treatment%20of%20Hypothyroidism.pdf},
year = {2006},
date = {2006-01-01},
journal = {International Journal of Neuroscience},
volume = {116},
number = {8},
pages = {895–906},
abstract = {The consequences of inadequate thyroid hormone availability to the brain and treatment effects of levothyroxine function are still poorly understood. This study prospectively assessed the effects of thyroid replacement therapy on cognitive function in patients suffering from biochemical evidenced, untreated hypothyroidism. Significant effects between the untreated hypothyroid group and control group were limited to verbal memory retrieval. When assessing the effects of 3-month treatment, results revealed that the treated hypothyroid group had significant increased verbal memory retrieval. Results suggest that specific memory retrieval deficits associated with hypothyroidism can resolve after replacement therapy with levothyroxine.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Swartout, William; Gratch, Jonathan; Hill, Randall W.; Hovy, Eduard; Marsella, Stacy C.; Rickel, Jeff; Traum, David
Toward Virtual Humans Journal Article
In: AI Magazine, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{swartout_toward_2006,
title = {Toward Virtual Humans},
author = {William Swartout and Jonathan Gratch and Randall W. Hill and Eduard Hovy and Stacy C. Marsella and Jeff Rickel and David Traum},
url = {http://ict.usc.edu/pubs/Toward%20Virtual%20Humans.pdf},
year = {2006},
date = {2006-01-01},
journal = {AI Magazine},
abstract = {This paper describes the virtual humans developed as part of the Mission Rehearsal Exercise project, a virtual reality-based training system. This project is an ambitious exercise in integration, both in the sense of integrating technology with entertainment industry content, but also in that we have joined a number of component technologies that have not been integrated before. This integration has not only raised new research issues, but it has also suggested some new approaches to difficult problems. We describe the key capabilities of the virtual humans, including task representation and reasoning, natural language dialogue, and emotion reasoning, and show how these capabilities are integrated to provide more human-level intelligence than would otherwise be possible.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}