Publications
Search
Mühlberger, Andreas; Jekel, K.; Probst, Thomas; Schecklmann, Martin; Conzelmann, A.; Andreatta, M.; Rizzo, A. A.; Pauli, P.; Romanos, M.
The Influence of Methylphenidate on Hyperactivity and Attention Deficits in Children With ADHD: A Virtual Classroom Test Journal Article
In: Journal of attention disorders, 2016.
@article{muhlberger_influence_2016,
title = {The Influence of Methylphenidate on Hyperactivity and Attention Deficits in Children With {ADHD}: A Virtual Classroom Test},
author = {Andreas Mühlberger and K. Jekel and Thomas Probst and Martin Schecklmann and A. Conzelmann and M. Andreatta and A. A. Rizzo and P. Pauli and M. Romanos},
url = {http://journals.sagepub.com/doi/abs/10.1177/1087054716647480},
doi = {10.1177/1087054716647480},
year = {2016},
date = {2016-05-01},
journal = {Journal of Attention Disorders},
abstract = {This study compares the performance in a continuous performance test within a virtual reality classroom (CPT-VRC) between medicated children with ADHD, unmedicated children with ADHD, and healthy children. Method: N = 94 children with ADHD (n = 26 of them received methylphenidate and n = 68 were unmedicated) and n = 34 healthy children performed the CPT-VRC. Omission errors, reaction time/variability, commission errors, and body movements were assessed. Furthermore, ADHD questionnaires were administered and compared with the CPT-VRC measures. Results: The unmedicated ADHD group exhibited more omission errors and showed slower reaction times than the healthy group. Reaction time variability was higher in the unmedicated ADHD group compared with both the healthy and the medicated ADHD group. Omission errors and reaction time variability were associated with inattentiveness ratings of experimenters. Head movements were correlated with hyperactivity ratings of parents and experimenters. Conclusion: Virtual reality is a promising technology to assess ADHD symptoms in an ecologically valid environment.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Pynadath, David V.; Hill, Susan G.
The Impact of POMDP-Generated Explanations on Trust and Performance in Human-Robot Teams Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 997–1005, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{wang_impact_2016,
title = {The Impact of {POMDP}-Generated Explanations on Trust and Performance in Human-Robot Teams},
author = {Ning Wang and David V. Pynadath and Susan G. Hill},
url = {http://dl.acm.org/citation.cfm?id=2937071},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {997--1005},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Researchers have observed that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies have shown that hand-crafted explanations can help maintain effective team performance even when the system is less than 100% reliable. However, current explanation algorithms are not sufficient for making a robot's quantitative reasoning (in terms of both uncertainty and conflicting goals) transparent to human teammates. In this work, we develop a novel mechanism for robots to automatically generate explanations of reasoning based on Partially Observable Markov Decision Problems (POMDPs). Within this mechanism, we implement alternate natural-language templates and then measure their differential impact on trust and team performance within an agent-based online test-bed that simulates a human-robot team task. The results demonstrate that the added explanation capability leads to improvement in transparency, trust, and team performance. Furthermore, by observing the different outcomes due to variations in the robot's explanation content, we gain valuable insight that can help lead to refinement of explanation algorithms to further improve human-robot interaction.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Swartout, William; Nye, Benjamin D.; Hartholt, Arno; Reilly, Adam; Graesser, Arthur C.; VanLehn, Kurt; Wetzel, Jon; Liewer, Matt; Morbini, Fabrizio; Morgan, Brent; Wang, Lijia; Benn, Grace; Rosenberg, Milton
Designing a Personal Assistant for Life-Long Learning (PAL3) Proceedings Article
In: Proceedings of The Twenty-Ninth International Flairs Conference, pp. 491–496, AAAI Press, Key Largo, FL, 2016, ISBN: 978-1-57735-756-8.
@inproceedings{swartout_designing_2016,
title = {Designing a Personal Assistant for Life-Long Learning ({PAL3})},
author = {William Swartout and Benjamin D. Nye and Arno Hartholt and Adam Reilly and Arthur C. Graesser and Kurt VanLehn and Jon Wetzel and Matt Liewer and Fabrizio Morbini and Brent Morgan and Lijia Wang and Grace Benn and Milton Rosenberg},
url = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS16/paper/view/12793},
isbn = {978-1-57735-756-8},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of The Twenty-Ninth International {FLAIRS} Conference},
pages = {491--496},
publisher = {AAAI Press},
address = {Key Largo, FL},
abstract = {Learners’ skills decay during gaps in instruction, since they lack the structure and motivation to continue studying. To meet this challenge, the PAL3 system was designed to accompany a learner throughout their career and mentor them to build and maintain skills through: 1) the use of an embodied pedagogical agent (Pal), 2) a persistent learning record that drives a student model which estimates forgetting, 3) an adaptive recommendation engine linking to both intelligent tutors and traditional learning resources, and 4) game-like mechanisms to promote engagement (e.g., leaderboards, effort-based point rewards, unlocking customizations). The design process for PAL3 is discussed, from the perspective of insights and revisions based on a series of formative feedback and evaluation sessions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Casas, Dan; Feng, Andrew; Alexander, Oleg; Fyffe, Graham; Debevec, Paul; Ichikari, Ryosuke; Li, Hao; Olszewski, Kyle; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshape Modeling from RGB-D Sensors Proceedings Article
In: Proceedings of the 29th International Conference on Computer Animation and Social Agents, pp. 121–129, ACM Press, Geneva, Switzerland, 2016, ISBN: 978-1-4503-4745-7.
@inproceedings{casas_rapid_2016,
title = {Rapid Photorealistic Blendshape Modeling from {RGB-D} Sensors},
author = {Dan Casas and Andrew Feng and Oleg Alexander and Graham Fyffe and Paul Debevec and Ryosuke Ichikari and Hao Li and Kyle Olszewski and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2915926.2915936},
doi = {10.1145/2915926.2915936},
isbn = {978-1-4503-4745-7},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 29th International Conference on Computer Animation and Social Agents},
pages = {121--129},
publisher = {ACM Press},
address = {Geneva, Switzerland},
abstract = {Creating and animating realistic 3D human faces is an important element of virtual reality, video games, and other areas that involve interactive 3D graphics. In this paper, we propose a system to generate photorealistic 3D blendshape-based face models automatically using only a single consumer RGB-D sensor. The capture and processing requires no artistic expertise to operate, takes 15 seconds to capture and generate a single facial expression, and approximately 1 minute of processing time per expression to transform it into a blendshape model. Our main contributions include a complete end-to-end pipeline for capturing and generating photorealistic blendshape models automatically and a registration method that solves dense correspondences between two face scans by utilizing facial landmarks detection and optical flows. We demonstrate the effectiveness of the proposed method by capturing different human subjects with a variety of sensors and puppeteering their 3D faces with real-time facial performance retargeting. The rapid nature of our method allows for just-in-time construction of a digital face. To that end, we also integrated our pipeline with a virtual reality facial performance capture system that allows dynamic embodiment of the generated faces despite partial occlusion of the user’s real face by the head-mounted display.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Nazari, Zahra; Johnson, Emmanuel
The Misrepresentation Game: How to win at negotiation while seeming like a nice guy Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 728–737, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{gratch_misrepresentation_2016,
title = {The Misrepresentation Game: How to win at negotiation while seeming like a nice guy},
author = {Jonathan Gratch and Zahra Nazari and Emmanuel Johnson},
url = {http://dl.acm.org/citation.cfm?id=2937031},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {728--737},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Recently, interest has grown in agents that negotiate with people: to teach negotiation, to negotiate on behalf of people, and as a challenge problem to advance artificial social intelligence. Humans negotiate differently from algorithmic approaches to negotiation: people are not purely self-interested but place considerable weight on norms like fairness; people exchange information about their mental state and use this to judge the fairness of a social exchange; and people lie. Here, we focus on lying. We present an analysis of how people (or agents interacting with people) might optimally lie (maximally benefit themselves) while maintaining the illusion of fairness towards the other party. In doing so, we build on concepts from game theory and the preference-elicitation literature, but apply these to human, not rational, behavior. Our findings demonstrate clear benefits to lying and provide empirical support for a heuristic – the “fixed-pie lie” – that substantially enhances the efficiency of such deceptive algorithms. We conclude with implications and potential defenses against such manipulative techniques.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Rosoff, Heather; John, Richard S.
Semi-Automated Construction of Decision-Theoretic Models of Human Behavior Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 891–899, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{pynadath_semi-automated_2016,
title = {Semi-Automated Construction of Decision-Theoretic Models of Human Behavior},
author = {David V. Pynadath and Heather Rosoff and Richard S. John},
url = {http://dl.acm.org/citation.cfm?id=2937055},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {891--899},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Multiagent social simulation provides a powerful mechanism for policy makers to understand the potential outcomes of their decisions before implementing them. However, the value of such simulations depends on the accuracy of their underlying agent models. In this work, we present a method for automatically exploring a space of decision-theoretic models to arrive at a multiagent social simulation that is consistent with human behavior data. We start with a factored Partially Observable Markov Decision Process (POMDP) whose states, actions, and reward capture the questions asked in a survey from a disaster response scenario. Using input from domain experts, we construct a set of hypothesized dependencies that may or may not exist in the transition probability function. We present an algorithm to search through each of these hypotheses, evaluate their accuracy with respect to the data, and choose the models that best reflect the observed behavior, including individual differences. The result is a mechanism for constructing agent models that are grounded in human behavior data, while still being able to support hypothetical reasoning that is the main advantage of multiagent social simulation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Fyffe, Graham; Graham, Paul; Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Near-Instant Capture of High-Resolution Facial Geometry and Reflectance Journal Article
In: Computer Graphics Forum, 2016, ISSN: 1467-8659.
@article{fyffe_near-instant_2016,
  author    = {Graham Fyffe and Paul Graham and Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
  title     = {Near-Instant Capture of High-Resolution Facial Geometry and Reflectance},
  journal   = {Computer Graphics Forum},
  year      = {2016},
  date      = {2016-05-01},
  doi       = {10.1111/cgf.12837},
  issn      = {1467-8659},
  url       = {http://onlinelibrary.wiley.com/doi/10.1111/cgf.12837/abstract},
  abstract  = {We present a near-instant method for acquiring facial geometry and reflectance using a set of commodity DSLR cameras and flashes. Our setup consists of twenty-four cameras and six flashes which are fired in rapid succession with subsets of the cameras. Each camera records only a single photograph and the total capture time is less than the 67ms blink reflex. The cameras and flashes are specially arranged to produce an even distribution of specular highlights on the face. We employ this set of acquired images to estimate diffuse color, specular intensity, specular exponent, and surface orientation at each point on the face. We further refine the facial base geometry obtained from multi-view stereo using estimated diffuse and specular photometric information. This allows final submillimeter surface mesostructure detail to be obtained via shape-from-specularity. The final system uses commodity components and produces models suitable for authoring high-quality digital human characters.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Mell, Johnathan; Gratch, Jonathan
IAGO: Interactive Arbitration Guide Online Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 1510–1512, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016.
@inproceedings{mell_iago_2016,
title = {{IAGO}: Interactive Arbitration Guide Online},
author = {Johnathan Mell and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=2937230},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {1510--1512},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Automated negotiation between two agents has been the subject of much research focused on optimization and efficiency. However, human-agent negotiation represents a field in which real-world considerations can be more fully explored. Furthermore, teaching negotiation and other interpersonal skills requires long periods of practice with open-ended dialogues and partners. The API presented in this paper represents a novel platform on which to conduct human-agent research and facilitate teaching negotiation tactics in a longitudinal way. We present a prototype demonstration that is real-time, rapidly distributable, and allows more actions than current platforms of negotiation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chollet, Mathieu; Wörtwein, Torsten; Morency, Louis-Philippe; Scherer, Stefan
A Multimodal Corpus for the Assessment of Public Speaking Ability and Anxiety Proceedings Article
In: Proceedings of the LREC 2016, Tenth International Conference on Language Resources and Evaluation, pp. 488–495, European Language Resources Association, Portoroz, Slovenia, 2016, ISBN: 978-2-9517408-9-1.
@inproceedings{chollet_multimodal_2016,
title = {A Multimodal Corpus for the Assessment of Public Speaking Ability and Anxiety},
author = {Mathieu Chollet and Torsten Wörtwein and Louis-Philippe Morency and Stefan Scherer},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/599_Paper.pdf},
isbn = {978-2-9517408-9-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the LREC 2016, Tenth International Conference on Language Resources and Evaluation},
pages = {488--495},
publisher = {European Language Resources Association},
address = {Portorož, Slovenia},
abstract = {The ability to efficiently speak in public is an essential asset for many professions and is used in everyday life. As such, tools enabling the improvement of public speaking performance and the assessment and mitigation of anxiety related to public speaking would be very useful. Multimodal interaction technologies, such as computer vision and embodied conversational agents, have recently been investigated for the training and assessment of interpersonal skills. One central requirement for these technologies is multimodal corpora for training machine learning models. This paper addresses the need of these technologies by presenting and sharing a multimodal corpus of public speaking presentations. These presentations were collected in an experimental study investigating the potential of interactive virtual audiences for public speaking training. This corpus includes audio-visual data and automatically extracted features, measures of public speaking anxiety and personality, annotations of participants’ behaviors and expert ratings of behavioral aspects and overall performance of the presenters. We hope this corpus will help other research teams in developing tools for supporting public speaking training.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
}
Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
"Do As I Say, Not As I Do:” Challenges in Delegating Decisions to Automated Agents Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 949–956, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016.
@inproceedings{de_melo_as_2016,
title = {``Do As I Say, Not As I Do:'' Challenges in Delegating Decisions to Automated Agents},
author = {Celso M. de Melo and Stacy Marsella and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=2937063},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {949--956},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {There has been growing interest, across various domains, in computer agents that can decide on behalf of humans. These agents have the potential to save considerable time and help humans reach better decisions. One implicit assumption, however, is that, as long as the algorithms that simulate decision-making are correct and capture how humans make decisions, humans will treat these agents similarly to other humans. Here we show that interaction with agents that act on our behalf or on behalf of others is richer and more interesting than initially expected. Our results show that, on the one hand, people are more selfish with agents acting on behalf of others, than when interacting directly with others. We propose that agents increase the social distance with others which, subsequently, leads to increased demand. On the other hand, when people task an agent to interact with others, people show more concern for fairness than when interacting directly with others. In this case, higher psychological distance leads people to consider their social image and the long-term consequences of their actions and, thus, behave more fairly. To support these findings, we present an experiment where people engaged in the ultimatum game, either directly or via an agent, with others or agents representing others. We show that these patterns of behavior also occur in a variant of the ultimatum game – the impunity game – where others have minimal power over the final outcome. Finally, we study how social value orientation – i.e., people’s propensity for cooperation – impact these effects. These results have important implications for our understanding of the psychological mechanisms underlying interaction with agents, as well as practical implications for the design of successful agents that act on our behalf or on behalf of others.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Uryupina, Olga; Artstein, Ron; Bristot, Antonella; Cavicchio, Federica; Rodriguez, Kepa; Poesio, Massimo
ARRAU: Linguistically-Motivated Annotation of Anaphoric Descriptions Proceedings Article
In: Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016), pp. 2058–2062, European Language Resources Association (ELRA), Portorož, Slovenia, 2016.
@inproceedings{uryupina_arrau_2016,
title = {{ARRAU}: Linguistically-Motivated Annotation of Anaphoric Descriptions},
author = {Olga Uryupina and Ron Artstein and Antonella Bristot and Federica Cavicchio and Kepa Rodriguez and Massimo Poesio},
url = {http://www.lrec-conf.org/proceedings/lrec2016/summaries/1121.html},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)},
pages = {2058--2062},
publisher = {European Language Resources Association (ELRA)},
address = {Portorož, Slovenia},
abstract = {This paper presents a second release of the ARRAU dataset: a multi-domain corpus with thorough linguistically motivated annotation of anaphora and related phenomena. Building upon the first release almost a decade ago, a considerable effort had been invested in improving the data both quantitatively and qualitatively. Thus, we have doubled the corpus size, expanded the selection of covered phenomena to include referentiality and genericity and designed and implemented a methodology for enforcing the consistency of the manual annotation. We believe that the new release of ARRAU provides a valuable material for ongoing research in complex cases of coreference as well as for a variety of related tasks. The corpus is publicly available through LDC.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
}
Zarrieß, Sina; Hough, Julian; Kennington, Casey; Manuvinakurike, Ramesh; DeVault, David; Fernández, Raquel; Schlangen, David
PentoRef: A Corpus of Spoken References in Task-oriented Dialogues Proceedings Article
In: 10th edition of the Language Resources and Evaluation Conference, ELRA, Portorož, Slovenia, 2016.
@inproceedings{zarrieb_pentoref_2016,
title = {{PentoRef}: A Corpus of Spoken References in Task-oriented Dialogues},
author = {Sina Zarrieß and Julian Hough and Casey Kennington and Ramesh Manuvinakurike and David DeVault and Raquel Fernández and David Schlangen},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/563_Paper.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {10th edition of the Language Resources and Evaluation Conference},
publisher = {ELRA},
address = {Portorož, Slovenia},
abstract = {PentoRef is a corpus of task-oriented dialogues collected in systematically manipulated settings. The corpus is multilingual, with English and German sections, and overall comprises more than 20000 utterances. The dialogues are fully transcribed and annotated with referring expressions mapped to objects in corresponding visual scenes, which makes the corpus a rich resource for research on spoken referring expressions in generation and resolution. The corpus includes several sub-corpora that correspond to different dialogue situations where parameters related to interactivity, visual access, and verbal channel have been manipulated in systematic ways. The corpus thus lends itself to very targeted studies of reference in spontaneous dialogue.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Sayan; Laksana, Eugene; Morency, Louis-Philippe; Scherer, Stefan
Learning Representations of Affect from Speech Proceedings Article
In: ICLR 2016, ICLR, San Juan, Puerto Rico, 2016.
@inproceedings{ghosh_eugene_laksana_satan_learning_2016,
title = {Learning Representations of Affect from Speech},
author = {Sayan Ghosh and Eugene Laksana and Louis-Philippe Morency and Stefan Scherer},
url = {http://arxiv.org/pdf/1511.04747.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {ICLR 2016},
publisher = {ICLR},
address = {San Juan, Puerto Rico},
abstract = {There has been a lot of prior work on representation learning for speech recognition applications, but not much emphasis has been given to an investigation of effective representations of affect from speech, where the paralinguistic elements of speech are separated out from the verbal content. In this paper, we explore denoising autoencoders for learning paralinguistic attributes, i.e. categorical and dimensional affective traits from speech. We show that the representations learnt by the bottleneck layer of the autoencoder are highly discriminative of activation intensity and at separating out negative valence (sadness and anger) from positive valence (happiness). We experiment with different input speech features (such as FFT and log-mel spectrograms with temporal context windows), and different autoencoder architectures (such as stacked and deep autoencoders). We also learn utterance specific representations by a combination of denoising autoencoders and BLSTM based recurrent autoencoders. Emotion classification is performed with the learnt temporal/dynamic representations to evaluate the quality of the representations. Experiments on a well-established real-life speech dataset (IEMOCAP) show that the learnt representations are comparable to state of the art feature extractors (such as voice quality features and MFCCs) and are competitive with state-of-the-art approaches at emotion and dimensional affect recognition.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Konovalov, Vasily; Artstein, Ron; Melamud, Oren; Dagan, Ido
The Negochat Corpus of Human-agent Negotiation Dialogues Proceedings Article
In: Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016), pp. 3141–3145, European Language Resources Association (ELRA), Portorož, Slovenia, 2016.
@inproceedings{konovalov_negochat_2016,
title = {The Negochat Corpus of Human-agent Negotiation Dialogues},
author = {Vasily Konovalov and Ron Artstein and Oren Melamud and Ido Dagan},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/240_Paper.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)},
pages = {3141--3145},
publisher = {European Language Resources Association (ELRA)},
address = {Portorož, Slovenia},
abstract = {Annotated in-domain corpora are crucial to the successful development of dialogue systems of automated agents, and in particular for developing natural language understanding (NLU) components of such systems. Unfortunately, such important resources are scarce. In this work, we introduce an annotated natural language human-agent dialogue corpus in the negotiation domain. The corpus was collected using Amazon Mechanical Turk following the ‘Wizard-Of-Oz’ approach, where a ‘wizard’ human translates the participants’ natural language utterances in real time into a semantic language. Once dialogue collection was completed, utterances were annotated with intent labels by two independent annotators, achieving high inter-annotator agreement. Our initial experiments with an SVM classifier show that automatically inferring such labels from the utterances is far from trivial. We make our corpus publicly available to serve as an aid in the development of dialogue systems for negotiation agents, and suggest that analogous corpora can be created following our methodology and using our available source code. To the best of our knowledge this is the first publicly available negotiation dialogue corpus.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Melo, Celso De; Marsella, Stacy; Gratch, Jonathan
People Don’t Feel Guilty About Exploiting Machines Journal Article
In: ACM Transactions on Computer-Human Interaction (TOCHI), vol. 23, no. 2, pp. 1–17, 2016, ISSN: 1073-0516.
@article{melo_people_2016,
title = {People Don’t Feel Guilty About Exploiting Machines},
author = {Celso M. de Melo and Stacy Marsella and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=2890495},
doi = {10.1145/2890495},
issn = {1073-0516},
year = {2016},
date = {2016-05-01},
journal = {ACM Transactions on Computer-Human Interaction (TOCHI)},
volume = {23},
number = {2},
pages = {1--17},
abstract = {Guilt and envy play an important role in social interaction. Guilt occurs when individuals cause harm to others or break social norms. Envy occurs when individuals compare themselves unfavorably to others and desire to benefit from the others’ advantage. In both cases, these emotions motivate people to act and change the status quo: following guilt, people try to make amends for the perceived transgression and, following envy, people try to harm envied others. In this paper, we present two experiments that study participants' experience of guilt and envy when engaging in social decision making with machines and humans. The results showed that, though experiencing the same level of envy, people felt considerably less guilt with machines than with humans. These effects occurred both with subjective and behavioral measures of guilt and envy, and in three different economic games: public goods, ultimatum, and dictator game. This poses an important challenge for human-computer interaction because, as shown here, it leads people to systematically exploit machines, when compared to humans. We discuss theoretical and practical implications for the design of human-machine interaction systems that hope to achieve the kind of efficiency – cooperation, fairness, reciprocity, etc. – we see in human-human interaction.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Pincus, Eli; Traum, David
Towards Automatic Identification of Effective Clues for Team Word-Guessing Games Proceedings Article
In: Proceedings of the Language Resources and Evaluation Conference (LREC), pp. 2741–2747, European Language Resources Association, Portorož, Slovenia, 2016.
@inproceedings{pincus_towards_2016,
title = {Towards Automatic Identification of Effective Clues for Team Word-Guessing Games},
author = {Eli Pincus and David Traum},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/762_Paper.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Language Resources and Evaluation Conference (LREC)},
pages = {2741--2747},
publisher = {European Language Resources Association},
address = {Portorož, Slovenia},
abstract = {Team word-guessing games where one player, the clue-giver, gives clues attempting to elicit a target-word from another player, the receiver, are a popular form of entertainment and also used for educational purposes. Creating an engaging computational agent capable of emulating a talented human clue-giver in a timed word-guessing game depends on the ability to provide effective clues (clues able to elicit a correct guess from a human receiver). There are many available web resources and databases that can be mined for the raw material for clues for target-words; however, a large number of those clues are unlikely to be able to elicit a correct guess from a human guesser. In this paper, we propose a method for automatically filtering a clue corpus for effective clues for an arbitrary target-word from a larger set of potential clues, using machine learning on a set of features of the clues, including point-wise mutual information between a clue’s constituent words and a clue’s target-word. The results of the experiments significantly improve the average clue quality over previous approaches, and bring quality rates in-line with measures of human clue quality derived from a corpus of human-human interactions. The paper also introduces the data used to develop this method; audio recordings of people making guesses after having heard the clues being spoken by a synthesized voice (Pincus and Traum, 2016).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gilani, Setareh Nasihati; Sheetz, Kraig; Lucas, Gale; Traum, David
What Kind of Stories Should a Virtual Human Swap? Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 1437–1438, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{nasihati_gilani_what_2016,
title = {What Kind of Stories Should a Virtual Human Swap?},
author = {Setareh Nasihati Gilani and Kraig Sheetz and Gale Lucas and David Traum},
url = {http://dl.acm.org/citation.cfm?id=2937198},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {1437--1438},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Stories are pervasive in conversation between people [5]. They are used to establish identity, pass on cultural heritage, and build rapport. Often stories are swapped when one conversational participant will reply to a story with a story. Stories are also told by virtual humans [1, 6, 2]. In creating or mining stories for a virtual human (VH) to tell, there are a number of considerations that come up about what kinds of stories should be told, and how the stories should be related to the virtual human's identity, such as whether the identity should be human or artificial, and whether the stories should be about the virtual human or about someone else. We designed a set of virtual human characters who can engage in a simple form of story-swapping. Each of the characters can engage in simple interactions such as greetings and closings and can respond to a set of "ice-breaker" questions, that might be used on a first date or similar "get to know you" encounter. For these questions the character's answer includes a story. We created 4 character response sets, to have all combinations of identity (human or artificial) and perspective (first person stories about the narrator, or third person stories about someone else). We also designed an experiment to try to explore the collective impact of above principles on people who interact with the characters. Participants interact with two of the above characters in a "get to know you" scenario. We investigate the degree of reciprocity where people respond to the character with their own stories, and also compare rapport of participants with the characters as well as the impressions of the character's personality.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Collins, Kathryn J.; Traum, David
Towards A Multi-Dimensional Taxonomy Of Stories In Dialogue Proceedings Article
In: Proceedings of the Language Resources and Evaluation Conference (LREC), pp. 118–124, European Language Resources Association, Portorož, Slovenia, 2016, ISBN: 978-2-9517408-9-1.
@inproceedings{collins_towards_2016,
title = {Towards A Multi-Dimensional Taxonomy Of Stories In Dialogue},
author = {Kathryn J. Collins and David Traum},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/354_Paper.pdf},
isbn = {978-2-9517408-9-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Language Resources and Evaluation Conference (LREC)},
pages = {118--124},
publisher = {European Language Resources Association},
address = {Portorož, Slovenia},
abstract = {In this paper, we present a taxonomy of stories told in dialogue. We based our scheme on prior work analyzing narrative structure and method of telling, relation to storyteller identity, as well as some categories particular to dialogue, such as how the story gets introduced. Our taxonomy currently has 5 major dimensions, with most having sub-dimensions - each dimension has an associated set of dimension-specific labels. We adapted an annotation tool for this taxonomy and have annotated portions of two different dialogue corpora, Switchboard and the Distress Analysis Interview Corpus. We present examples of some of the tags and concepts with stories from Switchboard, and some initial statistics of frequencies of the tags.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
RIZZO, Albert; LUCAS, Gale; GRATCH, Jonathan; STRATOU, Giota; MORENCY, Louis-Philippe; CHAVEZ, Kenneth; SHILLING, Russ; SCHERER, Stefan
Automatic Behavior Analysis During a Clinical Interview with a Virtual Human Journal Article
In: Medicine Meets Virtual Reality 22: NextMed/MMVR22, vol. 220, pp. 316–322, 2016.
@article{rizzo_automatic_2016,
title = {Automatic Behavior Analysis During a Clinical Interview with a Virtual Human},
author = {Albert Rizzo and Gale Lucas and Jonathan Gratch and Giota Stratou and Louis-Philippe Morency and Kenneth Chavez and Russ Shilling and Stefan Scherer},
url = {http://books.google.com/books?hl=en&lr=&id=sLgtDAAAQBAJ&oi=fnd&pg=PA316&dq=%22captured+across+a+20+minute+interview.+Results+from+of+sample+of+service%22+%22technology+for+clinical+purposes.+Recent+shifts+in+the+social+and%22+%22needed+to+create+VH+systems+is+now+driving+application+development+across%22+&ots=Ej8M4iuPfb&sig=Ad6Z3DPSwN3qA2gMDKWPe1YTPhg},
doi = {10.3233/978-1-61499-625-5-316},
year = {2016},
date = {2016-04-01},
journal = {Medicine Meets Virtual Reality 22: NextMed/MMVR22},
volume = {220},
pages = {316--322},
abstract = {SimSensei is a Virtual Human (VH) interviewing platform that uses off-the-shelf sensors (i.e., webcams, Microsoft Kinect and a microphone) to capture and interpret real-time audiovisual behavioral signals from users interacting with the VH system. The system was specifically designed for clinical interviewing and health care support by providing a face-to-face interaction between a user and a VH that can automatically react to the inferred state of the user through analysis of behavioral signals gleaned from the user’s facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human-to-human interaction and communication, SimSensei aims to capture and infer user state from signals generated from user non-verbal communication to improve engagement between a VH and a user and to quantify user state from the data captured across a 20 minute interview. Results from a sample of service members (SMs) who were interviewed before and after a deployment to Afghanistan indicate that SMs reveal more PTSD symptoms to the VH than they report on the Post Deployment Health Assessment. Pre/Post deployment facial expression analysis indicated more sad expressions and few happy expressions at post deployment.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
TALBOT, Thomas B.; KALISCH, Nicolai; CHRISTOFFERSEN, Kelly; LUCAS, Gale; FORBELL, Eric
Natural Language Understanding Performance & Use Considerations in Virtual Medical Encounters Journal Article
In: Medicine Meets Virtual Reality 22: NextMed/MMVR22, vol. 220, pp. 407–413, 2016.
@article{talbot_natural_2016,
title = {Natural Language Understanding Performance \& Use Considerations in Virtual Medical Encounters},
author = {Thomas B. Talbot and Nicolai Kalisch and Kelly Christoffersen and Gale Lucas and Eric Forbell},
url = {http://books.google.com/books?hl=en&lr=&id=sLgtDAAAQBAJ&oi=fnd&pg=PA407&dq=%22through+regular+web+browsers+and+is+capable+of+multiple+types+of%22+%22practice+targeting+diagnostic+interviews.+A+natural+language+interview%22+%22narrative+statement+based+upon+dialog+context.+The+dialog+manager%27s%22+&ots=Ej8L8hxLlb&sig=GMnqEb5n7CB9x1lWE4gfe5_4n8o},
doi = {10.3233/978-1-61499-625-5-407},
year = {2016},
date = {2016-04-01},
journal = {Medicine Meets Virtual Reality 22: NextMed/MMVR22},
volume = {220},
pages = {407--413},
abstract = {A virtual standardized patient (VSP) prototype was tested for natural language understanding (NLU) performance. The conversational VSP was evaluated in a controlled 61 subject study over four repetitions of a patient case. The prototype achieved more than 92% appropriate response rate from naïve users on their first attempt and results were stable by their fourth case repetition. This level of performance exceeds prior efforts and is at a level comparable of accuracy as seen in human conversational patient training, with caveats. This level of performance was possible due to the use of a unified medical taxonomy underpinning that allows virtual patient language training to be applied to all cases in our system as opposed to benefiting a single patient case.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Filter
2003
Marsella, Stacy C.; Gratch, Jonathan
Modeling Coping Behaviors in Virtual Humans: Don't worry, Be Happy Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 313–320, Melbourne, Australia, 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_modeling_2003,
title = {Modeling Coping Behaviors in Virtual Humans: Don't worry, Be Happy},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Modeling%20Coping%20Behavior%20in%20Virtual%20Humans-%20Dont%20worry%20Be%20happy.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
pages = {313--320},
address = {Melbourne, Australia},
abstract = {This article builds on insights into how humans cope with emotion to guide the design of virtual humans. Although coping is increasingly viewed in the psychological literature as having a central role in human adaptive behavior, it has been largely ignored in computational models of emotion. In this paper, we show how psychological research on the interplay between human emotion, cognition and coping behavior can serve as a central organizing principle for the behavior of human-like autonomous agents. We present a detailed domain-independent model of coping based on this framework that significantly extends our previous work. We argue that this perspective provides novel insights into realizing adaptive behavior.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
The Social Credit Assignment Problem Proceedings Article
In: Lecture Notes in Computer Science; Proceedings of the 4th International Workshop on Intelligent Virtual Agents (IVA), Kloster Irsee, Germany, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_social_2003,
title = {The Social Credit Assignment Problem},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/The%20Social%20Credit%20Assignment%20Problem.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Lecture Notes in Computer Science; Proceedings of the 4th International Workshop on Intelligent Virtual Agents (IVA)},
volume = {2792},
address = {Kloster Irsee, Germany},
abstract = {Social credit assignment is a process of social judgment whereby one singles out individuals to blame or credit for multi-agent activities. Such judgments are a key aspect of social intelligence and underlie social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a preliminary computational approach to forming such judgments based on an agent's causal knowledge and conversation interactions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gardner, Andrew; Tchou, Chris; Hawkins, Tim; Debevec, Paul
Linear Light Source Reflectometry Proceedings Article
In: ACM Transactions on Graphics, 2003.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{gardner_linear_2003,
  author    = {Andrew Gardner and Chris Tchou and Tim Hawkins and Paul Debevec},
  title     = {Linear Light Source Reflectometry},
  booktitle = {ACM Transactions on Graphics},
  year      = {2003},
  date      = {2003-01-01},
  url       = {http://ict.usc.edu/pubs/Linear%20Light%20Source%20Reflectometry.pdf},
  abstract  = {This paper presents a technique for estimating the spatially-varying reflectance properties of a surface based on its appearance during a single pass of a linear light source. By using a linear light rather than a point light source as the illuminant, we are able to reliably observe and estimate the diffuse color, specular color, and specular roughness of each point of the surface. The reflectometry apparatus we use is simple and inexpensive to build, requiring a single direction of motion for the light source and a fixed camera viewpoint. Our model fitting technique first renders a reflectance table of how diffuse and specular reflectance lobes would appear under moving linear light source illumination. Then, for each pixel we compare its series of intensity values to the tabulated reflectance lobes to determine which reflectance model parameters most closely produce the observed reflectance values. Using two passes of the linear light source at different angles, we can also estimate per-pixel surface normals as well as the reflectance parameters. Additionally our system records a per-pixel height map for the object and estimates its per-pixel translucency. We produce real-time renderings of the captured objects using a custom hardware shading algorithm. We apply the technique to a test object exhibiting a variety of materials as well as to an illuminated manuscript with gold lettering. To demonstrate the technique's accuracy, we compare renderings of the captured models to real photographs of the original objects.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pair, Jarrell; Neumann, Ulrich; Piepol, Diane; Swartout, William
FlatWorld: Combining Hollywood Set-Design Techniques with VR Journal Article
In: IEEE Computer Graphics and Applications, no. January/February, 2003.
@article{pair_flatworld_2003,
title = {FlatWorld: Combining Hollywood Set-Design Techniques with VR},
author = {Jarrell Pair and Ulrich Neumann and Diane Piepol and William Swartout},
editor = {Lawrence Rosenblum and Michael Macedonia},
url = {http://ict.usc.edu/pubs/FlatWorld-%20Combining%20Hollywood%20Set-Design%20Techniques%20with%20VR.pdf},
year = {2003},
date = {2003-01-01},
journal = {IEEE Computer Graphics and Applications},
number = {January/February},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Mao, Wenji
Automating After Action Review: Attributing Blame or Credit in Team Training Proceedings Article
In: Proceedings of the 12th Conference on Behavior Representation in Modeling and Simulation, Scottsdale, AZ, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_automating_2003,
  author    = {Jonathan Gratch and Wenji Mao},
  title     = {Automating After Action Review: Attributing Blame or Credit in Team Training},
  booktitle = {Proceedings of the 12th Conference on Behavior Representation in Modeling and Simulation},
  address   = {Scottsdale, AZ},
  year      = {2003},
  date      = {2003-01-01},
  url       = {http://ict.usc.edu/pubs/Automating%20After%20Action%20Review-%20Attributing%20Blame%20or%20Credit%20in%20Team%20Training.pdf},
  abstract  = {Social credit assignment is a process of social judgment whereby one singles out individuals to blame or credit for multi-agent activities. Such judgments are a key aspect of social intelligence and underlie social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a preliminary computational approach to forming such judgments based on an agent's causal knowledge and conversation interactions.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
The Social Credit Assignment Problem (Extended Version) Technical Report
University of Southern California Institute for Creative Technologies Kloster Irsee, Germany, no. ICT TR 02 2003, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{mao_social_2003-1,
title = {The Social Credit Assignment Problem (Extended Version)},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/ICT%20TR%2002%202003.pdf},
year = {2003},
date = {2003-01-01},
number = {ICT TR 02 2003},
address = {Kloster Irsee, Germany},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {Social credit assignment is a process of social judgment whereby one singles out individuals to blame or credit for multi-agent activities. Such judgments are a key aspect of social intelligence and underlie social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a preliminary computational approach to forming such judgments based on an agent's causal knowledge and conversation interactions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Traum, David; Larsson, Staffan
The Information State Approach to Dialogue Management Book Section
In: Current and New Directions in Discourse and Dialogue, pp. 325–353, 2003.
Links | BibTeX | Tags: Virtual Humans
@incollection{traum_information_2003,
title = {The Information State Approach to Dialogue Management},
author = {David Traum and Staffan Larsson},
url = {http://ict.usc.edu/pubs/The%20Information%20State%20Approach%20to%20Dialogue%20Management.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Current and New Directions in Discourse and Dialogue},
publisher = {Kluwer Academic Publishers},
pages = {325--353},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Cao, Yong; Faloutsos, Petros; Pighin, Frédéric
Unsupervised Learning for Speech Motion Editing Proceedings Article
In: Proceedings of Eurographics/SIGGRAPH Symposium on Computer Animation, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{cao_unsupervised_2003,
  author    = {Yong Cao and Petros Faloutsos and Frédéric Pighin},
  title     = {Unsupervised Learning for Speech Motion Editing},
  booktitle = {Proceedings of Eurographics/SIGGRAPH Symposium on Computer Animation},
  year      = {2003},
  date      = {2003-01-01},
  url       = {http://ict.usc.edu/pubs/Unsupervised%20Learning%20for%20Speech%20Motion%20Editing.pdf},
  abstract  = {We present a new method for editing speech related facial motions. Our method uses an unsupervised learning technique, Independent Component Analysis (ICA), to extract a set of meaningful parameters without any annotation of the data. With ICA, we are able to solve a blind source separation problem and describe the original data as a linear combination of two sources. One source captures content (speech) and the other captures style (emotion). By manipulating the independent components we can edit the motions in intuitive ways.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Shapiro, Ari; Pighin, Frédéric
Hybrid Control For Interactive Character Animation Proceedings Article
In: Proceedings of the 11th Pacific Conference on Computer Graphics and Applications, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{shapiro_hybrid_2003,
title = {Hybrid Control For Interactive Character Animation},
author = {Ari Shapiro and Frédéric Pighin},
url = {http://ict.usc.edu/pubs/Hybrid%20Control%20For%20Interactive%20Character%20Animation.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Proceedings of the 11th Pacific Conference on Computer Graphics and Applications},
abstract = {We implement a framework for animating interactive characters by combining kinematic animation with physical simulation. The combination of animation techniques allows the characters to exploit the advantages of each technique. For example, characters can perform natural-looking kinematic gaits and react dynamically to unexpected situations. Kinematic techniques such as those based on motion capture data can create very natural-looking animation. However, motion capture based techniques are not suitable for modeling the complex interactions between dynamically interacting characters. Physical simulation, on the other hand, is well suited for such tasks. Our work develops kinematic and dynamic controllers and transition methods between the two control methods for interactive character animation. In addition, we utilize the motion graph technique to develop complex kinematic animation from shorter motion clips as a method of kinematic control.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Morie, Jacquelyn; Williams, Josh
The Gestalt of Virtual Environments Proceedings Article
In: International Workshop on Presence, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{morie_gestalt_2003,
  author    = {Jacquelyn Morie and Josh Williams},
  title     = {The Gestalt of Virtual Environments},
  booktitle = {International Workshop on Presence},
  year      = {2003},
  date      = {2003-01-01},
  url       = {http://ict.usc.edu/pubs/The%20Gestalt%20of%20Virtual%20Environments.pdf},
  abstract  = {The majority of research in the field of virtual reality to date has focused on increasing the fidelity of the environments created and trying to determine the quality of the participant experience. Efforts have been made to quantify such aspects, especially in regards to visuals and sound, and to a lesser extent to the user experience. Recent thinking has tended towards the assumption that ever-greater fidelity would ensure a better user experience. However, such emphasis on photo-realism and audio-realism does not take into account the collective results of our multimodal sensory inputs with their intertwined effects. Our design philosophy for the creation of virtual environments attempts to replicate the human experience, and asks the question: Is there an underlying fidelity of feels-real through which the quality of the participant experience could be improved?},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Rickel, Jeff; Gratch, Jonathan; Marsella, Stacy C.
Negotiation over Tasks in Hybrid Human-Agent Teams for Simulation-Based Training Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 441–448, Melbourne, Australia, 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_negotiation_2003,
title = {Negotiation over Tasks in Hybrid Human-Agent Teams for Simulation-Based Training},
author = {David Traum and Jeff Rickel and Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Negotiation%20over%20Tasks%20in%20Hybrid%20Human-Agent%20Teams%20for%20Simulation-Based%20Training.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
pages = {441--448},
address = {Melbourne, Australia},
abstract = {The effectiveness of simulation-based training for individual tasks – such as piloting skills – is well established, but its use for team training raises challenging technical issues. Ideally, human users could gain valuable leadership experience by interacting with synthetic teammates in realistic and potentially stressful scenarios. However, creating human-like teammates that can support flexible, natural interactions with humans and other synthetic agents requires integrating a wide variety of capabilities, including models of teamwork, models of human negotiation, and the ability to participate in face-to-face spoken conversations in virtual worlds. We have developed such virtual humans by integrating and extending prior work in these areas, and we have applied our virtual humans to an example peacekeeping training scenario to guide and evaluate our research. Our models allow agents to reason about authority and responsibility for individual actions in a team task and, as appropriate, to carry out actions, give and accept orders, monitor task execution, and negotiate options. Negotiation is guided by the agents' dynamic assessment of alternative actions given the current scenario conditions, with the aim of guiding the human user towards an ability to make similar assessments.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Habash, Nizar; Dorr, Bonnie; Traum, David
Hybrid Natural Language Generation from Lexical Conceptual Structures Journal Article
In: Machine Translation, vol. 18, pp. 81–127, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{habash_hybrid_2003,
title = {Hybrid Natural Language Generation from Lexical Conceptual Structures},
author = {Nizar Habash and Bonnie Dorr and David Traum},
url = {http://ict.usc.edu/pubs/Hybrid%20Natural%20Language%20Generation%20from%20Lexical%20%20Conceptual%20Structures.pdf},
year = {2003},
date = {2003-01-01},
journal = {Machine Translation},
volume = {18},
pages = {81--127},
abstract = {This paper describes Lexogen, a system for generating natural-language sentences from Lexical Conceptual Structure, an interlingual representation. The system has been developed as part of a Chinese–English Machine Translation (MT) system; however, it is designed to be used for many other MT language pairs and natural language applications. The contributions of this work include: (1) development of a large-scale Hybrid Natural Language Generation system with language-independent components; (2) enhancements to an interlingual representation and associated algorithm for generation from ambiguous input; (3) development of an efficient reusable language-independent linearization module with a grammar description language that can be used with other systems; (4) improvements to an earlier algorithm for hierarchically mapping thematic roles to surface positions; and (5) development of a diagnostic tool for lexicon coverage and correctness and use of the tool for verification of English, Spanish, and Chinese lexicons. An evaluation of Chinese–English translation quality shows comparable performance with a commercial translation system. The generation system can also be extended to other languages and this is demonstrated and evaluated for Spanish.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Martinovski, Bilyana; Traum, David
The Error Is the Clue: Breakdown In Human-Machine Interaction Proceedings Article
In: Proceedings of ISCA Tutorial and Research Workshop International Speech Communication Association, Switzerland, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{martinovski_error_2003,
title = {The Error Is the Clue: Breakdown In Human-Machine Interaction},
author = {Bilyana Martinovski and David Traum},
url = {http://ict.usc.edu/pubs/The%20Error%20Is%20the%20Clue-%20Breakdown%20In%20Human-Machine%20Interaction.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Proceedings of ISCA Tutorial and Research Workshop International Speech Communication Association},
address = {Switzerland},
abstract = {This paper focuses not on the detection and correction of specific errors in the interaction between machines and humans, but rather cases of massive deviation from the user's conversational expectations and desires. This can be the result of too many or too unusual errors, but also from dialogue strategies designed to minimize error, which make the interaction unnatural in other ways. We study causes of irritation such as over-fragmentation, over-clarity, over-coordination, over-directedness, and repetitiveness of verbal action, syntax, and intonation. Human reactions to these irritating features typically appear in the following order: tiredness, tolerance, anger, confusion, irony, humor, exhaustion, uncertainty, lack of desire to communicate. The studied features of human expressions of irritation in non-face-to-face interaction are: intonation, emphatic speech, elliptic speech, speed of speech, extra-linguistic signs, speed of verbal action, and overlap.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Unger, J.; Wenger, Andreas; Hawkins, Tim; Gardner, Andrew; Debevec, Paul
Capturing and Rendering With Incident Light Fields Proceedings Article
In: Proceedings of the 14th Eurographics workshop on Rendering, 2003.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{unger_capturing_2003,
  author    = {J. Unger and Andreas Wenger and Tim Hawkins and Andrew Gardner and Paul Debevec},
  title     = {Capturing and Rendering With Incident Light Fields},
  booktitle = {Proceedings of the 14th Eurographics workshop on Rendering},
  year      = {2003},
  date      = {2003-01-01},
  url       = {http://ict.usc.edu/pubs/Capturing%20and%20Rendering%20With%20Incident%20Light%20Fields.pdf},
  abstract  = {This paper presents a process for capturing spatially and directionally varying illumination from a real-world scene and using this lighting to illuminate computer-generated objects. We use two devices for capturing such illumination. In the first we photograph an array of mirrored spheres in high dynamic range to capture the spatially varying illumination. In the second, we obtain higher resolution data by capturing images with an high dynamic range omnidirectional camera as it traverses across a plane. For both methods we apply the light field technique to extrapolate the incident illumination to a volume. We render computer-generated objects as illuminated by this captured illumination using a custom shader within an existing global illumination rendering system. To demonstrate our technique we capture several spatially-varying lighting environments with spotlights, shadows, and dappled lighting and use them to illuminate synthetic scenes. We also show comparisons to real objects under the same illumination.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Moore, Benjamin
QuBit Documentation Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 01.2003, 2003.
@techreport{moore_qubit_2003,
  author      = {Benjamin Moore},
  title       = {QuBit Documentation},
  institution = {University of Southern California Institute for Creative Technologies},
  number      = {ICT TR 01.2003},
  address     = {Marina del Rey, CA},
  year        = {2003},
  date        = {2003-01-01},
  url         = {http://ict.usc.edu/pubs/QuBit%20Documentation.pdf},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
2002
Bharitkar, Sunil; Kyriakakis, Chris
Robustness of Spatial Averaging Equalization Methods: A Statistical Approach Proceedings Article
In: IEEE 36th Asilomar Conference on Signals, Systems & Computers, Pacific Grove, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_robustness_2002-1,
title = {Robustness of Spatial Averaging Equalization Methods: A Statistical Approach},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Robustness%20of%20Spatial%20Averaging%20Equalization%20Methods-%20A%20Statistical%20Approach.pdf},
year = {2002},
date = {2002-11-01},
booktitle = {IEEE 36th Asilomar Conference on Signals, Systems \& Computers},
address = {Pacific Grove, CA},
abstract = {Traditionally, room response equalization is performed to improve sound quality at a given listener. However, room responses vary with source and listener positions. Hence, in a multiple listener environment, equalization may be performed through spatial averaging of room responses. However, the performance of averaging based equalization, at the listeners, may be affected when listener positions change. In this paper, we present a statistical approach to map variations in listener positions to performance of spatial averaging based equalization. The results indicate that, for the analyzed listener configurations, the zone of equalization depends on distance of microphones from a source and the frequencies in the sound.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bharitkar, Sunil; Kyriakakis, Chris
Perceptual Multiple Location Equalization with Clustering Proceedings Article
In: IEEE 36th Asilomar Conference on Signals, Systems & Computers, Pacific Grove, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_perceptual_2002,
title = {Perceptual Multiple Location Equalization with Clustering},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Perceptual%20Multiple%20Location%20Equalization%20with%20Clustering.pdf},
year = {2002},
date = {2002-11-01},
booktitle = {IEEE 36th Asilomar Conference on Signals, Systems \& Computers},
address = {Pacific Grove, CA},
abstract = {Typically, room equalization techniques do not focus on designing filters that equalize the room transfer functions on perceptually relevant spectral features. In this paper we address the problem of room equalization for multiple listeners, simultaneously, using a perceptually designed equalization filter based on pattern recognition techniques. Some features of the proposed filter are, its ability to perform simultaneous equalization at multiple locations, a reduced order, and a psychoacoustically motivated design. In summary, the simultaneous multiple location equalization, using a pattern recognition method, is performed over perceptually relevant spectral components derived from the auditory filtering mechanism.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bharitkar, Sunil; Hilmes, Philip; Kyriakakis, Chris
Robustness of Multiple Listener Equalization With Magnitude Response Averaging Proceedings Article
In: Proceedings of the Audio Engineering Society Convention, Los Angeles, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_robustness_2002,
title = {Robustness of Multiple Listener Equalization With Magnitude Response Averaging},
author = {Sunil Bharitkar and Philip Hilmes and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Robustness%20of%20Multiple%20Listener%20Equalization%20With%20Magnitude%20Response%20Averaging.pdf},
year = {2002},
date = {2002-10-01},
booktitle = {Proceedings of the Audio Engineering Society Convention},
address = {Los Angeles, CA},
abstract = {Traditionally, room response equalization is performed to improve sound quality at a given listener. However, room responses vary with source and listener positions. Hence, in a multiple listener environment, equalization may be performed through spatial averaging of magnitude responses at locations of interest. However, the performance of averaging based equalization, at the listeners, may be affected when listener positions change. In this paper, we present a statistical approach to map variations in listener positions to a performance metric of equalization for magnitude response averaging. The results indicate that, for the analyzed listener configurations, the zone of equalization depends on distance of microphones from a source and the frequencies in the sound.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgiou, Panayiotis G.; Kyriakakis, Chris
An Alternative Model for Sound Signals Encountered in Reverberant Environments; Robust Maximum Likelihood Localization and Parameter Estimation Based on a Sub-Gaussian Model Proceedings Article
In: Proceedings of the Audio Engineering Society Convention, Los Angeles, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{georgiou_alternative_2002,
title = {An Alternative Model for Sound Signals Encountered in Reverberant Environments; Robust Maximum Likelihood Localization and Parameter Estimation Based on a Sub-Gaussian Model},
author = {Panayiotis G. Georgiou and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/An%20Alternative%20Model%20for%20Sound%20Signals%20Encountered%20in%20Reverberant%20Environments%3b%20Robust%20Maximum%20Likelihood%20Localization%20and%20Parameter%20Estimation%20Based%20on%20a%20Sub-Gaussian%20Model.pdf},
year = {2002},
date = {2002-10-01},
booktitle = {Proceedings of the Audio Engineering Society Convention},
address = {Los Angeles, CA},
abstract = {In this paper we investigate an alternative to the Gaussian density for modeling signals encountered in audio environments. The observation that sound signals are impulsive in nature, combined with the reverberation effects commonly encountered in audio, motivates the use of the Sub-Gaussian density. The new Sub-Gaussian statistical model and the separable solution of its Maximum Likelihood estimator are derived. These are used in an array scenario to demonstrate with both simulations and two different microphone arrays the achievable performance gains. The simulations exhibit the robustness of the sub-Gaussian based method while the real world experiments reveal a significant performance gain, supporting the claim that the sub-Gaussian model is better suited for sound signals.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, W. Lewis; Narayanan, Shrikanth; Whitney, Richard; Das, Rajat; Labore, Catherine
Limited Domain Synthesis of Expressive Military Speech for Animated Characters Proceedings Article
In: IEEE 2002 Workshop on Speech Synthesis, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{johnson_limited_2002,
  author    = {W. Lewis Johnson and Shrikanth Narayanan and Richard Whitney and Rajat Das and Catherine Labore},
  title     = {Limited Domain Synthesis of Expressive Military Speech for Animated Characters},
  booktitle = {IEEE 2002 Workshop on Speech Synthesis},
  year      = {2002},
  date      = {2002-09-01},
  url       = {http://ict.usc.edu/pubs/Limited%20Domain%20Synthesis%20of%20Expressive%20Military%20Speech%20for%20Animated%20Characters.pdf},
  abstract  = {Text-to-speech synthesis can play an important role in interactive education and training applications, as voices for animated agents. Such agents need high-quality voices capable of expressing intent and emotion. This paper presents preliminary results in an effort aimed at synthesizing expressive military speech for training applications. Such speech has acoustic and prosodic characteristics that can differ markedly from ordinary conversational speech. A limited domain synthesis approach is used employing samples of expressive speech, classified according to speaking style. The resulting synthesizer was tested both in isolation and in the context of a virtual reality training scenario with animated characters.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Gordon, Andrew S.
The Theory of Mind in Strategy Representations Proceedings Article
In: Proceedings of the Twenty-fourth Annual Meeting of the Cognitive Science Society (CogSci), Lawrence Erlbaum Associates, George Mason University, 2002.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_theory_2002,
title = {The {Theory of Mind} in Strategy Representations},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/The%20Theory%20of%20Mind%20in%20Strategy%20Representations.PDF},
year = {2002},
date = {2002-08-01},
booktitle = {Proceedings of the Twenty-fourth Annual Meeting of the Cognitive Science Society (CogSci)},
publisher = {Lawrence Erlbaum Associates},
address = {George Mason University},
abstract = {Many scientific fields continue to explore cognition related to Theory of Mind abilities, where people reason about the mental states of themselves and others. Experimental and theoretical approaches to this problem have largely avoided issues concerning the contents of representations employed in this class of reasoning. In this paper, we describe a new approach to the investigation of representations related to Theory of Mind abilities that is based on the analysis of commonsense strategies. We argue that because the mental representations of strategies must include concepts of mental states and processes, the large-scale analysis of strategies can be informative of the representational scope of Theory of Mind abilities. The results of an analysis of this sort are presented as a description of thirty representational areas that organize the breadth of Theory of Mind concepts. Implications for Theory Theories and Simulation Theories of Theory of Mind reasoning are discussed.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Angros, Richard Jr.; Johnson, W. Lewis; Rickel, Jeff; Scholer, Andrew
Learning Domain Knowledge for Teaching Procedural Skills Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Bologna, Italy, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{angros_learning_2002,
title = {Learning Domain Knowledge for Teaching Procedural Skills},
author = {Richard Jr. Angros and W. Lewis Johnson and Jeff Rickel and Andrew Scholer},
url = {http://ict.usc.edu/pubs/Learning%20Domain%20Knowledge%20for%20Teaching%20Procedural%20Skills.pdf},
year = {2002},
date = {2002-07-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Bologna, Italy},
abstract = {This paper describes a method for acquiring procedural knowledge for use by pedagogical agents in interactive simulation-based learning environments. Such agents need to be able to adapt their behavior to the changing conditions of the simulated world, and respond appropriately in mixed-initiative interactions with learners. This requires a good understanding of the goals and causal dependencies in the procedures being taught. Our method, inspired by human tutorial dialog, combines direct specification, demonstration, and experimentation. The human instructor demonstrates the skill being taught, while the agent observes the effects of the procedure on the simulated world. The agent then autonomously experiments with the procedure, making modifications to it, in order to understand the role of each step in the procedure. At various points the instructor can provide clarifications, and modify the developing procedural description as needed. This method is realized in a system called Diligent, which acquires procedural knowledge for the STEVE animated pedagogical agent.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Rickel, Jeff
Embodied Agents for Multi-party Dialogue in Immersive Virtual Worlds Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Bologna, Italy, 2002.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_embodied_2002,
  author    = {David Traum and Jeff Rickel},
  title     = {Embodied Agents for Multi-party Dialogue in Immersive Virtual Worlds},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  address   = {Bologna, Italy},
  year      = {2002},
  date      = {2002-07-01},
  url       = {http://ict.usc.edu/pubs/Embodied%20Agents%20for%20Multi-party%20Dialogue%20in%20Immersive%20%20Virtual%20Worlds.pdf},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Kovar, Lucas; Gleicher, Michael; Pighin, Frédéric
Motion Graphs Proceedings Article
In: Proceedings of SIGGRAPH '02, San Antonio, TX, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{kovar_motion_2002,
title = {Motion Graphs},
author = {Lucas Kovar and Michael Gleicher and Frédéric Pighin},
url = {http://ict.usc.edu/pubs/Motion%20Graphs.pdf},
year = {2002},
date = {2002-07-01},
booktitle = {Proceedings of SIGGRAPH '02},
address = {San Antonio, TX},
abstract = {In this paper we present a novel method for creating realistic, controllable motion. Given a corpus of motion capture data, we automatically construct a directed graph called a motion graph that encapsulates connections among the database. The motion graph consists both of pieces of original motion and automatically generated transitions. Motion can be generated simply by building walks on the graph. We present a general framework for extracting particular graph walks that meet a user's specifications. We then show how this framework can be applied to the specific problem of generating different styles of locomotion along arbitrary paths.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul; Wenger, Andreas; Tchou, Chris; Gardner, Andrew; Waese, Jamie; Hawkins, Tim
A Lighting Reproduction Approach to Live-Action Compositing Proceedings Article
In: SIGGRAPH 2002, pp. 547–556, San Antonio, TX, 2002.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{debevec_lighting_2002,
title = {A Lighting Reproduction Approach to Live-Action Compositing},
author = {Paul Debevec and Andreas Wenger and Chris Tchou and Andrew Gardner and Jamie Waese and Tim Hawkins},
url = {http://ict.usc.edu/pubs/A%20Lighting%20Reproduction%20Approach%20to%20Live-Action%20Compositing.pdf},
year = {2002},
date = {2002-07-01},
booktitle = {SIGGRAPH 2002},
pages = {547--556},
address = {San Antonio, TX},
abstract = {We describe a process for compositing a live performance of an actor into a virtual set wherein the actor is consistently illuminated by the virtual environment. The Light Stage used in this work is a two-meter sphere of inward-pointing RGB light emitting diodes focused on the actor, where each light can be set to an arbitrary color and intensity to replicate a real-world or virtual lighting environment. We implement a digital two-camera infrared matting system to composite the actor into the background plate of the environment without affecting the visible-spectrum illumination on the actor. The color response of the system is calibrated to produce correct color renditions of the actor as illuminated by the environment. We demonstrate moving-camera composites of actors into real-world environments and virtual sets such that the actor is properly illuminated by the environment into which they are composited.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling the Influence of Emotion on Belief for Virtual Training Simulations Proceedings Article
In: Proceedings of the 11th Conference on Computer Generated Forces and Behavioral Simulation, Orlando, FL, 2002.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_modeling_2002,
title = {Modeling the Influence of Emotion on Belief for Virtual Training Simulations},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Modeling%20the%20influence%20of%20emotion.pdf},
year = {2002},
date = {2002-06-01},
booktitle = {Proceedings of the 11th Conference on Computer Generated Forces and Behavioral Simulation},
address = {Orlando, FL},
abstract = {Recognizing and managing emotion in oneself and in those under one's command is an important component of leadership training. Most computational models of emotion have focused on the problem of identifying emotional features of the physical environment and mapping that into motivations to act in the world. But emotions also influence how we perceive the world and how we communicate that perception to others. This paper outlines an initial computational foray into this more vexing problem.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; Lent, Michael
Virtual Humans as Participants vs. Virtual Humans as Actors Proceedings Article
In: AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment, Stanford University, 2002.
Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_virtual_2002,
  author    = {Andrew S. Gordon and Michael Lent},
  title     = {Virtual Humans as Participants vs. Virtual Humans as Actors},
  booktitle = {AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment},
  address   = {Stanford University},
  year      = {2002},
  date      = {2002-03-01},
  url       = {http://ict.usc.edu/pubs/Virtual%20Humans%20as%20Participants%20vs%20Virtual%20Humans%20as%20Actors.PDF},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Gordon, Andrew S.
Enabling and recognizing strategic play in strategy games: Lessons from Sun Tzu Proceedings Article
In: The 2002 AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment, Stanford University, 2002.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_enabling_2002,
title = {Enabling and recognizing strategic play in strategy games: Lessons from {Sun Tzu}},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Enabling%20and%20recognizing%20strategic%20play%20in%20strategy%20games-%20Lessons%20from%20Sun%20Tzu.PDF},
year = {2002},
date = {2002-03-01},
booktitle = {The 2002 AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment},
address = {Stanford University},
abstract = {The interactive entertainment genre of the strategy game entertains users by allowing them to engage in strategic play, which should encourage game designers to devote development efforts toward facilitating users that wish to employ commonsense strategies, and to recognize and react to specific user strategies during game play. This paper attempts to facilitate these development efforts by identifying and analyzing 43 strategies from Sun Tzu's The Art of War, which are broadly applicable across games in the strategy game genre. For each strategy, a set of specific actions are identified that should be provided to users to enable their execution, along with generalized recognition rules that can facilitate the design of entertaining responses to users' strategic behavior. Consideration of how the enabling actions could be incorporated into an existing strategy game is provided.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Fleischman, Michael; Hovy, Eduard
Emotional Variation in Speech-Based Natural Language Generation Proceedings Article
In: International Natural Language Generation Conference, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{fleischman_emotional_2002,
  author    = {Michael Fleischman and Eduard Hovy},
  title     = {Emotional Variation in Speech-Based Natural Language Generation},
  booktitle = {International Natural Language Generation Conference},
  year      = {2002},
  date      = {2002-01-01},
  url       = {http://ict.usc.edu/pubs/Emotional%20Variation%20in%20Speech-Based%20Natural%20Language%20Generation.pdf},
  abstract  = {We present a framework for handling emotional variations in a speech-based natural language system for use in the MRE virtual training environment. The system is a first step toward addressing issues in emotion-based modeling of verbal communicative behavior. We cast the problem of emotional generation as a distance minimization task, in which the system chooses between multiple valid realizations for a given input based on the emotional distance of each realization from the speaker's attitude toward that input.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Debevec, Paul
A Tutorial on Image-Based Lighting Journal Article
In: IEEE Computer Graphics and Applications, 2002.
Links | BibTeX | Tags: Graphics
@article{debevec_tutorial_2002,
  author    = {Paul Debevec},
  title     = {A Tutorial on Image-Based Lighting},
  journal   = {IEEE Computer Graphics and Applications},
  year      = {2002},
  date      = {2002-01-01},
  url       = {http://ict.usc.edu/pubs/Image-Based%20Lighting.pdf},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {article},
}
Hill, Randall W.; Han, Changhee; Lent, Michael
Applying Perceptually Driven Cognitive Mapping To Virtual Urban Environments Proceedings Article
In: Proceedings of 14th Innovative Applications of Artificial Intelligence Conference, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{hill_applying_2002,
title = {Applying Perceptually Driven Cognitive Mapping To Virtual Urban Environments},
author = {Randall W. Hill and Changhee Han and Michael Lent},
url = {http://ict.usc.edu/pubs/Applying%20Perceptually%20Driven%20Cognitive%20Mapping%20To%20Virtual%20Urban%20Environments.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {Proceedings of 14th Innovative Applications of Artificial Intelligence Conference},
abstract = {This paper describes a method for building a cognitive map of a virtual urban environment. Our routines enable virtual humans to map their environment using a realistic model of perception. We based our implementation on a computational framework proposed by Yeap and Jefferies (Yeap \& Jefferies 1999) for representing a local environment as a structure called an Absolute Space Representation (ASR). Their algorithms compute and update ASRs from a 2-1/2D 1 sketch of the local environment, and then connect the ASRs together to form a raw cognitive map. Our work extends the framework developed by Yeap and Jefferies in three important ways. First, we implemented the framework in a virtual training environment, the Mission Rehearsal Exercise (Swartout et al. 2001). Second, we describe a method for acquiring a 2- 1/2D sketch in a virtual world, a step omitted from their framework, but which is essential for computing an ASR. Third, we extend the ASR algorithm to map regions that are partially visible through exits of the local space. Together, the implementation of the ASR algorithm along with our extensions will be useful in a wide variety of applications involving virtual humans and agents who need to perceive and reason about spatial concepts in urban environments.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David
Ideas on Multi-layer Dialogue Management for Multi-party, Multi-conversation, Multi-modal Communication Proceedings Article
In: Computational Linguistics in the Netherlands 2001: Selected Papers from the Twelfth CLIN Meeting, 2002.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_ideas_2002,
title = {Ideas on Multi-layer Dialogue Management for Multi-party, Multi-conversation, Multi-modal Communication},
author = {David Traum},
url = {http://ict.usc.edu/pubs/Ideas%20on%20Multi-layer%20Dialogue%20Management%20for%20Multi-party,%20Multi-conversation,%20Multi-modal%20Communication.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {Computational Linguistics in the Netherlands 2001: Selected Papers from the Twelfth CLIN Meeting},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
A step toward irrationality: using emotion to change belief Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 334–341, Bologna, Italy, 2002.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_step_2002,
title = {A step toward irrationality: using emotion to change belief},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/A%20step%20toward%20irrationality-%20using%20emotion%20to%20change%20belief.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
volume = {1},
pages = {334--341},
address = {Bologna, Italy},
abstract = {Emotions have a powerful impact on behavior and beliefs. The goal of our research is to create general computational models of this interplay of emotion, cognition and behavior to inform the design of virtual humans. Here, we address an aspect of emotional behavior that has been studied extensively in the psychological literature but largely ignored by computational approaches, emotion-focused coping. Rather than motivating external action, emotion-focused coping strategies alter beliefs in response to strong emotions. For example an individual may alter beliefs about the importance of a goal that is being threatened, thereby reducing their distress. We present a preliminary model of emotion-focused coping and discuss how coping processes, in general, can be coupled to emotions and behavior. The approach is illustrated within a virtual reality training environment where the models are used to create virtual human characters in high-stress social situations.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Pighin, Frédéric; Szeliski, Richard; Salesin, David H.
Modeling and Animating Realistic Faces from Images Journal Article
In: International Journal on Computer Vision, vol. 50, pp. 143–169, 2002.
Abstract | Links | BibTeX | Tags:
@article{pighin_modeling_2002,
title = {Modeling and Animating Realistic Faces from Images},
author = {Frédéric Pighin and Richard Szeliski and David H. Salesin},
url = {http://ict.usc.edu/pubs/Modeling%20and%20Animating%20Realistic%20Faces%20from%20Images.pdf},
year = {2002},
date = {2002-01-01},
journal = {International Journal on Computer Vision},
volume = {50},
pages = {143--169},
abstract = {We present a new set of techniques for modeling and animating realistic faces from photographs and videos. Given a set of face photographs taken simultaneously, our modeling technique allows the interactive recovery of a textured 3D face model. By repeating this process for several facial expressions, we acquire a set of faces models that can be linearly combined to express a wide range of expressions. Given a video sequence, this linear face model can be used to estimate the face position, orientation, and facial expression at each frame. We illustrate these techniques on several datasets and demonstrate robust estimations of detailed face geometry and motion.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan
Details of the CFOR Planner Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 01.2002, 2002.
Links | BibTeX | Tags: Virtual Humans
@techreport{gratch_details_2002,
title = {Details of the {CFOR} Planner},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Details%20of%20the%20CFOR%20Planner.pdf},
year = {2002},
date = {2002-01-01},
number = {ICT TR 01.2002},
address = {Marina del Rey, CA},
institution = {University of Southern California Institute for Creative Technologies},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Hill, Randall W.; Kim, Youngjun; Gratch, Jonathan
Anticipating where to look: predicting the movements of mobile agents in complex terrain Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 821–827, Bologna, Italy, 2002.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{hill_anticipating_2002,
title = {Anticipating where to look: predicting the movements of mobile agents in complex terrain},
author = {Randall W. Hill and Youngjun Kim and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Anticipating%20Where%20to%20Look-%20Predicting%20the%20Movements%20of%20Mobile%20Agents%20in%20Complex%20Terrain.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
volume = {2},
pages = {821--827},
address = {Bologna, Italy},
abstract = {This paper describes a method for making short-term predictions about the movement of mobile agents in complex terrain. Virtual humans need this ability in order to shift their visual attention between dynamic objects-predicting where an object will be located a few seconds in the future facilitates the visual reacquisition of the target object. Our method takes into account environmental cues in making predictions and it also indicates how long the prediction is valid, which varies depending on the context. We implemented this prediction technique in a virtual pilot that flies a helicopter in a synthetic environment.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rickel, Jeff; Marsella, Stacy C.; Gratch, Jonathan; Hill, Randall W.; Traum, David; Swartout, William
Toward a New Generation of Virtual Humans for Interactive Experiences Journal Article
In: IEEE Intelligent Systems, 2002.
Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{rickel_toward_2002,
  author    = {Jeff Rickel and Stacy C. Marsella and Jonathan Gratch and Randall W. Hill and David Traum and William Swartout},
  title     = {Toward a New Generation of Virtual Humans for Interactive Experiences},
  journal   = {IEEE Intelligent Systems},
  year      = {2002},
  date      = {2002-01-01},
  url       = {http://ict.usc.edu/pubs/Toward%20a%20New%20Generation%20of%20Virtual%20Humans%20for%20Interactive%20Experiences.pdf},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article},
}
Rickel, Jeff; Johnson, W. Lewis
Extending Virtual Humans to Support Team Training in Virtual Reality Book Section
In: Lakemeyer, G.; Nebel, B. (Ed.): Exploring Artificial Intelligence in the New Millennium, Morgan Kaufmann Publishers, San Francisco, CA, 2002.
Abstract | Links | BibTeX | Tags:
@incollection{rickel_extending_2002,
title = {Extending Virtual Humans to Support Team Training in Virtual Reality},
author = {Jeff Rickel and W. Lewis Johnson},
editor = {G. Lakemeyer and B. Nebel},
url = {http://ict.usc.edu/pubs/Extending%20Virtual%20Humans%20to%20Support%20Team%20Training%20in%20Virtual%20Reality.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {Exploring Artificial Intelligence in the New Millennium},
publisher = {Morgan Kaufmann Publishers},
address = {San Francisco, CA},
abstract = {This paper describes the use of virtual humans and distributed virtual reality to support team training, where students must learn their individual role in the team as well as how to coordinate their actions with their teammates. Students, instructors, and virtual humans cohabit a three-dimensional, interactive, simulated mock-up of their work environment, where they can practice together in realistic situations. The virtual humans can serve as instructors for individual students, and they can substitute for missing team members, allowing students to practice team tasks when some or all human instructors and teammates are unavailable. The paper describes our learning environment, the issues that arise in developing virtual humans for team training, and our design for the virtual humans, which is an extension of our Steve agent previously used for one-on-one tutoring.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Morie, Jacquelyn; Iyer, Kumar; Valanejad, R.; Sadek, Ramy; Miraglia, D.; Milam, D.
Emotionally Evocative Environments for Training Proceedings Article
In: Proceedings of the 23rd Army Science Conference, Orlando, FL, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{morie_emotionally_2002,
title = {Emotionally Evocative Environments for Training},
author = {Jacquelyn Morie and Kumar Iyer and R. Valanejad and Ramy Sadek and D. Miraglia and D. Milam},
url = {http://ict.usc.edu/pubs/EMOTIONALLY%20EVOCATIVE%20ENVIRONMENTS%20FOR%20TRAINING.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {Proceedings of the 23rd Army Science Conference},
address = {Orlando, FL},
abstract = {This paper describes a project currently in progress at the University of Southern California's Institute for Creative Technologies (ICT). Much of the research at ICT involves developing better graphics, sound and artificial intelligence to be used in creating the next generation of training tools for the United States Army. Our project focuses on the use of emotional responses as an enhancement for training. Research indicates that an emotional connection is a strong factor in how and what we remember. In addition, real world situations often evoke surprising and significant emotional reactions that soldiers must deal with. Few current immersive training scenarios, however, focus on the emotional state of the trainee, limiting training scenarios to basic objective elements. The Sensory Environments Evaluation (SEE) Project at ICT is investigating the potential of emotionally compelling environments for more effective training. We do this by skillfully combining the sensory inputs available in virtual environments. Our current efforts concentrate on sight and sound; smell will be included as scent delivery methods improve. Evaluation studies are planned to determine the effectiveness of the techniques we are developing.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Rickel, Jeff; André, Elisabeth; Cassell, Justine; Petajan, Eric; Badler, Norman
Creating Interactive Virtual Humans: Some Assembly Required Journal Article
In: IEEE Intelligent Systems, pp. 54–63, 2002.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{gratch_creating_2002,
title = {Creating Interactive Virtual Humans: Some Assembly Required},
author = {Jonathan Gratch and Jeff Rickel and Elisabeth André and Justine Cassell and Eric Petajan and Norman Badler},
url = {http://ict.usc.edu/pubs/Creating%20Interactive%20Virtual%20Humans-%20Some%20Assembly%20Required.pdf},
year = {2002},
date = {2002-01-01},
journal = {IEEE Intelligent Systems},
pages = {54--63},
abstract = {Science fiction has long imagined a future populated with artificial humans–human-looking devices with human-like intelligence. Although Asimov's benevolent robots and the Terminator movies' terrible war machines are still a distant fantasy, researchers across a wide range of disciplines are beginning to work together toward a more modest goal–building virtual humans. These software entities look and act like people and can engage in conversation and collaborative tasks, but they live in simulated environments. With the untidy problems of sensing and acting in the physical world thus dispensed, the focus of virtual human research is on capturing the richness and dynamics of human behavior.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
2001
Bharitkar, Sunil; Kyriakakis, Chris
Robustness of the Eigenfilter for Variations in Listener Responses for Selective Signal Cancellation Proceedings Article
In: IEEE Workshop on Applications of Signal Processing to Audio and Acoustics, New Paltz, New York, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_robustness_2001,
title = {Robustness of the Eigenfilter for Variations in Listener Responses for Selective Signal Cancellation},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/ROBUSTNESS%20OF%20THE%20EIGENFILTER%20FOR%20VARIATIONS%20IN%20LISTENER%20RESPONSES%20FOR%20SELECTIVE%20SIGNAL%20CANCELLATION.pdf},
year = {2001},
date = {2001-10-01},
booktitle = {IEEE Workshop on Applications of Signal Processing to Audio and Acoustics},
address = {New Paltz, New York},
abstract = {Selectively cancelling signals at specific locations within an acoustical environment with multiple listeners is of significant importance for home theater, automobile, teleconferencing, office, industrial and other applications. We have proposed the eigenfilter for selectively cancelling signals in one direction, while attempting to retain them at unintentional directions. In this paper we investigate the behaviour of the performance measure (i.e., the gain) for a vowel and an unvoiced fricative, when the listener moves his head, in an automobile type environment. We show that in such a situation, a large energy in the difference between the impulse responses at a listener's location may affect the gain substantially. listeners in which only a subset wish to listen to the audio signal.},
internal-note = {NOTE(review): the final abstract sentence ("listeners in which only a subset wish to listen to the audio signal.") appears to be a dangling text-extraction fragment; verify against the published abstract before correcting},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Douglas, Jay
Adaptive narrative: How autonomous agents, hollywood, and multiprocessing operating systems can live happily ever after Proceedings Article
In: Proceedings of International Conference on Virtual Storytelling, pp. 100–112, Avignon, France, 2001, ISBN: 3-540-42611-6.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_adaptive_2001,
title = {Adaptive narrative: How autonomous agents, hollywood, and multiprocessing operating systems can live happily ever after},
author = {Jonathan Gratch and Jay Douglas},
url = {http://ict.usc.edu/pubs/Adaptive%20Narrative-%20How%20Autonomous%20Agents,%20Hollywood,%20and%20Multiprocessing%20Operating%20Systems%20Can%20Live%20Happily%20Ever%20After.pdf},
doi = {10.1007/3-540-45420-9_12},
isbn = {3-540-42611-6},
year = {2001},
date = {2001-10-01},
booktitle = {Proceedings of International Conference on Virtual Storytelling},
pages = {100--112},
address = {Avignon, France},
series = {LNCS},
abstract = {Interacting Storytelling systems integrate AI techniques such as planning with narrative representations to generate stories. In this paper, we discuss the use of planning formalisms in Interactive Storytelling from the perspective of story generation and authoring. We compare two different planning formalisms, Hierarchical Task Network (HTN) planning and Heuristic Search Planning (HSP). While HTN provide a strong basis for narrative coherence in the context of interactivity, HSP offer additional flexibility and the generation of stories and the mechanisms for generating comic situations.},
internal-note = {NOTE(review): this abstract (HTN vs. HSP planning formalisms) does not appear to match the paper's title or the abstract of the same-titled entry douglas_adaptive_2001; presumably pasted from a different Virtual Storytelling paper. Verify against the publication},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rickel, Jeff
Intelligent Virtual Agents for Education and Training: Opportunities and Challenges Proceedings Article
In: Intelligent Virtual Agents: The 3rd International Workshop, Madrid, Spain, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{rickel_intelligent_2001,
title = {Intelligent Virtual Agents for Education and Training: Opportunities and Challenges},
author = {Jeff Rickel},
url = {http://ict.usc.edu/pubs/Intelligent%20Virtual%20Agents%20for%20Education%20and%20Training-%20Opportunities%20and%20Challenges.pdf},
year = {2001},
date = {2001-09-01},
booktitle = {Intelligent Virtual Agents: The 3rd International Workshop},
address = {Madrid, Spain},
abstract = {Interactive virtual worlds provide a powerful medium for experiential learning. Intelligent virtual agents can cohabit virtual worlds with people and facilitate such learning as guides, mentors, and teammates. This paper reviews the main pedagogical advantages of animated agents in virtual worlds, discusses two key research challenges, and outlines an ambitious new project addressing those challenges.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bharitkar, Sunil; Kyriakakis, Chris
New Factors in Room Equalization Using a Fuzzy Logic Approach Proceedings Article
In: Proceedings of the Audio Engineering Society Convention, New York, NY, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_new_2001,
title = {New Factors in Room Equalization Using a Fuzzy Logic Approach},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/New%20Factors%20in%20Room%20Equalization%20Using%20a%20Fuzzy%20Logic%20Approach.pdf},
year = {2001},
date = {2001-09-01},
booktitle = {Proceedings of the Audio Engineering Society Convention},
address = {New York, NY},
abstract = {Room acoustical modes, particularly in small rooms, cause a significant variation in the room responses measured at different locations. Responses measured only a few cm apart can vary by up to 15-20 dB at certain frequencies. This makes it difficult to equalize an audio system for multiple simultaneous listeners. Previous methods have utilized multiple microphones and spatial averaging with equal weighting. In this paper we present a different multiple point equalization method. We first determine representative prototypical room responses derived from several room responses that share similar characteristics, using the fuzzy unsupervised learning method. These prototypical responses can then be combined to form a general point response. When we use the inverse of the general point response as an equalizing filter, our results show a significant improvement in equalization performance over the spatial averaging methods. This simultaneous equalization is achieved by suppressing the peaks in the room magnitude spectrums. Applications of this method thus include equalization and multiple point sound control at home and in automobiles.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan; Rickel, Jeff
The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans Proceedings Article
In: Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_effect_2001,
title = {The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans},
author = {Stacy C. Marsella and Jonathan Gratch and Jeff Rickel},
url = {http://ict.usc.edu/pubs/The%20Effect%20of%20Affect-%20Modeling%20the%20Impact%20of%20Emotional%20State%20on%20the%20Behavior%20of%20Interactive%20Virtual%20Humans.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents},
address = {Montreal, Canada},
abstract = {A person's behavior provides significant information about their emotional state, attitudes, and attention. Our goal is to create virtual humans that convey such information to people while interacting with them in virtual worlds. The virtual humans must respond dynamically to the events surrounding them, which are fundamentally influenced by users' actions, while providing an illusion of human-like behavior. A user must be able to interpret the dynamic cognitive and emotional state of the virtual humans using the same nonverbal cues that people use to understand one another. Towards these goals, we are integrating and extending components from three prior systems: a virtual human architecture with a range of cognitive and motor capabilities, a model of emotional appraisal, and a model of the impact of emotional state on physical behavior. We describe the key research issues, our approach, and an initial implementation in an Army peacekeeping scenario.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Douglas, Jay; Gratch, Jonathan
Adaptive Narrative: How Autonomous Agents, Hollywood, and Multiprocessing Operating Systems Can Live Happily Ever After Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{douglas_adaptive_2001,
title = {Adaptive Narrative: How Autonomous Agents, Hollywood, and Multiprocessing Operating Systems Can Live Happily Ever After},
author = {Jay Douglas and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Adaptive%20Narrative-%20How%20Autonomous%20Agents,%20Hollywood,%20and%20Multiprocessing%20Operating%20Systems%20Can%20Live%20Happily%20Ever%20After.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
address = {Montreal, Canada},
abstract = {Creating dramatic narratives for real-time virtual reality environments is complicated by the lack of temporal distance between the occurrence of an event and its telling in the narrative. This paper describes the application of a multiprocessing operating system architecture to the creation of adaptive narratives, narratives that use autonomous actors or agents to create real-time dramatic experiences for human interactors. We also introduce the notion of dramatic acts and dramatic functions and indicate their use in constructing this real-time drama.},
internal-note = {NOTE(review): appears to duplicate entry gratch_adaptive_2001 (same title and URL, swapped author order, different venue metadata); both keys may be cited, so verify which record is correct before merging or deleting},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Cohen, Jonathan; Tchou, Chris; Hawkins, Tim; Debevec, Paul
Real-Time High-Dynamic Range Texture Mapping Proceedings Article
In: Eurographics Rendering Workshop, 2001.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{cohen_real-time_2001,
  title     = {Real-Time High-Dynamic Range Texture Mapping},
  author    = {Jonathan Cohen and Chris Tchou and Tim Hawkins and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Real-Time%20High-Dynamic%20Range%20Texture%20Mapping.pdf},
  year      = {2001},
  date      = {2001-06-01},
  booktitle = {Eurographics Rendering Workshop},
  abstract  = {This paper presents a technique for representing and displaying high dynamic-range texture maps (HDRTMs) using current graphics hardware. Dynamic range in real-world environments often far exceeds the range representable in 8-bit per-channel texture maps. The increased realism afforded by a high-dynamic range representation provides improved fidelity and expressiveness for interactive visualization of image-based models. Our technique allows for real-time rendering of scenes with arbitrary dynamic range, limited only by available texture memory. In our technique, high-dynamic range textures are decomposed into sets of 8- bit textures. These 8-bit textures are dynamically reassembled by the graphics hardware's programmable multitexturing system or using multipass techniques and framebuffer image processing. These operations allow the exposure level of the texture to be adjusted continuously and arbitrarily at the time of rendering, correctly accounting for the gamma curve and dynamic range restrictions of the display device. Further, for any given exposure only two 8-bit textures must be resident in texture memory simultaneously. We present implementation details of this technique on various 3D graphics hardware architectures. We demonstrate several applications, including high-dynamic range panoramic viewing with simulated auto-exposure, real-time radiance environment mapping, and simulated Fresnel reflection.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Swartout, William; Hill, Randall W.; Gratch, Jonathan; Johnson, W. Lewis; Kyriakakis, Chris; Labore, Catherine; Lindheim, Richard; Marsella, Stacy C.; Miraglia, D.; Moore, Bridget; Morie, Jacquelyn; Rickel, Jeff; Thiebaux, Marcus; Tuch, L.; Whitney, Richard; Douglas, Jay
Toward the Holodeck: Integrating Graphics, Sound, Character and Story Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans, Virtual Worlds
@inproceedings{swartout_toward_2001,
title = {Toward the Holodeck: Integrating Graphics, Sound, Character and Story},
author = {William Swartout and Randall W. Hill and Jonathan Gratch and W. Lewis Johnson and Chris Kyriakakis and Catherine Labore and Richard Lindheim and Stacy C. Marsella and D. Miraglia and Bridget Moore and Jacquelyn Morie and Jeff Rickel and Marcus Thiebaux and L. Tuch and Richard Whitney and Jay Douglas},
url = {http://ict.usc.edu/pubs/Toward%20the%20Holodeck-%20Integrating%20Graphics,%20Sound,%20Character%20and%20Story.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
address = {Montreal, Canada},
abstract = {We describe an initial prototype of a holodeck-like environment that we have created for the Mission Rehearsal Exercise Project. The goal of the project is to create an experience learning system where the participants are immersed in an environment where they can encounter the sights, sounds, and circumstances of realworld scenarios. Virtual humans act as characters and coaches in an interactive story with pedagogical goals.},
keywords = {Social Simulation, Virtual Humans, Virtual Worlds},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Modeling Emotions in the Mission Rehearsal Exercise Proceedings Article
In: Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation, pp. 457–466, Orlando, FL, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_modeling_2001,
title = {Modeling Emotions in the Mission Rehearsal Exercise},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Modeling%20Emotions%20in%20the%20Mission%20Rehearsal%20Exercise.pdf},
year = {2001},
date = {2001-05-01},
booktitle = {Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation},
pages = {457–466},
address = {Orlando, FL},
abstract = {This paper discusses our attempts to model realistic human behavior in the context of the Mission Rehearsal Exercise system (MRE), a high-end virtual training environment designed to support dismounted infantry training between a human participant and elements of his command. The system combines immersive graphics, sound, and interactive characters controlled by artificial intelligence programs. Our goal in this paper is to show how some of the daunting subtlety in human behavior can be modeled by intelligent agents and in particular to focus on the role of modeling typical human emotional responses to environmental stimuli.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ligorio, M. Beatrice; Mininni, Giuseppe; Traum, David
Interlocution Scenarios for Problem Solving in an Educational MUD Environment Proceedings Article
In: 1st European Conference on Computer-Supported Collaborative Learning, 2001.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{ligorio_interlocution_2001,
title = {Interlocution Scenarios for Problem Solving in an Educational MUD Environment},
author = {M. Beatrice Ligorio and Giuseppe Mininni and David Traum},
url = {http://ict.usc.edu/pubs/INTERLOCUTION%20SCENARIOS%20FOR%20PROBLEM%20SOLVING%20IN%20AN%20EDUCATIONAL%20MUD%20ENVIRONMENT.pdf},
year = {2001},
date = {2001-03-01},
booktitle = {1st European Conference on Computer-Supported Collaborative Learning},
abstract = {This paper presents an analysis of computer mediated collaboration on a problem-solving task in a virtual world. The theoretical framework of this research combines research in Computer Mediated Communication with a social psychology theory of conflict. An experiment was conducted involving universitybstudents performing a problem solving task with a peer in an Educational MUD. Each performance was guided by a predefined script, designed based on the 'common speech' concepts. Al the performances were analyzed in terms of identity perception, conflict perception and cooperation. By looking at the relationship among the CMC environment features, the social influence activated on this environment, the conflict elaboration, and the problem solving strategies, a distinctive 'interlocution scenario' emerged. The results are discussed using contributions from the two theoretical approaches embraced.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}