Publications
Search
Swartout, William; Nye, Benjamin D.; Hartholt, Arno; Reilly, Adam; Graesser, Arthur C.; VanLehn, Kurt; Wetzel, Jon; Liewer, Matt; Morbini, Fabrizio; Morgan, Brent; Wang, Lijia; Benn, Grace; Rosenberg, Milton
Designing a Personal Assistant for Life-Long Learning (PAL3) Proceedings Article
In: Proceedings of The Twenty-Ninth International Flairs Conference, pp. 491–496, AAAI Press, Key Largo, FL, 2016, ISBN: 978-1-57735-756-8.
@inproceedings{swartout_designing_2016,
title = {Designing a Personal Assistant for Life-Long Learning (PAL3)},
author = {William Swartout and Benjamin D. Nye and Arno Hartholt and Adam Reilly and Arthur C. Graesser and Kurt VanLehn and Jon Wetzel and Matt Liewer and Fabrizio Morbini and Brent Morgan and Lijia Wang and Grace Benn and Milton Rosenberg},
url = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS16/paper/view/12793},
isbn = {978-1-57735-756-8},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of The Twenty-Ninth International Flairs Conference},
pages = {491--496},
publisher = {AAAI Press},
address = {Key Largo, FL},
abstract = {Learners’ skills decay during gaps in instruction, since they lack the structure and motivation to continue studying. To meet this challenge, the PAL3 system was designed to accompany a learner throughout their career and mentor them to build and maintain skills through: 1) the use of an embodied pedagogical agent (Pal), 2) a persistent learning record that drives a student model which estimates forgetting, 3) an adaptive recommendation engine linking to both intelligent tutors and traditional learning resources, and 4) game-like mechanisms to promote engagement (e.g., leaderboards, effort-based point rewards, unlocking customizations). The design process for PAL3 is discussed, from the perspective of insights and revisions based on a series of formative feedback and evaluation sessions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Casas, Dan; Feng, Andrew; Alexander, Oleg; Fyffe, Graham; Debevec, Paul; Ichikari, Ryosuke; Li, Hao; Olszewski, Kyle; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshape Modeling from RGB-D Sensors Proceedings Article
In: Proceedings of the 29th International Conference on Computer Animation and Social Agents, pp. 121–129, ACM Press, Geneva, Switzerland, 2016, ISBN: 978-1-4503-4745-7.
@inproceedings{casas_rapid_2016,
title = {Rapid Photorealistic Blendshape Modeling from RGB-D Sensors},
author = {Dan Casas and Andrew Feng and Oleg Alexander and Graham Fyffe and Paul Debevec and Ryosuke Ichikari and Hao Li and Kyle Olszewski and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2915926.2915936},
doi = {10.1145/2915926.2915936},
isbn = {978-1-4503-4745-7},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 29th International Conference on Computer Animation and Social Agents},
pages = {121--129},
publisher = {ACM Press},
address = {Geneva, Switzerland},
abstract = {Creating and animating realistic 3D human faces is an important element of virtual reality, video games, and other areas that involve interactive 3D graphics. In this paper, we propose a system to generate photorealistic 3D blendshape-based face models automatically using only a single consumer RGB-D sensor. The capture and processing requires no artistic expertise to operate, takes 15 seconds to capture and generate a single facial expression, and approximately 1 minute of processing time per expression to transform it into a blendshape model. Our main contributions include a complete end-to-end pipeline for capturing and generating photorealistic blendshape models automatically and a registration method that solves dense correspondences between two face scans by utilizing facial landmarks detection and optical flows. We demonstrate the effectiveness of the proposed method by capturing different human subjects with a variety of sensors and puppeteering their 3D faces with real-time facial performance retargeting. The rapid nature of our method allows for just-in-time construction of a digital face. To that end, we also integrated our pipeline with a virtual reality facial performance capture system that allows dynamic embodiment of the generated faces despite partial occlusion of the user’s real face by the head-mounted display.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Nazari, Zahra; Johnson, Emmanuel
The Misrepresentation Game: How to win at negotiation while seeming like a nice guy Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 728–737, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{gratch_misrepresentation_2016,
title = {The Misrepresentation Game: How to win at negotiation while seeming like a nice guy},
author = {Jonathan Gratch and Zahra Nazari and Emmanuel Johnson},
url = {http://dl.acm.org/citation.cfm?id=2937031},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {728--737},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Recently, interest has grown in agents that negotiate with people: to teach negotiation, to negotiate on behalf of people, and as a challenge problem to advance artificial social intelligence. Humans negotiate differently from algorithmic approaches to negotiation: people are not purely self-interested but place considerable weight on norms like fairness; people exchange information about their mental state and use this to judge the fairness of a social exchange; and people lie. Here, we focus on lying. We present an analysis of how people (or agents interacting with people) might optimally lie (maximally benefit themselves) while maintaining the illusion of fairness towards the other party. In doing so, we build on concepts from game theory and the preference-elicitation literature, but apply these to human, not rational, behavior. Our findings demonstrate clear benefits to lying and provide empirical support for a heuristic – the “fixed-pie lie” – that substantially enhances the efficiency of such deceptive algorithms. We conclude with implications and potential defenses against such manipulative techniques.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Rosoff, Heather; John, Richard S.
Semi-Automated Construction of Decision-Theoretic Models of Human Behavior Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 891–899, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{pynadath_semi-automated_2016,
title = {Semi-Automated Construction of Decision-Theoretic Models of Human Behavior},
author = {David V. Pynadath and Heather Rosoff and Richard S. John},
url = {http://dl.acm.org/citation.cfm?id=2937055},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {891--899},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Multiagent social simulation provides a powerful mechanism for policy makers to understand the potential outcomes of their decisions before implementing them. However, the value of such simulations depends on the accuracy of their underlying agent models. In this work, we present a method for automatically exploring a space of decision-theoretic models to arrive at a multiagent social simulation that is consistent with human behavior data. We start with a factored Partially Observable Markov Decision Process (POMDP) whose states, actions, and reward capture the questions asked in a survey from a disaster response scenario. Using input from domain experts, we construct a set of hypothesized dependencies that may or may not exist in the transition probability function. We present an algorithm to search through each of these hypotheses, evaluate their accuracy with respect to the data, and choose the models that best reflect the observed behavior, including individual differences. The result is a mechanism for constructing agent models that are grounded in human behavior data, while still being able to support hypothetical reasoning that is the main advantage of multiagent social simulation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Fyffe, Graham; Graham, Paul; Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Near-Instant Capture of High-Resolution Facial Geometry and Reflectance Journal Article
In: Computer Graphics Forum, 2016, ISSN: 1467-8659.
@article{fyffe_near-instant_2016,
  title    = {Near-Instant Capture of High-Resolution Facial Geometry and Reflectance},
  author   = {Graham Fyffe and Paul Graham and Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
  url      = {http://onlinelibrary.wiley.com/doi/10.1111/cgf.12837/abstract},
  doi      = {10.1111/cgf.12837},
  issn     = {1467-8659},
  year     = {2016},
  date     = {2016-05-01},
  journal  = {Computer Graphics Forum},
  abstract = {We present a near-instant method for acquiring facial geometry and reflectance using a set of commodity DSLR cameras and flashes. Our setup consists of twenty-four cameras and six flashes which are fired in rapid succession with subsets of the cameras. Each camera records only a single photograph and the total capture time is less than the 67ms blink reflex. The cameras and flashes are specially arranged to produce an even distribution of specular highlights on the face. We employ this set of acquired images to estimate diffuse color, specular intensity, specular exponent, and surface orientation at each point on the face. We further refine the facial base geometry obtained from multi-view stereo using estimated diffuse and specular photometric information. This allows final submillimeter surface mesostructure detail to be obtained via shape-from-specularity. The final system uses commodity components and produces models suitable for authoring high-quality digital human characters.},
  keywords = {},
  pubstate = {published},
  tppubtype = {article}
}
Mell, Johnathan; Gratch, Jonathan
IAGO: Interactive Arbitration Guide Online Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 1510–1512, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016.
@inproceedings{mell_iago_2016,
title = {IAGO: Interactive Arbitration Guide Online},
author = {Johnathan Mell and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=2937230},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {1510--1512},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Automated negotiation between two agents has been the subject of much research focused on optimization and efficiency. However, human-agent negotiation represents a field in which real-world considerations can be more fully explored. Furthermore, teaching negotiation and other interpersonal skills requires long periods of practice with open-ended dialogues and partners. The API presented in this paper represents a novel platform on which to conduct human-agent research and facilitate teaching negotiation tactics in a longitudinal way. We present a prototype demonstration that is real-time, rapidly distributable, and allows more actions than current platforms of negotiation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chollet, Mathieu; Wortwein, Torsten; Morency, Louis-Philippe; Scherer, Stefan
A Multimodal Corpus for the Assessment of Public Speaking Ability and Anxiety Proceedings Article
In: Proceedings of the LREC 2016, Tenth International Conference on Language Resources and Evaluation, pp. 488–495, European Language Resources Association, Portoroz, Slovenia, 2016, ISBN: 978-2-9517408-9-1.
@inproceedings{chollet_multimodal_2016,
title = {A Multimodal Corpus for the Assessment of Public Speaking Ability and Anxiety},
author = {Mathieu Chollet and Torsten Wortwein and Louis-Philippe Morency and Stefan Scherer},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/599_Paper.pdf},
isbn = {978-2-9517408-9-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the LREC 2016, Tenth International Conference on Language Resources and Evaluation},
pages = {488--495},
publisher = {European Language Resources Association},
address = {Portoroz, Slovenia},
abstract = {The ability to efficiently speak in public is an essential asset for many professions and is used in everyday life. As such, tools enabling the improvement of public speaking performance and the assessment and mitigation of anxiety related to public speaking would be very useful. Multimodal interaction technologies, such as computer vision and embodied conversational agents, have recently been investigated for the training and assessment of interpersonal skills. One central requirement for these technologies is multimodal corpora for training machine learning models. This paper addresses the need of these technologies by presenting and sharing a multimodal corpus of public speaking presentations. These presentations were collected in an experimental study investigating the potential of interactive virtual audiences for public speaking training. This corpus includes audio-visual data and automatically extracted features, measures of public speaking anxiety and personality, annotations of participants’ behaviors and expert ratings of behavioral aspects and overall performance of the presenters. We hope this corpus will help other research teams in developing tools for supporting public speaking training.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
de Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
“Do As I Say, Not As I Do:” Challenges in Delegating Decisions to Automated Agents Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 949–956, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016.
@inproceedings{de_melo_as_2016,
title = {“Do As I Say, Not As I Do:” Challenges in Delegating Decisions to Automated Agents},
author = {Celso M. de Melo and Stacy Marsella and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=2937063},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {949--956},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {There has been growing interest, across various domains, in computer agents that can decide on behalf of humans. These agents have the potential to save considerable time and help humans reach better decisions. One implicit assumption, however, is that, as long as the algorithms that simulate decision-making are correct and capture how humans make decisions, humans will treat these agents similarly to other humans. Here we show that interaction with agents that act on our behalf or on behalf of others is richer and more interesting than initially expected. Our results show that, on the one hand, people are more selfish with agents acting on behalf of others, than when interacting directly with others. We propose that agents increase the social distance with others which, subsequently, leads to increased demand. On the other hand, when people task an agent to interact with others, people show more concern for fairness than when interacting directly with others. In this case, higher psychological distance leads people to consider their social image and the long-term consequences of their actions and, thus, behave more fairly. To support these findings, we present an experiment where people engaged in the ultimatum game, either directly or via an agent, with others or agents representing others. We show that these patterns of behavior also occur in a variant of the ultimatum game – the impunity game – where others have minimal power over the final outcome. Finally, we study how social value orientation – i.e., people’s propensity for cooperation – impact these effects. These results have important implications for our understanding of the psychological mechanisms underlying interaction with agents, as well as practical implications for the design of successful agents that act on our behalf or on behalf of others.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Uryupina, Olga; Artstein, Ron; Bristot, Antonella; Cavicchio, Federica; Rodriguez, Kepa; Poesio, Massimo
ARRAU: Linguistically-Motivated Annotation of Anaphoric Descriptions Proceedings Article
In: Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016), pp. 2058–2062, European Language Resources Association (ELRA), Portorož, Slovenia, 2016.
@inproceedings{uryupina_arrau_2016,
title = {ARRAU: Linguistically-Motivated Annotation of Anaphoric Descriptions},
author = {Olga Uryupina and Ron Artstein and Antonella Bristot and Federica Cavicchio and Kepa Rodriguez and Massimo Poesio},
url = {http://www.lrec-conf.org/proceedings/lrec2016/summaries/1121.html},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)},
pages = {2058--2062},
publisher = {European Language Resources Association (ELRA)},
address = {Portorož, Slovenia},
abstract = {This paper presents a second release of the ARRAU dataset: a multi-domain corpus with thorough linguistically motivated annotation of anaphora and related phenomena. Building upon the first release almost a decade ago, a considerable effort had been invested in improving the data both quantitatively and qualitatively. Thus, we have doubled the corpus size, expanded the selection of covered phenomena to include referentiality and genericity and designed and implemented a methodology for enforcing the consistency of the manual annotation. We believe that the new release of ARRAU provides a valuable material for ongoing research in complex cases of coreference as well as for a variety of related tasks. The corpus is publicly available through LDC.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zarrieß, Sina; Hough, Julian; Kennington, Casey; Manuvinakurike, Ramesh; DeVault, David; Fernández, Raquel; Schlangen, David
PentoRef: A Corpus of Spoken References in Task-oriented Dialogues Proceedings Article
In: 10th edition of the Language Resources and Evaluation Conference, ELRA, Portorož, Slovenia, 2016.
@inproceedings{zarrieb_pentoref_2016,
title = {PentoRef: A Corpus of Spoken References in Task-oriented Dialogues},
author = {Sina Zarrieß and Julian Hough and Casey Kennington and Ramesh Manuvinakurike and David DeVault and Raquel Fernández and David Schlangen},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/563_Paper.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {10th edition of the Language Resources and Evaluation Conference},
publisher = {ELRA},
address = {Portorož, Slovenia},
abstract = {PentoRef is a corpus of task-oriented dialogues collected in systematically manipulated settings. The corpus is multilingual, with English and German sections, and overall comprises more than 20000 utterances. The dialogues are fully transcribed and annotated with referring expressions mapped to objects in corresponding visual scenes, which makes the corpus a rich resource for research on spoken referring expressions in generation and resolution. The corpus includes several sub-corpora that correspond to different dialogue situations where parameters related to interactivity, visual access, and verbal channel have been manipulated in systematic ways. The corpus thus lends itself to very targeted studies of reference in spontaneous dialogue.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Sayan; Laksana, Eugene; Morency, Louis-Philippe; Scherer, Stefan
Learning Representations of Affect from Speech Proceedings Article
In: ICLR 2016, ICLR, San Juan, Puerto Rico, 2016.
@inproceedings{ghosh_eugene_laksana_satan_learning_2016,
title = {Learning Representations of Affect from Speech},
author = {Sayan Ghosh and Eugene Laksana and Louis-Philippe Morency and Stefan Scherer},
url = {http://arxiv.org/pdf/1511.04747.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {ICLR 2016},
publisher = {ICLR},
address = {San Juan, Puerto Rico},
abstract = {There has been a lot of prior work on representation learning for speech recognition applications, but not much emphasis has been given to an investigation of effective representations of affect from speech, where the paralinguistic elements of speech are separated out from the verbal content. In this paper, we explore denoising autoencoders for learning paralinguistic attributes, i.e. categorical and dimensional affective traits from speech. We show that the representations learnt by the bottleneck layer of the autoencoder are highly discriminative of activation intensity and at separating out negative valence (sadness and anger) from positive valence (happiness). We experiment with different input speech features (such as FFT and log-mel spectrograms with temporal context windows), and different autoencoder architectures (such as stacked and deep autoencoders). We also learn utterance specific representations by a combination of denoising autoencoders and BLSTM based recurrent autoencoders. Emotion classification is performed with the learnt temporal/dynamic representations to evaluate the quality of the representations. Experiments on a well-established real-life speech dataset (IEMOCAP) show that the learnt representations are comparable to state of the art feature extractors (such as voice quality features and MFCCs) and are competitive with state-of-the-art approaches at emotion and dimensional affect recognition.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Konovalov, Vasily; Artstein, Ron; Melamud, Oren; Dagan, Ido
The Negochat Corpus of Human-agent Negotiation Dialogues Proceedings Article
In: Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016), pp. 3141–3145, European Language Resources Association (ELRA), Portorož, Slovenia, 2016.
@inproceedings{konovalov_negochat_2016,
title = {The Negochat Corpus of Human-agent Negotiation Dialogues},
author = {Vasily Konovalov and Ron Artstein and Oren Melamud and Ido Dagan},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/240_Paper.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)},
pages = {3141--3145},
publisher = {European Language Resources Association (ELRA)},
address = {Portorož, Slovenia},
abstract = {Annotated in-domain corpora are crucial to the successful development of dialogue systems of automated agents, and in particular for developing natural language understanding (NLU) components of such systems. Unfortunately, such important resources are scarce. In this work, we introduce an annotated natural language human-agent dialogue corpus in the negotiation domain. The corpus was collected using Amazon Mechanical Turk following the ‘Wizard-Of-Oz’ approach, where a ‘wizard’ human translates the participants’ natural language utterances in real time into a semantic language. Once dialogue collection was completed, utterances were annotated with intent labels by two independent annotators, achieving high inter-annotator agreement. Our initial experiments with an SVM classifier show that automatically inferring such labels from the utterances is far from trivial. We make our corpus publicly available to serve as an aid in the development of dialogue systems for negotiation agents, and suggest that analogous corpora can be created following our methodology and using our available source code. To the best of our knowledge this is the first publicly available negotiation dialogue corpus.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
de Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
People Don’t Feel Guilty About Exploiting Machines Journal Article
In: ACM Transactions on Computer-Human Interaction (TOCHI), vol. 23, no. 2, pp. 1–17, 2016, ISSN: 1073-0516.
@article{melo_people_2016,
title = {People Don’t Feel Guilty About Exploiting Machines},
author = {Celso M. de Melo and Stacy Marsella and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=2890495},
doi = {10.1145/2890495},
issn = {1073-0516},
year = {2016},
date = {2016-05-01},
journal = {ACM Transactions on Computer-Human Interaction (TOCHI)},
volume = {23},
number = {2},
pages = {1--17},
abstract = {Guilt and envy play an important role in social interaction. Guilt occurs when individuals cause harm to others or break social norms. Envy occurs when individuals compare themselves unfavorably to others and desire to benefit from the others’ advantage. In both cases, these emotions motivate people to act and change the status quo: following guilt, people try to make amends for the perceived transgression and, following envy, people try to harm envied others. In this paper, we present two experiments that study participants' experience of guilt and envy when engaging in social decision making with machines and humans. The results showed that, though experiencing the same level of envy, people felt considerably less guilt with machines than with humans. These effects occurred both with subjective and behavioral measures of guilt and envy, and in three different economic games: public goods, ultimatum, and dictator game. This poses an important challenge for human-computer interaction because, as shown here, it leads people to systematically exploit machines, when compared to humans. We discuss theoretical and practical implications for the design of human-machine interaction systems that hope to achieve the kind of efficiency – cooperation, fairness, reciprocity, etc. – we see in human-human interaction.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Pincus, Eli; Traum, David
Towards Automatic Identification of Effective Clues for Team Word-Guessing Games Proceedings Article
In: Proceedings of the Language Resources and Evaluation Conference (LREC), pp. 2741–2747, European Language Resources Association, Portorož, Slovenia, 2016.
@inproceedings{pincus_towards_2016,
title = {Towards Automatic Identification of Effective Clues for Team Word-Guessing Games},
author = {Eli Pincus and David Traum},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/762_Paper.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Language Resources and Evaluation Conference (LREC)},
pages = {2741--2747},
publisher = {European Language Resources Association},
address = {Portorož, Slovenia},
abstract = {Team word-guessing games where one player, the clue-giver, gives clues attempting to elicit a target-word from another player, the receiver, are a popular form of entertainment and also used for educational purposes. Creating an engaging computational agent capable of emulating a talented human clue-giver in a timed word-guessing game depends on the ability to provide effective clues (clues able to elicit a correct guess from a human receiver). There are many available web resources and databases that can be mined for the raw material for clues for target-words; however, a large number of those clues are unlikely to be able to elicit a correct guess from a human guesser. In this paper, we propose a method for automatically filtering a clue corpus for effective clues for an arbitrary target-word from a larger set of potential clues, using machine learning on a set of features of the clues, including point-wise mutual information between a clue’s constituent words and a clue’s target-word. The results of the experiments significantly improve the average clue quality over previous approaches, and bring quality rates in-line with measures of human clue quality derived from a corpus of human-human interactions. The paper also introduces the data used to develop this method; audio recordings of people making guesses after having heard the clues being spoken by a synthesized voice (Pincus and Traum, 2016).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gilani, Setareh Nasihati; Sheetz, Kraig; Lucas, Gale; Traum, David
What Kind of Stories Should a Virtual Human Swap? Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 1437–1438, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{nasihati_gilani_what_2016,
title = {What Kind of Stories Should a Virtual Human Swap?},
author = {Setareh Nasihati Gilani and Kraig Sheetz and Gale Lucas and David Traum},
url = {http://dl.acm.org/citation.cfm?id=2937198},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {1437--1438},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Stories are pervasive in conversation between people [5]. They are used to establish identity, pass on cultural heritage, and build rapport. Often stories are swapped when one conversational participant will reply to a story with a story. Stories are also told by virtual humans [1, 6, 2]. In creating or mining stories for a virtual human (VH) to tell, there are a number of considerations that come up about what kinds of stories should be told, and how the stories should be related to the virtual human's identity, such as whether the identity should be human or artificial, and whether the stories should be about the virtual human or about someone else. We designed a set of virtual human characters who can engage in a simple form of story-swapping. Each of the characters can engage in simple interactions such as greetings and closings and can respond to a set of “ice-breaker” questions, that might be used on a first date or similar “get to know you” encounter. For these questions the character's answer includes a story. We created 4 character response sets, to have all combinations of identity (human or artificial) and perspective (first person stories about the narrator, or third person stories about someone else). We also designed an experiment to try to explore the collective impact of above principles on people who interact with the characters. Participants interact with two of the above characters in a “get to know you” scenario. We investigate the degree of reciprocity where people respond to the character with their own stories, and also compare rapport of participants with the characters as well as the impressions of the character's personality.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Collins, Kathryn J.; Traum, David
Towards A Multi-Dimensional Taxonomy Of Stories In Dialogue Proceedings Article
In: Proceedings of the Language Resources and Evaluation Conference (LREC), pp. 118–124, European Language Resources Association, Portorož, Slovenia, 2016, ISBN: 978-2-9517408-9-1.
@inproceedings{collins_towards_2016,
  title     = {Towards A Multi-Dimensional Taxonomy Of Stories In Dialogue},
  author    = {Kathryn J. Collins and David Traum},
  url       = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/354_Paper.pdf},
  isbn      = {978-2-9517408-9-1},
  year      = {2016},
  date      = {2016-05-01},
  booktitle = {Proceedings of the Language Resources and Evaluation Conference (LREC)},
  pages     = {118–124},
  publisher = {European Language Resources Association},
  address   = {Portorož, Slovenia},
  abstract  = {In this paper, we present a taxonomy of stories told in dialogue. We based our scheme on prior work analyzing narrative structure and method of telling, relation to storyteller identity, as well as some categories particular to dialogue, such as how the story gets introduced. Our taxonomy currently has 5 major dimensions, with most having sub-dimensions - each dimension has an associated set of dimension-specific labels. We adapted an annotation tool for this taxonomy and have annotated portions of two different dialogue corpora, Switchboard and the Distress Analysis Interview Corpus. We present examples of some of the tags and concepts with stories from Switchboard, and some initial statistics of frequencies of the tags.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Lucas, Gale; Gratch, Jonathan; Stratou, Giota; Morency, Louis-Philippe; Chavez, Kenneth; Shilling, Russ; Scherer, Stefan
Automatic Behavior Analysis During a Clinical Interview with a Virtual Human. Journal Article
In: Medicine Meets Virtual Reality 22: NextMed/MMVR22, vol. 220, pp. 316–322, 2016.
@article{rizzo_automatic_2016,
title = {Automatic Behavior Analysis During a Clinical Interview with a Virtual Human},
author = {Albert Rizzo and Gale Lucas and Jonathan Gratch and Giota Stratou and Louis-Philippe Morency and Kenneth Chavez and Russ Shilling and Stefan Scherer},
url = {http://books.google.com/books?hl=en&lr=&id=sLgtDAAAQBAJ&oi=fnd&pg=PA316&dq=%22captured+across+a+20+minute+interview.+Results+from+of+sample+of+service%22+%22technology+for+clinical+purposes.+Recent+shifts+in+the+social+and%22+%22needed+to+create+VH+systems+is+now+driving+application+development+across%22+&ots=Ej8M4iuPfb&sig=Ad6Z3DPSwN3qA2gMDKWPe1YTPhg},
year = {2016},
date = {2016-04-01},
journal = {Medicine Meets Virtual Reality 22: NextMed/MMVR22},
volume = {220},
pages = {316–322},
abstract = {SimSensei is a Virtual Human (VH) interviewing platform that uses off-the-shelf sensors (i.e., webcams, Microsoft Kinect and a microphone) to capture and interpret real-time audiovisual behavioral signals from users interacting with the VH system. The system was specifically designed for clinical interviewing and health care support by providing a face-to-face interaction between a user and a VH that can automatically react to the inferred state of the user through analysis of behavioral signals gleaned from the user’s facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human-to-human interaction and communication, SimSensei aims to capture and infer user state from signals generated from user non-verbal communication to improve engagement between a VH and a user and to quantify user state from the data captured across a 20 minute interview. Results from of sample of service members (SMs) who were interviewed before and after a deployment to Afghanistan indicate that SMs reveal more PTSD symptoms to the VH than they report on the Post Deployment Health Assessment. Pre/Post deployment facial expression analysis indicated more sad expressions and few happy expressions at post deployment.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Talbot, Thomas B.; Kalisch, Nicolai; Christoffersen, Kelly; Lucas, Gale; Forbell, Eric
Natural Language Understanding Performance & Use Considerations in Virtual Medical Encounters. Journal Article
In: Medicine Meets Virtual Reality 22: NextMed/MMVR22, vol. 220, pp. 407–413, 2016.
@article{talbot_natural_2016,
title = {Natural Language Understanding Performance \& Use Considerations in Virtual Medical Encounters},
author = {Thomas B. Talbot and Nicolai Kalisch and Kelly Christoffersen and Gale Lucas and Eric Forbell},
url = {http://books.google.com/books?hl=en&lr=&id=sLgtDAAAQBAJ&oi=fnd&pg=PA407&dq=%22through+regular+web+browsers+and+is+capable+of+multiple+types+of%22+%22practice+targeting+diagnostic+interviews.+A+natural+language+interview%22+%22narrative+statement+based+upon+dialog+context.+The+dialog+manager%27s%22+&ots=Ej8L8hxLlb&sig=GMnqEb5n7CB9x1lWE4gfe5_4n8o},
doi = {10.3233/978-1-61499-625-5-407},
year = {2016},
date = {2016-04-01},
journal = {Medicine Meets Virtual Reality 22: NextMed/MMVR22},
volume = {220},
pages = {407–413},
abstract = {A virtual standardized patient (VSP) prototype was tested for natural language understanding (NLU) performance. The conversational VSP was evaluated in a controlled 61 subject study over four repetitions of a patient case. The prototype achieved more than 92\% appropriate response rate from naïve users on their first attempt and results were stable by their fourth case repetition. This level of performance exceeds prior efforts and is at a level comparable of accuracy as seen in human conversational patient training, with caveats. This level of performance was possible due to the use of a unified medical taxonomy underpinning that allows virtual patient language training to be applied to all cases in our system as opposed to benefiting a single patient case.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gandhe, Sudeep; Traum, David
A Semi-automated Evaluation Metric for Dialogue Model Coherence Book Section
In: Situated Dialog in Speech-Based Human-Computer Interaction, pp. 217–225, Springer International Publishing, Cham, 2016, ISBN: 978-3-319-21833-5 978-3-319-21834-2.
@incollection{gandhe_semi-automated_2016,
title = {A Semi-automated Evaluation Metric for Dialogue Model Coherence},
author = {Sudeep Gandhe and David Traum},
url = {http://link.springer.com/10.1007/978-3-319-21834-2_19},
isbn = {978-3-319-21833-5 978-3-319-21834-2},
year = {2016},
date = {2016-04-01},
booktitle = {Situated Dialog in Speech-Based Human-Computer Interaction},
pages = {217–225},
publisher = {Springer International Publishing},
address = {Cham},
abstract = {We propose a new metric, Voted Appropriateness, which can be used to automatically evaluate dialogue policy decisions, once some wizard data has been collected. We show that this metric outperforms a previously proposed metric Weak agreement. We also present a taxonomy for dialogue model evaluation schemas, and orient our new metric within this taxonomy.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Phan, Thai; Krum, David M.; Bolas, Mark
ShodanVR: Immersive Visualization of Text Records from the Shodan Database Proceedings Article
In: Proceedings of the 2016 Workshop on Immersive Analytics (IA), IEEE, Greenville, SC, 2016, ISBN: 978-1-5090-0834-6.
@inproceedings{phan_shodanvr_2016,
title = {ShodanVR: Immersive Visualization of Text Records from the Shodan Database},
author = {Thai Phan and David M. Krum and Mark Bolas},
url = {http://ieeexplore.ieee.org/document/7932379/?part=1},
doi = {10.1109/IMMERSIVE.2016.7932379},
isbn = {978-1-5090-0834-6},
year = {2016},
date = {2016-03-01},
booktitle = {Proceedings of the 2016 Workshop on Immersive Analytics (IA)},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {ShodanVR is an immersive visualization for querying and displaying text records from the Shodan database of Internet connected devices. Shodan provides port connection data retrieved from servers, routers, and other networked devices [2]. Cybersecurity professionals can glean this data for device populations, software versions, and potential security vulnerabilities [1].},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2004
Gratch, Jonathan; Marsella, Stacy C.
A Domain-independent Framework for Modeling Emotion Journal Article
In: Journal of Cognitive Systems Research, vol. 5, no. 4, pp. 269–306, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{gratch_domain-independent_2004,
  title     = {A Domain-independent Framework for Modeling Emotion},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/A%20Domain-independent%20Framework%20for%20Modeling%20Emotion.pdf},
  year      = {2004},
  date      = {2004-01-01},
  journal   = {Journal of Cognitive Systems Research},
  volume    = {5},
  number    = {4},
  pages     = {269–306},
  abstract  = {In this article, we show how psychological theories of emotion shed light on the interaction between emotion and cognition, and thus can inform the design of human-like autonomous agents that must convey these core aspects of human behavior. We lay out a general computational framework of appraisal and coping as a central organizing principle for such systems. We then discuss a detailed domain-independent model based on this framework, illustrating how it has been applied to the problem of generating behavior for a significant social training application. The model is useful not only for deriving emotional state, but also for informing a number of the behaviors that must be modeled by virtual humans such as facial expressions, dialogue management, planning, reacting, and social understanding. Thus, the work is of potential interest to models of strategic decision-making, action selection, facial animation, and social intelligence.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Hawkins, Tim; Wenger, Andreas; Tchou, Chris; Gardner, Andrew; Goransson, Fredrik; Debevec, Paul
Animatable Facial Reflectance Fields Proceedings Article
In: Eurographics Symposium on Rendering, Norrköping, Sweden, 2004.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{hawkins_animatable_2004,
title = {Animatable Facial Reflectance Fields},
author = {Tim Hawkins and Andreas Wenger and Chris Tchou and Andrew Gardner and Fredrik Goransson and Paul Debevec},
url = {http://ict.usc.edu/pubs/Animatable%20Facial%20Re%EF%AC%82ectance%20Fields.pdf},
year = {2004},
date = {2004-01-01},
booktitle = {Eurographics Symposium on Rendering},
address = {Norrköping, Sweden},
abstract = {We present a technique for creating an animatable image-based appearance model of a human face, able to capture appearance variation over changing facial expression, head pose, view direction, and lighting condition. Our capture process makes use of a specialized lighting apparatus designed to rapidly illuminate the subject sequentially from many different directions in just a few seconds. For each pose, the subject remains still while six video cameras capture their appearance under each of the directions of lighting. We repeat this process for approximately 60 different poses, capturing different expressions, visemes, head poses, and eye positions. The images for each of the poses and camera views are registered to each other semi-automatically with the help of fiducial markers. The result is a model which can be rendered realistically under any linear blend of the captured poses and under any desired lighting condition by warping, scaling, and blending data from the original images. Finally, we show how to drive the model with performance capture data, where the pose is not necessarily a linear combination of the original captured poses.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
Strategy Representation: An Analysis of Planning Knowledge Book
Lawrence Erlbaum Associates/Psychology Press, Mahwah, NJ, 2004, ISBN: 0-8058-4527-5.
Abstract | Links | BibTeX | Tags: The Narrative Group
@book{gordon_strategy_2004,
title = {Strategy Representation: An Analysis of Planning Knowledge},
author = {Andrew S. Gordon},
url = {http://people.ict.usc.edu/~gordon/sr.html},
isbn = {0-8058-4527-5},
year = {2004},
date = {2004-01-01},
publisher = {Lawrence Erlbaum Associates/Psychology Press},
address = {Mahwah, NJ},
abstract = {Strategy Representation: An Analysis of Planning Knowledge describes an innovative methodology for investigating the conceptual structures that underlie human reasoning. This work explores the nature of planning strategies-the abstract patterns of planning behavior that people recognize across a broad range of real world situations. With a sense of scale that is rarely seen in the cognitive sciences, this book catalogs 372 strategies across 10 different planning domains: business practices, education, object counting, Machiavellian politics, warfare, scientific discovery, personal relationships, musical performance, and the anthropomorphic strategies of animal behavior and cellular immunology. Noting that strategies often serve as the basis for analogies that people draw across planning situations, this work attempts to explain these analogies by defining the fundamental concepts that are common across all instances of each strategy. By aggregating evidence from each of the strategy definitions provided, the representational requirements of strategic planning are identified. The important finding is that the concepts that underlie strategic reasoning are of incredibly broad scope. Nearly 1,000 fundamental concepts are identified, covering every existing area of knowledge representation research and many areas that have not yet been adequately formalized, particularly those related to common sense understanding of mental states and processes. An organization of these concepts into 48 fundamental areas of knowledge and representation is provided, offering an invaluable roadmap for progress within the field.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {book}
}
Gordon, Andrew S.; van Lent, Michael; van Velsen, Martin; Carpenter, Paul; Jhala, Arnav
Branching Storylines in Virtual Reality Environments for Leadership Development Proceedings Article
In: Proceedings of the 16th Innovative Applications of Artificial Intelligence Conference (IAAI-04), pp. 844–851, AAAI Press, San Jose, CA, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_branching_2004,
title = {Branching Storylines in Virtual Reality Environments for Leadership Development},
author = {Andrew S. Gordon and Michael van Lent and Martin van Velsen and Paul Carpenter and Arnav Jhala},
url = {http://ict.usc.edu/pubs/Branching%20Storylines%20in%20Virtual%20Reality%20Environments%20for%20Leadership%20Development.pdf},
year = {2004},
date = {2004-01-01},
booktitle = {Proceedings of the 16th Innovative Applications of Artificial Intelligence Conference (IAAI-04)},
pages = {844–851},
publisher = {AAAI Press},
address = {San Jose, CA},
abstract = {Simulation-based training is increasingly being used within the military to practice and develop the skills of successful soldiers. For the skills associated with successful military leadership, our inability to model human behavior to the necessary degree of fidelity in constructive simulations requires that new interactive designs be developed. The ICT Leaders project supports leadership development through the use of branching storylines realized within a virtual reality environment. Trainees assume a role in a fictional scenario, where the decisions that they make in this environment ultimately affect the success of a mission. All trainee decisions are made in the context of natural language conversations with virtual characters. The ICT Leaders project advances a new form of interactive training by incorporating a suite of Artificial Intelligence technologies, including control architectures, agents of mixed autonomy, and natural language processing algorithms.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Muller, T. J.
Everything in perspective Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 03.2004, 2004.
@techreport{muller_everything_2004,
  title       = {Everything in perspective},
  author      = {T. J. Muller},
  url         = {http://ict.usc.edu/pubs/Everything%20in%20perspective.pdf},
  year        = {2004},
  date        = {2004-01-01},
  number      = {ICT TR 03.2004},
  address     = {Marina del Rey, CA},
  institution = {University of Southern California Institute for Creative Technologies},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Huang, Hesu; Kyriakakis, Chris
Real-valued Delayless Subband Affine Projection Algorithm for Acoustic Echo Cancellation Proceedings Article
In: Conference Record of the Thirty-Eighth Asilomar Conference on Signals, Systems and Computers, pp. 259–262, Pacific Grove, CA, 2004, ISBN: 0-7803-8622-1.
Abstract | Links | BibTeX | Tags:
@inproceedings{huang_real-valued_2004,
title = {Real-valued Delayless Subband Affine Projection Algorithm for Acoustic Echo Cancellation},
author = {Hesu Huang and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Real-valued%20Delayless%20Subband%20Affine%20Projection%20Algorithm%20for%20Acoustic%20Echo%20Cancellation.pdf},
doi = {10.1109/ACSSC.2004.1399131},
isbn = {0-7803-8622-1},
year = {2004},
date = {2004-01-01},
booktitle = {Conference Record of the Thirty-Eighth Asilomar Conference on Signals, Systems and Computers},
volume = {1},
pages = {259–262},
address = {Pacific Grove, CA},
abstract = {Acoustic echo cancellation (AEC) often involves adaptive filters with large numbers of taps, which results in poor performance in real-time applications. The utilization of delayless subband adaptive filter (DSAF) helps reduce computations and improve the overall performance. However, conventional oversampled subband adaptive filters mainly use DFT or GDFT based analysis/synthesis filter banks and generate "complex-valued" subband signals. This is particularly inefficient when applying the affine projection algorithm (APA), a popular adaptive algorithm for AEC problem, to each subband. For APA implementation, real-valued signals show higher efficiency than complex signals. In this paper, we present a real-valued delayless subband APA and study both its computational complexity and performance on AEC problems. Compared to the complex valued approach, our method achieves a better performance with lower computational cost.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul; Tchou, Chris; Gardner, Andrew; Hawkins, Tim; Poullis, Charis; Stumpfel, Jessi; Jones, Andrew; Yun, Nathaniel; Einarsson, Per; Lundgren, Therese; Fajardo, Marcos; Martinez, Philippe
Estimating Surface Reflectance Properties of a Complex Scene under Captured Natural Illumination Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 06 2004, 2004.
Abstract | Links | BibTeX | Tags: Graphics
@techreport{debevec_estimating_2004,
title = {Estimating Surface Reflectance Properties of a Complex Scene under Captured Natural Illumination},
author = {Paul Debevec and Chris Tchou and Andrew Gardner and Tim Hawkins and Charis Poullis and Jessi Stumpfel and Andrew Jones and Nathaniel Yun and Per Einarsson and Therese Lundgren and Marcos Fajardo and Philippe Martinez},
url = {http://ict.usc.edu/pubs/ICT-TR-06.2004.pdf},
year = {2004},
date = {2004-01-01},
number = {ICT TR 06 2004},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present a process for estimating spatially-varying surface reflectance of a complex scene observed under natural illumination conditions. The process uses a laser-scanned model of the scene's geometry, a set of digital images viewing the scene's surfaces under a variety of natural illumination conditions, and a set of corresponding measurements of the scene's incident illumination in each photograph. The process then employs an iterative inverse global illumination technique to compute surface colors for the scene which, when rendered under the recorded illumination conditions, best reproduce the scene's appearance in the photographs. In our process we measure BRDFs of representative surfaces in the scene to better model the non-Lambertian surface reflectance. Our process uses a novel lighting measurement apparatus to record the full dynamic range of both sunlit and cloudy natural illumination conditions. We employ Monte-Carlo global illumination, multiresolution geometry, and a texture atlas system to perform inverse global illumination on the scene. The result is a lighting-independent model of the scene that can be re-illuminated under any form of lighting. We demonstrate the process on a real-world archaeological site, showing that the technique can produce novel illumination renderings consistent with real photographs as well as reflectance properties that are consistent with ground-truth reflectance measurements.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {techreport}
}
Traum, David; Marsella, Stacy C.; Gratch, Jonathan
Emotion and Dialogue in the MRE Virtual Humans Proceedings Article
In: Lecture Notes in Computer Science, pp. 117–127, Kloster Irsee, Germany, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_emotion_2004,
  title     = {Emotion and Dialogue in the MRE Virtual Humans},
  author    = {David Traum and Stacy C. Marsella and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Emotion%20and%20Dialogue%20in%20the%20MRE%20Virtual%20Humans.pdf},
  year      = {2004},
  date      = {2004-01-01},
  booktitle = {Lecture Notes in Computer Science},
  volume    = {3068},
  pages     = {117–127},
  address   = {Kloster Irsee, Germany},
  abstract  = {We describe the emotion and dialogue aspects of the virtual agents used in the MRE project at USC. The models of emotion and dialogue started independently, though each makes crucial use of a central task model. In this paper we describe the task model, dialogue model, and emotion model, and the interactions between them.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Narayanan, Shrikanth; Ananthakrishnan, S.; Belvin, R.; Ettaile, E.; Gandhe, Sudeep; Ganjavi, S.; Georgiou, Panayiotis G.; Hein, C. M.; Kadambe, S.; Knight, K.; Marcu, D.; Neely, H. E.; Srinivasamurthy, Naveen; Wang, Dagen
The Transonics Spoken Dialogue Translator: An aid for English-Persian Doctor-Patient interviews Proceedings Article
In: Working Notes of the AAAI Fall Symposium on Dialogue Systems for Health Communication, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{narayanan_transonics_2004,
  title     = {The Transonics Spoken Dialogue Translator: An aid for English-Persian Doctor-Patient interviews},
  author    = {Shrikanth Narayanan and S. Ananthakrishnan and R. Belvin and E. Ettaile and Sudeep Gandhe and S. Ganjavi and Panayiotis G. Georgiou and C. M. Hein and S. Kadambe and K. Knight and D. Marcu and H. E. Neely and Naveen Srinivasamurthy and Dagen Wang},
  url       = {http://ict.usc.edu/pubs/The%20Transonics%20Spoken%20Dialogue%20Translator-%20An%20aid%20for%20English-Persian%20Doctor-Patient%20interviews.pdf},
  year      = {2004},
  date      = {2004-01-01},
  booktitle = {Working Notes of the AAAI Fall Symposium on Dialogue Systems for Health Communication},
  abstract  = {In this paper we describe our spoken english-persian medical dialogue translation system. We describe the data collection effort and give an overview of the component technologies, including speech recognition, translation, dialogue management, and user interface design. The individual modules and system are designed for flexibility, and to be able to leverage different amounts of available resources to maximize the ability for communication between medical care-giver and patient.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Parsons, Thomas D.; Thompson, E.; Buckwalter, John Galen; Bluestein, Brendon
Pregnancy History and Cognition During and After Pregnancy Journal Article
In: International Journal of Neuroscience, vol. 114, pp. 1099–1110, 2004, ISSN: 0020-7454.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_pregnancy_2004,
  title     = {Pregnancy History and Cognition During and After Pregnancy},
  author    = {Thomas D. Parsons and E. Thompson and John Galen Buckwalter and Brendon Bluestein},
  url       = {http://ict.usc.edu/pubs/Pregnancy%20History%20and%20Cognition%20During%20and%20After%20Pregnancy.pdf},
  doi       = {10.1080/00207450490475544},
  issn      = {0020-7454},
  year      = {2004},
  date      = {2004-01-01},
  journal   = {International Journal of Neuroscience},
  volume    = {114},
  pages     = {1099–1110},
  abstract  = {An increasing body of literature confirms anecdotal reports that cognitive changes occur during pregnancy. This article assessed whether prior pregnancy, which alters a woman's subsequent hormonal environment, is associated with a specific cognitive profile during and after pregnancy. Seven primigravids and nine multigravids were compared, equivalent for age and education. No differences between groups were found during pregnancy. After delivery, multigravids performed better than primigravids on verbal memory tasks. After controlling for mood, a significant difference in verbal memory remained. A neuroadaptive mechanism may develop after first pregnancy that increases the ability to recover from some cognitive deficits after later pregnancies.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Hartholt, Arno; Muller, T. J.
Interaction on Emotions Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 02.2004, 2004.
Abstract | Links | BibTeX | Tags:
@techreport{hartholt_interaction_2004,
  title       = {Interaction on Emotions},
  author      = {Arno Hartholt and T. J. Muller},
  url         = {http://ict.usc.edu/pubs/Interaction%20on%20emotions.pdf},
  year        = {2004},
  date        = {2004-01-01},
  number      = {ICT TR 02.2004},
  address     = {Marina del Rey, CA},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {This report describes the addition of an emotion dialogue to the Mission Rehearsal Exercise (MRE) system. The goal of the MRE system is to provide an immersive learning environment for army officer recruits. The user can engage in conversation with several intelligent agents in order to accomplish the goals within a certain scenario. Although these agents did already posses emotions, they were unable to express them verbally. A question - answer dialogue has been implemented to this purpose. The implementation makes use of proposition states for modelling knowledge, keyword scanning for natural language understanding and templates for natural language generation. The system is implemented using Soar and TCL. An agent can understand emotion related questions in four different domains, type, intensity, state, and the combination of responsible-agent and blameworthiness. Some limitations arise due to the techniques used and to the relative short time frame in which the assignment was to be executed. Main issues are that the existing natural language understanding and generation modules could not be fully used, that very little context about the conversation is available and that the emotion states simplify the emotional state of an agent. These limitations and other thoughts give rise to the following recommendations for further work: * Make full use of references. * Use coping strategies for generating agent's utterances. * Use focus mechanisms for generating agent's utterances. * Extend known utterances. * Use NLU and NLG module. * Use emotion dialogue and states to influence emotions. * Fix known bugs.},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Gordon, Andrew S.
The Representation of Planning Strategies Journal Article
In: Artificial Intelligence, vol. 153, pp. 287–305, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@article{gordon_representation_2004,
  title     = {The Representation of Planning Strategies},
  author    = {Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/The%20Representation%20of%20Planning%20Strategies.PDF},
  year      = {2004},
  date      = {2004-01-01},
  journal   = {Artificial Intelligence},
  volume    = {153},
  pages     = {287–305},
  abstract  = {An analysis of strategies, recognizable abstract patterns of planned behavior, highlights the difference between the assumptions that people make about their own planning processes and the representational commitments made in current automated planning systems. This article describes a project to collect and represent strategies on a large scale to identify the representational components of our commonsense understanding of intentional action. Three hundred and seventy-two strategies were collected from ten different planning domains. Each was represented in a pre-formal manner designed to reveal the assumptions that these strategies make concerning the human planning process. The contents of these representations, consisting of nearly one thousand unique concepts, were then collected and organized into forty-eight groups that outline the representational requirements of strategic planning systems.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {article}
}
2003
Narayanan, Shrikanth; Ananthakrishnan, S.; Belvin, R.; Ettaile, E.; Ganjavi, S.; Georgiou, Panayiotis G.; Hein, C. M.; Kadambe, S.; Knight, K.; Marcu, D.; Neely, H. E.; Srinivasamurthy, Naveen; Traum, David; Wang, D.
Transonics: A Speech to Speech System for English-Persian Interactions Proceedings Article
In: Proceedings of Automatic Speech Recognition and Understanding Workshop, U.S. Virgin Islands, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{narayanan_transonics_2003,
title = {Transonics: A Speech to Speech System for English-Persian Interactions},
author = {Shrikanth Narayanan and S. Ananthakrishnan and R. Belvin and E. Ettaile and S. Ganjavi and Panayiotis G. Georgiou and C. M. Hein and S. Kadambe and K. Knight and D. Marcu and H. E. Neely and Naveen Srinivasamurthy and David Traum and D. Wang},
url = {http://ict.usc.edu/pubs/TRANSONICS-%20A%20SPEECH%20TO%20SPEECH%20SYSTEM%20FOR%20ENGLISH-PERSIAN%20INTERACTIONS.pdf},
year = {2003},
date = {2003-12-01},
booktitle = {Proceedings of Automatic Speech Recognition and Understanding Workshop},
address = {U.S. Virgin Islands},
abstract = {In this paper we describe the first phase of development of our speech-to-speech system between English and Modern Persian under the DARPA Babylon program. We give an overview of the various system components: the front end ASR, the machine translation system and the speech generation system. Challenges such as the sparseness of available spoken language data and solutions that have been employed to maximize the obtained benefits from using these limited resources are examined. Efforts in the creation of the user interface and the underlying dialog management system for mediated communication are described.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul
Image-Based Techniques for Digitizing Environments and Artifacts Proceedings Article
In: 4th International Conference on 3-D Digital Imaging and Modeling (3DIM), 2003.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{debevec_image-based_2003,
  author    = {Paul Debevec},
  title     = {Image-Based Techniques for Digitizing Environments and Artifacts},
  booktitle = {4th International Conference on 3-D Digital Imaging and Modeling (3DIM)},
  year      = {2003},
  date      = {2003-10-01},
  url       = {http://ict.usc.edu/pubs/Image-Based%20Techniques%20for%20Digitizing%20Environments%20and%20Artifacts.pdf},
  abstract  = {This paper presents an overview of techniques for generating photoreal computer graphics models of real-world places and objects. Our group's early efforts in modeling scenes involved the development of Facade, an interactive photogrammetric modeling system that uses geometric primitives to model the scene, and projective texture mapping to produce the scene appearance properties. Subsequent work has produced techniques to model the incident illumination within scenes, which we have shown to be useful for realistically adding computer-generated objects to image-based models. More recently, our work has focussed on recovering lighting-independent models of scenes and objects, capturing how each point on an object reflects light. Our latest work combines three-dimensional range scans, digital photographs, and incident illumination measurements to produce lighting-independent models of complex objects and environments.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Martinovski, Bilyana; Traum, David; Robinson, Susan; Garg, Saurabh
Functions and Patterns of Speaker and Addressee Identifications in Distributed Complex Organizational Tasks Over Radio Proceedings Article
In: Proceedings of Diabruck (7th Workshop on the Semantics and Pragmatics of Dialogue), Saarbruecken, Germany, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{martinovski_functions_2003,
  title     = {Functions and Patterns of Speaker and Addressee Identifications in Distributed Complex Organizational Tasks Over Radio},
  author    = {Bilyana Martinovski and David Traum and Susan Robinson and Saurabh Garg},
  url       = {http://ict.usc.edu/pubs/Functions%20and%20Patterns%20of%20Speaker%20and%20Addressee%20Identifications%20in%20Distributed%20Complex%20Organizational%20Tasks%20Over%20Radio.pdf},
  year      = {2003},
  date      = {2003-09-01},
  booktitle = {Proceedings of Diabruck (7th Workshop on the Semantics and Pragmatics of Dialogue)},
  address   = {Saarbruecken, Germany},
  abstract  = {In multiparty dialogue speakers must identify who they are addressing (at least to the addressee, and perhaps to overhearers as well). In non face-to-face situations, even the speaker's identity can be unclear. For talk within organizational teams working on critical tasks, such miscommunication must be avoided, and so organizational conventions have been adopted to signal addressee and speaker, (e.g., military radio communications). However, explicit guidelines, such as provided by the military are not always exactly followed (see also (Churcher et al., 1996)). Moreover, even simple actions like identifications of speaker and hearer can be performed in a variety of ways, for a variety of purposes. The purpose of this paper is to contribute to the understanding and predictability of identifications of speaker and addressee in radio mediated organization of work.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Itti, Laurent; Dhavale, Nitin; Pighin, Frédéric
Realistic Avatar Eye and Head Animation Using a Neurobiological Model of Visual Attention Proceedings Article
In: Proceedings of SPIE 48th Annual International Symposium on Optical Science and Technology, San Diego, CA, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{itti_realistic_2003,
  author    = {Laurent Itti and Nitin Dhavale and Frédéric Pighin},
  title     = {Realistic Avatar Eye and Head Animation Using a Neurobiological Model of Visual Attention},
  booktitle = {Proceedings of SPIE 48th Annual International Symposium on Optical Science and Technology},
  address   = {San Diego, CA},
  year      = {2003},
  date      = {2003-08-01},
  doi       = {10.1117/12.512618},
  url       = {http://ict.usc.edu/pubs/Realistic%20Avatar%20Eye%20and%20Head%20Animation%20Using%20a%20Neurobiological%20Model%20of%20Visual%20Attention.pdf},
  abstract  = {We describe a neurobiological model of visual attention and eye/head movements in primates, and its application to the automatic animation of a realistic virtual human head watching an unconstrained variety of visual inputs. The bottom-up (image-based) attention model is based on the known neurophysiology of visual processing along the occipito-parietal pathway of the primate brain, while the eye/head movement model is derived from recordings in freely behaving Rhesus monkeys. The system is successful at autonomously saccading towards and tracking salient targets in a variety of video clips, including synthetic stimuli, real outdoors scenes and gaming console outputs. The resulting virtual human eye/head animation yields realistic rendering of the simulation results, both suggesting applicability of this approach to avatar animation and reinforcing the plausibility of the neural model.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hill, Randall W.; Douglas, Jay; Gordon, Andrew S.; Pighin, Frédéric; Velson, Martin
Guided Conversations about Leadership: Mentoring with Movies and Interactive Characters Proceedings Article
In: Proceedings of the 15th Innovative Applications of Artificial Intelligence Conference, Acapulco, Mexico, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{hill_guided_2003,
  author    = {Randall W. Hill and Jay Douglas and Andrew S. Gordon and Frédéric Pighin and Martin Velson},
  title     = {Guided Conversations about Leadership: Mentoring with Movies and Interactive Characters},
  booktitle = {Proceedings of the 15th Innovative Applications of Artificial Intelligence Conference},
  address   = {Acapulco, Mexico},
  year      = {2003},
  date      = {2003-08-01},
  url       = {http://ict.usc.edu/pubs/Guided%20Conversations%20about%20Leadership-%20Mentoring%20with%20Movies%20and%20Interactive%20Characters.pdf},
  abstract  = {Think Like a Commander - Excellence in Leadership (TLAC-XL) is an application designed for learning leadership skills both from the experiences of others and through a structured dialogue about issues raised in a vignette. The participant watches a movie, interacts with a synthetic mentor and interviews characters in the story. The goal is to enable leaders to learn the human dimensions of leadership, addressing a gap in the training tools currently available to the U.S. Army. The TLAC-XL application employs a number of Artificial Intelligence technologies, including the use of a coordination architecture, a machine learning approach to natural language processing, and an algorithm for the automated animation of rendered human faces.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Kazemzadeh, Abe; Nair, Anish; Petrova, Milena
Recognizing Expressions of Commonsense Psychology in English Text Proceedings Article
In: Proceedings of the 41st Annual Meeting of the Association for Computational Linguistics (ACL), Sapporo, Japan, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_recognizing_2003,
  author    = {Andrew S. Gordon and Abe Kazemzadeh and Anish Nair and Milena Petrova},
  title     = {Recognizing Expressions of Commonsense Psychology in English Text},
  booktitle = {Proceedings of the 41st Annual Meeting of the Association for Computational Linguistics (ACL)},
  address   = {Sapporo, Japan},
  year      = {2003},
  date      = {2003-07-01},
  url       = {http://ict.usc.edu/pubs/Recognizing%20Expressions%20of%20Commonsense%20Psychology%20in%20English%20Text.PDF},
  abstract  = {Many applications of natural language processing technologies involve analyzing texts that concern the psychological states and processes of people, including their beliefs, goals, predictions, explanations, and plans. In this paper, we describe our efforts to create a robust, large-scale lexical-semantic resource for the recognition and classification of expressions of commonsense psychology in English Text. We achieve high levels of precision and recall by hand-authoring sets of local grammars for commonsense psychology concepts, and show that this approach can achieve classification performance greater than that obtained by using machine learning techniques. We demonstrate the utility of this resource for large-scale corpus analysis by identifying references to adversarial and competitive goal in political speeches throughout U.S. history.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Nair, Anish
Literary Evidence for the Cultural Development of a Theory of Mind Proceedings Article
In: Proceedings of the 25th Annual Meeting of the Cognitive Science Society (CogSci), Boston, MA, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_literary_2003,
  author    = {Andrew S. Gordon and Anish Nair},
  title     = {Literary Evidence for the Cultural Development of a Theory of Mind},
  booktitle = {Proceedings of the 25th Annual Meeting of the Cognitive Science Society (CogSci)},
  address   = {Boston, MA},
  year      = {2003},
  date      = {2003-07-01},
  url       = {http://ict.usc.edu/pubs/Literary%20Evidence%20for%20the%20Cultural%20Development%20of%20a%20Theory%20of%20Mind.PDF},
  abstract  = {The term Theory of Mind is used within the cognitive sciences to refer to the abilities that people have to reason about their own mental states and the mental states of others. An important question is whether these abilities are culturally acquired or innate to our species. This paper outlines the argument that the mental models that serve as the basis for Theory of Mind abilities are the product of cultural development. To support this thesis, we present evidence gathered from the large-scale automated analysis of text corpora. We show that the Freudian conception of a subconscious desire is a relatively modern addition to our culturally shared Theory of Mind, as evidenced by a shift in the way these ideas appeared in 19th and 20th century English language novels.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
van Lent, Michael; Hill, Randall W.; McAlinden, Ryan; Brobst, Paul
2002 Defense Modeling and Simulation Office (DMSO) Laboratory for Human Behavior Model Interchange Standards Technical Report
no. AFRL-HE-WP-TP-2007-0008, 2003.
Abstract | Links | BibTeX | Tags:
@techreport{van_lent_2002_2003,
  title     = {2002 Defense Modeling and Simulation Office (DMSO) Laboratory for Human Behavior Model Interchange Standards},
  author    = {Michael van Lent and Randall W. Hill and Ryan McAlinden and Paul Brobst},
  url       = {http://ict.usc.edu/pubs/2002%20Defense%20Modeling%20and%20Simulation%20Office%20(DMSO)%20Laboratory%20for%20Human%20Behavior%20Model%20Interchange%20Standards.pdf},
  year      = {2003},
  date      = {2003-07-01},
  number    = {AFRL-HE-WP-TP-2007-0008},
  abstract  = {This report describes the effort to address the following research objective: "To begin to define, prototype, and demonstrate an interchange standard among Human Behavior Modeling (HBM)-related models in the Department of Defense (DoD), Industry, Academia, and other Government simulations by establishing a Laboratory for the Study of Human Behavior Representation Interchange Standard." With experience, expertise, and technologies of the commercial computer game industry, the academic research community, and DoD simulation developers, the Institute for Creative Technologies discusses their design and implementation for a prototype HBM interface standard and also describes their demonstration of that standard in a game-based simulation environment that combines HBM models from the entertainment industry and academic researchers.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {techreport}
}
Gratch, Jonathan; Marsella, Stacy C.
Fight the Way You Train: The Role and Limits of Emotions in Training for Combat Journal Article
In: Brown Journal of World Affairs, vol. X, pp. 63–76, 2003.
Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{gratch_fight_2003,
  title     = {Fight the Way You Train: The Role and Limits of Emotions in Training for Combat},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Fight%20the%20Way%20You%20Train-The%20Role%20and%20Limits%20of%20Emotions%20in%20Training%20for%20Combat.pdf},
  year      = {2003},
  date      = {2003-06-01},
  journal   = {Brown Journal of World Affairs},
  volume    = {X},
  pages     = {63--76},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Hill, Randall W.; Gratch, Jonathan; Marsella, Stacy C.; Swartout, William; Traum, David
Virtual Humans in the Mission Rehearsal Exercise System Proceedings Article
In: Künstliche Intelligenz (KI) (special issue on Embodied Conversational Agents), 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{hill_virtual_2003,
  title     = {Virtual Humans in the Mission Rehearsal Exercise System},
  author    = {Randall W. Hill and Jonathan Gratch and Stacy C. Marsella and William Swartout and David Traum},
  url       = {http://ict.usc.edu/pubs/Virtual%20Humans%20in%20the%20Mission%20Rehearsal%20Exercise%20System.pdf},
  year      = {2003},
  date      = {2003-06-01},
  booktitle = {Künstliche Intelligenz (KI) (special issue on Embodied Conversational Agents)},
  abstract  = {How can simulation be made more compelling and effective as a tool for learning? This is the question that the Institute for Creative Technologies (ICT) set out to answer when it was formed at the University of Southern California in 1999, to serve as a nexus between the simulation and entertainment communities. The ultimate goal of the ICT is to create the Experience Learning System (ELS), which will advance the state of the art in virtual reality immersion through use of high-resolution graphics, immersive audio, virtual humans and story-based scenarios. Once fully realized, ELS will make it possible for participants to enter places in time and space where they can interact with believable characters capable of conversation and action, and where they can observe and participate in events that are accessible only through simulation.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Iuppa, Nicholas
Experience Management Using Storyline Adaptation Strategies Proceedings Article
In: Proceedings of the First International Conference on Technologies for Digital Storytelling and Entertainment, Darmstadt, Germany, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_experience_2003,
  author    = {Andrew S. Gordon and Nicholas Iuppa},
  title     = {Experience Management Using Storyline Adaptation Strategies},
  booktitle = {Proceedings of the First International Conference on Technologies for Digital Storytelling and Entertainment},
  address   = {Darmstadt, Germany},
  year      = {2003},
  date      = {2003-03-01},
  url       = {http://ict.usc.edu/pubs/Experience%20Management%20Using%20Storyline%20Adaptation%20Strategies.PDF},
  abstract  = {The central problem of creating interactive drama is structuring a media experience for participants such that a good story is presented while enabling a high degree of meaningful interactivity. This paper presents a new approach to interactive drama, where pre-authored storylines are made interactive by adapting them at run-time by applying strategies that react to unexpected user behavior. The approach, called Experience Management, relies heavily on the explication of a broad range of adaptation strategies and a means of selecting which strategy is most appropriate given a particular story context. We describe a formal approach to storyline representation to enable the selection of applicable strategies, and a strategy formalization that allows for storyline modification. Finally, we discuss the application of this approach in the context of a story-based training system for military leadership skills, and the direction for continuing research.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Hobbs, Jerry R.
Coverage and Competency in Formal Theories: A Commonsense Theory of Memory Proceedings Article
In: Proceedings of the 2003 AAAI Spring Symposium on Logical Formalizations of Commonsense Reasoning, Stanford University, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_coverage_2003,
  title     = {Coverage and Competency in Formal Theories: A Commonsense Theory of Memory},
  author    = {Andrew S. Gordon and Jerry R. Hobbs},
  url       = {http://ict.usc.edu/pubs/Coverage%20and%20Competency%20in%20Formal%20Theories-%20A%20Commonsense%20Theory%20of%20Memory.PDF},
  year      = {2003},
  date      = {2003-03-01},
  booktitle = {Proceedings of the 2003 AAAI Spring Symposium on Logical Formalizations of Commonsense Reasoning},
  address   = {Stanford University},
  abstract  = {The utility of formal theories of commonsense reasoning will depend both on their competency in solving problems and on their conceptual coverage. We argue that the problems of coverage and competency can be decoupled and solved with different methods for a given commonsense domain. We describe a methodology for identifying the coverage requirements of theories through the large-scale analysis of planning strategies, with further refinements made by collecting and categorizing instances of natural language expressions pertaining to the domain. We demonstrate the effectiveness of this methodology in identifying the representational coverage requirements of theories of the commonsense psychology of human memory. We then apply traditional methods of formalization to produce a formal first-order theory of commonsense memory with a high degree of competency and coverage.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Fleischman, Michael; Hovy, Eduard
NL Generation for Virtual Humans in a Complex Social Environment Proceedings Article
In: AAAI Spring Symposium on Natural Language Generation in Spoken and Written Dialogue, pp. 151–158, 2003.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_nl_2003,
  title     = {NL Generation for Virtual Humans in a Complex Social Environment},
  author    = {David Traum and Michael Fleischman and Eduard Hovy},
  url       = {http://ict.usc.edu/pubs/NL%20Generation%20for%20Virtual%20Humans%20in%20a%20Complex%20Social%20Environment.pdf},
  year      = {2003},
  date      = {2003-03-01},
  booktitle = {AAAI Spring Symposium on Natural Language Generation in Spoken and Written Dialogue},
  pages     = {151--158},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David
Semantics and Pragmatics of Questions and Answers for Dialogue Agents Proceedings Article
In: International Workshop on Computational Semantics, 2003.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_semantics_2003,
  author    = {David Traum},
  title     = {Semantics and Pragmatics of Questions and Answers for Dialogue Agents},
  booktitle = {International Workshop on Computational Semantics},
  year      = {2003},
  date      = {2003-01-01},
  url       = {http://ict.usc.edu/pubs/Semantics%20and%20Pragmatics%20of%20Questions%20and%20Answers%20for%20Dialogue%20Agents.pdf},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Uhrmacher, Adelinde; Swartout, William
Agent-Oriented Simulation Journal Article
In: Applied System Simulation, pp. 215–239, 2003.
Abstract | Links | BibTeX | Tags:
@article{uhrmacher_agent-oriented_2003,
  title     = {Agent-Oriented Simulation},
  author    = {Adelinde Uhrmacher and William Swartout},
  url       = {http://link.springer.com/chapter/10.1007/978-1-4419-9218-5_10},
  year      = {2003},
  date      = {2003-01-01},
  journal   = {Applied System Simulation},
  pages     = {215--239},
  abstract  = {Metaphors play a key role in computer science and engineering. Agents bring the notion of locality of information (as in object-oriented programming) together with locality of intent or purpose. The relation between multi-agent and simulation systems is multi-facetted. Simulation systems are used to evaluate software agents in virtual dynamic environments. Agents become part of the model design, if autonomous entities in general, and human or social actors in particular shall be modeled. A couple of research projects shall illuminate some of these facets.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Joshi, Pushkar; Tien, Wen C.; Desbrun, Mathieu; Pighin, Frédéric
Learning Controls for Blend Shape Based Realistic Facial Animation Proceedings Article
In: Breen, D.; Lin, M. (Ed.): Proceedings of the Eurographics/SIGGRAPH Symposium on Computer Animation, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{joshi_learning_2003,
  title     = {Learning Controls for Blend Shape Based Realistic Facial Animation},
  author    = {Pushkar Joshi and Wen C. Tien and Mathieu Desbrun and Frédéric Pighin},
  editor    = {Breen, D. and Lin, M.},
  url       = {http://ict.usc.edu/pubs/Learning%20Controls%20for%20Blend%20Shape%20Based%20Realistic%20Facial%20Animation.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of the Eurographics/SIGGRAPH Symposium on Computer Animation},
  abstract  = {Blend shape animation is the method of choice for keyframe facial animation: a set of blend shapes (key facial expressions) are used to define a linear space of facial expressions. However, in order to capture a significant range of complexity of human expressions, blend shapes need to be segmented into smaller regions where key idiosyncracies of the face being animated are present. Performing this segmentation by hand requires skill and a lot of time. In this paper, we propose an automatic, physically-motivated segmentation that learns the controls and parameters directly from the set of blend shapes. We show the usefulness and efficiency of this technique for both, motion-capture animation and keyframing. We also provide a rendering algorithm to enhance the visual realism of a blend shape model.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling Coping Behaviors in Virtual Humans: Don't worry, Be Happy Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 313–320, Melbourne, Australia, 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_modeling_2003,
  title     = {Modeling Coping Behaviors in Virtual Humans: Don't worry, Be Happy},
  author    = {Stacy C. Marsella and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Modeling%20Coping%20Behavior%20in%20Virtual%20Humans-%20Dont%20worry%20Be%20happy.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  pages     = {313--320},
  address   = {Melbourne, Australia},
  abstract  = {This article builds on insights into how humans cope with emotion to guide the design of virtual humans. Although coping is increasingly viewed in the psychological literature as having a central role in human adaptive behavior, it has been largely ignored in computational models of emotion. In this paper, we show how psychological research on the interplay between human emotion, cognition and coping behavior can serve as a central organizing principle for the behavior of human-like autonomous agents. We present a detailed domain-independent model of coping based on this framework that significantly extends our previous work. We argue that this perspective provides novel insights into realizing adaptive behavior.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
The Social Credit Assignment Problem Proceedings Article
In: Lecture Notes in Computer Science; Proceedings of the 4th International Workshop on Intelligent Virtual Agents (IVA), Kloster Irsee, Germany, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_social_2003,
  title     = {The Social Credit Assignment Problem},
  author    = {Wenji Mao and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/The%20Social%20Credit%20Assignment%20Problem.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of the 4th International Workshop on Intelligent Virtual Agents (IVA)},
  series    = {Lecture Notes in Computer Science},
  volume    = {2792},
  address   = {Kloster Irsee, Germany},
  abstract  = {Social credit assignment is a process of social judgment whereby one singles out individuals to blame or credit for multi-agent activities. Such judgments are a key aspect of social intelligence and underlie social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a preliminary computational approach to forming such judgments based on an agent's causal knowledge and conversation interactions.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gardner, Andrew; Tchou, Chris; Hawkins, Tim; Debevec, Paul
Linear Light Source Reflectometry Proceedings Article
In: ACM Transactions on Graphics, 2003.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{gardner_linear_2003,
  author    = {Andrew Gardner and Chris Tchou and Tim Hawkins and Paul Debevec},
  title     = {Linear Light Source Reflectometry},
  booktitle = {ACM Transactions on Graphics},
  year      = {2003},
  date      = {2003-01-01},
  url       = {http://ict.usc.edu/pubs/Linear%20Light%20Source%20Reflectometry.pdf},
  abstract  = {This paper presents a technique for estimating the spatially-varying reflectance properties of a surface based on its appearance during a single pass of a linear light source. By using a linear light rather than a point light source as the illuminant, we are able to reliably observe and estimate the diffuse color, specular color, and specular roughness of each point of the surface. The reflectometry apparatus we use is simple and inexpensive to build, requiring a single direction of motion for the light source and a fixed camera viewpoint. Our model fitting technique first renders a reflectance table of how diffuse and specular reflectance lobes would appear under moving linear light source illumination. Then, for each pixel we compare its series of intensity values to the tabulated reflectance lobes to determine which reflectance model parameters most closely produce the observed reflectance values. Using two passes of the linear light source at different angles, we can also estimate per-pixel surface normals as well as the reflectance parameters. Additionally our system records a per-pixel height map for the object and estimates its per-pixel translucency. We produce real-time renderings of the captured objects using a custom hardware shading algorithm. We apply the technique to a test object exhibiting a variety of materials as well as to an illuminated manuscript with gold lettering. To demonstrate the technique's accuracy, we compare renderings of the captured models to real photographs of the original objects.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pair, Jarrell; Neumann, Ulrich; Piepol, Diane; Swartout, William
FlatWorld: Combining Hollywood Set-Design Techniques with VR Journal Article
In: IEEE Computer Graphics and Applications, no. January/February, 2003.
@article{pair_flatworld_2003,
  title     = {FlatWorld: Combining Hollywood Set-Design Techniques with VR},
  author    = {Jarrell Pair and Ulrich Neumann and Diane Piepol and William Swartout},
  editor    = {Lawrence Rosenblum and Michael Macedonia},
  url       = {http://ict.usc.edu/pubs/FlatWorld-%20Combining%20Hollywood%20Set-Design%20Techniques%20with%20VR.pdf},
  year      = {2003},
  date      = {2003-01-01},
  journal   = {IEEE Computer Graphics and Applications},
  number    = {January/February},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Gratch, Jonathan; Mao, Wenji
Automating After Action Review: Attributing Blame or Credit in Team Training Proceedings Article
In: Proceedings of the 12th Conference on Behavior Representation in Modeling and Simulation, Scottsdale, AZ, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_automating_2003,
  author    = {Jonathan Gratch and Wenji Mao},
  title     = {Automating After Action Review: Attributing Blame or Credit in Team Training},
  booktitle = {Proceedings of the 12th Conference on Behavior Representation in Modeling and Simulation},
  address   = {Scottsdale, AZ},
  year      = {2003},
  date      = {2003-01-01},
  url       = {http://ict.usc.edu/pubs/Automating%20After%20Action%20Review-%20Attributing%20Blame%20or%20Credit%20in%20Team%20Training.pdf},
  abstract  = {Social credit assignment is a process of social judgment whereby one singles out individuals to blame or credit for multi-agent activities. Such judgments are a key aspect of social intelligence and underlie social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a preliminary computational approach to forming such judgments based on an agent's causal knowledge and conversation interactions.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
The Social Credit Assignment Problem (Extended Version) Technical Report
University of Southern California Institute for Creative Technologies Kloster Irsee, Germany, no. ICT TR 02 2003, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{mao_social_2003-1,
  title       = {The Social Credit Assignment Problem (Extended Version)},
  author      = {Wenji Mao and Jonathan Gratch},
  url         = {http://ict.usc.edu/pubs/ICT%20TR%2002%202003.pdf},
  year        = {2003},
  date        = {2003-01-01},
  number      = {ICT TR 02 2003},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {Social credit assignment is a process of social judgment whereby one singles out individuals to blame or credit for multi-agent activities. Such judgments are a key aspect of social intelligence and underlie social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a preliminary computational approach to forming such judgments based on an agent's causal knowledge and conversation interactions.},
  keywords    = {Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Traum, David; Larsson, Staffan
The Information State Approach to Dialogue Management Book Section
In: Current and New Directions in Discourse and Dialogue, pp. 325–353, 2003.
Links | BibTeX | Tags: Virtual Humans
@incollection{traum_information_2003,
  title     = {The Information State Approach to Dialogue Management},
  author    = {David Traum and Staffan Larsson},
  url       = {http://ict.usc.edu/pubs/The%20Information%20State%20Approach%20to%20Dialogue%20Management.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Current and New Directions in Discourse and Dialogue},
  pages     = {325--353},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Cao, Yong; Faloutsos, Petros; Pighin, Frédéric
Unsupervised Learning for Speech Motion Editing Proceedings Article
In: Proceedings of Eurographics/SIGGRAPH Symposium on Computer Animation, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{cao_unsupervised_2003,
  title     = {Unsupervised Learning for Speech Motion Editing},
  author    = {Yong Cao and Petros Faloutsos and Frédéric Pighin},
  url       = {http://ict.usc.edu/pubs/Unsupervised%20Learning%20for%20Speech%20Motion%20Editing.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of Eurographics/SIGGRAPH Symposium on Computer Animation},
  abstract  = {We present a new method for editing speech related facial motions. Our method uses an unsupervised learning technique, Independent Component Analysis (ICA), to extract a set of meaningful parameters without any annotation of the data. With ICA, we are able to solve a blind source separation problem and describe the original data as a linear combination of two sources. One source captures content (speech) and the other captures style (emotion). By manipulating the independent components we can edit the motions in intuitive ways.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Shapiro, Ari; Pighin, Frédéric
Hybrid Control For Interactive Character Animation Proceedings Article
In: Proceedings of the 11th Pacific Conference on Computer Graphics and Applications, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{shapiro_hybrid_2003,
title = {Hybrid Control For Interactive Character Animation},
author = {Ari Shapiro and Frédéric Pighin},
url = {http://ict.usc.edu/pubs/Hybrid%20Control%20For%20Interactive%20Character%20Animation.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Proceedings of the 11th Pacific Conference on Computer Graphics and Applications},
abstract = {We implement a framework for animating interactive characters by combining kinematic animation with physical simulation. The combination of animation techniques allows the characters to exploit the advantages of each technique. For example, characters can perform natural-looking kinematic gaits and react dynamically to unexpected situations. Kinematic techniques such as those based on motion capture data can create very natural-looking animation. However, motion capture based techniques are not suitable for modeling the complex interactions between dynamically interacting characters. Physical simulation, on the other hand, is well suited for such tasks. Our work develops kinematic and dynamic controllers and transition methods between the two control methods for interactive character animation. In addition, we utilize the motion graph technique to develop complex kinematic animation from shorter motion clips as a method of kinematic control.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Morie, Jacquelyn; Williams, Josh
The Gestalt of Virtual Environments Proceedings Article
In: International Workshop on Presence, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{morie_gestalt_2003,
  title     = {The Gestalt of Virtual Environments},
  author    = {Jacquelyn Morie and Josh Williams},
  url       = {http://ict.usc.edu/pubs/The%20Gestalt%20of%20Virtual%20Environments.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {International Workshop on Presence},
  abstract  = {The majority of research in the field of virtual reality to date has focused on increasing the fidelity of the environments created and trying to determine the quality of the participant experience. Efforts have been made to quantify such aspects, especially in regards to visuals and sound, and to a lesser extent to the user experience. Recent thinking has tended towards the assumption that ever-greater fidelity would ensure a better user experience. However, such emphasis on photo-realism and audio-realism does not take into account the collective results of our multimodal sensory inputs with their intertwined effects. Our design philosophy for the creation of virtual environments attempts to replicate the human experience, and asks the question: Is there an underlying fidelity of feels-real through which the quality of the participant experience could be improved?},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Traum, David; Rickel, Jeff; Gratch, Jonathan; Marsella, Stacy C.
Negotiation over Tasks in Hybrid Human-Agent Teams for Simulation-Based Training Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 441–448, Melbourne, Australia, 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_negotiation_2003,
  title     = {Negotiation over Tasks in Hybrid Human-Agent Teams for Simulation-Based Training},
  author    = {David Traum and Jeff Rickel and Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Negotiation%20over%20Tasks%20in%20Hybrid%20Human-Agent%20Teams%20for%20Simulation-Based%20Training.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  pages     = {441–448},
  address   = {Melbourne, Australia},
  abstract  = {The effectiveness of simulation-based training for individual tasks – such as piloting skills – is well established, but its use for team training raises challenging technical issues. Ideally, human users could gain valuable leadership experience by interacting with synthetic teammates in realistic and potentially stressful scenarios. However, creating human-like teammates that can support flexible, natural interactions with humans and other synthetic agents requires integrating a wide variety of capabilities, including models of teamwork, models of human negotiation, and the ability to participate in face-to-face spoken conversations in virtual worlds. We have developed such virtual humans by integrating and extending prior work in these areas, and we have applied our virtual humans to an example peacekeeping training scenario to guide and evaluate our research. Our models allow agents to reason about authority and responsibility for individual actions in a team task and, as appropriate, to carry out actions, give and accept orders, monitor task execution, and negotiate options. Negotiation is guided by the agents' dynamic assessment of alternative actions given the current scenario conditions, with the aim of guiding the human user towards an ability to make similar assessments.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Habash, Nizar; Dorr, Bonnie; Traum, David
Hybrid Natural Language Generation from Lexical Conceptual Structures Journal Article
In: Machine Translation, vol. 18, pp. 81–127, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{habash_hybrid_2003,
title = {Hybrid Natural Language Generation from Lexical Conceptual Structures},
author = {Nizar Habash and Bonnie Dorr and David Traum},
url = {http://ict.usc.edu/pubs/Hybrid%20Natural%20Language%20Generation%20from%20Lexical%20%20Conceptual%20Structures.pdf},
year = {2003},
date = {2003-01-01},
journal = {Machine Translation},
volume = {18},
pages = {81–127},
abstract = {This paper describes Lexogen, a system for generating natural-language sentences from Lexical Conceptual Structure, an interlingual representation. The system has been developed as part of a Chinese–English Machine Translation (MT) system; however, it is designed to be used for many other MT language pairs and natural language applications. The contributions of this work include: (1) development of a large-scale Hybrid Natural Language Generation system with language-independent components; (2) enhancements to an interlingual representation and associated algorithm for generation from ambiguous input; (3) development of an efficient reusable language-independent linearization module with a grammar description language that can be used with other systems; (4) improvements to an earlier algorithm for hierarchically mapping thematic roles to surface positions; and (5) development of a diagnostic tool for lexicon coverage and correctness and use of the tool for verification of English, Spanish, and Chinese lexicons. An evaluation of Chinese–English translation quality shows comparable performance with a commercial translation system. The generation system can also be extended to other languages and this is demonstrated and evaluated for Spanish.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Martinovski, Bilyana; Traum, David
The Error Is the Clue: Breakdown In Human-Machine Interaction Proceedings Article
In: Proceedings of ISCA Tutorial and Research Workshop International Speech Communication Association, Switzerland, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{martinovski_error_2003,
title = {The Error Is the Clue: Breakdown In Human-Machine Interaction},
author = {Bilyana Martinovski and David Traum},
url = {http://ict.usc.edu/pubs/The%20Error%20Is%20the%20Clue-%20Breakdown%20In%20Human-Machine%20Interaction.pdf},
year = {2003},
date = {2003-01-01},
booktitle = {Proceedings of ISCA Tutorial and Research Workshop International Speech Communication Association},
address = {Switzerland},
abstract = {This paper focuses not on the detection and correction of specific errors in the interaction between machines and humans, but rather cases of massive deviation from the user's conversational expectations and desires. This can be the result of too many or too unusual errors, but also from dialogue strategies designed to minimize error, which make the interaction unnatural in other ways. We study causes of irritation such as over-fragmentation, over-clarity, over-coordination, over-directedness, and repetitiveness of verbal action, syntax, and intonation. Human reactions to these irritating features typically appear in the following order: tiredness, tolerance, anger, confusion, irony, humor, exhaustion, uncertainty, lack of desire to communicate. The studied features of human expressions of irritation in non-face-to-face interaction are: intonation, emphatic speech, elliptic speech, speed of speech, extra-linguistic signs, speed of verbal action, and overlap.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Unger, J.; Wenger, Andreas; Hawkins, Tim; Gardner, Andrew; Debevec, Paul
Capturing and Rendering With Incident Light Fields Proceedings Article
In: Proceedings of the 14th Eurographics workshop on Rendering, 2003.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{unger_capturing_2003,
  title     = {Capturing and Rendering With Incident Light Fields},
  author    = {J. Unger and Andreas Wenger and Tim Hawkins and Andrew Gardner and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Capturing%20and%20Rendering%20With%20Incident%20Light%20Fields.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of the 14th Eurographics workshop on Rendering},
  abstract  = {This paper presents a process for capturing spatially and directionally varying illumination from a real-world scene and using this lighting to illuminate computer-generated objects. We use two devices for capturing such illumination. In the first we photograph an array of mirrored spheres in high dynamic range to capture the spatially varying illumination. In the second, we obtain higher resolution data by capturing images with an high dynamic range omnidirectional camera as it traverses across a plane. For both methods we apply the light field technique to extrapolate the incident illumination to a volume. We render computer-generated objects as illuminated by this captured illumination using a custom shader within an existing global illumination rendering system. To demonstrate our technique we capture several spatially-varying lighting environments with spotlights, shadows, and dappled lighting and use them to illuminate synthetic scenes. We also show comparisons to real objects under the same illumination.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Moore, Benjamin
QuBit Documentation Technical Report
University of Southern California Institute for Creative Technologies, Marina del Rey, CA, no. ICT TR 01.2003, 2003.
@techreport{moore_qubit_2003,
  title       = {QuBit Documentation},
  author      = {Benjamin Moore},
  url         = {http://ict.usc.edu/pubs/QuBit%20Documentation.pdf},
  year        = {2003},
  date        = {2003-01-01},
  number      = {ICT TR 01.2003},
  address     = {Marina del Rey, CA},
  institution = {University of Southern California Institute for Creative Technologies},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport},
}
2002
Bharitkar, Sunil; Kyriakakis, Chris
Robustness of Spatial Averaging Equalization Methods: A Statistical Approach Proceedings Article
In: IEEE 36th Asilomar Conference on Signals, Systems & Computers, Pacific Grove, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_robustness_2002-1,
title = {Robustness of Spatial Averaging Equalization Methods: A Statistical Approach},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Robustness%20of%20Spatial%20Averaging%20Equalization%20Methods-%20A%20Statistical%20Approach.pdf},
year = {2002},
date = {2002-11-01},
booktitle = {IEEE 36th Asilomar Conference on Signals, Systems & Computers},
address = {Pacific Grove, CA},
abstract = {Traditionally, room response equalization is performed to improve sound quality at a given listener. However, room responses vary with source and listener positions. Hence, in a multiple listener environment, equalization may be performed through spatial averaging of room responses. However, the performance of averaging based equalization, at the listeners, may be affected when listener positions change. In this paper, we present a statistical approach to map variations in listener positions to performance of spatial averaging based equalization. The results indicate that, for the analyzed listener configurations, the zone of equalization depends on distance of microphones from a source and the frequencies in the sound.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bharitkar, Sunil; Kyriakakis, Chris
Perceptual Multiple Location Equalization with Clustering Proceedings Article
In: IEEE 36th Asilomar Conference on Signals, Systems & Computers, Pacific Grove, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_perceptual_2002,
title = {Perceptual Multiple Location Equalization with Clustering},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Perceptual%20Multiple%20Location%20Equalization%20with%20Clustering.pdf},
year = {2002},
date = {2002-11-01},
booktitle = {IEEE 36th Asilomar Conference on Signals, Systems & Computers},
address = {Pacific Grove, CA},
abstract = {Typically, room equalization techniques do not focus on designing filters that equalize the room transfer functions on perceptually relevant spectral features. In this paper we address the problem of room equalization for multiple listeners, simultaneously, using a perceptually designed equalization filter based on pattern recognition techniques. Some features of the proposed filter are, its ability to perform simultaneous equalization at multiple locations, a reduced order, and a psychoacoustically motivated design. In summary, the simultaneous multiple location equalization, using a pattern recognition method, is performed over perceptually relevant spectral components derived from the auditory filtering mechanism.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bharitkar, Sunil; Hilmes, Philip; Kyriakakis, Chris
Robustness of Multiple Listener Equalization With Magnitude Response Averaging Proceedings Article
In: Proceedings of the Audio Engineering Society Convention, Los Angeles, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_robustness_2002,
title = {Robustness of Multiple Listener Equalization With Magnitude Response Averaging},
author = {Sunil Bharitkar and Philip Hilmes and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Robustness%20of%20Multiple%20Listener%20Equalization%20With%20Magnitude%20Response%20Averaging.pdf},
year = {2002},
date = {2002-10-01},
booktitle = {Proceedings of the Audio Engineering Society Convention},
address = {Los Angeles, CA},
abstract = {Traditionally, room response equalization is performed to improve sound quality at a given listener. However, room responses vary with source and listener positions. Hence, in a multiple listener environment, equalization may be performed through spatial averaging of magnitude responses at locations of interest. However, the performance of averaging based equalization, at the listeners, may be affected when listener positions change. In this paper, we present a statistical approach to map variations in listener positions to a performance metric of equalization for magnitude response averaging. The results indicate that, for the analyzed listener configurations, the zone of equalization depends on distance of microphones from a source and the frequencies in the sound.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgiou, Panayiotis G.; Kyriakakis, Chris
An Alternative Model for Sound Signals Encountered in Reverberant Environments; Robust Maximum Likelihood Localization and Parameter Estimation Based on a Sub-Gaussian Model Proceedings Article
In: Proceedings of the Audio Engineering Society Convention, Los Angeles, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{georgiou_alternative_2002,
title = {An Alternative Model for Sound Signals Encountered in Reverberant Environments; Robust Maximum Likelihood Localization and Parameter Estimation Based on a Sub-Gaussian Model},
author = {Panayiotis G. Georgiou and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/An%20Alternative%20Model%20for%20Sound%20Signals%20Encountered%20in%20Reverberant%20Environments%3b%20Robust%20Maximum%20Likelihood%20Localization%20and%20Parameter%20Estimation%20Based%20on%20a%20Sub-Gaussian%20Model.pdf},
year = {2002},
date = {2002-10-01},
booktitle = {Proceedings of the Audio Engineering Society Convention},
address = {Los Angeles, CA},
abstract = {In this paper we investigate an alternative to the Gaussian density for modeling signals encountered in audio environments. The observation that sound signals are impulsive in nature, combined with the reverberation effects commonly encountered in audio, motivates the use of the Sub-Gaussian density. The new Sub-Gaussian statistical model and the separable solution of its Maximum Likelihood estimator are derived. These are used in an array scenario to demonstrate with both simulations and two different microphone arrays the achievable performance gains. The simulations exhibit the robustness of the sub-Gaussian based method while the real world experiments reveal a significant performance gain, supporting the claim that the sub-Gaussian model is better suited for sound signals.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, W. Lewis; Narayanan, Shrikanth; Whitney, Richard; Das, Rajat; Labore, Catherine
Limited Domain Synthesis of Expressive Military Speech for Animated Characters Proceedings Article
In: IEEE 2002 Workshop on Speech Synthesis, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{johnson_limited_2002,
  title     = {Limited Domain Synthesis of Expressive Military Speech for Animated Characters},
  author    = {W. Lewis Johnson and Shrikanth Narayanan and Richard Whitney and Rajat Das and Catherine Labore},
  url       = {http://ict.usc.edu/pubs/Limited%20Domain%20Synthesis%20of%20Expressive%20Military%20Speech%20for%20Animated%20Characters.pdf},
  year      = {2002},
  date      = {2002-09-01},
  booktitle = {IEEE 2002 Workshop on Speech Synthesis},
  abstract  = {Text-to-speech synthesis can play an important role in interactive education and training applications, as voices for animated agents. Such agents need high-quality voices capable of expressing intent and emotion. This paper presents preliminary results in an effort aimed at synthesizing expressive military speech for training applications. Such speech has acoustic and prosodic characteristics that can differ markedly from ordinary conversational speech. A limited domain synthesis approach is used employing samples of expressive speech, classified according to speaking style. The resulting synthesizer was tested both in isolation and in the context of a virtual reality training scenario with animated characters.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Gordon, Andrew S.
The Theory of Mind in Strategy Representations Proceedings Article
In: Proceedings of the Twenty-fourth Annual Meeting of the Cognitive Science Society (CogSci), Lawrence Erlbaum Associates, George Mason University, 2002.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_theory_2002,
  title     = {The Theory of Mind in Strategy Representations},
  author    = {Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/The%20Theory%20of%20Mind%20in%20Strategy%20Representations.PDF},
  year      = {2002},
  date      = {2002-08-01},
  booktitle = {Proceedings of the Twenty-fourth Annual Meeting of the Cognitive Science Society (CogSci)},
  publisher = {Lawrence Erlbaum Associates},
  address   = {George Mason University},
  abstract  = {Many scientific fields continue to explore cognition related to Theory of Mind abilities, where people reason about the mental states of themselves and others. Experimental and theoretical approaches to this problem have largely avoided issues concerning the contents of representations employed in this class of reasoning. In this paper, we describe a new approach to the investigation of representations related to Theory of Mind abilities that is based on the analysis of commonsense strategies. We argue that because the mental representations of strategies must include concepts of mental states and processes, the large-scale analysis of strategies can be informative of the representational scope of Theory of Mind abilities. The results of an analysis of this sort are presented as a description of thirty representational areas that organize the breadth of Theory of Mind concepts. Implications for Theory Theories and Simulation Theories of Theory of Mind reasoning are discussed.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Angros, Richard Jr.; Johnson, W. Lewis; Rickel, Jeff; Scholer, Andrew
Learning Domain Knowledge for Teaching Procedural Skills Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Bologna, Italy, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{angros_learning_2002,
title = {Learning Domain Knowledge for Teaching Procedural Skills},
author = {Angros, Jr., Richard and W. Lewis Johnson and Jeff Rickel and Andrew Scholer},
url = {http://ict.usc.edu/pubs/Learning%20Domain%20Knowledge%20for%20Teaching%20Procedural%20Skills.pdf},
year = {2002},
date = {2002-07-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Bologna, Italy},
abstract = {This paper describes a method for acquiring procedural knowledge for use by pedagogical agents in interactive simulation-based learning environments. Such agents need to be able to adapt their behavior to the changing conditions of the simulated world, and respond appropriately in mixed-initiative interactions with learners. This requires a good understanding of the goals and causal dependencies in the procedures being taught. Our method, inspired by human tutorial dialog, combines direct specification, demonstration, and experimentation. The human instructor demonstrates the skill being taught, while the agent observes the effects of the procedure on the simulated world. The agent then autonomously experiments with the procedure, making modifications to it, in order to understand the role of each step in the procedure. At various points the instructor can provide clarifications, and modify the developing procedural description as needed. This method is realized in a system called Diligent, which acquires procedural knowledge for the STEVE animated pedagogical agent.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}