Publications
Search
Lubetich, Shannon; Sagae, Kenji
Data-driven Measurement of Child Language Development with Simple Syntactic Templates Proceedings Article
In: Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers, pp. 2151 – 2160, Dublin, Ireland, 2014.
@inproceedings{lubetich_data-driven_2014,
title = {Data-driven Measurement of Child Language Development with Simple Syntactic Templates},
author = {Shannon Lubetich and Kenji Sagae},
url = {http://ict.usc.edu/pubs/Data-driven%20Measurement%20of%20Child%20Language%20Development%20with%20Simple%20Syntactic%20Templates.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers},
pages = {2151--2160},
address = {Dublin, Ireland},
abstract = {When assessing child language development, researchers have traditionally had to choose between easily computable metrics focused on superficial aspects of language, and more expressive metrics that are carefully designed to cover specific syntactic structures and require substantial and tedious labor. Recent work has shown that existing expressive metrics for child language development can be automated and produce accurate results. We go a step further and propose that measurement of syntactic development can be performed automatically in a completely data-driven way without the need for definition of language-specific inventories of grammatical structures. As a crucial step in that direction, we show that four simple feature templates are as expressive of language development as a carefully crafted standard inventory of grammatical structures that is commonly used and has been validated empirically.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ustun, Volkan; Rosenbloom, Paul S.; Sagae, Kenji; Demski, Abram
Distributed Vector Representations of Words in the Sigma Cognitive Architecture Proceedings Article
In: Proceedings of the 7th Conference on Artificial General Intelligence 2014, Québec City, Canada, 2014.
@inproceedings{ustun_distributed_2014,
title = {Distributed Vector Representations of Words in the Sigma Cognitive Architecture},
author = {Volkan Ustun and Paul S. Rosenbloom and Kenji Sagae and Abram Demski},
url = {http://ict.usc.edu/pubs/Distributed%20Vector%20Representations%20of%20Words%20in%20the%20Sigma%20Cognitive%20Architecture.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of the 7th Conference on Artificial General Intelligence 2014},
address = {Qu{\'e}bec City, Canada},
abstract = {Recently reported results with distributed-vector word representations in natural language processing make them appealing for incorporation into a general cognitive architecture like Sigma. This paper describes a new algorithm for learning such word representations from large, shallow information resources, and how this algorithm can be implemented via small modifications to Sigma. The effectiveness and speed of the algorithm are evaluated via a comparison of an external simulation of it with state-of-the-art algorithms. The results from more limited experiments with Sigma are also promising, but more work is required for it to reach the effectiveness and speed of the simulation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
de Melo, Celso M.; Gratch, Jonathan; Carnevale, Peter J.
The Importance of Cognition and Affect for Artificially Intelligent Decision Makers Proceedings Article
In: 28th AAAI Conference on Artificial Intelligence, 2014.
@inproceedings{de_melo_importance_2014,
title = {The Importance of Cognition and Affect for Artificially Intelligent Decision Makers},
author = {de Melo, Celso M. and Jonathan Gratch and Peter J. Carnevale},
url = {http://ict.usc.edu/pubs/The%20Importance%20of%20Cognition%20and%20Affect%20for%20Artificially%20Intelligent%20Decision%20Makers.pdf},
year = {2014},
date = {2014-07-01},
booktitle = {28th AAAI Conference on Artificial Intelligence},
abstract = {Agency – the capacity to plan and act – and experience – the capacity to sense and feel – are two critical aspects that determine whether people will perceive non-human entities, such as autonomous agents, to have a mind. There is evidence that the absence of either can reduce cooperation. We present an experiment that tests the necessity of both for cooperation with agents. In this experiment we manipulated people’s perceptions about the cognitive and affective abilities of agents, when engaging in the ultimatum game. The results indicated that people offered more money to agents that were perceived to make decisions according to their intentions (high agency), rather than randomly (low agency). Additionally, the results showed that people offered more money to agents that expressed emotion (high experience), when compared to agents that did not (low experience). We discuss the implications of this agency-experience theoretical framework for the design of artificially intelligent decision makers.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kim, Eunkyung; Dehghani, Morteza; Kim, Yoo Kyoung; Carnevale, Peter J.; Gratch, Jonathan
Effects of Moral Concerns on Negotiations Proceedings Article
In: Proceedings of 36th Annual Meeting of the Cognitive Science Society, pp. 2495–2500, Quebec City, Canada, 2014.
@inproceedings{kim_effects_2014,
title = {Effects of Moral Concerns on Negotiations},
author = {Eunkyung Kim and Morteza Dehghani and Yoo Kyoung Kim and Peter J. Carnevale and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Effects%20of%20Moral%20Concerns%20on%20Negotiations.pdf},
year = {2014},
date = {2014-07-01},
booktitle = {Proceedings of 36th Annual Meeting of the Cognitive Science Society},
pages = {2495--2500},
address = {Quebec City, Canada},
abstract = {There is now considerable evidence that emotion plays an important role in negotiation. Emotions, such as anger and happiness, affect concession-making, not only in human vs. human negotiations but also in human vs. agent negotiations. Recent research has demonstrated the impact of emotional expressions in morally-charged negotiations. Thus, taking people’s moral concerns into account is crucial for building agents that operate in morally sensitive domains. This paper explores the interplay between people’s moral concerns, emotional expressions and concession-making during a morally charged negotiation. Our results demonstrate that participants who had stronger concerns for the Individualizing foundations (Harm and Fairness) make greater concessions for sacred negotiation items when faced with a sad opponent than an angry opponent. Also, we find that participants who had high Binding foundations (In-group, Authority and Purity) are more sensitive to social status, and make greater concessions in scenarios that involve agents in a higher social status.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S.
Deconstructing Episodic Memory and Learning in Sigma Proceedings Article
In: Proceedings of the 36th Annual Conference of the Cognitive Science Society, Cognitive Science Society, Quebec City, Canada, 2014.
@inproceedings{rosenbloom_deconstructing_2014,
title = {Deconstructing Episodic Memory and Learning in Sigma},
author = {Paul S. Rosenbloom},
url = {http://ict.usc.edu/pubs/Deconstructing%20Reinforcement%20Learning%20in%20Sigma.pdf},
internal-note = {NOTE(review): the url filename says "Deconstructing Reinforcement Learning in Sigma" but the title is "Deconstructing Episodic Memory and Learning in Sigma" -- verify the link points to the correct paper},
year = {2014},
date = {2014-07-01},
booktitle = {Proceedings of the 36th Annual Conference of the Cognitive Science Society},
publisher = {Cognitive Science Society},
address = {Quebec City, Canada},
abstract = {In an experiment in functional elegance, episodic memory and learning have been deconstructed in the Sigma cognitive architecture in terms of pre-existing memory and learning mechanisms plus a template-based structure generator. As a side effect, base-level activation also becomes deconstructed in terms of a learned temporal prior.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
de Melo, Celso M.; Carnevale, Peter; Gratch, Jonathan
Social Categorization and Cooperation between humans and computers Proceedings Article
In: Proceedings of the 36th annual meeting of the Cognitive Science Society, pp. 2109–2114, Québec City, Canada, 2014.
@inproceedings{de_melo_social_2014,
title = {Social Categorization and Cooperation between humans and computers},
author = {de Melo, Celso M. and Peter Carnevale and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Social%20Categorization%20and%20Cooperation.pdf},
year = {2014},
date = {2014-07-01},
booktitle = {Proceedings of the 36th annual meeting of the Cognitive Science Society},
pages = {2109--2114},
address = {Qu{\'e}bec City, Canada},
abstract = {Computers increasingly perform a variety of important tasks and services that influence individuals and organizations, yet few studies tell us about how humans interact with computers and other non-human decision-makers. In four experiments, we asked people to engage in cooperation tasks with computers and with humans. Experiment 1 found that people gave more money to a human than a computer. We argue this effect reflects a basic bias in favor of humans, which are perceived to be the in-group, when compared to computers, which are perceived to be the out-group. In Experiment 2, we varied computer and human ethnicity to be the same or different as the participant; results indicated that ethnicity had a parallel but additive effect that was independent to the effect of the human social category. The data of Experiment 3 indicate that it is also possible to promote group membership with computers by creating structural interdependence based on shared incentives. Finally, we demonstrate in Experiment 4 that our framework based on social categorization theory can predict situations where people will cooperate more with computers than with humans. We discuss implications for understanding people’s decision making with human and non-human others.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Woo, Simon S.; Mirkovic, Jelena; Artstein, Ron; Kaiser, Elsi
Life-Experience Passwords (LEPs) Proceedings Article
In: Proceedings of the Symposium on Usable Privacy and Security (SOUPS), Menlo Park, CA, 2014.
@inproceedings{woo_life-experience_2014,
title = {Life-Experience Passwords (LEPs)},
author = {Simon S. Woo and Jelena Mirkovic and Ron Artstein and Elsi Kaiser},
url = {http://ict.usc.edu/pubs/Life-Experience%20Passwords%20(LEPs).pdf},
year = {2014},
date = {2014-07-01},
booktitle = {Proceedings of the Symposium on Usable Privacy and Security (SOUPS)},
address = {Menlo Park, CA},
abstract = {User-supplied textual passwords are extensively used today for user authentication. However, these passwords have serious deficiencies in the way they interact with humans' natural ability to form memories. Strong passwords that are hard to crack are also often hard for humans to remember, while memorable passwords are easily brute-forced or guessed. We propose a novel password design: life-experience passwords (LEPs). We explain how to use users' existing episodic memories about defining life events to create memorable and hard-to-guess passwords and discuss challenges involved in design and use of LEPs.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Morbini, Fabrizio; Forbell, Eric; Sagae, Kenji
Improving Classification-Based Natural Language Understanding with Non-Expert Annotation Proceedings Article
In: Proceedings of SIGDIAL 2014, pp. 69–73, Philadelphia, PA, 2014.
@inproceedings{morbini_improving_2014,
title = {Improving Classification-Based Natural Language Understanding with Non-Expert Annotation},
author = {Fabrizio Morbini and Eric Forbell and Kenji Sagae},
url = {http://ict.usc.edu/pubs/Improved%20Classification-based%20Natural%20Language%20Understanding%20with%20Non-Expert%20Annotation.pdf},
year = {2014},
date = {2014-06-01},
booktitle = {Proceedings of SIGDIAL 2014},
pages = {69--73},
address = {Philadelphia, PA},
abstract = {Although data-driven techniques are commonly used for Natural Language Understanding in dialogue systems, their efficacy is often hampered by the lack of appropriate annotated training data in sufficient amounts. We present an approach for rapid and cost-effective annotation of training data for classification-based language understanding in conversational dialogue systems. Experiments using a webaccessible conversational character that interacts with a varied user population show that a dramatic improvement in natural language understanding and a substantial reduction in expert annotation effort can be achieved by leveraging non-expert annotation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wienberg, Christopher; Gordon, Andrew S.
Privacy Considerations for Public Storytelling Proceedings Article
In: Proceedings of the Eighth International AAAI Conference on Weblogs and Social Media, pp. 627–630, Ann Arbor, MI, 2014.
@inproceedings{wienberg_privacy_2014,
title = {Privacy Considerations for Public Storytelling},
author = {Christopher Wienberg and Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Privacy%20Considerations%20for%20Public%20Storytelling.pdf},
year = {2014},
date = {2014-06-01},
booktitle = {Proceedings of the Eighth International AAAI Conference on Weblogs and Social Media},
pages = {627--630},
address = {Ann Arbor, MI},
abstract = {The popularity of the web and social media have afforded researchers unparalleled access to content about the daily lives of people. Human research ethics guidelines, while actively expanding to meet the new challenges posed by web research, still rely on offline principles of interaction that are a poor fit to modern technology. In this context, we present a study of the identifiability of authors of socially sensitive content. With the goal of identity obfuscation, we compare this to the identifiability of the same content translated to and then back from a foreign language, focusing on how easily a person could locate the original source of the content. We discuss the risk to these authors presented by dissemination of their content, and consider the implications for research ethics guidelines.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Vigil, Jesse; Tait, Asa Shumskas; Wienberg, Christopher; Gordon, Andrew S.
Friends You Haven’t Met Yet: A Documentary Short Film Proceedings Article
In: Proceedings of the 2014 ACM conference on Web science, pp. 176–176, ACM Press, Bloomington, IN, 2014, ISBN: 978-1-4503-2622-3.
@inproceedings{vigil_friends_2014,
title = {Friends You Haven’t Met Yet: A Documentary Short Film},
author = {Jesse Vigil and Asa Shumskas Tait and Christopher Wienberg and Andrew S. Gordon},
url = {http://dl.acm.org/citation.cfm?doid=2615569.2617797},
doi = {10.1145/2615569.2617797},
isbn = {978-1-4503-2622-3},
year = {2014},
date = {2014-06-01},
booktitle = {Proceedings of the 2014 ACM conference on Web science},
pages = {176},
publisher = {ACM Press},
address = {Bloomington, IN},
abstract = {"Friends You Haven't Met Yet" is a documentary short film that chronicles encounters between extremely prolific bloggers and a computer scientist who uses their personal narratives for research. It explores issues related to public sharing of personal stories, the ethical obligations of researchers who use web data, and the changing nature of online privacy. The film was conceived by Andrew Gordon and Christopher Wienberg at the University of Southern California, whose research involves the collection of millions of personal stories posted to internet weblogs. In analyzing their data, these researchers discovered an unusual population of extremely prolific bloggers, people who post personal stories about their daily lives everyday over the course of many years. They posed three questions about this population: 1. What motivates these people to post so frequently and publicly about their personal life? 2. To what degree do these people embellish their stories to make them more interesting than reality? 3. What expectations do these authors have about their readers, and what are the ethical implications for researchers like us who analyze their posts? To answer these questions, PhD Student Christopher Wienberg contacted many of these bloggers directly and set up face-to-face interviews at their homes. Accompanied by a documentary film crew, Christopher traveled to locations around California, in both urban and rural settings, to better understand the people whose contributions on the web serve as data in social media research.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Nelson, Claire; Traum, David
Single-agent vs. multi-agent techniques for concurrent reinforcement learning of negotiation dialogue policies Proceedings Article
In: Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 500–510, Association for Computational Linguistics, Baltimore, MD, 2014.
@inproceedings{georgila_single-agent_2014,
title = {Single-agent vs. multi-agent techniques for concurrent reinforcement learning of negotiation dialogue policies},
author = {Kallirroi Georgila and Claire Nelson and David Traum},
url = {https://www.aclweb.org/anthology/P/P14/P14-1047.xhtml},
year = {2014},
date = {2014-06-01},
booktitle = {Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
volume = {1},
pages = {500--510},
publisher = {Association for Computational Linguistics},
address = {Baltimore, MD},
abstract = {We use single-agent and multi-agent Reinforcement Learning (RL) for learning dialogue policies in a resource allocation negotiation scenario. Two agents learn concurrently by interacting with each other without any need for simulated users (SUs) to train against or corpora to learn from. In particular, we compare the Q-learning, Policy Hill-Climbing (PHC) and Win or Learn Fast Policy Hill-Climbing (PHC-WoLF) algorithms, varying the scenario complexity (state space size), the number of training episodes, the learning rate, and the exploration rate. Our results show that generally Q-learning fails to converge whereas PHC and PHC-WoLF always converge and perform similarly. We also show that very high gradually decreasing exploration rates are required for convergence. We conclude that multiagent RL of dialogue policies is a promising alternative to using single-agent RL and SUs or learning directly from corpora.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Traum, David
SAWDUST: a Semi-Automated Wizard Dialogue Utterance Selection Tool for domain-independent large-domain dialogue Proceedings Article
In: SIGDIAL 2014 Conference, Association for Computational Linguistics, Philadelphia, PA, 2014.
@inproceedings{gandhe_sawdust_2014,
  author    = {Sudeep Gandhe and David Traum},
  title     = {SAWDUST: a Semi-Automated Wizard Dialogue Utterance Selection Tool for domain-independent large-domain dialogue},
  booktitle = {SIGDIAL 2014 Conference},
  publisher = {Association for Computational Linguistics},
  address   = {Philadelphia, PA},
  year      = {2014},
  date      = {2014-06-01},
  url       = {http://ict.usc.edu/pubs/SAWDUST%20-%20a%20Semi-Automated%20Wizard%20Dialogue%20Utterance%20Selection%20Tool%20for%20domain-independent%20large-domain%20dialogue.pdf},
  abstract  = {We present a tool that allows human wizards to select appropriate response utterances for a given dialogue context from a set of utterances observed in a dialogue corpus. Such a tool can be used in Wizard-of-Oz studies and for collecting data which can be used for training and/or evaluating automatic dialogue models. We also propose to incorporate such automatic dialogue models back into the tool as an aid in selecting utterances from a large dialogue corpus. The tool allows a user to rank candidate utterances for selection according to these automatic models.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nouri, Elnaz; Traum, David
Initiative Taking in Negotiation Proceedings Article
In: Proceedings of the 15th Annual Meeting of the Special Interest Group on Discourse and Dialogue (SIGDIAL), pp. 186–193, 2014.
@inproceedings{nouri_initiative_2014,
title = {Initiative Taking in Negotiation},
author = {Elnaz Nouri and David Traum},
url = {http://ict.usc.edu/pubs/Initiative%20Taking%20in%20Negotiation.pdf},
year = {2014},
date = {2014-06-01},
booktitle = {Proceedings of the 15th Annual Meeting of the Special Interest Group on Discourse and Dialogue (SIGDIAL)},
pages = {186--193},
abstract = {We examine the relationship between initiative behavior in negotiation dialogues and the goals and outcomes of the negotiation. We propose a novel annotation scheme for dialogue initiative, including four labels for initiative and response behavior in a dialogue turn. We annotate an existing human-human negotiation dataset, and use initiative-based features to try to predict both negotiation goal and outcome, comparing our results to prior work using other (non-initiative) features sets. Results show that combining initiative features with other features leads to improvements over either set and a majority class baseline.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Morbini, Fabrizio; DeVault, David; Georgila, Kallirroi; Artstein, Ron; Traum, David; Morency, Louis-Philippe
A Demonstration of Dialogue Processing in SimSensei Kiosk Proceedings Article
In: 15th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 254, 2014.
@inproceedings{morbini_demonstration_2014,
title = {A Demonstration of Dialogue Processing in SimSensei Kiosk},
author = {Fabrizio Morbini and David DeVault and Kallirroi Georgila and Ron Artstein and David Traum and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/A%20Demonstration%20of%20Dialogue%20Processing%20in%20SimSensei%20Kiosk.pdf},
year = {2014},
date = {2014-06-01},
booktitle = {15th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
pages = {254},
abstract = {This demonstration highlights the dialogue processing in SimSensei Kiosk, a virtual human dialogue system that conducts interviews related to psychological distress conditions such as depression, anxiety, and post-traumatic stress disorder (PTSD). The dialogue processing in SimSensei Kiosk allows the system to conduct coherent spoken interviews of human users that are 15-25 minutes in length, and in which users feel comfortable talking and openly sharing information. We present the design of the individual dialogue components, and show examples of natural conversation flow between the system and users, including expressions of empathy, follow-up responses and continuation prompts, and turn-taking.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rahimtoroghi, Elahe; Corcoran, Thomas; Swanson, Reid; Walker, Marilyn A.; Sagae, Kenji; Gordon, Andrew S.
Minimal Narrative Annotation Schemes and Their Applications Proceedings Article
In: Intelligent Narrative Technologies 7: Papers from the 2014 Workshop, Milwaukee, WI, 2014.
@inproceedings{rahimtoroghi_minimal_2014,
title = {Minimal Narrative Annotation Schemes and Their Applications},
author = {Elahe Rahimtoroghi and Thomas Corcoran and Reid Swanson and Marilyn A. Walker and Kenji Sagae and Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Minimal%20Narrative%20Annotation%20Schemes%20and%20Their%20Applications.PDF},
year = {2014},
date = {2014-06-01},
booktitle = {Intelligent Narrative Technologies 7: Papers from the 2014 Workshop},
address = {Milwaukee, WI},
abstract = {The increased use of large corpora in narrative research has created new opportunities for empirical research and intelligent narrative technologies. To best exploit the value of these corpora, several research groups are eschewing complex discourse analysis techniques in favor of high-level minimalist narrative annotation schemes that can be quickly applied, achieve high inter-rater agreement, and are amenable to automation using machine-learning techniques. In this paper we compare different annotation schemes that have been employed by two groups of researchers to annotate large corpora of narrative text. Using a dual-annotation methodology, we investigate the correlation between narrative clauses distinguished by their structural role (orientation, action, evaluation), their subjectivity, and their narrative level within the discourse. We find that each simple narrative annotation scheme captures a structurally distinct characteristic of real-world narratives, and each combination of labels is evident in a corpus of 19 weblog narratives (951 narrative clauses). We discuss several potential applications of minimalist narrative annotation schemes, noting the combination of label across these two annotation schemes that best support each task.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Miguel, Eder; Feng, Andrew; Xu, Yuyu; Shapiro, Ari
Towards Cloth-Manipulating Characters Proceedings Article
In: CASA 2014, Houston, Texas, 2014.
@inproceedings{miguel_towards_2014,
  author    = {Eder Miguel and Andrew Feng and Yuyu Xu and Ari Shapiro},
  title     = {Towards Cloth-Manipulating Characters},
  booktitle = {CASA 2014},
  address   = {Houston, Texas},
  year      = {2014},
  date      = {2014-05-01},
  url       = {http://ict.usc.edu/pubs/Towards%20Cloth-Manipulating%20Characters.pdf},
  abstract  = {Cloth manipulation is a common action in humans that current animated virtual characters are not able to perform due to its complexity. In this paper we focus on dressing-up, which is probably the most common action involving cloth. We identify the steps required to perform the task and describe the systems responsible for each of them. Our results show a character that is able to put on a scarf and react to cloth collision and over-stretching events.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chatterjee, Moitreya; Stratou, Giota; Scherer, Stefan; Morency, Louis-Philippe
CONTEXT-BASED SIGNAL DESCRIPTORS OF HEART-RATE VARIABILITY FOR ANXIETY ASSESSMENT Proceedings Article
In: Acoustics, Speech and Signal Processing (ICASSP), 2014 IEEE International Conference on, pp. 3631–3635, IEEE, Florence, Italy, 2014.
@inproceedings{chatterjee_context-based_2014,
title = {Context-Based Signal Descriptors of Heart-Rate Variability for Anxiety Assessment},
author = {Moitreya Chatterjee and Giota Stratou and Stefan Scherer and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Context-based%20signal%20descriptors%20of%20heart-rate%20variability%20for%20anxiety%20assessment.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Acoustics, Speech and Signal Processing (ICASSP), 2014 IEEE International Conference on},
pages = {3631--3635},
publisher = {IEEE},
address = {Florence, Italy},
abstract = {In this paper, we investigate the role of multiple context-based heart-rate variability descriptors for evaluating a person’s psychological health, specifically anxiety disorders. The descriptors are extracted from visually sensed heart-rate signals obtained during the course of a semi-structured interview with a virtual human and can potentially integrate question context as well. The proposed descriptors are motivated by prior related work and are constructed based on histogram-based approaches, time and frequency domain analysis of heart-rate variability. In order to contextualize our descriptors, we use information about the polarity and intimacy levels of the questions asked. Our experiments reveal that the descriptors, both with and without context, perform far better than chance in predicting anxiety. Further on, we perform at-a-par with the state-of-the-art in predicting anxiety and other psychological disorders when we integrate the question context information into the descriptors.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chiu, Chung-Cheng; Marsella, Stacy C.
Gesture Generation with Low-Dimensional Embeddings Proceedings Article
In: Proceedings of the 13th International Conference on Autonomous Agents and Multiagent Systems, pp. 781–788, Paris, France, 2014.
@inproceedings{chiu_gesture_2014,
title = {Gesture Generation with Low-Dimensional Embeddings},
author = {Chung-Cheng Chiu and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Gesture%20generation%20with%20low-dimensional%20embeddings.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Proceedings of the 13th International Conference on Autonomous Agents and Multiagent Systems},
pages = {781--788},
address = {Paris, France},
abstract = {There is a growing demand for embodied agents capable of engaging in face-to-face dialog using the same verbal and nonverbal behavior that people use. The focus of our work is generating coverbal hand gestures for these agents, gestures coupled to the content and timing of speech. A common approach to achieve this is to use motion capture of an actor or hand-crafted animations for each utterance. An alternative machine learning approach that saves development effort is to learn a general gesture controller that can generate behavior for novel utterances. However learning a direct mapping from speech to gesture movement faces the complexity of inferring the relation between the two time series of speech and gesture motion. We present a novel machine learning approach that decomposes the overall learning problem into learning two mappings: from speech to a gestural annotation and from gestural annotation to gesture motion. The combined model learns to synthesize natural gesture animation from speech audio. We assess the quality of generated animations by comparing them with the result generated by a previous approach that learns a direct mapping. Results from a human subject study show that our framework is perceived to be significantly better.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Lucas, Gale M.; King, Aisha; Morency, Louis-Philippe
It’s Only a Computer: The Impact of Human-agent Interaction in Clinical Interviews Proceedings Article
In: Proceedings of 13th International Conference on Autonomous Agents and Multiagent Systems, pp. 85–92, International Foundation for Autonomous Agents and Multiagent Systems, Paris, France, 2014.
@inproceedings{gratch_its_2014,
title = {It’s Only a Computer: The Impact of Human-agent Interaction in Clinical Interviews},
author = {Jonathan Gratch and Gale M. Lucas and Aisha King and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/It%E2%80%99s%20only%20a%20computer%20-%20The%20impact%20of%20human-agent%20interaction%20in%20clinical%20interviews.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Proceedings of 13th International Conference on Autonomous Agents and Multiagent Systems},
pages = {85--92},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Paris, France},
abstract = {Research has begun to explore the use of virtual humans (VHs) in medical interviews [1]. When designed as supportive and “safe” interaction partners, VHs may improve such screenings by encouraging patients to disclose more personal information [2-3]. In medical contexts, patients often feel resistance to selfdisclosure and engage in impression management to be viewed more positively by healthcare providers. This paper provides the first empirical evidence that VHs can reduce such resistance and impression management. In the context of health-screening interviews, we report a study in which participants interacted with a VH that was either teleo-operated by humans (Wizard-of-Oz) or fully-automated (AI). Independently, we manipulated whether participants believed the VH was controlled by humans or automation. As predicted, participants who believed they were interacting with a computer reported lower resistance to selfdisclosure, lower impression management and higher system usability than those who believed they were interacting with a human operator. Whether the virtual human was actually operated by a human or AI only affected ratings of the system’s usability. These results suggest that automated VHs can help overcome a significant barrier to obtaining truthful patient information in medical domains.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Artstein, Ron; Lucas, Gale; Stratou, Giota; Scherer, Stefan; Nazarian, Angela; Wood, Rachel; Boberg, Jill; DeVault, David; Marsella, Stacy; Traum, David; Rizzo, Albert; Morency, Louis-Philippe
The Distress Analysis Interview Corpus of human and computer interviews Proceedings Article
In: Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC 2014), pp. 3123–3128, LREC, Reykjavik, Iceland, 2014.
@inproceedings{gratch_distress_2014,
title = {The Distress Analysis Interview Corpus of human and computer interviews},
author = {Jonathan Gratch and Ron Artstein and Gale Lucas and Giota Stratou and Stefan Scherer and Angela Nazarian and Rachel Wood and Jill Boberg and David DeVault and Stacy Marsella and David Traum and Albert Rizzo and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/The%20Distress%20Analysis%20Interview%20Corpus%20of%20human%20and%20computer%20interviews.pdf},
year = {2014},
date = {2014-05-01},
booktitle = {Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC 2014)},
pages = {3123--3128},
publisher = {European Language Resources Association (ELRA)},
address = {Reykjavik, Iceland},
abstract = {The Distress Analysis Interview Corpus (DAIC) contains clinical interviews designed to support the diagnosis of psychological distress conditions such as anxiety, depression, and post traumatic stress disorder. The interviews are conducted by humans, human controlled agents and autonomous agents, and the participants include both distressed and non-distressed individuals. Data collected include audio and video recordings and extensive questionnaire responses; parts of the corpus have been transcribed and annotated for a variety of verbal and non-verbal features. The corpus has been used to support the creation of an automated interviewer agent, and for research on the automatic identification of psychological distress.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2004
van Lent, Michael; Fisher, William; Mancuso, Michael
An Explainable Artificial Intelligence System for Small-unit Tactical Behavior Proceedings Article
In: National Conference on Artificial Intelligence, pp. 900–907, San Jose, CA, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{van_lent_explainable_2004,
title = {An Explainable Artificial Intelligence System for Small-unit Tactical Behavior},
author = {van Lent, Michael and Fisher, William and Mancuso, Michael},
url = {http://ict.usc.edu/pubs/An%20Explainable%20Artificial%20Intelligence%20System%20for%20Small-unit%20Tactical%20Behavior.pdf},
year = {2004},
date = {2004-06-01},
booktitle = {National Conference on Artificial Intelligence},
pages = {900--907},
address = {San Jose, CA},
abstract = {As the artificial intelligence (AI) systems in military simulations and computer games become more complex, their actions become increasingly difficult for users to understand. Expert systems for medical diagnosis have addressed this challenge though the addition of explanation generation systems that explain a system's internal processes. This paper describes the AI architecture and associated explanation capability used by Full Spectrum Command, a training system developed for the US Army by commercial game developers and academic researchers.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
Authoring Branching Storylines for Training Applications Proceedings Article
In: Proceedings of the Sixth International Conference of the Learning Sciences (ICLS), Santa Monica, CA, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_authoring_2004,
title = {Authoring Branching Storylines for Training Applications},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Authoring%20Branching%20Storylines%20for%20Training%20Applications.PDF},
year = {2004},
date = {2004-06-01},
booktitle = {Proceedings of the Sixth International Conference of the Learning Sciences (ICLS)},
address = {Santa Monica, CA},
abstract = {Progress in the area of interactive training applications has led to the formulation of methodologies that have been successfully transitioned out of research labs and into the practices of commercial developers. This paper reviews the academic origins of a methodology for developing training applications that incorporate branching storylines to engage users in a first-person learn-by-doing experience, originally referred to as Outcome-Driven Simulations. Innovations and modifications to this methodology from the commercial sector are then reviewed, and the steps in this methodology are described, as implemented in current best practices. Finally, new research efforts based on this methodology are examined, including the introduction of natural language processing technology to enable human-computer conversations and the integration of branching storylines into real-time virtual reality environments. A prototype application to support leadership development within the U.S. Army that includes these advances is described.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Evaluating a General Model of Emotional Appraisal and Coping Proceedings Article
In: AAAI Spring Symposium on Architectures for Modeling Emotion: Cross-disciplinary Foundations, Palo Alto, CA, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_evaluating_2004-1,
  author    = {Gratch, Jonathan and Marsella, Stacy C.},
  title     = {Evaluating a General Model of Emotional Appraisal and Coping},
  booktitle = {AAAI Spring Symposium on Architectures for Modeling Emotion: Cross-disciplinary Foundations},
  address   = {Palo Alto, CA},
  year      = {2004},
  date      = {2004-06-01},
  url       = {http://ict.usc.edu/pubs/Evaluating%20a%20General%20Model%20of%20Emotional%20Appraisal%20and%20Coping.pdf},
  abstract  = {Introduction: In our research, we have developed a general computational model of human emotion. The model attempts to account for both the factors that give rise to emotions as well as the wide-ranging impact emotions have on cognitive and behavioral responses. Emotions influence our beliefs, our decision-making and how we adapt our behavior to the world around us. While most apparent in moments of great stress, emotions sway even the mundane decisions we face in everyday life. Emotions also infuse our social relationships. Our interactions with each other are a source of many emotions and we have developed a range of behaviors that can communicate emotional information as well as an ability to recognize and be influenced by the emotional arousal of others. By virtue of their central role and wide influence, emotion arguably provides the means to coordinate the diverse mental and physical components required to respond to the world in a coherent fashion. (1st Paragraph)},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
A Utility-Based Approach to Intention Recognition Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_utility-based_2004,
  author    = {Mao, Wenji and Gratch, Jonathan},
  title     = {A Utility-Based Approach to Intention Recognition},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  address   = {New York, NY},
  year      = {2004},
  date      = {2004-06-01},
  url       = {http://ict.usc.edu/pubs/A%20Utility-Based%20Approach%20to%20Intention%20Recognition.pdf},
  abstract  = {Based on the assumption that a rational agent will adopt a plan that maximizes the expected utility, we present a utility-based approach to plan recognition problem in this paper. The approach explicitly takes the observed agent's preferences into consideration, and computes the estimated expected utilities of plans to disambiguate competing hypotheses. Online plan recognition is realized by incrementally using plan knowledge and observations to change state probabilities. We also discuss the work and compare it with other probabilistic models in the paper.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Garg, Saurabh; Martinovski, Bilyana; Robinson, Susan; Stephan, Jens; Tetreault, Joel; Traum, David
Evaluation of Transcription and Annotation tools for a Multi-modal, Multi-party dialogue corpus Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{garg_evaluation_2004,
title = {Evaluation of Transcription and Annotation tools for a Multi-modal, Multi-party dialogue corpus},
author = {Saurabh Garg and Bilyana Martinovski and Susan Robinson and Jens Stephan and Joel Tetreault and David Traum},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20Transcription%20and%20Annotation%20tools%20for%20a%20Multi-modal,%20Multi-party%20dialogue%20corpus.pdf},
year = {2004},
date = {2004-05-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC)},
address = {Lisbon, Portugal},
abstract = {This paper reviews nine available transcription and annotation tools, considering in particular the special difficulties arising from transcribing and annotating multi-party, multi-modal dialogue. Tools are evaluated as to the ability to support the user's annotation scheme, ability to visualize the form of the data, compatibility with other tools, flexibility of data representation, and general user-friendliness.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Robinson, Susan; Stephan, Jens
Evaluation of multi-party virtual reality dialogue interaction Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_evaluation_2004,
  author    = {Traum, David and Robinson, Susan and Stephan, Jens},
  title     = {Evaluation of multi-party virtual reality dialogue interaction},
  booktitle = {International Conference on Language Resources and Evaluation (LREC)},
  address   = {Lisbon, Portugal},
  year      = {2004},
  date      = {2004-05-01},
  url       = {http://ict.usc.edu/pubs/Evaluation%20of%20multi-party%20virtual%20reality%20dialogue%20interaction.pdf},
  abstract  = {We describe a dialogue evaluation plan for a multi-character virtual reality training simulation. A multi-component evaluation plan is presented, including user satisfaction, intended task completion, recognition rate, and a new annotation scheme for appropriateness. Preliminary results for formative tests are also presented.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
Tough Love Between Artificial Intelligence and Interactive Entertainment Proceedings Article
In: Proceedings of IE2004: Australian Workshop on Interactive Entertainment, Sydney, Australia, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_tough_2004,
  author    = {Gordon, Andrew S.},
  title     = {Tough Love Between Artificial Intelligence and Interactive Entertainment},
  booktitle = {Proceedings of IE2004: Australian Workshop on Interactive Entertainment},
  address   = {Sydney, Australia},
  year      = {2004},
  date      = {2004-02-01},
  url       = {http://ict.usc.edu/pubs/Tough%20Love%20Between%20Artificial%20Intelligence%20and%20Interactive%20Entertainment.PDF},
  abstract  = {Burgeoning interest in Interactive Entertainment has led many computer scientists with roots in Artificial Intelligence toward the exploration of ideas in mass-market entertainment applications. Increasing numbers of workshops, journals, and funding programs for Interactive Entertainment indicate that AI researchers in this area have a good sense for following hot new trends, but are they vanguards of a fruitful science or misguided opportunists? In this IE2004 invited talk, I'll explore the relationship between AI research and the Interactive Entertainment field, from its seductive courtship through its rocky marriage, and offer some relationship advice for the future.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Robinson, Susan; Martinovski, Bilyana; Garg, Saurabh; Stephan, Jens; Traum, David
Issues in corpus development for multi-party multi-modal task-oriented dialogue Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{robinson_issues_2004,
  author    = {Robinson, Susan and Martinovski, Bilyana and Garg, Saurabh and Stephan, Jens and Traum, David},
  title     = {Issues in corpus development for multi-party multi-modal task-oriented dialogue},
  booktitle = {International Conference on Language Resources and Evaluation (LREC)},
  address   = {Lisbon, Portugal},
  year      = {2004},
  date      = {2004-01-01},
  url       = {http://ict.usc.edu/pubs/Issues%20in%20corpus%20development%20for%20multi-party%20multi-modal%20task-oriented%20dialogue.pdf},
  abstract  = {This paper describes the development of a multi-modal corpus based on multi-party multi-task driven common goal oriented spoken language interaction. The data consists of approximately 10 hours of audio human simulation radio data and nearly 5 hours of video and audio face-to-face sessions between human trainees and virtual agents.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kim, Hyeok-Soo; Gratch, Jonathan
A Planner-Independent Collaborative Planning Assistant Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 766–773, New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kim_planner-independent_2004,
title = {A Planner-Independent Collaborative Planning Assistant},
author = {Hyeok-Soo Kim and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/A%20Planner-Independent%20Collaborative%20Planning%20Assistant.pdf},
year = {2004},
date = {2004-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
volume = {2},
pages = {766--773},
address = {New York, NY},
abstract = {This article introduces a novel approach to the problem of collaborative planning. We present a method that takes classical one-shot planning techniques - that take a fixed set of goals, initial state, and a domain theory - and adapts them to support the incremental, hierarchical and exploratory nature of collaborative planning that occurs between human planners, and that multi-agent planning systems attempt to support. This approach is planner-independent - in that it could be applied to any classical planning technique - and recasts the problem of collaborative planning as a search through a space of possible inputs to a classical planning system. This article outlines the technique and describes its application to the Mission Rehearsal Exercise, a multi-agent training system.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
Social Judgment in Multiagent Interactions Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 210–217, New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_social_2004,
title = {Social Judgment in Multiagent Interactions},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Social%20Judgment%20in%20Multiagent%20Interactions.pdf},
year = {2004},
date = {2004-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
volume = {1},
pages = {210--217},
address = {New York, NY},
abstract = {Social judgment is a process of social explanation whereby one evaluates which entities deserve credit or blame for multi-agent activities. Such explanations are a key aspect of inference in a social environment and a model of this process can advance several design components of multi-agent systems. Social judgment underlies social planning, social learning, natural language pragmatics and computational model of emotion. Based on psychological attribution theory, this paper presents a computational approach to forming social judgment based on an agent's causal knowledge and communicative interactions with other agents.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Cao, Yong; Faloutsos, Petros; Kohler, Eddie; Pighin, Frédéric
Real-time Speech Motion Synthesis from Recorded Motions Proceedings Article
In: Proceedings of Eurographics/SIGGRAPH Symposium on Computer Animation, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{cao_real-time_2004,
  author    = {Cao, Yong and Faloutsos, Petros and Kohler, Eddie and Pighin, Frédéric},
  title     = {Real-time Speech Motion Synthesis from Recorded Motions},
  booktitle = {Proceedings of Eurographics/SIGGRAPH Symposium on Computer Animation},
  year      = {2004},
  date      = {2004-01-01},
  url       = {http://ict.usc.edu/pubs/Real-time%20Speech%20Motion%20Synthesis%20from%20Recorded%20Motions.pdf},
  abstract  = {Data-driven approaches have been successfully used for realistic visual speech synthesis. However, little effort has been devoted to real-time lip-synching for interactive applications. In particular, algorithms that are based on a graph of motions are notorious for their exponential complexity. In this paper, we present a greedy graph search algorithm that yields vastly superior performance and allows real-time motion synthesis from a large database of motions. The time complexity of the algorithm is linear with respect to the size of an input utterance. In our experiments, the synthesis time for an input sentence of average length is under a second.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hawkins, Tim; Wenger, Andreas; Tchou, Chris; Gardner, Andrew; Goransson, Fredrik; Debevec, Paul
Animatable Facial Reflectance Fields Proceedings Article
In: Eurographics Symposium on Rendering, Norkoping, Sweden, 2004.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{hawkins_animatable_2004,
title = {Animatable Facial Reflectance Fields},
author = {Tim Hawkins and Andreas Wenger and Chris Tchou and Andrew Gardner and Fredrik Goransson and Paul Debevec},
url = {http://ict.usc.edu/pubs/Animatable%20Facial%20Re%EF%AC%82ectance%20Fields.pdf},
year = {2004},
date = {2004-01-01},
booktitle = {Eurographics Symposium on Rendering},
address = {Norrköping, Sweden},
abstract = {We present a technique for creating an animatable image-based appearance model of a human face, able to capture appearance variation over changing facial expression, head pose, view direction, and lighting condition. Our capture process makes use of a specialized lighting apparatus designed to rapidly illuminate the subject sequentially from many different directions in just a few seconds. For each pose, the subject remains still while six video cameras capture their appearance under each of the directions of lighting. We repeat this process for approximately 60 different poses, capturing different expressions, visemes, head poses, and eye positions. The images for each of the poses and camera views are registered to each other semi-automatically with the help of fiducial markers. The result is a model which can be rendered realistically under any linear blend of the captured poses and under any desired lighting condition by warping, scaling, and blending data from the original images. Finally, we show how to drive the model with performance capture data, where the pose is not necessarily a linear combination of the original captured poses.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; van Lent, Michael; van Velson, Martin; Carpenter, Paul; Jhala, Arnav
Branching Storylines in Virtual Reality Environments for Leadership Development Proceedings Article
In: Proceedings of the 16th Innovative Applications of Artificial Intelligence Conference (IAAI-04), pp. 844–851, AAAI Press, San Jose, CA, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_branching_2004,
title = {Branching Storylines in Virtual Reality Environments for Leadership Development},
author = {Gordon, Andrew S. and van Lent, Michael and van Velson, Martin and Carpenter, Paul and Jhala, Arnav},
url = {http://ict.usc.edu/pubs/Branching%20Storylines%20in%20Virtual%20Reality%20Environments%20for%20Leadership%20Development.pdf},
year = {2004},
date = {2004-01-01},
booktitle = {Proceedings of the 16th Innovative Applications of Artificial Intelligence Conference (IAAI-04)},
pages = {844--851},
publisher = {AAAI Press},
address = {San Jose, CA},
abstract = {Simulation-based training is increasingly being used within the military to practice and develop the skills of successful soldiers. For the skills associated with successful military leadership, our inability to model human behavior to the necessary degree of fidelity in constructive simulations requires that new interactive designs be developed. The ICT Leaders project supports leadership development through the use of branching storylines realized within a virtual reality environment. Trainees assume a role in a fictional scenario, where the decisions that they make in this environment ultimately affect the success of a mission. All trainee decisions are made in the context of natural language conversations with virtual characters. The ICT Leaders project advances a new form of interactive training by incorporating a suite of Artificial Intelligence technologies, including control architectures, agents of mixed autonomy, and natural language processing algorithms.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Huang, Hesu; Kyriakakis, Chris
Real-valued Delayless Subband Affine Projection Algorithm for Acoustic Echo Cancellation Proceedings Article
In: Conference Record of the Thirty-Eighth Asilomar Conference on Signals, Systems and Computers, pp. 259–262, Pacific Grove, CA, 2004, ISBN: 0-7803-8622-1.
Abstract | Links | BibTeX | Tags:
@inproceedings{huang_real-valued_2004,
title = {Real-valued Delayless Subband Affine Projection Algorithm for Acoustic Echo Cancellation},
author = {Hesu Huang and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Real-valued%20Delayless%20Subband%20Affine%20Projection%20Algorithm%20for%20Acoustic%20Echo%20Cancellation.pdf},
doi = {10.1109/ACSSC.2004.1399131},
isbn = {0-7803-8622-1},
year = {2004},
date = {2004-01-01},
booktitle = {Conference Record of the Thirty-Eighth Asilomar Conference on Signals, Systems and Computers},
volume = {1},
pages = {259--262},
address = {Pacific Grove, CA},
abstract = {Acoustic echo cancellation (AEC) often involves adaptive filters with large numbers of taps, which results in poor performance in real-time applications. The utilization of delayless subband adaptive filter (DSAF) helps reduce computations and improve the overall performance. However, conventional oversampled subband adaptive filters mainly use DFT or GDFT based analysts/synthesis filter banks and generate "complex-valued" subband signals. This is particularly inefficient when applying the affine projection algorithm (APA), a popular adaptive algorithm for AEC problem, to each subband. For APA implementation, real-valued signals show higher efficiency than complex signals. In this paper, we present a real-valued delayless subband APA and study both its computational complexity and performance on AEC problems. Compared to the complex valued approach, our method achieves a better performance with lower computational cost.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Marsella, Stacy C.; Gratch, Jonathan
Emotion and Dialogue in the MRE Virtual Humans Proceedings Article
In: Lecture Notes in Computer Science, pp. 117–127, Kloster Irsee, Germany, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_emotion_2004,
title = {Emotion and Dialogue in the MRE Virtual Humans},
author = {David Traum and Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Emotion%20and%20Dialogue%20in%20the%20MRE%20Virtual%20Humans.pdf},
year = {2004},
date = {2004-01-01},
booktitle = {Lecture Notes in Computer Science},
volume = {3068},
pages = {117--127},
address = {Kloster Irsee, Germany},
abstract = {We describe the emotion and dialogue aspects of the virtual agents used in the MRE project at USC. The models of emotion and dialogue started independently, though each makes crucial use of a central task model. In this paper we describe the task model, dialogue model, and emotion model, and the interactions between them.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Narayanan, Shrikanth; Ananthakrishnan, S.; Belvin, R.; Ettaile, E.; Gandhe, Sudeep; Ganjavi, S.; Georgiou, Panayiotis G.; Hein, C. M.; Kadambe, S.; Knight, K.; Marcu, D.; Neely, H. E.; Srinivasamurthy, Naveen; Wang, Dagen
The Transonics Spoken Dialogue Translator: An aid for English-Persian Doctor-Patient interviews Proceedings Article
In: Working Notes of the AAAI Fall Symposium on Dialogue Systems for Health Communication, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{narayanan_transonics_2004,
  author    = {Narayanan, Shrikanth and Ananthakrishnan, S. and Belvin, R. and Ettaile, E. and Gandhe, Sudeep and Ganjavi, S. and Georgiou, Panayiotis G. and Hein, C. M. and Kadambe, S. and Knight, K. and Marcu, D. and Neely, H. E. and Srinivasamurthy, Naveen and Wang, Dagen},
  title     = {The Transonics Spoken Dialogue Translator: An aid for English-Persian Doctor-Patient interviews},
  booktitle = {Working Notes of the AAAI Fall Symposium on Dialogue Systems for Health Communication},
  year      = {2004},
  date      = {2004-01-01},
  url       = {http://ict.usc.edu/pubs/The%20Transonics%20Spoken%20Dialogue%20Translator-%20An%20aid%20for%20English-Persian%20Doctor-Patient%20interviews.pdf},
  abstract  = {In this paper we describe our spoken english-persian medical dialogue translation system. We describe the data collection effort and give an overview of the component technologies, including speech recognition, translation, dialogue management, and user interface design. The individual modules and system are designed for flexibility, and to be able to leverage different amounts of available resources to maximize the ability for communication between medical care-giver and patient.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2003
Narayanan, Shrikanth; Ananthakrishnan, S.; Belvin, R.; Ettaile, E.; Ganjavi, S.; Georgiou, Panayiotis G.; Hein, C. M.; Kadambe, S.; Knight, K.; Marcu, D.; Neely, H. E.; Srinivasamurthy, Naveen; Traum, David; Wang, D.
Transonics: A Speech to Speech System for English-Persian Interactions Proceedings Article
In: Proceedings of Automatic Speech Recognition and Understanding Workshop, U.S. Virgin Islands, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{narayanan_transonics_2003,
title = {Transonics: A Speech to Speech System for English-Persian Interactions},
author = {Shrikanth Narayanan and S. Ananthakrishnan and R. Belvin and E. Ettaile and S. Ganjavi and Panayiotis G. Georgiou and C. M. Hein and S. Kadambe and K. Knight and D. Marcu and H. E. Neely and Naveen Srinivasamurthy and David Traum and D. Wang},
url = {http://ict.usc.edu/pubs/TRANSONICS-%20A%20SPEECH%20TO%20SPEECH%20SYSTEM%20FOR%20ENGLISH-PERSIAN%20INTERACTIONS.pdf},
year = {2003},
date = {2003-12-01},
booktitle = {Proceedings of Automatic Speech Recognition and Understanding Workshop},
address = {U.S. Virgin Islands},
abstract = {In this paper we describe the first phase of development of our speech-to-speech system between English and Modern Persian under the DARPA Babylon program. We give an overview of the various system components: the front end ASR, the machine translation system and the speech generation system. Challenges such as the sparseness of available spoken language data and solutions that have been employed to maximize the obtained benefits from using these limited resources are examined. Efforts in the creation of the user interface and the underlying dialog management system for mediated communication are described.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul
Image-Based Techniques for Digitizing Environments and Artifacts Proceedings Article
In: 4th International Conference on 3-D Digital Imaging and Modeling (3DIM), 2003.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{debevec_image-based_2003,
  author    = {Debevec, Paul},
  title     = {Image-Based Techniques for Digitizing Environments and Artifacts},
  booktitle = {4th International Conference on 3-D Digital Imaging and Modeling (3DIM)},
  year      = {2003},
  date      = {2003-10-01},
  url       = {http://ict.usc.edu/pubs/Image-Based%20Techniques%20for%20Digitizing%20Environments%20and%20Artifacts.pdf},
  abstract  = {This paper presents an overview of techniques for generating photoreal computer graphics models of real-world places and objects. Our group's early efforts in modeling scenes involved the development of Facade, an interactive photogrammetric modeling system that uses geometric primitives to model the scene, and projective texture mapping to produce the scene appearance properties. Subsequent work has produced techniques to model the incident illumination within scenes, which we have shown to be useful for realistically adding computer-generated objects to image-based models. More recently, our work has focussed on recovering lighting-independent models of scenes and objects, capturing how each point on an object reflects light. Our latest work combines three-dimensional range scans, digital photographs, and incident illumination measurements to produce lighting-independent models of complex objects and environments.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Martinovski, Bilyana; Traum, David; Robinson, Susan; Garg, Saurabh
Functions and Patterns of Speaker and Addressee Identifications in Distributed Complex Organizational Tasks Over Radio Proceedings Article
In: Proceedings of Diabruck (7th Workshop on the Semantics and Pragmatics of Dialogue), Saarbruecken Germany, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{martinovski_functions_2003,
title = {Functions and Patterns of Speaker and Addressee Identifications in Distributed Complex Organizational Tasks Over Radio},
author = {Bilyana Martinovski and David Traum and Susan Robinson and Saurabh Garg},
url = {http://ict.usc.edu/pubs/Functions%20and%20Patterns%20of%20Speaker%20and%20Addressee%20Identifications%20in%20Distributed%20Complex%20Organizational%20Tasks%20Over%20Radio.pdf},
year = {2003},
date = {2003-09-01},
booktitle = {Proceedings of Diabruck (7th Workshop on the Semantics and Pragmatics of Dialogue)},
address = {Saarbruecken Germany},
abstract = {In multiparty dialogue speakers must identify who they are addressing (at least to the addressee, and perhaps to overhearers as well). In non face-to-face situations, even the speaker's identity can be unclear. For talk within organizational teams working on critical tasks, such miscommunication must be avoided, and so organizational conventions have been adopted to signal addressee and speaker, (e.g., military radio communications). However, explicit guidelines, such as provided by the military are not always exactly followed (see also (Churcher et al., 1996)). Moreover, even simple actions like identifications of speaker and hearer can be performed in a variety of ways, for a variety of purposes. The purpose of this paper is to contribute to the understanding and predictability of identifications of speaker and addressee in radio mediated organization of work.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Itti, Laurent; Dhavale, Nitin; Pighin, Frédéric
Realistic Avatar Eye and Head Animation Using a Neurobiological Model of Visual Attention Proceedings Article
In: Proceedings of SPIE 48th Annual International Symposium on Optical Science and Technology, San Diego, CA, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{itti_realistic_2003,
  title     = {Realistic Avatar Eye and Head Animation Using a Neurobiological Model of Visual Attention},
  author    = {Laurent Itti and Nitin Dhavale and Frédéric Pighin},
  url       = {http://ict.usc.edu/pubs/Realistic%20Avatar%20Eye%20and%20Head%20Animation%20Using%20a%20Neurobiological%20Model%20of%20Visual%20Attention.pdf},
  doi       = {10.1117/12.512618},
  year      = {2003},
  date      = {2003-08-01},
  booktitle = {Proceedings of SPIE 48th Annual International Symposium on Optical Science and Technology},
  address   = {San Diego, CA},
  abstract  = {We describe a neurobiological model of visual attention and eye/head movements in primates, and its application to the automatic animation of a realistic virtual human head watching an unconstrained variety of visual inputs. The bottom-up (image-based) attention model is based on the known neurophysiology of visual processing along the occipito-parietal pathway of the primate brain, while the eye/head movement model is derived from recordings in freely behaving Rhesus monkeys. The system is successful at autonomously saccading towards and tracking salient targets in a variety of video clips, including synthetic stimuli, real outdoors scenes and gaming console outputs. The resulting virtual human eye/head animation yields realistic rendering of the simulation results, both suggesting applicability of this approach to avatar animation and reinforcing the plausibility of the neural model.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hill, Randall W.; Douglas, Jay; Gordon, Andrew S.; Pighin, Frédéric; Velson, Martin
Guided Conversations about Leadership: Mentoring with Movies and Interactive Characters Proceedings Article
In: Proceedings of the 15th Innovative Applications of Artificial Intelligence Conference, Acapulco, Mexico, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{hill_guided_2003,
  title     = {Guided Conversations about Leadership: Mentoring with Movies and Interactive Characters},
  author    = {Randall W. Hill and Jay Douglas and Andrew S. Gordon and Frédéric Pighin and Martin Velson},
  url       = {http://ict.usc.edu/pubs/Guided%20Conversations%20about%20Leadership-%20Mentoring%20with%20Movies%20and%20Interactive%20Characters.pdf},
  year      = {2003},
  date      = {2003-08-01},
  booktitle = {Proceedings of the 15th Innovative Applications of Artificial Intelligence Conference},
  address   = {Acapulco, Mexico},
  abstract  = {Think Like a Commander - Excellence in Leadership (TLAC-XL) is an application designed for learning leadership skills both from the experiences of others and through a structured dialogue about issues raised in a vignette. The participant watches a movie, interacts with a synthetic mentor and interviews characters in the story. The goal is to enable leaders to learn the human dimensions of leadership, addressing a gap in the training tools currently available to the U.S. Army. The TLAC-XL application employs a number of Artificial Intelligence technologies, including the use of a coordination architecture, a machine learning approach to natural language processing, and an algorithm for the automated animation of rendered human faces.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Kazemzadeh, Abe; Nair, Anish; Petrova, Milena
Recognizing Expressions of Commonsense Psychology in English Text Proceedings Article
In: Proceedings of the 41st Annual Meeting of the Association for Computational Linguistics (ACL), Sapporo, Japan, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_recognizing_2003,
  title     = {Recognizing Expressions of Commonsense Psychology in English Text},
  author    = {Andrew S. Gordon and Abe Kazemzadeh and Anish Nair and Milena Petrova},
  url       = {http://ict.usc.edu/pubs/Recognizing%20Expressions%20of%20Commonsense%20Psychology%20in%20English%20Text.PDF},
  year      = {2003},
  date      = {2003-07-01},
  booktitle = {Proceedings of the 41st Annual Meeting of the Association for Computational Linguistics (ACL)},
  address   = {Sapporo, Japan},
  abstract  = {Many applications of natural language processing technologies involve analyzing texts that concern the psychological states and processes of people, including their beliefs, goals, predictions, explanations, and plans. In this paper, we describe our efforts to create a robust, large-scale lexical-semantic resource for the recognition and classification of expressions of commonsense psychology in English Text. We achieve high levels of precision and recall by hand-authoring sets of local grammars for commonsense psychology concepts, and show that this approach can achieve classification performance greater than that obtained by using machine learning techniques. We demonstrate the utility of this resource for large-scale corpus analysis by identifying references to adversarial and competitive goal in political speeches throughout U.S. history.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Nair, Anish
Literary Evidence for the Cultural Development of a Theory of Mind Proceedings Article
In: Proceedings of the 25th Annual Meeting of the Cognitive Science Society (CogSci), Boston, MA, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_literary_2003,
  title     = {Literary Evidence for the Cultural Development of a Theory of Mind},
  author    = {Andrew S. Gordon and Anish Nair},
  url       = {http://ict.usc.edu/pubs/Literary%20Evidence%20for%20the%20Cultural%20Development%20of%20a%20Theory%20of%20Mind.PDF},
  year      = {2003},
  date      = {2003-07-01},
  booktitle = {Proceedings of the 25th Annual Meeting of the Cognitive Science Society (CogSci)},
  address   = {Boston, MA},
  abstract  = {The term Theory of Mind is used within the cognitive sciences to refer to the abilities that people have to reason about their own mental states and the mental states of others. An important question is whether these abilities are culturally acquired or innate to our species. This paper outlines the argument that the mental models that serve as the basis for Theory of Mind abilities are the product of cultural development. To support this thesis, we present evidence gathered from the large-scale automated analysis of text corpora. We show that the Freudian conception of a subconscious desire is a relatively modern addition to our culturally shared Theory of Mind, as evidenced by a shift in the way these ideas appeared in 19th and 20th century English language novels.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hill, Randall W.; Gratch, Jonathan; Marsella, Stacy C.; Swartout, William; Traum, David
Virtual Humans in the Mission Rehearsal Exercise System Proceedings Article
In: Künstliche Intelligenz (KI) (special issue on Embodied Conversational Agents), 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{hill_virtual_2003,
  title     = {Virtual Humans in the Mission Rehearsal Exercise System},
  author    = {Randall W. Hill and Jonathan Gratch and Stacy C. Marsella and William Swartout and David Traum},
  url       = {http://ict.usc.edu/pubs/Virtual%20Humans%20in%20the%20Mission%20Rehearsal%20Exercise%20System.pdf},
  year      = {2003},
  date      = {2003-06-01},
  booktitle = {Künstliche Intelligenz (KI) (special issue on Embodied Conversational Agents)},
  abstract  = {How can simulation be made more compelling and effective as a tool for learning? This is the question that the Institute for Creative Technologies (ICT) set out to answer when it was formed at the University of Southern California in 1999, to serve as a nexus between the simulation and entertainment communities. The ultimate goal of the ICT is to create the Experience Learning System (ELS), which will advance the state of the art in virtual reality immersion through use of high-resolution graphics, immersive audio, virtual humans and story-based scenarios. Once fully realized, ELS will make it possible for participants to enter places in time and space where they can interact with believable characters capable of conversation and action, and where they can observe and participate in events that are accessible only through simulation.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Iuppa, Nicholas
Experience Management Using Storyline Adaptation Strategies Proceedings Article
In: Proceedings of the First International Conference on Technologies for Digital Storytelling and Entertainment, Darmstadt, Germany, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_experience_2003,
  title     = {Experience Management Using Storyline Adaptation Strategies},
  author    = {Andrew S. Gordon and Nicholas Iuppa},
  url       = {http://ict.usc.edu/pubs/Experience%20Management%20Using%20Storyline%20Adaptation%20Strategies.PDF},
  year      = {2003},
  date      = {2003-03-01},
  booktitle = {Proceedings of the First International Conference on Technologies for Digital Storytelling and Entertainment},
  address   = {Darmstadt, Germany},
  abstract  = {The central problem of creating interactive drama is structuring a media experience for participants such that a good story is presented while enabling a high degree of meaningful interactivity. This paper presents a new approach to interactive drama, where pre-authored storylines are made interactive by adapting them at run-time by applying strategies that react to unexpected user behavior. The approach, called Experience Management, relies heavily on the explication of a broad range of adaptation strategies and a means of selecting which strategy is most appropriate given a particular story context. We describe a formal approach to storyline representation to enable the selection of applicable strategies, and a strategy formalization that allows for storyline modification. Finally, we discuss the application of this approach in the context of a story-based training system for military leadership skills, and the direction for continuing research.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Hobbs, Jerry R.
Coverage and Competency in Formal Theories: A Commonsense Theory of Memory Proceedings Article
In: Proceedings of the 2003 AAAI Spring Symposium on Logical Formalizations of Commonsense Reasoning, Stanford University, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_coverage_2003,
  title     = {Coverage and Competency in Formal Theories: A Commonsense Theory of Memory},
  author    = {Andrew S. Gordon and Jerry R. Hobbs},
  url       = {http://ict.usc.edu/pubs/Coverage%20and%20Competency%20in%20Formal%20Theories-%20A%20Commonsense%20Theory%20of%20Memory.PDF},
  year      = {2003},
  date      = {2003-03-01},
  booktitle = {Proceedings of the 2003 AAAI Spring Symposium on Logical Formalizations of Commonsense Reasoning},
  address   = {Stanford University},
  abstract  = {The utility of formal theories of commonsense reasoning will depend both on their competency in solving problems and on their conceptual coverage. We argue that the problems of coverage and competency can be decoupled and solved with different methods for a given commonsense domain. We describe a methodology for identifying the coverage requirements of theories through the large-scale analysis of planning strategies, with further refinements made by collecting and categorizing instances of natural language expressions pertaining to the domain. We demonstrate the effectiveness of this methodology in identifying the representational coverage requirements of theories of the commonsense psychology of human memory. We then apply traditional methods of formalization to produce a formal first-order theory of commonsense memory with a high degree of competency and coverage.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Fleischman, Michael; Hovy, Eduard
NL Generation for Virtual Humans in a Complex Social Environment Proceedings Article
In: AAAI Spring Symposium on Natural Language Generation in Spoken and Written Dialogue, pp. 151–158, 2003.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_nl_2003,
  title     = {NL Generation for Virtual Humans in a Complex Social Environment},
  author    = {David Traum and Michael Fleischman and Eduard Hovy},
  url       = {http://ict.usc.edu/pubs/NL%20Generation%20for%20Virtual%20Humans%20in%20a%20Complex%20Social%20Environment.pdf},
  year      = {2003},
  date      = {2003-03-01},
  booktitle = {AAAI Spring Symposium on Natural Language Generation in Spoken and Written Dialogue},
  pages     = {151--158},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David
Semantics and Pragmatics of Questions and Answers for Dialogue Agents Proceedings Article
In: International Workshop on Computational Semantics, 2003.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_semantics_2003,
  title     = {Semantics and Pragmatics of Questions and Answers for Dialogue Agents},
  author    = {David Traum},
  url       = {http://ict.usc.edu/pubs/Semantics%20and%20Pragmatics%20of%20Questions%20and%20Answers%20for%20Dialogue%20Agents.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {International Workshop on Computational Semantics},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Joshi, Pushkar; Tien, Wen C.; Desbrun, Mathieu; Pighin, Frédéric
Learning Controls for Blend Shape Based Realistic Facial Animation Proceedings Article
In: Breen, D.; Lin, M. (Ed.): Proceedings of the Eurographics/SIGGRAPH Symposium on Computer Animation, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{joshi_learning_2003,
  title     = {Learning Controls for Blend Shape Based Realistic Facial Animation},
  author    = {Pushkar Joshi and Wen C. Tien and Mathieu Desbrun and Frédéric Pighin},
  editor    = {D. Breen and M. Lin},
  url       = {http://ict.usc.edu/pubs/Learning%20Controls%20for%20Blend%20Shape%20Based%20Realistic%20Facial%20Animation.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of the Eurographics/SIGGRAPH Symposium on Computer Animation},
  abstract  = {Blend shape animation is the method of choice for keyframe facial animation: a set of blend shapes (key facial expressions) are used to define a linear space of facial expressions. However, in order to capture a significant range of complexity of human expressions, blend shapes need to be segmented into smaller regions where key idiosyncracies of the face being animated are present. Performing this segmentation by hand requires skill and a lot of time. In this paper, we propose an automatic, physically-motivated segmentation that learns the controls and parameters directly from the set of blend shapes. We show the usefulness and efficiency of this technique for both motion-capture animation and keyframing. We also provide a rendering algorithm to enhance the visual realism of a blend shape model.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling Coping Behaviors in Virtual Humans: Don't worry, Be Happy Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 313–320, Melbourne, Australia, 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_modeling_2003,
  title     = {Modeling Coping Behaviors in Virtual Humans: Don't worry, Be Happy},
  author    = {Stacy C. Marsella and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Modeling%20Coping%20Behavior%20in%20Virtual%20Humans-%20Dont%20worry%20Be%20happy.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  pages     = {313--320},
  address   = {Melbourne, Australia},
  abstract  = {This article builds on insights into how humans cope with emotion to guide the design of virtual humans. Although coping is increasingly viewed in the psychological literature as having a central role in human adaptive behavior, it has been largely ignored in computational models of emotion. In this paper, we show how psychological research on the interplay between human emotion, cognition and coping behavior can serve as a central organizing principle for the behavior of human-like autonomous agents. We present a detailed domain-independent model of coping based on this framework that significantly extends our previous work. We argue that this perspective provides novel insights into realizing adaptive behavior.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gardner, Andrew; Tchou, Chris; Hawkins, Tim; Debevec, Paul
Linear Light Source Reflectometry Proceedings Article
In: ACM Transactions on Graphics, 2003.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{gardner_linear_2003,
  title     = {Linear Light Source Reflectometry},
  author    = {Andrew Gardner and Chris Tchou and Tim Hawkins and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Linear%20Light%20Source%20Reflectometry.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {ACM Transactions on Graphics},
  abstract  = {This paper presents a technique for estimating the spatially-varying reflectance properties of a surface based on its appearance during a single pass of a linear light source. By using a linear light rather than a point light source as the illuminant, we are able to reliably observe and estimate the diffuse color, specular color, and specular roughness of each point of the surface. The reflectometry apparatus we use is simple and inexpensive to build, requiring a single direction of motion for the light source and a fixed camera viewpoint. Our model fitting technique first renders a reflectance table of how diffuse and specular reflectance lobes would appear under moving linear light source illumination. Then, for each pixel we compare its series of intensity values to the tabulated reflectance lobes to determine which reflectance model parameters most closely produce the observed reflectance values. Using two passes of the linear light source at different angles, we can also estimate per-pixel surface normals as well as the reflectance parameters. Additionally our system records a per-pixel height map for the object and estimates its per-pixel translucency. We produce real-time renderings of the captured objects using a custom hardware shading algorithm. We apply the technique to a test object exhibiting a variety of materials as well as to an illuminated manuscript with gold lettering. To demonstrate the technique's accuracy, we compare renderings of the captured models to real photographs of the original objects.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Mao, Wenji
Automating After Action Review: Attributing Blame or Credit in Team Training Proceedings Article
In: Proceedings of the 12th Conference on Behavior Representation in Modeling and Simulation, Scottsdale, AZ, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_automating_2003,
  title     = {Automating After Action Review: Attributing Blame or Credit in Team Training},
  author    = {Jonathan Gratch and Wenji Mao},
  url       = {http://ict.usc.edu/pubs/Automating%20After%20Action%20Review-%20Attributing%20Blame%20or%20Credit%20in%20Team%20Training.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of the 12th Conference on Behavior Representation in Modeling and Simulation},
  address   = {Scottsdale, AZ},
  abstract  = {Social credit assignment is a process of social judgment whereby one singles out individuals to blame or credit for multi-agent activities. Such judgments are a key aspect of social intelligence and underlie social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a preliminary computational approach to forming such judgments based on an agent's causal knowledge and conversation interactions.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
The Social Credit Assignment Problem Proceedings Article
In: Lecture Notes in Computer Science; Proceedings of the 4th International Workshop on Intelligent Virtual Agents (IVA), Kloster Irsee, Germany, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_social_2003-1,
  title     = {The Social Credit Assignment Problem},
  author    = {Wenji Mao and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/The%20Social%20Credit%20Assignment%20Problem.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of the 4th International Workshop on Intelligent Virtual Agents (IVA)},
  series    = {Lecture Notes in Computer Science},
  volume    = {2792},
  address   = {Kloster Irsee, Germany},
  abstract  = {Social credit assignment is a process of social judgment whereby one singles out individuals to blame or credit for multi-agent activities. Such judgments are a key aspect of social intelligence and underlie social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a preliminary computational approach to forming such judgments based on an agent's causal knowledge and conversation interactions.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Cao, Yong; Faloutsos, Petros; Pighin, Frédéric
Unsupervised Learning for Speech Motion Editing Proceedings Article
In: Proceedings of Eurographics/SIGGRAPH Symposium on Computer Animation, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{cao_unsupervised_2003,
  title     = {Unsupervised Learning for Speech Motion Editing},
  author    = {Yong Cao and Petros Faloutsos and Frédéric Pighin},
  url       = {http://ict.usc.edu/pubs/Unsupervised%20Learning%20for%20Speech%20Motion%20Editing.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of Eurographics/SIGGRAPH Symposium on Computer Animation},
  abstract  = {We present a new method for editing speech related facial motions. Our method uses an unsupervised learning technique, Independent Component Analysis (ICA), to extract a set of meaningful parameters without any annotation of the data. With ICA, we are able to solve a blind source separation problem and describe the original data as a linear combination of two sources. One source captures content (speech) and the other captures style (emotion). By manipulating the independent components we can edit the motions in intuitive ways.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Shapiro, Ari; Pighin, Frédéric
Hybrid Control For Interactive Character Animation Proceedings Article
In: Proceedings of the 11th Pacific Conference on Computer Graphics and Applications, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{shapiro_hybrid_2003,
  title     = {Hybrid Control For Interactive Character Animation},
  author    = {Ari Shapiro and Frédéric Pighin},
  url       = {http://ict.usc.edu/pubs/Hybrid%20Control%20For%20Interactive%20Character%20Animation.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of the 11th Pacific Conference on Computer Graphics and Applications},
  abstract  = {We implement a framework for animating interactive characters by combining kinematic animation with physical simulation. The combination of animation techniques allows the characters to exploit the advantages of each technique. For example, characters can perform natural-looking kinematic gaits and react dynamically to unexpected situations. Kinematic techniques such as those based on motion capture data can create very natural-looking animation. However, motion capture based techniques are not suitable for modeling the complex interactions between dynamically interacting characters. Physical simulation, on the other hand, is well suited for such tasks. Our work develops kinematic and dynamic controllers and transition methods between the two control methods for interactive character animation. In addition, we utilize the motion graph technique to develop complex kinematic animation from shorter motion clips as a method of kinematic control.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Morie, Jacquelyn; Williams, Josh
The Gestalt of Virtual Environments Proceedings Article
In: International Workshop on Presence, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{morie_gestalt_2003,
  title     = {The Gestalt of Virtual Environments},
  author    = {Jacquelyn Morie and Josh Williams},
  url       = {http://ict.usc.edu/pubs/The%20Gestalt%20of%20Virtual%20Environments.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {International Workshop on Presence},
  abstract  = {The majority of research in the field of virtual reality to date has focused on increasing the fidelity of the environments created and trying to determine the quality of the participant experience. Efforts have been made to quantify such aspects, especially in regards to visuals and sound, and to a lesser extent to the user experience. Recent thinking has tended towards the assumption that ever-greater fidelity would ensure a better user experience. However, such emphasis on photo-realism and audio-realism does not take into account the collective results of our multimodal sensory inputs with their intertwined effects. Our design philosophy for the creation of virtual environments attempts to replicate the human experience, and asks the question: Is there an underlying fidelity of feels-real through which the quality of the participant experience could be improved?},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Rickel, Jeff; Gratch, Jonathan; Marsella, Stacy C.
Negotiation over Tasks in Hybrid Human-Agent Teams for Simulation-Based Training Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 441–448, Melbourne, Australia, 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_negotiation_2003,
  title     = {Negotiation over Tasks in Hybrid Human-Agent Teams for Simulation-Based Training},
  author    = {David Traum and Jeff Rickel and Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Negotiation%20over%20Tasks%20in%20Hybrid%20Human-Agent%20Teams%20for%20Simulation-Based%20Training.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  pages     = {441--448},
  address   = {Melbourne, Australia},
  abstract  = {The effectiveness of simulation-based training for individual tasks – such as piloting skills – is well established, but its use for team training raises challenging technical issues. Ideally, human users could gain valuable leadership experience by interacting with synthetic teammates in realistic and potentially stressful scenarios. However, creating human-like teammates that can support flexible, natural interactions with humans and other synthetic agents requires integrating a wide variety of capabilities, including models of teamwork, models of human negotiation, and the ability to participate in face-to-face spoken conversations in virtual worlds. We have developed such virtual humans by integrating and extending prior work in these areas, and we have applied our virtual humans to an example peacekeeping training scenario to guide and evaluate our research. Our models allow agents to reason about authority and responsibility for individual actions in a team task and, as appropriate, to carry out actions, give and accept orders, monitor task execution, and negotiate options. Negotiation is guided by the agents' dynamic assessment of alternative actions given the current scenario conditions, with the aim of guiding the human user towards an ability to make similar assessments.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Martinovski, Bilyana; Traum, David
The Error Is the Clue: Breakdown In Human-Machine Interaction Proceedings Article
In: Proceedings of ISCA Tutorial and Research Workshop International Speech Communication Association, Switzerland, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{martinovski_error_2003,
  title     = {The Error Is the Clue: Breakdown In Human-Machine Interaction},
  author    = {Bilyana Martinovski and David Traum},
  url       = {http://ict.usc.edu/pubs/The%20Error%20Is%20the%20Clue-%20Breakdown%20In%20Human-Machine%20Interaction.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of ISCA Tutorial and Research Workshop International Speech Communication Association},
  address   = {Switzerland},
  abstract  = {This paper focuses not on the detection and correction of specific errors in the interaction between machines and humans, but rather cases of massive deviation from the user's conversational expectations and desires. This can be the result of too many or too unusual errors, but also from dialogue strategies designed to minimize error, which make the interaction unnatural in other ways. We study causes of irritation such as over-fragmentation, over-clarity, over-coordination, over-directedness, and repetitiveness of verbal action, syntax, and intonation. Human reactions to these irritating features typically appear in the following order: tiredness, tolerance, anger, confusion, irony, humor, exhaustion, uncertainty, lack of desire to communicate. The studied features of human expressions of irritation in non-face-to-face interaction are: intonation, emphatic speech, elliptic speech, speed of speech, extra-linguistic signs, speed of verbal action, and overlap.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Unger, J.; Wenger, Andreas; Hawkins, Tim; Gardner, Andrew; Debevec, Paul
Capturing and Rendering With Incident Light Fields Proceedings Article
In: Proceedings of the 14th Eurographics workshop on Rendering, 2003.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{unger_capturing_2003,
  title     = {Capturing and Rendering With Incident Light Fields},
  author    = {Jonas Unger and Andreas Wenger and Tim Hawkins and Andrew Gardner and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Capturing%20and%20Rendering%20With%20Incident%20Light%20Fields.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of the 14th Eurographics workshop on Rendering},
  abstract  = {This paper presents a process for capturing spatially and directionally varying illumination from a real-world scene and using this lighting to illuminate computer-generated objects. We use two devices for capturing such illumination. In the first we photograph an array of mirrored spheres in high dynamic range to capture the spatially varying illumination. In the second, we obtain higher resolution data by capturing images with a high dynamic range omnidirectional camera as it traverses across a plane. For both methods we apply the light field technique to extrapolate the incident illumination to a volume. We render computer-generated objects as illuminated by this captured illumination using a custom shader within an existing global illumination rendering system. To demonstrate our technique we capture several spatially-varying lighting environments with spotlights, shadows, and dappled lighting and use them to illuminate synthetic scenes. We also show comparisons to real objects under the same illumination.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2002
Bharitkar, Sunil; Kyriakakis, Chris
Robustness of Spatial Averaging Equalization Methods: A Statistical Approach Proceedings Article
In: IEEE 36th Asilomar Conference on Signals, Systems & Computers, Pacific Grove, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_robustness_2002-1,
title = {Robustness of Spatial Averaging Equalization Methods: A Statistical Approach},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Robustness%20of%20Spatial%20Averaging%20Equalization%20Methods-%20A%20Statistical%20Approach.pdf},
year = {2002},
date = {2002-11-01},
booktitle = {IEEE 36th Asilomar Conference on Signals, Systems \& Computers},
address = {Pacific Grove, CA},
abstract = {Traditionally, room response equalization is performed to improve sound quality at a given listener. However, room responses vary with source and listener positions. Hence, in a multiple listener environment, equalization may be performed through spatial averaging of room responses. However, the performance of averaging based equalization, at the listeners, may be affected when listener positions change. In this paper, we present a statistical approach to map variations in listener positions to performance of spatial averaging based equalization. The results indicate that, for the analyzed listener configurations, the zone of equalization depends on distance of microphones from a source and the frequencies in the sound.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bharitkar, Sunil; Kyriakakis, Chris
Perceptual Multiple Location Equalization with Clustering Proceedings Article
In: IEEE 36th Asilomar Conference on Signals, Systems & Computers, Pacific Grove, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_perceptual_2002,
title = {Perceptual Multiple Location Equalization with Clustering},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Perceptual%20Multiple%20Location%20Equalization%20with%20Clustering.pdf},
year = {2002},
date = {2002-11-01},
booktitle = {IEEE 36th Asilomar Conference on Signals, Systems \& Computers},
address = {Pacific Grove, CA},
abstract = {Typically, room equalization techniques do not focus on designing filters that equalize the room transfer functions on perceptually relevant spectral features. In this paper we address the problem of room equalization for multiple listeners, simultaneously, using a perceptually designed equalization filter based on pattern recognition techniques. Some features of the proposed filter are, its ability to perform simultaneous equalization at multiple locations, a reduced order, and a psychoacoustically motivated design. In summary, the simultaneous multiple location equalization, using a pattern recognition method, is performed over perceptually relevant spectral components derived from the auditory filtering mechanism.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bharitkar, Sunil; Hilmes, Philip; Kyriakakis, Chris
Robustness of Multiple Listener Equalization With Magnitude Response Averaging Proceedings Article
In: Proceedings of the Audio Engineering Society Convention, Los Angeles, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_robustness_2002,
title = {Robustness of Multiple Listener Equalization With Magnitude Response Averaging},
author = {Sunil Bharitkar and Philip Hilmes and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Robustness%20of%20Multiple%20Listener%20Equalization%20With%20Magnitude%20Response%20Averaging.pdf},
year = {2002},
date = {2002-10-01},
booktitle = {Proceedings of the Audio Engineering Society Convention},
address = {Los Angeles, CA},
abstract = {Traditionally, room response equalization is performed to improve sound quality at a given listener. However, room responses vary with source and listener positions. Hence, in a multiple listener environment, equalization may be performed through spatial averaging of magnitude responses at locations of interest. However, the performance of averaging based equalization, at the listeners, may be affected when listener positions change. In this paper, we present a statistical approach to map variations in listener positions to a performance metric of equalization for magnitude response averaging. The results indicate that, for the analyzed listener configurations, the zone of equalization depends on distance of microphones from a source and the frequencies in the sound.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgiou, Panayiotis G.; Kyriakakis, Chris
An Alternative Model for Sound Signals Encountered in Reverberant Environments; Robust Maximum Likelihood Localization and Parameter Estimation Based on a Sub-Gaussian Model Proceedings Article
In: Proceedings of the Audio Engineering Society Convention, Los Angeles, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{georgiou_alternative_2002,
title = {An Alternative Model for Sound Signals Encountered in Reverberant Environments; Robust Maximum Likelihood Localization and Parameter Estimation Based on a Sub-Gaussian Model},
author = {Panayiotis G. Georgiou and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/An%20Alternative%20Model%20for%20Sound%20Signals%20Encountered%20in%20Reverberant%20Environments%3b%20Robust%20Maximum%20Likelihood%20Localization%20and%20Parameter%20Estimation%20Based%20on%20a%20Sub-Gaussian%20Model.pdf},
year = {2002},
date = {2002-10-01},
booktitle = {Proceedings of the Audio Engineering Society Convention},
address = {Los Angeles, CA},
abstract = {In this paper we investigate an alternative to the Gaussian density for modeling signals encountered in audio environments. The observation that sound signals are impulsive in nature, combined with the reverberation effects commonly encountered in audio, motivates the use of the Sub-Gaussian density. The new Sub-Gaussian statistical model and the separable solution of its Maximum Likelihood estimator are derived. These are used in an array scenario to demonstrate with both simulations and two different microphone arrays the achievable performance gains. The simulations exhibit the robustness of the sub-Gaussian based method while the real world experiments reveal a significant performance gain, supporting the claim that the sub-Gaussian model is better suited for sound signals.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, W. Lewis; Narayanan, Shrikanth; Whitney, Richard; Das, Rajat; Labore, Catherine
Limited Domain Synthesis of Expressive Military Speech for Animated Characters Proceedings Article
In: IEEE 2002 Workshop on Speech Synthesis, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{johnson_limited_2002,
title = {Limited Domain Synthesis of Expressive Military Speech for Animated Characters},
author = {W. Lewis Johnson and Shrikanth Narayanan and Richard Whitney and Rajat Das and Catherine Labore},
url = {http://ict.usc.edu/pubs/Limited%20Domain%20Synthesis%20of%20Expressive%20Military%20Speech%20for%20Animated%20Characters.pdf},
year = {2002},
date = {2002-09-01},
booktitle = {IEEE 2002 Workshop on Speech Synthesis},
abstract = {Text-to-speech synthesis can play an important role in interactive education and training applications, as voices for animated agents. Such agents need high-quality voices capable of expressing intent and emotion. This paper presents preliminary results in an effort aimed at synthesizing expressive military speech for training applications. Such speech has acoustic and prosodic characteristics that can differ markedly from ordinary conversational speech. A limited domain synthesis approach is used employing samples of expressive speech, classified according to speaking style. The resulting synthesizer was tested both in isolation and in the context of a virtual reality training scenario with animated characters.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
The Theory of Mind in Strategy Representations Proceedings Article
In: Proceedings of the Twenty-fourth Annual Meeting of the Cognitive Science Society (CogSci), Lawrence Erlbaum Associates, George Mason University, 2002.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_theory_2002,
title = {The Theory of Mind in Strategy Representations},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/The%20Theory%20of%20Mind%20in%20Strategy%20Representations.PDF},
year = {2002},
date = {2002-08-01},
booktitle = {Proceedings of the Twenty-fourth Annual Meeting of the Cognitive Science Society (CogSci)},
publisher = {Lawrence Erlbaum Associates},
address = {George Mason University},
abstract = {Many scientific fields continue to explore cognition related to Theory of Mind abilities, where people reason about the mental states of themselves and others. Experimental and theoretical approaches to this problem have largely avoided issues concerning the contents of representations employed in this class of reasoning. In this paper, we describe a new approach to the investigation of representations related to Theory of Mind abilities that is based on the analysis of commonsense strategies. We argue that because the mental representations of strategies must include concepts of mental states and processes, the large-scale analysis of strategies can be informative of the representational scope of Theory of Mind abilities. The results of an analysis of this sort are presented as a description of thirty representational areas that organize the breadth of Theory of Mind concepts. Implications for Theory Theories and Simulation Theories of Theory of Mind reasoning are discussed.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Angros, Richard Jr.; Johnson, W. Lewis; Rickel, Jeff; Scholer, Andrew
Learning Domain Knowledge for Teaching Procedural Skills Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Bologna, Italy, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{angros_learning_2002,
title = {Learning Domain Knowledge for Teaching Procedural Skills},
author = {Richard Jr. Angros and W. Lewis Johnson and Jeff Rickel and Andrew Scholer},
url = {http://ict.usc.edu/pubs/Learning%20Domain%20Knowledge%20for%20Teaching%20Procedural%20Skills.pdf},
year = {2002},
date = {2002-07-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Bologna, Italy},
abstract = {This paper describes a method for acquiring procedural knowledge for use by pedagogical agents in interactive simulation-based learning environments. Such agents need to be able to adapt their behavior to the changing conditions of the simulated world, and respond appropriately in mixed-initiative interactions with learners. This requires a good understanding of the goals and causal dependencies in the procedures being taught. Our method, inspired by human tutorial dialog, combines direct specification, demonstration, and experimentation. The human instructor demonstrates the skill being taught, while the agent observes the effects of the procedure on the simulated world. The agent then autonomously experiments with the procedure, making modifications to it, in order to understand the role of each step in the procedure. At various points the instructor can provide clarifications, and modify the developing procedural description as needed. This method is realized in a system called Diligent, which acquires procedural knowledge for the STEVE animated pedagogical agent.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Rickel, Jeff
Embodied Agents for Multi-party Dialogue in Immersive Virtual Worlds Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Bologna, Italy, 2002.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_embodied_2002,
title = {Embodied Agents for Multi-party Dialogue in Immersive Virtual Worlds},
author = {David Traum and Jeff Rickel},
url = {http://ict.usc.edu/pubs/Embodied%20Agents%20for%20Multi-party%20Dialogue%20in%20Immersive%20%20Virtual%20Worlds.pdf},
year = {2002},
date = {2002-07-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {Bologna, Italy},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kovar, Lucas; Gleicher, Michael; Pighin, Frédéric
Motion Graphs Proceedings Article
In: Proceedings of SIGGRAPH '02, San Antonio, TX, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{kovar_motion_2002,
title = {Motion Graphs},
author = {Lucas Kovar and Michael Gleicher and Frédéric Pighin},
url = {http://ict.usc.edu/pubs/Motion%20Graphs.pdf},
year = {2002},
date = {2002-07-01},
booktitle = {Proceedings of SIGGRAPH '02},
address = {San Antonio, TX},
abstract = {In this paper we present a novel method for creating realistic, controllable motion. Given a corpus of motion capture data, we automatically construct a directed graph called a motion graph that encapsulates connections among the database. The motion graph consists both of pieces of original motion and automatically generated transitions. Motion can be generated simply by building walks on the graph. We present a general framework for extracting particular graph walks that meet a user's specifications. We then show how this framework can be applied to the specific problem of generating different styles of locomotion along arbitrary paths.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Debevec, Paul; Wenger, Andreas; Tchou, Chris; Gardner, Andrew; Waese, Jamie; Hawkins, Tim
A Lighting Reproduction Approach to Live-Action Compositing Proceedings Article
In: SIGGRAPH 2002, pp. 547–556, San Antonio, TX, 2002.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{debevec_lighting_2002,
title = {A Lighting Reproduction Approach to Live-Action Compositing},
author = {Paul Debevec and Andreas Wenger and Chris Tchou and Andrew Gardner and Jamie Waese and Tim Hawkins},
url = {http://ict.usc.edu/pubs/A%20Lighting%20Reproduction%20Approach%20to%20Live-Action%20Compositing.pdf},
year = {2002},
date = {2002-07-01},
booktitle = {SIGGRAPH 2002},
pages = {547--556},
address = {San Antonio, TX},
abstract = {We describe a process for compositing a live performance of an actor into a virtual set wherein the actor is consistently illuminated by the virtual environment. The Light Stage used in this work is a two-meter sphere of inward-pointing RGB light emitting diodes focused on the actor, where each light can be set to an arbitrary color and intensity to replicate a real-world or virtual lighting environment. We implement a digital two-camera infrared matting system to composite the actor into the background plate of the environment without affecting the visible-spectrum illumination on the actor. The color response of the system is calibrated to produce correct color renditions of the actor as illuminated by the environment. We demonstrate moving-camera composites of actors into real-world environments and virtual sets such that the actor is properly illuminated by the environment into which they are composited.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling the Influence of Emotion on Belief for Virtual Training Simulations Proceedings Article
In: Proceedings of the 11th Conference on Computer Generated Forces and Behavioral Simulation, Orlando, FL, 2002.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_modeling_2002,
title = {Modeling the Influence of Emotion on Belief for Virtual Training Simulations},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Modeling%20the%20influence%20of%20emotion.pdf},
year = {2002},
date = {2002-06-01},
booktitle = {Proceedings of the 11th Conference on Computer Generated Forces and Behavioral Simulation},
address = {Orlando, FL},
abstract = {Recognizing and managing emotion in oneself and in those under one's command is an important component of leadership training. Most computational models of emotion have focused on the problem of identifying emotional features of the physical environment and mapping that into motivations to act in the world. But emotions also influence how we perceive the world and how we communicate that perception to others. This paper outlines an initial computational foray into this more vexing problem.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}