Publications
Marsella, Stacy C.; Gratch, Jonathan
A step toward irrationality: using emotion to change belief Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 334–341, Bologna, Italy, 2002.
@inproceedings{marsella_step_2002,
title = {A step toward irrationality: using emotion to change belief},
author = {Stacy C. Marsella and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/A%20step%20toward%20irrationality-%20using%20emotion%20to%20change%20belief.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
volume = {1},
pages = {334–341},
address = {Bologna, Italy},
abstract = {Emotions have a powerful impact on behavior and beliefs. The goal of our research is to create general computational models of this interplay of emotion, cognition and behavior to inform the design of virtual humans. Here, we address an aspect of emotional behavior that has been studied extensively in the psychological literature but largely ignored by computational approaches, emotion-focused coping. Rather than motivating external action, emotion-focused coping strategies alter beliefs in response to strong emotions. For example, an individual may alter beliefs about the importance of a goal that is being threatened, thereby reducing their distress. We present a preliminary model of emotion-focused coping and discuss how coping processes, in general, can be coupled to emotions and behavior. The approach is illustrated within a virtual reality training environment where the models are used to create virtual human characters in high-stress social situations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
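A minimal sketch of the emotion-focused coping idea described in the abstract above, in Python. The threshold and discount values are illustrative assumptions, not parameters from the paper:

def emotion_focused_coping(goal_importance, distress, threshold=0.7, discount=0.5):
    """Illustrates the abstract's example strategy: when distress over a
    threatened goal is strong enough, the agent revises its belief about the
    goal's importance downward, which in turn reduces the distress.
    threshold and discount are illustrative values, not the paper's."""
    if distress > threshold:
        goal_importance *= discount
        distress *= discount
    return goal_importance, distress

# A highly distressing threat to an important goal:
print(emotion_focused_coping(goal_importance=1.0, distress=0.9))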
Pighin, Frédéric; Szeliski, Richard; Salesin, David H.
Modeling and Animating Realistic Faces from Images Journal Article
In: International Journal of Computer Vision, vol. 50, pp. 143–169, 2002.
@article{pighin_modeling_2002,
title = {Modeling and Animating Realistic Faces from Images},
author = {Frédéric Pighin and Richard Szeliski and David H. Salesin},
url = {http://ict.usc.edu/pubs/Modeling%20and%20Animating%20Realistic%20Faces%20from%20Images.pdf},
year = {2002},
date = {2002-01-01},
journal = {International Journal of Computer Vision},
volume = {50},
pages = {143–169},
abstract = {We present a new set of techniques for modeling and animating realistic faces from photographs and videos. Given a set of face photographs taken simultaneously, our modeling technique allows the interactive recovery of a textured 3D face model. By repeating this process for several facial expressions, we acquire a set of face models that can be linearly combined to express a wide range of expressions. Given a video sequence, this linear face model can be used to estimate the face position, orientation, and facial expression at each frame. We illustrate these techniques on several datasets and demonstrate robust estimations of detailed face geometry and motion.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
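The linear face model in the abstract above lends itself to a short sketch: captured expression meshes are combined with convex weights to produce new expressions. The names and shapes here are illustrative; the paper's full pipeline also recovers textures and head pose:

import numpy as np

def blend_faces(expression_meshes, weights):
    """Linearly combine captured expression meshes (each an (n_vertices, 3)
    array) into a new face. Weights are normalized so the blend stays in the
    convex hull of the captured expressions."""
    w = np.asarray(weights, dtype=float)
    w = w / w.sum()
    meshes = np.stack(expression_meshes)    # (n_expr, n_vertices, 3)
    return np.tensordot(w, meshes, axes=1)  # (n_vertices, 3)

# Example: halfway between neutral and a smile, with a hint of surprise.
neutral  = np.zeros((1000, 3))
smile    = np.random.rand(1000, 3) * 0.01
surprise = np.random.rand(1000, 3) * 0.01
face = blend_faces([neutral, smile, surprise], [0.45, 0.45, 0.10])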
Gratch, Jonathan
Details of the CFOR Planner Technical Report
University of Southern California Institute for Creative Technologies, Marina del Rey, CA, no. ICT TR 01.2002, 2002.
@techreport{gratch_details_2002,
title = {Details of the CFOR Planner},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Details%20of%20the%20CFOR%20Planner.pdf},
year = {2002},
date = {2002-01-01},
number = {ICT TR 01.2002},
address = {Marina del Rey, CA},
institution = {University of Southern California Institute for Creative Technologies},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Hill, Randall W.; Kim, Youngjun; Gratch, Jonathan
Anticipating where to look: predicting the movements of mobile agents in complex terrain Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 821–827, Bologna, Italy, 2002.
@inproceedings{hill_anticipating_2002,
title = {Anticipating where to look: predicting the movements of mobile agents in complex terrain},
author = {Randall W. Hill and Youngjun Kim and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Anticipating%20Where%20to%20Look-%20Predicting%20the%20Movements%20of%20Mobile%20Agents%20in%20Complex%20Terrain.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
volume = {2},
pages = {821–827},
address = {Bologna, Italy},
abstract = {This paper describes a method for making short-term predictions about the movement of mobile agents in complex terrain. Virtual humans need this ability in order to shift their visual attention between dynamic objects-predicting where an object will be located a few seconds in the future facilitates the visual reacquisition of the target object. Our method takes into account environmental cues in making predictions and it also indicates how long the prediction is valid, which varies depending on the context. We implemented this prediction technique in a virtual pilot that flies a helicopter in a synthetic environment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
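The core of the prediction idea above can be sketched as dead reckoning plus a context-dependent validity window. This is a simplified illustration under assumed thresholds; the paper's method additionally exploits terrain cues such as roads and obstacles:

import numpy as np

def predict_position(pos, vel, dt, near_junction=False):
    """Dead-reckon an agent's position dt seconds ahead and report how long
    the prediction should be trusted. Near a junction the agent may turn, so
    the validity window shrinks; both window values are illustrative, not
    the paper's actual context model."""
    predicted = np.asarray(pos) + np.asarray(vel) * dt
    validity = 1.0 if near_junction else 5.0  # seconds the estimate holds
    return predicted, validity

pos, vel = np.array([120.0, 40.0]), np.array([3.0, -1.0])
where, valid_for = predict_position(pos, vel, dt=2.0)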
Rickel, Jeff; Marsella, Stacy C.; Gratch, Jonathan; Hill, Randall W.; Traum, David; Swartout, William
Toward a New Generation of Virtual Humans for Interactive Experiences Journal Article
In: IEEE Intelligent Systems, 2002.
@article{rickel_toward_2002,
title = {Toward a New Generation of Virtual Humans for Interactive Experiences},
author = {Jeff Rickel and Stacy C. Marsella and Jonathan Gratch and Randall W. Hill and David Traum and William Swartout},
url = {http://ict.usc.edu/pubs/Toward%20a%20New%20Generation%20of%20Virtual%20Humans%20for%20Interactive%20Experiences.pdf},
year = {2002},
date = {2002-01-01},
journal = {IEEE Intelligent Systems},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rickel, Jeff; Johnson, W. Lewis
Extending Virtual Humans to Support Team Training in Virtual Reality Book Section
In: Lakemeyer, G.; Nebel, B. (Ed.): Exploring Artificial Intelligence in the New Millennium, Morgan Kaufmann Publishers, San Francisco, CA, 2002.
@incollection{rickel_extending_2002,
title = {Extending Virtual Humans to Support Team Training in Virtual Reality},
author = {Jeff Rickel and W. Lewis Johnson},
editor = {G. Lakemeyer and B. Nebel},
url = {http://ict.usc.edu/pubs/Extending%20Virtual%20Humans%20to%20Support%20Team%20Training%20in%20Virtual%20Reality.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {Exploring Artificial Intelligence in the New Millennium},
publisher = {Morgan Kaufmann Publishers},
address = {San Francisco, CA},
abstract = {This paper describes the use of virtual humans and distributed virtual reality to support team training, where students must learn their individual role in the team as well as how to coordinate their actions with their teammates. Students, instructors, and virtual humans cohabit a three-dimensional, interactive, simulated mock-up of their work environment, where they can practice together in realistic situations. The virtual humans can serve as instructors for individual students, and they can substitute for missing team members, allowing students to practice team tasks when some or all human instructors and teammates are unavailable. The paper describes our learning environment, the issues that arise in developing virtual humans for team training, and our design for the virtual humans, which is an extension of our Steve agent previously used for one-on-one tutoring.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Morie, Jacquelyn; Iyer, Kumar; Valanejad, R.; Sadek, Ramy; Miraglia, D.; Milam, D.
Emotionally Evocative Environments for Training Proceedings Article
In: Proceedings of the 23rd Army Science Conference, Orlando, FL, 2002.
@inproceedings{morie_emotionally_2002,
title = {Emotionally Evocative Environments for Training},
author = {Jacquelyn Morie and Kumar Iyer and R. Valanejad and Ramy Sadek and D. Miraglia and D. Milam},
url = {http://ict.usc.edu/pubs/EMOTIONALLY%20EVOCATIVE%20ENVIRONMENTS%20FOR%20TRAINING.pdf},
year = {2002},
date = {2002-01-01},
booktitle = {Proceedings of the 23rd Army Science Conference},
address = {Orlando, FL},
abstract = {This paper describes a project currently in progress at the University of Southern California's Institute for Creative Technologies (ICT). Much of the research at ICT involves developing better graphics, sound and artificial intelligence to be used in creating the next generation of training tools for the United States Army. Our project focuses on the use of emotional responses as an enhancement for training. Research indicates that an emotional connection is a strong factor in how and what we remember. In addition, real world situations often evoke surprising and significant emotional reactions that soldiers must deal with. Few current immersive training scenarios, however, focus on the emotional state of the trainee, limiting training scenarios to basic objective elements. The Sensory Environments Evaluation (SEE) Project at ICT is investigating the potential of emotionally compelling environments for more effective training. We do this by skillfully combining the sensory inputs available in virtual environments. Our current efforts concentrate on sight and sound; smell will be included as scent delivery methods improve. Evaluation studies are planned to determine the effectiveness of the techniques we are developing.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Rickel, Jeff; André, Elisabeth; Cassell, Justine; Petajan, Eric; Badler, Norman
Creating Interactive Virtual Humans: Some Assembly Required Journal Article
In: IEEE Intelligent Systems, pp. 54–63, 2002.
@article{gratch_creating_2002,
title = {Creating Interactive Virtual Humans: Some Assembly Required},
author = {Jonathan Gratch and Jeff Rickel and Elisabeth André and Justine Cassell and Eric Petajan and Norman Badler},
url = {http://ict.usc.edu/pubs/Creating%20Interactive%20Virtual%20Humans-%20Some%20Assembly%20Required.pdf},
year = {2002},
date = {2002-01-01},
journal = {IEEE Intelligent Systems},
pages = {54–63},
abstract = {Science fiction has long imagined a future populated with artificial humans–human-looking devices with human-like intelligence. Although Asimov's benevolent robots and the Terminator movies' terrible war machines are still a distant fantasy, researchers across a wide range of disciplines are beginning to work together toward a more modest goal–building virtual humans. These software entities look and act like people and can engage in conversation and collaborative tasks, but they live in simulated environments. With the untidy problems of sensing and acting in the physical world thus dispensed, the focus of virtual human research is on capturing the richness and dynamics of human behavior.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Bharitkar, Sunil; Kyriakakis, Chris
Robustness of the Eigenfilter for Variations in Listener Responses for Selective Signal Cancellation Proceedings Article
In: IEEE Workshop on Applications of Signal Processing to Audio and Acoustics, New Paltz, New York, 2001.
@inproceedings{bharitkar_robustness_2001,
title = {Robustness of the Eigenfilter for Variations in Listener Responses for Selective Signal Cancellation},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/ROBUSTNESS%20OF%20THE%20EIGENFILTER%20FOR%20VARIATIONS%20IN%20LISTENER%20RESPONSES%20FOR%20SELECTIVE%20SIGNAL%20CANCELLATION.pdf},
year = {2001},
date = {2001-10-01},
booktitle = {IEEE Workshop on Applications of Signal Processing to Audio and Acoustics},
address = {New Paltz, New York},
abstract = {Selectively cancelling signals at specific locations within an acoustical environment with multiple listeners is of significant importance for home theater, automobile, teleconferencing, office, industrial and other applications. We have proposed the eigenfilter for selectively cancelling signals in one direction, while attempting to retain them at unintended directions. In this paper we investigate the behaviour of the performance measure (i.e., the gain) for a vowel and an unvoiced fricative when the listener moves his head in an automobile-type environment. We show that in such a situation, a large energy in the difference between the impulse responses at a listener's location may affect the gain substantially. This is of particular relevance for environments with multiple listeners in which only a subset wish to listen to the audio signal.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
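A sketch of the eigenfilter construction the abstract refers to: the filter that maximizes energy delivered through one room response relative to another is the dominant generalized eigenvector of the two channel covariance matrices. The tap count, white-noise input, and regularization are assumptions for illustration:

import numpy as np
from scipy.linalg import eigh

def eigenfilter(h_keep, h_cancel, n_taps=32):
    """Filter maximizing output energy through h_keep relative to h_cancel
    for a white-noise input: the dominant generalized eigenvector of the two
    covariance matrices. The paper evaluates the gain on speech sounds
    (a vowel and a fricative); white noise is a simplification here."""
    def covariance(h):
        # Rows are h delayed by 0..n_taps-1 samples; C @ C.T is the Gram
        # matrix of the convolution operator over n_taps filter taps.
        C = np.array([np.convolve(h, np.eye(n_taps)[i]) for i in range(n_taps)])
        return C @ C.T
    R_keep, R_cancel = covariance(h_keep), covariance(h_cancel)
    vals, vecs = eigh(R_keep, R_cancel + 1e-9 * np.eye(n_taps))  # regularized
    return vecs[:, -1]  # eigenvector with the largest generalized eigenvalue

# Impulse responses toward the intended and unintended listener (toy values).
w = eigenfilter(np.array([1.0, 0.5, 0.25]), np.array([0.2, 1.0, 0.3]))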
Gratch, Jonathan; Douglas, Jay
Adaptive narrative: How autonomous agents, hollywood, and multiprocessing operating systems can live happily ever after Proceedings Article
In: Proceedings of International Conference on Virtual Storytelling, pp. 100–112, Avignon, France, 2001, ISBN: 3-540-42611-6.
@inproceedings{gratch_adaptive_2001,
title = {Adaptive narrative: How autonomous agents, hollywood, and multiprocessing operating systems can live happily ever after},
author = {Jonathan Gratch and Jay Douglas},
url = {http://ict.usc.edu/pubs/Adaptive%20Narrative-%20How%20Autonomous%20Agents,%20Hollywood,%20and%20Multiprocessing%20Operating%20Systems%20Can%20Live%20Happily%20Ever%20After.pdf},
doi = {10.1007/3-540-45420-9_12},
isbn = {3-540-42611-6},
year = {2001},
date = {2001-10-01},
booktitle = {Proceedings of International Conference on Virtual Storytelling},
pages = {100–112},
address = {Avignon, France},
series = {LNCS},
abstract = {Interactive Storytelling systems integrate AI techniques such as planning with narrative representations to generate stories. In this paper, we discuss the use of planning formalisms in Interactive Storytelling from the perspective of story generation and authoring. We compare two different planning formalisms, Hierarchical Task Network (HTN) planning and Heuristic Search Planning (HSP). While HTN planning provides a strong basis for narrative coherence in the context of interactivity, HSP offers additional flexibility in the generation of stories and in the mechanisms for generating comic situations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rickel, Jeff
Intelligent Virtual Agents for Education and Training: Opportunities and Challenges Proceedings Article
In: Intelligent Virtual Agents: The 3rd International Workshop, Madrid, Spain, 2001.
@inproceedings{rickel_intelligent_2001,
title = {Intelligent Virtual Agents for Education and Training: Opportunities and Challenges},
author = {Jeff Rickel},
url = {http://ict.usc.edu/pubs/Intelligent%20Virtual%20Agents%20for%20Education%20and%20Training-%20Opportunities%20and%20Challenges.pdf},
year = {2001},
date = {2001-09-01},
booktitle = {Intelligent Virtual Agents: The 3rd International Workshop},
address = {Madrid, Spain},
abstract = {Interactive virtual worlds provide a powerful medium for experiential learning. Intelligent virtual agents can cohabit virtual worlds with people and facilitate such learning as guides, mentors, and teammates. This paper reviews the main pedagogical advantages of animated agents in virtual worlds, discusses two key research challenges, and outlines an ambitious new project addressing those challenges.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bharitkar, Sunil; Kyriakakis, Chris
New Factors in Room Equalization Using a Fuzzy Logic Approach Proceedings Article
In: Proceedings of the Audio Engineering Society Convention, New York, NY, 2001.
@inproceedings{bharitkar_new_2001,
title = {New Factors in Room Equalization Using a Fuzzy Logic Approach},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/New%20Factors%20in%20Room%20Equalization%20Using%20a%20Fuzzy%20Logic%20Approach.pdf},
year = {2001},
date = {2001-09-01},
booktitle = {Proceedings of the Audio Engineering Society Convention},
address = {New York, NY},
abstract = {Room acoustical modes, particularly in small rooms, cause a significant variation in the room responses measured at different locations. Responses measured only a few cm apart can vary by up to 15-20 dB at certain frequencies. This makes it difficult to equalize an audio system for multiple simultaneous listeners. Previous methods have utilized multiple microphones and spatial averaging with equal weighting. In this paper we present a different multiple point equalization method. We first determine representative prototypical room responses derived from several room responses that share similar characteristics, using the fuzzy unsupervised learning method. These prototypical responses can then be combined to form a general point response. When we use the inverse of the general point response as an equalizing filter, our results show a significant improvement in equalization performance over the spatial averaging methods. This simultaneous equalization is achieved by suppressing the peaks in the room magnitude spectrums. Applications of this method thus include equalization and multiple point sound control at home and in automobiles.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
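The equalization scheme in the abstract can be sketched in two steps: form a fuzzy-weighted prototype of the measured responses, then invert it. The memberships below stand in for the output of fuzzy c-means, and the spectral floor is an illustrative safeguard, not a parameter from the paper:

import numpy as np

def prototype_response(mag_responses, memberships, m=2.0):
    """Fuzzy-weighted prototype of several room magnitude responses
    (rows = measurement positions, columns = frequency bins). The
    membership weights would come from fuzzy c-means; here they are given."""
    u = np.asarray(memberships) ** m
    return (u[:, None] * mag_responses).sum(axis=0) / u.sum()

def inverse_equalizer(prototype, floor=1e-3):
    """Invert the prototype magnitude response to get an equalizing filter
    magnitude, flooring deep nulls so they are not boosted wildly (the
    floor value is an illustrative choice)."""
    return 1.0 / np.maximum(prototype, floor)

# Three positions sharing similar characteristics, 512 frequency bins.
H = np.abs(np.random.randn(3, 512)) + 0.5
proto = prototype_response(H, memberships=[0.9, 0.8, 0.7])
eq = inverse_equalizer(proto)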
Marsella, Stacy C.; Gratch, Jonathan; Rickel, Jeff
The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans Proceedings Article
In: Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents, Montreal, Canada, 2001.
@inproceedings{marsella_effect_2001,
title = {The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans},
author = {Stacy C. Marsella and Jonathan Gratch and Jeff Rickel},
url = {http://ict.usc.edu/pubs/The%20Effect%20of%20Affect-%20Modeling%20the%20Impact%20of%20Emotional%20State%20on%20the%20Behavior%20of%20Interactive%20Virtual%20Humans.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents},
address = {Montreal, Canada},
abstract = {A person's behavior provides significant information about their emotional state, attitudes, and attention. Our goal is to create virtual humans that convey such information to people while interacting with them in virtual worlds. The virtual humans must respond dynamically to the events surrounding them, which are fundamentally influenced by users' actions, while providing an illusion of human-like behavior. A user must be able to interpret the dynamic cognitive and emotional state of the virtual humans using the same nonverbal cues that people use to understand one another. Towards these goals, we are integrating and extending components from three prior systems: a virtual human architecture with a range of cognitive and motor capabilities, a model of emotional appraisal, and a model of the impact of emotional state on physical behavior. We describe the key research issues, our approach, and an initial implementation in an Army peacekeeping scenario.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Douglas, Jay; Gratch, Jonathan
Adaptive Narrative: How Autonomous Agents, Hollywood, and Multiprocessing Operating Systems Can Live Happily Ever After Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
@inproceedings{douglas_adaptive_2001,
title = {Adaptive Narrative: How Autonomous Agents, Hollywood, and Multiprocessing Operating Systems Can Live Happily Ever After},
author = {Jay Douglas and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Adaptive%20Narrative-%20How%20Autonomous%20Agents,%20Hollywood,%20and%20Multiprocessing%20Operating%20Systems%20Can%20Live%20Happily%20Ever%20After.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
address = {Montreal, Canada},
abstract = {Creating dramatic narratives for real-time virtual reality environments is complicated by the lack of temporal distance between the occurrence of an event and its telling in the narrative. This paper describes the application of a multiprocessing operating system architecture to the creation of adaptive narratives, narratives that use autonomous actors or agents to create real-time dramatic experiences for human interactors. We also introduce the notion of dramatic acts and dramatic functions and indicate their use in constructing this real-time drama.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Cohen, Jonathan; Tchou, Chris; Hawkins, Tim; Debevec, Paul
Real-Time High-Dynamic Range Texture Mapping Proceedings Article
In: Eurographics Rendering Workshop, 2001.
@inproceedings{cohen_real-time_2001,
title = {Real-Time High-Dynamic Range Texture Mapping},
author = {Jonathan Cohen and Chris Tchou and Tim Hawkins and Paul Debevec},
url = {http://ict.usc.edu/pubs/Real-Time%20High-Dynamic%20Range%20Texture%20Mapping.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Eurographics Rendering Workshop},
abstract = {This paper presents a technique for representing and displaying high dynamic-range texture maps (HDRTMs) using current graphics hardware. Dynamic range in real-world environments often far exceeds the range representable in 8-bit per-channel texture maps. The increased realism afforded by a high-dynamic range representation provides improved fidelity and expressiveness for interactive visualization of image-based models. Our technique allows for real-time rendering of scenes with arbitrary dynamic range, limited only by available texture memory. In our technique, high-dynamic range textures are decomposed into sets of 8-bit textures. These 8-bit textures are dynamically reassembled by the graphics hardware's programmable multitexturing system or using multipass techniques and framebuffer image processing. These operations allow the exposure level of the texture to be adjusted continuously and arbitrarily at the time of rendering, correctly accounting for the gamma curve and dynamic range restrictions of the display device. Further, for any given exposure only two 8-bit textures must be resident in texture memory simultaneously. We present implementation details of this technique on various 3D graphics hardware architectures. We demonstrate several applications, including high-dynamic range panoramic viewing with simulated auto-exposure, real-time radiance environment mapping, and simulated Fresnel reflection.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
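The decomposition the abstract describes can be illustrated with a simple split of an HDR image into two 8-bit textures plus a recombination step standing in for the multitexturing hardware. The scale factor and the omission of gamma handling are simplifying assumptions:

import numpy as np

def split_hdr(hdr, scale=256.0):
    """Decompose an HDR image into two 8-bit textures: a 'high' texture
    holding the coarse range (values / scale) and a 'low' texture holding
    the remainder. Fractions below one code value are dropped; the paper's
    split also accounts for the display gamma curve."""
    high = np.clip(hdr / scale, 0, 255).astype(np.uint8)
    low = np.clip(hdr - high.astype(np.float32) * scale, 0, 255).astype(np.uint8)
    return high, low

def recombine(high, low, exposure=1.0, scale=256.0):
    """What the multitexturing stage computes for a given exposure level."""
    return exposure * (high.astype(np.float32) * scale + low.astype(np.float32))

hdr = np.random.rand(64, 64).astype(np.float32) * 10000.0
high, low = split_hdr(hdr)
display = np.clip(recombine(high, low, exposure=0.01), 0, 255)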
Swartout, William; Hill, Randall W.; Gratch, Jonathan; Johnson, W. Lewis; Kyriakakis, Chris; Labore, Catherine; Lindheim, Richard; Marsella, Stacy C.; Miraglia, D.; Moore, Bridget; Morie, Jacquelyn; Rickel, Jeff; Thiebaux, Marcus; Tuch, L.; Whitney, Richard; Douglas, Jay
Toward the Holodeck: Integrating Graphics, Sound, Character and Story Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
@inproceedings{swartout_toward_2001,
title = {Toward the Holodeck: Integrating Graphics, Sound, Character and Story},
author = {William Swartout and Randall W. Hill and Jonathan Gratch and W. Lewis Johnson and Chris Kyriakakis and Catherine Labore and Richard Lindheim and Stacy C. Marsella and D. Miraglia and Bridget Moore and Jacquelyn Morie and Jeff Rickel and Marcus Thiebaux and L. Tuch and Richard Whitney and Jay Douglas},
url = {http://ict.usc.edu/pubs/Toward%20the%20Holodeck-%20Integrating%20Graphics,%20Sound,%20Character%20and%20Story.pdf},
year = {2001},
date = {2001-06-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
address = {Montreal, Canada},
abstract = {We describe an initial prototype of a holodeck-like environment that we have created for the Mission Rehearsal Exercise Project. The goal of the project is to create an experience learning system where the participants are immersed in an environment where they can encounter the sights, sounds, and circumstances of real-world scenarios. Virtual humans act as characters and coaches in an interactive story with pedagogical goals.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Modeling Emotions in the Mission Rehearsal Exercise Proceedings Article
In: Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation, pp. 457–466, Orlando, FL, 2001.
@inproceedings{gratch_modeling_2001,
title = {Modeling Emotions in the Mission Rehearsal Exercise},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Modeling%20Emotions%20in%20the%20Mission%20Rehearsal%20Exercise.pdf},
year = {2001},
date = {2001-05-01},
booktitle = {Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation},
pages = {457–466},
address = {Orlando, FL},
abstract = {This paper discusses our attempts to model realistic human behavior in the context of the Mission Rehearsal Exercise system (MRE), a high-end virtual training environment designed to support dismounted infantry training between a human participant and elements of his command. The system combines immersive graphics, sound, and interactive characters controlled by artificial intelligence programs. Our goal in this paper is to show how some of the daunting subtlety in human behavior can be modeled by intelligent agents and in particular to focus on the role of modeling typical human emotional responses to environmental stimuli.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ligorio, M. Beatrice; Mininni, Giuseppe; Traum, David
Interlocution Scenarios for Problem Solving in an Educational MUD Environment Proceedings Article
In: 1st European Conference on Computer-Supported Collaborative Learning, 2001.
@inproceedings{ligorio_interlocution_2001,
title = {Interlocution Scenarios for Problem Solving in an Educational MUD Environment},
author = {M. Beatrice Ligorio and Giuseppe Mininni and David Traum},
url = {http://ict.usc.edu/pubs/INTERLOCUTION%20SCENARIOS%20FOR%20PROBLEM%20SOLVING%20IN%20AN%20EDUCATIONAL%20MUD%20ENVIRONMENT.pdf},
year = {2001},
date = {2001-03-01},
booktitle = {1st European Conference on Computer-Supported Collaborative Learning},
abstract = {This paper presents an analysis of computer mediated collaboration on a problem-solving task in a virtual world. The theoretical framework of this research combines research in Computer Mediated Communication with a social psychology theory of conflict. An experiment was conducted involving university students performing a problem solving task with a peer in an Educational MUD. Each performance was guided by a predefined script, designed based on the 'common speech' concepts. All the performances were analyzed in terms of identity perception, conflict perception and cooperation. By looking at the relationship among the CMC environment features, the social influence activated in this environment, the conflict elaboration, and the problem solving strategies, a distinctive 'interlocution scenario' emerged. The results are discussed using contributions from the two theoretical approaches embraced.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
Browsing Image Collections with Representations of Commonsense Activities Journal Article
In: Journal of the American Society for Information Science and Technology, vol. 52, no. 11, pp. 925–929, 2001.
@article{gordon_browsing_2001,
title = {Browsing Image Collections with Representations of Commonsense Activities},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Browsing%20Image%20Collections%20with%20Representations%20of%20Commonsense%20Activities.PDF},
year = {2001},
date = {2001-01-01},
journal = {Journal of the American Society for Information Science and Technology},
volume = {52},
number = {11},
pages = {925–929},
abstract = {To support browsing-based subject access to image collections, it is necessary to provide users with networks of subject terms that are organized in an intuitive, richly interconnected manner. A principled approach to this task is to organize the subject terms by their relationship to activity contexts that are commonly understood among users. This article describes a methodology for creating networks of subject terms by manually representing a large number of common-sense activities that are broadly related to image subject terms. The application of this methodology to the Library of Congress Thesaurus for Graphic Materials produced 768 representations that supported users of a prototype browsing-based retrieval system in searching large, indexed photograph collections.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
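The activity-based term network the article describes can be pictured with a toy fragment: subject terms become browsable neighbors when a commonsense activity links them. The activities and terms below are hypothetical examples, not entries from the Library of Congress thesaurus:

from collections import defaultdict

# Hypothetical fragment of an activity-to-subject-term network.
activities = {
    "harvesting": ["wheat", "farm machinery", "barns"],
    "horse racing": ["racetracks", "jockeys", "horses"],
}

term_to_activities = defaultdict(set)
for activity, terms in activities.items():
    for term in terms:
        term_to_activities[term].add(activity)

def related_terms(term):
    """Terms reachable through a shared activity context."""
    out = set()
    for activity in term_to_activities[term]:
        out.update(activities[activity])
    out.discard(term)
    return sorted(out)

print(related_terms("barns"))  # -> ['farm machinery', 'wheat']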
Gratch, Jonathan; Marsella, Stacy C.
Tears and Fears: Modeling emotions and emotional behaviors in synthetic agents Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, pp. 278–285, Montreal, Canada, 2001.
@inproceedings{gratch_tears_2001,
title = {Tears and Fears: Modeling emotions and emotional behaviors in synthetic agents},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Tears%20and%20Fears-%20Modeling%20emotions%20and%20emotional%20behaviors%20in%20synthetic%20agents.pdf},
year = {2001},
date = {2001-01-01},
booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
pages = {278–285},
address = {Montreal, Canada},
abstract = {Emotions play a critical role in creating engaging and believable characters to populate virtual worlds. Our goal is to create general computational models to support characters that act in virtual environments, make decisions, but whose behavior also suggests an underlying emotional current. In service of this goal, we integrate two complementary approaches to emotional modeling into a single unified system. Gratch's Émile system focuses on the problem of emotional appraisal: how emotions arise from an evaluation of how environmental events relate to an agent's plans and goals. Marsella et al.'s IPD system focuses more on the impact of emotions on behavior, including the impact on the physical expressions of emotional state through suitable choice of gestures and body language. This integrated model is layered atop Steve, a pedagogical agent architecture, and exercised within the context of the Mission Rehearsal Exercise, a prototype system designed to teach decision-making skills in highly evocative situations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
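A minimal appraisal sketch in the spirit of the Émile side of the system described above: an event is appraised by how it changes the probability of a goal, yielding an emotion type and intensity. The labels and formula are illustrative simplifications, not the system's actual rules:

def appraise(goal_importance, prob_before, prob_after):
    """Appraise an event by its effect on a goal's likelihood. Intensity
    scales with both the size of the change and the goal's importance;
    settled outcomes yield joy/distress, uncertain ones hope/fear."""
    delta = prob_after - prob_before
    intensity = abs(delta) * goal_importance
    if prob_after in (0.0, 1.0):  # outcome is settled
        emotion = "joy" if delta > 0 else "distress"
    else:                         # outcome still uncertain
        emotion = "hope" if delta > 0 else "fear"
    return emotion, intensity

# A threatened goal: survival odds drop from 0.9 to 0.4.
print(appraise(goal_importance=1.0, prob_before=0.9, prob_after=0.4))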