Publications
Search
Nye, Benjamin D.; Hu, Xiangen
Conceptualizing and Representing Domains to Guide Tutoring Book Section
In: Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling, vol. 4, pp. 15–18, US Army Research Laboratory, Orlando, FL, 2016.
@incollection{nye_conceptualizing_2016,
title = {Conceptualizing and Representing Domains to Guide Tutoring},
author = {Nye, Benjamin D. and Hu, Xiangen},
url = {http://books.google.com/books?hl=en&lr=&id=0suvDAAAQBAJ&oi=fnd&pg=PA15&dq=%22data.+This+chapter+presents+an+excellent+overview+of+current+research+on+Q-matrices%22+%22edge+work+on+ensemble+methods+that+achieve+state+of+the+art+performance+by+combining%22+&ots=6MJhm1XHVV&sig=i14eJyin69Cy-jms2lWIFF4K3CU},
year = {2016},
date = {2016-07-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling},
volume = {4},
pages = {15--18},
publisher = {US Army Research Laboratory},
address = {Orlando, FL},
abstract = {Any discussion about how intelligent tutoring system (ITS) domains must begin with considering how ITS conceptualize and represent domains. This process requires building formal, mathematically-specifiable operationalization of the often implicit knowledge about learning domains and their pedagogy. Across different domains and pedagogical approaches, a wide variety of methods have been taken: a scope that would be better-covered by an encyclopedia rather than a single book. Since this section could not possibly cover every possible approach to domain modeling, the chapters within this section were instead chosen to cover a representative range of fundamentally-different approaches to domain modeling.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Nye, Benjamin D.; Boyce, Michael W.; Sottilare, Robert
Defining the Ill-Defined: From Abstract Principles to Applied Pedagogy Book Section
In: Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling, vol. 4, pp. 19–37, US Army Research Laboratory, Orlando, FL, 2016, ISBN: 978-0-9893923-9-6.
@incollection{nye_defining_2016,
title = {Defining the Ill-Defined: From Abstract Principles to Applied Pedagogy},
author = {Nye, Benjamin D. and Boyce, Michael W. and Sottilare, Robert},
url = {https://gifttutoring.org/attachments/download/1736/Design%20Recommendations%20for%20ITS_Volume%204%20-%20Domain%20Modeling%20Book_web%20version_final.pdf},
isbn = {978-0-9893923-9-6},
year = {2016},
date = {2016-07-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling},
volume = {4},
pages = {19--37},
publisher = {US Army Research Laboratory},
address = {Orlando, FL},
abstract = {Attempts to define ill-defined domains in intelligent tutoring system (ITS) research has been approached a number of times (Fournier-Viger, Nkambou, & Nguifo, 2010; Lynch, Ashley, Pinkwart, & Aleven, 2009; Mitrovic & Weerasinghe, 2009; Jacovina, Snow, Dai, & McNamara, 2015; Woods, Stensrud, Wray, Haley, & Jones, 2015). Related research has tried to determine levels of ill-definedness for a domain (Le, Loll, & Pinkwart, 2013). Despite such attempts, the field has not yet converged on common guidelines to distinguish between well-defined versus ill-defined domains. We argue that such guidelines struggle to converge because a domain is too large to meaningfully categorize: every domain contains a mixture of well-defined and ill-defined tasks. While the co-existence of well-defined and ill-defined tasks in a single domain is nearly universally-agreed upon by researchers; this key point is often quickly buried by an extensive discussion about what makes certain domain tasks ill-defined (e.g., disagreement about ideal solutions, multiple solution paths). In this chapter, we first take a step back to consider what is meant by a domain in the context of learning. Next, based on this definition for a domain, we map out the components that are in a learning domain, since each component may have ill-defined parts. This leads into a discussion about the strategies that have been used to make ill-defined domains tractable for certain types of pedagogy. Examples of ITS research that applies these strategies are noted. Finally, we conclude with practical how-to considerations and open research questions for approaching ill-defined domains. This chapter should be considered a companion piece to our chapter in the prior volume of this series (Nye, Goldberg, & Hu, 2015). 
This chapter focuses on how to understand and transform ill-defined parts of domains, while the prior chapter discusses commonly-used learning tasks and authoring approaches for both well-defined and ill-defined tasks. As such, this chapter is intended to help the learner understand if and how different parts of the domain are ill-defined (and what to do about them). The companion piece in the authoring tools volume discusses different categories of well and ill-defined tasks, from the standpoint of attempting to author and maintain an ITS.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Sottilare, Robert A.; Graesser, Arthur C.; Hu, Xiangen; Olney, Andrew; Nye, Benjamin; Sinatra, Anna M.
Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling Book
US Army Research Laboratory, Orlando, FL, 2016.
@book{sottilare_design_2016,
title = {Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling},
author = {Sottilare, Robert A. and Graesser, Arthur C. and Hu, Xiangen and Olney, Andrew and Nye, Benjamin and Sinatra, Anna M.},
url = {http://books.google.com/books?hl=en&lr=&id=0suvDAAAQBAJ&oi=fnd&pg=PA1&dq=%22Barnes,+Behrooz+Mostafavi,+and+Michael+J.%22+%22A.+Sottilare+and+Joseph%22+%2214+%E2%80%93+Exploring+the+Diversity+of+Domain+Modeling+for+Training%22+%2213+%E2%80%92+Mining+Expertise:+Learning+New+Tricks+from+an+Old%22+&ots=6MJgp2XEWV&sig=7CHZvZIllN3Xk8uFbMHmxN7gfLw},
year = {2016},
date = {2016-07-01},
volume = {4},
publisher = {US Army Research Laboratory},
address = {Orlando, FL},
abstract = {Design Recommendations for Intelligent Tutoring Systems (ITSs) explores the impact of intelligent tutoring system design on education and training. Specifically, this volume examines “Authoring Tools and Expert Modeling Techniques”. The “Design Recommendations book series examines tools and methods to reduce the time and skill required to develop Intelligent Tutoring Systems with the goal of improving the Generalized Intelligent Framework for Tutoring (GIFT). GIFT is a modular, service-oriented architecture developed to capture simplified authoring techniques, promote reuse and standardization of ITSs along with automated instructional techniques and effectiveness evaluation capabilities for adaptive tutoring tools and methods.},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
Rosenbloom, Paul S.; Demski, Abram; Ustun, Volkan
The Sigma Cognitive Architecture and System: Towards Functionally Elegant Grand Unification Journal Article
In: Journal of Artificial General Intelligence, 2016, ISSN: 1946-0163.
@article{rosenbloom_sigma_2016,
title = {The {Sigma} Cognitive Architecture and System: Towards Functionally Elegant Grand Unification},
author = {Rosenbloom, Paul S. and Demski, Abram and Ustun, Volkan},
url = {http://www.degruyter.com/view/j/jagi.ahead-of-print/jagi-2016-0001/jagi-2016-0001.xml},
doi = {10.1515/jagi-2016-0001},
issn = {1946-0163},
year = {2016},
date = {2016-07-01},
journal = {Journal of Artificial General Intelligence},
abstract = {Sigma (Σ) is a cognitive architecture and system whose development is driven by a combination of four desiderata: grand unification, generic cognition, functional elegance, and sufficient efficiency. Work towards these desiderata is guided by the graphical architecture hypothesis, that key to progress on them is combining what has been learned from over three decades’ worth of separate work on cognitive architectures and graphical models. In this article, these four desiderata are motivated and explained, and then combined with the graphical architecture hypothesis to yield a rationale for the development of Sigma. The current state of the cognitive architecture is then introduced in detail, along with the graphical architecture that sits below it and implements it. Progress in extending Sigma beyond these architectures and towards a full cognitive system is then detailed in terms of both a systematic set of higher level cognitive idioms that have been developed and several virtual humans that are built from combinations of these idioms. Sigma as a whole is then analyzed in terms of how well the progress to date satisfies the desiderata. This article thus provides the first full motivation, presentation and analysis of Sigma, along with a diversity of more specific results that have been generated during its development.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rosenbloom, Paul S.; Demski, Abram; Ustun, Volkan
Rethinking Sigma’s Graphical Architecture: An Extension to Neural Networks Proceedings Article
In: International Conference on Artificial General Intelligence, pp. 84–94, Springer, New York, NY, 2016, ISBN: 978-3-319-41649-6.
@inproceedings{rosenbloom_rethinking_2016,
title = {Rethinking {Sigma}’s Graphical Architecture: An Extension to Neural Networks},
author = {Rosenbloom, Paul S. and Demski, Abram and Ustun, Volkan},
url = {http://link.springer.com/chapter/10.1007/978-3-319-41649-6_9},
doi = {10.1007/978-3-319-41649-6_9},
isbn = {978-3-319-41649-6},
year = {2016},
date = {2016-07-01},
booktitle = {International Conference on Artificial General Intelligence},
volume = {9782},
pages = {84--94},
publisher = {Springer},
address = {New York, NY},
abstract = {The status of Sigma’s grounding in graphical models is challenged by the ways in which their semantics has been violated while incorporating rule-based reasoning into them. This has led to a rethinking of what goes on in its graphical architecture, with results that include a straightforward extension to feedforward neural networks (although not yet with learning).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nazari, Zahra; Gratch, Jonathan
Predictive Models of Malicious Behavior in Human Negotiations Proceedings Article
In: Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence, pp. 855–861, 2016.
@inproceedings{nazari_predictive_2016,
title = {Predictive Models of Malicious Behavior in Human Negotiations},
author = {Nazari, Zahra and Gratch, Jonathan},
url = {http://www.ijcai.org/Proceedings/16/Papers/126.pdf},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence},
pages = {855--861},
abstract = {Human and artificial negotiators must exchange information to find efficient negotiated agreements, but malicious actors could use deception to gain unfair advantage. The misrepresentation game is a game-theoretic formulation of how deceptive actors could gain disproportionate rewards while seeming honest and fair. Previous research proposed a solution to this game but this required restrictive assumptions that might render it inapplicable to realworld settings. Here we evaluate the formalism against a large corpus of human face-to-face negotiations. We confirm that the model captures how dishonest human negotiators win while seeming fair, even in unstructured negotiations. We also show that deceptive negotiators give-off signals of their malicious behavior, providing the opportunity for algorithms to detect and defeat this malicious tactic.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jalal-Kamali, Ali; Pynadath, David V.
Toward a Bayesian Network Model of Events in International Relations Proceedings Article
In: Proceedings of the 2016 International Conference on Social Computing, Behavioral-Cultural Modeling & Prediction and Behavior Representation in Modeling and Simulation, Springer, Washington D.C., 2016.
@inproceedings{jalal-kamali_toward_2016,
title = {Toward a {Bayesian} Network Model of Events in International Relations},
author = {Jalal-Kamali, Ali and Pynadath, David V.},
url = {https://books.google.com/books?id=_HGADAAAQBAJ&pg=PA321&lpg=PA321&dq=Toward+a+Bayesian+network+model+of+events+in+international+relations&source=bl&ots=JBOYm4KCF2&sig=eqmzgrWXwDroEtoLyxZxSjxDIAs&hl=en&sa=X&ved=0ahUKEwiIgoSS8o_PAhUUzGMKHWnaDlEQ6AEILjAC#v=onepage&q=Toward%20a%20Bayesian%20network%20model%20of%20events%20in%20international%20relations&f=false},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the 2016 International Conference on Social Computing, Behavioral-Cultural Modeling \& Prediction and Behavior Representation in Modeling and Simulation},
publisher = {Springer},
address = {Washington D.C.},
abstract = {Formal models of international relations have a long history of exploiting representations and algorithms from artificial intelligence. As more news sources move online, there is an increasing wealth of data that can inform the creation of such models. The Global Database of Events, Language, and Tone (GDELT) extracts events from news articles from around the world, where the events represent actions taken by geopolitical actors, reflecting the actors’ relationships. We can apply existing machine-learning algorithms to automatically construct a Bayesian network that represents the distribution over the actions between actors. Such a network model allows us to analyze the interdependencies among events and generate the relative likelihoods of different events. By examining the accuracy of the learned network over different years and different actor pairs, we are able to identify aspects of international relations from a data-driven approach.We are also able to identify weaknesses in the model that suggest needs for additional domain knowledge.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Norrholm, Seth Davin; Jovanovic, Tanja; Gerardi, Maryrose; Breazeale, Kathryn G.; Price, Matthew; Davis, Michael; Duncan, Erica; Ressler, Kerry J.; Bradley, Bekh; Rizzo, Albert; Tuerk, Peter W.; Rothbaum, Barbara O.
Baseline psychophysiological and cortisol reactivity as a predictor of PTSD treatment outcome in virtual reality exposure therapy Journal Article
In: Behaviour Research and Therapy, vol. 82, pp. 28–37, 2016, ISSN: 00057967.
@article{norrholm_baseline_2016,
title = {Baseline psychophysiological and cortisol reactivity as a predictor of {PTSD} treatment outcome in virtual reality exposure therapy},
author = {Norrholm, Seth Davin and Jovanovic, Tanja and Gerardi, Maryrose and Breazeale, Kathryn G. and Price, Matthew and Davis, Michael and Duncan, Erica and Ressler, Kerry J. and Bradley, Bekh and Rizzo, Albert and Tuerk, Peter W. and Rothbaum, Barbara O.},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0005796716300663},
doi = {10.1016/j.brat.2016.05.002},
issn = {00057967},
year = {2016},
date = {2016-07-01},
journal = {Behaviour Research and Therapy},
volume = {82},
pages = {28--37},
abstract = {Baseline cue-dependent physiological reactivity may serve as an objective measure of posttraumatic stress disorder (PTSD) symptoms. Additionally, prior animal model and psychological studies would suggest that subjects with greatest symptoms at baseline may have the greatest violation of expectancy to danger when undergoing exposure based psychotherapy; thus treatment approaches which enhanced the learning under these conditions would be optimal for those with maximal baseline cue-dependent reactivity. However methods to study this hypothesis objectively are lacking. Virtual reality (VR) methodologies have been successfully employed as an enhanced form of imaginal prolonged exposure therapy for the treatment of PTSD. Our goal was to examine the predictive nature of initial psychophysiological (e.g., startle, skin conductance, heart rate) and stress hormone responses (e.g., cortisol) during presentation of VR-based combat-related stimuli on PTSD treatment outcome. Combat veterans with PTSD underwent 6 weeks of VR exposure therapy combined with either D-cycloserine (DCS), alprazolam (ALP), or placebo (PBO). In the DCS group, startle response to VR scenes prior to initiation of treatment accounted for 76% of the variance in CAPS change scores, p < 0.001, in that higher responses predicted greater changes in symptom severity over time. Additionally, baseline cortisol reactivity was inversely associated with treatment response in the ALP group},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
LeGendre, Chloe; Yu, Xueming; Liu, Dai; Busch, Jay; Jones, Andrew; Pattanaik, Sumanta; Debevec, Paul
Practical Multispectral Lighting Reproduction Journal Article
In: ACM Transactions on Graphics, vol. 35, no. 4, pp. 1–11, 2016, ISSN: 07300301.
@article{legendre_practical_2016,
title = {Practical Multispectral Lighting Reproduction},
author = {LeGendre, Chloe and Yu, Xueming and Liu, Dai and Busch, Jay and Jones, Andrew and Pattanaik, Sumanta and Debevec, Paul},
url = {http://dl.acm.org/citation.cfm?id=2925934},
doi = {10.1145/2897824.2925934},
issn = {07300301},
year = {2016},
date = {2016-07-01},
journal = {ACM Transactions on Graphics},
volume = {35},
number = {4},
pages = {1--11},
abstract = {We present a practical framework for reproducing omnidirectional incident illumination conditions with complex spectra using a light stage with multispectral LED lights. For lighting acquisition, we augment standard RGB panoramic photography with one or more observations of a color chart with numerous reflectance spectra. We then solve for how to drive the multispectral light sources so that they best reproduce the appearance of the color charts in the original lighting. Even when solving for non-negative intensities, we show that accurate lighting reproduction is achievable using just four or six distinct LED spectra for a wide range of incident illumination spectra. A significant benefit of our approach is that it does not require the use of specialized equipment (other than the light stage) such as monochromators, spectroradiometers, or explicit knowledge of the LED power spectra, camera spectral response functions, or color chart reflectance spectra. We describe two simple devices for multispectral lighting capture, one for slow measurements of detailed angular spectral detail, and one for fast measurements with coarse angular detail. We validate the approach by realistically compositing real subjects into acquired lighting environments, showing accurate matches to how the subject would actually look within the environments, even for those including complex multispectral illumination. We also demonstrate dynamic lighting capture and playback using the technique.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Chen, Chih-Fan; Bolas, Mark; Suma, Evan
Real-time 3D rendering using depth-based geometry reconstruction and view-dependent texture mapping Proceedings Article
In: Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016, pp. 1–2, ACM Press, Anaheim, CA, 2016, ISBN: 978-1-4503-4371-8.
@inproceedings{chen_real-time_2016,
title = {Real-time {3D} rendering using depth-based geometry reconstruction and view-dependent texture mapping},
author = {Chen, Chih-Fan and Bolas, Mark and Suma, Evan},
url = {http://dl.acm.org/citation.cfm?id=2945162},
doi = {10.1145/2945078.2945162},
isbn = {978-1-4503-4371-8},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016},
pages = {1--2},
publisher = {ACM Press},
address = {Anaheim, CA},
abstract = {With the recent proliferation of high-fidelity head-mounted displays (HMDs), there is increasing demand for realistic 3D content that can be integrated into virtual reality environments. However, creating photorealistic models is not only difficult but also time consuming. A simpler alternative involves scanning objects in the real world and rendering their digitized counterpart in the virtual world. Capturing objects can be achieved by performing a 3D scan using widely available consumer-grade RGB-D cameras. This process involves reconstructing the geometric model from depth images generated using a structured light or time-of-flight sensor. The colormap is determined by fusing data from multiple color images captured during the scan. Existing methods compute the color of each vertex by averaging the colors from all these images. Blending colors in this manner creates low-fidelity models that appear blurry. (Figure 1 right). Furthermore, this approach also yields textures with fixed lighting that is baked on the model. This limitation becomes more apparent when viewed in head-tracked virtual reality, as the illumination (e.g. specular reflections) does not change appropriately based on the user's viewpoint},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Barreto, Joseph; Alexander, Oleg; Bolas, Mark; Debevec, Paul; Unger, Jonas
Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array Proceedings Article
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 18–26, Las Vegas, NV, 2016.
@inproceedings{jones_time-offset_2016,
title = {Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array},
author = {Jones, Andrew and Nagano, Koki and Busch, Jay and Yu, Xueming and Peng, Hsuan-Yueh and Barreto, Joseph and Alexander, Oleg and Bolas, Mark and Debevec, Paul and Unger, Jonas},
url = {http://www.cv-foundation.org//openaccess/content_cvpr_2016_workshops/w16/papers/Jones_Time-Offset_Conversations_on_CVPR_2016_paper.pdf},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops},
pages = {18--26},
address = {Las Vegas, NV},
abstract = {We present a system for creating and displaying interactive life-sized 3D digital humans based on pre-recorded interviews. We use 30 cameras and an extensive list of questions to record a large set of video responses. Users access videos through a natural conversation interface that mimics face-to-face interaction. Recordings of answers, listening and idle behaviors are linked together to create a persistent visual image of the person throughout the interaction. The interview subjects are rendered using flowed light fields and shown life-size on a special rear-projection screen with an array of 216 video projectors. The display allows multiple users to see different 3D perspectives of the subject in proper relation to their viewpoints, without the need for stereo glasses. The display is effective for interactive conversations since it provides 3D cues such as eye gaze and spatial hand gestures.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Grechkin, Timofey; Thomas, Jerald; Azmandian, Mahdi; Bolas, Mark; Suma, Evan
Revisiting detection thresholds for redirected walking: combining translation and curvature gains Proceedings Article
In: Proceedings of the ACM Symposium on Applied Perception, pp. 113–120, ACM Press, Anaheim, CA, 2016, ISBN: 978-1-4503-4383-1.
@inproceedings{grechkin_revisiting_2016,
title = {Revisiting detection thresholds for redirected walking: combining translation and curvature gains},
author = {Grechkin, Timofey and Thomas, Jerald and Azmandian, Mahdi and Bolas, Mark and Suma, Evan},
url = {http://dl.acm.org/citation.cfm?id=2931018},
doi = {10.1145/2931002.2931018},
isbn = {978-1-4503-4383-1},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the ACM Symposium on Applied Perception},
pages = {113--120},
publisher = {ACM Press},
address = {Anaheim, CA},
abstract = {Redirected walking enables the exploration of large virtual environments while requiring only a finite amount of physical space. Unfortunately, in living room sized tracked areas the effectiveness of common redirection algorithms such as Steer-to-Center is very limited. A potential solution is to increase redirection effectiveness by applying two types of perceptual manipulations (curvature and translation gains) simultaneously. This paper investigates how such combination may affect detection thresholds for curvature gain. To this end we analyze the estimation methodology and discuss selection process for a suitable estimation method. We then compare curvature detection thresholds obtained under different levels of translation gain using two different estimation methods: method of constant stimuli and Green’s maximum likelihood procedure. The data from both experiments shows no evidence that curvature gain detection thresholds were affected by the presence of translation gain (with test levels spanning previously estimated interval of undetectable translation gain levels). This suggests that in practice currently used levels of translation and curvature gains can be safely applied simultaneously. Furthermore, we present some evidence that curvature detection thresholds may be lower that previously reported. Our estimates indicate that users can be redirected on a circular arc with radius of either 11.6m or 6.4m depending on the estimation method vs. the previously reported value of 22m. These results highlight that the detection threshold estimates vary significantly with the estimation method and suggest the need for further studies to define efficient and reliable estimation methodology},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
LeGendre, Chloe; Yu, Xueming; Debevec, Paul
Optimal LED selection for multispectral lighting reproduction Proceedings Article
In: Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016, ACM, New York, NY, 2016, ISBN: 978-1-4503-4371-8.
@inproceedings{legendre_optimal_2016,
title = {Optimal {LED} selection for multispectral lighting reproduction},
author = {LeGendre, Chloe and Yu, Xueming and Debevec, Paul},
url = {http://dl.acm.org/citation.cfm?id=2945150},
doi = {10.1145/2945078.2945150},
isbn = {978-1-4503-4371-8},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016},
publisher = {ACM},
address = {New York, NY},
abstract = {We demonstrate the sufficiency of using as few as five LEDs of distinct spectra for multispectral lighting reproduction and solve for the optimal set of five from 11 such commercially available LEDs. We leverage published spectral reflectance, illuminant, and camera spectral sensitivity datasets to show that two approaches of lighting reproduction, matching illuminant spectra directly and matching material color appearance observed by one or more cameras or a human observer, yield the same LED selections. Our proposed optimal set of five LEDs includes red, green, and blue with narrow emission spectra, along with white and amber with broader spectra.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ustun, Volkan; Rosenbloom, Paul
Towards Truly Autonomous Synthetic Characters with the Sigma Cognitive Architecture Book Section
In: Integrating Cognitive Architectures into Virtual Character Design, pp. 213 – 237, IGI Global, Hershey, PA, 2016, ISBN: 978-1-5225-0454-2.
@incollection{ustun_towards_2016,
title = {Towards Truly Autonomous Synthetic Characters with the {Sigma} Cognitive Architecture},
author = {Ustun, Volkan and Rosenbloom, Paul},
url = {http://services.igi-global.com/resolvedoi/resolve.aspx?doi=10.4018/978-1-5225-0454-2},
isbn = {978-1-5225-0454-2},
year = {2016},
date = {2016-06-01},
booktitle = {Integrating Cognitive Architectures into Virtual Character Design},
pages = {213--237},
publisher = {IGI Global},
address = {Hershey, PA},
abstract = {Realism is required not only for how synthetic characters look but also for how they behave. Many applications, such as simulations, virtual worlds, and video games, require computational models of intelligence that generate realistic and credible behavior for the participating synthetic characters. Sigma (Σ) is being built as a computational model of general intelligence with a long-term goal of understanding and replicating the architecture of the mind; i.e., the fixed structure underlying intelligent behavior. Sigma leverages probabilistic graphical models towards a uniform grand unification of not only traditional cognitive capabilities but also key non-cognitive aspects, creating unique opportunities for the construction of new kinds of non-modular behavioral models. These ambitions strive for the complete control of synthetic characters that behave as humanly as possible. In this paper, Sigma is introduced along with two disparate proof-of-concept virtual humans – one conversational and the other a pair of ambulatory agents – that demonstrate its diverse capabilities.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Swartout, William R.
Virtual Humans as Centaurs: Melding Real and Virtual Book Section
In: Virtual, Augmented and Mixed Reality, vol. 9740, pp. 356–359, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-39906-5 978-3-319-39907-2.
@incollection{swartout_virtual_2016,
title = {Virtual Humans as Centaurs: Melding Real and Virtual},
author = {Swartout, William R.},
url = {http://link.springer.com/10.1007/978-3-319-39907-2_34},
isbn = {978-3-319-39906-5, 978-3-319-39907-2},
year = {2016},
date = {2016-06-01},
booktitle = {Virtual, Augmented and Mixed Reality},
volume = {9740},
pages = {356--359},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Centaurs are man-machine teams that can work together on problems and can out-perform, either people or computers working alone in domains as varied as chess-playing and protein folding. But the centaur of Greek mythology was not a team, but rather a hybrid of man and horse with some of the characteristics of each. In this paper, we outline our efforts to build virtual humans, which might be considered hybrid centaurs, combining features of both people and machines. We discuss experimental evidence that shows that these virtual human hybrids can outperform both people and inanimate processes in some tasks such as medical interviewing.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Nolin, Pierre; Stipanicic, Annie; Henry, Mylène; Lachapelle, Yves; Lussier-Desrochers, Dany; Rizzo, Albert “Skip”; Allain, Philippe
ClinicaVR: Classroom-CPT: A virtual reality tool for assessing attention and inhibition in children and adolescents Journal Article
In: Computers in Human Behavior, vol. 59, pp. 327–333, 2016, ISSN: 07475632.
@article{nolin_clinicavr_2016,
title = {{ClinicaVR}: {Classroom-CPT}: A virtual reality tool for assessing attention and inhibition in children and adolescents},
author = {Nolin, Pierre and Stipanicic, Annie and Henry, Mylène and Lachapelle, Yves and Lussier-Desrochers, Dany and Rizzo, Albert “Skip” and Allain, Philippe},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0747563216300759},
doi = {10.1016/j.chb.2016.02.023},
issn = {07475632},
year = {2016},
date = {2016-06-01},
journal = {Computers in Human Behavior},
volume = {59},
pages = {327--333},
abstract = {Having garnered interest both in clinic and research areas, the Virtual Classroom (Rizzo et al., 2000) assesses children's attention in a virtual context. The Digital MediaWorks team (www.dmw.ca) has evolved the original basic classroom concept over a number of iterations to form the ClinicaVR Suite containing the Classroom-CPT as one of its components. The present study has three aims: investigate certain validity and reliability aspects of the tool; examine the relationship between performance in the virtual test and the attendant sense of presence and cybersickness experienced by participants; assess potential effects of gender and age on performance in the test. The study was conducted with 102 children and adolescents from Grade 2 to Grade 10. All participants were enrolled in a regular school program. Results support both concurrent and construct validity as well as temporal stability of ClinicaVR: Classroom-Continuous Performance Test (CPT). Gender exerted no effect on performance, while age did. The test did not cause much cybersickness. We recommend ClinicaVR: Classroom-CPT as an assessment tool for selective and sustained attention, and inhibition, in clinic and research domains.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Artstein, Ron; Gainer, Alesia; Georgila, Kallirroi; Leuski, Anton; Shapiro, Ari; Traum, David
New Dimensions in Testimony Demonstration Proceedings Article
In: Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations, pp. 32–36, Association for Computational Linguistics, San Diego, California, 2016.
@inproceedings{artstein_new_2016,
title = {New Dimensions in Testimony Demonstration},
author = {Ron Artstein and Alesia Gainer and Kallirroi Georgila and Anton Leuski and Ari Shapiro and David Traum},
url = {http://www.aclweb.org/anthology/N16-3007},
year = {2016},
date = {2016-06-01},
booktitle = {Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations},
pages = {32–36},
publisher = {Association for Computational Linguistics},
address = {San Diego, California},
abstract = {New Dimensions in Testimony is a prototype dialogue system that allows users to conduct a conversation with a real person who is not available for conversation in real time. Users talk to a persistent representation of Holocaust survivor Pinchas Gutter on a screen, while a dialogue agent selects appropriate responses to user utterances from a set of pre-recorded video statements, simulating a live conversation. The technology is similar to existing conversational agents, but to our knowledge this is the first system to portray a real person. The demonstration will show the system on a range of screens (from mobile phones to large TVs), and allow users to have individual conversations with Mr. Gutter.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Phan, Thai; Bolas, Mark; Krum, David M.
User Perceptions of a Virtual Human Over Mobile Video Chat Interactions Book Section
In: Human-Computer Interaction. Novel User Experiences, vol. 9733, pp. 107–118, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-39512-8 978-3-319-39513-5.
@incollection{kang_user_2016,
title = {User Perceptions of a Virtual Human Over Mobile Video Chat Interactions},
author = {Sin-Hwa Kang and Thai Phan and Mark Bolas and David M. Krum},
url = {http://download.springer.com/static/pdf/913/chp%253A10.1007%252F978-3-319-39513-5_10.pdf?originUrl=http%3A%2F%2Flink.springer.com%2Fchapter%2F10.1007%2F978-3-319-39513-5_10&token2=exp=1474906977~acl=%2Fstatic%2Fpdf%2F913%2Fchp%25253A10.1007%25252F978-3-319-39513-5_10.pdf%3ForiginUrl%3Dhttp%253A%252F%252Flink.springer.com%252Fchapter%252F10.1007%252F978-3-319-39513-5_10*~hmac=14d38ee320936bf1edfc65a0d3fcc0855c42e0baba46e0f3a9a81293698b8b68},
isbn = {978-3-319-39512-8 978-3-319-39513-5},
year = {2016},
date = {2016-06-01},
booktitle = {Human-Computer Interaction. Novel User Experiences},
volume = {9733},
pages = {107–118},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {We believe that virtual humans, presented over video chat services, such as Skype, and delivered using smartphones, can be an effective way to deliver innovative applications where social interactions are important, such as counseling and coaching. To explore this subject, we have built a hardware and software apparatus that allows virtual humans to initiate, receive, and interact over video calls using Skype or any similar service. With this platform, we conducted two experiments to investigate the applications and characteristics of virtual humans that interact over mobile video. In Experiment 1, we investigated user reactions to the physical realism of the background scene in which a virtual human was displayed. In Experiment 2, we examined how virtual characters can establish and maintain longer term relationships with users, using ideas from Social Exchange Theory to strengthen bonds between interactants. Experiment 2 involved repeated interactions with a virtual human over a period of time. Both studies used counseling-style interactions with users. The results demonstrated that males were more attracted socially to a virtual human that was presented over a realistic background than a featureless background while females were more socially attracted to a virtual human with a less realistic featureless background. The results further revealed that users felt the virtual human was a compassionate partner when they interacted with the virtual human over multiple calls, rather than just a single call.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Mizukami, Masahiro; Traum, David; Yoshino, Koichiro; Neubig, Graham; Nakamura, Satoshi
Word and Dialogue Act Entrainment Analysis based on User Profile Proceedings Article
In: Proceedings of The 30th Annual Conference of the Japanese Society for Artificial Intelligence, Kitakyushu, Japan, 2016.
@inproceedings{mizukami_word_2016,
title = {Word and Dialogue Act Entrainment Analysis based on User Profile},
author = {Masahiro Mizukami and David Traum and Koichiro Yoshino and Graham Neubig and Satoshi Nakamura},
url = {https://kaigi.org/jsai/webprogram/2016/pdf/356.pdf},
year = {2016},
date = {2016-06-01},
booktitle = {Proceedings of The 30th Annual Conference of the Japanese Society for Artificial Intelligence},
address = {Kitakyushu, Japan},
abstract = {Patterns of dialogue act and word selection are observable in dialogue. Entrainment is the factor that might account for these patterns. We test the entrainment hypotheses using the switchboard corpus, comparing speech of different speakers from different parts of the dialogue, but also speech of the same speaker at different points. Our findings replicate previous studies that dialogue participants converge toward each other in word choice, but we also investigate novel measures of entrainment of dialogue act selection, and word choice for specific dialogue acts. These studies inform a design for dialogue systems that would show human-like degrees of entrainment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nye, Benjamin D.
ITS, The End of the World as We Know It: Transitioning AIED into a Service-Oriented Ecosystem Journal Article
In: International Journal of Artificial Intelligence in Education, vol. 26, no. 2, pp. 756–770, 2016, ISSN: 1560-4292, 1560-4306.
@article{nye_its_2016,
title = {ITS, The End of the World as We Know It: Transitioning AIED into a Service-Oriented Ecosystem},
author = {Benjamin D. Nye},
url = {http://link.springer.com/10.1007/s40593-016-0098-8},
doi = {10.1007/s40593-016-0098-8},
issn = {1560-4292, 1560-4306},
year = {2016},
date = {2016-06-01},
journal = {International Journal of Artificial Intelligence in Education},
volume = {26},
number = {2},
pages = {756–770},
abstract = {Advanced learning technologies are reaching a new phase of their evolution where they are finally entering mainstream educational contexts, with persistent user bases. However, as AIED scales, it will need to follow recent trends in service-oriented and ubiquitous computing: breaking AIED platforms into distinct services that can be composed for different platforms (web, mobile, etc.) and distributed across multiple systems. This will represent a move from learning platforms to an ecosystem of interacting learning tools. Such tools will enable new opportunities for both user-adaptation and experimentation. Traditional macro-adaptation (problem selection) and step-based adaptation (hints and feedback) will be extended by meta-adaptation (adaptive system selection) and micro-adaptation (event-level optimization). The existence of persistent and widely-used systems will also support new paradigms for experimentation in education, allowing researchers to understand interactions and boundary conditions for learning principles. New central research questions for the field will also need to be answered due to these changes in the AIED landscape.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Filter
2004
Gordon, Andrew S.; Nair, Anish
Expressions Related to Knowledge and Belief in Children's Speech Proceedings Article
In: Proceedings of the 26th Annual Meeting of the Cognitive Science Society (CogSci), Lawrence Erlbaum Associates, Chicago, IL, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_expressions_2004,
title = {Expressions Related to Knowledge and Belief in Children's Speech},
author = {Andrew S. Gordon and Anish Nair},
url = {http://ict.usc.edu/pubs/Expressions%20Related%20to%20Knowledge%20and%20Belief%20in%20Childrens%20Speech.PDF},
year = {2004},
date = {2004-08-01},
booktitle = {Proceedings of the 26th Annual Meeting of the Cognitive Science Society (CogSci)},
publisher = {Lawrence Erlbaum Associates},
address = {Chicago, IL},
abstract = {Children develop certain abilities related to Theory of Mind reasoning, particularly concerning the False-belief Task, between the ages of 3 and 5. This paper investigates whether there is a corresponding change in the frequency of linguistic expressions related to knowledge and belief produced by children around these ages. Automated corpus analysis techniques are used to tag each expression related to knowledge and belief in a large corpus of transcripts of speech from normally developing English-learning children. Results indicate that the frequency of expressions related to knowledge and belief increases steadily from the beginning of children's language production. Tracking of individual concepts related to knowledge and belief indicates that there are no clear qualitative changes in the set of concepts that are expressed by children of different ages. The implications for the relationship between language and the development of Theory of Mind reasoning abilities in children are discussed.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
van Lent, Michael; Carpenter, Paul; McAlinden, Ryan; Tan, Poey Guan
A Tactical and Strategic AI Interface for Real-Time Strategy Games Proceedings Article
In: AAAI Technical Workshop Challenges in Game Artificial Intelligence, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{van_lent_tactical_2004,
title = {A Tactical and Strategic AI Interface for Real-Time Strategy Games},
author = {Michael van Lent and Paul Carpenter and Ryan McAlinden and Poey Guan Tan},
url = {http://ict.usc.edu/pubs/A%20Tactical%20and%20Strategic%20AI%20Interface%20for%20Real-Time%20Strategy%20Games.pdf},
year = {2004},
date = {2004-07-01},
booktitle = {AAAI Technical Workshop Challenges in Game Artificial Intelligence},
abstract = {Real Time Strategy (RTS) games present a wide range of AI challenges at the tactical and strategic level. Unfortunately, the lack of flexible “mod” interfaces to these games has made it difficult for AI researchers to explore these challenges in the context of RTS games. We are addressing this by building two AI interfaces into Full Spectrum Command, a real time strategy training aid built for the U.S. Army. The tactical AI interface will allow AI systems, such as Soar and Simulation Based Tactics Mining, to control the tactical behavior of platoons and squads within the environment. The strategic AI interface will allow AI planners to generate and adapt higher-level battle plans which can in turn be executed by the tactical AI. This paper describes these two interfaces and our plans for identifying and addressing the research challenges involved in developing and deploying tactical and strategic AI systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Patel, Jigish; Parker, Robert; Traum, David
Small group discussion simulation for middle Level of Detail Crowds Proceedings Article
In: 8th Workshop on Semantics and Pragmatics of Dialogue, Barcelona, Spain, 2004.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{patel_small_2004,
title = {Small group discussion simulation for middle Level of Detail Crowds},
author = {Jigish Patel and Robert Parker and David Traum},
url = {http://ict.usc.edu/pubs/Small%20group%20discussion%20simulation%20for%20middle%20Level%20of%20Detail%20Crowds.pdf},
year = {2004},
date = {2004-07-01},
booktitle = {8th Workshop on Semantics and Pragmatics of Dialogue},
address = {Barcelona, Spain},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
van Lent, Michael; Fisher, William; Mancuso, Michael
An Explainable Artificial Intelligence System for Small-unit Tactical Behavior Proceedings Article
In: National Conference on Artificial Intelligence, pp. 900–907, San Jose, CA, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{van_lent_explainable_2004,
title = {An Explainable Artificial Intelligence System for Small-unit Tactical Behavior},
author = {Michael van Lent and William Fisher and Michael Mancuso},
url = {http://ict.usc.edu/pubs/An%20Explainable%20Artificial%20Intelligence%20System%20for%20Small-unit%20Tactical%20Behavior.pdf},
year = {2004},
date = {2004-06-01},
booktitle = {National Conference on Artificial Intelligence},
pages = {900–907},
address = {San Jose, CA},
abstract = {As the artificial intelligence (AI) systems in military simulations and computer games become more complex, their actions become increasingly difficult for users to understand. Expert systems for medical diagnosis have addressed this challenge though the addition of explanation generation systems that explain a system's internal processes. This paper describes the AI architecture and associated explanation capability used by Full Spectrum Command, a training system developed for the US Army by commercial game developers and academic researchers.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
Authoring Branching Storylines for Training Applications Proceedings Article
In: Proceedings of the Sixth International Conference of the Learning Sciences (ICLS), Santa Monica, CA, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_authoring_2004,
title = {Authoring Branching Storylines for Training Applications},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Authoring%20Branching%20Storylines%20for%20Training%20Applications.PDF},
year = {2004},
date = {2004-06-01},
booktitle = {Proceedings of the Sixth International Conference of the Learning Sciences (ICLS)},
address = {Santa Monica, CA},
abstract = {Progress in the area of interactive training applications has led to the formulation of methodologies that have been successfully transitioned out of research labs and into the practices of commercial developers. This paper reviews the academic origins of a methodology for developing training applications that incorporate branching storylines to engage users in a firstperson learn-by-doing experience, originally referred to as Outcome-Driven Simulations. Innovations and modifications to this methodology from the commercial sector are then reviewed, and the steps in this methodology are described, as implemented in current best practices. Finally, new research efforts based on this methodology are examined, including the introduction of natural language processing technology to enable human-computer conversations and the integration of branching storylines into real-time virtual reality environments. A prototype application to support leadership development within the U.S. Army that includes these advances is described.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Evaluating a General Model of Emotional Appraisal and Coping Proceedings Article
In: AAAI Spring Symposium on Architectures for Modeling Emotion: Cross-disciplinary Foundations, Palo Alto, CA, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_evaluating_2004-1,
title = {Evaluating a General Model of Emotional Appraisal and Coping},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Evaluating%20a%20General%20Model%20of%20Emotional%20Appraisal%20and%20Coping.pdf},
year = {2004},
date = {2004-06-01},
booktitle = {AAAI Spring Symposium on Architectures for Modeling Emotion: Cross-disciplinary Foundations},
address = {Palo Alto, CA},
abstract = {Introduction: In our research, we have developed a general computational model of human emotion. The model attempts to account for both the factors that give rise to emotions as well as the wide-ranging impact emotions have on cognitive and behavioral responses. Emotions influence our beliefs, our decision-making and how we adapt our behavior to the world around us. While most apparent in moments of great stress, emotions sway even the mundane decisions we face in everyday life. Emotions also infuse our social relationships. Our interactions with each other are a source of many emotions and we have developed a range of behaviors that can communicate emotional information as well as an ability to recognize and be influenced by the emotional arousal of others. By virtue of their central role and wide influence, emotion arguably provides the means to coordinate the diverse mental and physical components required to respond to the world in a coherent fashion. (1st Paragraph)},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
A Utility-Based Approach to Intention Recognition Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_utility-based_2004,
title = {A Utility-Based Approach to Intention Recognition},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/A%20Utility-Based%20Approach%20to%20Intention%20Recognition.pdf},
year = {2004},
date = {2004-06-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
address = {New York, NY},
abstract = {Based on the assumption that a rational agent will adopt a plan that maximizes the expected utility, we present a utility-based approach to plan recognition problem in this paper. The approach explicitly takes the observed agent's preferences into consideration, and computes the estimated expected utilities of plans to disambiguate competing hypotheses. Online plan recognition is realized by incrementally using plan knowledge and observations to change state probabilities. We also discuss the work and compare it with other probabilistic models in the paper.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan; Rickel, Jeff
Expressive Behaviors for Virtual Worlds Book Section
In: Life-Like Characters: Tools, Affective Functions, and Applications, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{marsella_expressive_2004,
title = {Expressive Behaviors for Virtual Worlds},
author = {Stacy C. Marsella and Jonathan Gratch and Jeff Rickel},
url = {http://ict.usc.edu/pubs/Expressive%20Behaviors%20for%20Virtual%20Worlds.pdf},
year = {2004},
date = {2004-06-01},
booktitle = {Life-Like Characters: Tools, Affective Functions, and Applications},
abstract = {A person's behavior provides significant information about their emotional state, attitudes, and attention. Our goal is to create virtual humans that convey such information to people while interacting with them in virtual worlds. The virtual humans must respond dynamically to the events surrounding them, which are fundamentally influenced by users' actions, while providing an illusion of human-like behavior. A user must be able to interpret the dynamic cognitive and emotional state of the virtual humans using the same nonverbal cues that people use to understand one another. Towards these goals, we are integrating and extending components from three prior systems: a virtual human architecture with a wide range of cognitive and motor capabilities, a model of task-oriented emotional appraisal and socially situated planning, and a model of how emotions and coping impact physical behavior. We describe the key research issues and approach in each of these prior systems, as well as our integration and its initial implementation in a leadership training system.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Debevec, Paul; Gardner, Andrew; Tchou, Chris; Hawkins, Tim
Postproduction Re-Illumination of Live Action Using Time-Multiplexed Lighting Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 05.2004, 2004.
Abstract | Links | BibTeX | Tags: Graphics
@techreport{debevec_postproduction_2004,
title = {Postproduction Re-Illumination of Live Action Using Time-Multiplexed Lighting},
author = {Paul Debevec and Andrew Gardner and Chris Tchou and Tim Hawkins},
url = {http://ict.usc.edu/pubs/Postproduction%20Re-Illumination%20of%20Live%20Action%20Using%20Time-Multiplexed%20Lighting.pdf},
year = {2004},
date = {2004-06-01},
number = {ICT TR 05.2004},
address = {Marina del Rey, CA},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {In this work, we present a technique for capturing a time-varying human performance in such a way that it can be re-illuminated in postproduction. The key idea is to illuminate the subject with a variety of rapidly changing time-multiplexed basis lighting conditions, and to record these lighting conditions with a fast enough video camera so that several or many different basis lighting conditions are recorded during the span of the final video's desired frame rate. In this poster we present two versions of such a system and propose plans for creating a complete, production-ready device.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {techreport}
}
Garg, Saurabh; Martinovski, Bilyana; Robinson, Susan; Stephan, Jens; Tetreault, Joel; Traum, David
Evaluation of Transcription and Annotation tools for a Multi-modal, Multi-party dialogue corpus Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{garg_evaluation_2004,
title = {Evaluation of Transcription and Annotation tools for a Multi-modal, Multi-party dialogue corpus},
author = {Saurabh Garg and Bilyana Martinovski and Susan Robinson and Jens Stephan and Joel Tetreault and David Traum},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20Transcription%20and%20Annotation%20tools%20for%20a%20Multi-modal,%20Multi-party%20dialogue%20corpus.pdf},
year = {2004},
date = {2004-05-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC)},
address = {Lisbon, Portugal},
abstract = {This paper reviews nine available transcription and annotation tools, considering in particular the special difficulties arising from transcribing and annotating multi-party, multi-modal dialogue. Tools are evaluated as to the ability to support the user's annotation scheme, ability to visualize the form of the data, compatibility with other tools, flexibility of data representation, and general user-friendliness.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Robinson, Susan; Stephan, Jens
Evaluation of multi-party virtual reality dialogue interaction Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_evaluation_2004,
title = {Evaluation of multi-party virtual reality dialogue interaction},
author = {David Traum and Susan Robinson and Jens Stephan},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20multi-party%20virtual%20reality%20dialogue%20interaction.pdf},
year = {2004},
date = {2004-05-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC)},
address = {Lisbon, Portugal},
abstract = {We describe a dialogue evaluation plan for a multi-character virtual reality training simulation. A multi-component evaluation plan is presented, including user satisfaction, intended task completion, recognition rate, and a new annotation scheme for appropriateness. Preliminary results for formative tests are also presented.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
Tough Love Between Artificial Intelligence and Interactive Entertainment Proceedings Article
In: Proceedings of IE2004: Australian Workshop on Interactive Entertainment, Sydney, Australia, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_tough_2004,
title = {Tough Love Between Artificial Intelligence and Interactive Entertainment},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Tough%20Love%20Between%20Artificial%20Intelligence%20and%20Interactive%20Entertainment.PDF},
year = {2004},
date = {2004-02-01},
booktitle = {Proceedings of IE2004: Australian Workshop on Interactive Entertainment},
address = {Sydney, Australia},
abstract = {Burgeoning interest in Interactive Entertainment has led many computer scientists with roots in Artificial Intelligence toward the exploration of ideas in mass-market entertainment applications. Increasing numbers of workshops, journals, and funding programs for Interactive Entertainment indicate that AI researchers in this area have a good sense for following hot new trends, but are they vanguards of a fruitful science or misguided opportunists? In this IE2004 invited talk, I'll explore the relationship between AI research and the Interactive Entertainment field, from its seductive courtship through its rocky marriage, and offer some relationship advice for the future.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Robinson, Susan; Martinovski, Bilyana; Garg, Saurabh; Stephan, Jens; Traum, David
Issues in corpus development for multi-party multi-modal task-oriented dialogue Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{robinson_issues_2004,
title = {Issues in corpus development for multi-party multi-modal task-oriented dialogue},
author = {Susan Robinson and Bilyana Martinovski and Saurabh Garg and Jens Stephan and David Traum},
url = {http://ict.usc.edu/pubs/Issues%20in%20corpus%20development%20for%20multi-party%20multi-modal%20task-oriented%20dialogue.pdf},
year = {2004},
date = {2004-01-01},
booktitle = {International Conference on Language Resources and Evaluation (LREC)},
address = {Lisbon, Portugal},
abstract = {This paper describes the development of a multi-modal corpus based on multi-party multi-task driven common goal oriented spoken language interaction. The data consists of approximately 10 hours of audio human simulation radio data and nearly 5 hours of video and audio face-to-face sessions between human trainees and virtual agents.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kim, Hyeok-Soo; Gratch, Jonathan
A Planner-Independent Collaborative Planning Assistant Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 766–773, New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kim_planner-independent_2004,
title = {A Planner-Independent Collaborative Planning Assistant},
author = {Hyeok-Soo Kim and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/A%20Planner-Independent%20Collaborative%20Planning%20Assistant.pdf},
year = {2004},
date = {2004-01-01},
booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
volume = {2},
pages = {766–773},
address = {New York, NY},
abstract = {This article introduces a novel approach to the problem of collaborative planning. We present a method that takes classical one-shot planning techniques - that take a fixed set of goals, initial state, and a domain theory - and adapts them to support the incremental, hierarchical and exploratory nature of collaborative planning that occurs between human planners, and that multi-agent planning systems attempt to support. This approach is planner-independent - in that it could be applied to any classical planning technique - and recasts the problem of collaborative planning as a search through a space of possible inputs to a classical planning system. This article outlines the technique and describes its application to the Mission Rehearsal Exercise, a multi-agent training system.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; Hobbs, Jerry R.
Formalizations of Commonsense Psychology Journal Article
In: AI Magazine, vol. 24, no. 5, pp. 49–62, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@article{gordon_formalizations_2004,
title = {Formalizations of Commonsense Psychology},
author = {Andrew S. Gordon and Jerry R. Hobbs},
url = {http://ict.usc.edu/pubs/Formalizations%20of%20Commonsense%20Psychology.pdf},
year = {2004},
date = {2004-01-01},
journal = {AI Magazine},
volume = {24},
number = {5},
pages = {49–62},
abstract = {The central challenge in commonsense knowledge representation research is to develop content theories that achieve a high degree of both competency and coverage. We describe a new methodology for constructing formal theories in commonsense knowledge domains that complements traditional knowledge representation approaches by first addressing issues of coverage. We show how a close examination of a very general task (strategic planning) leads to a catalog of the concepts and facts that must be encoded for general commonsense reasoning. These concepts are sorted into a manageable number of coherent domains, one of which is the representational area of commonsense human memory. We then elaborate on these concepts using textual corpus-analysis techniques, where the conceptual distinctions made in natural language are used to improve the definitions of the concepts that should be expressible in our formal theories. These representational areas are then analyzed using more traditional knowledge representation techniques, as demonstrated in this article by our treatment of commonsense human memory.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Marsella, Stacy C.
Technical Details of a Domain-independent Framework for Modeling Emotion Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 04.2004, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@techreport{gratch_technical_2004,
title = {Technical Details of a Domain-independent Framework for Modeling Emotion},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Technical%20Details%20of%20a%20Domain-independent%20Framework%20for%20Modeling%20Emotion.pdf},
year = {2004},
date = {2004-01-01},
number = {ICT TR 04.2004},
address = {Marina del Rey, CA},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {This technical report elaborates on the technical details of the EMA model of emotional appraisal and coping. It should be seen as an appendix to the journal article on this topic (Gratch & Marsella, to appear)},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Mao, Wenji; Gratch, Jonathan
Decision-Theoretic Approach to Plan Recognition Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 01.2004, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{mao_decision-theoretic_2004,
  author      = {Wenji Mao and Jonathan Gratch},
  title       = {Decision-Theoretic Approach to Plan Recognition},
  url         = {http://ict.usc.edu/pubs/Decision-Theoretic%20Approach%20to%20Plan%20Recognition.pdf},
  year        = {2004},
  date        = {2004-01-01},
  number      = {ICT TR 01.2004},
  address     = {Marina del Rey, CA},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {In this report, first we give a survey of the work in plan recognition field, including the evolution of different approaches, their strength and weaknesses. Then we propose two decision-theoretic approaches to plan recognition problem, which explicitly take outcome utilities into consideration. One is an extension within the probabilistic reasoning framework, by adding utility nodes to belief nets. The other is based on maximizing the estimated expected utility of possible plan. Illustrative examples are given to explain the approaches. Finally, we compare the two approaches presented in the report and summarize the work.},
  keywords    = {Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Traum, David
Issues in Multiparty Dialogues Journal Article
In: Advances in Agent Communication, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{traum_issues_2004,
  author    = {David Traum},
  editor    = {F. Dignum},
  title     = {Issues in Multiparty Dialogues},
  url       = {http://ict.usc.edu/pubs/Issues%20in%20Multiparty%20Dialogues.pdf},
  year      = {2004},
  date      = {2004-01-01},
  journal   = {Advances in Agent Communication},
  abstract  = {This article examines some of the issues in representation of, processing, and automated agent participation in natural language dialogue, considering expansion from two-party dialogue to multi-party dialogue. These issues include some regarding the roles agents play in dialogue, interactive factors, and content management factors.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Mao, Wenji; Gratch, Jonathan
Social Judgment in Multiagent Interactions Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 210–217, New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_social_2004,
  author    = {Wenji Mao and Jonathan Gratch},
  title     = {Social Judgment in Multiagent Interactions},
  url       = {http://ict.usc.edu/pubs/Social%20Judgment%20in%20Multiagent%20Interactions.pdf},
  year      = {2004},
  date      = {2004-01-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  volume    = {1},
  pages     = {210--217},
  address   = {New York, NY},
  abstract  = {Social judgment is a process of social explanation whereby one evaluates which entities deserve credit or blame for multi-agent activities. Such explanations are a key aspect of inference in a social environment and a model of this process can advance several design components of multi-agent systems. Social judgment underlies social planning, social learning, natural language pragmatics and computational model of emotion. Based on psychological attribution theory, this paper presents a computational approach to forming social judgment based on an agent's causal knowledge and communicative interactions with other agents.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Cao, Yong; Faloutsos, Petros; Kohler, Eddie; Pighin, Frédéric
Real-time Speech Motion Synthesis from Recorded Motions Proceedings Article
In: Proceedings of Eurographics/SIGGRAPH Symposium on Computer Animation, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{cao_real-time_2004,
  author    = {Yong Cao and Petros Faloutsos and Eddie Kohler and Frédéric Pighin},
  title     = {Real-time Speech Motion Synthesis from Recorded Motions},
  url       = {http://ict.usc.edu/pubs/Real-time%20Speech%20Motion%20Synthesis%20from%20Recorded%20Motions.pdf},
  year      = {2004},
  date      = {2004-01-01},
  booktitle = {Proceedings of Eurographics/SIGGRAPH Symposium on Computer Animation},
  abstract  = {Data-driven approaches have been successfully used for realistic visual speech synthesis. However, little effort has been devoted to real-time lip-synching for interactive applications. In particular, algorithms that are based on a graph of motions are notorious for their exponential complexity. In this paper, we present a greedy graph search algorithm that yields vastly superior performance and allows real-time motion synthesis from a large database of motions. The time complexity of the algorithm is linear with respect to the size of an input utterance. In our experiments, the synthesis time for an input sentence of average length is under a second.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Parsons, Thomas D.; Larson, Peter; Kratz, Kris; Thiebaux, Marcus; Bluestein, Brendon; Buckwalter, John Galen; Rizzo, Albert
Sex differences in mental rotation and spatial rotation in a virtual environment Journal Article
In: Neuropsychologia, vol. 42, pp. 555–562, 2004.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_sex_2004,
  author    = {Thomas D. Parsons and Peter Larson and Kris Kratz and Marcus Thiebaux and Brendon Bluestein and John Galen Buckwalter and Albert Rizzo},
  title     = {Sex differences in mental rotation and spatial rotation in a virtual environment},
  url       = {http://ict.usc.edu/pubs/Sex%20differences%20in%20mental%20rotation%20and%20spatial%20rotation%20in%20a%20virtual%20environment.pdf},
  doi       = {10.1016/j.neuropsychologia.2003.08.014},
  year      = {2004},
  date      = {2004-01-01},
  journal   = {Neuropsychologia},
  volume    = {42},
  pages     = {555--562},
  abstract  = {The visuospatial ability referred to as mental rotation has been shown to produce one of the largest and most consistent sex differences, in favor of males, in the cognitive literature. The current study utilizes both a paper-and-pencil version of the mental rotations test (MRT) and a virtual environment for investigating rotational ability among 44 adult subjects. Results replicate sex differences traditionally seen on paper-and-pencil measures, while no sex effects were observed in the virtual environment. These findings are discussed in terms of task demands and motor involvement. Sex differences were also seen in the patterns of correlations between rotation tasks and other neuropsychological measures. Current results suggest men may rely more on left hemisphere processing than women when engaged in rotational tasks. © 2003 Elsevier Ltd. All rights reserved.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Parsons, Thomas D.; Rizzo, Albert; Buckwalter, John Galen
Backpropagation and Regression: Comparative Utility for Neuropsychologists Journal Article
In: Journal of Clinical and Experimental Neuropsychology, vol. 26, no. 1, pp. 95–104, 2004.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_backpropagation_2004,
  author    = {Thomas D. Parsons and Albert Rizzo and John Galen Buckwalter},
  title     = {Backpropagation and Regression: Comparative Utility for Neuropsychologists},
  url       = {http://ict.usc.edu/pubs/Backpropagation%20and%20Regression-%20Comparative%20Utility%20for%20Neuropsychologists.pdf},
  year      = {2004},
  date      = {2004-01-01},
  journal   = {Journal of Clinical and Experimental Neuropsychology},
  volume    = {26},
  number    = {1},
  pages     = {95--104},
  abstract  = {The aim of this research was to compare the data analytic applicability of a backpropagated neural network with that of regression analysis. Thirty individuals between the ages of 64 and 86 (Mean age = 73.6; Mean years education = 15.4; % women = 50) participated in a study designed to validate a new test of spatial ability administered in virtual reality. As part of this project a standard neuropsychological battery was administered. Results from the multiple regression model (R2 = .21, p $<$ .28; Standard Error = 18.01) were compared with those of a backpropagated ANN (R2 = .39, p $<$ .02; Standard Error = 13.07). This 18% increase in prediction of a common neuropsychological problem demonstrated that an ANN has the potential to outperform a regression.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Gratch, Jonathan; Marsella, Stacy C.
A Domain-independent Framework for Modeling Emotion Journal Article
In: Journal of Cognitive Systems Research, vol. 5, no. 4, pp. 269–306, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{gratch_domain-independent_2004,
  author    = {Jonathan Gratch and Stacy C. Marsella},
  title     = {A Domain-independent Framework for Modeling Emotion},
  url       = {http://ict.usc.edu/pubs/A%20Domain-independent%20Framework%20for%20Modeling%20Emotion.pdf},
  year      = {2004},
  date      = {2004-01-01},
  journal   = {Journal of Cognitive Systems Research},
  volume    = {5},
  number    = {4},
  pages     = {269--306},
  abstract  = {In this article, we show how psychological theories of emotion shed light on the interaction between emotion and cognition, and thus can inform the design of human-like autonomous agents that must convey these core aspects of human behavior. We lay out a general computational framework of appraisal and coping as a central organizing principle for such systems. We then discuss a detailed domain-independent model based on this framework, illustrating how it has been applied to the problem of generating behavior for a significant social training application. The model is useful not only for deriving emotional state, but also for informing a number of the behaviors that must be modeled by virtual humans such as facial expressions, dialogue management, planning, reacting, and social understanding. Thus, the work is of potential interest to models of strategic decision-making, action selection, facial animation, and social intelligence.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Hawkins, Tim; Wenger, Andreas; Tchou, Chris; Gardner, Andrew; Goransson, Fredrik; Debevec, Paul
Animatable Facial Reflectance Fields Proceedings Article
In: Eurographics Symposium on Rendering, Norkoping, Sweden, 2004.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{hawkins_animatable_2004,
  author    = {Tim Hawkins and Andreas Wenger and Chris Tchou and Andrew Gardner and Fredrik Goransson and Paul Debevec},
  title     = {Animatable Facial Reflectance Fields},
  url       = {http://ict.usc.edu/pubs/Animatable%20Facial%20Re%EF%AC%82ectance%20Fields.pdf},
  year      = {2004},
  date      = {2004-01-01},
  booktitle = {Eurographics Symposium on Rendering},
  address   = {Norkoping, Sweden},
  abstract  = {We present a technique for creating an animatable image-based appearance model of a human face, able to capture appearance variation over changing facial expression, head pose, view direction, and lighting condition. Our capture process makes use of a specialized lighting apparatus designed to rapidly illuminate the subject sequentially from many different directions in just a few seconds. For each pose, the subject remains still while six video cameras capture their appearance under each of the directions of lighting. We repeat this process for approximately 60 different poses, capturing different expressions, visemes, head poses, and eye positions. The images for each of the poses and camera views are registered to each other semi-automatically with the help of fiducial markers. The result is a model which can be rendered realistically under any linear blend of the captured poses and under any desired lighting condition by warping, scaling, and blending data from the original images. Finally, we show how to drive the model with performance capture data, where the pose is not necessarily a linear combination of the original captured poses.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
Strategy Representation: An Analysis of Planning Knowledge Book
Lawrence Erlbaum Associates/Psychology Press, Mahwah, NJ, 2004, ISBN: 0-8058-4527-5.
Abstract | Links | BibTeX | Tags: The Narrative Group
@book{gordon_strategy_2004,
  author    = {Andrew S. Gordon},
  title     = {Strategy Representation: An Analysis of Planning Knowledge},
  url       = {http://people.ict.usc.edu/~gordon/sr.html},
  isbn      = {0-8058-4527-5},
  year      = {2004},
  date      = {2004-01-01},
  publisher = {Lawrence Erlbaum Associates/Psychology Press},
  address   = {Mahwah, NJ},
  abstract  = {Strategy Representation: An Analysis of Planning Knowledge describes an innovative methodology for investigating the conceptual structures that underlie human reasoning. This work explores the nature of planning strategies-the abstract patterns of planning behavior that people recognize across a broad range of real world situations. With a sense of scale that is rarely seen in the cognitive sciences, this book catalogs 372 strategies across 10 different planning domains: business practices, education, object counting, Machiavellian politics, warfare, scientific discovery, personal relationships, musical performance, and the anthropomorphic strategies of animal behavior and cellular immunology. Noting that strategies often serve as the basis for analogies that people draw across planning situations, this work attempts to explain these analogies by defining the fundamental concepts that are common across all instances of each strategy. By aggregating evidence from each of the strategy definitions provided, the representational requirements of strategic planning are identified. The important finding is that the concepts that underlie strategic reasoning are of incredibly broad scope. Nearly 1,000 fundamental concepts are identified, covering every existing area of knowledge representation research and many areas that have not yet been adequately formalized, particularly those related to common sense understanding of mental states and processes. An organization of these concepts into 48 fundamental areas of knowledge and representation is provided, offering an invaluable roadmap for progress within the field.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {book}
}
Gordon, Andrew S.; Lent, Michael; Velson, Martin; Carpenter, Paul; Jhala, Arnav
Branching Storylines in Virtual Reality Environments for Leadership Development Proceedings Article
In: Proceedings of the 16th Innovative Applications of Artificial Intelligence Conference (IAAI-04), pp. 844–851, AAAI Press, San Jose, CA, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_branching_2004,
  author        = {Andrew S. Gordon and Michael Lent and Martin Velson and Paul Carpenter and Arnav Jhala},
  title         = {Branching Storylines in Virtual Reality Environments for Leadership Development},
  url           = {http://ict.usc.edu/pubs/Branching%20Storylines%20in%20Virtual%20Reality%20Environments%20for%20Leadership%20Development.pdf},
  year          = {2004},
  date          = {2004-01-01},
  booktitle     = {Proceedings of the 16th Innovative Applications of Artificial Intelligence Conference (IAAI-04)},
  pages         = {844--851},
  publisher     = {AAAI Press},
  address       = {San Jose, CA},
  abstract      = {Simulation-based training is increasingly being used within the military to practice and develop the skills of successful soldiers. For the skills associated with successful military leadership, our inability to model human behavior to the necessary degree of fidelity in constructive simulations requires that new interactive designs be developed. The ICT Leaders project supports leadership development through the use of branching storylines realized within a virtual reality environment. Trainees assume a role in a fictional scenario, where the decisions that they make in this environment ultimately affect the success of a mission. All trainee decisions are made in the context of natural language conversations with virtual characters. The ICT Leaders project advances a new form of interactive training by incorporating a suite of Artificial Intelligence technologies, including control architectures, agents of mixed autonomy, and natural language processing algorithms.},
  internal-note = {NOTE(review): surnames "Lent" and "Velson" look like truncations of "van Lent" and "van Velsen" -- verify against the published paper},
  keywords      = {The Narrative Group},
  pubstate      = {published},
  tppubtype     = {inproceedings}
}
Muller, T. J.
Everything in perspective Technical Report
University of Southern California Institute for Creative Technologies, Marina del Rey, CA, no. ICT TR 03.2004, 2004.
@techreport{muller_everything_2004,
  author      = {T. J. Muller},
  title       = {Everything in perspective},
  url         = {http://ict.usc.edu/pubs/Everything%20in%20perspective.pdf},
  year        = {2004},
  date        = {2004-01-01},
  number      = {ICT TR 03.2004},
  address     = {Marina del Rey, CA},
  institution = {University of Southern California Institute for Creative Technologies},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Huang, Hesu; Kyriakakis, Chris
Real-valued Delayless Subband Affine Projection Algorithm for Acoustic Echo Cancellation Proceedings Article
In: Conference Record of the Thirty-Eighth Asilomar Conference on Signals, Systems and Computers, pp. 259–262, Pacific Grove, CA, 2004, ISBN: 0-7803-8622-1.
Abstract | Links | BibTeX | Tags:
@inproceedings{huang_real-valued_2004,
  author    = {Hesu Huang and Chris Kyriakakis},
  title     = {Real-valued Delayless Subband Affine Projection Algorithm for Acoustic Echo Cancellation},
  url       = {http://ict.usc.edu/pubs/Real-valued%20Delayless%20Subband%20Affine%20Projection%20Algorithm%20for%20Acoustic%20Echo%20Cancellation.pdf},
  doi       = {10.1109/ACSSC.2004.1399131},
  isbn      = {0-7803-8622-1},
  year      = {2004},
  date      = {2004-01-01},
  booktitle = {Conference Record of the Thirty-Eighth Asilomar Conference on Signals, Systems and Computers},
  volume    = {1},
  pages     = {259--262},
  address   = {Pacific Grove, CA},
  abstract  = {Acoustic echo cancellation (AEC) often involves adaptive filters with large numbers of taps, which results in poor performance in real-time applications. The utilization of delayless subband adaptive filter (DSAF) helps reduce computations and improve the overall performance. However, conventional oversampled subband adaptive filters mainly use DFT or GDFT based analysts/synthesis filter banks and generate "complex-valued" subband signals. This is particularly inefficient when applying the affine projection algorithm (APA), a popular adaptive algorithm for AEC problem, to each subband. For APA implementation, real-valued signals show higher efficiency than complex signals. In this paper, we present a real-valued delayless subband APA and study both its computational complexity and performance on AEC problems. Compared to the complex valued approach, our method achieves a better performance with lower computational cost.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Debevec, Paul; Tchou, Chris; Gardner, Andrew; Hawkins, Tim; Poullis, Charis; Stumpfel, Jessi; Jones, Andrew; Yun, Nathaniel; Einarsson, Per; Lundgren, Therese; Fajardo, Marcos; Martinez, Philippe
Estimating Surface Reflectance Properties of a Complex Scene under Captured Natural Illumination Technical Report
University of Southern California Institute for Creative Technologies, no. ICT TR 06.2004, 2004.
Abstract | Links | BibTeX | Tags: Graphics
@techreport{debevec_estimating_2004,
  author      = {Paul Debevec and Chris Tchou and Andrew Gardner and Tim Hawkins and Charis Poullis and Jessi Stumpfel and Andrew Jones and Nathaniel Yun and Per Einarsson and Therese Lundgren and Marcos Fajardo and Philippe Martinez},
  title       = {Estimating Surface Reflectance Properties of a Complex Scene under Captured Natural Illumination},
  url         = {http://ict.usc.edu/pubs/ICT-TR-06.2004.pdf},
  year        = {2004},
  date        = {2004-01-01},
  number      = {ICT TR 06.2004},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {We present a process for estimating spatially-varying surface reflectance of a complex scene observed under natural illumination conditions. The process uses a laser-scanned model of the scene's geometry, a set of digital images viewing the scene's surfaces under a variety of natural illumination conditions, and a set of corresponding measurements of the scene's incident illumination in each photograph. The process then employs an iterative inverse global illumination technique to compute surface colors for the scene which, when rendered under the recorded illumination conditions, best reproduce the scene's appearance in the photographs. In our process we measure BRDFs of representative surfaces in the scene to better model the non-Lambertian surface reflectance. Our process uses a novel lighting measurement apparatus to record the full dynamic range of both sunlit and cloudy natural illumination conditions. We employ Monte-Carlo global illumination, multiresolution geometry, and a texture atlas system to perform inverse global illumination on the scene. The result is a lighting-independent model of the scene that can be re-illuminated under any form of lighting. We demonstrate the process on a real-world archaeological site, showing that the technique can produce novel illumination renderings consistent with real photographs as well as reflectance properties that are consistent with ground-truth reflectance measurements.},
  keywords    = {Graphics},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Traum, David; Marsella, Stacy C.; Gratch, Jonathan
Emotion and Dialogue in the MRE Virtual Humans Proceedings Article
In: Lecture Notes in Computer Science, pp. 117–127, Kloster Irsee, Germany, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_emotion_2004,
  author    = {David Traum and Stacy C. Marsella and Jonathan Gratch},
  title     = {Emotion and Dialogue in the MRE Virtual Humans},
  url       = {http://ict.usc.edu/pubs/Emotion%20and%20Dialogue%20in%20the%20MRE%20Virtual%20Humans.pdf},
  year      = {2004},
  date      = {2004-01-01},
  booktitle = {Lecture Notes in Computer Science},
  volume    = {3068},
  pages     = {117--127},
  address   = {Kloster Irsee, Germany},
  abstract  = {We describe the emotion and dialogue aspects of the virtual agents used in the MRE project at USC. The models of emotion and dialogue started independently, though each makes crucial use of a central task model. In this paper we describe the task model, dialogue model, and emotion model, and the interactions between them.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Narayanan, Shrikanth; Ananthakrishnan, S.; Belvin, R.; Ettaile, E.; Gandhe, Sudeep; Ganjavi, S.; Georgiou, Panayiotis G.; Hein, C. M.; Kadambe, S.; Knight, K.; Marcu, D.; Neely, H. E.; Srinivasamurthy, Naveen; Wang, Dagen
The Transonics Spoken Dialogue Translator: An aid for English-Persian Doctor-Patient interviews Proceedings Article
In: Working Notes of the AAAI Fall Symposium on Dialogue Systems for Health Communication, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{narayanan_transonics_2004,
  author    = {Shrikanth Narayanan and S. Ananthakrishnan and R. Belvin and E. Ettaile and Sudeep Gandhe and S. Ganjavi and Panayiotis G. Georgiou and C. M. Hein and S. Kadambe and K. Knight and D. Marcu and H. E. Neely and Naveen Srinivasamurthy and Dagen Wang},
  title     = {The Transonics Spoken Dialogue Translator: An aid for English-Persian Doctor-Patient interviews},
  url       = {http://ict.usc.edu/pubs/The%20Transonics%20Spoken%20Dialogue%20Translator-%20An%20aid%20for%20English-Persian%20Doctor-Patient%20interviews.pdf},
  year      = {2004},
  date      = {2004-01-01},
  booktitle = {Working Notes of the AAAI Fall Symposium on Dialogue Systems for Health Communication},
  abstract  = {In this paper we describe our spoken English-Persian medical dialogue translation system. We describe the data collection effort and give an overview of the component technologies, including speech recognition, translation, dialogue management, and user interface design. The individual modules and system are designed for flexibility, and to be able to leverage different amounts of available resources to maximize the ability for communication between medical care-giver and patient.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Parsons, Thomas D.; Thompson, E.; Buckwalter, John Galen; Bluestein, Brendon
Pregnancy History and Cognition During and After Pregnancy Journal Article
In: International Journal of Neuroscience, vol. 114, pp. 1099–1110, 2004, ISSN: 0020-7454.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_pregnancy_2004,
  author    = {Thomas D. Parsons and E. Thompson and John Galen Buckwalter and Brendon Bluestein},
  title     = {Pregnancy History and Cognition During and After Pregnancy},
  url       = {http://ict.usc.edu/pubs/Pregnancy%20History%20and%20Cognition%20During%20and%20After%20Pregnancy.pdf},
  doi       = {10.1080/00207450490475544},
  issn      = {0020-7454},
  year      = {2004},
  date      = {2004-01-01},
  journal   = {International Journal of Neuroscience},
  volume    = {114},
  pages     = {1099--1110},
  abstract  = {An increasing body of literature confirms anecdotal reports that cognitive changes occur during pregnancy. This article assessed whether prior pregnancy, which alters a woman's subsequent hormonal environment, is associated with a specific cognitive profile during and after pregnancy. Seven primigravids and nine multigravids were compared, equivalent for age and education. No differences between groups were found during pregnancy. After delivery, multigravids performed better than primigravids on verbal memory tasks. After controlling for mood, a significant difference in verbal memory remained. A neuroadaptive mechanism may develop after first pregnancy that increases the ability to recover from some cognitive deficits after later pregnancies.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Hartholt, Arno; Muller, T. J.
Interaction on Emotions Technical Report
University of Southern California Institute for Creative Technologies, Marina del Rey, CA, no. ICT TR 02.2004, 2004.
Abstract | Links | BibTeX | Tags:
@techreport{hartholt_interaction_2004,
  author      = {Arno Hartholt and T. J. Muller},
  title       = {Interaction on Emotions},
  url         = {http://ict.usc.edu/pubs/Interaction%20on%20emotions.pdf},
  year        = {2004},
  date        = {2004-01-01},
  number      = {ICT TR 02.2004},
  address     = {Marina del Rey, CA},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {This report describes the addition of an emotion dialogue to the Mission Rehearsal Exercise (MRE) system. The goal of the MRE system is to provide an immersive learning environment for army officer recruits. The user can engage in conversation with several intelligent agents in order to accomplish the goals within a certain scenario. Although these agents did already possess emotions, they were unable to express them verbally. A question - answer dialogue has been implemented to this purpose. The implementation makes use of proposition states for modelling knowledge, keyword scanning for natural language understanding and templates for natural language generation. The system is implemented using Soar and TCL. An agent can understand emotion related questions in four different domains, type, intensity, state, and the combination of responsible-agent and blameworthiness. Some limitations arise due to the techniques used and to the relative short time frame in which the assignment was to be executed. Main issues are that the existing natural language understanding and generation modules could not be fully used, that very little context about the conversation is available and that the emotion states simplify the emotional state of an agent. These limitations and other thoughts give rise to the following recommendations for further work: * Make full use of references. * Use coping strategies for generating agent's utterances. * Use focus mechanisms for generating agent's utterances. * Extend known utterances. * Use NLU and NLG module. * Use emotion dialogue and states to influence emotions. * Fix known bugs.},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Gordon, Andrew S.
The Representation of Planning Strategies Journal Article
In: Artificial Intelligence, vol. 153, pp. 287–305, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@article{gordon_representation_2004,
  author    = {Andrew S. Gordon},
  title     = {The Representation of Planning Strategies},
  url       = {http://ict.usc.edu/pubs/The%20Representation%20of%20Planning%20Strategies.PDF},
  year      = {2004},
  date      = {2004-01-01},
  journal   = {Artificial Intelligence},
  volume    = {153},
  pages     = {287--305},
  abstract  = {An analysis of strategies, recognizable abstract patterns of planned behavior, highlights the difference between the assumptions that people make about their own planning processes and the representational commitments made in current automated planning systems. This article describes a project to collect and represent strategies on a large scale to identify the representational components of our commonsense understanding of intentional action. Three hundred and seventy-two strategies were collected from ten different planning domains. Each was represented in a pre-formal manner designed to reveal the assumptions that these strategies make concerning the human planning process. The contents of these representations, consisting of nearly one thousand unique concepts, were then collected and organized into forty-eight groups that outline the representational requirements of strategic planning systems.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {article}
}
2003
Narayanan, Shrikanth; Ananthakrishnan, S.; Belvin, R.; Ettaile, E.; Ganjavi, S.; Georgiou, Panayiotis G.; Hein, C. M.; Kadambe, S.; Knight, K.; Marcu, D.; Neely, H. E.; Srinivasamurthy, Naveen; Traum, David; Wang, D.
Transonics: A Speech to Speech System for English-Persian Interactions Proceedings Article
In: Proceedings of Automatic Speech Recognition and Understanding Workshop, U.S. Virgin Islands, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{narayanan_transonics_2003,
  author    = {Shrikanth Narayanan and S. Ananthakrishnan and R. Belvin and E. Ettaile and S. Ganjavi and Panayiotis G. Georgiou and C. M. Hein and S. Kadambe and K. Knight and D. Marcu and H. E. Neely and Naveen Srinivasamurthy and David Traum and D. Wang},
  title     = {Transonics: A Speech to Speech System for English-Persian Interactions},
  url       = {http://ict.usc.edu/pubs/TRANSONICS-%20A%20SPEECH%20TO%20SPEECH%20SYSTEM%20FOR%20ENGLISH-PERSIAN%20INTERACTIONS.pdf},
  year      = {2003},
  date      = {2003-12-01},
  booktitle = {Proceedings of Automatic Speech Recognition and Understanding Workshop},
  address   = {U.S. Virgin Islands},
  abstract  = {In this paper we describe the first phase of development of our speech-to-speech system between English and Modern Persian under the DARPA Babylon program. We give an overview of the various system components: the front end ASR, the machine translation system and the speech generation system. Challenges such as the sparseness of available spoken language data and solutions that have been employed to maximize the obtained benefits from using these limited resources are examined. Efforts in the creation of the user interface and the underlying dialog management system for mediated communication are described.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Debevec, Paul
Image-Based Techniques for Digitizing Environments and Artifacts Proceedings Article
In: 4th International Conference on 3-D Digital Imaging and Modeling (3DIM), 2003.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{debevec_image-based_2003,
  title     = {Image-Based Techniques for Digitizing Environments and Artifacts},
  author    = {Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Image-Based%20Techniques%20for%20Digitizing%20Environments%20and%20Artifacts.pdf},
  year      = {2003},
  date      = {2003-10-01},
  booktitle = {4th International Conference on 3-D Digital Imaging and Modeling (3DIM)},
  abstract  = {This paper presents an overview of techniques for generating photoreal computer graphics models of real-world places and objects. Our group's early efforts in modeling scenes involved the development of Facade, an interactive photogrammetric modeling system that uses geometric primitives to model the scene, and projective texture mapping to produce the scene appearance properties. Subsequent work has produced techniques to model the incident illumination within scenes, which we have shown to be useful for realistically adding computer-generated objects to image-based models. More recently, our work has focussed on recovering lighting-independent models of scenes and objects, capturing how each point on an object reflects light. Our latest work combines three-dimensional range scans, digital photographs, and incident illumination measurements to produce lighting-independent models of complex objects and environments.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Martinovski, Bilyana; Traum, David; Robinson, Susan; Garg, Saurabh
Functions and Patterns of Speaker and Addressee Identifications in Distributed Complex Organizational Tasks Over Radio Proceedings Article
In: Proceedings of Diabruck (7th Workshop on the Semantics and Pragmatics of Dialogue), Saarbruecken Germany, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{martinovski_functions_2003,
  title     = {Functions and Patterns of Speaker and Addressee Identifications in Distributed Complex Organizational Tasks Over Radio},
  author    = {Bilyana Martinovski and David Traum and Susan Robinson and Saurabh Garg},
  url       = {http://ict.usc.edu/pubs/Functions%20and%20Patterns%20of%20Speaker%20and%20Addressee%20Identifications%20in%20Distributed%20Complex%20Organizational%20Tasks%20Over%20Radio.pdf},
  year      = {2003},
  date      = {2003-09-01},
  booktitle = {Proceedings of Diabruck (7th Workshop on the Semantics and Pragmatics of Dialogue)},
  address   = {Saarbruecken, Germany},
  abstract  = {In multiparty dialogue speakers must identify who they are addressing (at least to the addressee, and perhaps to overhearers as well). In non face-to-face situations, even the speaker's identity can be unclear. For talk within organizational teams working on critical tasks, such miscommunication must be avoided, and so organizational conventions have been adopted to signal addressee and speaker, (e.g., military radio communications). However, explicit guidelines, such as provided by the military are not always exactly followed (see also (Churcher et al., 1996)). Moreover, even simple actions like identifications of speaker and hearer can be performed in a variety of ways, for a variety of purposes. The purpose of this paper is to contribute to the understanding and predictability of identifications of speaker and addressee in radio mediated organization of work.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Itti, Laurent; Dhavale, Nitin; Pighin, Frédéric
Realistic Avatar Eye and Head Animation Using a Neurobiological Model of Visual Attention Proceedings Article
In: Proceedings of SPIE 48th Annual International Symposium on Optical Science and Technology, San Diego, CA, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{itti_realistic_2003,
  title     = {Realistic Avatar Eye and Head Animation Using a Neurobiological Model of Visual Attention},
  author    = {Laurent Itti and Nitin Dhavale and Frédéric Pighin},
  url       = {http://ict.usc.edu/pubs/Realistic%20Avatar%20Eye%20and%20Head%20Animation%20Using%20a%20Neurobiological%20Model%20of%20Visual%20Attention.pdf},
  doi       = {10.1117/12.512618},
  year      = {2003},
  date      = {2003-08-01},
  booktitle = {Proceedings of SPIE 48th Annual International Symposium on Optical Science and Technology},
  address   = {San Diego, CA},
  abstract  = {We describe a neurobiological model of visual attention and eye/head movements in primates, and its application to the automatic animation of a realistic virtual human head watching an unconstrained variety of visual inputs. The bottom-up (image-based) attention model is based on the known neurophysiology of visual processing along the occipito-parietal pathway of the primate brain, while the eye/head movement model is derived from recordings in freely behaving Rhesus monkeys. The system is successful at autonomously saccading towards and tracking salient targets in a variety of video clips, including synthetic stimuli, real outdoors scenes and gaming console outputs. The resulting virtual human eye/head animation yields realistic rendering of the simulation results, both suggesting applicability of this approach to avatar animation and reinforcing the plausibility of the neural model.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Hill, Randall W.; Douglas, Jay; Gordon, Andrew S.; Pighin, Frédéric; Velson, Martin
Guided Conversations about Leadership: Mentoring with Movies and Interactive Characters Proceedings Article
In: Proceedings of the 15th Innovative Applications of Artificial Intelligence Conference, Acapulco, Mexico, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{hill_guided_2003,
  title     = {Guided Conversations about Leadership: Mentoring with Movies and Interactive Characters},
  author    = {Randall W. Hill and Jay Douglas and Andrew S. Gordon and Frédéric Pighin and Martin Velson},
  url       = {http://ict.usc.edu/pubs/Guided%20Conversations%20about%20Leadership-%20Mentoring%20with%20Movies%20and%20Interactive%20Characters.pdf},
  year      = {2003},
  date      = {2003-08-01},
  booktitle = {Proceedings of the 15th Innovative Applications of Artificial Intelligence Conference},
  address   = {Acapulco, Mexico},
  abstract  = {Think Like a Commander - Excellence in Leadership (TLAC-XL) is an application designed for learning leadership skills both from the experiences of others and through a structured dialogue about issues raised in a vignette. The participant watches a movie, interacts with a synthetic mentor and interviews characters in the story. The goal is to enable leaders to learn the human dimensions of leadership, addressing a gap in the training tools currently available to the U.S. Army. The TLAC-XL application employs a number of Artificial Intelligence technologies, including the use of a coordination architecture, a machine learning approach to natural language processing, and an algorithm for the automated animation of rendered human faces.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Gordon, Andrew S.; Kazemzadeh, Abe; Nair, Anish; Petrova, Milena
Recognizing Expressions of Commonsense Psychology in English Text Proceedings Article
In: Proceedings of the 41st Annual Meeting of the Association for Computational Linguistics (ACL), Sapporo, Japan, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_recognizing_2003,
  title     = {Recognizing Expressions of Commonsense Psychology in English Text},
  author    = {Andrew S. Gordon and Abe Kazemzadeh and Anish Nair and Milena Petrova},
  url       = {http://ict.usc.edu/pubs/Recognizing%20Expressions%20of%20Commonsense%20Psychology%20in%20English%20Text.PDF},
  year      = {2003},
  date      = {2003-07-01},
  booktitle = {Proceedings of the 41st Annual Meeting of the Association for Computational Linguistics (ACL)},
  address   = {Sapporo, Japan},
  abstract  = {Many applications of natural language processing technologies involve analyzing texts that concern the psychological states and processes of people, including their beliefs, goals, predictions, explanations, and plans. In this paper, we describe our efforts to create a robust, large-scale lexical-semantic resource for the recognition and classification of expressions of commonsense psychology in English Text. We achieve high levels of precision and recall by hand-authoring sets of local grammars for commonsense psychology concepts, and show that this approach can achieve classification performance greater than that obtained by using machine learning techniques. We demonstrate the utility of this resource for large-scale corpus analysis by identifying references to adversarial and competitive goal in political speeches throughout U.S. history.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Gordon, Andrew S.; Nair, Anish
Literary Evidence for the Cultural Development of a Theory of Mind Proceedings Article
In: Proceedings of the 25th Annual Meeting of the Cognitive Science Society (CogSci), Boston, MA, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_literary_2003,
  title     = {Literary Evidence for the Cultural Development of a Theory of Mind},
  author    = {Andrew S. Gordon and Anish Nair},
  url       = {http://ict.usc.edu/pubs/Literary%20Evidence%20for%20the%20Cultural%20Development%20of%20a%20Theory%20of%20Mind.PDF},
  year      = {2003},
  date      = {2003-07-01},
  booktitle = {Proceedings of the 25th Annual Meeting of the Cognitive Science Society (CogSci)},
  address   = {Boston, MA},
  abstract  = {The term Theory of Mind is used within the cognitive sciences to refer to the abilities that people have to reason about their own mental states and the mental states of others. An important question is whether these abilities are culturally acquired or innate to our species. This paper outlines the argument that the mental models that serve as the basis for Theory of Mind abilities are the product of cultural development. To support this thesis, we present evidence gathered from the large-scale automated analysis of text corpora. We show that the Freudian conception of a subconscious desire is a relatively modern addition to our culturally shared Theory of Mind, as evidenced by a shift in the way these ideas appeared in 19th and 20th century English language novels.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
van Lent, Michael; Hill, Randall W.; McAlinden, Ryan; Brobst, Paul
2002 Defense Modeling and Simulation Office (DMSO) Laboratory for Human Behavior Model Interchange Standards Technical Report
no. AFRL-HE-WP-TP-2007-0008, 2003.
Abstract | Links | BibTeX | Tags:
@techreport{van_lent_2002_2003,
  title       = {2002 Defense Modeling and Simulation Office (DMSO) Laboratory for Human Behavior Model Interchange Standards},
  author      = {Michael van Lent and Randall W. Hill and Ryan McAlinden and Paul Brobst},
  url         = {http://ict.usc.edu/pubs/2002%20Defense%20Modeling%20and%20Simulation%20Office%20(DMSO)%20Laboratory%20for%20Human%20Behavior%20Model%20Interchange%20Standards.pdf},
  year        = {2003},
  date        = {2003-07-01},
  number      = {AFRL-HE-WP-TP-2007-0008},
  institution = {University of Southern California, Institute for Creative Technologies},
  abstract    = {This report describes the effort to address the following research objective: "To begin to define, prototype, and demonstrate an interchange standard among Human Behavior Modeling (HEM) -related models in the Department of Defense (DoD), Industry, Academia, and other Government simulations by establishing a Laboratory for the Study of Human Behavior Representation Interchange Standard." With experience, expertise, and technologies of the commercial computer game industry, the academic research community, and DoD simulation developers, the Institute for Creative Technologies discusses their design and implementation for a prototype HBM interface standard and also describes their demonstration of that standard in a game-based simulation environment that combines HBM models from the entertainment industry and academic researchers.},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport},
}
Gratch, Jonathan; Marsella, Stacy C.
Fight the Way You Train: The Role and Limits of Emotions in Training for Combat Journal Article
In: Brown Journal of World Affairs, vol. X, pp. 63–76, 2003.
Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{gratch_fight_2003,
  title     = {Fight the Way You Train: The Role and Limits of Emotions in Training for Combat},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Fight%20the%20Way%20You%20Train-The%20Role%20and%20Limits%20of%20Emotions%20in%20Training%20for%20Combat.pdf},
  year      = {2003},
  date      = {2003-06-01},
  journal   = {Brown Journal of World Affairs},
  volume    = {X},
  pages     = {63–76},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article},
}
Hill, Randall W.; Gratch, Jonathan; Marsella, Stacy C.; Swartout, William; Traum, David
Virtual Humans in the Mission Rehearsal Exercise System Proceedings Article
In: Künstliche Intelligenz (KI) (special issue on Embodied Conversational Agents), 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{hill_virtual_2003,
  title     = {Virtual Humans in the Mission Rehearsal Exercise System},
  author    = {Randall W. Hill and Jonathan Gratch and Stacy C. Marsella and William Swartout and David Traum},
  url       = {http://ict.usc.edu/pubs/Virtual%20Humans%20in%20the%20Mission%20Rehearsal%20Exercise%20System.pdf},
  year      = {2003},
  date      = {2003-06-01},
  booktitle = {Künstliche Intelligenz (KI) (special issue on Embodied Conversational Agents)},
  abstract  = {How can simulation be made more compelling and effective as a tool for learning? This is the question that the Institute for Creative Technologies (ICT) set out to answer when it was formed at the University of Southern California in 1999, to serve as a nexus between the simulation and entertainment communities. The ultimate goal of the ICT is to create the Experience Learning System (ELS), which will advance the state of the art in virtual reality immersion through use of high-resolution graphics, immersive audio, virtual humans and story-based scenarios. Once fully realized, ELS will make it possible for participants to enter places in time and space where they can interact with believable characters capable of conversation and action, and where they can observe and participate in events that are accessible only through simulation.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Gordon, Andrew S.; Iuppa, Nicholas
Experience Management Using Storyline Adaptation Strategies Proceedings Article
In: Proceedings of the First International Conference on Technologies for Digital Storytelling and Entertainment, Darmstadt, Germany, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_experience_2003,
  title     = {Experience Management Using Storyline Adaptation Strategies},
  author    = {Andrew S. Gordon and Nicholas Iuppa},
  url       = {http://ict.usc.edu/pubs/Experience%20Management%20Using%20Storyline%20Adaptation%20Strategies.PDF},
  year      = {2003},
  date      = {2003-03-01},
  booktitle = {Proceedings of the First International Conference on Technologies for Digital Storytelling and Entertainment},
  address   = {Darmstadt, Germany},
  abstract  = {The central problem of creating interactive drama is structuring a media experience for participants such that a good story is presented while enabling a high degree of meaningful interactivity. This paper presents a new approach to interactive drama, where pre-authored storylines are made interactive by adapting them at run-time by applying strategies that react to unexpected user behavior. The approach, called Experience Management, relies heavily on the explication of a broad range of adaptation strategies and a means of selecting which strategy is most appropriate given a particular story context. We describe a formal approach to storyline representation to enable the selection of applicable strategies, and a strategy formalization that allows for storyline modification. Finally, we discuss the application of this approach in the context of a story-based training system for military leadership skills, and the direction for continuing research.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Gordon, Andrew S.; Hobbs, Jerry R.
Coverage and Competency in Formal Theories: A Commonsense Theory of Memory Proceedings Article
In: Proceedings of the 2003 AAAI Spring Symposium on Logical Formalizations of Commonsense Reasoning, Stanford University, 2003.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_coverage_2003,
  title     = {Coverage and Competency in Formal Theories: A Commonsense Theory of Memory},
  author    = {Andrew S. Gordon and Jerry R. Hobbs},
  url       = {http://ict.usc.edu/pubs/Coverage%20and%20Competency%20in%20Formal%20Theories-%20A%20Commonsense%20Theory%20of%20Memory.PDF},
  year      = {2003},
  date      = {2003-03-01},
  booktitle = {Proceedings of the 2003 AAAI Spring Symposium on Logical Formalizations of Commonsense Reasoning},
  address   = {Stanford University},
  abstract  = {The utility of formal theories of commonsense reasoning will depend both on their competency in solving problems and on their conceptual coverage. We argue that the problems of coverage and competency can be decoupled and solved with different methods for a given commonsense domain. We describe a methodology for identifying the coverage requirements of theories through the large-scale analysis of planning strategies, with further refinements made by collecting and categorizing instances of natural language expressions pertaining to the domain. We demonstrate the effectiveness of this methodology in identifying the representational coverage requirements of theories of the commonsense psychology of human memory. We then apply traditional methods of formalization to produce a formal first-order theory of commonsense memory with a high degree of competency and coverage.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Traum, David; Fleischman, Michael; Hovy, Eduard
NL Generation for Virtual Humans in a Complex Social Environment Proceedings Article
In: AAAI Spring Symposium on Natural Language Generation in Spoken and Written Dialogue, pp. 151–158, 2003.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_nl_2003,
  title     = {NL Generation for Virtual Humans in a Complex Social Environment},
  author    = {David Traum and Michael Fleischman and Eduard Hovy},
  url       = {http://ict.usc.edu/pubs/NL%20Generation%20for%20Virtual%20Humans%20in%20a%20Complex%20Social%20Environment.pdf},
  year      = {2003},
  date      = {2003-03-01},
  booktitle = {AAAI Spring Symposium on Natural Language Generation in Spoken and Written Dialogue},
  pages     = {151–158},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Traum, David
Semantics and Pragmatics of Questions and Answers for Dialogue Agents Proceedings Article
In: International Workshop on Computational Semantics, 2003.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_semantics_2003,
  title     = {Semantics and Pragmatics of Questions and Answers for Dialogue Agents},
  author    = {David Traum},
  url       = {http://ict.usc.edu/pubs/Semantics%20and%20Pragmatics%20of%20Questions%20and%20Answers%20for%20Dialogue%20Agents.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {International Workshop on Computational Semantics},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Uhrmacher, Adelinde; Swartout, William
Agent-Oriented Simulation Journal Article
In: Applied System Simulation, pp. 215–239, 2003.
Abstract | Links | BibTeX | Tags:
@incollection{uhrmacher_agent-oriented_2003,
  title     = {Agent-Oriented Simulation},
  author    = {Adelinde Uhrmacher and William Swartout},
  url       = {http://link.springer.com/chapter/10.1007/978-1-4419-9218-5_10},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Applied System Simulation},
  publisher = {Springer},
  pages     = {215–239},
  abstract  = {Metaphors play a key role in computer science and engineering. Agents bring the notion of locality of information (as in object-oriented programming) together with locality of intent or purpose. The relation between multi-agent and simulation systems is multi-facetted. Simulation systems are used to evaluate software agents in virtual dynamic environments. Agents become part of the model design, if autonomous entities in general, and human or social actors in particular shall be modeled. A couple of research projects shall illuminate some of these facets.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection},
}
Joshi, Pushkar; Tien, Wen C.; Desbrun, Mathieu; Pighin, Frédéric
Learning Controls for Blend Shape Based Realistic Facial Animation Proceedings Article
In: Breen, D.; Lin, M. (Ed.): Proceedings of the Eurographics/SIGGRAPH Symposium on Computer Animation, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{joshi_learning_2003,
  title     = {Learning Controls for Blend Shape Based Realistic Facial Animation},
  author    = {Pushkar Joshi and Wen C. Tien and Mathieu Desbrun and Frédéric Pighin},
  editor    = {D. Breen and M. Lin},
  url       = {http://ict.usc.edu/pubs/Learning%20Controls%20for%20Blend%20Shape%20Based%20Realistic%20Facial%20Animation.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of the Eurographics/SIGGRAPH Symposium on Computer Animation},
  abstract  = {Blend shape animation is the method of choice for keyframe facial animation: a set of blend shapes (key facial expressions) are used to define a linear space of facial expressions. However, in order to capture a significant range of complexity of human expressions, blend shapes need to be segmented into smaller regions where key idiosyncracies of the face being animated are present. Performing this segmentation by hand requires skill and a lot of time. In this paper, we propose an automatic, physically-motivated segmentation that learns the controls and parameters directly from the set of blend shapes. We show the usefulness and efficiency of this technique for both, motion-capture animation and keyframing. We also provide a rendering algorithm to enhance the visual realism of a blend shape model.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}