Publications
Search
Sottilare, Robert A.; Graesser, Arthur C.; Hu, Xiangen; Olney, Andrew; Nye, Benjamin; Sinatra, Anna M.
Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling Book
US Army Research Laboratory, Orlando, FL, 2016.
@book{sottilare_design_2016,
  title     = {Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling},
  author    = {Robert A. Sottilare and Arthur C. Graesser and Xiangen Hu and Andrew Olney and Benjamin Nye and Anna M. Sinatra},
  url       = {http://books.google.com/books?id=0suvDAAAQBAJ},
  year      = {2016},
  date      = {2016-07-01},
  volume    = {4},
  publisher = {US Army Research Laboratory},
  address   = {Orlando, FL},
  abstract  = {Design Recommendations for Intelligent Tutoring Systems (ITSs) explores the impact of intelligent tutoring system design on education and training. Specifically, this volume examines “Authoring Tools and Expert Modeling Techniques”. The “Design Recommendations book series examines tools and methods to reduce the time and skill required to develop Intelligent Tutoring Systems with the goal of improving the Generalized Intelligent Framework for Tutoring (GIFT). GIFT is a modular, service-oriented architecture developed to capture simplified authoring techniques, promote reuse and standardization of ITSs along with automated instructional techniques and effectiveness evaluation capabilities for adaptive tutoring tools and methods.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {book}
}
Rosenbloom, Paul S.; Demski, Abram; Ustun, Volkan
The Sigma Cognitive Architecture and System: Towards Functionally Elegant Grand Unification Journal Article
In: Journal of Artificial General Intelligence, 2016, ISSN: 1946-0163.
@article{rosenbloom_sigma_2016,
  title     = {The Sigma Cognitive Architecture and System: Towards Functionally Elegant Grand Unification},
  author    = {Paul S. Rosenbloom and Abram Demski and Volkan Ustun},
  url       = {http://www.degruyter.com/view/j/jagi.ahead-of-print/jagi-2016-0001/jagi-2016-0001.xml},
  doi       = {10.1515/jagi-2016-0001},
  issn      = {1946-0163},
  year      = {2016},
  date      = {2016-07-01},
  journal   = {Journal of Artificial General Intelligence},
  abstract  = {Sigma (Σ) is a cognitive architecture and system whose development is driven by a combination of four desiderata: grand unification, generic cognition, functional elegance, and sufficient efficiency. Work towards these desiderata is guided by the graphical architecture hypothesis, that key to progress on them is combining what has been learned from over three decades’ worth of separate work on cognitive architectures and graphical models. In this article, these four desiderata are motivated and explained, and then combined with the graphical architecture hypothesis to yield a rationale for the development of Sigma. The current state of the cognitive architecture is then introduced in detail, along with the graphical architecture that sits below it and implements it. Progress in extending Sigma beyond these architectures and towards a full cognitive system is then detailed in terms of both a systematic set of higher level cognitive idioms that have been developed and several virtual humans that are built from combinations of these idioms. Sigma as a whole is then analyzed in terms of how well the progress to date satisfies the desiderata. This article thus provides the first full motivation, presentation and analysis of Sigma, along with a diversity of more specific results that have been generated during its development.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Rosenbloom, Paul S.; Demski, Abram; Ustun, Volkan
Rethinking Sigma’s Graphical Architecture: An Extension to Neural Networks Proceedings Article
In: International Conference on Artificial General Intelligence, pp. 84–94, Springer, New York, NY, 2016, ISBN: 978-3-319-41649-6.
@inproceedings{rosenbloom_rethinking_2016,
  title     = {Rethinking {Sigma’s} Graphical Architecture: An Extension to Neural Networks},
  author    = {Paul S. Rosenbloom and Abram Demski and Volkan Ustun},
  url       = {http://link.springer.com/chapter/10.1007/978-3-319-41649-6_9},
  doi       = {10.1007/978-3-319-41649-6_9},
  isbn      = {978-3-319-41649-6},
  year      = {2016},
  date      = {2016-07-01},
  booktitle = {International Conference on Artificial General Intelligence},
  volume    = {9782},
  pages     = {84--94},
  publisher = {Springer},
  address   = {New York, NY},
  abstract  = {The status of Sigma’s grounding in graphical models is challenged by the ways in which their semantics has been violated while incorporating rule-based reasoning into them. This has led to a rethinking of what goes on in its graphical architecture, with results that include a straightforward extension to feedforward neural networks (although not yet with learning).},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Nazari, Zahra; Gratch, Jonathan
Predictive Models of Malicious Behavior in Human Negotiations Journal Article
In: Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence, pp. 855–861, 2016.
@article{nazari_predictive_2016,
  title         = {Predictive Models of Malicious Behavior in Human Negotiations},
  author        = {Zahra Nazari and Jonathan Gratch},
  url           = {http://www.ijcai.org/Proceedings/16/Papers/126.pdf},
  year          = {2016},
  date          = {2016-07-01},
  journal       = {Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence},
  pages         = {855--861},
  abstract      = {Human and artificial negotiators must exchange information to find efficient negotiated agreements, but malicious actors could use deception to gain unfair advantage. The misrepresentation game is a game-theoretic formulation of how deceptive actors could gain disproportionate rewards while seeming honest and fair. Previous research proposed a solution to this game but this required restrictive assumptions that might render it inapplicable to real-world settings. Here we evaluate the formalism against a large corpus of human face-to-face negotiations. We confirm that the model captures how dishonest human negotiators win while seeming fair, even in unstructured negotiations. We also show that deceptive negotiators give-off signals of their malicious behavior, providing the opportunity for algorithms to detect and defeat this malicious tactic.},
  internal-note = {NOTE(review): a conference (IJCAI) paper stored as @article with the proceedings title in the journal field; consider @inproceedings with booktitle -- verify against the publication-list tool's tppubtype handling before changing.},
  keywords      = {},
  pubstate      = {published},
  tppubtype     = {article}
}
Jalal-Kamali, Ali; Pynadath, David V.
Toward a Bayesian Network Model of Events in International Relations Proceedings Article
In: Proceedings of the 2016 International Conference on Social Computing, Behavioral-Cultural Modeling & Prediction and Behavior Representation in Modeling and Simulation, Springer, Washington D.C., 2016.
@inproceedings{jalal-kamali_toward_2016,
  title     = {Toward a {Bayesian} Network Model of Events in International Relations},
  author    = {Ali Jalal-Kamali and David V. Pynadath},
  url       = {https://books.google.com/books?id=_HGADAAAQBAJ&pg=PA321},
  year      = {2016},
  date      = {2016-07-01},
  booktitle = {Proceedings of the 2016 International Conference on Social Computing, Behavioral-Cultural Modeling \& Prediction and Behavior Representation in Modeling and Simulation},
  publisher = {Springer},
  address   = {Washington D.C.},
  abstract  = {Formal models of international relations have a long history of exploiting representations and algorithms from artificial intelligence. As more news sources move online, there is an increasing wealth of data that can inform the creation of such models. The Global Database of Events, Language, and Tone (GDELT) extracts events from news articles from around the world, where the events represent actions taken by geopolitical actors, reflecting the actors’ relationships. We can apply existing machine-learning algorithms to automatically construct a Bayesian network that represents the distribution over the actions between actors. Such a network model allows us to analyze the interdependencies among events and generate the relative likelihoods of different events. By examining the accuracy of the learned network over different years and different actor pairs, we are able to identify aspects of international relations from a data-driven approach. We are also able to identify weaknesses in the model that suggest needs for additional domain knowledge.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Norrholm, Seth Davin; Jovanovic, Tanja; Gerardi, Maryrose; Breazeale, Kathryn G.; Price, Matthew; Davis, Michael; Duncan, Erica; Ressler, Kerry J.; Bradley, Bekh; Rizzo, Albert; Tuerk, Peter W.; Rothbaum, Barbara O.
Baseline psychophysiological and cortisol reactivity as a predictor of PTSD treatment outcome in virtual reality exposure therapy Journal Article
In: Behaviour Research and Therapy, vol. 82, pp. 28–37, 2016, ISSN: 00057967.
@article{norrholm_baseline_2016,
  title     = {Baseline psychophysiological and cortisol reactivity as a predictor of {PTSD} treatment outcome in virtual reality exposure therapy},
  author    = {Seth Davin Norrholm and Tanja Jovanovic and Maryrose Gerardi and Kathryn G. Breazeale and Matthew Price and Michael Davis and Erica Duncan and Kerry J. Ressler and Bekh Bradley and Albert Rizzo and Peter W. Tuerk and Barbara O. Rothbaum},
  url       = {http://linkinghub.elsevier.com/retrieve/pii/S0005796716300663},
  doi       = {10.1016/j.brat.2016.05.002},
  issn      = {0005-7967},
  year      = {2016},
  date      = {2016-07-01},
  journal   = {Behaviour Research and Therapy},
  volume    = {82},
  pages     = {28--37},
  abstract  = {Baseline cue-dependent physiological reactivity may serve as an objective measure of posttraumatic stress disorder (PTSD) symptoms. Additionally, prior animal model and psychological studies would suggest that subjects with greatest symptoms at baseline may have the greatest violation of expectancy to danger when undergoing exposure based psychotherapy; thus treatment approaches which enhanced the learning under these conditions would be optimal for those with maximal baseline cue-dependent reactivity. However methods to study this hypothesis objectively are lacking. Virtual reality (VR) methodologies have been successfully employed as an enhanced form of imaginal prolonged exposure therapy for the treatment of PTSD. Our goal was to examine the predictive nature of initial psychophysiological (e.g., startle, skin conductance, heart rate) and stress hormone responses (e.g., cortisol) during presentation of VR-based combat-related stimuli on PTSD treatment outcome. Combat veterans with PTSD underwent 6 weeks of VR exposure therapy combined with either D-cycloserine (DCS), alprazolam (ALP), or placebo (PBO). In the DCS group, startle response to VR scenes prior to initiation of treatment accounted for 76\% of the variance in CAPS change scores, p $<$ 0.001, in that higher responses predicted greater changes in symptom severity over time. Additionally, baseline cortisol reactivity was inversely associated with treatment response in the ALP group.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
LeGendre, Chloe; Yu, Xueming; Liu, Dai; Busch, Jay; Jones, Andrew; Pattanaik, Sumanta; Debevec, Paul
Practical Multispectral Lighting Reproduction Journal Article
In: ACM Transactions on Graphics, vol. 35, no. 4, pp. 1–11, 2016, ISSN: 07300301.
@article{legendre_practical_2016,
  title     = {Practical Multispectral Lighting Reproduction},
  author    = {Chloe LeGendre and Xueming Yu and Dai Liu and Jay Busch and Andrew Jones and Sumanta Pattanaik and Paul Debevec},
  url       = {http://dl.acm.org/citation.cfm?id=2925934},
  doi       = {10.1145/2897824.2925934},
  issn      = {0730-0301},
  year      = {2016},
  date      = {2016-07-01},
  journal   = {ACM Transactions on Graphics},
  volume    = {35},
  number    = {4},
  pages     = {1--11},
  abstract  = {We present a practical framework for reproducing omnidirectional incident illumination conditions with complex spectra using a light stage with multispectral LED lights. For lighting acquisition, we augment standard RGB panoramic photography with one or more observations of a color chart with numerous reflectance spectra. We then solve for how to drive the multispectral light sources so that they best reproduce the appearance of the color charts in the original lighting. Even when solving for non-negative intensities, we show that accurate lighting reproduction is achievable using just four or six distinct LED spectra for a wide range of incident illumination spectra. A significant benefit of our approach is that it does not require the use of specialized equipment (other than the light stage) such as monochromators, spectroradiometers, or explicit knowledge of the LED power spectra, camera spectral response functions, or color chart reflectance spectra. We describe two simple devices for multispectral lighting capture, one for slow measurements of detailed angular spectral detail, and one for fast measurements with coarse angular detail. We validate the approach by realistically compositing real subjects into acquired lighting environments, showing accurate matches to how the subject would actually look within the environments, even for those including complex multispectral illumination. We also demonstrate dynamic lighting capture and playback using the technique.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
}
Chen, Chih-Fan; Bolas, Mark; Suma, Evan
Real-time 3D rendering using depth-based geometry reconstruction and view-dependent texture mapping Proceedings Article
In: Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016, pp. 1–2, ACM Press, Anaheim, CA, 2016, ISBN: 978-1-4503-4371-8.
@inproceedings{chen_real-time_2016,
  title     = {Real-time {3D} rendering using depth-based geometry reconstruction and view-dependent texture mapping},
  author    = {Chih-Fan Chen and Mark Bolas and Evan Suma},
  url       = {http://dl.acm.org/citation.cfm?id=2945162},
  doi       = {10.1145/2945078.2945162},
  isbn      = {978-1-4503-4371-8},
  year      = {2016},
  date      = {2016-07-01},
  booktitle = {Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016},
  pages     = {1--2},
  publisher = {ACM Press},
  address   = {Anaheim, CA},
  abstract  = {With the recent proliferation of high-fidelity head-mounted displays (HMDs), there is increasing demand for realistic 3D content that can be integrated into virtual reality environments. However, creating photorealistic models is not only difficult but also time consuming. A simpler alternative involves scanning objects in the real world and rendering their digitized counterpart in the virtual world. Capturing objects can be achieved by performing a 3D scan using widely available consumer-grade RGB-D cameras. This process involves reconstructing the geometric model from depth images generated using a structured light or time-of-flight sensor. The colormap is determined by fusing data from multiple color images captured during the scan. Existing methods compute the color of each vertex by averaging the colors from all these images. Blending colors in this manner creates low-fidelity models that appear blurry. (Figure 1 right). Furthermore, this approach also yields textures with fixed lighting that is baked on the model. This limitation becomes more apparent when viewed in head-tracked virtual reality, as the illumination (e.g. specular reflections) does not change appropriately based on the user's viewpoint},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
}
Jones, Andrew; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Barreto, Joseph; Alexander, Oleg; Bolas, Mark; Debevec, Paul; Unger, Jonas
Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array Proceedings Article
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 18–26, Las Vegas, NV, 2016.
@inproceedings{jones_time-offset_2016,
  title     = {Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array},
  author    = {Andrew Jones and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Joseph Barreto and Oleg Alexander and Mark Bolas and Paul Debevec and Jonas Unger},
  url       = {http://www.cv-foundation.org/openaccess/content_cvpr_2016_workshops/w16/papers/Jones_Time-Offset_Conversations_on_CVPR_2016_paper.pdf},
  year      = {2016},
  date      = {2016-07-01},
  booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops},
  pages     = {18--26},
  address   = {Las Vegas, NV},
  abstract  = {We present a system for creating and displaying interactive life-sized 3D digital humans based on pre-recorded interviews. We use 30 cameras and an extensive list of questions to record a large set of video responses. Users access videos through a natural conversation interface that mimics face-to-face interaction. Recordings of answers, listening and idle behaviors are linked together to create a persistent visual image of the person throughout the interaction. The interview subjects are rendered using flowed light fields and shown life-size on a special rear-projection screen with an array of 216 video projectors. The display allows multiple users to see different 3D perspectives of the subject in proper relation to their viewpoints, without the need for stereo glasses. The display is effective for interactive conversations since it provides 3D cues such as eye gaze and spatial hand gestures.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Grechkin, Timofey; Thomas, Jerald; Azmandian, Mahdi; Bolas, Mark; Suma, Evan
Revisiting detection thresholds for redirected walking: combining translation and curvature gains Proceedings Article
In: Proceedings of the ACM Symposium on Applied Perception, pp. 113–120, ACM Press, Anaheim, CA, 2016, ISBN: 978-1-4503-4383-1.
@inproceedings{grechkin_revisiting_2016,
  title     = {Revisiting detection thresholds for redirected walking: combining translation and curvature gains},
  author    = {Timofey Grechkin and Jerald Thomas and Mahdi Azmandian and Mark Bolas and Evan Suma},
  url       = {http://dl.acm.org/citation.cfm?id=2931018},
  doi       = {10.1145/2931002.2931018},
  isbn      = {978-1-4503-4383-1},
  year      = {2016},
  date      = {2016-07-01},
  booktitle = {Proceedings of the ACM Symposium on Applied Perception},
  pages     = {113--120},
  publisher = {ACM Press},
  address   = {Anaheim, CA},
  abstract  = {Redirected walking enables the exploration of large virtual environments while requiring only a finite amount of physical space. Unfortunately, in living room sized tracked areas the effectiveness of common redirection algorithms such as Steer-to-Center is very limited. A potential solution is to increase redirection effectiveness by applying two types of perceptual manipulations (curvature and translation gains) simultaneously. This paper investigates how such combination may affect detection thresholds for curvature gain. To this end we analyze the estimation methodology and discuss selection process for a suitable estimation method. We then compare curvature detection thresholds obtained under different levels of translation gain using two different estimation methods: method of constant stimuli and Green’s maximum likelihood procedure. The data from both experiments shows no evidence that curvature gain detection thresholds were affected by the presence of translation gain (with test levels spanning previously estimated interval of undetectable translation gain levels). This suggests that in practice currently used levels of translation and curvature gains can be safely applied simultaneously. Furthermore, we present some evidence that curvature detection thresholds may be lower than previously reported. Our estimates indicate that users can be redirected on a circular arc with radius of either 11.6m or 6.4m depending on the estimation method vs. the previously reported value of 22m. These results highlight that the detection threshold estimates vary significantly with the estimation method and suggest the need for further studies to define efficient and reliable estimation methodology.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
LeGendre, Chloe; Yu, Xueming; Debevec, Paul
Optimal LED selection for multispectral lighting reproduction Proceedings Article
In: Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016, ACM, New York, NY, 2016, ISBN: 978-1-4503-4371-8.
@inproceedings{legendre_optimal_2016,
  title     = {Optimal LED selection for multispectral lighting reproduction},
  author    = {Chloe LeGendre and Xueming Yu and Paul Debevec},
  url       = {http://dl.acm.org/citation.cfm?id=2945150},
  doi       = {10.1145/2945078.2945150},
  isbn      = {978-1-4503-4371-8},
  year      = {2016},
  date      = {2016-07-01},
  booktitle = {Proceedings of the SIGGRAPH '16 ACM SIGGRAPH 2016},
  publisher = {ACM},
  address   = {New York, NY},
  abstract  = {We demonstrate the sufficiency of using as few as five LEDs of distinct spectra for multispectral lighting reproduction and solve for the optimal set of five from 11 such commercially available LEDs. We leverage published spectral reflectance, illuminant, and camera spectral sensitivity datasets to show that two approaches of lighting reproduction, matching illuminant spectra directly and matching material color appearance observed by one or more cameras or a human observer, yield the same LED selections. Our proposed optimal set of five LEDs includes red, green, and blue with narrow emission spectra, along with white and amber with broader spectra.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ustun, Volkan; Rosenbloom, Paul
Towards Truly Autonomous Synthetic Characters with the Sigma Cognitive Architecture Book Section
In: Integrating Cognitive Architectures into Virtual Character Design, pp. 213 – 237, IGI Global, Hershey, PA, 2016, ISBN: 978-1-5225-0454-2.
@incollection{ustun_towards_2016,
  title     = {Towards Truly Autonomous Synthetic Characters with the {Sigma} Cognitive Architecture},
  author    = {Volkan Ustun and Paul Rosenbloom},
  url       = {http://services.igi-global.com/resolvedoi/resolve.aspx?doi=10.4018/978-1-5225-0454-2},
  isbn      = {978-1-5225-0454-2},
  year      = {2016},
  date      = {2016-06-01},
  booktitle = {Integrating Cognitive Architectures into Virtual Character Design},
  pages     = {213--237},
  publisher = {IGI Global},
  address   = {Hershey, PA},
  abstract  = {Realism is required not only for how synthetic characters look but also for how they behave. Many applications, such as simulations, virtual worlds, and video games, require computational models of intelligence that generate realistic and credible behavior for the participating synthetic characters. Sigma (Σ) is being built as a computational model of general intelligence with a long-term goal of understanding and replicating the architecture of the mind; i.e., the fixed structure underlying intelligent behavior. Sigma leverages probabilistic graphical models towards a uniform grand unification of not only traditional cognitive capabilities but also key non-cognitive aspects, creating unique opportunities for the construction of new kinds of non-modular behavioral models. These ambitions strive for the complete control of synthetic characters that behave as humanly as possible. In this paper, Sigma is introduced along with two disparate proof-of-concept virtual humans – one conversational and the other a pair of ambulatory agents – that demonstrate its diverse capabilities.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Swartout, William R.
Virtual Humans as Centaurs: Melding Real and Virtual Book Section
In: Virtual, Augmented and Mixed Reality, vol. 9740, pp. 356–359, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-39906-5 978-3-319-39907-2.
@incollection{swartout_virtual_2016,
  title     = {Virtual Humans as Centaurs: Melding Real and Virtual},
  author    = {William R. Swartout},
  url       = {http://link.springer.com/10.1007/978-3-319-39907-2_34},
  isbn      = {978-3-319-39906-5, 978-3-319-39907-2},
  year      = {2016},
  date      = {2016-06-01},
  booktitle = {Virtual, Augmented and Mixed Reality},
  volume    = {9740},
  pages     = {356--359},
  publisher = {Springer International Publishing},
  address   = {Cham, Switzerland},
  abstract  = {Centaurs are man-machine teams that can work together on problems and can out-perform either people or computers working alone in domains as varied as chess-playing and protein folding. But the centaur of Greek mythology was not a team, but rather a hybrid of man and horse with some of the characteristics of each. In this paper, we outline our efforts to build virtual humans, which might be considered hybrid centaurs, combining features of both people and machines. We discuss experimental evidence that shows that these virtual human hybrids can outperform both people and inanimate processes in some tasks such as medical interviewing.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Nolin, Pierre; Stipanicic, Annie; Henry, Mylène; Lachapelle, Yves; Lussier-Desrochers, Dany; Rizzo, Albert “Skip”; Allain, Philippe
ClinicaVR: Classroom-CPT: A virtual reality tool for assessing attention and inhibition in children and adolescents Journal Article
In: Computers in Human Behavior, vol. 59, pp. 327–333, 2016, ISSN: 07475632.
@article{nolin_clinicavr_2016,
  title     = {{ClinicaVR}: {Classroom-CPT}: A virtual reality tool for assessing attention and inhibition in children and adolescents},
  author    = {Pierre Nolin and Annie Stipanicic and Mylène Henry and Yves Lachapelle and Dany Lussier-Desrochers and Albert “Skip” Rizzo and Philippe Allain},
  url       = {http://linkinghub.elsevier.com/retrieve/pii/S0747563216300759},
  doi       = {10.1016/j.chb.2016.02.023},
  issn      = {0747-5632},
  year      = {2016},
  date      = {2016-06-01},
  journal   = {Computers in Human Behavior},
  volume    = {59},
  pages     = {327--333},
  abstract  = {Having garnered interest both in clinic and research areas, the Virtual Classroom (Rizzo et al., 2000) assesses children's attention in a virtual context. The Digital MediaWorks team (www.dmw.ca) has evolved the original basic classroom concept over a number of iterations to form the ClinicaVR Suite containing the Classroom-CPT as one of its components. The present study has three aims: investigate certain validity and reliability aspects of the tool; examine the relationship between performance in the virtual test and the attendant sense of presence and cybersickness experienced by participants; assess potential effects of gender and age on performance in the test. The study was conducted with 102 children and adolescents from Grade 2 to Grade 10. All participants were enrolled in a regular school program. Results support both concurrent and construct validity as well as temporal stability of ClinicaVR: Classroom-Continuous Performance Test (CPT). Gender exerted no effect on performance, while age did. The test did not cause much cybersickness. We recommend ClinicaVR: Classroom-CPT as an assessment tool for selective and sustained attention, and inhibition, in clinic and research domains.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Artstein, Ron; Gainer, Alesia; Georgila, Kallirroi; Leuski, Anton; Shapiro, Ari; Traum, David
New Dimensions in Testimony Demonstration Proceedings Article
In: Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations, pp. 32–36, Association for Computational Linguistics, San Diego, California, 2016.
@inproceedings{artstein_new_2016,
  title     = {{New Dimensions in Testimony} Demonstration},
  author    = {Ron Artstein and Alesia Gainer and Kallirroi Georgila and Anton Leuski and Ari Shapiro and David Traum},
  url       = {http://www.aclweb.org/anthology/N16-3007},
  year      = {2016},
  date      = {2016-06-01},
  booktitle = {Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations},
  pages     = {32--36},
  publisher = {Association for Computational Linguistics},
  address   = {San Diego, California},
  abstract  = {New Dimensions in Testimony is a prototype dialogue system that allows users to conduct a conversation with a real person who is not available for conversation in real time. Users talk to a persistent representation of Holocaust survivor Pinchas Gutter on a screen, while a dialogue agent selects appropriate responses to user utterances from a set of pre-recorded video statements, simulating a live conversation. The technology is similar to existing conversational agents, but to our knowledge this is the first system to portray a real person. The demonstration will show the system on a range of screens (from mobile phones to large TVs), and allow users to have individual conversations with Mr. Gutter.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Phan, Thai; Bolas, Mark; Krum, David M.
User Perceptions of a Virtual Human Over Mobile Video Chat Interactions Book Section
In: Human-Computer Interaction. Novel User Experiences, vol. 9733, pp. 107–118, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-39512-8 978-3-319-39513-5.
@incollection{kang_user_2016,
  title     = {User Perceptions of a Virtual Human Over Mobile Video Chat Interactions},
  author    = {Sin-Hwa Kang and Thai Phan and Mark Bolas and David M. Krum},
  url       = {http://link.springer.com/chapter/10.1007/978-3-319-39513-5_10},
  doi       = {10.1007/978-3-319-39513-5_10},
  isbn      = {978-3-319-39512-8, 978-3-319-39513-5},
  year      = {2016},
  date      = {2016-06-01},
  booktitle = {Human-Computer Interaction. Novel User Experiences},
  volume    = {9733},
  pages     = {107--118},
  publisher = {Springer International Publishing},
  address   = {Cham, Switzerland},
  abstract  = {We believe that virtual humans, presented over video chat services, such as Skype, and delivered using smartphones, can be an effective way to deliver innovative applications where social interactions are important, such as counseling and coaching. To explore this subject, we have built a hardware and software apparatus that allows virtual humans to initiate, receive, and interact over video calls using Skype or any similar service. With this platform, we conducted two experiments to investigate the applications and characteristics of virtual humans that interact over mobile video. In Experiment 1, we investigated user reactions to the physical realism of the background scene in which a virtual human was displayed. In Experiment 2, we examined how virtual characters can establish and maintain longer term relationships with users, using ideas from Social Exchange Theory to strengthen bonds between interactants. Experiment 2 involved repeated interactions with a virtual human over a period of time. Both studies used counseling-style interactions with users. The results demonstrated that males were more attracted socially to a virtual human that was presented over a realistic background than a featureless background while females were more socially attracted to a virtual human with a less realistic featureless background. The results further revealed that users felt the virtual human was a compassionate partner when they interacted with the virtual human over multiple calls, rather than just a single call.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Mizukami, Masahiro; Traum, David; Yoshino, Koichiro; Neubig, Graham; Nakamura, Satoshi
Word and Dialogue Act Entrainment Analysis based on User Profile Proceedings Article
In: Proceedings of The 30th Annual Conference of the Japanese Society for Artificial Intelligence, Kitakyushu, Japan, 2016.
@inproceedings{mizukami_word_2016,
title = {Word and Dialogue Act Entrainment Analysis based on User Profile},
author = {Masahiro Mizukami and David Traum and Koichiro Yoshino and Graham Neubig and Satoshi Nakamura},
url = {https://kaigi.org/jsai/webprogram/2016/pdf/356.pdf},
year = {2016},
date = {2016-06-01},
booktitle = {Proceedings of The 30th Annual Conference of the Japanese Society for Artificial Intelligence},
address = {Kitakyushu, Japan},
abstract = {Patterns of dialogue act and word selection are observable in dialogue. Entrainment is the factor that might account for these patterns. We test the entrainment hypotheses using the switchboard corpus, comparing speech of different speakers from different parts of the dialogue, but also speech of the same speaker at different points. Our findings replicate previous studies that dialogue participants converge toward each other in word choice, but we also investigate novel measures of entrainment of dialogue act selection, and word choice for specific dialogue acts. These studies inform a design for dialogue systems that would show human-like degrees of entrainment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nye, Benjamin D.
ITS, The End of the World as We Know It: Transitioning AIED into a Service-Oriented Ecosystem Journal Article
In: International Journal of Artificial Intelligence in Education, vol. 26, no. 2, pp. 756–770, 2016, ISSN: 1560-4292, 1560-4306.
@article{nye_its_2016,
title = {{ITS}, The End of the World as We Know It: Transitioning {AIED} into a Service-Oriented Ecosystem},
author = {Benjamin D. Nye},
url = {http://link.springer.com/10.1007/s40593-016-0098-8},
doi = {10.1007/s40593-016-0098-8},
issn = {1560-4292, 1560-4306},
year = {2016},
date = {2016-06-01},
journal = {International Journal of Artificial Intelligence in Education},
volume = {26},
number = {2},
pages = {756--770},
abstract = {Advanced learning technologies are reaching a new phase of their evolution where they are finally entering mainstream educational contexts, with persistent user bases. However, as AIED scales, it will need to follow recent trends in service-oriented and ubiquitous computing: breaking AIED platforms into distinct services that can be composed for different platforms (web, mobile, etc.) and distributed across multiple systems. This will represent a move from learning platforms to an ecosystem of interacting learning tools. Such tools will enable new opportunities for both user-adaptation and experimentation. Traditional macro-adaptation (problem selection) and step-based adaptation (hints and feedback) will be extended by meta-adaptation (adaptive system selection) and micro-adaptation (event-level optimization). The existence of persistent and widely-used systems will also support new paradigms for experimentation in education, allowing researchers to understand interactions and boundary conditions for learning principles. New central research questions for the field will also need to be answered due to these changes in the AIED landscape.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mühlberger, Andreas; Jekel, K.; Probst, Thomas; Schecklmann, Martin; Conzelmann, A.; Andreatta, M.; Rizzo, A. A.; Pauli, P.; Romanos, M.
The Influence of Methylphenidate on Hyperactivity and Attention Deficits in Children With ADHD: A Virtual Classroom Test Journal Article
In: Journal of attention disorders, 2016.
@article{muhlberger_influence_2016,
title = {The Influence of Methylphenidate on Hyperactivity and Attention Deficits in Children With {ADHD}: A Virtual Classroom Test},
author = {Andreas Mühlberger and K. Jekel and Thomas Probst and Martin Schecklmann and A. Conzelmann and M. Andreatta and A. A. Rizzo and P. Pauli and M. Romanos},
url = {http://journals.sagepub.com/doi/abs/10.1177/1087054716647480},
doi = {10.1177/1087054716647480},
year = {2016},
date = {2016-05-01},
journal = {Journal of Attention Disorders},
abstract = {This study compares the performance in a continuous performance test within a virtual reality classroom (CPT-VRC) between medicated children with ADHD, unmedicated children with ADHD, and healthy children. Method: N = 94 children with ADHD (n = 26 of them received methylphenidate and n = 68 were unmedicated) and n = 34 healthy children performed the CPT-VRC. Omission errors, reaction time/variability, commission errors, and body movements were assessed. Furthermore, ADHD questionnaires were administered and compared with the CPT-VRC measures. Results: The unmedicated ADHD group exhibited more omission errors and showed slower reaction times than the healthy group. Reaction time variability was higher in the unmedicated ADHD group compared with both the healthy and the medicated ADHD group. Omission errors and reaction time variability were associated with inattentiveness ratings of experimenters. Head movements were correlated with hyperactivity ratings of parents and experimenters. Conclusion: Virtual reality is a promising technology to assess ADHD symptoms in an ecologically valid environment.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Pynadath, David V.; Hill, Susan G.
The Impact of POMDP-Generated Explanations on Trust and Performance in Human-Robot Teams Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 997–1005, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{wang_impact_2016,
title = {The Impact of {POMDP}-Generated Explanations on Trust and Performance in Human-Robot Teams},
author = {Ning Wang and David V. Pynadath and Susan G. Hill},
url = {http://dl.acm.org/citation.cfm?id=2937071},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {997--1005},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Researchers have observed that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies have shown that hand-crafted explanations can help maintain effective team performance even when the system is less than 100% reliable. However, current explanation algorithms are not sufficient for making a robot's quantitative reasoning (in terms of both uncertainty and conflicting goals) transparent to human teammates. In this work, we develop a novel mechanism for robots to automatically generate explanations of reasoning based on Partially Observable Markov Decision Problems (POMDPs). Within this mechanism, we implement alternate natural-language templates and then measure their differential impact on trust and team performance within an agent-based online test-bed that simulates a human-robot team task. The results demonstrate that the added explanation capability leads to improvement in transparency, trust, and team performance. Furthermore, by observing the different outcomes due to variations in the robot's explanation content, we gain valuable insight that can help lead to refinement of explanation algorithms to further improve human-robot interaction.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2005
Kock, Arien; Gratch, Jonathan
An Evaluation of Automatic Lip-syncing Methods for Game Environments Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 01 2005, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{kock_evaluation_2005,
  title       = {An Evaluation of Automatic Lip-syncing Methods for Game Environments},
  author      = {Arien Kock and Jonathan Gratch},
  url         = {http://ict.usc.edu/pubs/ICT-TR.01.2005.pdf},
  year        = {2005},
  date        = {2005-01-01},
  number      = {ICT TR 01 2005},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {Lip-synching is the production of articulator motion corresponding to a given audible utterance. The Mission Rehearsal Exercise training system requires lip-synching to increase the believability of its virtual agents. In this report I document the selection, exploration, evaluation and comparison of several candidate lip-synching systems, ending with a recommendation. The evaluation focuses on the believability of articulators' expression, the foreseeable difficulty of integration into MRE’s architecture, the support for facial expressions related to semantics and prosodic features as well as the scalability of each system.},
  keywords    = {Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Pighin, Frédéric; Patel, Sanjit; Cohen, Jonathan; Chu, Anson
Oriented Particle Level Set for Fluid Simulation Book
2005.
Abstract | Links | BibTeX | Tags:
@book{pighin_oriented_2005,
  title     = {Oriented Particle Level Set for Fluid Simulation},
  author    = {Frédéric Pighin and Sanjit Patel and Jonathan Cohen and Anson Chu},
  url       = {http://ict.usc.edu/pubs/Oriented%20Particle%20Level%20Set%20for%20Fluid%20Simulation.pdf},
  year      = {2005},
  date      = {2005-01-01},
  abstract  = {The particle level set technique has been adopted in computer graphics as the method of choice for tracking the surface of simulated liquids. In this poster, we describe a novel technique for modeling such an interface. Our technique is based on a set of oriented particles that provides a piecewise linear approximation to the interface. Using this improved model, we obtain a more accurate representation of the water surface and reduced mass loss during simulation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {book}
}
Kim, Youngjun; Hill, Randall W.; Traum, David
Controlling the Focus of Perceptual Attention in Embodied Conversational Agents Proceedings Article
In: Proceedings of the 4th International Joint Conference on Autonomous Agents and Multiagent Systems, 2005, ISBN: 1-59593-093-0.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kim_controlling_2005,
  title     = {Controlling the Focus of Perceptual Attention in Embodied Conversational Agents},
  author    = {Youngjun Kim and Randall W. Hill and David Traum},
  url       = {http://ict.usc.edu/pubs/Controlling%20the%20Focus%20of%20Perceptual%20Attention%20in%20Embodied%20Conversational%20Agents.pdf},
  doi       = {10.1145/1082473.1082641},
  isbn      = {1-59593-093-0},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {Proceedings of the 4th International Joint Conference on Autonomous Agents and Multiagent Systems},
  abstract  = {In this paper, we present a computational model of dynamic perceptual attention for virtual humans. The computational models of perceptual attention that we surveyed fell into one of two camps: top-down and bottom-up. Biologically inspired computational models [2] typically focus on the bottom-up aspects of attention, while most virtual humans [1,3,7] implement a top-down form of attention. Bottom-up attention models only consider the sensory information without taking into consideration the saliency based on tasks or goals. As a result, the outcome of a purely bottom-up model will not consistently match the behavior of real humans in certain situations. Modeling perceptual attention as a purely top-down process, however, is also not sufficient for implementing a virtual human. A purely top-down model does not take into account the fact that virtual humans need to react to perceptual stimuli vying for attention. Top-down systems typically handle this in an ad hoc manner by encoding special rules to catch certain conditions in the environment. The problem with this approach is that it does not provide a principled way of integrating the ever-present bottom-up perceptual stimuli with top-down control of attention. This model extends the prior model [7] with perceptual resolution based on psychological theories of human perception [4]. This model allows virtual humans to dynamically interact with objects and other individuals, balancing the demands of goal-directed behavior with those of attending to novel stimuli. This model has been implemented and tested with the MRE Project [5].},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
The Fictionalization of Lessons Learned Journal Article
In: IEEE Multimedia, vol. 12, no. 4, pp. 12–14, 2005.
Links | BibTeX | Tags: The Narrative Group
@article{gordon_fictionalization_2005,
title = {The Fictionalization of Lessons Learned},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/The%20Fictionalization%20of%20Lessons%20Learned.pdf},
year = {2005},
date = {2005-01-01},
journal = {IEEE Multimedia},
volume = {12},
number = {4},
pages = {12--14},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {article}
}
Maatman, R. M.; Gratch, Jonathan; Marsella, Stacy C.
Responsive Behavior of a Listening Agent Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 02 2005, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@techreport{maatman_responsive_2005,
  title       = {Responsive Behavior of a Listening Agent},
  author      = {R. M. Maatman and Jonathan Gratch and Stacy C. Marsella},
  url         = {http://ict.usc.edu/pubs/ICT-TR.02.2005.pdf},
  year        = {2005},
  date        = {2005-01-01},
  number      = {ICT TR 02 2005},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {The purpose of this assignment is twofold. First the possibility of generating real time responsive behavior is evaluated in order to create a more human-like agent. Second, the effect of the behavior of the agent on the human interactor is evaluated. The main motivation for the focus on responsive gestures is because much research has been done already on gestures that accompany the speaker, and nothing on gesture that accompany the listener, although responsiveness is a crucial part of a conversation. The responsive behavior of a virtual agent consists of performing gestures during the time a human is speaking to the agent. To generate the correct gestures, first a literature research is carried out, from which is concluded that with the current of the current Natural Language Understanding technology, it is not possible to extract semantic features of the human speech in real time. Thus, other features have to be considered. The result of the literature research is a basic mapping between real time obtainable features and their correct responsive behavior: - if the speech contains a relatively long period of low pitch then perform a head nod. - if the speech contains relatively high intensity then perform a head nod - if the speech contains disfluency then perform a posture shift, gazing behavior or a frown - if the human performs a posture shift then mirror this posture shift - if the human performs a head shake then mirror this head shake - if the human performs major gazing behavior then mimic this behavior A design has been made to implement this mapping into the behavior of a virtual agent and this design has been implemented which results in two programs. One to mirror the physical features of the human and one to extract the speech features from the voice of the human. The two programs are combined and the effect of the resulting behavior on the human interactor has been tested. The results of these tests are that the performing of responsive behavior has a positive effect on the natural behavior of a virtual agent and thus looks promising for future research. However, the gestures proposed by this mapping are not always context-independent. Thus, much refinement is still to be done and more functionality can be added to improve the responsive behavior. The conclusion of this research is twofold. First the performing of responsive behaviors in real time is possible with the presented mapping and this results in a more natural behaving agent. Second, some responsive behavior is still dependant of semantic information. This leaves open the further enhancement of the presented mapping in order to increase the responsive behavior.},
  keywords    = {Social Simulation, Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Alpaslan, Z. Y.; Yeh, S. -C.; Rizzo, Albert; Sawchuk, Alexander A.
Quantitative Comparison of Interaction with Shutter Glasses and Autostereoscopic Displays Proceedings Article
In: Stereoscopic Displays and Virtual Reality Systems XII Symposium, San Jose, CA, 2005.
Abstract | Links | BibTeX | Tags: MedVR
@inproceedings{alpaslan_quantitative_2005,
title = {Quantitative Comparison of Interaction with Shutter Glasses and Autostereoscopic Displays},
author = {Z. Y. Alpaslan and S.-C. Yeh and Albert Rizzo and Alexander A. Sawchuk},
url = {http://ict.usc.edu/pubs/Quantitative%20Comparison%20of%20Interaction%20with%20Shutter%20Glasses%20and%20Autostereoscopic%20Displays.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Stereoscopic Displays and Virtual Reality Systems XII Symposium},
address = {San Jose, CA},
abstract = {In this paper we describe experimental measurements and comparison of human interaction with three different types of stereo computer displays. We compare traditional shutter glasses-based viewing with three-dimensional (3D) autostereoscopic viewing on displays such as the Sharp LL-151-3D display and StereoGraphics SG 202 display. The method of interaction is a sphere-shaped "cyberprop" containing an Ascension Flock-of-Birds tracker that allows a user to manipulate objects by imparting the motion of the sphere to the virtual object. The tracking data is processed with OpenGL to manipulate objects in virtual 3D space, from which we synthesize two or more images as seen by virtual cameras observing them. We concentrate on the quantitative measurement and analysis of human performance for interactive object selection and manipulation tasks using standardized and scalable configurations of 3D block objects. The experiments use a series of progressively more complex block configurations that are rendered in stereo on various 3D displays. In general, performing the tasks using shutter glasses required less time as compared to using the autostereoscopic displays. While both male and female subjects performed almost equally fast with shutter glasses, male subjects performed better with the LL-151-3D display, while female subjects performed better with the SG202 display. Interestingly, users generally had a slightly higher efficiency in completing a task set using the two autostereoscopic displays as compared to the shutter glasses, although the differences for all users among the displays was relatively small. There was a preference for shutter glasses compared to autostereoscopic displays in the ease of performing tasks, and glasses were slightly preferred for overall image quality and stereo image quality. However, there was little difference in display preference in physical comfort and overall preference. We present some possible explanations of these results and point out the importance of the autostereoscopic "sweet spot" in relation to the user's head and body position.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {inproceedings}
}
Martinovski, Bilyana; Mao, Wenji; Gratch, Jonathan; Marsella, Stacy C.
Mitigation Theory: An Integrated Approach Proceedings Article
In: Proceedings of the 27th Annual Conference of the Cognitive Science Society (CogSci), Stresa, Italy, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{martinovski_mitigation_2005,
  title     = {Mitigation Theory: An Integrated Approach},
  author    = {Bilyana Martinovski and Wenji Mao and Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Mitigation%20Theory-%20An%20Integrated%20Approach.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {Proceedings of the 27th Annual Conference of the Cognitive Science Society (CogSci)},
  address   = {Stresa, Italy},
  abstract  = {The purpose of this paper is to develop a theoretical model of mitigation by integrating cognitive and discourse approaches to appraisal and coping. Mitigation involves strategic, emotional, linguistic, and Theory of Mind processes on different levels of consciousness. We emphasize that discourse analysis can assist our understanding of these processes.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
Evaluating Social Causality and Responsibility Models: An Initial Report Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 03 2005, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{mao_evaluating_2005,
title = {Evaluating Social Causality and Responsibility Models: An Initial Report},
author = {Wenji Mao and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/ICT-TR-03-2005.pdf},
year = {2005},
date = {2005-01-01},
number = {ICT TR 03 2005},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {Intelligent virtual agents are typically embedded in a social environment and must reason about social cause and effect. Social causal reasoning is qualitatively different from physical causal reasoning that underlies most current intelligent systems. Besides physical causality, the assessments of social cause emphasize epistemic variables including intentions, foreknowledge and perceived coercion. Modeling the process and inferences of social causality can enrich believability and cognitive capabilities of social intelligent agents. In this report, we present a general computational model of social causality and responsibility, and empirical results of a preliminary evaluation of the model in comparison with several other approaches.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {techreport}
}
Maatman, R. M.; Gratch, Jonathan; Marsella, Stacy C.
Natural Behavior of a Listening Agent Proceedings Article
In: Lecture Notes in Computer Science; Proceedings of the 5th International Working Conference on Intelligent Virtual Agents (IVA), pp. 25–36, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{maatman_natural_2005,
title = {Natural Behavior of a Listening Agent},
author = {R. M. Maatman and Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Natural%20Behavior%20of%20a%20Listening%20Agent.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Lecture Notes in Computer Science; Proceedings of the 5th International Working Conference on Intelligent Virtual Agents (IVA)},
pages = {25--36},
address = {Kos, Greece},
abstract = {In contrast to the variety of listening behaviors produced in human-to-human interaction, most virtual agents sit or stand passively when a user speaks. This is a reflection of the fact that although the correct responsive behavior of a listener during a conversation is often related to the semantics, the state of current speech understanding technology is such that semantic information is unavailable until after an utterance is complete. This paper will illustrate that appropriate listening behavior can also be generated by other features of a speaker's behavior that are available in real time such as speech quality, posture shifts and head movements. This paper presents a mapping from these real-time obtainable features of a human speaker to agent listening behaviors.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Pair, Jarrell; McNerney, Peter J.; Eastlund, Ernie; Manson, Brian; Gratch, Jonathan; Hill, Randall W.; Swartout, William
Development of a VR Therapy Application for Iraq War Military Personnel with PTSD Book Section
In: Studies in Health Technology and Informatics, vol. 111, no. 13, pp. 407–413, 13th Annual Medicine Meets Virtual Reality Conference, Long Beach, CA, 2005.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@incollection{rizzo_development_2005-1,
title = {Development of a {VR} Therapy Application for Iraq War Military Personnel with {PTSD}},
author = {Albert Rizzo and Jarrell Pair and Peter J. McNerney and Ernie Eastlund and Brian Manson and Jonathan Gratch and Randall W. Hill and William Swartout},
url = {http://ict.usc.edu/pubs/Development%20of%20a%20VR%20Therapy%20Application%20for%20Iraq%20War%20Veterans%20with%20PTSD.pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Studies in Health Technology and Informatics},
volume = {111},
number = {13},
pages = {407--413},
address = {13th Annual Medicine Meets Virtual Reality Conference, Long Beach, CA},
series = {Medicine Meets Virtual Reality},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experiences including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that 1 out of 6 returning Iraq War military personnel are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) exposure therapy has been used in previous treatments of PTSD patients with reports of positive outcomes. The aim of the current paper is to specify the rationale, design and development of an Iraq War PTSD VR application that is being created from the virtual assets that were initially developed for the X-Box game entitled Full Spectrum Warrior which was inspired by a combat tactical training simulation, Full Spectrum Command.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Rizzo, Albert; Morie, Jacquelyn; Williams, Josh; Pair, Jarrell; Buckwalter, John Galen
Human Emotional State and its Relevance for Military VR Training Proceedings Article
In: Proceedings of the 11th International Conference on Human-Computer Interaction, Las Vegas, NV, 2005.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans, Virtual Worlds
@inproceedings{rizzo_human_2005,
  title     = {Human Emotional State and its Relevance for Military VR Training},
  author    = {Albert Rizzo and Jacquelyn Morie and Josh Williams and Jarrell Pair and John Galen Buckwalter},
  url       = {http://ict.usc.edu/pubs/Human%20Emotional%20State%20and%20its%20Relevance%20for%20Military%20VR%20Training.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {Proceedings of the 11th International Conference on Human-Computer Interaction},
  address   = {Las Vegas, NV},
  abstract  = {Combat environments by their nature can produce a dramatic range of emotional responses in military personnel. When immersed in the emotional "fog of war," the potential exists for optimal human decision-making and performance of goal-directed activities to be seriously compromised. This may be especially true when combat training is conducted under conditions that lack emotional engagement by the soldier. Real world military training often naturally includes stress induction that aims to promote a similarity of internal emotional stimulus cues with what is expected to be present on the battlefield. This approach to facilitating optimal training effectiveness is supported by a long history of learning theory research. Current Virtual Reality military training approaches are noteworthy in their emphasis on creating hi-fidelity graphic and audio realism with the aim to foster better transfer of training. However, less emphasis is typically placed on the creation of emotionally evocative virtual training scenarios that can induce emotional stress in a manner similar to what is typically experienced under real world training conditions. As well, emotional issues in the post-combat aftermath need to be addressed, as can be seen in the devastating emotional difficulties that occur in some military personnel following combat. This is evidenced by the number of recent medical reports that suggest the incidence of "Vietnam-levels" of combat-related Post Traumatic Stress Disorder symptomatology in returning military personnel from the Iraq conflict. In view of these issues, the USC Institute for Creative Technologies (ICT) has initiated a research program to study emotional issues that are relevant to VR military applications. This paper will present the rationale and status of two ongoing VR research programs at the ICT that address sharply contrasting ends of the emotional spectrum relevant to the military: 1. The Sensory Environments Evaluation (SEE) Project is examining basic factors that underlie emotion as it occurs within VR training environments and how this could impact transfer of training, and 2. The Full Spectrum Warrior (FSW) Post Traumatic Stress Disorder Project which is currently in the process of converting the existing FSW combat tactical simulation training scenario (and X-Box game) into a VR treatment system for the conduct of graduated exposure therapy in Iraq war military personnel with Post Traumatic Stress Disorder.},
  keywords  = {MedVR, Virtual Humans, Virtual Worlds},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Hawkins, Tim; Einarsson, Per; Debevec, Paul
Acquisition of Time-Varying Participating Media Proceedings Article
In: SIGGRAPH, Los Angeles, CA, 2005.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{hawkins_acquisition_2005,
  title     = {Acquisition of Time-Varying Participating Media},
  author    = {Tim Hawkins and Per Einarsson and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Acquisition%20of%20Time-Varying%20Participating%20Media.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {SIGGRAPH},
  address   = {Los Angeles, CA},
  abstract  = {We present a technique for capturing time-varying volumetric data of participating media. A laser sheet is swept repeatedly through the volume, and the scattered light is imaged using a high-speed camera. Each sweep of the laser provides a near-simultaneous volume of density values. We demonstrate rendered animations under changing viewpoint and illumination, making use of measured values for the scattering phase function and albedo.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Klimchuck, Dean; Mitura, Roman; Bowerly, Todd; Buckwalter, John Galen; Kerns, Kim; Randall, Katherine; Adams, Rebecca; Finn, Paul; Tarnanas, Ioannis; Sirbu, Cristian; Ollendick, Thomas H.; Yeh, Shih-Ching
A Virtual Reality Scenario for All Seasons: The Virtual Classroom Proceedings Article
In: Proceedings of the 11th International Conference on Human Computer Interaction, Las Vegas, NV, 2005.
Abstract | Links | BibTeX | Tags: MedVR
@inproceedings{rizzo_virtual_2005,
title = {A Virtual Reality Scenario for All Seasons: The Virtual Classroom},
author = {Albert Rizzo and Dean Klimchuck and Roman Mitura and Todd Bowerly and John Galen Buckwalter and Kim Kerns and Katherine Randall and Rebecca Adams and Paul Finn and Ioannis Tarnanas and Cristian Sirbu and Thomas H. Ollendick and Shih-Ching Yeh},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Reality%20Scenario%20for%20All%20Seasons-%20The%20Virtual%20Classroom%20(HCI).pdf},
year = {2005},
date = {2005-01-01},
booktitle = {Proceedings of the 11th International Conference on Human Computer Interaction},
address = {Las Vegas, NV},
abstract = {Rather than relying on costly physical mock-ups of functional assessment and rehabilitation environments, VR offers the option to produce and distribute identical "standard" environments. Within such digital assessment and rehabilitation scenarios, normative data can be accumulated for performance comparisons needed for assessment, diagnosis and for training purposes. As well, in this manner, reusable archetypic virtual environments constructed for one purpose, could also be applied for clinical applications addressing other purposes. This has now been done with the Virtual Classroom scenario. While originally developed as a controlled stimulus environment in which attention processes could be systematically assessed in children while in the presence of varying levels of distraction, the system is now finding use for other clinical targets. Such applications that are being developed and tested using the Virtual Classroom for other purposes include: 1. Expansion of the range of attention assessment tests (i.e., a `Stroop' Interference testing system for all ages). 2. A wide field of view system to study eye tracking under distracting conditions with ADHD children using an Elumens VisionStation®. 3. Development of the Virtual Classroom as a tool for anxiety assessment and graduated exposure therapy for children with Social Anxiety Disorder. 4. An extension to the class to include a maze of halls leading out of the school for an earthquake safety training application with persons with developmental and learning disabilities.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {inproceedings}
}
Core, Mark; Lane, H. Chad; Lent, Michael; Solomon, Steve; Gomboc, Dave; Carpenter, Paul
Toward Question Answering for Simulations Proceedings Article
In: International Joint Conference on Artificial Intelligence (IJCAI) Workshop on Knowledge and Reasoning for Answering Questions, Edinburgh, Scotland, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{core_toward_2005,
  title     = {Toward Question Answering for Simulations},
  author    = {Mark Core and H. Chad Lane and Michael van Lent and Steve Solomon and Dave Gomboc and Paul Carpenter},
  url       = {http://ict.usc.edu/pubs/Toward%20Question%20Answering%20for%20Simulations.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {International Joint Conference on Artificial Intelligence ({IJCAI}) Workshop on Knowledge and Reasoning for Answering Questions},
  address   = {Edinburgh, Scotland},
  abstract  = {The new research area of explainable artificial intelligence (XAI) allows users to question simulated entities whose motivations would otherwise be hidden. Here, we focus on the knowledge representation issues involved in building such systems.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
Social Causality and Responsibility: Modeling and Evaluation Proceedings Article
In: Lecture Notes in Computer Science; Proceedings of the 5th International Workshop on Intelligent Virtual Agents (IVA), pp. 191–204, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_social_2005,
  title     = {Social Causality and Responsibility: Modeling and Evaluation},
  author    = {Wenji Mao and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Social%20Causality%20and%20Responsibility-%20Modeling%20and%20Evaluation.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {Lecture Notes in Computer Science; Proceedings of the 5th International Workshop on Intelligent Virtual Agents ({IVA})},
  pages     = {191--204},
  address   = {Kos, Greece},
  abstract  = {Intelligent virtual agents are typically embedded in a social environment and must reason about social cause and effect. Social causal reasoning is qualitatively different from physical causal reasoning that underlies most current intelligent systems. Besides physical causality, the assessments of social cause emphasize epistemic variables including intentions, foreknowledge and perceived coercion. Modeling the process and inferences of social causality can enrich the believability and the cognitive capabilities of social intelligent agents. In this paper, we present a general computational model of social causality and responsibility, and empirically evaluate and compare the model with several other approaches.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Jan, Dusan; Traum, David
Dialog Simulation for Background Characters Proceedings Article
In: 5th International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{jan_dialog_2005,
  title     = {Dialog Simulation for Background Characters},
  author    = {Dusan Jan and David Traum},
  url       = {http://ict.usc.edu/pubs/Dialog%20Simulation%20for%20Background%20Characters.pdf},
  year      = {2005},
  date      = {2005-01-01},
  booktitle = {5th International Working Conference on Intelligent Virtual Agents},
  address   = {Kos, Greece},
  abstract  = {Background characters in virtual environments do not require the same amount of processing that is usually required by main characters, however we want simulation that is more believable than random behavior. We describe an algorithm that generates behavior for background characters involved in conversation that supports dynamic changes to conversation group structure. We present an evaluation of this algorithm and make suggestions on how to further improve believability of the simulation.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2004
Hill, Randall W.; Gordon, Andrew S.; Kim, Julia
Learning the Lessons of Leadership Experience: Tools for Interactive Case Method Analysis Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{hill_learning_2004,
  title     = {Learning the Lessons of Leadership Experience: Tools for Interactive Case Method Analysis},
  author    = {Randall W. Hill and Andrew S. Gordon and Julia Kim},
  url       = {http://ict.usc.edu/pubs/LEARNING%20THE%20LESSONS%20OF%20LEADERSHIP%20EXPERIENCE-%20TOOLS%20FOR%20INTERACTIVE%20CASE%20METHOD%20ANALYSIS.pdf},
  year      = {2004},
  date      = {2004-12-01},
  booktitle = {Proceedings of the 24th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {The Army Excellence in Leadership (AXL) project at the University of Southern California's Institute for Creative Technologies is aimed at supporting the acquisition of tacit knowledge of military leadership through the development of compelling filmed narratives of leadership scenarios and interactive training technologies. The approach taken in the AXL project is to leverage the best practices of case-method teaching and use Hollywood storytelling techniques to create fictional case studies (as filmed media) addressing specific leadership issues. In addition to authoring compelling cases for analysis, we have developed software prototypes that instantiate the case-method teaching approach. These systems engage individual trainees in human-computer dialogues that are focused on the leadership issues that have been embedded in the fictional cases.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Korris, James H.
Full Spectrum Warrior: How the Institute for Creative Technologies Built a Cognitive Training Tool for the XBox Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{korris_full_2004,
  title     = {Full Spectrum Warrior: How the Institute for Creative Technologies Built a Cognitive Training Tool for the {XBox}},
  author    = {James H. Korris},
  url       = {http://ict.usc.edu/pubs/FULL%20SPECTRUM%20WARRIOR-%20HOW%20THE%20INSTITUTE%20FOR%20CREATIVE%20TECHNOLOGIES%20BUILT%20A%20COGNITIVE%20TRAINING%20TOOL%20FOR%20THE%20XBOX.pdf},
  year      = {2004},
  date      = {2004-12-01},
  booktitle = {Proceedings of the 24th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {Microsoft's popular game console, the Xbox, combined the possibility of compelling training efficiencies with formidable obstacles to development, both in terms of the business model, the limitation of the Windows 2000 computer inside it and the system's standard human-machine interface. In its mission to leverage the capabilities of the entertainment industry to develop next-generation simulation tools, the Institute for Creative Technologies turned to this inexpensive, powerful platform for its Squad level cognitive tactical trainer. This paper will describe the pedagogical and technological challenges and unique processes that translated Squad level command doctrine to a commercial game interface and a cost-effective, universally-accessible computational medium.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Towards a Validated Model of the Influence of Emotion on Human Performance Proceedings Article
In: Proceedings of the 24th Army Science Conference, 2004.
Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_towards_2004,
  title     = {Towards a Validated Model of the Influence of Emotion on Human Performance},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/TOWARDS%20A%20VALIDATED%20MODEL%20OF%20THE%20INFLUENCE%20OF%20EMOTION%20ON%20HUMAN%20PERFORMANCE.pdf},
  year      = {2004},
  date      = {2004-12-01},
  booktitle = {Proceedings of the 24th Army Science Conference},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Poullis, Charalambos; Gardner, Andrew; Debevec, Paul
Photogrammetric Modeling and Image-based Rendering for Rapid Virtual Environment Creation Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{poullis_photogrammetric_2004,
  title     = {Photogrammetric Modeling and Image-based Rendering for Rapid Virtual Environment Creation},
  author    = {Charalambos Poullis and Andrew Gardner and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/PHOTOGRAMMETRIC%20MODELING%20AND%20IMAGE-BASED%20RENDERING%20FOR%20RAPID%20VIRTUAL%20ENVIRONMENT%20CREATION.pdf},
  year      = {2004},
  date      = {2004-12-01},
  booktitle = {Proceedings of the 24th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {For realistic simulations, architecture is one of the most important elements to model and render photorealistically. Current techniques of converting architectural plans or survey data to CAD models are labor intensive, and methods for rendering such models are generally not photorealistic. In this work, we present a new approach for modeling and rendering existing architectural scenes from a sparse set of still photographs. For modeling, we use photogrammetric modeling techniques to recover the geometric representation of the architecture. The photogrammetric modeling approach presented in this paper is effective, robust and powerful because it fully exploits structural symmetries and constraints which are characteristic of architectural scenes. For rendering, we use view-dependent texture mapping, a method for compositing multiple images of a scene to create renderings from novel views. Lastly, we present a software package, named Façade, which uses the techniques described to recover the geometry and appearance of architectural scenes directly from a sparse set of photographs.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gandhe, Sudeep; Gordon, Andrew S.; Leuski, Anton; Traum, David
First Steps Toward Linking Dialogues: Mediating Between Free-text Questions and Pre-recorded Video Answers Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group, Virtual Humans
@inproceedings{gandhe_first_2004,
  title     = {First Steps Toward Linking Dialogues: Mediating Between Free-text Questions and Pre-recorded Video Answers},
  author    = {Sudeep Gandhe and Andrew S. Gordon and Anton Leuski and David Traum},
  url       = {http://ict.usc.edu/pubs/First%20Steps%20Toward%20Linking%20Dialogues-%20Mediating%20Between%20Free-text%20Questions%20and%20Pre-recorded%20Video%20Answers.pdf},
  year      = {2004},
  date      = {2004-12-01},
  booktitle = {Proceedings of the 24th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {Pre-recorded video segments can be very compelling for a variety of immersive training purposes, including providing answers to questions in after-action reviews. Answering questions fluently using pre-recorded video poses challenges, however. When humans interact, answers are constructed after questions are posed. When answers are pre-recorded, even if a correct answer exists in a library of video segments, the answer may be phrased in a way that is not coherent with the question. This paper reports on basic research experiments with short "linking dialogues" that mediate between the question and answer to reduce (or eliminate) the incoherence, resulting in more natural human-system interaction. A set of experiments were performed in which links were elicited to bridge between questions from users of an existing training application and selected answers from the system, and then comparisons made with unlinked answers. The results show that a linking dialogue can significantly increase the perceived relevance of the system's answers.},
  keywords  = {The Narrative Group, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Patel, Jigish; Parker, Robert; Traum, David
Simulation of Small Group Discussions for Middle Level of Detail Crowds Proceedings Article
In: Proceedings of the 24th Army Science Conference, Orlando, FL, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{patel_simulation_2004,
  title     = {Simulation of Small Group Discussions for Middle Level of Detail Crowds},
  author    = {Jigish Patel and Robert Parker and David Traum},
  url       = {http://ict.usc.edu/pubs/Simulation%20of%20Small%20Group%20Discussions%20for%20Middle%20Level%20of%20Detail%20Crowds.pdf},
  year      = {2004},
  date      = {2004-12-01},
  booktitle = {Proceedings of the 24th Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {We present an algorithm for animating middle level of detail crowds engaged in conversation. Based on previous work from Padilha and Carletta, this algorithm is used to provide gestures for group characters in an embedded virtual world. The algorithm is implemented and used within the Mission Rehearsal Exercise project at ICT to control Bosnian crowd members.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Stumpfel, Jessi; Jones, Andrew; Wenger, Andreas; Tchou, Chris; Hawkins, Tim; Debevec, Paul
Direct HDR Capture of the Sun and Sky Proceedings Article
In: Proceedings of the 3rd International Conference on Computer Graphics, Virtual Reality, Visualisation and Interaction in Africa, Stellenbosch, South Africa, 2004.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{stumpfel_direct_2004,
  title     = {Direct {HDR} Capture of the Sun and Sky},
  author    = {Jessi Stumpfel and Andrew Jones and Andreas Wenger and Chris Tchou and Tim Hawkins and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Direct%20HDR%20Capture%20of%20the%20Sun%20and%20Sky.pdf},
  year      = {2004},
  date      = {2004-11-01},
  booktitle = {Proceedings of the 3rd International Conference on Computer Graphics, Virtual Reality, Visualisation and Interaction in Africa},
  address   = {Stellenbosch, South Africa},
  abstract  = {We present a technique for capturing the extreme dynamic range of natural illumination environments that include the sun and sky, which has presented a challenge for traditional high dynamic range photography processes. We find that through careful selection of exposure times, aperture, and neutral density filters that this full range can be covered in seven exposures with a standard digital camera. We discuss the particular calibration issues such as lens vignetting, infrared sensitivity, and spectral transmission of neutral density filters which must be addressed. We present an adaptive exposure range adjustment technique for minimizing the number of exposures necessary. We demonstrate our results by showing time-lapse renderings of a complex scene illuminated by high-resolution, high dynamic range natural illumination environments.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Sadek, Ramy
A Host-Based Real-Time Multichannel Immersive Sound Playback and Processing System Proceedings Article
In: Proceedings of the 117th Audio Engineering Society Convention, San Francisco, CA, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{sadek_host-based_2004,
  title     = {A Host-Based Real-Time Multichannel Immersive Sound Playback and Processing System},
  author    = {Ramy Sadek},
  url       = {http://ict.usc.edu/pubs/A%20Host-Based%20Real-Time%20Multichannel%20Immersive%20Sound%20Playback%20and%20Processing%20System.pdf},
  year      = {2004},
  date      = {2004-10-01},
  booktitle = {Proceedings of the 117th Audio Engineering Society Convention},
  address   = {San Francisco, CA},
  abstract  = {This paper presents ARIA (Application Rendering Immersive Audio). This system provides a means for the research community to easily test and integrate algorithms into a multichannel playback/recording system. ARIA uses a host-based architecture, meaning that programs can be developed and debugged in standard C++ without the need for expensive, specialized DSP programming and testing tools. ARIA allows developers to exploit the speed and low cost of modern CPUs, provides cross-platform portability, and simplifies the modification and sharing of codes. This system is designed for real-time playback and processing, thus closing the gap between research testbed and delivery systems.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pighin, Frédéric; Cohen, Jonathan; Shah, Maurya
Modeling and Editing Flows Using Advected Radial Basis Functions Proceedings Article
In: Proceedings of ACM SIGGRAPH/Eurographics Symposium on Computer Animation, Grenoble, France, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{pighin_modeling_2004,
  title     = {Modeling and Editing Flows Using Advected Radial Basis Functions},
  author    = {Frédéric Pighin and Jonathan Cohen and Maurya Shah},
  url       = {http://ict.usc.edu/pubs/Modeling%20and%20Editing%20Flows%20Using%20Advected%20Radial%20Basis%20Functions.pdf},
  year      = {2004},
  date      = {2004-08-01},
  booktitle = {Proceedings of ACM SIGGRAPH/Eurographics Symposium on Computer Animation},
  address   = {Grenoble, France},
  abstract  = {Fluid simulations are notoriously difficult to predict and control. As a result, authoring fluid flows often involves a tedious trial and error process. There is to date no convenient way of editing a fluid after it has been simulated. In particular, the Eulerian approach to fluid simulation is not suitable for flow editing since it does not provide a convenient spatio-temporal parameterization of the simulated flows. In this research, we develop a new technique to learn such parameterization. This technique is based on a new representation, the Advected Radial Basis Function. It is a time-varying kernel that models the local properties of the fluid. We describe this representation and demonstrate its use for interactive three-dimensional flow editing.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Muller, T. J.; Hartholt, Arno; Marsella, Stacy C.; Gratch, Jonathan; Traum, David
Do You Want To Talk About It? A First Step Towards Emotion Integrated Dialogue Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Kloster Irsee, Germany, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{muller_you_2004,
  title     = {Do You Want To Talk About It? A First Step Towards Emotion Integrated Dialogue},
  author    = {T. J. Muller and Arno Hartholt and Stacy C. Marsella and Jonathan Gratch and David Traum},
  url       = {http://ict.usc.edu/pubs/Do%20you%20want%20to%20talk%20about%20it.pdf},
  year      = {2004},
  date      = {2004-08-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems ({AAMAS})},
  address   = {Kloster Irsee, Germany},
  abstract  = {In this paper, we describe an implemented system for emotion-referring dialogue. An agent can engage in emotion-referring dialogue if it first has a model of its own emotions, and secondly has a way of talking about them. We create this facility in MRE Project's virtual humans, building upon the existing emotion and dialogue facilities of these agents.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Iuppa, Nicholas; Weltman, Gershon; Gordon, Andrew S.
Bringing Hollywood Storytelling Techniques to Branching Storylines for Training Applications Proceedings Article
In: Proceedings of the Third International Conference for Narrative and Interactive Learning Environments, Edinburgh, Scotland, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{iuppa_bringing_2004,
  title     = {Bringing Hollywood Storytelling Techniques to Branching Storylines for Training Applications},
  author    = {Nicholas Iuppa and Gershon Weltman and Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/Bringing%20Hollywood%20Storytelling%20Techniques%20to%20Branching%20Storylines%20for%20Training%20Applications.PDF},
  year      = {2004},
  date      = {2004-08-01},
  booktitle = {Proceedings of the Third International Conference for Narrative and Interactive Learning Environments},
  address   = {Edinburgh, Scotland},
  abstract  = {This paper describes the value of capitalizing on Hollywood storytelling techniques in the design of story-based training applications built around branching storylines. After reviewing the design of Outcome-Driven Simulations and the technical aspects of our application prototype, we describe storytelling techniques that greatly improve the level of user engagement in training simulations based on this design. These techniques concern the overall development of the story, the use of a story arc, the critical decisions in a story, notions of pay off and climax, dramatic sequences, character bibles, characters as a Greek chorus, and the significance of consequences and outcomes. Examples of each of these storytelling techniques are given in the context of the ICT Leaders Project, a prototype leadership development application for the US Army.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Evaluating the modeling and use of emotion in virtual humans Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_evaluating_2004,
  title     = {Evaluating the modeling and use of emotion in virtual humans},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Evaluating%20the%20modeling%20and%20use%20of%20emotion%20in%20virtual%20humans.pdf},
  year      = {2004},
  date      = {2004-08-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  address   = {New York, NY},
  abstract  = {Spurred by a range of potential applications, there has been a growing body of research in computational models of human emotion. To advance the development of these models, it is critical that we begin to evaluate them against the phenomena they purport to model. In this paper, we present one methodology to evaluate an emotion model. The methodology is based on comparing the behavior of the computational model against human behavior, using a standard clinical instrument for assessing human emotion and coping. We use this methodology to evaluate the EMA model of emotion. The model did quite well. And, as expected, the comparison helped identify where the model needs further development.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Nair, Anish
Expressions Related to Knowledge and Belief in Children's Speech Proceedings Article
In: Proceedings of the 26th Annual Meeting of the Cognitive Science Society (CogSci), Lawrence Erlbaum Associates, Chicago, IL, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_expressions_2004,
  title     = {Expressions Related to Knowledge and Belief in Children's Speech},
  author    = {Andrew S. Gordon and Anish Nair},
  url       = {http://ict.usc.edu/pubs/Expressions%20Related%20to%20Knowledge%20and%20Belief%20in%20Childrens%20Speech.PDF},
  year      = {2004},
  date      = {2004-08-01},
  booktitle = {Proceedings of the 26th Annual Meeting of the Cognitive Science Society (CogSci)},
  publisher = {Lawrence Erlbaum Associates},
  address   = {Chicago, IL},
  abstract  = {Children develop certain abilities related to Theory of Mind reasoning, particularly concerning the False-belief Task, between the ages of 3 and 5. This paper investigates whether there is a corresponding change in the frequency of linguistic expressions related to knowledge and belief produced by children around these ages. Automated corpus analysis techniques are used to tag each expression related to knowledge and belief in a large corpus of transcripts of speech from normally developing English-learning children. Results indicate that the frequency of expressions related to knowledge and belief increases steadily from the beginning of children's language production. Tracking of individual concepts related to knowledge and belief indicates that there are no clear qualitative changes in the set of concepts that are expressed by children of different ages. The implications for the relationship between language and the development of Theory of Mind reasoning abilities in children are discussed.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
van Lent, Michael; Carpenter, Paul; McAlinden, Ryan; Tan, Poey Guan
A Tactical and Strategic AI Interface for Real-Time Strategy Games Proceedings Article
In: AAAI Technical Workshop Challenges in Game Artificial Intelligence, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{van_lent_tactical_2004,
  title     = {A Tactical and Strategic {AI} Interface for Real-Time Strategy Games},
  author    = {Michael van Lent and Paul Carpenter and Ryan McAlinden and Poey Guan Tan},
  url       = {http://ict.usc.edu/pubs/A%20Tactical%20and%20Strategic%20AI%20Interface%20for%20Real-Time%20Strategy%20Games.pdf},
  year      = {2004},
  date      = {2004-07-01},
  booktitle = {AAAI Technical Workshop Challenges in Game Artificial Intelligence},
  abstract  = {Real Time Strategy (RTS) games present a wide range of AI challenges at the tactical and strategic level. Unfortunately, the lack of flexible “mod” interfaces to these games has made it difficult for AI researchers to explore these challenges in the context of RTS games. We are addressing this by building two AI interfaces into Full Spectrum Command, a real time strategy training aid built for the U.S. Army. The tactical AI interface will allow AI systems, such as Soar and Simulation Based Tactics Mining, to control the tactical behavior of platoons and squads within the environment. The strategic AI interface will allow AI planners to generate and adapt higher-level battle plans which can in turn be executed by the tactical AI. This paper describes these two interfaces and our plans for identifying and addressing the research challenges involved in developing and deploying tactical and strategic AI systems.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Patel, Jigish; Parker, Robert; Traum, David
Small group discussion simulation for middle Level of Detail Crowds Proceedings Article
In: 8th Workshop on Semantics and Pragmatics of Dialogue, Barcelona, Spain, 2004.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{patel_small_2004,
  title     = {Small group discussion simulation for middle Level of Detail Crowds},
  author    = {Jigish Patel and Robert Parker and David Traum},
  url       = {http://ict.usc.edu/pubs/Small%20group%20discussion%20simulation%20for%20middle%20Level%20of%20Detail%20Crowds.pdf},
  year      = {2004},
  date      = {2004-07-01},
  booktitle = {8th Workshop on Semantics and Pragmatics of Dialogue},
  address   = {Barcelona, Spain},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
van Lent, Michael; Fisher, William; Mancuso, Michael
An Explainable Artificial Intelligence System for Small-unit Tactical Behavior Proceedings Article
In: National Conference on Artificial Intelligence, pp. 900–907, San Jose, CA, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{van_lent_explainable_2004,
  title     = {An Explainable Artificial Intelligence System for Small-unit Tactical Behavior},
  author    = {Michael van Lent and William Fisher and Michael Mancuso},
  url       = {http://ict.usc.edu/pubs/An%20Explainable%20Artificial%20Intelligence%20System%20for%20Small-unit%20Tactical%20Behavior.pdf},
  year      = {2004},
  date      = {2004-06-01},
  booktitle = {National Conference on Artificial Intelligence},
  pages     = {900--907},
  address   = {San Jose, CA},
  abstract  = {As the artificial intelligence (AI) systems in military simulations and computer games become more complex, their actions become increasingly difficult for users to understand. Expert systems for medical diagnosis have addressed this challenge though the addition of explanation generation systems that explain a system's internal processes. This paper describes the AI architecture and associated explanation capability used by Full Spectrum Command, a training system developed for the US Army by commercial game developers and academic researchers.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
Authoring Branching Storylines for Training Applications Proceedings Article
In: Proceedings of the Sixth International Conference of the Learning Sciences (ICLS), Santa Monica, CA, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_authoring_2004,
  title     = {Authoring Branching Storylines for Training Applications},
  author    = {Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/Authoring%20Branching%20Storylines%20for%20Training%20Applications.PDF},
  year      = {2004},
  date      = {2004-06-01},
  booktitle = {Proceedings of the Sixth International Conference of the Learning Sciences ({ICLS})},
  address   = {Santa Monica, CA},
  abstract  = {Progress in the area of interactive training applications has led to the formulation of methodologies that have been successfully transitioned out of research labs and into the practices of commercial developers. This paper reviews the academic origins of a methodology for developing training applications that incorporate branching storylines to engage users in a first-person learn-by-doing experience, originally referred to as Outcome-Driven Simulations. Innovations and modifications to this methodology from the commercial sector are then reviewed, and the steps in this methodology are described, as implemented in current best practices. Finally, new research efforts based on this methodology are examined, including the introduction of natural language processing technology to enable human-computer conversations and the integration of branching storylines into real-time virtual reality environments. A prototype application to support leadership development within the U.S. Army that includes these advances is described.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Evaluating a General Model of Emotional Appraisal and Coping Proceedings Article
In: AAAI Spring Symposium on Architectures for Modeling Emotion: Cross-disciplinary Foundations, Palo Alto, CA, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_evaluating_2004-1,
  title     = {Evaluating a General Model of Emotional Appraisal and Coping},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Evaluating%20a%20General%20Model%20of%20Emotional%20Appraisal%20and%20Coping.pdf},
  year      = {2004},
  date      = {2004-06-01},
  booktitle = {AAAI Spring Symposium on Architectures for Modeling Emotion: Cross-disciplinary Foundations},
  address   = {Palo Alto, CA},
  abstract  = {Introduction: In our research, we have developed a general computational model of human emotion. The model attempts to account for both the factors that give rise to emotions as well as the wide-ranging impact emotions have on cognitive and behavioral responses. Emotions influence our beliefs, our decision-making and how we adapt our behavior to the world around us. While most apparent in moments of great stress, emotions sway even the mundane decisions we face in everyday life. Emotions also infuse our social relationships. Our interactions with each other are a source of many emotions and we have developed a range of behaviors that can communicate emotional information as well as an ability to recognize and be influenced by the emotional arousal of others. By virtue of their central role and wide influence, emotion arguably provides the means to coordinate the diverse mental and physical components required to respond to the world in a coherent fashion. (1st Paragraph)},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
A Utility-Based Approach to Intention Recognition Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_utility-based_2004,
  author    = {Wenji Mao and Jonathan Gratch},
  title     = {A Utility-Based Approach to Intention Recognition},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  address   = {New York, NY},
  year      = {2004},
  date      = {2004-06-01},
  url       = {http://ict.usc.edu/pubs/A%20Utility-Based%20Approach%20to%20Intention%20Recognition.pdf},
  abstract  = {Based on the assumption that a rational agent will adopt a plan that maximizes the expected utility, we present a utility-based approach to plan recognition problem in this paper. The approach explicitly takes the observed agent's preferences into consideration, and computes the estimated expected utilities of plans to disambiguate competing hypotheses. Online plan recognition is realized by incrementally using plan knowledge and observations to change state probabilities. We also discuss the work and compare it with other probabilistic models in the paper.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan; Rickel, Jeff
Expressive Behaviors for Virtual Worlds Book Section
In: Life-Like Characters: Tools, Affective Functions, and Applications, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{marsella_expressive_2004,
  author    = {Stacy C. Marsella and Jonathan Gratch and Jeff Rickel},
  title     = {Expressive Behaviors for Virtual Worlds},
  booktitle = {Life-Like Characters: Tools, Affective Functions, and Applications},
  year      = {2004},
  date      = {2004-06-01},
  url       = {http://ict.usc.edu/pubs/Expressive%20Behaviors%20for%20Virtual%20Worlds.pdf},
  abstract  = {A person's behavior provides significant information about their emotional state, attitudes, and attention. Our goal is to create virtual humans that convey such information to people while interacting with them in virtual worlds. The virtual humans must respond dynamically to the events surrounding them, which are fundamentally influenced by users' actions, while providing an illusion of human-like behavior. A user must be able to interpret the dynamic cognitive and emotional state of the virtual humans using the same nonverbal cues that people use to understand one another. Towards these goals, we are integrating and extending components from three prior systems: a virtual human architecture with a wide range of cognitive and motor capabilities, a model of task-oriented emotional appraisal and socially situated planning, and a model of how emotions and coping impact physical behavior. We describe the key research issues and approach in each of these prior systems, as well as our integration and its initial implementation in a leadership training system.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Debevec, Paul; Gardner, Andrew; Tchou, Chris; Hawkins, Tim
Postproduction Re-Illumination of Live Action Using Time-Multiplexed Lighting Technical Report
University of Southern California Institute for Creative Technologies, Marina del Rey, CA, no. ICT TR 05.2004, 2004.
Abstract | Links | BibTeX | Tags: Graphics
@techreport{debevec_postproduction_2004,
  author      = {Paul Debevec and Andrew Gardner and Chris Tchou and Tim Hawkins},
  title       = {Postproduction Re-Illumination of Live Action Using Time-Multiplexed Lighting},
  institution = {University of Southern California Institute for Creative Technologies},
  number      = {ICT TR 05.2004},
  address     = {Marina del Rey, CA},
  year        = {2004},
  date        = {2004-06-01},
  url         = {http://ict.usc.edu/pubs/Postproduction%20Re-Illumination%20of%20Live%20Action%20Using%20Time-Multiplexed%20Lighting.pdf},
  abstract    = {In this work, we present a technique for capturing a time-varying human performance in such a way that it can be re-illuminated in postproduction. The key idea is to illuminate the subject with a variety of rapidly changing time-multiplexed basis lighting conditions, and to record these lighting conditions with a fast enough video camera so that several or many different basis lighting conditions are recorded during the span of the final video's desired frame rate. In this poster we present two versions of such a system and propose plans for creating a complete, production-ready device.},
  keywords    = {Graphics},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Garg, Saurabh; Martinovski, Bilyana; Robinson, Susan; Stephan, Jens; Tetreault, Joel; Traum, David
Evaluation of Transcription and Annotation tools for a Multi-modal, Multi-party dialogue corpus Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{garg_evaluation_2004,
  author    = {Saurabh Garg and Bilyana Martinovski and Susan Robinson and Jens Stephan and Joel Tetreault and David Traum},
  title     = {Evaluation of Transcription and Annotation tools for a Multi-modal, Multi-party dialogue corpus},
  booktitle = {International Conference on Language Resources and Evaluation ({LREC})},
  address   = {Lisbon, Portugal},
  year      = {2004},
  date      = {2004-05-01},
  url       = {http://ict.usc.edu/pubs/Evaluation%20of%20Transcription%20and%20Annotation%20tools%20for%20a%20Multi-modal,%20Multi-party%20dialogue%20corpus.pdf},
  abstract  = {This paper reviews nine available transcription and annotation tools, considering in particular the special difficulties arising from transcribing and annotating multi-party, multi-modal dialogue. Tools are evaluated as to the ability to support the user's annotation scheme, ability to visualize the form of the data, compatibility with other tools, flexibility of data representation, and general user-friendliness.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Robinson, Susan; Stephan, Jens
Evaluation of multi-party virtual reality dialogue interaction Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_evaluation_2004,
  author    = {David Traum and Susan Robinson and Jens Stephan},
  title     = {Evaluation of multi-party virtual reality dialogue interaction},
  booktitle = {International Conference on Language Resources and Evaluation (LREC)},
  address   = {Lisbon, Portugal},
  year      = {2004},
  date      = {2004-05-01},
  url       = {http://ict.usc.edu/pubs/Evaluation%20of%20multi-party%20virtual%20reality%20dialogue%20interaction.pdf},
  abstract  = {We describe a dialogue evaluation plan for a multi-character virtual reality training simulation. A multi-component evaluation plan is presented, including user satisfaction, intended task completion, recognition rate, and a new annotation scheme for appropriateness. Preliminary results for formative tests are also presented.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
Tough Love Between Artificial Intelligence and Interactive Entertainment Proceedings Article
In: Proceedings of IE2004: Australian Workshop on Interactive Entertainment, Sydney, Australia, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_tough_2004,
  author    = {Andrew S. Gordon},
  title     = {Tough Love Between Artificial Intelligence and Interactive Entertainment},
  booktitle = {Proceedings of IE2004: Australian Workshop on Interactive Entertainment},
  address   = {Sydney, Australia},
  year      = {2004},
  date      = {2004-02-01},
  url       = {http://ict.usc.edu/pubs/Tough%20Love%20Between%20Artificial%20Intelligence%20and%20Interactive%20Entertainment.PDF},
  abstract  = {Burgeoning interest in Interactive Entertainment has led many computer scientists with roots in Artificial Intelligence toward the exploration of ideas in mass-market entertainment applications. Increasing numbers of workshops, journals, and funding programs for Interactive Entertainment indicate that AI researchers in this area have a good sense for following hot new trends, but are they vanguards of a fruitful science or misguided opportunists? In this IE2004 invited talk, I'll explore the relationship between AI research and the Interactive Entertainment field, from its seductive courtship through its rocky marriage, and offer some relationship advice for the future.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Robinson, Susan; Martinovski, Bilyana; Garg, Saurabh; Stephan, Jens; Traum, David
Issues in corpus development for multi-party multi-modal task-oriented dialogue Proceedings Article
In: International Conference on Language Resources and Evaluation (LREC), Lisbon, Portugal, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{robinson_issues_2004,
  author    = {Susan Robinson and Bilyana Martinovski and Saurabh Garg and Jens Stephan and David Traum},
  title     = {Issues in corpus development for multi-party multi-modal task-oriented dialogue},
  booktitle = {International Conference on Language Resources and Evaluation (LREC)},
  address   = {Lisbon, Portugal},
  year      = {2004},
  date      = {2004-01-01},
  url       = {http://ict.usc.edu/pubs/Issues%20in%20corpus%20development%20for%20multi-party%20multi-modal%20task-oriented%20dialogue.pdf},
  abstract  = {This paper describes the development of a multi-modal corpus based on multi-party multi-task driven common goal oriented spoken language interaction. The data consists of approximately 10 hours of audio human simulation radio data and nearly 5 hours of video and audio face-to-face sessions between human trainees and virtual agents.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kim, Hyeok-Soo; Gratch, Jonathan
A Planner-Independent Collaborative Planning Assistant Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 766–773, New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kim_planner-independent_2004,
  author    = {Hyeok-Soo Kim and Jonathan Gratch},
  title     = {A Planner-Independent Collaborative Planning Assistant},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems ({AAMAS})},
  volume    = {2},
  pages     = {766--773},
  address   = {New York, NY},
  year      = {2004},
  date      = {2004-01-01},
  url       = {http://ict.usc.edu/pubs/A%20Planner-Independent%20Collaborative%20Planning%20Assistant.pdf},
  abstract  = {This article introduces a novel approach to the problem of collaborative planning. We present a method that takes classical one-shot planning techniques - that take a fixed set of goals, initial state, and a domain theory - and adapts them to support the incremental, hierarchical and exploratory nature of collaborative planning that occurs between human planners, and that multi-agent planning systems attempt to support. This approach is planner-independent - in that it could be applied to any classical planning technique - and recasts the problem of collaborative planning as a search through a space of possible inputs to a classical planning system. This article outlines the technique and describes its application to the Mission Rehearsal Exercise, a multi-agent training system.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Hobbs, Jerry R.
Formalizations of Commonsense Psychology Journal Article
In: AI Magazine, vol. 24, no. 5, pp. 49–62, 2004.
Abstract | Links | BibTeX | Tags: The Narrative Group
@article{gordon_formalizations_2004,
  author    = {Andrew S. Gordon and Jerry R. Hobbs},
  title     = {Formalizations of Commonsense Psychology},
  journal   = {AI Magazine},
  volume    = {24},
  number    = {5},
  pages     = {49--62},
  year      = {2004},
  date      = {2004-01-01},
  url       = {http://ict.usc.edu/pubs/Formalizations%20of%20Commonsense%20Psychology.pdf},
  abstract  = {The central challenge in commonsense knowledge representation research is to develop content theories that achieve a high degree of both competency and coverage. We describe a new methodology for constructing formal theories in commonsense knowledge domains that complements traditional knowledge representation approaches by first addressing issues of coverage. We show how a close examination of a very general task (strategic planning) leads to a catalog of the concepts and facts that must be encoded for general commonsense reasoning. These concepts are sorted into a manageable number of coherent domains, one of which is the representational area of commonsense human memory. We then elaborate on these concepts using textual corpus-analysis techniques, where the conceptual distinctions made in natural language are used to improve the definitions of the concepts that should be expressible in our formal theories. These representational areas are then analyzed using more traditional knowledge representation techniques, as demonstrated in this article by our treatment of commonsense human memory.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {article}
}
Gratch, Jonathan; Marsella, Stacy C.
Technical Details of a Domain-independent Framework for Modeling Emotion Technical Report
University of Southern California Institute for Creative Technologies, Marina del Rey, CA, no. ICT TR 04.2004, 2004.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@techreport{gratch_technical_2004,
  author      = {Jonathan Gratch and Stacy C. Marsella},
  title       = {Technical Details of a Domain-independent Framework for Modeling Emotion},
  institution = {University of Southern California Institute for Creative Technologies},
  number      = {ICT TR 04.2004},
  address     = {Marina del Rey, CA},
  year        = {2004},
  date        = {2004-01-01},
  url         = {http://ict.usc.edu/pubs/Technical%20Details%20of%20a%20Domain-independent%20Framework%20for%20Modeling%20Emotion.pdf},
  abstract    = {This technical report elaborates on the technical details of the EMA model of emotional appraisal and coping. It should be seen as an appendix to the journal article on this topic (Gratch \& Marsella, to appear)},
  keywords    = {Social Simulation, Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Mao, Wenji; Gratch, Jonathan
Decision-Theoretic Approach to Plan Recognition Technical Report
University of Southern California Institute for Creative Technologies, Marina del Rey, CA, no. ICT TR 01.2004, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@techreport{mao_decision-theoretic_2004,
  author      = {Wenji Mao and Jonathan Gratch},
  title       = {Decision-Theoretic Approach to Plan Recognition},
  institution = {University of Southern California Institute for Creative Technologies},
  number      = {ICT TR 01.2004},
  address     = {Marina del Rey, CA},
  year        = {2004},
  date        = {2004-01-01},
  url         = {http://ict.usc.edu/pubs/Decision-Theoretic%20Approach%20to%20Plan%20Recognition.pdf},
  abstract    = {In this report, first we give a survey of the work in plan recognition field, including the evolution of different approaches, their strength and weaknesses. Then we propose two decision-theoretic approaches to plan recognition problem, which explicitly take outcome utilities into consideration. One is an extension within the probabilistic reasoning framework, by adding utility nodes to belief nets. The other is based on maximizing the estimated expected utility of possible plan. Illustrative examples are given to explain the approaches. Finally, we compare the two approaches presented in the report and summarize the work.},
  keywords    = {Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Traum, David
Issues in Multiparty Dialogues Journal Article
In: Advances in Agent Communication, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{traum_issues_2004,
  author    = {David Traum},
  editor    = {F. Dignum},
  title     = {Issues in Multiparty Dialogues},
  journal   = {Advances in Agent Communication},
  year      = {2004},
  date      = {2004-01-01},
  url       = {http://ict.usc.edu/pubs/Issues%20in%20Multiparty%20Dialogues.pdf},
  abstract  = {This article examines some of the issues in representation of, processing, and automated agent participation in natural language dialogue, considering expansion from two-party dialogue to multi-party dialogue. These issues include some regarding the roles agents play in dialogue, interactive factors, and content management factors.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Mao, Wenji; Gratch, Jonathan
Social Judgment in Multiagent Interactions Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 210–217, New York, NY, 2004.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_social_2004,
  author    = {Wenji Mao and Jonathan Gratch},
  title     = {Social Judgment in Multiagent Interactions},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems ({AAMAS})},
  volume    = {1},
  pages     = {210--217},
  address   = {New York, NY},
  year      = {2004},
  date      = {2004-01-01},
  url       = {http://ict.usc.edu/pubs/Social%20Judgment%20in%20Multiagent%20Interactions.pdf},
  abstract  = {Social judgment is a process of social explanation whereby one evaluates which entities deserve credit or blame for multi-agent activities. Such explanations are a key aspect of inference in a social environment and a model of this process can advance several design components of multi-agent systems. Social judgment underlies social planning, social learning, natural language pragmatics and computational model of emotion. Based on psychological attribution theory, this paper presents a computational approach to forming social judgment based on an agents causal knowledge and communicative interactions with other agents.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Cao, Yong; Faloutsos, Petros; Kohler, Eddie; Pighin, Frédéric
Real-time Speech Motion Synthesis from Recorded Motions Proceedings Article
In: Proceedings of Eurographics/SIGGRAPH Symposium on Computer Animation, 2004.
Abstract | Links | BibTeX | Tags:
@inproceedings{cao_real-time_2004,
  author    = {Yong Cao and Petros Faloutsos and Eddie Kohler and Frédéric Pighin},
  title     = {Real-time Speech Motion Synthesis from Recorded Motions},
  booktitle = {Proceedings of Eurographics/SIGGRAPH Symposium on Computer Animation},
  year      = {2004},
  date      = {2004-01-01},
  url       = {http://ict.usc.edu/pubs/Real-time%20Speech%20Motion%20Synthesis%20from%20Recorded%20Motions.pdf},
  abstract  = {Data-driven approaches have been successfully used for realistic visual speech synthesis. However, little effort has been devoted to real-time lip-synching for interactive applications. In particular, algorithms that are based on a graph of motions are notorious for their exponential complexity. In this paper, we present a greedy graph search algorithm that yields vastly superior performance and allows real-time motion synthesis from a large database of motions. The time complexity of the algorithm is linear with respect to the size of an input utterance. In our experiments, the synthesis time for an input sentence of average length is under a second.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Parsons, Thomas D.; Larson, Peter; Kratz, Kris; Thiebaux, Marcus; Bluestein, Brendon; Buckwalter, John Galen; Rizzo, Albert
Sex differences in mental rotation and spatial rotation in a virtual environment Journal Article
In: Neuropsychologia, vol. 42, pp. 555–562, 2004.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_sex_2004,
  author    = {Thomas D. Parsons and Peter Larson and Kris Kratz and Marcus Thiebaux and Brendon Bluestein and John Galen Buckwalter and Albert Rizzo},
  title     = {Sex differences in mental rotation and spatial rotation in a virtual environment},
  journal   = {Neuropsychologia},
  volume    = {42},
  pages     = {555--562},
  year      = {2004},
  date      = {2004-01-01},
  url       = {http://ict.usc.edu/pubs/Sex%20differences%20in%20mental%20rotation%20and%20spatial%20rotation%20in%20a%20virtual%20environment.pdf},
  doi       = {10.1016/j.neuropsychologia.2003.08.014},
  abstract  = {The visuospatial ability referred to as mental rotation has been shown to produce one of the largest and most consistent sex differences, in favor of males, in the cognitive literature. The current study utilizes both a paper-and-pencil version of the mental rotations test (MRT) and a virtual environment for investigating rotational ability among 44 adult subjects. Results replicate sex differences traditionally seen on paper-and-pencil measures, while no sex effects were observed in the virtual environment. These findings are discussed in terms of task demands and motor involvement. Sex differences were also seen in the patterns of correlations between rotation tasks and other neuropsychological measures. Current results suggest men may rely more on left hemisphere processing than women when engaged in rotational tasks. © 2003 Elsevier Ltd. All rights reserved.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}
Parsons, Thomas D.; Rizzo, Albert; Buckwalter, John Galen
Backpropagation and Regression: Comparative Utility for Neuropsychologists Journal Article
In: Journal of Clinical and Experimental Neuropsychology, vol. 26, no. 1, pp. 95–104, 2004.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_backpropagation_2004,
  author    = {Thomas D. Parsons and Albert Rizzo and John Galen Buckwalter},
  title     = {Backpropagation and Regression: Comparative Utility for Neuropsychologists},
  journal   = {Journal of Clinical and Experimental Neuropsychology},
  volume    = {26},
  number    = {1},
  pages     = {95--104},
  year      = {2004},
  date      = {2004-01-01},
  url       = {http://ict.usc.edu/pubs/Backpropagation%20and%20Regression-%20Comparative%20Utility%20for%20Neuropsychologists.pdf},
  abstract  = {The aim of this research was to compare the data analytic applicability of a backpropagated neural network with that of regression analysis. Thirty individuals between the ages of 64 and 86 (Mean age = 73.6; Mean years education = 15.4; \% women = 50) participated in a study designed to validate a new test of spatial ability administered in virtual reality. As part of this project a standard neuropsychological battery was administered. Results from the multiple regression model (R2 = .21, p \textless .28; Standard Error = 18.01) were compared with those of a backpropagated ANN (R2 = .39, p \textless .02; Standard Error = 13.07). This 18% increase in prediction of a common neuropsychological problem demonstrated that an ANN has the potential to outperform a regression.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article}
}