Publications
Search
Jones, Andrew; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Barreto, Joseph; Alexander, Oleg; Bolas, Mark; Debevec, Paul; Unger, Jonas
Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array Proceedings Article
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 18–26, Las Vegas, NV, 2016.
@inproceedings{jones_time-offset_2016,
title = {Time-Offset Conversations on a Life-Sized Automultiscopic Projector Array},
author = {Andrew Jones and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Joseph Barreto and Oleg Alexander and Mark Bolas and Paul Debevec and Jonas Unger},
url = {http://www.cv-foundation.org//openaccess/content_cvpr_2016_workshops/w16/papers/Jones_Time-Offset_Conversations_on_CVPR_2016_paper.pdf},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops},
pages = {18--26},
address = {Las Vegas, NV},
abstract = {We present a system for creating and displaying interactive life-sized 3D digital humans based on pre-recorded interviews. We use 30 cameras and an extensive list of questions to record a large set of video responses. Users access videos through a natural conversation interface that mimics face-to-face interaction. Recordings of answers, listening and idle behaviors are linked together to create a persistent visual image of the person throughout the interaction. The interview subjects are rendered using flowed light fields and shown life-size on a special rear-projection screen with an array of 216 video projectors. The display allows multiple users to see different 3D perspectives of the subject in proper relation to their viewpoints, without the need for stereo glasses. The display is effective for interactive conversations since it provides 3D cues such as eye gaze and spatial hand gestures.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Grechkin, Timofey; Thomas, Jerald; Azmandian, Mahdi; Bolas, Mark; Suma, Evan
Revisiting detection thresholds for redirected walking: combining translation and curvature gains Proceedings Article
In: Proceedings of the ACM Symposium on Applied Perception, pp. 113–120, ACM Press, Anaheim, CA, 2016, ISBN: 978-1-4503-4383-1.
@inproceedings{grechkin_revisiting_2016,
title = {Revisiting detection thresholds for redirected walking: combining translation and curvature gains},
author = {Timofey Grechkin and Jerald Thomas and Mahdi Azmandian and Mark Bolas and Evan Suma},
url = {http://dl.acm.org/citation.cfm?id=2931018},
doi = {10.1145/2931002.2931018},
isbn = {978-1-4503-4383-1},
year = {2016},
date = {2016-07-01},
booktitle = {Proceedings of the ACM Symposium on Applied Perception},
pages = {113--120},
publisher = {ACM Press},
address = {Anaheim, CA},
abstract = {Redirected walking enables the exploration of large virtual environments while requiring only a finite amount of physical space. Unfortunately, in living room sized tracked areas the effectiveness of common redirection algorithms such as Steer-to-Center is very limited. A potential solution is to increase redirection effectiveness by applying two types of perceptual manipulations (curvature and translation gains) simultaneously. This paper investigates how such combination may affect detection thresholds for curvature gain. To this end we analyze the estimation methodology and discuss selection process for a suitable estimation method. We then compare curvature detection thresholds obtained under different levels of translation gain using two different estimation methods: method of constant stimuli and Green's maximum likelihood procedure. The data from both experiments shows no evidence that curvature gain detection thresholds were affected by the presence of translation gain (with test levels spanning previously estimated interval of undetectable translation gain levels). This suggests that in practice currently used levels of translation and curvature gains can be safely applied simultaneously. Furthermore, we present some evidence that curvature detection thresholds may be lower that previously reported. Our estimates indicate that users can be redirected on a circular arc with radius of either 11.6m or 6.4m depending on the estimation method vs. the previously reported value of 22m. These results highlight that the detection threshold estimates vary significantly with the estimation method and suggest the need for further studies to define efficient and reliable estimation methodology.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
LeGendre, Chloe; Yu, Xueming; Debevec, Paul
Optimal LED selection for multispectral lighting reproduction Proceedings Article
In: SIGGRAPH '16: ACM SIGGRAPH 2016 Posters, ACM, New York, NY, 2016, ISBN: 978-1-4503-4371-8.
@inproceedings{legendre_optimal_2016,
title = {Optimal LED selection for multispectral lighting reproduction},
author = {Chloe LeGendre and Xueming Yu and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?id=2945150},
doi = {10.1145/2945078.2945150},
isbn = {978-1-4503-4371-8},
year = {2016},
date = {2016-07-01},
booktitle = {SIGGRAPH '16: ACM SIGGRAPH 2016 Posters},
publisher = {ACM},
address = {New York, NY},
abstract = {We demonstrate the sufficiency of using as few as five LEDs of distinct spectra for multispectral lighting reproduction and solve for the optimal set of five from 11 such commercially available LEDs. We leverage published spectral reflectance, illuminant, and camera spectral sensitivity datasets to show that two approaches of lighting reproduction, matching illuminant spectra directly and matching material color appearance observed by one or more cameras or a human observer, yield the same LED selections. Our proposed optimal set of five LEDs includes red, green, and blue with narrow emission spectra, along with white and amber with broader spectra.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ustun, Volkan; Rosenbloom, Paul
Towards Truly Autonomous Synthetic Characters with the Sigma Cognitive Architecture Book Section
In: Integrating Cognitive Architectures into Virtual Character Design, pp. 213 – 237, IGI Global, Hershey, PA, 2016, ISBN: 978-1-5225-0454-2.
@incollection{ustun_towards_2016,
title = {Towards Truly Autonomous Synthetic Characters with the Sigma Cognitive Architecture},
author = {Volkan Ustun and Paul Rosenbloom},
url = {http://services.igi-global.com/resolvedoi/resolve.aspx?doi=10.4018/978-1-5225-0454-2},
isbn = {978-1-5225-0454-2},
year = {2016},
date = {2016-06-01},
booktitle = {Integrating Cognitive Architectures into Virtual Character Design},
pages = {213--237},
publisher = {IGI Global},
address = {Hershey, PA},
abstract = {Realism is required not only for how synthetic characters look but also for how they behave. Many applications, such as simulations, virtual worlds, and video games, require computational models of intelligence that generate realistic and credible behavior for the participating synthetic characters. Sigma ({$\Sigma$}) is being built as a computational model of general intelligence with a long-term goal of understanding and replicating the architecture of the mind; i.e., the fixed structure underlying intelligent behavior. Sigma leverages probabilistic graphical models towards a uniform grand unification of not only traditional cognitive capabilities but also key non-cognitive aspects, creating unique opportunities for the construction of new kinds of non-modular behavioral models. These ambitions strive for the complete control of synthetic characters that behave as humanly as possible. In this paper, Sigma is introduced along with two disparate proof-of-concept virtual humans -- one conversational and the other a pair of ambulatory agents -- that demonstrate its diverse capabilities.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Swartout, William R.
Virtual Humans as Centaurs: Melding Real and Virtual Book Section
In: Virtual, Augmented and Mixed Reality, vol. 9740, pp. 356–359, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-39906-5 978-3-319-39907-2.
@incollection{swartout_virtual_2016,
title = {Virtual Humans as Centaurs: Melding Real and Virtual},
author = {William R. Swartout},
url = {http://link.springer.com/10.1007/978-3-319-39907-2_34},
isbn = {978-3-319-39906-5 978-3-319-39907-2},
year = {2016},
date = {2016-06-01},
booktitle = {Virtual, Augmented and Mixed Reality},
volume = {9740},
pages = {356--359},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Centaurs are man-machine teams that can work together on problems and can out-perform either people or computers working alone in domains as varied as chess-playing and protein folding. But the centaur of Greek mythology was not a team, but rather a hybrid of man and horse with some of the characteristics of each. In this paper, we outline our efforts to build virtual humans, which might be considered hybrid centaurs, combining features of both people and machines. We discuss experimental evidence that shows that these virtual human hybrids can outperform both people and inanimate processes in some tasks such as medical interviewing.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Nolin, Pierre; Stipanicic, Annie; Henry, Mylène; Lachapelle, Yves; Lussier-Desrochers, Dany; Rizzo, Albert “Skip”; Allain, Philippe
ClinicaVR: Classroom-CPT: A virtual reality tool for assessing attention and inhibition in children and adolescents Journal Article
In: Computers in Human Behavior, vol. 59, pp. 327–333, 2016, ISSN: 07475632.
@article{nolin_clinicavr_2016,
title = {ClinicaVR: Classroom-CPT: A virtual reality tool for assessing attention and inhibition in children and adolescents},
author = {Pierre Nolin and Annie Stipanicic and Mylène Henry and Yves Lachapelle and Dany Lussier-Desrochers and Albert ``Skip'' Rizzo and Philippe Allain},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0747563216300759},
doi = {10.1016/j.chb.2016.02.023},
issn = {0747-5632},
year = {2016},
date = {2016-06-01},
journal = {Computers in Human Behavior},
volume = {59},
pages = {327--333},
abstract = {Having garnered interest both in clinic and research areas, the Virtual Classroom (Rizzo et al., 2000) assesses children's attention in a virtual context. The Digital MediaWorks team (www.dmw.ca) has evolved the original basic classroom concept over a number of iterations to form the ClinicaVR Suite containing the Classroom-CPT as one of its components. The present study has three aims: investigate certain validity and reliability aspects of the tool; examine the relationship between performance in the virtual test and the attendant sense of presence and cybersickness experienced by participants; assess potential effects of gender and age on performance in the test. The study was conducted with 102 children and adolescents from Grade 2 to Grade 10. All participants were enrolled in a regular school program. Results support both concurrent and construct validity as well as temporal stability of ClinicaVR: Classroom-Continuous Performance Test (CPT). Gender exerted no effect on performance, while age did. The test did not cause much cybersickness. We recommend ClinicaVR: Classroom-CPT as an assessment tool for selective and sustained attention, and inhibition, in clinic and research domains.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Artstein, Ron; Gainer, Alesia; Georgila, Kallirroi; Leuski, Anton; Shapiro, Ari; Traum, David
New Dimensions in Testimony Demonstration Proceedings Article
In: Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations, pp. 32–36, Association for Computational Linguistics, San Diego, California, 2016.
@inproceedings{artstein_new_2016,
title = {New Dimensions in Testimony Demonstration},
author = {Ron Artstein and Alesia Gainer and Kallirroi Georgila and Anton Leuski and Ari Shapiro and David Traum},
url = {http://www.aclweb.org/anthology/N16-3007},
year = {2016},
date = {2016-06-01},
booktitle = {Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Demonstrations},
pages = {32--36},
publisher = {Association for Computational Linguistics},
address = {San Diego, California},
abstract = {New Dimensions in Testimony is a prototype dialogue system that allows users to conduct a conversation with a real person who is not available for conversation in real time. Users talk to a persistent representation of Holocaust survivor Pinchas Gutter on a screen, while a dialogue agent selects appropriate responses to user utterances from a set of pre-recorded video statements, simulating a live conversation. The technology is similar to existing conversational agents, but to our knowledge this is the first system to portray a real person. The demonstration will show the system on a range of screens (from mobile phones to large TVs), and allow users to have individual conversations with Mr. Gutter.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Phan, Thai; Bolas, Mark; Krum, David M.
User Perceptions of a Virtual Human Over Mobile Video Chat Interactions Book Section
In: Human-Computer Interaction. Novel User Experiences, vol. 9733, pp. 107–118, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-39512-8 978-3-319-39513-5.
@incollection{kang_user_2016,
title = {User Perceptions of a Virtual Human Over Mobile Video Chat Interactions},
author = {Sin-Hwa Kang and Thai Phan and Mark Bolas and David M. Krum},
url = {http://link.springer.com/chapter/10.1007/978-3-319-39513-5_10},
doi = {10.1007/978-3-319-39513-5_10},
isbn = {978-3-319-39512-8 978-3-319-39513-5},
year = {2016},
date = {2016-06-01},
booktitle = {Human-Computer Interaction. Novel User Experiences},
volume = {9733},
pages = {107--118},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {We believe that virtual humans, presented over video chat services, such as Skype, and delivered using smartphones, can be an effective way to deliver innovative applications where social interactions are important, such as counseling and coaching. To explore this subject, we have built a hardware and software apparatus that allows virtual humans to initiate, receive, and interact over video calls using Skype or any similar service. With this platform, we conducted two experiments to investigate the applications and characteristics of virtual humans that interact over mobile video. In Experiment 1, we investigated user reactions to the physical realism of the background scene in which a virtual human was displayed. In Experiment 2, we examined how virtual characters can establish and maintain longer term relationships with users, using ideas from Social Exchange Theory to strengthen bonds between interactants. Experiment 2 involved repeated interactions with a virtual human over a period of time. Both studies used counseling-style interactions with users. The results demonstrated that males were more attracted socially to a virtual human that was presented over a realistic background than a featureless background while females were more socially attracted to a virtual human with a less realistic featureless background. The results further revealed that users felt the virtual human was a compassionate partner when they interacted with the virtual human over multiple calls, rather than just a single call.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Mizukami, Masahiro; Traum, David; Yoshino, Koichiro; Neubig, Graham; Nakamura, Satoshi
Word and Dialogue Act Entrainment Analysis based on User Profile Proceedings Article
In: Proceedings of The 30th Annual Conference of the Japanese Society for Artificial Intelligence, Kitakyushu, Japan, 2016.
@inproceedings{mizukami_word_2016,
title = {Word and Dialogue Act Entrainment Analysis based on User Profile},
author = {Masahiro Mizukami and David Traum and Koichiro Yoshino and Graham Neubig and Satoshi Nakamura},
url = {https://kaigi.org/jsai/webprogram/2016/pdf/356.pdf},
year = {2016},
date = {2016-06-01},
booktitle = {Proceedings of The 30th Annual Conference of the Japanese Society for Artificial Intelligence},
address = {Kitakyushu, Japan},
abstract = {Patterns of dialogue act and word selection are observable in dialogue. Entrainment is the factor that might account for these patterns. We test the entrainment hypotheses using the switchboard corpus, comparing speech of different speakers from different parts of the dialogue, but also speech of the same speaker at different points. Our findings replicate previous studies that dialogue participants converge toward each other in word choice, but we also investigate novel measures of entrainment of dialogue act selection, and word choice for specific dialogue acts. These studies inform a design for dialogue systems that would show human-like degrees of entrainment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nye, Benjamin D.
ITS, The End of the World as We Know It: Transitioning AIED into a Service-Oriented Ecosystem Journal Article
In: International Journal of Artificial Intelligence in Education, vol. 26, no. 2, pp. 756–770, 2016, ISSN: 1560-4292, 1560-4306.
@article{nye_its_2016,
title = {ITS, The End of the World as We Know It: Transitioning AIED into a Service-Oriented Ecosystem},
author = {Benjamin D. Nye},
url = {http://link.springer.com/10.1007/s40593-016-0098-8},
doi = {10.1007/s40593-016-0098-8},
issn = {1560-4292, 1560-4306},
year = {2016},
date = {2016-06-01},
journal = {International Journal of Artificial Intelligence in Education},
volume = {26},
number = {2},
pages = {756--770},
abstract = {Advanced learning technologies are reaching a new phase of their evolution where they are finally entering mainstream educational contexts, with persistent user bases. However, as AIED scales, it will need to follow recent trends in service-oriented and ubiquitous computing: breaking AIED platforms into distinct services that can be composed for different platforms (web, mobile, etc.) and distributed across multiple systems. This will represent a move from learning platforms to an ecosystem of interacting learning tools. Such tools will enable new opportunities for both user-adaptation and experimentation. Traditional macro-adaptation (problem selection) and step-based adaptation (hints and feedback) will be extended by meta-adaptation (adaptive system selection) and micro-adaptation (event-level optimization). The existence of persistent and widely-used systems will also support new paradigms for experimentation in education, allowing researchers to understand interactions and boundary conditions for learning principles. New central research questions for the field will also need to be answered due to these changes in the AIED landscape.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Mühlberger, Andreas; Jekel, K.; Probst, Thomas; Schecklmann, Martin; Conzelmann, A.; Andreatta, M.; Rizzo, A. A.; Pauli, P.; Romanos, M.
The Influence of Methylphenidate on Hyperactivity and Attention Deficits in Children With ADHD: A Virtual Classroom Test Journal Article
In: Journal of attention disorders, 2016.
@article{muhlberger_influence_2016,
title = {The Influence of Methylphenidate on Hyperactivity and Attention Deficits in Children With ADHD: A Virtual Classroom Test},
author = {Andreas Mühlberger and K. Jekel and Thomas Probst and Martin Schecklmann and A. Conzelmann and M. Andreatta and A. A. Rizzo and P. Pauli and M. Romanos},
url = {http://journals.sagepub.com/doi/abs/10.1177/1087054716647480},
doi = {10.1177/1087054716647480},
year = {2016},
date = {2016-05-01},
journal = {Journal of Attention Disorders},
abstract = {This study compares the performance in a continuous performance test within a virtual reality classroom (CPT-VRC) between medicated children with ADHD, unmedicated children with ADHD, and healthy children. Method: N = 94 children with ADHD (n = 26 of them received methylphenidate and n = 68 were unmedicated) and n = 34 healthy children performed the CPT-VRC. Omission errors, reaction time/variability, commission errors, and body movements were assessed. Furthermore, ADHD questionnaires were administered and compared with the CPT-VRC measures. Results: The unmedicated ADHD group exhibited more omission errors and showed slower reaction times than the healthy group. Reaction time variability was higher in the unmedicated ADHD group compared with both the healthy and the medicated ADHD group. Omission errors and reaction time variability were associated with inattentiveness ratings of experimenters. Head movements were correlated with hyperactivity ratings of parents and experimenters. Conclusion: Virtual reality is a promising technology to assess ADHD symptoms in an ecologically valid environment.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Wang, Ning; Pynadath, David V.; Hill, Susan G.
The Impact of POMDP-Generated Explanations on Trust and Performance in Human-Robot Teams Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 997–1005, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{wang_impact_2016,
title = {The Impact of POMDP-Generated Explanations on Trust and Performance in Human-Robot Teams},
author = {Ning Wang and David V. Pynadath and Susan G. Hill},
url = {http://dl.acm.org/citation.cfm?id=2937071},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {997--1005},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Researchers have observed that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies have shown that hand-crafted explanations can help maintain effective team performance even when the system is less than 100% reliable. However, current explanation algorithms are not sufficient for making a robot's quantitative reasoning (in terms of both uncertainty and conflicting goals) transparent to human teammates. In this work, we develop a novel mechanism for robots to automatically generate explanations of reasoning based on Partially Observable Markov Decision Problems (POMDPs). Within this mechanism, we implement alternate natural-language templates and then measure their differential impact on trust and team performance within an agent-based online test-bed that simulates a human-robot team task. The results demonstrate that the added explanation capability leads to improvement in transparency, trust, and team performance. Furthermore, by observing the different outcomes due to variations in the robot's explanation content, we gain valuable insight that can help lead to refinement of explanation algorithms to further improve human-robot interaction.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Swartout, William; Nye, Benjamin D.; Hartholt, Arno; Reilly, Adam; Graesser, Arthur C.; VanLehn, Kurt; Wetzel, Jon; Liewer, Matt; Morbini, Fabrizio; Morgan, Brent; Wang, Lijia; Benn, Grace; Rosenberg, Milton
Designing a Personal Assistant for Life-Long Learning (PAL3) Proceedings Article
In: Proceedings of The Twenty-Ninth International Flairs Conference, pp. 491–496, AAAI Press, Key Largo, FL, 2016, ISBN: 978-1-57735-756-8.
@inproceedings{swartout_designing_2016,
title = {Designing a Personal Assistant for Life-Long Learning (PAL3)},
author = {William Swartout and Benjamin D. Nye and Arno Hartholt and Adam Reilly and Arthur C. Graesser and Kurt VanLehn and Jon Wetzel and Matt Liewer and Fabrizio Morbini and Brent Morgan and Lijia Wang and Grace Benn and Milton Rosenberg},
url = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS16/paper/view/12793},
isbn = {978-1-57735-756-8},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of The Twenty-Ninth International Flairs Conference},
pages = {491--496},
publisher = {AAAI Press},
address = {Key Largo, FL},
abstract = {Learners' skills decay during gaps in instruction, since they lack the structure and motivation to continue studying. To meet this challenge, the PAL3 system was designed to accompany a learner throughout their career and mentor them to build and maintain skills through: 1) the use of an embodied pedagogical agent (Pal), 2) a persistent learning record that drives a student model which estimates forgetting, 3) an adaptive recommendation engine linking to both intelligent tutors and traditional learning resources, and 4) game-like mechanisms to promote engagement (e.g., leaderboards, effort-based point rewards, unlocking customizations). The design process for PAL3 is discussed, from the perspective of insights and revisions based on a series of formative feedback and evaluation sessions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Casas, Dan; Feng, Andrew; Alexander, Oleg; Fyffe, Graham; Debevec, Paul; Ichikari, Ryosuke; Li, Hao; Olszewski, Kyle; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshape Modeling from RGB-D Sensors Proceedings Article
In: Proceedings of the 29th International Conference on Computer Animation and Social Agents, pp. 121–129, ACM Press, Geneva, Switzerland, 2016, ISBN: 978-1-4503-4745-7.
@inproceedings{casas_rapid_2016,
title = {Rapid Photorealistic Blendshape Modeling from RGB-D Sensors},
author = {Dan Casas and Andrew Feng and Oleg Alexander and Graham Fyffe and Paul Debevec and Ryosuke Ichikari and Hao Li and Kyle Olszewski and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2915926.2915936},
doi = {10.1145/2915926.2915936},
isbn = {978-1-4503-4745-7},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 29th International Conference on Computer Animation and Social Agents},
pages = {121--129},
publisher = {ACM Press},
address = {Geneva, Switzerland},
abstract = {Creating and animating realistic 3D human faces is an important element of virtual reality, video games, and other areas that involve interactive 3D graphics. In this paper, we propose a system to generate photorealistic 3D blendshape-based face models automatically using only a single consumer RGB-D sensor. The capture and processing requires no artistic expertise to operate, takes 15 seconds to capture and generate a single facial expression, and approximately 1 minute of processing time per expression to transform it into a blendshape model. Our main contributions include a complete end-to-end pipeline for capturing and generating photorealistic blendshape models automatically and a registration method that solves dense correspondences between two face scans by utilizing facial landmarks detection and optical flows. We demonstrate the effectiveness of the proposed method by capturing different human subjects with a variety of sensors and puppeteering their 3D faces with real-time facial performance retargeting. The rapid nature of our method allows for just-in-time construction of a digital face. To that end, we also integrated our pipeline with a virtual reality facial performance capture system that allows dynamic embodiment of the generated faces despite partial occlusion of the user's real face by the head-mounted display.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Nazari, Zahra; Johnson, Emmanuel
The Misrepresentation Game: How to win at negotiation while seeming like a nice guy Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 728–737, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{gratch_misrepresentation_2016,
title = {The Misrepresentation Game: How to win at negotiation while seeming like a nice guy},
author = {Jonathan Gratch and Zahra Nazari and Emmanuel Johnson},
url = {http://dl.acm.org/citation.cfm?id=2937031},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {728--737},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Recently, interest has grown in agents that negotiate with people: to teach negotiation, to negotiate on behalf of people, and as a challenge problem to advance artificial social intelligence. Humans negotiate differently from algorithmic approaches to negotiation: people are not purely self-interested but place considerable weight on norms like fairness; people exchange information about their mental state and use this to judge the fairness of a social exchange; and people lie. Here, we focus on lying. We present an analysis of how people (or agents interacting with people) might optimally lie (maximally benefit themselves) while maintaining the illusion of fairness towards the other party. In doing so, we build on concepts from game theory and the preference-elicitation literature, but apply these to human, not rational, behavior. Our findings demonstrate clear benefits to lying and provide empirical support for a heuristic -- the ``fixed-pie lie'' -- that substantially enhances the efficiency of such deceptive algorithms. We conclude with implications and potential defenses against such manipulative techniques.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Rosoff, Heather; John, Richard S.
Semi-Automated Construction of Decision-Theoretic Models of Human Behavior Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 891–899, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{pynadath_semi-automated_2016,
title = {Semi-Automated Construction of Decision-Theoretic Models of Human Behavior},
author = {David V. Pynadath and Heather Rosoff and Richard S. John},
url = {http://dl.acm.org/citation.cfm?id=2937055},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {891--899},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Multiagent social simulation provides a powerful mechanism for policy makers to understand the potential outcomes of their decisions before implementing them. However, the value of such simulations depends on the accuracy of their underlying agent models. In this work, we present a method for automatically exploring a space of decision-theoretic models to arrive at a multiagent social simulation that is consistent with human behavior data. We start with a factored Partially Observable Markov Decision Process (POMDP) whose states, actions, and reward capture the questions asked in a survey from a disaster response scenario. Using input from domain experts, we construct a set of hypothesized dependencies that may or may not exist in the transition probability function. We present an algorithm to search through each of these hypotheses, evaluate their accuracy with respect to the data, and choose the models that best reflect the observed behavior, including individual differences. The result is a mechanism for constructing agent models that are grounded in human behavior data, while still being able to support hypothetical reasoning that is the main advantage of multiagent social simulation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Fyffe, Graham; Graham, Paul; Tunwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Near-Instant Capture of High-Resolution Facial Geometry and Reflectance Journal Article
In: Computer Graphics Forum, 2016, ISSN: 1467-8659.
@article{fyffe_near-instant_2016,
  author    = {Graham Fyffe and Paul Graham and Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
  title     = {Near-Instant Capture of High-Resolution Facial Geometry and Reflectance},
  journal   = {Computer Graphics Forum},
  year      = {2016},
  date      = {2016-05-01},
  doi       = {10.1111/cgf.12837},
  issn      = {1467-8659},
  url       = {http://onlinelibrary.wiley.com/doi/10.1111/cgf.12837/abstract},
  abstract  = {We present a near-instant method for acquiring facial geometry and reflectance using a set of commodity DSLR cameras and flashes. Our setup consists of twenty-four cameras and six flashes which are fired in rapid succession with subsets of the cameras. Each camera records only a single photograph and the total capture time is less than the 67ms blink reflex. The cameras and flashes are specially arranged to produce an even distribution of specular highlights on the face. We employ this set of acquired images to estimate diffuse color, specular intensity, specular exponent, and surface orientation at each point on the face. We further refine the facial base geometry obtained from multi-view stereo using estimated diffuse and specular photometric information. This allows final submillimeter surface mesostructure detail to be obtained via shape-from-specularity. The final system uses commodity components and produces models suitable for authoring high-quality digital human characters.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article},
}
Mell, Johnathan; Gratch, Jonathan
IAGO: Interactive Arbitration Guide Online Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 1510–1512, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016.
@inproceedings{mell_iago_2016,
title = {IAGO: Interactive Arbitration Guide Online},
author = {Johnathan Mell and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=2937230},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {1510–1512},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Automated negotiation between two agents has been the subject of much research focused on optimization and efficiency. However, human-agent negotiation represents a field in which real-world considerations can be more fully explored. Furthermore, teaching negotiation and other interpersonal skills requires long periods of practice with open-ended dialogues and partners. The API presented in this paper represents a novel platform on which to conduct human-agent research and facilitate teaching negotiation tactics in a longitudinal way. We present a prototype demonstration that is real-time, rapidly distributable, and allows more actions than current platforms of negotiation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chollet, Mathieu; Wortwein, Torsten; Morency, Louis-Philippe; Scherer, Stefan
A Multimodal Corpus for the Assessment of Public Speaking Ability and Anxiety Proceedings Article
In: Proceedings of the LREC 2016, Tenth International Conference on Language Resources and Evaluation, pp. 488–495, European Language Resources Association, Portoroz, Slovenia, 2016, ISBN: 978-2-9517408-9-1.
@inproceedings{chollet_multimodal_2016,
title = {A Multimodal Corpus for the Assessment of Public Speaking Ability and Anxiety},
author = {Mathieu Chollet and Torsten Wortwein and Louis-Philippe Morency and Stefan Scherer},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/599_Paper.pdf},
isbn = {978-2-9517408-9-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the LREC 2016, Tenth International Conference on Language Resources and Evaluation},
pages = {488–495},
publisher = {European Language Resources Association},
address = {Portoroz, Slovenia},
abstract = {The ability to efficiently speak in public is an essential asset for many professions and is used in everyday life. As such, tools enabling the improvement of public speaking performance and the assessment and mitigation of anxiety related to public speaking would be very useful. Multimodal interaction technologies, such as computer vision and embodied conversational agents, have recently been investigated for the training and assessment of interpersonal skills. One central requirement for these technologies is multimodal corpora for training machine learning models. This paper addresses the need of these technologies by presenting and sharing a multimodal corpus of public speaking presentations. These presentations were collected in an experimental study investigating the potential of interactive virtual audiences for public speaking training. This corpus includes audio-visual data and automatically extracted features, measures of public speaking anxiety and personality, annotations of participants’ behaviors and expert ratings of behavioral aspects and overall performance of the presenters. We hope this corpus will help other research teams in developing tools for supporting public speaking training.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
de Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
“Do As I Say, Not As I Do:” Challenges in Delegating Decisions to Automated Agents Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 949–956, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016.
@inproceedings{de_melo_as_2016,
title = {``Do As I Say, Not As I Do:'' Challenges in Delegating Decisions to Automated Agents},
author = {Celso M. de Melo and Stacy Marsella and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=2937063},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {949–956},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {There has been growing interest, across various domains, in computer agents that can decide on behalf of humans. These agents have the potential to save considerable time and help humans reach better decisions. One implicit assumption, however, is that, as long as the algorithms that simulate decision-making are correct and capture how humans make decisions, humans will treat these agents similarly to other humans. Here we show that interaction with agents that act on our behalf or on behalf of others is richer and more interesting than initially expected. Our results show that, on the one hand, people are more selfish with agents acting on behalf of others, than when interacting directly with others. We propose that agents increase the social distance with others which, subsequently, leads to increased demand. On the other hand, when people task an agent to interact with others, people show more concern for fairness than when interacting directly with others. In this case, higher psychological distance leads people to consider their social image and the long-term consequences of their actions and, thus, behave more fairly. To support these findings, we present an experiment where people engaged in the ultimatum game, either directly or via an agent, with others or agents representing others. We show that these patterns of behavior also occur in a variant of the ultimatum game – the impunity game – where others have minimal power over the final outcome. Finally, we study how social value orientation – i.e., people’s propensity for cooperation – impact these effects. These results have important implications for our understanding of the psychological mechanisms underlying interaction with agents, as well as practical implications for the design of successful agents that act on our behalf or on behalf of others.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2008
Parsons, Thomas D.; Rizzo, Albert
Initial Validation of a Virtual Environment for Assessment of Memory Functioning: Virtual Reality Cognitive Performance Assessment Test Journal Article
In: CyberPsychology and Behavior, vol. 11, no. 1, pp. 16–24, 2008.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_initial_2008,
  author    = {Thomas D. Parsons and Albert Rizzo},
  title     = {Initial Validation of a Virtual Environment for Assessment of Memory Functioning: Virtual Reality Cognitive Performance Assessment Test},
  journal   = {CyberPsychology and Behavior},
  volume    = {11},
  number    = {1},
  pages     = {16–24},
  year      = {2008},
  date      = {2008-01-01},
  url       = {http://ict.usc.edu/pubs/Initial%20Validation%20of%20a%20Virtual%20Environment%20for%20Assessment%20of%20Memory%20Functioning-%20Virtual%20Reality%20Cognitive%20Performance%20Assessment%20Test.pdf},
  abstract  = {The current project is an initial attempt at validating the Virtual Reality Cognitive Performance Assessment Test (VRCPAT), a virtual environment–based measure of learning and memory. To examine convergent and discriminant validity, a multitrait–multimethod matrix was used in which we hypothesized that the VRCPAT’s total learning and memory scores would correlate with other neuropsychological measures involving learning and memory but not with measures involving potential confounds (i.e., executive functions; attention; processing speed; and verbal fluency). Using a sequential hierarchical strategy, each stage of test development did not proceed until specified criteria were met. The 15-minute VRCPAT battery and a 1.5-hour in-person neuropsychological assessment were conducted with a sample of 30 healthy adults, between the ages of 21 and 36, that included equivalent distributions of men and women from ethnically diverse populations. Results supported both convergent and discriminant validity. That is, findings suggest that the VRCPAT measures a capacity that is (a) consistent with that assessed by traditional paper-and-pencil measures involving learning and memory and (b) inconsistent with that assessed by traditional paper-and-pencil measures assessing neurocognitive domains traditionally assumed to be other than learning and memory. We conclude that the VRCPAT is a valid test that provides a unique opportunity to reliably and efficiently study memory function within an ecologically valid environment.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {article},
}
Morie, Jacquelyn; Verhulsdonck, Gustav
Body/Persona/Action! Emerging Non-anthropomorphic Communication and Interaction in Virtual Worlds Proceedings Article
In: Proceedings of the ACE 2008 ACM International Conference on Advances in Computer Entertainment Technology, Yokohama, Japan, 2008.
Abstract | Links | BibTeX | Tags:
@inproceedings{morie_bodypersonaaction_2008,
  author    = {Jacquelyn Morie and Gustav Verhulsdonck},
  title     = {Body/Persona/Action! Emerging Non-anthropomorphic Communication and Interaction in Virtual Worlds},
  booktitle = {Proceedings of the ACE 2008 ACM International Conference on Advances in Computer Entertainment Technology},
  address   = {Yokohama, Japan},
  year      = {2008},
  date      = {2008-01-01},
  url       = {http://ict.usc.edu/pubs/Body%20Persona%20Action%20Emerging%20Non-anthropomorphic%20Communication%20and%20Interaction%20in%20Virtual%20Worlds.pdf},
  abstract  = {Avatars are traditionally understood as representing their human counterpart in virtual contexts by closely mimicking their real world physical characteristics. A new approach to designing avatars around non-anthropomorphic (non-human) characteristics currently questions the use of anthropomorphic principles and expands the use of avatars for virtual world interaction and communication. This paper provides a brief history of non-anthropomorphic avatars, with a focus on exploring the current use of such avatars in virtual worlds. In order to explain the shift in degree of anthropomorphism, we discuss Goffman's theory of symbolic interactionism, which holds that the self is constructed as a persona through social performance and relates identity to social behavior rather than appearance. Since non-anthropomorphic avatars are persistent characters engaged in a prolonged performance in virtual worlds, their use also may motivate emerging social mores, politics and ideologies. This paper argues that such avatar species create new social interactions and modes of communication that may signal interesting directions for future research.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
2007
Morie, Jacquelyn Ford
Performing in (virtual) spaces: embodiment and being in virtual environments Journal Article
In: International Journal of Performance Arts and Digital Media, vol. 3, no. 2-3, pp. 123–138, 2007.
Abstract | Links | BibTeX | Tags:
@article{morie_performing_2007,
title = {Performing in (virtual) spaces: embodiment and being in virtual environments},
author = {Jacquelyn Ford Morie},
url = {http://ict.usc.edu/pubs/Performing%20in%20(virtual)%20spaces%20-%20embodiment%20and%20being%20in%20virtual%20environments.pdf},
year = {2007},
date = {2007-12-01},
journal = {International Journal of Performance Arts and Digital Media},
volume = {3},
number = {2-3},
pages = {123–138},
abstract = {This paper focuses on how the body has been recontextualised in the age of digital technology, especially through the phenomenon of Virtual Reality, and specifically on fully immersive VR environments made as art or performative installations. It discusses the progression in form and function from other digital media or 'cybermedia' to fully immersive virtual environments (VEs). This paper attempts to explicate the specialised and intrinsic qualities of 'Being' in immersive VEs, and how it impacts both the experience of the embodied person in the virtual environment, and our thinking about everyday reality. The unique state of Being in immersive VEs has created a paradigm shift in what humans are now able to experience, and affects how we understand our embodied selves in an increasingly digital world. Because of this, the contributions of visual and performance artists to VE’s continued development is key to how we will know and comprehend ourselves in the near and far future as creatures existing in both the physical and the digital domains. The paper draws upon twenty years as a professional Virtual Reality 'maker' who has trained in both Computer Science and in Art, and finds fascinating affinities between these disciplines in the space of the VE where people and performers interact in new embodied modalities.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kenny, Patrick G.; Hartholt, Arno; Gratch, Jonathan; Swartout, William; Traum, David; Marsella, Stacy C.; Piepol, Diane
Building Interactive Virtual Humans for Training Environments Proceedings Article
In: Interservice/Industry Training, Simulation and Education Conference (I/ITSEC), Orlando, FL, 2007.
Abstract | Links | BibTeX | Tags: MedVR, Social Simulation, Virtual Humans
@inproceedings{kenny_building_2007,
  author    = {Patrick G. Kenny and Arno Hartholt and Jonathan Gratch and William Swartout and David Traum and Stacy C. Marsella and Diane Piepol},
  title     = {Building Interactive Virtual Humans for Training Environments},
  booktitle = {Interservice/Industry Training, Simulation and Education Conference (I/ITSEC)},
  address   = {Orlando, FL},
  year      = {2007},
  date      = {2007-11-01},
  url       = {http://ict.usc.edu/pubs/Building%20Interactive%20Virtual%20Humans%20for%20Training%20Environments.pdf},
  abstract  = {There is a great need in the Joint Forces to have human to human interpersonal training for skills such as negotiation, leadership, interviewing and cultural training. Virtual environments can be incredible training tools if used properly and used for the correct training application. Virtual environments have already been very successful in training Warfighters how to operate vehicles and weapons systems. At the Institute for Creative Technologies (ICT) we have been exploring a new question: can virtual environments be used to train Warfighters in interpersonal skills such as negotiation, tactical questioning and leadership that are so critical for success in the contemporary operating environment? Using embodied conversational agents to create this type of training system has been one of the goals of the Virtual Humans project at the institute. ICT has a great deal of experience building complex, integrated and immersive training systems that address the human factor needs for training experiences. This paper will address the research, technology and value of developing virtual humans for training environments. This research includes speech recognition, natural language understanding & generation, dialogue management, cognitive agents, emotion modeling, question response managers, speech generation and non-verbal behavior. Also addressed will be the diverse set of training environments we have developed for the system, from single computer laptops to multi-computer immersive displays to real and virtual integrated environments. This paper will also discuss the problems, issues and solutions we encountered while building these systems. The paper will recount subject testing we have performed in these environments and results we have obtained from users. Finally the future of this type of Virtual Humans technology and training applications will be discussed.},
  keywords  = {MedVR, Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Morie, Jacquelyn; Tortell, Rebecca; Williams, Josh
Would You Like to Play a Game? Experience and Expectation in Game-Based Learning Environments Book Section
In: Computer Games and Team and Individual Learning, Amsterdam, The Netherlands, 2007.
@comment{NOTE(review): morie_would_2007 is an @incollection but is missing the required "publisher" field (and has no editor); the address "Amsterdam, The Netherlands" suggests the publisher is presumably Elsevier — confirm against the book before adding.}
@incollection{morie_would_2007,
title = {Would You Like to Play a Game? Experience and Expectation in Game-Based Learning Environments},
author = {Jacquelyn Morie and Rebecca Tortell and Josh Williams},
year = {2007},
date = {2007-11-01},
booktitle = {Computer Games and Team and Individual Learning},
address = {Amsterdam, The Netherlands},
abstract = {We present results from a series of experiments that looked at how previous experience and immediate priming affect a user's arousal state, performance and memory in a virtual environment used for training. We found that people's game play experience had effects on these measures, and that if participants expected the environment to be a game, they approached it with expectations that were not always conducive to optimal training. We suggest that the type of game being used for training will have the best outcome if users are familiar with that mode and have the appropriate schema to approach the training.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Gordon, Andrew S.; Cao, Yong; Swanson, Reid
Automated Story Capture From Internet Weblogs Proceedings Article
In: Proceedings of the 4th International Conference on Knowledge Capture, Whistler, BC, 2007.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_automated_2007,
title = {Automated Story Capture From Internet Weblogs},
author = {Andrew S. Gordon and Yong Cao and Reid Swanson},
url = {http://ict.usc.edu/pubs/Automated%20Story%20Capture%20From%20Internet%20Weblogs.pdf},
year = {2007},
date = {2007-10-01},
booktitle = {Proceedings of the 4th International Conference on Knowledge Capture},
address = {Whistler, BC},
abstract = {Among the most interesting ways that people share knowledge is through the telling of stories, i.e. first-person narratives about real life experiences. Millions of these stories appear in Internet weblogs, offering a potentially valuable resource for future knowledge management and training applications. In this paper we describe efforts to automatically capture stories from Internet weblogs by extracting them using statistical text classification techniques. We evaluate the precision and recall performance of competing approaches. We describe the large-scale application of story extraction technology to Internet weblogs, producing a corpus of stories with over a billion words.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Oh, Sejin; Gratch, Jonathan; Woo, Woontack
Explanatory Style for Socially Interactive Agents Proceedings Article
In: Lecture Notes in Computer Science, Lisbon, Portugal, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{oh_explanatory_2007,
title = {Explanatory Style for Socially Interactive Agents},
author = {Sejin Oh and Jonathan Gratch and Woontack Woo},
url = {http://ict.usc.edu/pubs/Explanatory%20Style%20for%20Socially%20Interactive%20Agents.pdf},
year = {2007},
date = {2007-09-01},
booktitle = {Lecture Notes in Computer Science},
address = {Lisbon, Portugal},
abstract = {Recent years have seen an explosion of interest in computational models of socio-emotional processes, both as a mean to deepen understanding of human behavior and as a mechanism to drive a variety of training and entertainment applications. In contrast with work on emotion, where research groups have developed detailed models of emotional processes, models of personality have emphasized shallow surface behavior. Here, we build on computational appraisal models of emotion to better characterize dispositional differences in how people come to understand social situations. Known as explanatory style, this dispositional factor plays a key role in social interactions and certain socio-emotional disorders, such as depression. Building on appraisal and attribution theories, we model key conceptual variables underlying the explanatory style, and enable agents to exhibit different explanatory tendencies according to their personalities. We describe an interactive virtual environment that uses the model to allow participants to explore individual differences in the explanation of social events, with the goal of encouraging the development of perspective taking and emotion-regulatory skills.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Roque, Antonio; Leuski, Anton; Georgiou, Panayiotis G.; Gerten, Jillian; Martinovski, Bilyana; Narayanan, Shrikanth; Robinson, Susan; Vaswani, Ashish
Hassan: A Virtual Human for Tactical Questioning Proceedings Article
In: 8th SIGdial Workshop on Discourse and Dialogue, Antwerp, Belgium, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_hassan_2007,
title = {Hassan: A Virtual Human for Tactical Questioning},
author = {David Traum and Antonio Roque and Anton Leuski and Panayiotis G. Georgiou and Jillian Gerten and Bilyana Martinovski and Shrikanth Narayanan and Susan Robinson and Ashish Vaswani},
url = {http://ict.usc.edu/pubs/Hassan-%20A%20Virtual%20Human%20for%20Tactical%20Questioning%20.pdf},
year = {2007},
date = {2007-09-01},
booktitle = {8th SIGdial Workshop on Discourse and Dialogue},
address = {Antwerp, Belgium},
abstract = {We present Hassan, a virtual human who engages in Tactical Questioning dialogues. We describe the tactical questioning domain, the motivation for this character, the specific architecture and present brief examples and an evaluation.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Fron, Janine; Fullerton, Tracy; Morie, Jacquelyn; Pearce, Celia
The Hegemony of Play Proceedings Article
In: Proceedings of DiGRA: Situated Play, Tokyo, Japan, 2007.
Abstract | Links | BibTeX | Tags:
@inproceedings{fron_hegemony_2007,
title = {The Hegemony of Play},
author = {Janine Fron and Tracy Fullerton and Jacquelyn Morie and Celia Pearce},
url = {http://ict.usc.edu/pubs/The%20Hegemony%20of%20Play.pdf},
year = {2007},
date = {2007-09-01},
booktitle = {Proceedings of DiGRA: Situated Play},
address = {Tokyo, Japan},
abstract = {In this paper, we introduce the concept of a "Hegemony of Play," to critique the way in which a complex layering of technological, commercial and cultural power structures have dominated the development of the digital game industry over the past 35 years, creating an entrenched status quo which ignores the needs and desires of "minority" players such as women and "non-gamers," who in fact represent the majority of the population. Drawing from the history of pre-digital games, we demonstrate that these practices have "narrowed the playing field," and contrary to conventional wisdom, have actually hindered, rather than boosted, its commercial success. We reject the inevitability of these power structures, and urge those in game studies to "step up to the plate" and take a more proactive stance in questioning and critiquing the status of the Hegemony of Play.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lance, Brent; Marsella, Stacy C.
Emotionally Expressive Head and Body Movement During Gaze Shifts Proceedings Article
In: 7th International Conference on Intelligent Virtual Agents (IVA 2007), Paris, France, 2007.
Abstract | Links | BibTeX | Tags: Social Simulation
@inproceedings{lance_emotionally_2007,
  author    = {Brent Lance and Stacy C. Marsella},
  title     = {Emotionally Expressive Head and Body Movement During Gaze Shifts},
  booktitle = {7th International Conference on Intelligent Virtual Agents (IVA 2007)},
  address   = {Paris, France},
  year      = {2007},
  date      = {2007-09-01},
  url       = {http://ict.usc.edu/pubs/Emotionally%20Expressive%20Head%20and%20Body%20Movement%20During%20Gaze%20Shifts.pdf},
  abstract  = {The current state of the art virtual characters fall far short of characters produced by skilled animators. One reason for this is that the physical behaviors of virtual characters do not express the emotions and attitudes of the character adequately. A key deficiency possessed by virtual characters is that their gaze behavior is not emotionally expressive. This paper describes work on expressing emotion through head movement and body posture during gaze shifts, with intent to integrate a model of emotionally expressive eye movement into this work in the future. The paper further describes an evaluation showing that users can recognize the emotional states generated by the model.},
  keywords  = {Social Simulation},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Roque, Antonio; Traum, David
A Model of Compliance and Emotion for Potentially Adversarial Dialogue Agents Proceedings Article
In: 8th SIGdial Workshop on Discourse and Dialogue, Antwerp, Belgium, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_model_2007,
  author    = {Antonio Roque and David Traum},
  title     = {A Model of Compliance and Emotion for Potentially Adversarial Dialogue Agents},
  booktitle = {8th SIGdial Workshop on Discourse and Dialogue},
  address   = {Antwerp, Belgium},
  year      = {2007},
  date      = {2007-09-01},
  url       = {http://ict.usc.edu/pubs/A%20Model%20of%20Compliance%20and%20Emotion%20for%20Potentially%20Adversarial%20Dialogue%20%20Agents.pdf},
  abstract  = {We present a model of compliance, for domains in which a dialogue agent may become adversarial. This model includes a set of emotions and a set of levels of compliance, and strategies for changing these.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
}
Jonsdottir, Gudny Ragna; Gratch, Jonathan; Fast, Edward; Thórisson, Kristinn R.
Fluid Semantic Back-Channel Feedback in Dialogue: Challenges & Progress Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA), Paris, France, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{jonsdottir_fluid_2007,
title = {Fluid Semantic Back-Channel Feedback in Dialogue: Challenges \& Progress},
author = {Gudny Ragna Jonsdottir and Jonathan Gratch and Edward Fast and Kristinn R. Thórisson},
url = {http://ict.usc.edu/pubs/Fluid%20Semantic%20Back-Channel%20Feedback%20in%20Dialogue-%20Challenges%20&%20Progress.pdf},
year = {2007},
date = {2007-09-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA)},
address = {Paris, France},
abstract = {Participation in natural, real-time dialogue calls for behaviors supported by perception-action cycles from around 100 msec and up. Generating certain kinds of such behaviors, namely envelope feedback, has been possible since the early 90s. Real-time backchannel feedback related to the content of a dialogue has been more difficult to achieve. In this paper we describe our progress in allowing virtual humans to give rapid within-utterance content-specific feedback in real-time dialogue. We present results from human-subject studies of content feedback, where results show that content feedback to a particular phrase or word in human-human dialogue comes 560-2500 msec from the phrase's onset, 1 second on average. We also describe a system that produces such feedback with an autonomous agent in limited topic domains, present performance data of this agent in human-agent interactions experiments and discuss technical challenges in light of the observed human-subject data.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; McDowall, Ian; Yamada, Hideshi; Bolas, Mark; Debevec, Paul
An Interactive 360° Light Field Display Proceedings Article
In: SIGGRAPH, San Diego, CA, 2007.
Abstract | Links | BibTeX | Tags: Graphics, MxR
@inproceedings{jones_interactive_2007,
  author    = {Andrew Jones and Ian McDowall and Hideshi Yamada and Mark Bolas and Paul Debevec},
  title     = {An Interactive 360° Light Field Display},
  booktitle = {SIGGRAPH},
  address   = {San Diego, CA},
  year      = {2007},
  date      = {2007-08-01},
  url       = {http://ict.usc.edu/pubs/An%20Interactive%20360%20Light%20Field%20Display.pdf},
  abstract  = {While a great deal of computer generated imagery is modeled and rendered in 3D, the vast majority of this 3D imagery is shown on 2D displays. Various forms of 3D displays have been contemplated and constructed for at least one hundred years [Lippman 1908], but only recent evolutions in digital capture, computation, and display have made functional and practical 3D displays possible.},
  keywords  = {Graphics, MxR},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
}
Ma, Wan-Chun; Hawkins, Tim; Chabert, Charles-Felix; Bolas, Mark; Peers, Pieter; Debevec, Paul
A system for high-resolution face scanning based on polarized spherical illumination Proceedings Article
In: SIGGRAPH, San Diego, CA, 2007.
Links | BibTeX | Tags: Graphics, MxR
@inproceedings{ma_system_2007,
  author    = {Wan-Chun Ma and Tim Hawkins and Charles-Felix Chabert and Mark Bolas and Pieter Peers and Paul Debevec},
  title     = {A system for high-resolution face scanning based on polarized spherical illumination},
  booktitle = {SIGGRAPH},
  address   = {San Diego, CA},
  year      = {2007},
  date      = {2007-08-01},
  url       = {http://ict.usc.edu/pubs/A%20system%20for%20high-resolution%20face%20scanning%20based%20on%20polarized%20spherical%20illumination.pdf},
  keywords  = {Graphics, MxR},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Ai, Hua; Roque, Antonio; Leuski, Anton; Traum, David
Using Information State to Improve Dialogue Move Identification in a Spoken Dialogue System Proceedings Article
In: Proceedings of the 10th Interspeech Conference, Antwerp, Belgium, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{ai_using_2007,
title = {Using Information State to Improve Dialogue Move Identification in a Spoken Dialogue System},
author = {Hua Ai and Antonio Roque and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/Using%20Information%20State%20to%20Improve%20Dialogue%20Move%20Identification%20in%20a%20Spoken%20Dialogue%20System.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {Proceedings of the 10th Interspeech Conference},
address = {Antwerp, Belgium},
abstract = {In this paper we investigate how to improve the performance of a dialogue move and parameter tagger for a taskoriented dialogue system using the information-state approach. We use a corpus of utterances and information states from an implemented system to train and evaluate a tagger, and then evaluate the tagger in an on-line system. Use of information state context is shown to improve performance of the system.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; Traum, David
Creating Spoken Dialogue Characters from Corpora without Annotations Proceedings Article
In: Interspeech 2007, Antwerp, Belgium, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gandhe_creating_2007,
title = {Creating Spoken Dialogue Characters from Corpora without Annotations},
author = {Sudeep Gandhe and David Traum},
url = {http://ict.usc.edu/pubs/Creating%20Spoken%20Dialogue%20Characters%20from%20Corpora%20without%20Annotations%20.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {Interspeech 2007},
address = {Antwerp, Belgium},
abstract = {Virtual humans are being used in a number of applications, including simulation-based training, multi-player games, and museum kiosks. Natural language dialogue capabilities are an essential part of their human-like persona. These dialogue systems have a goal of being believable and generally have to operate within the bounds of their restricted domains. Most dialogue systems operate on a dialogue-act level and require extensive annotation efforts. Semantic annotation and rule authoring have long been known as bottlenecks for developing dialogue systems for new domains. In this paper, we investigate several dialogue models for virtual humans that are trained on an unannotated human-human corpus. These are inspired by information retrieval and work on the surface text level. We evaluate these in text-based and spoken interactions and also against the upper baseline of human-human dialogues.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Jones, Andrew; McDowall, Ian; Yamada, Hideshi; Bolas, Mark; Debevec, Paul
Rendering for an Interactive 360 Degree Light Field Display Proceedings Article
In: ACM SIGGRAPH conference proceedings, San Diego, CA, 2007.
Abstract | Links | BibTeX | Tags: Graphics, MxR
@inproceedings{jones_rendering_2007,
title = {Rendering for an Interactive 360 Degree Light Field Display},
author = {Andrew Jones and Ian McDowall and Hideshi Yamada and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/Rendering%20for%20an%20Interactive%20360%20Light%20Field%20Display.pdf},
year = {2007},
date = {2007-08-01},
booktitle = {ACM SIGGRAPH conference proceedings},
address = {San Diego, CA},
abstract = {We describe a set of rendering techniques for an autostereoscopic light field display able to present interactive 3D graphics to multiple simultaneous viewers 360 degrees around the display. The display consists of a high-speed video projector, a spinning mirror covered by a holographic diffuser, and FPGA circuitry to decode specially rendered DVI video signals. The display uses a standard programmable graphics card to render over 5,000 images per second of interactive 3D graphics, projecting 360-degree views with 1.25 degree separation up to 20 updates per second. We describe the system's projection geometry and its calibration process, and we present a multiple-center-of-projection rendering technique for creating perspective-correct images from arbitrary viewpoints around the display. Our projection technique allows correct vertical perspective and parallax to be rendered for any height and distance when these parameters are known, and we demonstrate this effect with interactive raster graphics using a tracking system to measure the viewer's height and distance. We further apply our projection technique to the display of photographed light fields with accurate horizontal and vertical parallax. We conclude with a discussion of the display's visual accommodation performance and discuss techniques for displaying color imagery.},
keywords = {Graphics, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Sagae, Kenji; Tsujii, Jun
Dependency parsing and domain adaptation with data-driven LR models and parser ensembles Proceedings Article
In: Proceedings of the CoNLL 2007 Shared Task. Joint Conferences on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, Prague, Czech Republic, 2007.
Abstract | Links | BibTeX | Tags:
@inproceedings{sagae_dependency_2007,
title = {Dependency parsing and domain adaptation with data-driven {LR} models and parser ensembles},
author = {Kenji Sagae and Jun Tsujii},
url = {http://ict.usc.edu/pubs/Dependency%20Parsing%20and%20Domain%20Adaptation%20with%20LR%20Models%20and%20Parser%20Ensembles.pdf},
year = {2007},
date = {2007-07-01},
booktitle = {Proceedings of the CoNLL 2007 Shared Task. Joint Conferences on Empirical Methods in Natural Language Processing and Computational Natural Language Learning},
address = {Prague, Czech Republic},
abstract = {We present a data-driven variant of the LR algorithm for dependency parsing, and extend it with a best-first search for probabilistic generalized data-driven LR dependency parsing. Parser actions are determined by a machine learning component, based on features that represent the current state of the parser. We apply this parsing framework to both tracks of the CoNLL 2007 shared task on dependency parsing, in each case taking advantage of multiple models trained with different learners. In the multilingual track, we train three data-driven LR models for each of the ten languages, and combine the analyses obtained with each individual model using a maximum spanning tree voting scheme. In the domain adaptation track, we use two models to parse unlabeled data in the target domain to supplement the labeled training set in the source domain, in a scheme similar to one iteration of co-training.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kenny, Patrick G.; Hartholt, Arno; Gratch, Jonathan; Traum, David; Marsella, Stacy C.; Swartout, William
The More the Merrier: Multi-Party Negotiation with Virtual Humans Proceedings Article
In: AAAI Conference On Artificial Intelligence; Proceedings of the 22nd National Conference on Artificial Intelligence, pp. 1970–1971, 2007.
Abstract | Links | BibTeX | Tags: MedVR, Social Simulation, Virtual Humans
@inproceedings{kenny_more_2007,
title = {The More the Merrier: Multi-Party Negotiation with Virtual Humans},
author = {Patrick G. Kenny and Arno Hartholt and Jonathan Gratch and David Traum and Stacy C. Marsella and William Swartout},
url = {http://ict.usc.edu/pubs/The%20More%20the%20Merrier-%20Multi-Party%20Negotiation%20with%20Virtual%20Humans.pdf},
year = {2007},
date = {2007-07-01},
booktitle = {AAAI Conference On Artificial Intelligence; Proceedings of the 22nd National Conference on Artificial Intelligence},
volume = {2},
pages = {1970--1971},
abstract = {The goal of the Virtual Humans Project at the University of Southern California's Institute for Creative Technologies is to enrich virtual training environments with virtual humans – autonomous agents that support face-to-face interaction with trainees in a variety of roles – through bringing together many different areas of research including speech recognition, natural language understanding, dialogue management, cognitive modeling, emotion modeling, non-verbal behavior and speech and knowledge management. The demo at AAAI will focus on our work using virtual humans to train negotiation skills. Conference attendees will negotiate with a virtual human doctor and elder to try to move a clinic out of harm's way in single and multi-party negotiation scenarios using the latest iteration of our Virtual Humans framework. The user will use natural speech to talk to the embodied agents, who will respond in accordance with their internal task model and state. The characters will carry out a multi-party dialogue with verbal and non-verbal behavior. A video of a single-party version of the scenario was shown at AAAI-06. This new interactive demo introduces several new features, including multi-party negotiation, dynamically generated non-verbal behavior and a central ontology.},
keywords = {MedVR, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Robinson, Susan; Roque, Antonio; Vaswani, Ashish; Traum, David; Hernandez, Charles; Millspaugh, Bill
Evaluation of a Spoken Dialogue System for Virtual Reality Call for Fire Training Proceedings Article
In: 10th International Pragmatics Conference, Gothenburg, Sweden, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{robinson_evaluation_2007,
title = {Evaluation of a Spoken Dialogue System for Virtual Reality Call for Fire Training},
author = {Susan Robinson and Antonio Roque and Ashish Vaswani and David Traum and Charles Hernandez and Bill Millspaugh},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20a%20Spoken%20Dialogue%20System%20for%20Virtual%20Reality%20Call%20for%20Fire%20Training.pdf},
year = {2007},
date = {2007-07-01},
booktitle = {10th International Pragmatics Conference},
address = {Gothenburg, Sweden},
abstract = {We present an evaluation of a spoken dialogue system that engages in dialogues with soldiers training in an immersive Call for Fire (CFF) simulation. We briefly describe aspects of the Joint Fires and Effects Trainer System, and the Radiobot-CFF dialogue system, which can engage in voice communications with a trainee in call for fire dialogues. An experiment is described to judge performance of the Radiobot CFF system compared with human radio operators. Results show that while the current version of the system is not quite at humanperformance levels, it is already viable for training interaction and as an operator-controller aid.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kenny, Patrick G.; Rizzo, Albert; Parsons, Thomas D.; Gratch, Jonathan; Swartout, William
A Virtual Human Agent for Training Novice Therapist Clinical Interviewing Skills Proceedings Article
In: Annual Review of CyberTherapy and Telemedicine, Washington D.C., 2007.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{kenny_virtual_2007,
title = {A Virtual Human Agent for Training Novice Therapist Clinical Interviewing Skills},
author = {Patrick G. Kenny and Albert Rizzo and Thomas D. Parsons and Jonathan Gratch and William Swartout},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Human%20Agent%20for%20Training%20Novice%20Therapist%20Clinical%20Interviewing%20Skills.pdf},
year = {2007},
date = {2007-06-01},
booktitle = {Annual Review of CyberTherapy and Telemedicine},
address = {Washington D.C.},
abstract = {Virtual Reality (VR) is rapidly evolving into a pragmatically usable technology for mental health (MH) applications. Over the last five years, the technology for creating virtual humans (VHs) has evolved to the point where they are no longer regarded as simple background characters, but rather can serve a functional interactional role. Our current project involves the construction of a natural language-capable virtual client named “Justin,” which derived from a military negotiation train- ing tool into a virtual therapy patient for training novice clinicians the art of clinical interviewing with a resistant client. Justin portrays a 16-year old male with a conduct disorder who is being forced to par- ticipate in therapy by his family. The system uses a sophisticated natural language interface that al- lows novice clinicians to practice asking interview questions in an effort to create a positive therapeu- tic alliance with this very challenging virtual client. Herein we proffer a description of our iterative de- sign process and outline our long term vision.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Thagard, Paul; Ditto, Peter; Gratch, Jonathan; Marsella, Stacy C.; Westen, Drew
Emotional Cognition in the Real World Proceedings Article
In: Proceedings of the Twenty-Ninth Annual Meeting of the Cognitive Science Society, Nashville, TN, 2007.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{thagard_emotional_2007,
title = {Emotional Cognition in the Real World},
author = {Paul Thagard and Peter Ditto and Jonathan Gratch and Stacy C. Marsella and Drew Westen},
url = {http://ict.usc.edu/pubs/Emotional%20Cognition%20in%20the%20Real%20World.pdf},
year = {2007},
date = {2007-06-01},
booktitle = {Proceedings of the Twenty-Ninth Annual Meeting of the Cognitive Science Society},
address = {Nashville, TN},
abstract = {There is increasing appreciation in cognitive science of the impact of emotions on many kinds of thinking, from decision making to scientific discovery. This appreciation has developed in all the fields of cognitive science, including, psychology, philosophy, artificial intelligence, and linguistics, and anthropology. The purpose of the proposed symposium is to report and discuss new investigations of the impact of emotion on cognitive processes, in particular ones that are important in real life situations. We will approach the practical importance of emotional cognition from a variety of disciplinary perspectives: social psychology (Ditto), clinical psychology (Westen), computer science (Gratch and Marsella), and philosophy and neuroscience (Thagard). In order to provide integration across these approaches, we will try to address a fundamental set of questions, including: 1. How do emotions interact with basic cognitive processes? 2. What are the positive contributions of emotions to various kinds of thinking in real world situations? 3. How do emotions sometimes bias thinking in real world situations? 4. How can understanding of the psychology and neuroscience of emotional cognition be used to improve the effectiveness of real world thinking?},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; Swanson, Reid
Generalizing Semantic Role Annotations Across Syntactically Similar Verbs Proceedings Article
In: Proceedings of the 2007 Meeting of the Association for Computational Linguistics (ACL-07), Prague, Czech Republic, 2007.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_generalizing_2007,
title = {Generalizing Semantic Role Annotations Across Syntactically Similar Verbs},
author = {Andrew S. Gordon and Reid Swanson},
url = {http://ict.usc.edu/pubs/Generalizing%20Semantic%20Role%20Annotations%20Across%20Syntactically%20Similar%20Verbs.pdf},
year = {2007},
date = {2007-06-01},
booktitle = {Proceedings of the 2007 Meeting of the Association for Computational Linguistics (ACL-07)},
address = {Prague, Czech Republic},
abstract = {Large corpora of parsed sentences with semantic role labels (e.g. PropBank) provide training data for use in the creation of high-performance automatic semantic role labeling systems. Despite the size of these corpora, individual verbs (or role-sets) often have only a handful of instances in these corpora, and only a fraction of English verbs have even a single annotation. In this paper, we describe an approach for dealing with this sparse data problem, enabling accurate semantic role labeling for novel verbs (rolesets) with only a single training example. Our approach involves the identification of syntactically similar verbs found in PropBank, the alignment of arguments in their corresponding rolesets, and the use of their corresponding annotations in PropBank as surrogate training data.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Jan, Dusan; Traum, David
Dynamic Movement and Positioning of Embodied Agents in Multiparty Conversations Proceedings Article
In: ACL 2007 Workshop on Embodied Language Processing, Prague, Czech Republic, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{jan_dynamic_2007,
title = {Dynamic Movement and Positioning of Embodied Agents in Multiparty Conversations},
author = {Dusan Jan and David Traum},
url = {http://ict.usc.edu/pubs/Dynamic%20Movement%20and%20Positioning%20of%20Embodied%20Agents%20in%20Multiparty%20%20Conversations.pdf},
year = {2007},
date = {2007-06-01},
booktitle = {ACL 2007 Workshop on Embodied Language Processing},
address = {Prague, Czech Republic},
abstract = {For embodied agents to engage in realistic multiparty conversation, they must stand in appropriate places with respect to other agents and the environment. When these factors change, for example when an agent joins a conversation, the agents must dynamically move to a new location and/or orientation to accommodate. This paper presents an algorithm for simulating the movement of agents based on observed human behavior using techniques developed for pedestrian movement in crowd simulations. We extend a previous group conversation simulation to include an agent motion algorithm. We examine several test cases and show how the simulation generates results that mirror real-life conversation settings.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Paek, Tim; Gandhe, Sudeep; Chickering, David Maxwell; Ju, Yun Cheng
Handling Out-of-Grammar Commands in Mobile Speech Interaction Using Backoff Filler Models Proceedings Article
In: Proceedings of the Workshop on Grammar-Based Approaches to Spoken Language Processing, pp. 33–40, 2007.
Abstract | Links | BibTeX | Tags:
@inproceedings{paek_handling_2007,
title = {Handling Out-of-Grammar Commands in Mobile Speech Interaction Using Backoff Filler Models},
author = {Tim Paek and Sudeep Gandhe and David Maxwell Chickering and Yun Cheng Ju},
url = {http://ict.usc.edu/pubs/Handling%20Out-of-Grammar%20Commands%20in%20Mobile%20Speech%20Interaction%20Using%20Backoff%20Filler%20Models.pdf},
year = {2007},
date = {2007-06-01},
booktitle = {Proceedings of the Workshop on Grammar-Based Approaches to Spoken Language Processing},
pages = {33--40},
abstract = {In command and control (C&C) speech interaction, users interact by speaking commands or asking questions typically specified in a context-free grammar (CFG). Unfortunately, users often produce out-ofgrammar (OOG) commands, which can result in misunderstanding or nonunderstanding. We explore a simple approach to handling OOG commands that involves generating a backoff grammar from any CFG using filler models, and utilizing that grammar for recognition whenever the CFG fails. Working within the memory footprint requirements of a mobile C&C product, applying the approach yielded a 35% relative reduction in semantic error rate for OOG commands. It also improved partial recognitions for enabling clarification dialogue.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Crooks, Valerie C.; Parsons, Thomas D.; Buckwalter, John Galen
Validation of the Cognitive Assessment of Later Life Status (CALLS) instrument: a computerized telephonic measure Journal Article
In: BMC Neurology, vol. 7, no. 10, 2007.
Abstract | Links | BibTeX | Tags: MedVR
@article{crooks_validation_2007,
title = {Validation of the Cognitive Assessment of Later Life Status ({CALLS}) instrument: a computerized telephonic measure},
author = {Valerie C. Crooks and Thomas D. Parsons and John Galen Buckwalter},
url = {http://ict.usc.edu/pubs/Validation%20of%20the%20Cognitive%20Assessment%20of%20Later%20Life%20Status%20(CALLS)%20instrument-%20a%20computerized%20telephonic%20measure.pdf},
doi = {10.1186/1471-2377-7-10},
year = {2007},
date = {2007-05-01},
journal = {BMC Neurology},
volume = {7},
number = {10},
abstract = {Background: Brief screening tests have been developed to measure cognitive performance and dementia, yet they measure limited cognitive domains and often lack construct validity. Neuropsychological assessments, while comprehensive, are too costly and time-consuming for epidemiological studies. This study's aim was to develop a psychometrically valid telephone administered test of cognitive function in aging. Methods: Using a sequential hierarchical strategy, each stage of test development did not proceed until specified criteria were met. The 30 minute Cognitive Assessment of Later Life Status (CALLS) measure and a 2.5 hour in-person neuropsychological assessment were conducted with a randomly selected sample of 211 participants 65 years and older that included equivalent distributions of men and women from ethnically diverse populations. Results: Overall Cronbach's coefficient alpha for the CALLS test was 0.81. A principal component analysis of the CALLS tests yielded five components. The CALLS total score was significantly correlated with four neuropsychological assessment components. Older age and having a high school education or less was significantly correlated with lower CALLS total scores. Females scored better overall than males. There were no score differences based on race. Conclusion: The CALLS test is a valid measure that provides a unique opportunity to reliably and efficiently study cognitive function in large populations.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Robertson, R. Kevin; Nakasujja, Noeline; Wong, Matthew; Musisi, Seggane; Katabira, Elly; Parsons, Thomas D.; Ronald, Allan; Sacktor, Ned
Pattern of neuropsychological performance among HIV positive patients in Uganda Journal Article
In: BMC Neurology, 2007.
Abstract | Links | BibTeX | Tags: MedVR
@article{robertson_pattern_2007,
title = {Pattern of neuropsychological performance among {HIV} positive patients in {Uganda}},
author = {R. Kevin Robertson and Noeline Nakasujja and Matthew Wong and Seggane Musisi and Elly Katabira and Thomas D. Parsons and Allan Ronald and Ned Sacktor},
url = {http://ict.usc.edu/pubs/Pattern%20of%20neuropsychological%20performance%20among%20HIV%20positive%20patients%20in%20Uganda.pdf},
year = {2007},
date = {2007-04-01},
journal = {BMC Neurology},
abstract = {Few studies have examined cognitive functioning of HIV positive patients in sub-Saharan Africa. It cannot be assumed that HIV positive patients in Africa exhibit the same declines as patients in high-resource settings, since there are differences that may influence cognitive functioning including nutrition, history of concomitant disease, and varying HIV strains, among other possibilities. Part of the difficulty of specifying abnormalities in neuropsychological functioning among African HIV positive patients is that there are no readily available African normative databases. The purpose of the current study was to evaluate the pattern of neuropsychological performance in a sample of HIV positive patients in comparison to HIV negative control subjects in Uganda. Methods: The neuropsychological test scores of 110 HIV positive patients (WHO Stage 2},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Marsella, Stacy C.
The Architectural Role of Emotion in Cognitive Systems Book Section
In: Integrated Models of Cognitive Systems, Oxford University Press, New York, 2007.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{gratch_architectural_2007,
title = {The Architectural Role of Emotion in Cognitive Systems},
author = {Jonathan Gratch and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/The%20Architectural%20Role%20of%20Emotion%20in%20Cognitive%20Systems.pdf},
year = {2007},
date = {2007-03-01},
booktitle = {Integrated Models of Cognitive Systems},
publisher = {Oxford University Press},
address = {New York},
abstract = {In this chapter, we will revive an old argument that theories of human emotion can give insight into the design and control of complex cognitive systems. In particular, we claim that appraisal theories of emotion provide essential insight into the influences of emotion over cognition and can help translate such findings into concrete guidance for the design of cognitive systems. Ap- praisal theory claims that emotion plays a central and functional role in sensing external events, characterizing them as opportunity or threats and recruiting the cognitive, physical and social resources needed to adaptively respond. Further, because it argues for a close association be- tween emotion and cognition, the theoretical claims of appraisal theory can be recast as a re- quirement specification for how to build a cognitive system. This specification asserts a set of judgments that must be supported in order to correctly interpret and respond to stimuli and pro- vides a unifying framework for integrating these judgments into a coherent physical or social re- sponse. This chapter elaborates argument in some detail based on our joint experience in build- ing complex cognitive systems and computational models of emotion.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
de Melo, Celso M.; Gratch, Jonathan
Evolving Expression of Emotions through Color in Virtual Humans using Genetic Algorithms Proceedings Article
In: Proceedings of the 1st International Conference on Computational Creativity (ICCC-X), pp. 248–257, 2007, ISBN: 978-989-96001-2-6.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{de_melo_evolving_2007,
title = {Evolving Expression of Emotions through Color in Virtual Humans using Genetic Algorithms},
author = {Celso M. de Melo and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Evolving%20Expression%20of%20Emotions%20through%20Color%20in%20Virtual%20Humans%20using%20Genetic%20Algorithms.pdf},
isbn = {978-989-96001-2-6},
year = {2007},
date = {2007-01-01},
booktitle = {Proceedings of the 1st International Conference on Computational Creativity (ICCC-X)},
pages = {248--257},
abstract = {For centuries artists have been exploring the formal elements of art (lines, space, mass, light, color, sound, etc.) to express emotions. This paper takes this insight to explore new forms of expression for virtual humans which go beyond the usual bodily, facial and vocal expression channels. In particular, the paper focuses on how to use color to influence the perception of emotions in virtual humans. First, a lighting model and filters are used to manipulate color. Next, an evolutionary model, based on genetic algorithms, is developed to learn novel associations between emotions and color. An experiment is then conducted where non-experts evolve mappings for joy and sadness, without being aware that genetic algorithms are used. In a second experiment, the mappings are analyzed with respect to its features and how general they are. Results indicate that the average fitness increases with each new generation, thus suggesting that people are succeeding in creating novel and useful mappings for the emotions. Moreover, the results show consistent differences between the evolved images of joy and the evolved images of sadness.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Zbylut, Michelle L.; Metcalf, Kimberly A.; Kim, Julia; Hill, Randall W.; Rocher, Scott
Army Excellence in Leadership (AXL): A Multimedia Approach to Building Tacit Knowledge and Cultural Reasoning Technical Report
no. Technical Report 1194, 2007.
Abstract | Links | BibTeX | Tags:
@techreport{zbylut_army_2007,
title = {Army Excellence in Leadership ({AXL}): A Multimedia Approach to Building Tacit Knowledge and Cultural Reasoning},
author = {Michelle L. Zbylut and Kimberly A. Metcalf and Julia Kim and Randall W. Hill and Scott Rocher},
url = {http://ict.usc.edu/pubs/Army%20Excellence%20in%20Leadership%20(AXL)-%20A%20Multimedia%20Approach%20to%20Building%20Tacit%20Knowledge%20and%20Cultural%20Reasoning.pdf},
year = {2007},
date = {2007-01-01},
number = {Technical Report 1194},
abstract = {This report presents findings from a preliminary examination of the Army Excellence in Leadership (AXL) system, a leader intervention that targets the development of tacit leadership knowledge and cultural awareness in junior Army officers. Fifty-five junior officers interacted with a pilot version of a cultural awareness module from the AXL system. Results indicated that the AXL approach resulted in improvements in leader judgment on a forced-choice measure. Furthermore, results indicated that cultural issues were more salient to leaders after completion of the cultural awareness module. Reactions to training were generally positive, with officers indicating that the cultural awareness module was useful and stimulated thought. Additionally, this investigation explored the relationship between affect and learning and found that emotional responses to the AXL system were related to learning-relevant variables, such as judgment scores and officer reports that they could apply the training to their activities as a leader.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Buckwalter, John Galen; Geiger, A. M.; Parsons, Thomas D.; Handler, J.; Howes, J.; Lehmer, R. R.
Cognitive Effects of Short-term Use of Raloxifene: A Randomized Clinical Trial Journal Article
In: International Journal of Neuroscience, vol. 117, pp. 1579–1590, 2007.
Abstract | Links | BibTeX | Tags: MedVR
@article{buckwalter_cognitive_2007,
title = {Cognitive Effects of Short-term Use of Raloxifene: A Randomized Clinical Trial},
author = {John Galen Buckwalter and A. M. Geiger and Thomas D. Parsons and J. Handler and J. Howes and R. R. Lehmer},
url = {http://ict.usc.edu/pubs/Cognitive%20Effects%20of%20Short-term%20Use%20of%20Raloxifene-%20A%20Randomized%20Clinical%20Trial.pdf},
year = {2007},
date = {2007-01-01},
journal = {International Journal of Neuroscience},
volume = {117},
pages = {1579--1590},
abstract = {Two questions regarding findings from the Women's Health Initiative are (1) What is the effect of various hormonal regimens including selective estrogen receptor modulators? and (2) Is the negative effect on cognitive functioning related to the older age (65+years) if the women? This study addresses these two questions in a short-term randomized trial of the effects of raloxifene versus alendronate on cognition. The study found only one significant interaction where the raloxifene and alendronate group changed differently across the two testing occasions. Hence, raloxifene does not have any impact, positive or negative, on short-term cognitive functioning when compared to alendronate.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Rizzo, Albert; Graap, Ken; McLay, Robert N.; Perlman, Karen; Rothbaum, Barbara O.; Reger, Greg; Parsons, Thomas D.; Difede, JoAnn; Pair, Jarrell
Virtual Iraq: Initial Case Reports from a VR Exposure Therapy Application for Combat-Related Post Traumatic Stress Disorder Journal Article
In: Virtual Rehabilitation, vol. 27, pp. 124–130, 2007.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@article{rizzo_virtual_2007,
title = {Virtual {Iraq}: Initial Case Reports from a {VR} Exposure Therapy Application for Combat-Related Post Traumatic Stress Disorder},
author = {Rizzo, Albert and Graap, Ken and McLay, Robert N. and Perlman, Karen and Rothbaum, Barbara O. and Reger, Greg and Parsons, Thomas D. and Difede, JoAnn and Pair, Jarrell},
url = {http://ict.usc.edu/pubs/Virtual%20Iraq-%20Initial%20Case%20Reports%20from%20a%20VR%20Exposure%20Therapy%20Application%20for%20Combat-Related%20Post%20Traumatic%20Stress%20Disorder.pdf},
year = {2007},
date = {2007-01-01},
journal = {Virtual Rehabilitation},
volume = {27},
pages = {124--130},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been used with reports of positive outcomes. The aim of the current paper is to present the rationale and brief description of a Virtual Iraq PTSD VR therapy application and present initial findings from two successfully treated patients. The VR treatment environment was created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far, Virtual Iraq consists of a series of customizable virtual scenarios designed to represent relevant Middle Eastern VR contexts for exposure therapy, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system deployed in Iraq and tested by an Army Combat Stress Control Team. Clinical trials are currently underway at Camp Pendleton and at the San Diego Naval Medical Center and the results from two successfully treated patients are presented along with a delineation of our future plans for research and clinical care using this application.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Parsons, Thomas D.; Rizzo, Albert; Bamattre, Jacob; Brennan, John
Virtual Reality Cognitive Performance Assessment Test Journal Article
In: Annual Review of CyberTherapy and Telemedicine, 2007.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_virtual_2007,
title = {Virtual Reality Cognitive Performance Assessment Test},
author = {Parsons, Thomas D. and Rizzo, Albert and Bamattre, Jacob and Brennan, John},
url = {http://ict.usc.edu/pubs/Virtual%20Reality%20Cognitive%20Performance%20Assessment%20Test.pdf},
year = {2007},
date = {2007-01-01},
journal = {Annual Review of CyberTherapy and Telemedicine},
abstract = {Virtual Reality Cognitive Performance Assessment Test (VRCPAT) is a virtual environment based measure of learning and memory. We examined convergent and discriminant validity and hypothesized that the VRCPAT’s Total Learning and Memory scores would correlate with other neuropsychological measures involving learning and memory, but not with measures involving potential confounds (i.e., Executive Functions; Attention; and Processing Speed). Using a sequential hierarchical strategy, each stage of test development did not proceed until specified criteria were met. The 15 minute VRCPAT battery and a 1.5 hour in-person neuropsychological assessment were conducted with a randomly selected sample of 20 healthy adults that included equivalent distributions of men and women from ethnically diverse populations. Results supported both convergent and discriminant validity. That is, findings suggest that the VRCPAT measures a capacity that is 1) consistent with that assessed by traditional paper and pencil measures involving learning and memory; and 2) inconsistent with that assessed by traditional paper and pencil measures assessing neurocognitive domains traditionally assumed to be other than learning and memory. We conclude that the VRCPAT is a valid test that provides a unique opportunity to reliably and efficiently study memory function within an ecologically valid environment.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Wang, Ning; Gerten, Jillian; Fast, Edward; Duffy, Robin
Creating Rapport with Virtual Agents Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA), pp. 125–128, Paris, France, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_creating_2007,
title = {Creating Rapport with Virtual Agents},
author = {Gratch, Jonathan and Wang, Ning and Gerten, Jillian and Fast, Edward and Duffy, Robin},
url = {http://ict.usc.edu/pubs/Creating%20Rapport%20with%20Virtual%20Agents.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA)},
volume = {4722},
pages = {125--128},
address = {Paris, France},
abstract = {Recent research has established the potential for virtual characters to establish rapport with humans through simple contingent nonverbal behaviors. We hypothesized that the contingency, not just the frequency of positive feedback is crucial when it comes to creating rapport. The primary goal in this study was evaluative: can an agent generate behavior that engenders feelings of rapport in human speakers and how does this compare to human generated feedback? A secondary goal was to answer the question: Is contingency (as opposed to frequency) of agent feedback crucial when it comes to creating feelings of rapport? Results suggest that contingency matters when it comes to creating rapport and that agent generated behavior was as good as human listeners in creating rapport. A "virtual human listener" condition performed worse than other conditions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Yeh, Shih-Ching; Rizzo, Albert; McLaughlin, Margaret; Parsons, Thomas D.
VR Enhanced Upper Extremity Motor Training for Post-Stroke Rehabilitation: Task Design, Clinical Experiment and Visualization on Performance and Progress Journal Article
In: Studies in Health Technology and Informatics, vol. 125, pp. 506–511, 2007.
@article{yeh_vr_2007,
title = {{VR} Enhanced Upper Extremity Motor Training for Post-Stroke Rehabilitation: Task Design, Clinical Experiment and Visualization on Performance and Progress},
author = {Yeh, Shih-Ching and Rizzo, Albert and McLaughlin, Margaret and Parsons, Thomas D.},
url = {http://ict.usc.edu/pubs/VR%20Enhanced%20Upper%20Extremity%20Motor%20Training%20for%20Post-Stroke%20Rehabilitation-%20Task%20Design,%20Clinical%20Experiment%20and%20Visualization%20on%20Performance%20and%20Progress.pdf},
year = {2007},
date = {2007-01-01},
journal = {Studies in Health Technology and Informatics},
volume = {125},
pages = {506--511},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Parsons, Thomas D.; Bowerly, Todd; Buckwalter, John Galen; Rizzo, Albert
A controlled clinical comparison of attention performance in children with ADHD in a virtual reality classroom compared to standard neuropsychological methods Journal Article
In: Child Neuropsychology, vol. 13, pp. 363–381, 2007.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_controlled_2007,
title = {A controlled clinical comparison of attention performance in children with {ADHD} in a virtual reality classroom compared to standard neuropsychological methods},
author = {Parsons, Thomas D. and Bowerly, Todd and Buckwalter, John Galen and Rizzo, Albert},
url = {http://ict.usc.edu/pubs/A%20CONTROLLED%20CLINICAL%20COMPARISON%20OF%20ATTENTION%20PERFORMANCE%20IN%20CHILDREN%20WITH%20ADHD%20IN%20A%20VIRTUAL%20REALITY%20CLASSROOM%20COMPARED%20TO%20STANDARD%20NEUROPSYCHOLOGICAL%20METHODS.pdf},
doi = {10.1080/13825580600943473},
year = {2007},
date = {2007-01-01},
journal = {Child Neuropsychology},
volume = {13},
pages = {363--381},
abstract = {In this initial pilot study, a controlled clinical comparison was made of attention performance in children with attention deficit-hyperactivity disorder (ADHD) in a virtual reality (VR) classroom. Ten boys diagnosed with ADHD and ten normal control boys participated in the study. Groups did not significantly differ in mean age, grade level, ethnicity, or handedness. No participants reported simulator sickness following VR exposure. Children with ADHD exhibited more omission errors, commission errors, and overall body movement than normal control children in the VR classroom. Children with ADHD were more impacted by distraction in the VR classroom. VR classroom measures were correlated with traditional ADHD assessment tools and the flatscreen CPT. Of note, the small sample size incorporated in each group and higher WISC-III scores of normal controls might have some bearing on the overall interpretation of results. These data suggested that the Virtual Classroom had good potential for controlled performance assessment within an ecologically valid environment and appeared to parse out significant effects due to the presence of distraction stimuli.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Lane, H. Chad
Metacognition and the Development of Intercultural Competence Proceedings Article
In: Proceedings of the Workshop on Metacognition and Self-Regulated Learning in Intelligent Tutoring Systems at the 13th International Conference on Artificial Intelligence in Education (AIED), pp. 23–32, Marina del Rey, CA, 2007.
Abstract | Links | BibTeX | Tags:
@inproceedings{lane_metacognition_2007,
title = {Metacognition and the Development of Intercultural Competence},
author = {Lane, H. Chad},
url = {http://ict.usc.edu/pubs/Metacognition%20and%20the%20Development%20of%20Intercultural%20Competence.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Proceedings of the Workshop on Metacognition and Self-Regulated Learning in Intelligent Tutoring Systems at the 13th International Conference on Artificial Intelligence in Education (AIED)},
pages = {23--32},
address = {Marina del Rey, CA},
abstract = {We argue that metacognition is a critical component in the development of intercultural competence by highlighting the importance of supporting a learner's self-assessment, self-monitoring, predictive, planning and reflection skills. We also survey several modern immersive cultural learning environments and discuss the role intelligent tutoring and experience management techniques can play to support these metacognitive demands. Techniques for adapting the behaviors of virtual humans to promote cultural learning are discussed, as well as explicit approaches to feedback. We conclude with several suggestions for future research, including the use of existing intercultural development metrics for evaluating learning in immersive environments and to conduct more studies of the use of implicit and explicit feedback to guide learning and establish optimal conditions for acquiring intercultural competence.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Martinovski, Bilyana; Traum, David; Marsella, Stacy C.
Rejection of empathy in negotiation Journal Article
In: Group Decision and Negotiation, vol. 16, pp. 61–76, 2007, ISSN: 0926-2644.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{martinovski_rejection_2007,
title = {Rejection of empathy in negotiation},
author = {Martinovski, Bilyana and Traum, David and Marsella, Stacy C.},
url = {http://ict.usc.edu/pubs/Rejection%20of%20empathy%20in%20negotiation.pdf},
issn = {0926-2644},
year = {2007},
date = {2007-01-01},
journal = {Group Decision and Negotiation},
volume = {16},
pages = {61--76},
abstract = {Trust is a crucial quality in the development of individuals and societies and empathy plays a key role in the formation of trust. Trust and empathy have growing importance in studies of negotiation. However, empathy can be rejected which complicates its role in negotiation. This paper presents a linguistic analysis of empathy by focusing on rejection of empathy in negotiation. Some of the rejections are due to failed recognition of the rejector's needs and desires whereas others have mainly strategic functions gaining momentum in the negotiation. In both cases, rejection of empathy is a phase in the negotiation not a breakdown.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Miller, Karen J.; Parsons, Thomas D.; Whybrow, Peter C.; Herle, Katja; Rasgon, Natalie; Herle, Andre; Martinez, Dorothy; Silverman, Dan H.; Bauer, Michael
Verbal Memory Retrieval Deficits Associated With Untreated Hypothyroidism Journal Article
In: Journal of Neuropsychiatry and Clinical Neurosciences, vol. 19, no. 2, pp. 132–136, 2007.
Abstract | Links | BibTeX | Tags: MedVR
@article{miller_verbal_2007,
title = {Verbal Memory Retrieval Deficits Associated With Untreated Hypothyroidism},
author = {Miller, Karen J. and Parsons, Thomas D. and Whybrow, Peter C. and Herle, Katja and Rasgon, Natalie and Herle, Andre and Martinez, Dorothy and Silverman, Dan H. and Bauer, Michael},
url = {http://ict.usc.edu/pubs/Verbal%20Memory%20Retrieval%20Deficits%20Associated%20With%20Untreated%20Hypothyroidism.pdf},
year = {2007},
date = {2007-01-01},
journal = {Journal of Neuropsychiatry and Clinical Neurosciences},
volume = {19},
number = {2},
pages = {132--136},
abstract = {The effects of inadequate thyroid hormone availability to the brain on adult cognitive function are poorly understood. This study assessed the effects of hypothyroidism on cognitive function using a standard neuropsychological battery in 14 patients suffering from untreated hypothyroidism and complaining of subjective cognitive difficulties in comparison with 10 age-matched healthy comparison subjects. Significant differences between groups were limited to verbal memory retrieval as measured by the California Verbal Learning Test (CVLT). On short delay free recall, long delay free recall, and long delay cued recall, significant differences remained between groups despite the limited statistical power of this study. There were no significant results found between groups on attentional or nonverbal tasks. Results suggest that hypothyroid-related memory deficits are not attributable to an attentional deficit but rather to specific retrieval deficits.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Elson, David K.; Riedl, Mark O.
A Lightweight Intelligent Virtual Cinematography System for Machinima Production Proceedings Article
In: Proceedings of the 3rd Annual Conference on Artificial Intelligence and Interactive Digital Entertainment AIIDE 07, Defense Technical Information Center, Palo Alto, CA, 2007.
Abstract | Links | BibTeX | Tags:
@inproceedings{elson_lightweight_2007,
title = {A Lightweight Intelligent Virtual Cinematography System for {Machinima} Production},
author = {Elson, David K. and Riedl, Mark O.},
url = {http://ict.usc.edu/pubs/A%20Lightweight%20Intelligent%20Virtual%20Cinematography%20System%20for%20Machinima%20Production.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Proceedings of the 3rd Annual Conference on Artificial Intelligence and Interactive Digital Entertainment AIIDE 07},
publisher = {Defense Technical Information Center},
address = {Palo Alto, CA},
abstract = {Machinima is a low-cost alternative to full production filmmaking. However, creating quality cinematic visualizations with existing machinima techniques still requires a high degree of talent and effort. We introduce a lightweight artificial intelligence system, Cambot, that can be used to assist in machinima production. Cambot takes a script as input and produces a cinematic visualization. Unlike other virtual cinematography systems, Cambot favors an offline algorithm coupled with an extensible library of specific modular and reusable facets of cinematic knowledge. One of the advantages of this approach to virtual cinematography is a tight coordination between the positions and movements of the camera and the actors.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ma, Wan-Chun; Hawkins, Tim; Peers, Pieter; Chabert, Charles-Felix; Weiss, Malte; Debevec, Paul
Rapid Acquisition of Specular and Diffuse Normal Maps from Polarized Spherical Gradient Illumination Proceedings Article
In: Kautz, Jan; Pattanaik, (Ed.): Eurographics Symposium on Rendering, 2007.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{ma_rapid_2007,
title = {Rapid Acquisition of Specular and Diffuse Normal Maps from Polarized Spherical Gradient Illumination},
author = {Ma, Wan-Chun and Hawkins, Tim and Peers, Pieter and Chabert, Charles-Felix and Weiss, Malte and Debevec, Paul},
editor = {Kautz, Jan and Pattanaik, Sumanta},
url = {http://ict.usc.edu/pubs/Rapid%20Acquisition%20of%20Specular%20and%20Diffuse%20Normal%20Maps%20from%20Polarized%20Spherical%20Gradient%20Illumination.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Eurographics Symposium on Rendering},
abstract = {We estimate surface normal maps of an object from either its diffuse or specular reflectance using four spherical gradient illumination patterns. In contrast to traditional photometric stereo, the spherical patterns allow normals to be estimated simultaneously from any number of viewpoints. We present two polarized lighting techniques that allow the diffuse and specular normal maps of an object to be measured independently. For scattering materials, we show that the specular normal maps yield the best record of detailed surface shape while the diffuse normals deviate from the true surface normal due to subsurface scattering, and that this effect is dependent on wavelength. We show several applications of this acquisition technique. First, we capture normal maps of a facial performance simultaneously from several viewing positions using time-multiplexed illumination. Second, we show that highresolution normal maps based on the specular component can be used with structured light 3D scanning to quickly acquire high-resolution facial surface geometry using off-the-shelf digital still cameras. Finally, we present a realtime shading model that uses independently estimated normal maps for the specular and diffuse color channels to reproduce some of the perceptually important effects of subsurface scattering.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Lane, H. Chad; Core, Mark; Gomboc, Dave; Karnavat, Ashish; Rosenberg, Milton
Intelligent Tutoring for Interpersonal and Intercultural Skills Proceedings Article
In: Interservice/Industry Training, Simulation and Education Conference (I/ITSEC), 2007.
Abstract | Links | BibTeX | Tags:
@inproceedings{lane_intelligent_2007,
title = {Intelligent Tutoring for Interpersonal and Intercultural Skills},
author = {Lane, H. Chad and Core, Mark and Gomboc, Dave and Karnavat, Ashish and Rosenberg, Milton},
url = {http://ict.usc.edu/pubs/Intelligent%20Tutoring%20for%20Interpersonal%20and%20Intercultural%20Skills.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Interservice/Industry Training, Simulation and Education Conference (I/ITSEC)},
abstract = {We describe some key issues involved in building an intelligent tutoring system for the ill-defined domain of interpersonal and intercultural skill acquisition. We discuss the consideration of mixed-result actions (actions with pros and cons), categories of actions (e.g. required steps vs. rules of thumb), the role of narrative, and reflective tutoring, among other topics. We present these ideas in the context of our work on an intelligent tutor for ELECT BiLAT, a game-based system to teach cultural awareness and negotiation skills for bilateral engagements. The tutor provides guidance in two forms: (1) as a coach that gives hints and feedback during an engagement with a virtual character, and (2) during an after-action review to help the learner reflect on their choices. Learner activities are mapped to learning objectives, which include whether the actions represent positive or negative evidence of learning. These underlie an expert model, student model, and models of coaching and reflective tutoring that support the learner. We describe several other cultural and interpersonal training systems that situate learners in goal based social contexts that include interaction with virtual characters and automated guidance. Finally, our future work includes evaluations of learning, expansion of the coach and reflective tutoring strategies, and integration of deeper knowledge-based resources that capture more nuanced cultural aspects of interaction.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lee, Jina; Marsella, Stacy C.; Traum, David; Gratch, Jonathan; Lance, Brent
The Rickel Gaze Model: A Window on the Mind of a Virtual Human Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA), pp. 296–303, Paris, France, 2007.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{lee_rickel_2007,
title = {The {Rickel} Gaze Model: A Window on the Mind of a Virtual Human},
author = {Lee, Jina and Marsella, Stacy C. and Traum, David and Gratch, Jonathan and Lance, Brent},
url = {http://ict.usc.edu/pubs/The%20Rickel%20Gaze%20Model-%20A%20Window%20on%20the%20Mind%20of%20a%20Virtual%20Human.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA)},
volume = {4722},
pages = {296--303},
address = {Paris, France},
abstract = {Gaze plays a large number of cognitive, communicative and affective roles in face-to-face human interaction. To build a believable virtual human, it is imperative to construct a gaze model that generates realistic gaze behaviors. However, it is not enough to merely imitate a person's eye movements. The gaze behaviors should reflect the internal states of the virtual human and users should be able to derive them by observing the behaviors. In this paper, we present a gaze model driven by the cognitive operations; the model processes the virtual human's reasoning, dialog management, and goals to generate behaviors that reflect the agent's inner thoughts. It has been implemented in our virtual human system and operates in real-time. The gaze model introduced in this paper was originally designed and developed by Jeff Rickel but has since been extended by the authors.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kenny, Patrick G.; Parsons, Thomas D.; Gratch, Jonathan; Leuski, Anton; Rizzo, Albert
Virtual Patients for Clinical Therapist Skills Training Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA), pp. 197–210, Paris, France, 2007.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{kenny_virtual_2007-1,
title = {Virtual Patients for Clinical Therapist Skills Training},
author = {Kenny, Patrick G. and Parsons, Thomas D. and Gratch, Jonathan and Leuski, Anton and Rizzo, Albert},
url = {http://ict.usc.edu/pubs/Virtual%20Patients%20for%20Clinical%20Therapist%20Skills%20Training.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 7th International Conference on Intelligent Virtual Agents (IVA)},
volume = {4722},
pages = {197--210},
address = {Paris, France},
abstract = {Virtual humans offer an exciting and powerful potential for rich interactive experiences. Fully embodied virtual humans are growing in capability, ease, and utility. As a result, they present an opportunity for expanding research into burgeoning virtual patient medical applications. In this paper we consider the ways in which one may go about building and applying virtual human technology to the virtual patient domain. Specifically we aim to show that virtual human technology may be used to help develop the interviewing and diagnostics skills of developing clinicians. Herein we proffer a description of our iterative design process and preliminary results to show that virtual patients may be a useful adjunct to psychotherapy education.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Parsons, Thomas D.; Rogers, Steven A.; Hall, Colin D.; Robertson, R. Kevin
Motor Based Assessment of Neurocognitive Functioning in Resource-Limited International Settings. Journal Article
In: Journal of Clinical and Experimental Neuropsychology, vol. 29, pp. 59–66, 2007.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_motor_2007,
title = {Motor Based Assessment of Neurocognitive Functioning in Resource-Limited International Settings},
author = {Parsons, Thomas D. and Rogers, Steven A. and Hall, Colin D. and Robertson, R. Kevin},
url = {http://ict.usc.edu/pubs/Motor%20based%20assessment%20of%20neurocognitive%20functioning%20in%20resource-limited%20Iinternational%20settings.pdf},
year = {2007},
date = {2007-01-01},
journal = {Journal of Clinical and Experimental Neuropsychology},
volume = {29},
pages = {59--66},
abstract = {This study compared variance accounted for by neuropsychological tests in both a brief motor battery and in a comprehensive neuropsychological battery. 327 HIV + subjects received a comprehensive cognitive battery and a shorter battery (Timed Gait, Grooved Pegboard, and Fingertapping). A significant correlation existed between the motor component tests and the more comprehensive battery (52% of variance). Adding Digit symbol and Trailmaking increased the amount of variance accounted for (73%). Motor battery sensitivity to impairment diagnosis was 0.79 and specificity was 0.76. A motor battery may have broader utility to diagnose and monitor HIV related neurocognitive disorders in international settings.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Wang, Ning; Okhmatovskaia, Anna; Lamothe, Francois; Morales, Mathieu; Werf, R. J.; Morency, Louis-Philippe
Can virtual humans be more engaging than real ones? Proceedings Article
In: Proceedings of the International Conference on Human-Computer Interaction, HCI Intelligent Multimodal Interaction Environments, pp. 286–297, Beijing, China, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_can_2007,
title = {Can virtual humans be more engaging than real ones?},
author = {Gratch, Jonathan and Wang, Ning and Okhmatovskaia, Anna and Lamothe, Francois and Morales, Mathieu and Werf, R. J. and Morency, Louis-Philippe},
url = {http://ict.usc.edu/pubs/Can%20virtual%20humans%20be%20more%20engaging%20than%20real%20ones.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {Proceedings of the International Conference on Human-Computer Interaction, HCI Intelligent Multimodal Interaction Environments},
pages = {286--297},
address = {Beijing, China},
abstract = {Emotional bonds don't arise from a simple exchange of facial displays, but often emerge through the dynamic give and take of face-to-face interactions. This article explores the phenomenon of rapport, a feeling of connectedness that seems to arise from rapid and contingent positive feedback between partners and is often associated with socio-emotional processes. Rapport has been argued to lead to communicative efficiency, better learning outcomes, improved acceptance of medical advice and successful negotiations. We provide experimental evidence that a simple virtual character that provides positive listening feedback can induce stronger rapport-like effects than face-to-face communication between human partners. Specifically, this interaction can be more engaging to storytellers than speaking to a human audience, as measured by the length and content of their stories.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Iudicello, Jennifer E.; Woods, Steven Paul; Parsons, Thomas D.; Moran, Lisa M.; Carey, Catherine L.; Grant, Igor
Verbal fluency in HIV infection: A meta-analytic review Journal Article
In: Journal of the International Neuropsychological Society, vol. 13, pp. 183–189, 2007.
Abstract | Links | BibTeX | Tags: MedVR
@article{iudicello_verbal_2007,
title = {Verbal fluency in {HIV} infection: A meta-analytic review},
author = {Iudicello, Jennifer E. and Woods, Steven Paul and Parsons, Thomas D. and Moran, Lisa M. and Carey, Catherine L. and Grant, Igor},
url = {http://ict.usc.edu/pubs/Verbal%20fluency%20in%20HIV%20infection-%20A%20meta-analytic%20review.pdf},
doi = {10.1017/S1355617707070221},
year = {2007},
date = {2007-01-01},
journal = {Journal of the International Neuropsychological Society},
volume = {13},
pages = {183--189},
abstract = {Given the largely prefrontostriatal neuropathogenesis of HIV-associated neurobehavioral deficits, it is often presumed that HIV infection leads to greater impairment on letter versus category fluency. A meta-analysis of the HIV verbal fluency literature was conducted (k 5 37, n 5 7110) to assess this hypothesis and revealed generally small effect sizes for both letter and category fluency, which increased in magnitude with advancing HIV disease severity. Across all studies, the mean effect size of category fluency was slightly larger than that of letter fluency. However, the discrepancy between category and letter fluency dissipated in a more conservative analysis of only those studies that included both tests. Thus, HIV-associated impairments in letter and category fluency are of similar magnitude, suggesting that mild word generation deficits are evident in HIV, regardless of whether traditional letter or semantic cues are used to guide the word search and retrieval process.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Robertson, R. Kevin; Smurzynski, Marlene; Parsons, Thomas D.; Wu, Kunling; Bosch, Ronald J.; Wu, Julia; McArthur, Justin C.; Collier, Ann C.; Evans, Scott R.; Ellis, Ron J.
The Prevalence and Incidence of Neurocognitive Impairment in the HAART Era Journal Article
In: AIDS, vol. 21, pp. 1915–1921, 2007, ISSN: 0269-9370.
Abstract | Links | BibTeX | Tags: MedVR
@article{robertson_prevalence_2007,
title = {The Prevalence and Incidence of Neurocognitive Impairment in the {HAART} Era},
author = {Robertson, R. Kevin and Smurzynski, Marlene and Parsons, Thomas D. and Wu, Kunling and Bosch, Ronald J. and Wu, Julia and McArthur, Justin C. and Collier, Ann C. and Evans, Scott R. and Ellis, Ron J.},
url = {http://ict.usc.edu/pubs/The%20prevalence%20and%20incidence%20of%20neurocognitive%20impairment%20in%20the%20HAART%20era.pdf},
issn = {0269-9370},
year = {2007},
date = {2007-01-01},
journal = {AIDS},
volume = {21},
pages = {1915--1921},
abstract = {Objectives: HAART suppresses HIV viral replication and restores immune function. The effects of HAART on neurological disease are less well understood. The aim of this study was to assess the prevalence and incidence of neurocognitive impairment in individuals who initiated HAART as part of an AIDS clinical trial. Design: A prospective cohort study of HIV-positive patients enrolled in randomized antiretroviral trials, the AIDS Clinical Trials Group (ACTG) Longitudinal Linked Randomized Trials (ALLRT) study. Methods: We examined the association between baseline and demographic characteristics and neurocognitive impairment among 1160 subjects enrolled in the ALLRT study. Results: A history of immunosuppression (nadir CD4 cell count $<$ 200 cells/ml) was associated with an increase in prevalent neurocognitive impairment. There were no significant virological and immunological predictors of incident neurocognitive impairment. Current immune status (low CD4 cell count) was associated with sustained prevalent impairment. Conclusion: The association of previous advanced immunosuppression with prevalent and sustained impairment suggests that there is a non-reversible component of neural injury that tracks with a history of disease progression. The association of sustained impairment with worse current immune status (low CD4 cell count) suggests that restoring immunocompetence increases the likelihood of neurocognitive recovery. Finally, the lack of association between incident neurocognitive impairment and virological and immunological indicators implies that neural injury continues in some patients regardless of the success of antiretroviral therapy on these laboratory measures.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Lamond, Bruce; Peers, Pieter; Debevec, Paul
Fast Image-based Separation of Diffuse and Specular Reflections Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 02 2007, 2007.
Abstract | Links | BibTeX | Tags: Graphics
@techreport{lamond_fast_2007,
  title       = {Fast Image-based Separation of Diffuse and Specular Reflections},
  author      = {Bruce Lamond and Pieter Peers and Paul Debevec},
  url         = {http://ict.usc.edu/pubs/ICT-TR-02-2007.pdf},
  year        = {2007},
  date        = {2007-01-01},
  number      = {ICT TR 02 2007},
  institution = {University of Southern California Institute for Creative Technologies},
  abstract    = {We present a novel image-based method for separating diffuse and specular reflections of real objects under distant environmental illumination. By illuminating a scene with only four high frequency illumination patterns, the specular and diffuse reflections can be separated by computing the maximum and minimum observed pixel values. Furthermore, we show that our method can be extended to separate diffuse and specular components under image-based environmental illumination. Applications range from image-based modeling of reflectance properties to improved normal and geometry acquisition.},
  keywords    = {Graphics},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Gandhe, Sudeep; Traum, David
First Steps Towards Dialogue Modelling from an Un-annotated Human-Human Corpus Proceedings Article
In: 5th Workshop on Knowledge and Reasoning in Practical Dialogue Systems, Hyderabad, India, 2007.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gandhe_first_2007,
title = {First Steps Towards Dialogue Modelling from an Un-annotated Human-Human Corpus},
author = {Sudeep Gandhe and David Traum},
url = {http://ict.usc.edu/pubs/First%20Steps%20towards%20Dialogue%20Modelling%20from%20an%20Un-annotated%20Human-Human%20Corpus.pdf},
year = {2007},
date = {2007-01-01},
booktitle = {5th Workshop on Knowledge and Reasoning in Practical Dialogue Systems},
address = {Hyderabad, India},
abstract = {Virtual human characters equipped with natural language dialogue capability have proved useful in many fields like simulation training and interactive games. Generally behind such dialogue managers lies a complex knowledge-rich rule-based system. Building such system involves meticulous annotation of data and hand authoring of rules. In this paper we build a statistical dialogue model from roleplay and wizard of oz dialog corpus with virtually no annotation. We compare these methods with the traditional approaches. We have evaluated these systems for perceived appropriateness of response and the results are presented here.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}