Publications
Search
Georgila, Kallirroi; Pynadath, David V.
Towards a Computational Model of Human Opinion Dynamics in Response to Real-World Events Proceedings Article
In: Proceedings of The 29th International FLAIRS Conference, pp. 44–49, AAAI Press, Key Largo, FL, 2016.
@inproceedings{georgila_towards_2016,
  title     = {Towards a Computational Model of Human Opinion Dynamics in Response to Real-World Events},
  author    = {Georgila, Kallirroi and Pynadath, David V.},
  url       = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS16/paper/view/12960/12539},
  year      = {2016},
  date      = {2016-03-01},
  booktitle = {Proceedings of The 29th International {FLAIRS} Conference},
  pages     = {44--49},
  publisher = {AAAI Press},
  address   = {Key Largo, FL},
  abstract  = {Accurate multiagent social simulation requires a computational model of how people incorporate their observations of real-world events into their beliefs about the state of their world. Current methods for creating such agent-based models typically rely on manual input that can be both burdensome and subjective. In this investigation, we instead pursue automated methods that can translate available data into the desired computational models. For this purpose, we use a corpus of real-world events in combination with longitudinal public opinion polls on a variety of opinion issues. We perform two experiments using automated methods taken from the literature. In our first experiment, we train maximum entropy classifiers to model changes in opinion scores as a function of real-world events. We measure and analyze the accuracy of our learned classifiers by comparing the opinion scores they generate against the opinion scores occurring in a held-out subset of our corpus. In our second experiment, we learn Bayesian networks to capture the same function. We then compare the dependency structures induced by the two methods to identify the event features that have the most significant effect on changes in public opinion.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Roemmele, Melissa
Writing Stories with Help from Recurrent Neural Networks Proceedings Article
In: AAAI Conference on Artificial Intelligence; Thirtieth AAAI Conference on Artificial Intelligence, pp. 4311 – 4312, AAAI Press, Phoenix, AZ, 2016.
@inproceedings{roemmele_writing_2016,
  title     = {Writing Stories with Help from Recurrent Neural Networks},
  author    = {Roemmele, Melissa},
  url       = {http://www.aaai.org/ocs/index.php/AAAI/AAAI16/paper/view/11966},
  year      = {2016},
  date      = {2016-02-01},
  booktitle = {{AAAI} Conference on Artificial Intelligence; Thirtieth {AAAI} Conference on Artificial Intelligence},
  pages     = {4311--4312},
  publisher = {AAAI Press},
  address   = {Phoenix, AZ},
  abstract  = {This thesis explores the use of a recurrent neural network model for a novel story generation task. In this task, the model analyzes an ongoing story and generates a sentence that continues the story.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
Commonsense Interpretation of Triangle Behavior Proceedings Article
In: Thirtieth AAAI Conference on Artificial Intelligence, AAAI Press, Phoenix, AZ, 2016.
@inproceedings{gordon_commonsense_2016,
  title     = {Commonsense Interpretation of Triangle Behavior},
  author    = {Gordon, Andrew S.},
  url       = {https://www.aaai.org/ocs/index.php/AAAI/AAAI16/rt/metadata/11790/12152},
  year      = {2016},
  date      = {2016-02-01},
  booktitle = {Thirtieth {AAAI} Conference on Artificial Intelligence},
  publisher = {AAAI Press},
  address   = {Phoenix, AZ},
  abstract  = {The ability to infer intentions, emotions, and other unobservable psychological states from people’s behavior is a hallmark of human social cognition, and an essential capability for future Artificial Intelligence systems. The commonsense theories of psychology and sociology necessary for such inferences have been a focus of logic-based knowledge representation research, but have been difficult to employ in robust automated reasoning architectures. In this paper we model behavior interpretation as a process of logical abduction, where the reasoning task is to identify the most probable set of assumptions that logically entail the observable behavior of others, given commonsense theories of psychology and sociology. We evaluate our approach using Triangle-COPA, a benchmark suite of 100 challenge problems based on an early social psychology experiment by Fritz Heider and Marianne Simmel. Commonsense knowledge of actions, social relationships, intentions, and emotions are encoded as defeasible axioms in first-order logic. We identify sets of assumptions that logically entail observed behaviors by backchaining with these axioms to a given depth, and order these sets by their joint probability assuming conditional independence. Our approach solves almost all (91) of the 100 questions in Triangle-COPA, and demonstrates a promising approach to robust behavior interpretation that integrates both logical and probabilistic reasoning.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Scherer, Stefan
Multimodal Behavior Analytics for Interactive Technologies Journal Article
In: KI - Künstliche Intelligenz, vol. 30, no. 1, pp. 91–92, 2016, ISSN: 0933-1875, 1610-1987.
@article{scherer_multimodal_2016,
  title     = {Multimodal Behavior Analytics for Interactive Technologies},
  author    = {Scherer, Stefan},
  url       = {http://link.springer.com/article/10.1007/s13218-015-0401-0},
  doi       = {10.1007/s13218-015-0401-0},
  issn      = {0933-1875, 1610-1987},
  year      = {2016},
  date      = {2016-02-01},
  journal   = {KI - Künstliche Intelligenz},
  volume    = {30},
  number    = {1},
  pages     = {91--92},
  abstract  = {Human communication is multifaceted and information between humans is communicated on many channels in parallel. In order for a machine to become an efficient and accepted social companion, it is important that the machine understands interactive cues that not only represent direct communicative information such as spoken words but also nonverbal behavior. Hence, technologies to understand and put nonverbal communication into the context of the present interaction are essential for the advancement of human-machine interfaces [3, 4]. Multimodal behavior analytics—a transdisciplinary field of research—aims to close this gap and enables machines to automatically identify, characterize, model, and synthesize individuals’ multimodal nonverbal behavior within both human-machine as well as machine-mediated human-human interaction. The emerging technology of this field is relevant for a wide range of interaction applications, including but not limited to the areas of healthcare and education. Exemplarily, the characterization and association of nonverbal behavior with underlying clinical conditions, such as depression or post-traumatic stress, holds transformative potential and could change treatment and the healthcare systems efficiency significantly [6]. Within the educational context the assessment of proficiency and expertise of individuals’ social skills, in particular for those with learning disabilities or social anxiety, can help create individualized education scenarios [2, 8]. The potential of machine-assisted training for individuals with autism spectrum disorders (ASD) for example could have far reaching impacts on our society. In the following, I highlight two behavior analytics approaches that were investigated in my PhD dissertation [3] and summarized in a multimodal framework for human behavior analysis [4].},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Graesser, Arthur C; Hu, Xiangen; Nye, Benjamin D.; Sottilare, Robert A.
Intelligent Tutoring Systems, Serious Games, and the Generalized Intelligent Framework for Tutoring (GIFT) Book Section
In: Using Games and Simulations for Teaching and Assessment, pp. 58–79, Routledge, New York, NY, 2016, ISBN: 978-0-415-73787-6.
@incollection{graesser_intelligent_2016,
  title     = {Intelligent Tutoring Systems, Serious Games, and the Generalized Intelligent Framework for Tutoring ({GIFT})},
  author    = {Graesser, Arthur C and Hu, Xiangen and Nye, Benjamin D. and Sottilare, Robert A.},
  url       = {https://www.researchgate.net/publication/304013322_Intelligent_Tutoring_Systems_Serious_Games_and_the_Generalized_Intelligent_Framework_for_Tutoring_GIFT},
  isbn      = {978-0-415-73787-6},
  year      = {2016},
  date      = {2016-01-01},
  booktitle = {Using Games and Simulations for Teaching and Assessment},
  pages     = {58--79},
  publisher = {Routledge},
  address   = {New York, NY},
  abstract  = {This chapter explores the prospects of integrating games with intelligent tutoring systems (ITSs). The hope is that there can be learning environments that optimize both motivation through games and deep learning through ITS technologies. Deep learning refers to the acquisition of knowledge, skills, strategies, and reasoning processes at the higher levels of Bloom’s (1956) taxonomy or the Knowledge-Learning-Instruction (KLI) framework (Koedinger, Corbett, \& Perfetti, 2012), such as the application of knowledge to new cases, knowledge analysis and synthesis, problem solving, critical thinking, and other difficult cognitive processes. In contrast, shallow learning involves perceptual learning, memorization of explicit material, and mastery of simple rigid procedures. Shallow knowledge may be adequate for near transfer tests of knowledge/skills but not far transfer tests to new situations that have some modicum of complexity.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Rizzo, Albert; Talbot, Thomas
Virtual Reality Standardized Patients for Clinical Training Book Section
In: The Digital Patient, pp. 255–272, John Wiley & Sons, Inc, Hoboken, NJ, 2016, ISBN: 978-1-118-95278-8 978-1-118-95275-7.
@incollection{rizzo_virtual_2016,
  title     = {Virtual Reality Standardized Patients for Clinical Training},
  author    = {Rizzo, Albert and Talbot, Thomas},
  url       = {http://doi.wiley.com/10.1002/9781118952788.ch18},
  isbn      = {978-1-118-95278-8, 978-1-118-95275-7},
  year      = {2016},
  date      = {2016-01-01},
  booktitle = {The Digital Patient},
  pages     = {255--272},
  publisher = {John Wiley \& Sons, Inc},
  address   = {Hoboken, NJ},
  abstract  = {There are several quite distinct educational approaches that are all called a virtual patient. It includes case presentations, interactive patient scenarios, virtual patient games, human standardized patients (HSPs), high-fidelity software simulations, high-fidelity manikins, and virtual human (VH) conversational agents. VH conversations are possible that include an avatar that responds to pre-selected choices; such an interview is called a structured encounter. Most VSPs attempted to date have been on traditional computers. With the increased prevalence of mobile devices, it is logical to consider the migration of VSP technology to phones and tablets. Future distant recognition (DSR) systems will require a high level of individual speaker discrimination and will likely adopt microphone array-based acoustic beam forming technology. Future success may no longer be rate-limited by the pace of technology, but by the creativity and innovation of educators who will create compelling VSP experiences and curricula.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Kaplan, Jonas T.; Gimbel, Sarah I.; Dehghani, Morteza; Immordino-Yang, Mary Helen; Sagae, Kenji; Wong, Jennifer D.; Tipper, Christine M.; Damasio, Hanna; Gordon, Andrew S.; Damasio, Antonio
Processing Narratives Concerning Protected Values: A Cross-Cultural Investigation of Neural Correlates Journal Article
In: Cerebral Cortex, 2016, ISSN: 1047-3211, 1460-2199.
@article{kaplan_processing_2016,
  title     = {Processing Narratives Concerning Protected Values: A Cross-Cultural Investigation of Neural Correlates},
  author    = {Kaplan, Jonas T. and Gimbel, Sarah I. and Dehghani, Morteza and Immordino-Yang, Mary Helen and Sagae, Kenji and Wong, Jennifer D. and Tipper, Christine M. and Damasio, Hanna and Gordon, Andrew S. and Damasio, Antonio},
  url       = {http://www.cercor.oxfordjournals.org/lookup/doi/10.1093/cercor/bhv325},
  doi       = {10.1093/cercor/bhv325},
  issn      = {1047-3211, 1460-2199},
  year      = {2016},
  date      = {2016-01-01},
  journal   = {Cerebral Cortex},
  abstract  = {Narratives are an important component of culture and play a central role in transmitting social values. Little is known, however, about how the brain of a listener/reader processes narratives. A receiver's response to narration is influenced by the narrator's framing and appeal to values. Narratives that appeal to “protected values,” including core personal, national, or religious values, may be particularly effective at influencing receivers. Protected values resist compromise and are tied with identity, affective value, moral decision-making, and other aspects of social cognition. Here, we investigated the neural mechanisms underlying reactions to protected values in narratives. During fMRI scanning, we presented 78 American, Chinese, and Iranian participants with real-life stories distilled from a corpus of over 20 million weblogs. Reading these stories engaged the posterior medial, medial prefrontal, and temporo-parietal cortices. When participants believed that the protagonist was appealing to a protected value, signal in these regions was increased compared with when no protected value was perceived, possibly reflecting the intensive and iterative search required to process this material. The effect strength also varied across groups, potentially reflecting cultural differences in the degree of concern for protected values.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Cukor, Judith; Gerardi, Maryrose; Alley, Stephanie; Reist, Christopher; Roy, Michael; Rothbaum, Barbara O.; Difede, JoAnn; Rizzo, Albert
Virtual Reality Exposure Therapy for Combat-Related PTSD Book Section
In: Posttraumatic Stress Disorder and Related Diseases in Combat Veterans, pp. 69–83, Springer International Publishing, Cham, Switzerland, 2016, ISBN: 978-3-319-22984-3 978-3-319-22985-0.
@incollection{cukor_virtual_2016,
  title     = {Virtual Reality Exposure Therapy for Combat-Related {PTSD}},
  author    = {Cukor, Judith and Gerardi, Maryrose and Alley, Stephanie and Reist, Christopher and Roy, Michael and Rothbaum, Barbara O. and Difede, JoAnn and Rizzo, Albert},
  url       = {http://link.springer.com/10.1007/978-3-319-22985-0_7},
  isbn      = {978-3-319-22984-3, 978-3-319-22985-0},
  year      = {2016},
  date      = {2016-01-01},
  booktitle = {Posttraumatic Stress Disorder and Related Diseases in Combat Veterans},
  pages     = {69--83},
  publisher = {Springer International Publishing},
  address   = {Cham, Switzerland},
  abstract  = {War is perhaps one of the most challenging situations that a human being can experience. The physical, emotional, cognitive, and psychological demands of a combat environment place enormous stress on even the best-prepared military personnel. Numerous reports indicate that the incidence of posttraumatic stress disorder (PTSD) in returning Operation Enduring Freedom/Operation Iraqi Freedom (OEF/OIF) military personnel is significant. This has served to motivate research on how to better develop and disseminate evidence-based treatments for PTSD that leverage the unique features available with virtual reality (VR) technology. VR-delivered exposure therapy for PTSD is currently being used to treat combat- and terrorist attack-related PTSD with initial reports of positive outcomes. This chapter presents a brief overview and rationale for the use of VR exposure for combat-related PTSD and describes the Virtual Iraq/Afghanistan exposure therapy system. This includes a short review of the previous literature, a description of the system components and the treatment protocol, and a case presentation. VR offers an alternative format for delivering exposure-based therapies for PTSD that may appeal to certain service members and veterans who grew up “digital” and who might be inclined to seek treatment in this fashion.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Venek, Verena; Scherer, Stefan; Morency, Louis-Philippe; Rizzo, Albert; Pestian, John
Adolescent Suicidal Risk Assessment in Clinician-Patient Interaction Journal Article
In: IEEE Transactions on Affective Computing, vol. PP, no. 99, 2016, ISSN: 1949-3045.
@article{venek_adolescent_2016,
  title     = {Adolescent Suicidal Risk Assessment in Clinician-Patient Interaction},
  author    = {Venek, Verena and Scherer, Stefan and Morency, Louis-Philippe and Rizzo, Albert and Pestian, John},
  url       = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7384418},
  doi       = {10.1109/TAFFC.2016.2518665},
  issn      = {1949-3045},
  year      = {2016},
  date      = {2016-01-01},
  journal   = {IEEE Transactions on Affective Computing},
  volume    = {PP},
  number    = {99},
  internal-note = {volume={PP}/number={99} look like IEEE Early Access placeholders - update once the final volume/issue/pages are assigned},
  abstract  = {Youth suicide is a major public health problem. It is the third leading cause of death in the United States for ages 13 through 18. Many adolescents that face suicidal thoughts or make a suicide plan never seek professional care or help. Within this work, we evaluate both verbal and nonverbal responses to a five-item ubiquitous questionnaire to identify and assess suicidal risk of adolescents. We utilize a machine learning approach to identify suicidal from non-suicidal speech as well as characterize adolescents that repeatedly attempted suicide in the past. Our findings investigate both verbal and nonverbal behavior information of the face-to-face clinician-patient interaction. We investigate 60 audio-recorded dyadic clinician-patient interviews of 30 suicidal (13 repeaters and 17 non-repeaters) and 30 non-suicidal adolescents. The interaction between clinician and adolescents is statistically analyzed to reveal differences between suicidal vs. non-suicidal adolescents and to investigate suicidal repeaters’ behaviors in comparison to suicidal non-repeaters. By using a hierarchical classifier we were able to show that the verbal responses to the ubiquitous questions sections of the interviews were useful to discriminate suicidal and non-suicidal patients. However, to additionally classify suicidal repeaters and suicidal non-repeaters more information especially nonverbal information is required.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Scherer, Stefan; Lucas, Gale M.; Gratch, Jonathan; Rizzo, Albert Skip; Morency, Louis-Philippe
Self-reported symptoms of depression and PTSD are associated with reduced vowel space in screening interviews Journal Article
In: IEEE Transactions on Affective Computing, vol. 7, no. 1, pp. 59–73, 2016, ISSN: 1949-3045.
@article{scherer_self-reported_2016,
  title     = {Self-reported symptoms of depression and {PTSD} are associated with reduced vowel space in screening interviews},
  author    = {Scherer, Stefan and Lucas, Gale M. and Gratch, Jonathan and Rizzo, Albert Skip and Morency, Louis-Philippe},
  url       = {http://ieeexplore.ieee.org/document/7117386/?arnumber=7117386},
  doi       = {10.1109/TAFFC.2015.2440264},
  issn      = {1949-3045},
  year      = {2016},
  date      = {2016-01-01},
  journal   = {IEEE Transactions on Affective Computing},
  volume    = {7},
  number    = {1},
  pages     = {59--73},
  abstract  = {Reduced frequency range in vowel production is a well documented speech characteristic of individuals’ with psychological and neurological disorders. Affective disorders such as depression and post-traumatic stress disorder (PTSD) are known to influence motor control and in particular speech production. The assessment and documentation of reduced vowel space and reduced expressivity often either rely on subjective assessments or on analysis of speech under constrained laboratory conditions (e.g.sustained vowel production, reading tasks). These constraints render the analysis of such measures expensive and impractical. Within this work, we investigate an automatic unsupervised machine learning based approach to assess a speaker’s vowel space. Our experiments are based on recordings of 253 individuals. Symptoms of depression and PTSD are assessed using standard self-assessment questionnaires and their cut-off scores. The experiments show a significantly reduced vowel space in subjects that scored positively on the questionnaires. We show the measure’s statistical robustness against varying demographics of individuals and articulation rate. The reduced vowel space for subjects with symptoms of depression can be explained by the common condition of psychomotor retardation influencing articulation and motor control. These findings could potentially support treatment of affective disorders, like depression and PTSD in the future.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Ustun, Volkan; Rosenbloom, Paul S.; Kim, Julia; Li, Lingshan
Building High Fidelity Human Behavior Models in the Sigma Cognitive Architecture Proceedings Article
In: Proceedings of the 2015 Winter Simulation Conference, pp. 3124–3125, IEEE, Huntington Beach, CA, 2015, ISBN: 978-1-4673-9741-4.
@inproceedings{ustun_building_2015,
  title     = {Building High Fidelity Human Behavior Models in the {Sigma} Cognitive Architecture},
  author    = {Ustun, Volkan and Rosenbloom, Paul S. and Kim, Julia and Li, Lingshan},
  url       = {http://dl.acm.org/citation.cfm?id=2888619.2888999},
  isbn      = {978-1-4673-9741-4},
  year      = {2015},
  date      = {2015-12-01},
  booktitle = {Proceedings of the 2015 Winter Simulation Conference},
  pages     = {3124--3125},
  publisher = {IEEE},
  address   = {Huntington Beach, CA},
  abstract  = {Many agent simulations involve computational models of intelligent human behavior. In a variety of cases, these behavior models should be high-fidelity to provide the required realism and credibility. Cognitive architectures may assist the generation of such high-fidelity models as they specify the fixed structure underlying an intelligent cognitive system that does not change over time and across domains. Existing symbolic architectures, such as Soar and ACT-R, have been used in this way, but here the focus is on a new architecture, Sigma (Σ), that leverages probabilistic graphical models towards a uniform grand unification of not only the traditional cognitive capabilities but also key non-cognitive aspects, and which thus yields unique opportunities for construction of new kinds of non-modular high-fidelity behavior models. Here, we briefly introduce Sigma along with two disparate proof-of-concept virtual humans – one conversational and the other a pair of ambulatory agents – that demonstrate its diverse capabilities.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Jones, Andrew; Hays, Kia; Maio, Heather; Alexander, Oleg; Artstein, Ron; Debevec, Paul; Gainer, Alesia; Georgila, Kallirroi; Haase, Kathleen; Jungblut, Karen; Leuski, Anton; Smith, Stephen; Swartout, William
New Dimensions in Testimony: Digitally Preserving a Holocaust Survivor’s Interactive Storytelling Book Section
In: Interactive Storytelling, vol. 9445, pp. 269–281, Springer International Publishing, Copenhagen, Denmark, 2015, ISBN: 978-3-319-27035-7 978-3-319-27036-4.
@incollection{traum_new_2015,
  title     = {New Dimensions in Testimony: Digitally Preserving a Holocaust Survivor’s Interactive Storytelling},
  author    = {Traum, David and Jones, Andrew and Hays, Kia and Maio, Heather and Alexander, Oleg and Artstein, Ron and Debevec, Paul and Gainer, Alesia and Georgila, Kallirroi and Haase, Kathleen and Jungblut, Karen and Leuski, Anton and Smith, Stephen and Swartout, William},
  url       = {http://link.springer.com/10.1007/978-3-319-27036-4_26},
  isbn      = {978-3-319-27035-7, 978-3-319-27036-4},
  year      = {2015},
  date      = {2015-12-01},
  booktitle = {Interactive Storytelling},
  volume    = {9445},
  pages     = {269--281},
  publisher = {Springer International Publishing},
  address   = {Copenhagen, Denmark},
  abstract  = {We describe a digital system that allows people to have an interactive conversation with a human storyteller (a Holocaust survivor) who has recorded a number of dialogue contributions, including many compelling narratives of his experiences and thoughts. The goal is to preserve as much as possible of the experience of face-to-face interaction. The survivor's stories, answers to common questions, and testimony are recorded in high fidelity, and then delivered interactively to an audience as responses to spoken questions. People can ask questions and receive answers on a broad range of topics including the survivor's experiences before, after and during the war, his attitudes and philosophy. Evaluation results show that most user questions can be addressed by the system, and that audiences are highly engaged with the resulting interaction.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Wang, Ning; Pynadath, David V.; Hill, Susan G.
Building Trust in a Human-Robot Team with Automatically Generated Explanations Proceedings Article
In: Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015, National Training and Simulation Association, Orlando, Florida, 2015.
@inproceedings{wang_building_2015,
  title     = {Building Trust in a Human-Robot Team with Automatically Generated Explanations},
  author    = {Wang, Ning and Pynadath, David V. and Hill, Susan G.},
  url       = {http://www.iitsecdocs.com/search},
  year      = {2015},
  date      = {2015-12-01},
  booktitle = {Interservice/Industry Training, Simulation, and Education Conference ({I/ITSEC}) 2015},
  publisher = {National Training and Simulation Association},
  address   = {Orlando, Florida},
  abstract  = {Technological advances offer the promise of robotic systems that work with people to form human-robot teams that are more capable than their individual members. Unfortunately, the increasing capability of such autonomous systems has often failed to increase the capability of the human-robot team. Studies have identified many causes underlying these failures, but one critical aspect of a successful human-machine interaction is trust. When robots are more suited than humans for a certain task, we want the humans to trust the robots to perform that task. When the robots are less suited, we want the humans to appropriately gauge the robots’ ability and have people perform the task manually. Failure to do so results in disuse of robots in the former case and misuse in the latter. Real-world case studies and laboratory experiments show that failures in both cases are common. Researchers have theorized that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies show that explanations offered by an automated system can help maintain trust with the humans in case the system makes an error, indicating that the robot’s communication transparency can be an important factor in earning an appropriate level of trust. To study how robots can communicate their decision-making process to humans, we have designed an agent-based online test-bed that supports virtual simulation of domain-independent human-robot interaction. In the simulation, humans work together with virtual robots as a team. The test-bed allows researchers to conduct online human-subject studies and gain better understanding of how robot communication can improve human-robot team performance by fostering better trust relationships between humans and their robot teammates. In this paper, we describe the details of our design, and illustrate its operation with an example human-robot team reconnaissance task.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pynadath, David V.; Wang, Ning; Merchant, Chirag
Toward Acquiring a Human Behavior Model of Competition vs. Cooperation Proceedings Article
In: Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015, National Training and Simulation Association, Orlando, Florida, 2015.
@inproceedings{pynadath_toward_2015,
  title     = {Toward Acquiring a Human Behavior Model of Competition vs. Cooperation},
  author    = {Pynadath, David V. and Wang, Ning and Merchant, Chirag},
  url       = {http://www.iitsecdocs.com/search},
  year      = {2015},
  date      = {2015-12-01},
  booktitle = {Interservice/Industry Training, Simulation, and Education Conference ({I/ITSEC}) 2015},
  publisher = {National Training and Simulation Association},
  address   = {Orlando, Florida},
  abstract  = {One of the challenges in modeling human behavior is accurately capturing the conditions under which people will behave selfishly or selflessly. Researchers have been unable to craft purely cooperative (or competitive) scenarios without significant numbers of subjects displaying unintended selfish (or selfless) behavior (e.g., Rapoport \& Chammah, 1965). In this work, rather than try to further isolate competitive vs. cooperative behavior, we instead construct an experimental setting that deliberately includes both, in a way that fits within an operational simulation model. Using PsychSim, a multiagent social simulation framework with both Theory of Mind and decision theory, we have implemented an online resource allocation game called “Team of Rivals”, where four players seek to defeat a common enemy. The players have individual pools of resources which they can allocate toward that common goal. In addition to their progress toward this common goal, the players also receive individual feedback, in terms of the number of resources they own and have won from the enemy. By giving the players both an explicit cooperative goal and implicit feedback on potential competitive goals, we give them room to behave anywhere on the spectrum between these two extremes. Furthermore, by moving away from the more common two-player laboratory settings (e.g., Prisoner’s Dilemma), we can observe differential behavior across the richer space of possible interpersonal relationships. We discuss the design of the game that allows us to observe and analyze these relationships from human behavior data acquired through this game. We then describe decision-theoretic agents that can simulate hypothesized variations on human behavior. Finally, we present results of a preliminary playtest of the testbed and discuss the gathered data.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Roemmele, Melissa; Gordon, Andrew S.
Creative Help: A Story Writing Assistant Book Section
In: Interactive Storytelling, vol. 9445, pp. 81–92, Springer International Publishing, Copenhagen, Denmark, 2015, ISBN: 978-3-319-27036-4.
@incollection{roemmele_creative_2015,
  title     = {Creative Help: A Story Writing Assistant},
  author    = {Roemmele, Melissa and Gordon, Andrew S.},
  url       = {http://link.springer.com/10.1007/978-3-319-27036-4_8},
  isbn      = {978-3-319-27036-4},
  year      = {2015},
  date      = {2015-12-01},
  booktitle = {Interactive Storytelling},
  volume    = {9445},
  pages     = {81--92},
  publisher = {Springer International Publishing},
  address   = {Copenhagen, Denmark},
  abstract  = {We present Creative Help, an application that helps writers by generating suggestions for the next sentence in a story as it is being written. Users can modify or delete suggestions according to their own vision of the unfolding narrative. The application tracks users' changes to suggestions in order to measure their perceived helpfulness to the story, with fewer edits indicating more helpful suggestions. We demonstrate how the edit distance between a suggestion and its resulting modification can be used to comparatively evaluate different models for generating suggestions. We describe a generation model that uses case-based reasoning to find relevant suggestions from a large corpus of stories. The application shows that this model generates suggestions that are more helpful than randomly selected suggestions at a level of marginal statistical significance. By giving users control over the generated content, Creative Help provides a new opportunity in open-domain interactive storytelling.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
McAlinden, Ryan; Suma, Evan; Grechkin, Timofey; Enloe, Michael
Procedural Reconstruction of Simulation Terrain Using Drones Proceedings Article
In: Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015, National Training and Simulation Association, Orlando, Florida, 2015.
@inproceedings{mcalinden_procedural_2015,
title = {Procedural Reconstruction of Simulation Terrain Using Drones},
author = {Ryan McAlinden and Evan Suma and Timofey Grechkin and Michael Enloe},
url = {http://www.iitsecdocs.com/search},
year = {2015},
date = {2015-12-01},
booktitle = {Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015},
publisher = {National Training and Simulation Association},
address = {Orlando, Florida},
abstract = {Photogrammetric techniques for constructing 3D virtual environments have previously been plagued by expensive equipment, imprecise and visually unappealing results. However, with the introduction of low-cost, off-the-shelf (OTS) unmanned aerial systems (UAS), lighter and capable cameras, and more efficient software techniques for reconstruction, the modeling and simulation (M\&S) community now has available to it new types of virtual assets that are suited for modern-day games and simulations. This paper presents an approach for fully autonomously collecting, processing, storing and rendering highly-detailed geo-specific terrain data using these OTS techniques and methods. We detail the types of equipment used, the flight parameters, the processing and reconstruction pipeline, and finally the results of using the dataset in a game/simulation engine. A key objective of the research is procedurally segmenting the terrain into usable features that the engine can interpret -- i.e. distinguishing between roads, buildings, vegetation, etc. This allows the simulation core to assign attributes related to physics, lighting, collision cylinders and navigation meshes that not only support basic rendering of the model but introduce interaction with it. The results of this research are framed in the context of a new paradigm for geospatial collection, analysis and simulation. Specifically, the next generation of M\&S systems will need to integrate environmental representations that have higher detail and richer metadata while ensuring a balance between performance and usability.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chatterjee, Moitreya; Park, Sunghyun; Morency, Louis-Philippe; Scherer, Stefan
Combining Two Perspectives on Classifying Multimodal Data for Recognizing Speaker Traits Proceedings Article
In: Proceedings of the 2015 ACM on International Conference on Multimodal Interaction, pp. 7–14, ACM Press, Seattle, Washington, 2015, ISBN: 978-1-4503-3912-4.
@inproceedings{chatterjee_combining_2015,
title = {Combining Two Perspectives on Classifying Multimodal Data for Recognizing Speaker Traits},
author = {Moitreya Chatterjee and Sunghyun Park and Louis-Philippe Morency and Stefan Scherer},
url = {http://dl.acm.org/citation.cfm?doid=2818346.2820747},
doi = {10.1145/2818346.2820747},
isbn = {978-1-4503-3912-4},
year = {2015},
date = {2015-11-01},
booktitle = {Proceedings of the 2015 ACM on International Conference on Multimodal Interaction},
pages = {7--14},
publisher = {ACM Press},
address = {Seattle, Washington},
abstract = {Human communication involves conveying messages both through verbal and non-verbal channels (facial expression, gestures, prosody, etc.). Nonetheless, the task of learning these patterns for a computer by combining cues from multiple modalities is challenging because it requires effective representation of the signals and also taking into consideration the complex interactions between them. From the machine learning perspective this presents a two-fold challenge: a) Modeling the intermodal variations and dependencies; b) Representing the data using an apt number of features, such that the necessary patterns are captured but at the same time allaying concerns such as over-fitting. In this work we attempt to address these aspects of multimodal recognition, in the context of recognizing two essential speaker traits, namely passion and credibility of online movie reviewers. We propose a novel ensemble classification approach that combines two different perspectives on classifying multimodal data. Each of these perspectives attempts to independently address the two-fold challenge. In the first, we combine the features from multiple modalities but assume inter-modality conditional independence. In the other one, we explicitly capture the correlation between the modalities but in a space of few dimensions and explore a novel clustering based kernel similarity approach for recognition. Additionally, this work investigates a recent technique for encoding text data that captures semantic similarity of verbal content and preserves word-ordering. The experimental results on a recent public dataset shows significant improvement of our approach over multiple baselines. Finally, we also analyze the most discriminative elements of a speaker's non-verbal behavior that contribute to his/her perceived credibility/passionateness.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Feng, Andrew; Casas, Dan; Shapiro, Ari
Avatar Reshaping and Automatic Rigging Using a Deformable Model Proceedings Article
In: Proceedings of the 8th ACM SIGGRAPH Conference on Motion in Games, pp. 57–64, ACM Press, Paris, France, 2015, ISBN: 978-1-4503-3991-9.
@inproceedings{feng_avatar_2015,
title = {Avatar Reshaping and Automatic Rigging Using a Deformable Model},
author = {Andrew Feng and Dan Casas and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2822013.2822017},
doi = {10.1145/2822013.2822017},
isbn = {978-1-4503-3991-9},
year = {2015},
date = {2015-11-01},
booktitle = {Proceedings of the 8th ACM SIGGRAPH Conference on Motion in Games},
pages = {57--64},
publisher = {ACM Press},
address = {Paris, France},
abstract = {3D scans of human figures have become widely available through online marketplaces and have become relatively easy to acquire using commodity scanning hardware. In addition to static uses of such 3D models, such as 3D printed figurines or rendered 3D still imagery, there are numerous uses for an animated 3D character that uses such 3D scan data. In order to effectively use such models as dynamic 3D characters, the models must be properly rigged before they are animated. In this work, we demonstrate a method to automatically rig a 3D mesh by matching a set of morphable models against the 3D scan. Once the morphable model has been matched against the 3D scan, the skeleton position and skinning attributes are then copied, resulting in a skinning and rigging that is similar in quality to the original hand-rigged model. In addition, the use of a morphable model allows us to reshape and resize the 3D scan according to approximate human proportions. Thus, a human 3D scan can be modified to be taller, shorter, fatter or skinnier. Such manipulations of the 3D scan are useful both for social science research, as well as for visualization for applications such as fitness, body image, plastic surgery and the like.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Papaefthymiou, Margarita; Feng, Andrew; Shapiro, Ari; Papagiannakis, George
A fast and robust pipeline for populating mobile AR scenes with gamified virtual characters Proceedings Article
In: SIGGRAPH Asia 2015, pp. 1–8, ACM Press, Kobe, Japan, 2015, ISBN: 978-1-4503-3928-5.
@inproceedings{papaefthymiou_fast_2015,
title = {A fast and robust pipeline for populating mobile AR scenes with gamified virtual characters},
author = {Margarita Papaefthymiou and Andrew Feng and Ari Shapiro and George Papagiannakis},
url = {http://dl.acm.org/citation.cfm?doid=2818427.2818463},
doi = {10.1145/2818427.2818463},
isbn = {978-1-4503-3928-5},
year = {2015},
date = {2015-11-01},
booktitle = {SIGGRAPH Asia 2015},
pages = {1--8},
publisher = {ACM Press},
address = {Kobe, Japan},
abstract = {In this work we present a complete methodology for robust authoring of AR virtual characters powered from a versatile character animation framework (Smartbody), using only mobile devices. We can author, fully augment with life-size, animated, geometrically accurately registered virtual characters into any open space in less than 1 minute with only modern smartphones or tablets and then automatically revive this augmentation for subsequent activations from the same spot, in under a few seconds. Also, we handle efficiently scene authoring rotations of the AR objects using Geometric Algebra rotors in order to extract higher quality visual results. Moreover, we have implemented a mobile version of the global illumination for real-time Precomputed Radiance Transfer algorithm for diffuse shadowed characters in real-time, using High Dynamic Range (HDR) environment maps integrated in our opensource OpenGL Geometric Application (glGA) framework. Effective character interaction plays fundamental role in attaining high level of believability and makes the AR application more attractive and immersive based on the SmartBody framework.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Azmandian, Mahdi; Grechkin, Timofey; Bolas, Mark; Suma, Evan
Physical Space Requirements for Redirected Walking: How Size and Shape Affect Performance Proceedings Article
In: Eurographics Symposium on Virtual Environments (2015), pp. 93–100, The Eurographics Association, Kyoto, Japan, 2015, ISBN: 978-3-905674-84-2.
@inproceedings{azmandian_physical_2015,
title = {Physical Space Requirements for Redirected Walking: How Size and Shape Affect Performance},
author = {Mahdi Azmandian and Timofey Grechkin and Mark Bolas and Evan Suma},
url = {https://diglib.eg.org/handle/10.2312/13833},
doi = {10.2312/egve.20151315},
isbn = {978-3-905674-84-2},
year = {2015},
date = {2015-10-01},
booktitle = {Eurographics Symposium on Virtual Environments (2015)},
pages = {93--100},
publisher = {The Eurographics Association},
address = {Kyoto, Japan},
abstract = {Redirected walking provides a compelling solution to explore large virtual environments in a natural way. However, research literature provides few guidelines regarding trade-offs involved in selecting size and layout for physical tracked space. We designed a rigorously controlled benchmarking framework and conducted two simulated user experiments to systematically investigate how the total area and dimensions of the tracked space affect performance of steer-to-center and steer-to-orbit algorithms. The results indicate that minimum viable size of physical tracked space for these redirected walking algorithms is approximately 6m $\times$ 6m with performance continuously improving in larger tracked spaces. At the same time, no ``optimal'' tracked space size can guarantee the absence of contacts with the boundary. We also found that square tracked spaces enabled best overall performance with steer-to-center algorithm also performing well in moderately elongated rectangular spaces. Finally, we demonstrate that introducing translation gains can provide a useful boost in performance, particularly when physical space is constrained. We conclude with the discussion of potential applications of our benchmarking toolkit to other problems related to performance of redirected walking platforms.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Filter
2006
Mao, Wenji; Gratch, Jonathan
Evaluating a Computational Model of Social Causality and Responsibility Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Hakodate, Japan, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_evaluating_2006,
  title     = {Evaluating a Computational Model of Social Causality and Responsibility},
  author    = {Wenji Mao and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Evaluating%20a%20Computational%20Model%20of%20Social%20Causality%20and%20Responsibility.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  address   = {Hakodate, Japan},
  abstract  = {Intelligent agents are typically situated in a social environment and must reason about social cause and effect. Such reasoning is qualitatively different from physical causal reasoning that underlies most intelligent systems. Modeling social causal reasoning can enrich the capabilities of multi-agent systems and intelligent user interfaces. In this paper, we empirically evaluate a computational model of social causality and responsibility against human social judgments. Results from our experimental studies show that in general, the model's predictions of internal variables and inference process are consistent with human responses, though they also suggest some possible refinement to the computational model.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kim, Soo-Min; Hovy, Eduard
Identifying and Analyzing Judgment Opinions Proceedings Article
In: Proceedings of the Humans Language Technology/North American Association of Computational Linguistics Conference, New York, NY, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{kim_identifying_2006,
title = {Identifying and Analyzing Judgment Opinions},
author = {Soo-Min Kim and Eduard Hovy},
url = {http://ict.usc.edu/pubs/Identifying%20and%20Analyzing%20Judgment%20Opinions.pdf},
year = {2006},
date = {2006-01-01},
booktitle = {Proceedings of the Human Language Technology/North American Association of Computational Linguistics Conference},
address = {New York, NY},
abstract = {In this paper, we introduce a methodology for analyzing judgment opinions. We define a judgment opinion as consisting of a valence, a holder, and a topic. We decompose the task of opinion analysis into four parts: 1) recognizing the opinion; 2) identifying the valence; 3) identifying the holder; and 4) identifying the topic. In this paper, we address the first three parts and evaluate our methodology using both intrinsic and extrinsic measures.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
McAlinden, Ryan; Clevenger, William
A Culturally-enhanced Environmental Framework for Virtual Environments Proceedings Article
In: Proceedings of Behavior Representation in Modeling and Simulation, Baltimore, MD, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{mcalinden_culturally-enhanced_2006,
  title     = {A Culturally-enhanced Environmental Framework for Virtual Environments},
  author    = {Ryan McAlinden and William Clevenger},
  url       = {http://ict.usc.edu/pubs/A%20Culturally-enhanced%20Environmental%20Framework%20for%20Virtual%20Environments.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Proceedings of Behavior Representation in Modeling and Simulation},
  address   = {Baltimore, MD},
  abstract  = {This paper details the design and implementation of an embedded environmental framework that introduces cultural and social influences into a simulation agent's decision-making process. We describe the current limitations associated with accurately representing culture in virtual environments and military simulations, and how recent research in other academic fields have enabled computational techniques to begin incorporating the effects of culture into AI and behavior subsystems. The technical approach is presented that describes the design and implementation of a hierarchical data model, as well as the software techniques for embedding culturally-specific information inside of a virtual environment. Finally, future work is discussed for developing a more comprehensive and standardized approach for embedding this culturally-specific information inside of the virtual domain.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Braaten, Alyssa J.; Parsons, Thomas D.; McCue, Robert; Sellers, Alfred; Burns, William J.
Neurocognitive Differential Diagnosis of Dementing Diseases: Alzheimer's Dementia, Vascular Dementia, Frontotemporal Dementia, and Major Depressive Disorder Journal Article
In: International Journal of Neuroscience, vol. 116, pp. 1271–1293, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{braaten_neurocognitive_2006,
title = {Neurocognitive Differential Diagnosis of Dementing Diseases: {Alzheimer's} Dementia, Vascular Dementia, Frontotemporal Dementia, and Major Depressive Disorder},
author = {Alyssa J. Braaten and Thomas D. Parsons and Robert McCue and Alfred Sellers and William J. Burns},
url = {http://ict.usc.edu/pubs/NEUROCOGNITIVE%20DIFFERENTIAL%20DIAGNOSIS%20OF%20DEMENTING%20DISEASES-%20ALZHEIMER%E2%80%99S%20DEMENTIA,%20VASCULAR%20DEMENTIA,%20FRONTOTEMPORAL%20DEMENTIA,%20AND%20MAJOR%20DEPRESSIVE%20DISORDER.pdf},
year = {2006},
date = {2006-01-01},
journal = {International Journal of Neuroscience},
volume = {116},
pages = {1271--1293},
abstract = {Similarities in presentation of Dementia of Alzheimer's Type, Vascular Dementia, Frontotemporal Dementia, and Major Depressive Disorder, pose differential diagnosis challenges. The current study identifies specific neuropsychological patterns of scores for Dementia of Alzheimer's Type, Vascular Dementia, Frontotemporal Dementia, and Major Depressive Disorder. Neuropsychological domains directly assessed in the study included: immediate memory, delayed memory, confrontational naming, verbal fluency, attention, concentration, and executive functioning. The results reveal specific neuropsychological comparative profiles for Dementia of Alzheimer's Type, Vascular Dementia, Frontotemporal Dementia, and Major Depressive Disorder. The identification of these profiles will assist in the differential diagnosis of these disorders and aid in patient treatment.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Lane, H. Chad; Core, Mark; Gomboc, Dave; Solomon, Steve; Lent, Michael; Rosenberg, Milton
Reflective Tutoring for Immersive Simulation Proceedings Article
In: Proceedings of the 8th International Conference on Intelligent Tutoring Systems, Jhongli, Taiwan, 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{lane_reflective_2006,
  title     = {Reflective Tutoring for Immersive Simulation},
  author    = {H. Chad Lane and Mark Core and Dave Gomboc and Steve Solomon and Michael Lent and Milton Rosenberg},
  url       = {http://ict.usc.edu/pubs/Reflective%20Tutoring%20for%20Immersive%20Simulation.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Proceedings of the 8th International Conference on Intelligent Tutoring Systems},
  address   = {Jhongli, Taiwan},
  abstract  = {Reflection is critically important for time-constrained training simulations that do not permit extensive tutor-student interactions during an exercise. Here, we describe a reflective tutoring system for a virtual human simulation of negotiation. The tutor helps students review their exercise, elicits where and how they could have done better, and uses explainable artificial intelligence (XAI) to allow students the chance to ask questions about the virtual human's behavior.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Graap, Ken; Pair, Jarrell; Reger; Treskunov, Anton; Parsons, Thomas D.
User-centered design driven development of a virtual reality therapy application for Iraq war combat-related post traumatic stress disorder Proceedings Article
In: Proceedings of the 2006 International Conference on Disability, Virtual Reality and Associated Technology, Esbjerg, Denmark, 2006.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Humans
@inproceedings{rizzo_user-centered_2006,
  title     = {User-centered design driven development of a virtual reality therapy application for Iraq war combat-related post traumatic stress disorder},
  author    = {Albert Rizzo and Ken Graap and Jarrell Pair and Reger and Anton Treskunov and Thomas D. Parsons},
  url       = {http://ict.usc.edu/pubs/User-centered%20design%20driven%20development%20of%20a%20virtual%20reality%20therapy%20application%20for%20Iraq%20war%20combat-related%20post%20traumatic%20stress%20disorder.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Proceedings of the 2006 International Conference on Disability, Virtual Reality and Associated Technology},
  address   = {Esbjerg, Denmark},
  abstract  = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been used with reports of positive outcomes. The aim of the current paper is to present the rationale, technical specifications, application features and user-centered design process for the development of a Virtual Iraq PTSD VR therapy application. The VR treatment environment is being created via the recycling of virtual graphic assets that were initially built for the U.S. Army-funded combat tactical simulation scenario and commercially successful X-Box game, Full Spectrum Warrior, in addition to other available and newly created assets. Thus far we have created a series of customizable virtual scenarios designed to represent relevant contexts for exposure therapy to be conducted in VR, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system in Iraq tested by an Army Combat Stress Control Team. Clinical trials are currently underway at Camp Pendleton and at the San Diego Naval Medical Center. Other sites are preparing to use the application for a variety of PTSD and VR research purposes.},
  keywords  = {MedVR, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Buxbaum, Laurel J.; Palermo, Maryann; Mastrogiovanni, Dina; Read, Mary Schmidt; Rosenberg-Pitonyak, Ellen; Rizzo, Albert; Coslett, H. Branch
Assessment of Spatial Neglect with a Virtual Wheelchair Navigation Task Proceedings Article
In: 5th Annual International Workshop on Virtual Rehabilitation, New York, NY, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@inproceedings{buxbaum_assessment_2006,
  title     = {Assessment of Spatial Neglect with a Virtual Wheelchair Navigation Task},
  author    = {Laurel J. Buxbaum and Maryann Palermo and Dina Mastrogiovanni and Mary Schmidt Read and Ellen Rosenberg-Pitonyak and Albert Rizzo and H. Branch Coslett},
  url       = {http://ict.usc.edu/pubs/Assessment%20of%20Spatial%20Neglect%20with%20a%20Virtual%20Wheelchair%20Navigation%20Task.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {5th Annual International Workshop on Virtual Rehabilitation},
  address   = {New York, NY},
  abstract  = {We report data from 9 participants with right hemisphere stroke on a new virtual reality (VR) wheelchair navigation test designed to assess lateralized spatial attention and neglect. The test consists of a virtual winding path along which participants must navigate (or be navigated by an experimenter) as they name objects encountered along the way. There are 4 VR task conditions, obtained by crossing the factors array complexity (Simple, Complex) and Driver (Participant, Experimenter). Participants performed the VR task, a real-life wheelchair navigation task, and a battery of tests assessing arousal, visual attention under secondary task demands, and neglect. The VR test showed sensitivity to both array complexity and driver, with best performance occurring in the Experimenter Navigated, Simple Array condition. The VR test also showed high correlations with the wheelchair navigation test, and these correlations were in many instances higher than those between traditional clinical neglect tests and the wheelchair navigation task. Moreover, the VR test detected lateralized attention deficits in participants whose performance was within the normal range on other neglect tests. We conclude that the VR task is sensitive to factors likely to affect the severity of neglect in the daily environment, and shows promise as an efficient, easily administered measure of real-life wheelchair navigation.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gandhe, Sudeep; Gordon, Andrew S.; Traum, David
Improving Question-Answering With Linking Dialogues Proceedings Article
In: International Conference on Intelligent User Interfaces (IUI-2006), Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: The Narrative Group, Virtual Humans
@inproceedings{gandhe_improving_2006,
  title     = {Improving Question-Answering With Linking Dialogues},
  author    = {Sudeep Gandhe and Andrew S. Gordon and David Traum},
  url       = {http://ict.usc.edu/pubs/Improving%20Question-Answering%20With%20Linking%20Dialogues%20.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {International Conference on Intelligent User Interfaces (IUI-2006)},
  address   = {Sydney, Australia},
  abstract  = {Question-answering dialogue systems have found many applications in interactive learning environments. This paper is concerned with one such application for Army leadership training, where trainees input free-text questions that elicit pre-recorded video responses. Since these responses are already crafted before the question is asked, a certain degree of incoherence exists between the question that is asked and the answer that is given. This paper explores the use of short linking dialogues that stand in between the question and its video response to alleviate the problem of incoherence. We describe a set of experiments with human generated linking dialogues that demonstrate their added value. We then describe our implementation of an automated method for utilizing linking dialogues and show that these have better coherence properties than the original system without linking dialogues.},
  keywords  = {The Narrative Group, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.; Mao, Wenji
Towards a Validated Model of "Emotional Intelligence" Proceedings Article
In: Proceedings of the 21st National Conference on Artificial Intelligence, pp. 1613–1616, Boston, MA, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_towards_2006,
title = {Towards a Validated Model of ``Emotional Intelligence''},
author = {Jonathan Gratch and Stacy C. Marsella and Wenji Mao},
url = {http://ict.usc.edu/pubs/Towards%20a%20Validated%20Model%20of%20Emotional%20Intelligence.pdf},
year = {2006},
date = {2006-01-01},
booktitle = {Proceedings of the 21st National Conference on Artificial Intelligence},
volume = {2},
pages = {1613--1616},
address = {Boston, MA},
abstract = {This article summarizes recent progress in developing a validated computational account of the cognitive antecedents and consequences of emotion. We describe the potential of this work to impact a variety of AI problem domains.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Tröster, Alexander I.; Parsons, Thomas D.
Sodium Amytal Testing and Language Journal Article
In: Encyclopedia of Language and Linguistics, vol. 11, pp. 500–503, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{troster_sodium_2006,
title = {Sodium Amytal Testing and Language},
author = {Alexander I. Tr{\"o}ster and Thomas D. Parsons},
url = {http://ict.usc.edu/pubs/Sodium%20Amytal%20Testing%20and%20Language.pdf},
year = {2006},
date = {2006-01-01},
journal = {Encyclopedia of Language and Linguistics},
volume = {11},
pages = {500--503},
abstract = {The intracarotid amobarbital test (IAT) was first described by Juhn Wada and thus is often referred to as the `Wada test.' Wada originally developed this technique to study the interhemispheric spread of epileptiform discharges in patients undergoing unilateral electroconvulsive therapy. Based on his observation that an expressive aphasia resulted when the language dominant hemisphere was injected with amobarbital, he reasoned that this technique might be useful in determining hemispheric language dominance in neurosurgical candidates (and thus minimize speech and language dysfunction in patients undergoing dominant hemisphere surgery).},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Tariq, Sarah; Gardner, Andrew; Llamas, Ignacio; Jones, Andrew; Debevec, Paul; Turk, Greg
Efficient Estimation of Spatially Varying Subsurface Scattering Parameters for Relighting Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 01 2006, 2006.
Abstract | Links | BibTeX | Tags: Graphics
@techreport{tariq_efficient_2006,
title = {Efficient Estimation of Spatially Varying Subsurface Scattering Parameters for Relighting},
author = {Sarah Tariq and Andrew Gardner and Ignacio Llamas and Andrew Jones and Paul Debevec and Greg Turk},
url = {http://ict.usc.edu/pubs/ICT-TR-01-2006.pdf},
year = {2006},
date = {2006-01-01},
number = {ICT TR 01 2006},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present an image-based technique to rapidly acquire spatially varying subsurface reflectance properties of a human face. The estimated properties can be used directly to render faces with spatially varying scattering, or can be used to estimate a robust average across the face. We demonstrate our technique with renderings of peoples' faces under novel, spatially-varying illumination and provide comparisons with current techniques. Our captured data consists of images of the face from a single viewpoint under two small sets of projected images. The first set, a sequence of phase shifted periodic stripe patterns, provides a per-pixel profile of how light scatters from adjacent locations. The second set contains structured light and is used to obtain face geometry. We match the observed reflectance profiles to scattering properties predicted by a scattering model using a lookup table. From these properties we can generate images of the face under any incident illumination, including local lighting. The rendered images exhibit realistic subsurface transport, including light bleeding across shadow edges. Our method works more than an order of magnitude faster than current techniques for capturing subsurface scattering information, and makes it possible for the first time to capture these properties over an entire face.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {techreport}
}
Nelson, Nathaniel W.; Parsons, Thomas D.; Grote, Christopher L.; Smith, Clifford A.; Sisung, James R., II
The MMPI-2 Fake Bad Scale: Concordance and Specificity of True and Estimated Scores Journal Article
In: Journal of Clinical and Experimental Neuropsychology, vol. 28, pp. 1–12, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{nelson_mmpi-2_2006,
title = {The MMPI-2 Fake Bad Scale: Concordance and Specificity of True and Estimated Scores},
author = {Nelson, Nathaniel W. and Parsons, Thomas D. and Grote, Christopher L. and Smith, Clifford A. and Sisung, II, James R.},
url = {http://ict.usc.edu/pubs/The%20MMPI-2%20Fake%20Bad%20Scale-%20Concordance%20and%20Specificity%20of%20True%20and%20Estimated%20Scores.pdf},
doi = {10.1080/13803390490919272},
year = {2006},
date = {2006-01-01},
journal = {Journal of Clinical and Experimental Neuropsychology},
volume = {28},
pages = {1--12},
issn = {1380-3395},
abstract = {A number of recent studies have supported the use of the MMPI-2 Fake Bad Scale (FBS) as a measure of negative response bias, the scale at times demonstrating greater sensitivity to negative response bias than other MMPI-2 validity scales. However, clinicians may not always have access to True FBS (T-FBS) scores, such as when True-False answer sheets are unavailable or published research studies do not report FBS raw scores. Under these conditions, Larrabee (2003a) suggests a linear regression formula that provides estimated FBS (E-FBS) scores derived from weighted validity and clinical T-Scores. The present study intended to validate this regression formula of MMPI-2 E-FBS scores and demonstrate its specificity in a sample of non-litigating, clinically referred, medically intractable epilepsy patients. We predicted that the E-FBS scores would correlate highly ($>$.70) with the T-FBS scores, that the E-FBS would show comparable correlations with MMPI-2 validity and clinical scales relative to the T-FBS, and that the E-FBS would show an adequate ability to match T-FBS scores using a variety of previously suggested T-FBS raw score cutoffs. Overall, E-FBS scores correlated very highly with T-FBS scores (r = .78, p $<$ .0001), though correlations were especially high for women (r = .85, p $<$ .0001) compared to men (r = .62, p $<$ .001). Thirty-one of 32 (96.9\%) comparisons made between E-FBS/T-FBS correlates with other MMPI-2 scales were nonsignificant. When matching to T-FBS ``high'' and ``low'' scores, the E-FBS scores demonstrated the highest hit rate (92.5\%) through use of Lees-Haley's (1992) revised cutoffs for men and women. These same cutoffs resulted in excellent overall specificity for both the T-FBS scores (92.5\%) and E-FBS scores (90.6\%). The authors conclude that the E-FBS represents an adequate estimate of T-FBS scores in the current epilepsy sample. Use of E-FBS scores may be especially useful when clinicians conduct the MMPI-2 short form, which does not include all of the 43 FBS items but does include enough items to compute each of the validity and clinical T-Scores. Future studies should examine E-FBS sensitivity in compensation-seekers with incomplete effort.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Robertson, R. Kevin; Parsons, Thomas D.; Sidtis, John J.; Inman, Tina Hanlon; Robertson, Wendy T.; Hall, Colin D.; Price, Richard W.
Timed Gait Test: Normative Data for the Assessment of the AIDS Dementia Complex Journal Article
In: Journal of Clinical and Experimental Neuropsychology, vol. 28, pp. 1053–1064, 2006, ISSN: 1380-3395.
Abstract | Links | BibTeX | Tags: MedVR
@article{robertson_timed_2006,
title = {Timed Gait Test: Normative Data for the Assessment of the AIDS Dementia Complex},
author = {R. Kevin Robertson and Thomas D. Parsons and John J. Sidtis and Tina Hanlon Inman and Wendy T. Robertson and Colin D. Hall and Richard W. Price},
url = {http://ict.usc.edu/pubs/Timed%20Gait%20Test-%20Normative%20Data%20for%20the%20Assessment%20of%20the%20AIDS%20Dementia%20Complex.pdf},
doi = {10.1080/13803390500205684},
issn = {1380-3395},
year = {2006},
date = {2006-01-01},
journal = {Journal of Clinical and Experimental Neuropsychology},
volume = {28},
pages = {1053--1064},
abstract = {The Timed Gait test is a standardized procedure assessing motor dysfunction of lower extremities and gait abnormalities associated with AIDS dementia complex. Heretofore, interpretations of Timed Gait results have been hampered by the lack of normative data. We provide results on this test derived from 1,549 subjects (HIV-seronegatives (HIV-) and seropositives (HIV+) classified according to ADC stage). Timed Gait was found to be a useful screening and assessment tool for evaluating ADC and correlated with clinical ADC staging as well as more extensive structured neurological and neuropsychological evaluations. Analysis of covariance results (with age and education as covariates) revealed symptomatic HIV+(SX) and AIDS groups having significantly slower Timed Gait scores than those in the HIV– and asymptomatic HIV+(ASX) groups. The SX group obtained significantly slower timed gait scores than those in the AIDS group. There was a significant increase in Timed Gait scores with each increase in dementia staging with the HIV- subjects having the fastest mean Timed Gait scores and the HIV+ dementia stage 2+ having the slowest. These normative data should prove useful in both recognition of ADC and treatment response. Given its minimal training requirements, the Timed Gait would have utility in resource limited settings.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Peers, Pieter; Hawkins, Tim; Debevec, Paul
A Reflective Light Stage Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 04 2006, 2006.
Abstract | Links | BibTeX | Tags: Graphics
@techreport{peers_reflective_2006,
title = {A Reflective Light Stage},
author = {Pieter Peers and Tim Hawkins and Paul Debevec},
url = {http://ict.usc.edu/pubs/ICT-TR-04.2006.pdf},
year = {2006},
date = {2006-01-01},
number = {ICT TR 04 2006},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present a novel acquisition device to capture high resolution 4D reflectance fields of real scenes. The device consists of a concave hemispherical surface coated with a rough specular paint and a digital video projector with a fish-eye lens positioned near the center of the hemisphere. The scene is placed near the projector, also near the center, and photographed from a fixed vantage point. The projector projects a high-resolution image of incident illumination which is reflected by the rough hemispherical surface to become the illumination on the scene. We demonstrate the utility of this device by capturing a high resolution hemispherical reflectance field of a specular object which would be difficult to capture using previous acquisition techniques.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {techreport}
}
van der Werf, R. J.
Creating Rapport with Virtual Humans Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 02 2006, 2006.
Abstract | Links | BibTeX | Tags:
@techreport{van_der_werf_creating_2006,
title = {Creating Rapport with Virtual Humans},
author = {van der Werf, R. J.},
url = {http://ict.usc.edu/pubs/ICT-TR.02.2006-Rick.pdf},
year = {2006},
date = {2006-01-01},
number = {ICT TR 02 2006},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {This report describes the internship about the assignment Creating Rapport with Virtual Humans. The assignment is split up into two separate parts. The first part is to improve the visual feature detection of the current mimicking system [MAA04]. This is going to be done using a Computer Vision approach. Together with two other interns [LAM05] the whole mimicking system was improved, leading to a new Rapport system. The second part involves subject testing with the newly developed system. Firstly the goal is to make a working system that can be reused and expanded in the future. Secondly the goal is to use the data from the subject test to determine if rapport can be created with Virtual Humans. The resulting Rapport system should be a very well reusable and expandable system. This system makes it possible for other people, unfamiliar with the system, to easily use the system for future testing. Unfortunately too little data was obtained with subject testing to give a solid conclusion whether or not creating rapport with Virtual Humans is possible. The subject testing did lead to an improved testing procedure which makes future testing quite easy.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Parsons, Thomas D.; Kratz, K. M.; Thompson, E.; Stanczyk, F. Z.; Buckwalter, John Galen
DHEA Supplementation and Cognition in Postmenopausal Women Journal Article
In: International Journal of Neuroscience, vol. 116, pp. 141–155, 2006, ISSN: 0020-7454.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_dhea_2006,
title = {DHEA Supplementation and Cognition in Postmenopausal Women},
author = {Thomas D. Parsons and K. M. Kratz and E. Thompson and F. Z. Stanczyk and John Galen Buckwalter},
url = {http://ict.usc.edu/pubs/DHEA%20Supplementation%20and%20Cognition%20in%20Postmenopausal%20Women.pdf},
doi = {10.1080/00207450500341506},
issn = {0020-7454},
year = {2006},
date = {2006-01-01},
journal = {International Journal of Neuroscience},
volume = {116},
pages = {141--155},
abstract = {Previous work has suggested that DHEA supplementation may have adverse cognitive effects in elderly women. This article analyzed 24-h measurements of DHEA, DHEAS, and cortisol to determine if cognitive decrease with treatment is mediated by DHEA’s impact on endogenous cortisol. It was found that DHEA administration increased cortisol at several hours during the day. In the treatment group, cortisol was positively associated with cognition at study completion. An increase in negative associations between DHEA(S) levels and cognition was found at completion. Increased cortisol does not explain the cognitive deficits associated with DHEA, suggesting a direct negative effect of exogenous DHEA on cognition.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Roque, Antonio; Ai, Hua; Traum, David
Evaluation of an Information State-Based Dialogue Manager Proceedings Article
In: Brandial 2006: The 10th Workshop on the Semantics and Pragmatics of Dialogue, Potsdam, Germany, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{roque_evaluation_2006,
  title     = {Evaluation of an Information State-Based Dialogue Manager},
  author    = {Antonio Roque and Hua Ai and David Traum},
  url       = {http://ict.usc.edu/pubs/Evaluation%20of%20an%20Information%20State-Based%20Dialogue%20Manager.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Brandial 2006: The 10th Workshop on the Semantics and Pragmatics of Dialogue},
  address   = {Potsdam, Germany},
  abstract  = {We describe an evaluation of an information state-based dialogue manager by measuring its accuracy in information state component updating.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Dillenbourg, Pierre; Traum, David
Sharing Solutions: Persistence and Grounding in Multimodal Collaborative Problem Solving Journal Article
In: The Journal of the Learning Sciences, vol. 15, no. 1, pp. 121–151, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{dillenbourg_sharing_2006,
title = {Sharing Solutions: Persistence and Grounding in Multimodal Collaborative Problem Solving},
author = {Pierre Dillenbourg and David Traum},
url = {http://ict.usc.edu/pubs/Sharing%20Solutions-%20Persistence%20and%20Grounding%20in%20Multimodal%20Collaborative%20Problem%20Solving.pdf},
year = {2006},
date = {2006-01-01},
journal = {The Journal of the Learning Sciences},
volume = {15},
number = {1},
pages = {121--151},
abstract = {This article reports on an exploratory study of the relationship between grounding and problem solving in multimodal computer-mediated collaboration. This article examines two different media, a shared whiteboard and a MOO environment that includes a text chat facility. A study was done on how the acknowledgment rate (how often partners give feedback of having perceived, understood, and accepted partner's contributions) varies according to the media and the content of interactions. It was expected that the whiteboard would serve to draw schemata that disambiguate chat utterances. Instead, results show that the whiteboard is primarily used to represent the state of problem solving and the chat is used for grounding information created on the whiteboard. These results are interpreted in terms of persistence: More persistent information is exchanged through the more persistent medium. The whiteboard was used as a shared memory rather than a grounding tool.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Swartout, William; Gratch, Jonathan; Hill, Randall W.; Hovy, Eduard; Lindheim, Richard; Marsella, Stacy C.; Rickel, Jeff; Traum, David
Simulation Meets Hollywood: Integrating Graphics, Sound, Story and Character for Immersive Simulation Book Section
In: Multimodal Intelligent Information Presentation, vol. 27, pp. 305–321, Springer, Netherlands, 2006.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{swartout_simulation_2006,
title = {Simulation Meets Hollywood: Integrating Graphics, Sound, Story and Character for Immersive Simulation},
author = {William Swartout and Jonathan Gratch and Randall W. Hill and Eduard Hovy and Richard Lindheim and Stacy C. Marsella and Jeff Rickel and David Traum},
url = {http://ict.usc.edu/pubs/SIMULATION%20MEETS%20HOLLYWOOD-%20Integrating%20Graphics,%20Sound,%20Story%20and%20Character%20for%20Immersive%20Simulation.pdf},
year = {2006},
date = {2006-01-01},
booktitle = {Multimodal Intelligent Information Presentation},
volume = {27},
pages = {305--321},
publisher = {Springer},
address = {Netherlands},
abstract = {The Institute for Creative Technologies was created at the University of Southern California with the goal of bringing together researchers in simulation technology to collaborate with people from the entertainment industry. The idea was that much more compelling simulations could be developed if researchers who understood state-of-the-art simulation technology worked together with writers and directors who knew how to create compelling stories and characters. This paper presents our first major effort to realize that vision, the Mission Rehearsal Exercise Project, which confronts a soldier trainee with the kinds of dilemmas he might reasonably encounter in a peacekeeping operation. The trainee is immersed in a synthetic world and interacts with virtual humans: artificially intelligent and graphically embodied conversational agents that understand and generate natural language, reason about world events and respond appropriately to the trainee's actions or commands. This project is an ambitious exercise in integration, both in the sense of integrating technology with entertainment industry content, but also in that we have also joined a number of component technologies that have not been integrated before. This integration has not only raised new research issues, but it has also suggested some new approaches to difficult problems. In this paper we describe the Mission Rehearsal Exercise system and the insights gained through this large-scale integration.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Rosenbloom, Paul
A Cognitive Odyssey: From the Power Law of Practice to a General Learning Mechanism and Beyond Journal Article
In: Tutorials in Quantitative Methods for Psychology, vol. 2, no. 2, pp. 43–51, 2006.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, Virtual Humans
@article{rosenbloom_cognitive_2006,
title = {A Cognitive Odyssey: From the Power Law of Practice to a General Learning Mechanism and Beyond},
author = {Paul Rosenbloom},
url = {http://ict.usc.edu/pubs/A%20Cognitive%20Odyssey-%20From%20the%20Power%20Law%20of%20Practice%20to%20a%20General%20Learning%20Mechanism%20and%20Beyond.pdf},
year = {2006},
date = {2006-01-01},
journal = {Tutorials in Quantitative Methods for Psychology},
volume = {2},
number = {2},
pages = {43--51},
abstract = {This article traces a line of research that began with the establishment of a pervasive regularity in human performance – the Power Law of Practice – and proceeded through several decades' worth of investigations that this opened up into learning and cognitive architecture. The results touch on both cognitive psychology and artificial intelligence, and more specifically on the possibility of building general learning mechanisms/systems. It is a story whose final chapter is still to be written.},
keywords = {CogArch, Cognitive Architecture, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Feintuch, Uri; Liat, Raz; Hwang, Jane; Josman, Naomi; Katz, Noomi; Kizony, Rachel; Rand, Debbie; Rizzo, Albert; Shahar, Meir; Yongseok, Jang; Weiss, Patrice L. (Tamar)
Integrating haptic-tactile feedback into a video capture based VE for rehabilitation Journal Article
In: CyberPsychology and Behavior, vol. 9, no. 2, pp. 129–132, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{feintuch_integrating_2006,
title = {Integrating haptic-tactile feedback into a video capture based VE for rehabilitation},
author = {Uri Feintuch and Raz Liat and Jane Hwang and Naomi Josman and Noomi Katz and Rachel Kizony and Debbie Rand and Albert Rizzo and Meir Shahar and Jang Yongseok and Patrice L. (Tamar) Weiss},
url = {http://ict.usc.edu/pubs/Integrating%20Haptic-Tactile%20Feedback%20into%20a%20Video-Capture%E2%80%93Based%20Virtual%20Environment%20for%20Rehabilitation.pdf},
year = {2006},
date = {2006-01-01},
journal = {CyberPsychology and Behavior},
volume = {9},
number = {2},
pages = {129--132},
abstract = {Video-capture virtual reality (VR) systems are gaining popularity as intervention tools. To date, these platforms offer visual and audio feedback but do not provide haptic feedback. We contend that adding haptic feedback may enhance the quality of intervention for various theoretical and empirical reasons. This study aims to integrate haptic-tactile feedback into a video capture system (GX VR), which is currently applied for rehabilitation. The proposed multi-modal system can deliver audio-visual as well as vibrotactile feedback. The latter is provided via small vibratory discs attached to the patient's limbs. This paper describes the system, the guidelines of its design, and the ongoing usability study.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Robertson, R. Kevin; Parsons, Thomas D.; Horst, Charles; Hall, Colin D.
Thoughts of death and suicidal ideation in nonpsychiatric human immunodeficiency virus seropositive individuals Journal Article
In: Death Studies, vol. 30, pp. 455–469, 2006, ISSN: 0748-1187.
Abstract | Links | BibTeX | Tags: MedVR
@article{robertson_thoughts_2006,
title = {Thoughts of death and suicidal ideation in nonpsychiatric human immunodeficiency virus seropositive individuals},
author = {R. Kevin Robertson and Thomas D. Parsons and Charles Horst and Colin D. Hall},
url = {http://ict.usc.edu/pubs/THOUGHTS%20OF%20DEATH%20AND%20SUICIDAL%20IDEATION%20IN%20NONPSYCHIATRIC%20HUMAN%20IMMUNODEFICIENCY%20VIRUS%20SEROPOSITIVE%20INDIVIDUALS.pdf},
doi = {10.1080/07481180600614435},
issn = {0748-1187},
year = {2006},
date = {2006-01-01},
journal = {Death Studies},
volume = {30},
pages = {455--469},
abstract = {The present study examines the prevalence of death thoughts and suicidality in HIV infection. Subjects (n = 246) were examined for psychiatric morbidity and suicidality. Compared to high risk HIV seronegatives, HIV seropositives (HIV+) had significantly increased frequency and severity of both suicidal ideation and death thoughts. Two-thirds of seropositives had suicidal ideation at some point; half of the seropositives reported suicide plans and one quarter suicide attempts; and a third of seropositives reported current suicidal ideation. Suicidal ideation did not increase with advancing disease. The high prevalence of suicidal ideation suggests inclusion of its assessment in HIV treatment regardless of stage.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Woods, Steven Paul; Rippeth, Julie D.; Conover, Emily; Carey, Catherine L.; Parsons, Thomas D.; Tröster, Alexander I.
Statistical Power of Studies Examining the Cognitive Effects of Subthalamic Nucleus Deep Brain Stimulation in Parkinson's Disease Journal Article
In: The Clinical Neuropsychologist, vol. 20, pp. 27–38, 2006, ISSN: 1385-4046.
Abstract | Links | BibTeX | Tags: MedVR
@article{woods_statistical_2006,
title = {Statistical Power of Studies Examining the Cognitive Effects of Subthalamic Nucleus Deep Brain Stimulation in Parkinson's Disease},
author = {Steven Paul Woods and Julie D. Rippeth and Emily Conover and Catherine L. Carey and Thomas D. Parsons and Alexander I. Tröster},
url = {http://ict.usc.edu/pubs/STATISTICAL%20POWER%20OF%20STUDIES%20EXAMINING%20THE%20COGNITIVE%20EFFECTS%20OF%20SUBTHALAMIC%20NUCLEUS%20DEEP%20BRAIN%20STIMULATION%20IN%20PARKINSON%E2%80%99S%20DISEASE.pdf},
doi = {10.1080/13854040500203290},
issn = {1385-4046},
year = {2006},
date = {2006-01-01},
journal = {The Clinical Neuropsychologist},
volume = {20},
pages = {27--38},
abstract = {It has been argued that neuropsychological studies generally possess adequate statistical power to detect large effect sizes. However, low statistical power is problematic in neuropsychological research involving clinical populations and novel interventions for which available sample sizes are often limited. One notable example of this problem is evident in the literature regarding the cognitive sequelae of deep brain stimulation (DBS) of the subthalamic nucleus (STN) in persons with Parkinson's disease (PD). In the current review, a post hoc estimate of the statistical power of 30 studies examining cognitive effects of STN DBS in PD revealed adequate power to detect substantial cognitive declines (i.e., very large effect sizes), but surprisingly low estimated power to detect cognitive changes associated with conventionally small, medium, and large effect sizes. Such wide spread Type II error risk in the STN DBS cognitive outcomes literature may affect the clinical decision-making process as concerns the possible risk of postsurgical cognitive morbidity, as well as conceptual inferences to be drawn regarding the role of the STN in higher-level cognitive functions. Statistical and methodological recommendations (e.g., meta-analysis) are offered to enhance the power of current and future studies examining the neuropsychological sequelae of STN DBS in PD.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Martinovski, Bilyana
Framework for analysis of mitigation in courts Journal Article
In: Journal of Pragmatics, 2006.
Abstract | Links | BibTeX | Tags:
@article{martinovski_framework_2006,
  title     = {Framework for analysis of mitigation in courts},
  author    = {Bilyana Martinovski},
  url       = {http://ict.usc.edu/pubs/Framework%20for%20analysis%20of%20mitigation%20in%20courts.pdf},
  year      = {2006},
  date      = {2006-01-01},
  journal   = {Journal of Pragmatics},
  abstract  = {This paper presents an activity-based framework for empirical discourse analysis of mitigation in public environments such as Swedish and Bulgarian courtroom examinations. Mitigation is defined as a pragmatic, cognitive and linguistic behavior the main purpose of which is reduction of vulnerability. The suggested framework consists of mitigation processes, which involve mitigating argumentation lines, defense moves, and communicative acts. The functions of mitigation are described in terms of the participants' actions and goals separately from politeness strategies. The conclusions and observations address two things: issues related to the pragmatic theory of communication especially mitigation and issues related to the trial as a social activity. For instance, non-turn-taking confirmations by examiners are often followed by volunteered utterances, which in some cases may be examples of 'rehearsed' testimonies. At the same time the witnesses' tendency to volunteer information even on the behalf of their own credibility indicates that they also favor pro-party testimonies. Despite the objective judicial role of the prosecutor or judge and/or despite the examiners accommodating style the verbal behavior of the witnesses exhibits constant anticipation of danger.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Leuski, Anton; Pair, Jarrell; Traum, David; McNerney, Peter J.; Georgiou, Panayiotis G.; Patel, Ronakkumar
How to Talk to a Hologram Proceedings Article
In: Proceedings of the 11th International Conference on Intelligent User Interfaces, Sydney, Australia, 2006.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{leuski_how_2006,
  title     = {How to Talk to a Hologram},
  author    = {Anton Leuski and Jarrell Pair and David Traum and Peter J. McNerney and Panayiotis G. Georgiou and Ronakkumar Patel},
  url       = {http://ict.usc.edu/pubs/How%20to%20Talk%20to%20a%20Hologram.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Proceedings of the 11th International Conference on Intelligent User Interfaces},
  address   = {Sydney, Australia},
  abstract  = {There is a growing need for creating life-like virtual human simulations that can conduct a natural spoken dialog with a human student on a predefined subject. We present an overview of a spoken-dialog system that supports a person interacting with a full-size hologram-like virtual human character in an exhibition kiosk settings. We also give a brief summary of the natural language classification component of the system and describe the experiments we conducted with the system.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Parsons, Thomas D.; Braaten, Alyssa J.; Hall, Colin D.; Robertson, R. Kevin
Better quality of life with neuropsychological improvement on HAART Journal Article
In: Health and Quality of Life Outcomes, vol. 4, no. 11, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{parsons_better_2006,
title = {Better quality of life with neuropsychological improvement on HAART},
author = {Thomas D. Parsons and Alyssa J. Braaten and Colin D. Hall and R. Kevin Robertson},
url = {http://ict.usc.edu/pubs/Better%20quality%20of%20life%20with%20neuropsychological%20improvement%20on%20HAART.pdf},
year = {2006},
date = {2006-01-01},
journal = {Health and Quality of Life Outcomes},
volume = {4},
number = {11},
abstract = {Background: Successful highly active antiretroviral therapy (HAART) regimens have resulted in substantial improvements in the systemic health of HIV infected persons and increased survival times. Despite increased systemic health, the prevalence of minor HIV-associated cognitive impairment appears to be rising with increased longevity, and it remains to be seen what functional outcomes will result from these improvements. Cognitive impairment can dramatically impact functional ability and day-to-day productivity. We assessed the relationship of quality of life (QOL) and neuropsychological functioning with successful HAART treatment. Methods: In a prospective longitudinal study, subjects were evaluated before instituting HAART (naïve) or before changing HAART regimens because current therapy failed to maintain suppression of plasma viral load (treatment failure). Subjects underwent detailed neuropsychological and neurological examinations, as well as psychological evaluation sensitive to possible confounds. Re-evaluation was performed six months after institution of the new HAART regimen and/or if plasma viral load indicated treatment failure. At each evaluation, subjects underwent ultrasensitive HIV RNA quantitative evaluation in both plasma and cerebrospinal fluid. Results: HAART successes performed better than failures on measures exploring speed of mental processing (p < .02). HAART failure was significantly associated with increased self-reports of physical health complaints (p < .01) and substance abuse (p < .01). An interesting trend emerged, in which HAART failures endorsed greater levels of psychological and cognitive complaints (p = .06). Analysis between neuropsychological measures and QOL scores revealed significant correlation between QOL Total and processing speed (p < .05), as well as flexibility (p < .05). Conclusion: Our study investigated the relationship between HIV-associated neurocognitive impairment and quality of life. HAART failures experienced slower psychomotor processing, and had increased self-reports of physical health complaints and substance abuse. Contrariwise, HAART successes experienced improved mental processing, demonstrating the impact of successful treatment on functioning. With increasing life expectancy for those who are HIV seropositive, it is important to measure cognitive functioning in relation to the actual QOL these individuals report. The study results have implications for the optimal management of HIV-infected persons. Specific support or intervention may be beneficial for those who have failed HAART in order to decrease substance abuse and increase overall physical health.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Gratch, Jonathan; Mao, Wenji; Marsella, Stacy C.
Modeling Social Emotions and Social Attributions Book Section
In: Sun, R. (Ed.): Cognition and Multi-Agent Interaction: Extending Cognitive Modeling to Social Simulation, Cambridge University Press, 2006.
Links | BibTeX | Tags: Social Simulation, Virtual Humans
@incollection{gratch_modeling_2006,
  title     = {Modeling Social Emotions and Social Attributions},
  author    = {Jonathan Gratch and Wenji Mao and Stacy C. Marsella},
  editor    = {R. Sun},
  url       = {http://ict.usc.edu/pubs/Modeling%20Social%20Emotions%20and%20Social%20Attributions.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Cognition and Multi-Agent Interaction: Extending Cognitive Modeling to Social Simulation},
  publisher = {Cambridge University Press},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Gold, Jeffrey I.; Kim, Seok Hyeon; Kant, Alexis J.; Joseph, Michael H.; Rizzo, Albert
Effectiveness of Virtual Reality for Pediatric Pain Distraction during IV Placement Journal Article
In: CyberPsychology and Behavior, vol. 9, no. 2, pp. 207–212, 2006.
Abstract | Links | BibTeX | Tags: MedVR
@article{gold_effectiveness_2006,
title = {Effectiveness of Virtual Reality for Pediatric Pain Distraction during IV Placement},
author = {Jeffrey I. Gold and Seok Hyeon Kim and Alexis J. Kant and Michael H. Joseph and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Effectiveness%20of%20Virtual%20Reality%20for%20Pediatric%20Pain%20Distraction%20during%20IV%20Placement.pdf},
year = {2006},
date = {2006-01-01},
journal = {CyberPsychology and Behavior},
volume = {9},
number = {2},
pages = {207--212},
abstract = {The objective of this study was to test the efficacy and suitability of virtual reality (VR) as a pain distraction for pediatric intravenous (IV) placement. Twenty children (12 boys, 8 girls) requiring IV placement for a magnetic resonance imaging/computed tomography (MRI/CT) scan were randomly assigned to two conditions: (1) VR distraction using Street Luge (5DT), presented via a head-mounted display, or (2) standard of care (topical anesthetic) with no distraction. Children, their parents, and nurses completed self-report questionnaires that assessed numerous health-related outcomes. Responses from the Faces Pain Scale–Revised indicated a fourfold increase in affective pain within the control condition; by contrast, no significant differences were detected within the VR condition. Significant associations between multiple measures of anticipatory anxiety, affective pain, IV pain intensity, and measures of past procedural pain provided support for the complex interplay of a multimodal assessment of pain perception. There was also a sufficient amount of evidence supporting the efficacy of Street Luge as a pediatric pain distraction tool during IV placement: an adequate level of presence, no simulator sickness, and significantly more child-, parent-, and nurse-reported satisfaction with pain management. VR pain distraction was positively endorsed by all reporters and is a promising tool for decreasing pain, and anxiety in children undergoing acute medical interventions. However, further research with larger sample sizes and other routine medical procedures is warranted.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {article}
}
Tortell, Rebecca; Morie, Jacquelyn
Videogame play and the effectiveness of virtual environments for training Proceedings Article
In: Interservice/Industry Training, Simulation and Education Conference (I/ITSEC), 2006.
Abstract | Links | BibTeX | Tags:
@inproceedings{tortell_videogame_2006,
  title     = {Videogame play and the effectiveness of virtual environments for training},
  author    = {Rebecca Tortell and Jacquelyn Morie},
  url       = {http://ict.usc.edu/pubs/Videogame%20play%20and%20the%20effectiveness%20of%20virtual%20environments%20for%20training.pdf},
  year      = {2006},
  date      = {2006-01-01},
  booktitle = {Interservice/Industry Training, Simulation and Education Conference (I/ITSEC)},
  abstract  = {The Sensory Environments Evaluation (SEE) project set out to examine the effects of emotional valence of a virtual training scenario on learning and memory. Emotional arousal is well-established as having enhancing effects on memory (McGaugh, 2000). A virtual scenario called DarkCon was created to resemble a night-time reconnaissance mission. Priming of subjects was the first experimental variable. Subjects were randomly assigned to receive their mission briefing in a serious style, suggesting a serious military mission, or in a lighter style, suggesting a fun roleplaying game. The influence of videogame experience was included in analysis of subjects' recall of the environment and of their physiology. In the present study, 34 Army Rangers from Fort Benning, GA underwent the DarkCon mission. Significant effects of priming condition and videogame play were discovered in subjects' recollection of the mission, and in their physiological reactions to highly exciting material. This paper is primarily concerned with the effects of videogame play frequency on subjects' behavior, recall, and physiology. The effects of priming will be cursorily discussed here as they relate to videogame play habits, and explored in more detail on their own in future publications. Directions for future research into the effects of videogame play experience on training are discussed.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2005
Cantzos, Demetrios; Kyriakakis, Chris
Quality Enhancement of Low Bit Rate MPEG1-Layer 3 Audio Based on Audio Resynthesis Proceedings Article
In: Proceedings of the 119th Audio Engineering Society Convention, New York, NY, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{cantzos_quality_2005,
  title     = {Quality Enhancement of Low Bit Rate MPEG1-Layer 3 Audio Based on Audio Resynthesis},
  author    = {Demetrios Cantzos and Chris Kyriakakis},
  url       = {http://ict.usc.edu/pubs/Quality%20Enhancement%20of%20Low%20Bit%20Rate%20MPEG1-Layer%203%20Audio%20Based%20on%20Audio%20Resynthesis.pdf},
  booktitle = {Proceedings of the 119th Audio Engineering Society Convention},
  address   = {New York, NY},
  year      = {2005},
  date      = {2005-10-01},
  abstract  = {One of the most popular audio compression formats is indisputably the MPEG1-Layer 3 format which is based on the idea of low-bit transparent encoding. As these types of audio signals are starting to migrate from portable players with inexpensive headphones to higher quality home audio systems, it is becoming evident that higher bit rates may be required to maintain transparency. We propose a novel method that enhances low bit rate MP3 encoded audio segments by applying multichannel audio resynthesis methods in a post-processing stage or during decoding. Our algorithm employs the highly efficient Generalized Gaussian mixture model which, combined with cepstral smoothing, leads to very low cepstral reconstruction errors. In addition, residual conversion is applied which proves to significantly improve the enhancement performance. The method presented can be easily generalized to include other audio formats for which sound quality is an issue.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Ganesan, Kavita
Automated Story Capture From Conversational Speech Proceedings Article
In: 3rd International Conference on Knowledge Capture (K-CAP 05), Banff, Alberta, Canada, 2005.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_automated_2005,
  title     = {Automated Story Capture From Conversational Speech},
  author    = {Andrew S. Gordon and Kavita Ganesan},
  url       = {http://ict.usc.edu/pubs/Automated%20Story%20Capture%20From%20Conversational%20Speech.pdf},
  booktitle = {3rd International Conference on Knowledge Capture (K-CAP 05)},
  address   = {Banff, Alberta, Canada},
  year      = {2005},
  date      = {2005-10-01},
  abstract  = {While storytelling has long been recognized as an important part of effective knowledge management in organizations, knowledge management technologies have generally not distinguished between stories and other types of discourse. In this paper we describe a new type of technological support for storytelling that involves automatically capturing the stories that people tell to each other in conversations. We describe our first attempt at constructing an automated story extraction system using statistical text classification and a simple voting scheme. We evaluate the performance of this system and demonstrate that useful levels of precision and recall can be obtained when analyzing transcripts of interviews, but that performance on speech recognition data is not above what can be expected by chance. This paper establishes the level of performance that can be obtained using a straightforward approach to story extraction, and outlines ways in which future systems can improve on these results and enable a wide range of knowledge socialization applications.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Swartout, William; Gratch, Jonathan; Marsella, Stacy C.; Kenny, Patrick G.; Hovy, Eduard; Narayanan, Shrikanth; Fast, Edward; Martinovski, Bilyana; Baghat, Rahul; Robinson, Susan; Marshall, Andrew; Wang, Dagen; Gandhe, Sudeep; Leuski, Anton
Dealing with Doctors: A Virtual Human for Non-team Interaction Proceedings Article
In: 6th SIGdial Conference on Discourse and Dialogue, Lisbon, Portugal, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_dealing_2005,
  title     = {Dealing with Doctors: A Virtual Human for Non-team Interaction},
  author    = {David Traum and William Swartout and Jonathan Gratch and Stacy C. Marsella and Patrick G. Kenny and Eduard Hovy and Shrikanth Narayanan and Edward Fast and Bilyana Martinovski and Rahul Baghat and Susan Robinson and Andrew Marshall and Dagen Wang and Sudeep Gandhe and Anton Leuski},
  url       = {http://ict.usc.edu/pubs/Dealing%20with%20Doctors.pdf},
  booktitle = {6th SIGdial Conference on Discourse and Dialogue},
  address   = {Lisbon, Portugal},
  year      = {2005},
  date      = {2005-09-01},
  abstract  = {We present a virtual human doctor who can engage in multi-modal negotiation dialogue with people from other organizations. The doctor is part of the SASO-ST system, used for training for non-team interactions.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Swartout, William; Marsella, Stacy C.; Gratch, Jonathan
Fight, Flight, or Negotiate: Believable Strategies for Conversing under Crisis Proceedings Article
In: 5th International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_fight_2005,
  title     = {Fight, Flight, or Negotiate: Believable Strategies for Conversing under Crisis},
  author    = {David Traum and William Swartout and Stacy C. Marsella and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Fight,%20Flight,%20or%20Negotiate-%20Believable%20Strategies%20for%20Conversing%20under%20Crisis.pdf},
  booktitle = {5th International Working Conference on Intelligent Virtual Agents},
  address   = {Kos, Greece},
  year      = {2005},
  date      = {2005-09-01},
  abstract  = {This paper describes a model of conversation strategies implemented in virtual humans designed to help people learn negotiation skills. We motivate and discuss these strategies and their use to allow a virtual human to engage in complex adversarial negotiation with a human trainee. Choice of strategy depends on both the personality of the agent and assessment of the likelihood that the negotiation can be beneficial. Execution of strategies can be performed by choosing specific dialogue behaviors such as whether and how to respond to a proposal. Current assessment of the value of the topic, the utility of the strategy, and affiliation toward the other conversants can be used to dynamically change strategies throughout the course of a conversation. Examples will be given from the SASO-ST project, in which a trainee learns to negotiate by interacting with virtual humans who employ these strategies.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Morie, Jacquelyn; Iyer, Kumar; Luigi, Donat-Pierre; Williams, Josh; Dozois, Aimee; Rizzo, Albert
Development of a Data Management Tool for Investigating Multivariate Space and Free Will Experiences Journal Article
In: Applied Psychophysiology and Biofeedback, vol. 30, no. 3, pp. 319–331, 2005.
Abstract | Links | BibTeX | Tags: MedVR, Virtual Worlds
@article{morie_development_2005,
  title     = {Development of a Data Management Tool for Investigating Multivariate Space and Free Will Experiences},
  author    = {Jacquelyn Morie and Kumar Iyer and Donat-Pierre Luigi and Josh Williams and Aimee Dozois and Albert Rizzo},
  url       = {http://ict.usc.edu/pubs/Development%20of%20a%20Data%20Management%20Tool%20for%20Investigating%20Multivariate%20Space%20and%20Free%20Will%20Experiences%20in%20Virtual%20Reality.pdf},
  journal   = {Applied Psychophysiology and Biofeedback},
  volume    = {30},
  number    = {3},
  pages     = {319--331},
  year      = {2005},
  date      = {2005-09-01},
  abstract  = {While achieving realism has been a main goal in making convincing virtual reality (VR) environments, just what constitutes realism is still a question situated firmly in the research domain. VR has become mature enough to be used in therapeutic applications such as clinical exposure therapy with some success. We now need detailed scientific investigations to better understand why VR works for these types of cases, and how it could work for other key applications such as training. Just as in real life, it appears that the factors will be complex and multi-variate, and this plethoric situation presents exceptional challenges to the VR researcher. We would not want to lessen VR’s ability to replicate real world conditions in order to more easily study it, however, for by doing so we may compromise the very qualities that comprise its effectiveness. What is really needed are more robust tools to instrument, organize, and visualize the complex data generated by measurements of participant experiences in a realistic virtual world. We describe here our first study in an ongoing program of effective virtual environment research, the types of data we are dealing with, and a specific tool we have been compelled to create that allows us some measure of control over this data. We call this tool Phloem, after the botanical channels that plants use to transport, support and store nutrients.},
  keywords  = {MedVR, Virtual Worlds},
  pubstate  = {published},
  tppubtype = {article}
}
Debevec, Paul
Capturing and Simulating Physically Accurate Illumination in Computer Graphics Proceedings Article
In: 11th Annual Symposium on Frontiers of Engineering, Niskayuna, NY, 2005.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{debevec_capturing_2005,
  title     = {Capturing and Simulating Physically Accurate Illumination in Computer Graphics},
  author    = {Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Capturing%20and%20Simulating%20Physically%20Accurate%20Illumination%20in%20Computer%20Graphics.pdf},
  booktitle = {11th Annual Symposium on Frontiers of Engineering},
  address   = {Niskayuna, NY},
  year      = {2005},
  date      = {2005-09-01},
  abstract  = {Anyone who has seen a recent summer blockbuster has witnessed the dramatic increases in computer-generated realism in recent years. Visual effects supervisors now report that bringing even the most challenging visions of film directors to the screen is no longer a question of what's possible; with today's techniques it is only a matter of time and cost. Driving this increase in realism have been computer graphics (CG) techniques for simulating how light travels within a scene and for simulating how light reflects off of and through surfaces. These techniques—some developed recently, and some originating in the 1980's—are being applied to the visual effects process by computer graphics artists who have found ways to channel the power of these new tools.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kallman, Marcelo; Marsella, Stacy C.
Hierarchical Motion Controllers for Real-Time Autonomous Virtual Humans Proceedings Article
In: International Working Conference on Intelligent Virtual Agents, Kos, Greece, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{kallman_hierarchical_2005,
  title     = {Hierarchical Motion Controllers for Real-Time Autonomous Virtual Humans},
  author    = {Marcelo Kallman and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Hierarchical%20Motion%20Controllers%20for%20Real-Time%20Autonomous%20Virtual%20Humans.pdf},
  booktitle = {International Working Conference on Intelligent Virtual Agents},
  address   = {Kos, Greece},
  year      = {2005},
  date      = {2005-09-01},
  abstract  = {Continuous and synchronized whole-body motions are essential for achieving believable autonomous virtual humans in interactive applications. We present a new motion control architecture based on generic controllers that can be hierarchically interconnected and reused in real-time. The hierarchical organization implies that leaf controllers are motion generators while the other nodes are connectors, performing operations such as interpolation, blending, and precise scheduling of children controllers. We also describe how the system can correctly handle the synchronization of gestures with speech in order to achieve believable conversational characters. For that purpose, different types of controllers implement a generic model of the different phases of a gesture.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kwon, Soon-il; Narayanan, Shrikanth
Unsupervised Speaker Indexing Using Generic Models Journal Article
In: IEEE Transactions on Speech and Audio Processing, vol. 13, no. 5, pp. 1004–1013, 2005.
Abstract | Links | BibTeX | Tags:
@article{kwon_unsupervised_2005,
  title     = {Unsupervised Speaker Indexing Using Generic Models},
  author    = {Soon-il Kwon and Shrikanth Narayanan},
  url       = {http://ict.usc.edu/pubs/Unsupervised%20Speaker%20Indexing%20Using%20Generic%20Models.pdf},
  journal   = {IEEE Transactions on Speech and Audio Processing},
  volume    = {13},
  number    = {5},
  pages     = {1004--1013},
  year      = {2005},
  date      = {2005-09-01},
  abstract  = {Unsupervised speaker indexing sequentially detects points where a speaker identity changes in a multispeaker audio stream, and categorizes each speaker segment, without any prior knowledge about the speakers. This paper addresses two challenges: The first relates to sequential speaker change detection. The second relates to speaker modeling in light of the fact that the number/identity of the speakers is unknown. To address this issue, a predetermined generic speaker-independent model set, called the sample speaker models (SSM), is proposed. This set can be useful for more accurate speaker modeling and clustering without requiring training models on target speaker data. Once a speaker-independent model is selected from the generic sample models, it is progressively adapted into a specific speaker-dependent model. Experiments were performed with data from the Speaker Recognition Benchmark NIST Speech corpus (1999) and the HUB-4 Broadcast News Evaluation English Test material (1999). Results showed that our new technique, sampled using the Markov Chain Monte Carlo method, gave 92.5% indexing accuracy on two speaker telephone conversations, 89.6% on four-speaker conversations with the telephone speech quality, and 87.2% on broadcast news. The SSMs outperformed the universal background model by up to 29.4% and the universal gender models by up to 22.5% in indexing accuracy in the experiments of this paper.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Jones, Andrew; Gardner, Andrew; Bolas, Mark; McDowall, Ian; Debevec, Paul
Performance Geometry Capture for Spatially Varying Relighting Proceedings Article
In: SIGGRAPH 2005 Sketch, Los Angeles, CA, 2005.
Links | BibTeX | Tags: Graphics, MxR
@inproceedings{jones_performance_2005,
  title     = {Performance Geometry Capture for Spatially Varying Relighting},
  author    = {Andrew Jones and Andrew Gardner and Mark Bolas and Ian McDowall and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Performance%20Geometry%20Capture%20for%20Spatially%20Varying%20Relighting.pdf},
  booktitle = {SIGGRAPH 2005 Sketch},
  address   = {Los Angeles, CA},
  year      = {2005},
  date      = {2005-08-01},
  keywords  = {Graphics, MxR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Patel, Sanjit; Chu, Anson; Cohen, Jonathan; Pighin, Frédéric
Fluid Simulation Via Disjoint Translating Grids Proceedings Article
In: Special Interest Group - Graphics Technical Sketch, Los Angeles, CA, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{patel_fluid_2005,
  title     = {Fluid Simulation Via Disjoint Translating Grids},
  author    = {Sanjit Patel and Anson Chu and Jonathan Cohen and Frédéric Pighin},
  url       = {http://ict.usc.edu/pubs/Fluid%20Simulation%20Via%20Disjoint%20Translating%20Grids.pdf},
  booktitle = {Special Interest Group - Graphics Technical Sketch},
  address   = {Los Angeles, CA},
  year      = {2005},
  date      = {2005-08-01},
  abstract  = {We present an adaptive fluid simulation technique that splits the computation domain in multiple moving grids. Using this technique, we are able to simulate fluids over large spatial domains with reasonable computation times.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Debevec, Paul
A Median Cut Algorithm for Light Probe Sampling Proceedings Article
In: SIGGRAPH (Special Interest Group - Graphics), Los Angeles, CA, 2005.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{debevec_median_2005,
  title     = {A Median Cut Algorithm for Light Probe Sampling},
  author    = {Paul Debevec},
  url       = {http://ict.usc.edu/pubs/A%20Median%20Cut%20Algorithm%20for%20Light%20Probe%20Sampling.pdf},
  booktitle = {SIGGRAPH (Special Interest Group - Graphics)},
  address   = {Los Angeles, CA},
  year      = {2005},
  date      = {2005-08-01},
  abstract  = {We present a technique for approximating a light probe image as a constellation of light sources based on a median cut algorithm. The algorithm is efficient, simple to implement, and can realistically represent a complex lighting environment with as few as 64 point light sources.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Morie, Jacquelyn; Williams, Josh; Dozois, Aimee; Luigi, Donat-Pierre
The Fidelity of "Feel": Emotional Affordance in Virtual Environments Proceedings Article
In: 11th International Conference on Human-Computer Interaction, Las Vegas, NV, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{morie_fidelity_2005,
  title     = {The Fidelity of "Feel": Emotional Affordance in Virtual Environments},
  author    = {Jacquelyn Morie and Josh Williams and Aimee Dozois and Donat-Pierre Luigi},
  url       = {http://ict.usc.edu/pubs/The%20Fidelity%20of%20Feel-%20Emotional%20Affordance%20in%20Virtual%20Environments.pdf},
  booktitle = {11th International Conference on Human-Computer Interaction},
  address   = {Las Vegas, NV},
  year      = {2005},
  date      = {2005-07-01},
  abstract  = {Virtual environments (VEs) should be able to provide experiences as rich and complex as those to be had in real life. While this seems obvious, it is not yet possible to create a perfect simulacrum of the real world, so such correspondence requires the development of design techniques by which VEs can be made to appear more real. It also requires evaluation studies to determine if such techniques produce the desired results. As emotions are implicated in our phenomenological understanding of the physical world, they should also play an integral role in the experience of the virtual one. Therefore, a logical sequence of experimentation to understand how VEs can be made to function as emotion-induction systems is in order. The Sensory Environments Evaluation (SEE) research program has developed a twofold design process to explore if we react to virtually supplied stimuli as we do to the real world equivalents. We look at manipulating both the sensory and emotional aspects of not only the environment but also the participant. We do this with the focus on what emotional affordances this manipulation will provide. Our first evaluation scenario, DarkCon, was designed in this way to produce a strong sense of presence. Sixty-four subjects have been fielded to date and the data is currently being analyzed for results. We hope to find that rich design techniques along with the frame of mind with which a VR experience is approached will predictably influence perception and behavior within a virtual world. We will use these results to inform continuing research into the creation of more emotionally affective VEs.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Lane, H. Chad; Core, Mark; Lent, Michael; Solomon, Steve; Gomboc, Dave
Explainable Artificial Intelligence for Training and Tutoring Proceedings Article
In: 12th International Conference on Artificial Intelligence in Education, Amsterdam, The Netherlands, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{lane_explainable_2005,
  title     = {Explainable Artificial Intelligence for Training and Tutoring},
  author    = {H. Chad Lane and Mark Core and Michael Lent and Steve Solomon and Dave Gomboc},
  url       = {http://ict.usc.edu/pubs/Explainable%20Artificial%20Intelligence%20for%20Training%20and%20Tutoring.pdf},
  booktitle = {12th International Conference on Artificial Intelligence in Education},
  address   = {Amsterdam, The Netherlands},
  year      = {2005},
  date      = {2005-07-01},
  abstract  = {This paper describes an Explainable Artificial Intelligence (XAI) tool that allows entities to answer questions about their activities within a tactical simulation. We show how XAI can be used to provide more meaningful after-action reviews and discuss ongoing work to integrate an intelligent tutor into the XAI framework.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Swartout, William; Gratch, Jonathan; Marsella, Stacy C.
Virtual Humans for non-team interaction training Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS) Workshop on Creating Bonds with Humanoids, Utrecht, Netherlands, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_virtual_2005,
  title     = {Virtual Humans for non-team interaction training},
  author    = {David Traum and William Swartout and Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Virtual%20Humans%20for%20non-team%20interaction%20training.pdf},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS) Workshop on Creating Bonds with Humanoids},
  address   = {Utrecht, Netherlands},
  year      = {2005},
  date      = {2005-07-01},
  abstract  = {We describe a model of virtual humans to be used in training for non-team interactions, such as negotiating with people from other organizations. The virtual humans build on existing task, dialogue, and emotion models, with an added model of trust, which are used to understand and produce interactional moves. The model has been implemented within an agent in the SASO-ST system, and some example dialogues are given, illustrating the necessity for building social bonds.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.
Evaluating a computational model of emotion Journal Article
In: Journal Autonomous Agents and Multi-Agent Systems. Special Issue on the Best of AAMAS 2004, vol. 11, no. 1, pp. 23–43, 2005.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{gratch_evaluating_2005,
  title     = {Evaluating a computational model of emotion},
  author    = {Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Evaluating%20a%20computational%20model%20of%20emotion.pdf},
  journal   = {Journal Autonomous Agents and Multi-Agent Systems. Special Issue on the Best of AAMAS 2004},
  volume    = {11},
  number    = {1},
  pages     = {23--43},
  year      = {2005},
  date      = {2005-07-01},
  abstract  = {Spurred by a range of potential applications, there has been a growing body of research in computational models of human emotion. To advance the development of these models, it is critical that we evaluate them against the phenomena they purport to model. In this paper, we present one method to evaluate an emotion model that compares the behavior of the model against human behavior using a standard clinical instrument for assessing human emotion and coping. We use this method to evaluate the Emotion and Adaptation (EMA) model of emotion Gratch and Marsella. The evaluation highlights strengths of the approach and identifies where the model needs further development.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Riedl, Mark O.; Lane, H. Chad; Hill, Randall W.; Swartout, William
Automated Story Direction and Intelligent Tutoring: Towards a Unifying Architecture Proceedings Article
In: AI and Education 2005 Workshop on Narrative Learning Environments, Amsterdam, The Netherlands, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{riedl_automated_2005,
  title     = {Automated Story Direction and Intelligent Tutoring: Towards a Unifying Architecture},
  author    = {Mark O. Riedl and H. Chad Lane and Randall W. Hill and William Swartout},
  url       = {http://ict.usc.edu/pubs/Automated%20Story%20Direction%20and%20Intelligent%20Tutoring-%20Towards%20a%20Unifying%20Architecture.pdf},
  booktitle = {AI and Education 2005 Workshop on Narrative Learning Environments},
  address   = {Amsterdam, The Netherlands},
  year      = {2005},
  date      = {2005-07-01},
  abstract  = {Recently, interactive storytelling systems—systems that allow a user to make decisions that can potentially impact the direction of a narrative—have been applied to training and education. Interactive storytelling systems often rely on an automated story director to manage the user's experience. The focus of an automated director is the emergence of a narrative-like experience for the user. In contrast, intelligent tutors traditionally address the acquisition or strengthening of a learner's knowledge. Our goal is to build training simulations that cultivate compelling storylines while simultaneously maintaining a pedagogical presence by incorporating both automated story direction and intelligent tutoring into an immersive environment. But what is the relationship between an automated director and an intelligent tutor? In this paper, we discuss the similarities and differences of automated story directors and intelligent tutors and, based on our analysis, recommend an architecture for building narrative-based training simulations that utilize both effectively and without conflict.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rizzo, Albert; Kim, Gerard J.; Yeh, Shih-Ching; Thiebaux, Marcus; Hwang, Jayne; Buckwalter, John Galen
Development of a Benchmarking Scenario for Testing 3D User Interface Devices and Interaction Methods Proceedings Article
In: Proceedings of the 11th International Conference on Human Computer Interaction, Las Vegas, NV, 2005.
Abstract | Links | BibTeX | Tags: MedVR
@inproceedings{rizzo_development_2005,
  title     = {Development of a Benchmarking Scenario for Testing 3D User Interface Devices and Interaction Methods},
  author    = {Albert Rizzo and Gerard J. Kim and Shih-Ching Yeh and Marcus Thiebaux and Jayne Hwang and John Galen Buckwalter},
  url       = {http://ict.usc.edu/pubs/Development%20of%20a%20Benchmarking%20Scenario%20for%20Testing%203D%20User%20Interface%20Devices%20and%20Interaction%20Methods.pdf},
  booktitle = {Proceedings of the 11th International Conference on Human Computer Interaction},
  address   = {Las Vegas, NV},
  year      = {2005},
  date      = {2005-07-01},
  abstract  = {To address a part of the challenge of testing and comparing various 3D user interface devices and methods, we are currently developing and testing a VR 3D User Interface benchmarking scenario. The approach outlined in this paper focuses on the capture of human interaction performance on object selection and manipulation tasks using standardized and scalable block configurations that allow for measurement of speed and efficiency with any interaction device or method. The block configurations that we are using as benchmarking stimuli are accompanied by a pure mental rotation visuospatial assessment test. This feature will allow researchers to test users' existing spatial abilities and statistically parcel out the variability due to innate ability, from the actual hands-on performance metrics. This statistical approach could lead to a more pure analysis of the ergonomic features of interaction devices and methods separate from existing user abilities. An initial test was conducted at two sites using this benchmarking system to make comparisons between 3D/gesture-based and 2D/mouse-based interactions for 3D selection and manipulation. Our preliminary results demonstrated, as expected, that the 3D/gesture based method in general outperformed the 2D/mouse interface. As well there were statistically significant performance differences between different user groups when categorized by their sex, visuospatial ability and educational background.},
  keywords  = {MedVR},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
Commonsense Psychology and the Functional Requirements of Cognitive Models Proceedings Article
In: American Association of Artificial Intelligence Workshop on Modular Construction of Human-Like Intelligence, AAAI Press, Pittsburgh, PA, 2005.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_commonsense_2005,
  title     = {Commonsense Psychology and the Functional Requirements of Cognitive Models},
  author    = {Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/Commonsense%20Psychology%20and%20the%20Functional%20Requirements%20of%20Cognitive%20Models.pdf},
  booktitle = {American Association of Artificial Intelligence Workshop on Modular Construction of Human-Like Intelligence},
  publisher = {AAAI Press},
  address   = {Pittsburgh, PA},
  year      = {2005},
  date      = {2005-07-01},
  abstract  = {In this paper we argue that previous models of cognitive abilities (e.g. memory, analogy) have been constructed to satisfy functional requirements of implicit commonsense psychological theories held by researchers and nonresearchers alike. Rather than working to avoid the influence of commonsense psychology in cognitive modeling research, we propose to capitalize on progress in developing formal theories of commonsense psychology to explicitly define the functional requirements of cognitive models. We present a taxonomy of 16 classes of cognitive models that correspond to the representational areas that have been addressed in large-scale inferential theories of commonsense psychology. We consider the functional requirements that can be derived from inferential theories for one of these classes, the processes involved in human memory. We argue that the breadth coverage of commonsense theories can be used to better evaluate the explanatory scope of cognitive models, as well as facilitate the investigation of larger-scale cognitive systems.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Ettaile, Emil; Gandhe, Sudeep; Georgiou, Panayiotis G.; Knight, Kevin; Marcu, Daniel; Narayanan, Shrikanth; Traum, David; Belvin, Robert
Transonics: A Practical Speech-to-Speech Translator for English-Farsi Medical Dialogues Proceedings Article
In: Proceedings of the ACL Interactive Poster and Demonstration Sessions, pp. 89–92, Ann Arbor, MI, 2005.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{ettaile_transonics_2005,
  title     = {Transonics: A Practical Speech-to-Speech Translator for English-Farsi Medical Dialogues},
  author    = {Emil Ettaile and Sudeep Gandhe and Panayiotis G. Georgiou and Kevin Knight and Daniel Marcu and Shrikanth Narayanan and David Traum and Robert Belvin},
  url       = {http://ict.usc.edu/pubs/TRANSONICS-%20A%20SPEECH%20TO%20SPEECH%20SYSTEM%20FOR%20ENGLISH-PERSIAN%20INTERACTIONS.pdf},
  booktitle = {Proceedings of the ACL Interactive Poster and Demonstration Sessions},
  pages     = {89--92},
  address   = {Ann Arbor, MI},
  year      = {2005},
  date      = {2005-06-01},
  abstract  = {We briefly describe a two-way speech-to-speech English-Farsi translation system prototype developed for use in doctor-patient interactions. The overarching philosophy of the developers has been to create a system that enables effective communication, rather than focusing on maximizing component-level performance. The discussion focuses on the general approach and evaluation of the system by an independent government evaluation team.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Liao, Wei-Kai; Cohen, Isaac
Classifying Facial Gestures in Presence of Head Motion Proceedings Article
In: IEEE Workshop on Vision for Human-Computer Interaction, San Diego, CA, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{liao_classifying_2005,
  title     = {Classifying Facial Gestures in Presence of Head Motion},
  author    = {Wei-Kai Liao and Isaac Cohen},
  url       = {http://ict.usc.edu/pubs/Classifying%20Facial%20Gestures%20in%20Presence%20of%20Head%20Motion.pdf},
  booktitle = {IEEE Workshop on Vision for Human-Computer Interaction},
  address   = {San Diego, CA},
  year      = {2005},
  date      = {2005-06-01},
  abstract  = {This paper addresses the problem of automatic facial gestures recognition in an interactive environment. Automatic facial gestures recognition is a difficult problem in computer vision, and most of the work has focused on inferring facial gestures in the context of a static head. In the paper we address the challenging problem of recognizing the facial expressions of a moving head. We present a systematic framework to analyze and classify the facial gestures with the head movement. Our system includes a 3D head pose estimation method to recover the global head motion. After estimating the head pose, the human face is modeled by a collection of face's regions. These regions represent the face model used for locating and extracting temporal facial features. We propose using a locally affine motion model to represent extracted motion fields. The classification consists of a graphical model for robustly representing the dependencies of the selected facial regions and the support vector machine. Our experiments show that this approach could classify human expressions in interactive environments accurately.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Chu, Chi-Wei; Cohen, Isaac
Posture and Gesture Recognition using 3D Body Shapes Decomposition Proceedings Article
In: IEEE Workshop on Vision for Human-Computer Interaction, San Diego, CA, 2005.
Abstract | Links | BibTeX | Tags:
@inproceedings{chu_posture_2005,
  title     = {Posture and Gesture Recognition using 3D Body Shapes Decomposition},
  author    = {Chi-Wei Chu and Isaac Cohen},
  url       = {http://ict.usc.edu/pubs/Posture%20and%20Gesture%20Recognition%20using%203D%20Body%20Shapes%20Decomposition.pdf},
  booktitle = {IEEE Workshop on Vision for Human-Computer Interaction},
  address   = {San Diego, CA},
  year      = {2005},
  date      = {2005-06-01},
  abstract  = {We present a method for describing arbitrary human posture as a combination of basic postures. This decomposition allows for recognition of a larger number of postures and gestures from a small set of elementary postures called atoms. We propose a modified version of the matching pursuit algorithm for decomposing an arbitrary input posture into a linear combination of primary and secondary atoms. These atoms are represented through their shape descriptor inferred from the 3D visual-hull of the human body posture. Using an atom-based description of postures increases tremendously the set of recognizable postures while reducing the required training data set. A gesture recognition system based on the atom decomposition and Hidden Markov Model (HMM) is also described. Instead of representing gestures as HMM transition of postures, we separate the description of gestures as two HMMs, each describing the transition of Primary/Secondary atoms; thus greatly reducing the size of state space of HMM. We illustrate the proposed approach for posture and gesture recognition method on a set of video streams captured by four synchronous cameras.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}