Publications
Search
Graham, Paul; Fyffe, Graham; Tonwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Near-Instant Capture of High-Resolution Facial Geometry and Reflectance Proceedings Article
In: Proceedings of ACM SIGGRAPH 2015 Talks, pp. 1–1, ACM Press, 2015, ISBN: 978-1-4503-3636-9.
@inproceedings{graham_near-instant_2015,
title = {Near-Instant Capture of High-Resolution Facial Geometry and Reflectance},
author = {Paul Graham and Graham Fyffe and Borom Tonwattanapong and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Near-Instant%20Capture%20of%20High-Resolution%20Facial%20Geometry%20and%20Reflectance.pdf},
doi = {10.1145/2775280.2792561},
isbn = {978-1-4503-3636-9},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of ACM SIGGRAPH 2015 Talks},
pages = {1--1},
publisher = {ACM Press},
abstract = {Modeling realistic human characters is frequently done using 3D recordings of the shape and appearance of real people, often across a set of different facial expressions to build blendshape facial models. Believable characters that cross the "Uncanny Valley" require high-quality geometry, texture maps, reflectance properties, and surface detail at the level of skin pores and fine wrinkles. Unfortunately, there has not yet been a technique for recording such datasets that is near-instantaneous and low-cost. While some facial capture techniques are instantaneous and inexpensive [Beeler et al. 2010], these do not generally provide lighting-independent texture maps, specular reflectance information, or high-resolution surface normal detail for relighting. In contrast, techniques which use multiple photographs from spherical lighting setups [Ghosh et al. 2011] do capture such reflectance properties, at the expense of longer capture times and complicated custom equipment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Paetzel, Maike; DeVault, David
Reducing the Cost of Dialogue System Training and Evaluation with Online, Crowd-Sourced Dialogue Data Collection Proceedings Article
In: Proceedings of SEMDIAL 2015 goDIAL, pp. 113 – 121, Gothenburg, Sweden, 2015.
@inproceedings{manuvinakurike_reducing_2015,
title = {Reducing the Cost of Dialogue System Training and Evaluation with Online, Crowd-Sourced Dialogue Data Collection},
author = {Ramesh Manuvinakurike and Maike Paetzel and David DeVault},
url = {http://ict.usc.edu/pubs/Reducing%20the%20Cost%20of%20Dialogue%20System%20Training%20and%20Evaluation%20with%20Online,%20Crowd-Sourced%20Dialogue%20Data%20Collection.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of SEMDIAL 2015 goDIAL},
pages = {113--121},
address = {Gothenburg, Sweden},
abstract = {This paper presents and analyzes an approach to crowd-sourced spoken dialogue data collection. Our approach enables low cost collection of browser-based spoken dialogue interactions between two remote human participants (human-human condition) as well as one remote human participant and an automated dialogue system (human-agent condition). We present a case study in which 200 remote participants were recruited to participate in a fast-paced image matching game, and which included both human-human and human-agent conditions. We discuss several technical challenges encountered in achieving this crowd-sourced data collection, and analyze the costs in time and money of carrying out the study. Our results suggest the potential of crowdsourced spoken dialogue data to lower costs and facilitate a range of research in dialogue modeling, dialogue system design, and system evaluation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M.; Gratch, Jonathan; Cheng, Lin; Marsella, Stacy
When the going gets tough: Grit predicts costly perseverance Journal Article
In: Journal of Research in Personality, vol. 59, pp. 15–22, 2015, ISSN: 00926566.
@article{lucas_when_2015,
title = {When the going gets tough: Grit predicts costly perseverance},
author = {Gale M. Lucas and Jonathan Gratch and Lin Cheng and Stacy Marsella},
url = {http://ict.usc.edu/pubs/When%20the%20going%20gets%20tough-Grit%20predicts%20costly%20perseverance.pdf},
doi = {10.1016/j.jrp.2015.08.004},
issn = {0092-6566},
year = {2015},
date = {2015-08-01},
journal = {Journal of Research in Personality},
volume = {59},
pages = {15--22},
abstract = {In this research, we investigate how grittier individuals might incur some costs by persisting when they could move on. Grittier participants were found to be less willing to give up when failing even though they were likely to incur a cost for their persistence. First, grittier participants are more willing to risk failing to complete a task by persisting on individual items. Second, when they are losing, they expend more effort and persist longer in a game rather than quit. Gritty participants have more positive emotions and expectations toward the task, which mediates the relationship between grit and staying to persist when they are losing. Results show gritty individuals are more willing to risk suffering monetary loss to persist.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kang, Sin-Hwa; Feng, Andrew; Leuski, Anton; Casas, Dan; Shapiro, Ari
Smart Mobile Virtual Humans: “Chat with Me!” Proceedings Article
In: Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA), pp. 475–478, Springer, Delft, Netherlands, 2015.
@inproceedings{kang_smart_2015,
title = {Smart Mobile Virtual Humans: “Chat with Me!”},
author = {Sin-Hwa Kang and Andrew Feng and Anton Leuski and Dan Casas and Ari Shapiro},
url = {http://ict.usc.edu/pubs/Smart%20Mobile%20Virtual%20Humans%20-%20Chat%20with%20Me.pdf},
doi = {10.1007/978-3-319-21996-7},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA)},
pages = {475--478},
publisher = {Springer},
address = {Delft, Netherlands},
abstract = {In this study, we are interested in exploring whether people would talk with 3D animated virtual humans using a smartphone for a longer amount of time as a sign of feeling rapport [5], compared to non-animated or audio-only characters in everyday life. Based on previous studies [2, 7, 10], users prefer animated characters in emotionally engaged interactions when the characters were displayed on mobile devices, yet in a lab setting. We aimed to reach a broad range of users outside of the lab in natural settings to investigate the potential of our virtual human on smartphones to facilitate casual, yet emotionally engaging conversation. We also found that the literature has not reached a consensus regarding the ideal gaze patterns for a virtual human, one thing researchers agree on is that inappropriate gaze could negatively impact conversations at times, even worse than receiving no visual feedback at all [1, 4]. Everyday life may bring the experience of awkwardness or uncomfortable sentiments in reaction to continuous mutual gaze. On the other hand, gaze aversion could also make a speaker think their partner is not listening. Our work further aims to address this question of what constitutes appropriate eye gaze in emotionally engaged interactions. We developed a 3D animated and chat-based virtual human which presented emotionally expressive nonverbal behaviors such as facial expressions, head gestures, gaze, and other upper body movements (see Figure 1). The virtual human displayed appropriate gaze that was either consisted of constant mutual gaze or gaze aversion based on a statistical model of saccadic eye movement [8] while listening. Both gaze patterns were accompanied by other forms of appropriate nonverbal feedback. To explore the question of optimal communicative medium, we distributed our virtual human application to users via an app store for Android-powered phones (i.e. 
Google Play Store) in order to target users who owned a smartphone and could use our application in various natural settings.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Feng, Andrew; Leuski, Anton; Marsella, Stacy; Casas, Dan; Kang, Sin-Hwa; Shapiro, Ari
A Platform for Building Mobile Virtual Humans Proceedings Article
In: Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA), pp. 310–319, Springer, Delft, Netherlands, 2015.
@inproceedings{feng_platform_2015,
  title     = {A Platform for Building Mobile Virtual Humans},
  author    = {Andrew Feng and Anton Leuski and Stacy Marsella and Dan Casas and Sin-Hwa Kang and Ari Shapiro},
  url       = {http://ict.usc.edu/pubs/A%20Platform%20for%20Building%20Mobile%20Virtual%20Humans.pdf},
  doi       = {10.1007/978-3-319-21996-7},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA)},
  pages     = {310--319},
  publisher = {Springer},
  address   = {Delft, Netherlands},
  abstract  = {We describe an authoring framework for developing virtual humans on mobile applications. The framework abstracts many elements needed for virtual human generation and interaction, such as the rapid development of nonverbal behavior, lip syncing to speech, dialogue management, access to speech transcription services, and access to mobile sensors such as the microphone, gyroscope and location components.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Bolas, Mark; Debevec, Paul
An Automultiscopic Projector Array for Interactive Digital Humans Proceedings Article
In: SIGGRAPH 2015, pp. 1–1, ACM Press, Los Angeles, CA, 2015, ISBN: 978-1-4503-3635-2.
@inproceedings{jones_automultiscopic_2015,
title = {An Automultiscopic Projector Array for Interactive Digital Humans},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/An%20Automultiscopic%20Projector%20Array%20for%20Interactive%20Digital%20Humans.pdf},
doi = {10.1145/2782782.2792494},
isbn = {978-1-4503-3635-2},
year = {2015},
date = {2015-08-01},
booktitle = {SIGGRAPH 2015},
pages = {1--1},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {Automultiscopic 3D displays allow a large number of viewers to experience 3D content simultaneously without the hassle of special glasses or head gear. Our display uses a dense array of video projectors to generate many images with high-angular density over a wide-field of view. As each user moves around the display, their eyes smoothly transition from one view to the next. The display is ideal for displaying life-size human subjects as it allows for natural personal interactions with 3D cues such as eye gaze and spatial hand gestures. In this installation, we will explore “time-offset” interactions with recorded 3D human subjects.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S.; Gratch, Jonathan; Ustun, Volkan
Towards Emotion in Sigma: From Appraisal to Attention Proceedings Article
In: Proceedings of AGI 2015, pp. 142 – 151, Springer International Publishing, Berlin, Germany, 2015.
@inproceedings{rosenbloom_towards_2015,
title = {Towards Emotion in Sigma: From Appraisal to Attention},
author = {Paul S. Rosenbloom and Jonathan Gratch and Volkan Ustun},
url = {http://ict.usc.edu/pubs/Towards%20Emotion%20in%20Sigma%20-%20From%20Appraisal%20to%20Attention.pdf},
year = {2015},
date = {2015-07-01},
booktitle = {Proceedings of AGI 2015},
volume = {9205},
pages = {142--151},
publisher = {Springer International Publishing},
address = {Berlin, Germany},
abstract = {A first step is taken towards incorporating emotional processing into Sigma, a cognitive architecture that is grounded in graphical models, with the addition of appraisal variables for expectedness and desirability plus their initial implications for attention at two levels of the control hierarchy. The results leverage many of Sigma's existing capabilities but with a few key additions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Demski, Abram
Expression Graphs Unifying Factor Graphs and Sum-Product Networks Proceedings Article
In: Artificial General Intelligence, pp. 241–250, Springer, Berlin, Germany, 2015.
@inproceedings{demski_expression_2015,
title = {Expression Graphs Unifying Factor Graphs and Sum-Product Networks},
author = {Abram Demski},
url = {http://ict.usc.edu/pubs/Expression%20Graphs%20Unifying%20Factor%20Graphs%20and%20Sum-Product%20Networks.pdf},
year = {2015},
date = {2015-07-01},
booktitle = {Artificial General Intelligence},
pages = {241--250},
publisher = {Springer},
address = {Berlin, Germany},
abstract = {Factor graphs are a very general knowledge representation, subsuming many existing formalisms in AI. Sum-product networks are a more recent representation, inspired by studying cases where factor graphs are tractable. Factor graphs emphasize expressive power, while sum-product networks restrict expressiveness to get strong guarantees on speed of inference. A sum-product network is not simply a restricted factor graph, however. Although the inference algorithms for the two structures are very similar, translating a sum-product network into factor graph representation can result in an exponential slowdown. We propose a formalism which generalizes factor graphs and sum-product networks, such that inference is fast in cases whose structure is close to a sum-product network.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nagano, Koki; Fyffe, Graham; Alexander, Oleg; Barbič, Jernej; Li, Hao; Ghosh, Abhijeet; Debevec, Paul
Skin Microstructure Deformation with Displacement Map Convolution Journal Article
In: ACM Transactions on Graphics, vol. 34, no. 4, pp. 1–10, 2015, ISSN: 07300301.
@article{nagano_skin_2015-1,
title = {Skin Microstructure Deformation with Displacement Map Convolution},
author = {Koki Nagano and Graham Fyffe and Oleg Alexander and Jernej Barbi{\v{c}} and Hao Li and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Skin%20Microstructure%20Deformation%20with%20Displacement%20Map%20Convolution.pdf},
doi = {10.1145/2766894},
issn = {0730-0301},
year = {2015},
date = {2015-07-01},
journal = {ACM Transactions on Graphics},
volume = {34},
number = {4},
pages = {1--10},
abstract = {We present a technique for synthesizing the effects of skin microstructure deformation by anisotropically convolving a high-resolution displacement map to match normal distribution changes in measured skin samples. We use a 10-micron resolution scanning technique to measure several in vivo skin samples as they are stretched and compressed in different directions, quantifying how stretching smooths the skin and compression makes it rougher. We tabulate the resulting surface normal distributions, and show that convolving a neutral skin microstructure displacement map with blurring and sharpening filters can mimic normal distribution changes and microstructure deformations. We implement the spatially-varying displacement map filtering on the GPU to interactively render the effects of dynamic microgeometry on animated faces obtained from high-resolution facial scans.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Nouri, Elnaz; Traum, David
Cross cultural report of values and decisions in the multi round ultimatum game and the centipede game Proceedings Article
In: Proceeding of AHFE 2015, Las Vegas, NV, 2015.
@inproceedings{nouri_cross_2015,
  title     = {Cross cultural report of values and decisions in the multi round ultimatum game and the centipede game},
  author    = {Elnaz Nouri and David Traum},
  url       = {http://ict.usc.edu/pubs/Cross%20cultural%20report%20of%20values%20and%20decisions%20in%20the%20multi%20round%20ultimatum%20game%20and%20the%20centipede%20game.pdf},
  year      = {2015},
  date      = {2015-07-01},
  booktitle = {Proceeding of AHFE 2015},
  address   = {Las Vegas, NV},
  abstract  = {This paper investigates the cultural differences in decision making behavior of people from the US and India. We study players from these cultures playing the Multi Round Ultimatum Game and the Centipede Game online. In order to study how people from different cultures evaluate decisions we use criteria from the Multi Attribute Relational Values (MARV) survey. Our results confirm the existence of cultural differences in how people from US and India make decisions in the Ultimatum and Centipede games. We also observe differences in responses to survey questions implying differences in the amount of importance that the two cultures assign to the MARV decision making criteria.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Gratch, Jon; Lucas, Gale; Malandrakis, Nikolaos; Szablowski, Evan; Fessler, Eli
To tweet or not to tweet: The question of emotion and excitement about sporting events Proceedings Article
In: Proceedings of the Bi-Annual Conference of the International Society for Research on Emotion, Geneva, Switzerland, 2015.
@inproceedings{gratch_tweet_2015,
  title     = {To tweet or not to tweet: The question of emotion and excitement about sporting events},
  author    = {Jon Gratch and Gale Lucas and Nikolaos Malandrakis and Evan Szablowski and Eli Fessler},
  url       = {http://ict.usc.edu/pubs/To%20tweet%20or%20not%20to%20tweet%20-The%20question%20of%20emotion%20and%20excitement%20about%20sporting%20events.pdf},
  year      = {2015},
  date      = {2015-07-01},
  booktitle = {Proceedings of the Bi-Annual Conference of the International Society for Research on Emotion},
  address   = {Geneva, Switzerland},
  abstract  = {Sporting events can serve as laboratories to explore emotion and computational tools provide new ways to examine emotional processes “in the wild”. Moreover, emotional processes are assumed -but untested- in sports economics. For example, according to the well-studied uncertainty of outcome hypothesis (UOH), “close” games are more exciting and therefore better attended. If one team were certain to win, it would take away a major source of excitement, reducing positive affect, and therefore decreasing attendance. The role of emotion here is assumed but has not been tested; furthermore, the measures used (ticket sales, attendance, TV-viewership) do not allow for such a test because they are devoid of emotional content. To address this problem, we use tweets per minute (specifically, tweets posted during 2014 World Cup with official game hashtags). Sentiment analysis of these tweets can give interesting insights into what emotional processes are involved. Another benefit of tweets is that they are dynamic, and novel results from dynamic analyses (of TV-viewership) suggest that the UOH effect can actually reverse as games unfold (people switch channels away from close games). We therefore also reconsider the UOH, specifically, extending it by both examining sentiment and dynamic changes during the game. To consider such changes, we focus on games that could have been close (high in uncertainty), but ended up being lower in uncertainty. We operationalize such unexpected certainty of outcome as the extent to which games are predicted to be “close” (based on betting odds), but ended up with a bigger difference between the teams’ scores than was expected. Statistical analyses revealed that, contrary to the UOH, games with a bigger difference in score between teams than expected had higher tweets per minute. 
We also performed sentiment analysis, categorizing each tweet as positive, negative or neutral, and found that games with higher tweets per minute also have a higher percentage of negative tweets. Furthermore, games that have a bigger difference than expected have a higher percentage of negative tweets (compared to games closer to what is expected). This analysis seems to suggest that, contrary to assumptions in sports economics, excitement relates to expressions of negative emotion (and not positive emotion). The results are discussed in terms of innovations in methodology and understanding the role of emotion for “tuning in” to real world events. Further research could explore the specific mechanisms that link negative sentiment to excitement, such as worry or out-group derogation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Cummins, Nicholas; Scherer, Stefan; Krajewski, Jarek; Schnieder, Sebastian; Epps, Julien; Quatieri, Thomas F.
A Review of Depression and Suicide Risk Assessment Using Speech Analysis Journal Article
In: Speech Communication, vol. 71, pp. 10 – 49, 2015, ISSN: 0167-6393.
@article{cummins_review_2015,
title = {A Review of Depression and Suicide Risk Assessment Using Speech Analysis},
author = {Nicholas Cummins and Stefan Scherer and Jarek Krajewski and Sebastian Schnieder and Julien Epps and Thomas F. Quatieri},
url = {http://www.sciencedirect.com/science/article/pii/S0167639315000369},
doi = {10.1016/j.specom.2015.03.004},
issn = {0167-6393},
year = {2015},
date = {2015-07-01},
journal = {Speech Communication},
volume = {71},
pages = {10--49},
abstract = {This paper is the first review into the automatic analysis of speech for use as an objective predictor of depression and suicidality. Both conditions are major public health concerns; depression has long been recognised as a prominent cause of disability and burden worldwide, whilst suicide is a misunderstood and complex course of death that strongly impacts the quality of life and mental health of the families and communities left behind. Despite this prevalence the diagnosis of depression and assessment of suicide risk, due to their complex clinical characterisations, are difficult tasks, nominally achieved by the categorical assessment of a set of specific symptoms. However many of the key symptoms of either condition, such as altered mood and motivation, are not physical in nature; therefore assigning a categorical score to them introduces a range of subjective biases to the diagnostic procedure. Due to these difficulties, research into finding a set of biological, physiological and behavioural markers to aid clinical assessment is gaining in popularity. This review starts by building the case for speech to be considered a key objective marker for both conditions; reviewing current diagnostic and assessment methods for depression and suicidality including key non-speech biological, physiological and behavioural markers and highlighting the expected cognitive and physiological changes associated with both conditions which affect speech production. We then review the key characteristics; size, associated clinical scores and collection paradigm, of active depressed and suicidal speech databases. The main focus of this paper is on how common paralinguistic speech characteristics are affected by depression and suicidality and the application of this information in classification and prediction systems. 
The paper concludes with an in-depth discussion on the key challenges – improving the generalisability through greater research collaboration and increased standardisation of data collection, and the mitigating unwanted sources of variability – that will shape the future research directions of this rapidly growing field of speech processing research.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kommers, Cody; Ustun, Volkan; Demski, Abram; Rosenbloom, Paul
Hierarchical Reasoning with Distributed Vector Representations Proceedings Article
In: Proceedings of 37th Annual Conference of the Cognitive Science Society, Cognitive Science Society, Pasadena, CA, 2015.
@inproceedings{kommers_hierarchical_2015,
  title     = {Hierarchical Reasoning with Distributed Vector Representations},
  author    = {Cody Kommers and Volkan Ustun and Abram Demski and Paul Rosenbloom},
  url       = {http://ict.usc.edu/pubs/Hierarchical%20Reasoning%20with%20Distributed%20Vector%20Representations.pdf},
  year      = {2015},
  date      = {2015-07-01},
  booktitle = {Proceedings of 37th Annual Conference of the Cognitive Science Society},
  publisher = {Cognitive Science Society},
  address   = {Pasadena, CA},
  abstract  = {We demonstrate that distributed vector representations are capable of hierarchical reasoning by summing sets of vectors representing hyponyms (subordinate concepts) to yield a vector that resembles the associated hypernym (superordinate concept). These distributed vector representations constitute a potentially neurally plausible model while demonstrating a high level of performance in many different cognitive tasks. Experiments were run using DVRS, a word embedding system designed for the Sigma cognitive architecture, and Word2Vec, a state-of-the-art word embedding system. These results contribute to a growing body of work demonstrating the various tasks on which distributed vector representations perform competently.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Lane, H. Chad; Core, Mark G.; Goldberg, Benjamin S.
Lowering the Technical Skill Requirements for Building Intelligent Tutors: A Review of Authoring Tools Book Section
In: Design Recommendations for Intelligent Tutoring Systems, vol. 3, pp. 303 – 318, U.S. Army Research Laboratory, 2015.
@incollection{lane_lowering_2015,
title = {Lowering the Technical Skill Requirements for Building Intelligent Tutors: A Review of Authoring Tools},
author = {H. Chad Lane and Mark G. Core and Benjamin S. Goldberg},
url = {http://ict.usc.edu/pubs/Lowering%20the%20Technical%20Skill%20Requirements%20for%20Building%20Intelligent%20Tutors-A%20Review%20of%20Authoring%20Tools.pdf},
year = {2015},
date = {2015-06-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems},
volume = {3},
pages = {303--318},
publisher = {U.S. Army Research Laboratory},
abstract = {In this chapter, we focus on intelligent tutoring systems (ITSs), an instance of educational technology that is often criticized for not reaching its full potential (Nye, 2013). Researchers have debated why, given such strong empirical evidence in their favor (Anderson, Corbett, Koedinger & Pelletier, 1995; D’Mello & Graesser, 2012; VanLehn et al., 2005; Woolf, 2009), intelligent tutors are not in every classroom, on every device, providing educators with fine-grained assessment information about their students. Although many factors contribute to a lack of adoption (Nye, 2014), one widely agreed upon reason behind slow adoption and poor scalability of ITSs is that the engineering demands are simply too great. This is no surprise given that the effectiveness of ITSs is often attributable to the use of rich knowledge representations and cognitively plausible models of domain knowledge (Mark & Greer, 1995; Valerie J. Shute & Psotka, 1996; VanLehn, 2006; Woolf, 2009), which are inherently burdensome to build. To put it another way: the features that tend to make ITSs effective are also the hardest to build. The heavy reliance on cognitive scientists and artificial intelligence (AI) software engineers seems to be a bottleneck.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Lane, H. Chad; Core, Mark G.; Hays, Matthew J.; Auerbach, Daniel; Rosenberg, Milton
Situated Pedagogical Authoring: Authoring Intelligent Tutors from a Student’s Perspective Proceedings Article
In: Artificial Intelligence in Education, pp. 195–204, Springer International Publishing, Madrid, Spain, 2015, ISBN: 978-3-319-19772-2 978-3-319-19773-9.
@inproceedings{chad_lane_situated_2015,
title = {Situated Pedagogical Authoring: Authoring Intelligent Tutors from a Student’s Perspective},
author = {H. Chad Lane and Mark G. Core and Matthew J. Hays and Daniel Auerbach and Milton Rosenberg},
url = {http://ict.usc.edu/pubs/Situated%20Pedagogical%20Authoring-Authoring%20Intelligent.pdf},
isbn = {978-3-319-19772-2 978-3-319-19773-9},
year = {2015},
date = {2015-06-01},
booktitle = {Artificial Intelligence in Education},
volume = {9112},
pages = {195--204},
publisher = {Springer International Publishing},
address = {Madrid, Spain},
abstract = {We describe the Situated Pedagogical Authoring (SitPed) system that seeks to allow non-technical authors to create ITS content for soft-skills training, such as counseling skills. SitPed is built on the assertion that authoring tools should use the learner’s perspective to the greatest extent possible. SitPed provides tools for creating tasks lists, authoring assessment knowledge, and creating tutor messages. We present preliminary findings of a two-phase study comparing authoring in SitPed to an ablated version of the same system and a spreadsheet-based control. Findings suggest modest advantages for SitPed in terms of the quality of the authored content and student learning.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Knowles, Megan; Lucas, Gale; Baumeister, Roy; Gardner, Wendi
Choking Under Social Pressure: Social Monitoring Among the Lonely Journal Article
In: Personality and Social Psychology Bulletin, vol. 41, no. 6, pp. 805–821, 2015, ISSN: 0146-1672, 1552-7433.
@article{knowles_choking_2015,
title = {Choking Under Social Pressure: Social Monitoring Among the Lonely},
author = {Megan Knowles and Gale Lucas and Roy Baumeister and Wendi Gardner},
url = {http://ict.usc.edu/pubs/Choking%20Under%20Social%20Pressure%20-%20Social%20Monitoring%20Among%20the%20Lonely.pdf},
doi = {10.1177/0146167215580775},
issn = {0146-1672, 1552-7433},
year = {2015},
date = {2015-06-01},
journal = {Personality and Social Psychology Bulletin},
volume = {41},
number = {6},
pages = {805--821},
abstract = {Lonely individuals may decode social cues well but have difficulty putting such skills to use precisely when they need them—in social situations. In four studies, we examined whether lonely people choke under social pressure by asking participants to complete social sensitivity tasks framed as diagnostic of social skills or nonsocial skills. Across studies, lonely participants performed worse than nonlonely participants on social sensitivity tasks framed as tests of social aptitude, but they performed just as well or better than the nonlonely when the same tasks were framed as tests of academic aptitude. Mediational analyses in Study 3 and misattribution effects in Study 4 indicate that anxiety plays an important role in this choking effect. This research suggests that lonely individuals may not need to acquire social skills to escape loneliness; instead, they must learn to cope with performance anxiety in interpersonal interactions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Nye, Benjamin D.; Morrison, Donald M.; Samei, Borhan
Automated Session-Quality Assessment for Human Tutoring Based on Expert Ratings of Tutoring Success Proceedings Article
In: Proceedings of Educational Data Mining (EDM) 2015, pp. 195–202, Springer, Madrid, Spain, 2015.
@inproceedings{nye_automated_2015,
title = {Automated Session-Quality Assessment for Human Tutoring Based on Expert Ratings of Tutoring Success},
author = {Benjamin D. Nye and Donald M. Morrison and Borhan Samei},
url = {http://ict.usc.edu/pubs/Automated%20Session-Quality%20Assessment%20for%20Human%20Tutoring%20Based%20on%20Expert%20Ratings%20of%20Tutoring%20Success.pdf},
year = {2015},
date = {2015-06-01},
booktitle = {Proceedings of Educational Data Mining (EDM) 2015},
pages = {195--202},
publisher = {Springer},
address = {Madrid, Spain},
abstract = {Archived transcripts from tens of millions of online human tutoring sessions potentially contain important knowledge about how online tutors help, or fail to help, students learn. However, without ways of automatically analyzing these large corpora, any knowledge in this data will remain buried. One way to approach this issue is to train an estimator for the learning effectiveness of an online tutoring interaction. While significant work has been done on automated assessment of student responses and artifacts (e.g., essays), automated assessment has not traditionally automated assessments of human-to-human tutoring sessions. In this work, we trained a model for estimating tutoring session quality based on a corpus of 1438 online tutoring sessions rated by expert tutors. Each session was rated for evidence of learning (outcomes) and educational soundness (process). Session features for this model included dialog act classifications, mode classifications (e.g., Scaffolding), statistically distinctive subsequences of such classifications, dialog initiative (e.g., statements by tutor vs. student), and session length. The model correlated more highly with evidence of learning than educational soundness ratings, in part due to the greater difficulty of classifying tutoring modes. This model was then applied to a corpus of 242k online tutoring sessions, to examine the relationships between automated assessments and other available metadata (e.g., the tutor's self-assessment). On this large corpus, the automated assessments followed similar patterns as the expert rater's assessments, but with lower overall correlation strength. Based on the analyses presented, the assessment model for online tutoring sessions emulates the ratings of expert human tutors for session quality ratings with a reasonable degree of accuracy.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wienberg, Christopher; Gordon, Andrew S.
Insights on Privacy and Ethics from the Web’s Most Prolific Storytellers Proceedings Article
In: Proceedings of WebSci15, pp. 1 –10, ACM, Oxford, UK, 2015.
@inproceedings{wienberg_insights_2015,
  title     = {Insights on Privacy and Ethics from the Web’s Most Prolific Storytellers},
  author    = {Christopher Wienberg and Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/Insights%20on%20Privacy%20and%20Ethics%20from%20the%20Web's%20Most%20Prolific%20Storytellers.pdf},
  year      = {2015},
  date      = {2015-06-01},
  booktitle = {Proceedings of WebSci15},
  pages     = {1--10},
  publisher = {ACM},
  address   = {Oxford, UK},
  abstract  = {An analysis of narratives in English-language weblogs reveals a unique population of individuals who post personal stories with extraordinarily high frequency over extremely long periods of time. This population includes people who have posted personal narratives everyday for more than eight years. In this paper we describe our investigation of this interesting subset of web users, where we conducted ethnographic, face-to-face interviews with a sample of these bloggers (n = 11). Our findings shed light on a culture of public documentation of private life, and provide insight into these bloggers' motivations, interactions with their readers, honesty, and thoughts on research that utilizes their data. We discuss the ethical implications for researchers working with web data, and speak to the relationship between large datasets and the real people behind them.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Scherer, Stefan; Lucas, Gale; Gratch, Jonathan; Rizzo, Albert; Morency, Louis-Philippe
Self-reported symptoms of depression and PTSD are associated with reduced vowel space in screening interviews Journal Article
In: IEEE Transactions on Affective Computing, no. 99, 2015, ISSN: 1949-3045 (in press; doi: 10.1109/TAFFC.2015.2440264).
@article{scherer_self-reported_2015,
  title     = {Self-reported symptoms of depression and PTSD are associated with reduced vowel space in screening interviews},
  author    = {Stefan Scherer and Gale Lucas and Jonathan Gratch and Albert Rizzo and Louis-Philippe Morency},
  url       = {http://ict.usc.edu/pubs/Self-reported%20symptoms%20of%20depression%20and%20PTSD%20are%20associated%20with%20reduced%20vowel%20space%20in%20screening%20interviews.pdf},
  doi       = {10.1109/TAFFC.2015.2440264},
  issn      = {1949-3045},
  year      = {2015},
  date      = {2015-06-01},
  journal   = {IEEE Transactions on Affective Computing},
  number    = {99},
  note      = {In press},
  abstract  = {Reduced frequency range in vowel production is a well documented speech characteristic of individuals with psychological and neurological disorders. Affective disorders such as depression and post-traumatic stress disorder (PTSD) are known to influence motor control and in particular speech production. The assessment and documentation of reduced vowel space and reduced expressivity often either rely on subjective assessments or on analysis of speech under constrained laboratory conditions (e.g. sustained vowel production, reading tasks). These constraints render the analysis of such measures expensive and impractical. Within this work, we investigate an automatic unsupervised machine learning based approach to assess a speaker’s vowel space. Our experiments are based on recordings of 253 individuals. Symptoms of depression and PTSD are assessed using standard self-assessment questionnaires and their cut-off scores. The experiments show a significantly reduced vowel space in subjects that scored positively on the questionnaires. We show the measure’s statistical robustness against varying demographics of individuals and articulation rate. The reduced vowel space for subjects with symptoms of depression can be explained by the common condition of psychomotor retardation influencing articulation and motor control. These findings could potentially support treatment of affective disorders, like depression and PTSD in the future.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Garten, Justin; Sagae, Kenji; Ustun, Volkan; Dehghani, Morteza
Combining Distributed Vector Representations for Words Proceedings Article
In: Proceedings of NAACL-HLT 2015, pp. 95–101, Association for Computational Linguistics, Denver, Colorado, 2015.
@inproceedings{garten_combining_2015,
  title     = {Combining Distributed Vector Representations for Words},
  author    = {Justin Garten and Kenji Sagae and Volkan Ustun and Morteza Dehghani},
  url       = {http://ict.usc.edu/pubs/Combining%20Distributed%20Vector%20Representations%20for%20Words.pdf},
  year      = {2015},
  date      = {2015-06-01},
  booktitle = {Proceedings of NAACL-HLT 2015},
  pages     = {95--101},
  publisher = {Association for Computational Linguistics},
  address   = {Denver, Colorado},
  abstract  = {Recent interest in distributed vector representations for words has resulted in an increased diversity of approaches, each with strengths and weaknesses. We demonstrate how diverse vector representations may be inexpensively composed into hybrid representations, effectively leveraging strengths of individual components, as evidenced by substantial improvements on a standard word analogy task. We further compare these results over different sizes of training sets and find these advantages are more pronounced when training data is limited. Finally, we explore the relative impacts of the differences in the learning methods themselves and the size of the contexts they access.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Filter
2003
Marsella, Stacy C.; Gratch, Jonathan
Modeling Coping Behaviors in Virtual Humans: Don't worry, Be Happy Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 313–320, Melbourne, Australia, 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_modeling_2003,
  title     = {Modeling Coping Behaviors in Virtual Humans: Don't worry, Be Happy},
  author    = {Stacy C. Marsella and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Modeling%20Coping%20Behavior%20in%20Virtual%20Humans-%20Dont%20worry%20Be%20happy.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  pages     = {313--320},
  address   = {Melbourne, Australia},
  abstract  = {This article builds on insights into how humans cope with emotion to guide the design of virtual humans. Although coping is increasingly viewed in the psychological literature as having a central role in human adaptive behavior, it has been largely ignored in computational models of emotion. In this paper, we show how psychological research on the interplay between human emotion, cognition and coping behavior can serve as a central organizing principle for the behavior of human-like autonomous agents. We present a detailed domain-independent model of coping based on this framework that significantly extends our previous work. We argue that this perspective provides novel insights into realizing adaptive behavior.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
The Social Credit Assignment Problem (Extended Version) Technical Report
University of Southern California Institute for Creative Technologies no. ICT TR 02 2003, 2003.
Links | BibTeX | Tags: Virtual Humans
@techreport{mao_social_2003,
  title       = {The Social Credit Assignment Problem (Extended Version)},
  author      = {Wenji Mao and Jonathan Gratch},
  url         = {http://ict.usc.edu/pubs/ICT%20TR%2002%202003.pdf},
  year        = {2003},
  date        = {2003-01-01},
  number      = {ICT TR 02 2003},
  institution = {University of Southern California Institute for Creative Technologies},
  keywords    = {Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Gardner, Andrew; Tchou, Chris; Hawkins, Tim; Debevec, Paul
Linear Light Source Reflectometry Proceedings Article
In: ACM Transactions on Graphics, 2003.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{gardner_linear_2003,
  title     = {Linear Light Source Reflectometry},
  author    = {Andrew Gardner and Chris Tchou and Tim Hawkins and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Linear%20Light%20Source%20Reflectometry.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {ACM Transactions on Graphics},
  abstract  = {This paper presents a technique for estimating the spatially-varying reflectance properties of a surface based on its appearance during a single pass of a linear light source. By using a linear light rather than a point light source as the illuminant, we are able to reliably observe and estimate the diffuse color, specular color, and specular roughness of each point of the surface. The reflectometry apparatus we use is simple and inexpensive to build, requiring a single direction of motion for the light source and a fixed camera viewpoint. Our model fitting technique first renders a reflectance table of how diffuse and specular reflectance lobes would appear under moving linear light source illumination. Then, for each pixel we compare its series of intensity values to the tabulated reflectance lobes to determine which reflectance model parameters most closely produce the observed reflectance values. Using two passes of the linear light source at different angles, we can also estimate per-pixel surface normals as well as the reflectance parameters. Additionally our system records a per-pixel height map for the object and estimates its per-pixel translucency. We produce real-time renderings of the captured objects using a custom hardware shading algorithm. We apply the technique to a test object exhibiting a variety of materials as well as to an illuminated manuscript with gold lettering. To demonstrate the technique's accuracy, we compare renderings of the captured models to real photographs of the original objects.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pair, Jarrell; Neumann, Ulrich; Piepol, Diane; Swartout, William
FlatWorld: Combining Hollywood Set-Design Techniques with VR Journal Article
In: IEEE Computer Graphics and Applications, no. January/February, 2003.
@article{pair_flatworld_2003,
  title     = {FlatWorld: Combining Hollywood Set-Design Techniques with VR},
  author    = {Jarrell Pair and Ulrich Neumann and Diane Piepol and William Swartout},
  editor    = {Lawrence Rosenblum and Michael Macedonia},
  url       = {http://ict.usc.edu/pubs/FlatWorld-%20Combining%20Hollywood%20Set-Design%20Techniques%20with%20VR.pdf},
  year      = {2003},
  date      = {2003-01-01},
  journal   = {IEEE Computer Graphics and Applications},
  number    = {January/February},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Gratch, Jonathan; Mao, Wenji
Automating After Action Review: Attributing Blame or Credit in Team Training Proceedings Article
In: Proceedings of the 12th Conference on Behavior Representation in Modeling and Simulation, Scottsdale, AZ, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_automating_2003,
  title     = {Automating After Action Review: Attributing Blame or Credit in Team Training},
  author    = {Jonathan Gratch and Wenji Mao},
  url       = {http://ict.usc.edu/pubs/Automating%20After%20Action%20Review-%20Attributing%20Blame%20or%20Credit%20in%20Team%20Training.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of the 12th Conference on Behavior Representation in Modeling and Simulation},
  address   = {Scottsdale, AZ},
  abstract  = {Social credit assignment is a process of social judgment whereby one singles out individuals to blame or credit for multi-agent activities. Such judgments are a key aspect of social intelligence and underlie social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a preliminary computational approach to forming such judgments based on an agent's causal knowledge and conversation interactions.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Mao, Wenji; Gratch, Jonathan
The Social Credit Assignment Problem Proceedings Article
In: Lecture Notes in Computer Science; Proceedings of the 4th International Workshop on Intelligent Virtual Agents (IVA), Kloster Irsee, Germany, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{mao_social_2003-1,
  title     = {The Social Credit Assignment Problem},
  author    = {Wenji Mao and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/The%20Social%20Credit%20Assignment%20Problem.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of the 4th International Workshop on Intelligent Virtual Agents (IVA)},
  series    = {Lecture Notes in Computer Science},
  volume    = {2792},
  address   = {Kloster Irsee, Germany},
  abstract  = {Social credit assignment is a process of social judgment whereby one singles out individuals to blame or credit for multi-agent activities. Such judgments are a key aspect of social intelligence and underlie social planning, social learning, natural language pragmatics and computational models of emotion. Based on psychological attribution theory, this paper presents a preliminary computational approach to forming such judgments based on an agent's causal knowledge and conversation interactions.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Larsson, Staffan
The Information State Approach to Dialogue Management Book Section
In: Current and New Directions in Discourse and Dialogue, pp. 325–353, 2003.
Links | BibTeX | Tags: Virtual Humans
@incollection{traum_information_2003,
  title     = {The Information State Approach to Dialogue Management},
  author    = {David Traum and Staffan Larsson},
  url       = {http://ict.usc.edu/pubs/The%20Information%20State%20Approach%20to%20Dialogue%20Management.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Current and New Directions in Discourse and Dialogue},
  pages     = {325--353},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {incollection}
}
Cao, Yong; Faloutsos, Petros; Pighin, Frédéric
Unsupervised Learning for Speech Motion Editing Proceedings Article
In: Proceedings of Eurographics/SIGGRAPH Symposium on Computer Animation, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{cao_unsupervised_2003,
  title     = {Unsupervised Learning for Speech Motion Editing},
  author    = {Yong Cao and Petros Faloutsos and Frédéric Pighin},
  url       = {http://ict.usc.edu/pubs/Unsupervised%20Learning%20for%20Speech%20Motion%20Editing.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of Eurographics/SIGGRAPH Symposium on Computer Animation},
  abstract  = {We present a new method for editing speech related facial motions. Our method uses an unsupervised learning technique, Independent Component Analysis (ICA), to extract a set of meaningful parameters without any annotation of the data. With ICA, we are able to solve a blind source separation problem and describe the original data as a linear combination of two sources. One source captures content (speech) and the other captures style (emotion). By manipulating the independent components we can edit the motions in intuitive ways.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Shapiro, Ari; Pighin, Frédéric
Hybrid Control For Interactive Character Animation Proceedings Article
In: Proceedings of the 11th Pacific Conference on Computer Graphics and Applications, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{shapiro_hybrid_2003,
  title     = {Hybrid Control For Interactive Character Animation},
  author    = {Ari Shapiro and Frédéric Pighin},
  url       = {http://ict.usc.edu/pubs/Hybrid%20Control%20For%20Interactive%20Character%20Animation.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of the 11th Pacific Conference on Computer Graphics and Applications},
  abstract  = {We implement a framework for animating interactive characters by combining kinematic animation with physical simulation. The combination of animation techniques allows the characters to exploit the advantages of each technique. For example, characters can perform natural-looking kinematic gaits and react dynamically to unexpected situations. Kinematic techniques such as those based on motion capture data can create very natural-looking animation. However, motion capture based techniques are not suitable for modeling the complex interactions between dynamically interacting characters. Physical simulation, on the other hand, is well suited for such tasks. Our work develops kinematic and dynamic controllers and transition methods between the two control methods for interactive character animation. In addition, we utilize the motion graph technique to develop complex kinematic animation from shorter motion clips as a method of kinematic control.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Morie, Jacquelyn; Williams, Josh
The Gestalt of Virtual Environments Proceedings Article
In: International Workshop on Presence, 2003.
Abstract | Links | BibTeX | Tags:
@inproceedings{morie_gestalt_2003,
  title     = {The Gestalt of Virtual Environments},
  author    = {Jacquelyn Morie and Josh Williams},
  url       = {http://ict.usc.edu/pubs/The%20Gestalt%20of%20Virtual%20Environments.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {International Workshop on Presence},
  abstract  = {The majority of research in the field of virtual reality to date has focused on increasing the fidelity of the environments created and trying to determine the quality of the participant experience. Efforts have been made to quantify such aspects, especially in regards to visuals and sound, and to a lesser extent to the user experience. Recent thinking has tended towards the assumption that ever-greater fidelity would ensure a better user experience. However, such emphasis on photo-realism and audio-realism does not take into account the collective results of our multimodal sensory inputs with their intertwined effects. Our design philosophy for the creation of virtual environments attempts to replicate the human experience, and asks the question: Is there an underlying fidelity of feels-real through which the quality of the participant experience could be improved?},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Rickel, Jeff; Gratch, Jonathan; Marsella, Stacy C.
Negotiation over Tasks in Hybrid Human-Agent Teams for Simulation-Based Training Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 441–448, Melbourne, Australia, 2003.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{traum_negotiation_2003,
  title     = {Negotiation over Tasks in Hybrid Human-Agent Teams for Simulation-Based Training},
  author    = {David Traum and Jeff Rickel and Jonathan Gratch and Stacy C. Marsella},
  url       = {http://ict.usc.edu/pubs/Negotiation%20over%20Tasks%20in%20Hybrid%20Human-Agent%20Teams%20for%20Simulation-Based%20Training.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  pages     = {441--448},
  address   = {Melbourne, Australia},
  abstract  = {The effectiveness of simulation-based training for individual tasks – such as piloting skills – is well established, but its use for team training raises challenging technical issues. Ideally, human users could gain valuable leadership experience by interacting with synthetic teammates in realistic and potentially stressful scenarios. However, creating human-like teammates that can support flexible, natural interactions with humans and other synthetic agents requires integrating a wide variety of capabilities, including models of teamwork, models of human negotiation, and the ability to participate in face-to-face spoken conversations in virtual worlds. We have developed such virtual humans by integrating and extending prior work in these areas, and we have applied our virtual humans to an example peacekeeping training scenario to guide and evaluate our research. Our models allow agents to reason about authority and responsibility for individual actions in a team task and, as appropriate, to carry out actions, give and accept orders, monitor task execution, and negotiate options. Negotiation is guided by the agents' dynamic assessment of alternative actions given the current scenario conditions, with the aim of guiding the human user towards an ability to make similar assessments.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Habash, Nizar; Dorr, Bonnie; Traum, David
Hybrid Natural Language Generation from Lexical Conceptual Structures Journal Article
In: Machine Translation, vol. 18, pp. 81–127, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{habash_hybrid_2003,
  title     = {Hybrid Natural Language Generation from Lexical Conceptual Structures},
  author    = {Nizar Habash and Bonnie Dorr and David Traum},
  url       = {http://ict.usc.edu/pubs/Hybrid%20Natural%20Language%20Generation%20from%20Lexical%20%20Conceptual%20Structures.pdf},
  year      = {2003},
  date      = {2003-01-01},
  journal   = {Machine Translation},
  volume    = {18},
  pages     = {81--127},
  abstract  = {This paper describes Lexogen, a system for generating natural-language sentences from Lexical Conceptual Structure, an interlingual representation. The system has been developed as part of a Chinese–English Machine Translation (MT) system; however, it is designed to be used for many other MT language pairs and natural language applications. The contributions of this work include: (1) development of a large-scale Hybrid Natural Language Generation system with language-independent components; (2) enhancements to an interlingual representation and associated algorithm for generation from ambiguous input; (3) development of an efficient reusable language-independent linearization module with a grammar description language that can be used with other systems; (4) improvements to an earlier algorithm for hierarchically mapping thematic roles to surface positions; and (5) development of a diagnostic tool for lexicon coverage and correctness and use of the tool for verification of English, Spanish, and Chinese lexicons. An evaluation of Chinese–English translation quality shows comparable performance with a commercial translation system. The generation system can also be extended to other languages and this is demonstrated and evaluated for Spanish.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Martinovski, Bilyana; Traum, David
The Error Is the Clue: Breakdown In Human-Machine Interaction Proceedings Article
In: Proceedings of ISCA Tutorial and Research Workshop International Speech Communication Association, Switzerland, 2003.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{martinovski_error_2003,
  title     = {The Error Is the Clue: Breakdown In Human-Machine Interaction},
  author    = {Bilyana Martinovski and David Traum},
  url       = {http://ict.usc.edu/pubs/The%20Error%20Is%20the%20Clue-%20Breakdown%20In%20Human-Machine%20Interaction.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of ISCA Tutorial and Research Workshop International Speech Communication Association},
  address   = {Switzerland},
  abstract  = {This paper focuses not on the detection and correction of specific errors in the interaction between machines and humans, but rather cases of massive deviation from the user's conversational expectations and desires. This can be the result of too many or too unusual errors, but also from dialogue strategies designed to minimize error, which make the interaction unnatural in other ways. We study causes of irritation such as over-fragmentation, over-clarity, over-coordination, over-directedness, and repetitiveness of verbal action, syntax, and intonation. Human reactions to these irritating features typically appear in the following order: tiredness, tolerance, anger, confusion, irony, humor, exhaustion, uncertainty, lack of desire to communicate. The studied features of human expressions of irritation in non-face-to-face interaction are: intonation, emphatic speech, elliptic speech, speed of speech, extra-linguistic signs, speed of verbal action, and overlap.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Unger, J.; Wenger, Andreas; Hawkins, Tim; Gardner, Andrew; Debevec, Paul
Capturing and Rendering With Incident Light Fields Proceedings Article
In: Proceedings of the 14th Eurographics workshop on Rendering, 2003.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{unger_capturing_2003,
  title     = {Capturing and Rendering With Incident Light Fields},
  author    = {Jonas Unger and Andreas Wenger and Tim Hawkins and Andrew Gardner and Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Capturing%20and%20Rendering%20With%20Incident%20Light%20Fields.pdf},
  year      = {2003},
  date      = {2003-01-01},
  booktitle = {Proceedings of the 14th Eurographics workshop on Rendering},
  abstract  = {This paper presents a process for capturing spatially and directionally varying illumination from a real-world scene and using this lighting to illuminate computer-generated objects. We use two devices for capturing such illumination. In the first we photograph an array of mirrored spheres in high dynamic range to capture the spatially varying illumination. In the second, we obtain higher resolution data by capturing images with an high dynamic range omnidirectional camera as it traverses across a plane. For both methods we apply the light field technique to extrapolate the incident illumination to a volume. We render computer-generated objects as illuminated by this captured illumination using a custom shader within an existing global illumination rendering system. To demonstrate our technique we capture several spatially-varying lighting environments with spotlights, shadows, and dappled lighting and use them to illuminate synthetic scenes. We also show comparisons to real objects under the same illumination.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Moore, Benjamin
QuBit Documentation Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 01.2003, 2003.
@techreport{moore_qubit_2003,
  title       = {QuBit Documentation},
  author      = {Benjamin Moore},
  url         = {http://ict.usc.edu/pubs/QuBit%20Documentation.pdf},
  year        = {2003},
  date        = {2003-01-01},
  number      = {ICT TR 01.2003},
  address     = {Marina del Rey, CA},
  institution = {University of Southern California Institute for Creative Technologies},
  keywords    = {},
  pubstate    = {published},
  tppubtype   = {techreport}
}
2002
Bharitkar, Sunil; Kyriakakis, Chris
Robustness of Spatial Averaging Equalization Methods: A Statistical Approach Proceedings Article
In: IEEE 36th Asilomar Conference on Signals, Systems & Computers, Pacific Grove, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_robustness_2002-1,
  title     = {Robustness of Spatial Averaging Equalization Methods: A Statistical Approach},
  author    = {Sunil Bharitkar and Chris Kyriakakis},
  url       = {http://ict.usc.edu/pubs/Robustness%20of%20Spatial%20Averaging%20Equalization%20Methods-%20A%20Statistical%20Approach.pdf},
  year      = {2002},
  date      = {2002-11-01},
  booktitle = {IEEE 36th Asilomar Conference on Signals, Systems & Computers},
  address   = {Pacific Grove, CA},
  abstract  = {Traditionally, room response equalization is performed to improve sound quality at a given listener. However, room responses vary with source and listener positions. Hence, in a multiple listener environment, equalization may be performed through spatial averaging of room responses. However, the performance of averaging based equalization, at the listeners, may be affected when listener positions change. In this paper, we present a statistical approach to map variations in listener positions to performance of spatial averaging based equalization. The results indicate that, for the analyzed listener configurations, the zone of equalization depends on distance of microphones from a source and the frequencies in the sound.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Bharitkar, Sunil; Kyriakakis, Chris
Perceptual Multiple Location Equalization with Clustering Proceedings Article
In: IEEE 36th Asilomar Conference on Signals, Systems & Computers, Pacific Grove, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_perceptual_2002,
  title     = {Perceptual Multiple Location Equalization with Clustering},
  author    = {Sunil Bharitkar and Chris Kyriakakis},
  url       = {http://ict.usc.edu/pubs/Perceptual%20Multiple%20Location%20Equalization%20with%20Clustering.pdf},
  year      = {2002},
  date      = {2002-11-01},
  booktitle = {IEEE 36th Asilomar Conference on Signals, Systems & Computers},
  address   = {Pacific Grove, CA},
  abstract  = {Typically, room equalization techniques do not focus on designing filters that equalize the room transfer functions on perceptually relevant spectral features. In this paper we address the problem of room equalization for multiple listeners, simultaneously, using a perceptually designed equalization filter based on pattern recognition techniques. Some features of the proposed filter are, its ability to perform simultaneous equalization at multiple locations, a reduced order, and a psychoacoustically motivated design. In summary, the simultaneous multiple location equalization, using a pattern recognition method, is performed over perceptually relevant spectral components derived from the auditory filtering mechanism.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Bharitkar, Sunil; Hilmes, Philip; Kyriakakis, Chris
Robustness of Multiple Listener Equalization With Magnitude Response Averaging Proceedings Article
In: Proceedings of the Audio Engineering Society Convention, Los Angeles, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_robustness_2002,
  title     = {Robustness of Multiple Listener Equalization With Magnitude Response Averaging},
  author    = {Sunil Bharitkar and Philip Hilmes and Chris Kyriakakis},
  url       = {http://ict.usc.edu/pubs/Robustness%20of%20Multiple%20Listener%20Equalization%20With%20Magnitude%20Response%20Averaging.pdf},
  year      = {2002},
  date      = {2002-10-01},
  booktitle = {Proceedings of the Audio Engineering Society Convention},
  address   = {Los Angeles, CA},
  abstract  = {Traditionally, room response equalization is performed to improve sound quality at a given listener. However, room responses vary with source and listener positions. Hence, in a multiple listener environment, equalization may be performed through spatial averaging of magnitude responses at locations of interest. However, the performance of averaging based equalization, at the listeners, may be affected when listener positions change. In this paper, we present a statistical approach to map variations in listener positions to a performance metric of equalization for magnitude response averaging. The results indicate that, for the analyzed listener configurations, the zone of equalization depends on distance of microphones from a source and the frequencies in the sound.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Georgiou, Panayiotis G.; Kyriakakis, Chris
An Alternative Model for Sound Signals Encountered in Reverberant Environments; Robust Maximum Likelihood Localization and Parameter Estimation Based on a Sub-Gaussian Model Proceedings Article
In: Proceedings of the Audio Engineering Society Convention, Los Angeles, CA, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{georgiou_alternative_2002,
  title     = {An Alternative Model for Sound Signals Encountered in Reverberant Environments; Robust Maximum Likelihood Localization and Parameter Estimation Based on a Sub-Gaussian Model},
  author    = {Panayiotis G. Georgiou and Chris Kyriakakis},
  url       = {http://ict.usc.edu/pubs/An%20Alternative%20Model%20for%20Sound%20Signals%20Encountered%20in%20Reverberant%20Environments%3b%20Robust%20Maximum%20Likelihood%20Localization%20and%20Parameter%20Estimation%20Based%20on%20a%20Sub-Gaussian%20Model.pdf},
  year      = {2002},
  date      = {2002-10-01},
  booktitle = {Proceedings of the Audio Engineering Society Convention},
  address   = {Los Angeles, CA},
  abstract  = {In this paper we investigate an alternative to the Gaussian density for modeling signals encountered in audio environments. The observation that sound signals are impulsive in nature, combined with the reverberation effects commonly encountered in audio, motivates the use of the Sub-Gaussian density. The new Sub-Gaussian statistical model and the separable solution of its Maximum Likelihood estimator are derived. These are used in an array scenario to demonstrate with both simulations and two different microphone arrays the achievable performance gains. The simulations exhibit the robustness of the sub-Gaussian based method while the real world experiments reveal a significant performance gain, supporting the claim that the sub-Gaussian model is better suited for sound signals.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Johnson, W. Lewis; Narayanan, Shrikanth; Whitney, Richard; Das, Rajat; Labore, Catherine
Limited Domain Synthesis of Expressive Military Speech for Animated Characters Proceedings Article
In: IEEE 2002 Workshop on Speech Synthesis, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{johnson_limited_2002,
  title     = {Limited Domain Synthesis of Expressive Military Speech for Animated Characters},
  author    = {W. Lewis Johnson and Shrikanth Narayanan and Richard Whitney and Rajat Das and Catherine Labore},
  url       = {http://ict.usc.edu/pubs/Limited%20Domain%20Synthesis%20of%20Expressive%20Military%20Speech%20for%20Animated%20Characters.pdf},
  year      = {2002},
  date      = {2002-09-01},
  booktitle = {IEEE 2002 Workshop on Speech Synthesis},
  abstract  = {Text-to-speech synthesis can play an important role in interactive education and training applications, as voices for animated agents. Such agents need high-quality voices capable of expressing intent and emotion. This paper presents preliminary results in an effort aimed at synthesizing expressive military speech for training applications. Such speech has acoustic and prosodic characteristics that can differ markedly from ordinary conversational speech. A limited domain synthesis approach is used employing samples of expressive speech, classified according to speaking style. The resulting synthesizer was tested both in isolation and in the context of a virtual reality training scenario with animated characters.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
The Theory of Mind in Strategy Representations Proceedings Article
In: Proceedings of the Twenty-fourth Annual Meeting of the Cognitive Science Society (CogSci), Lawrence Erlbaum Associates, George Mason University, 2002.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_theory_2002,
  title     = {The Theory of Mind in Strategy Representations},
  author    = {Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/The%20Theory%20of%20Mind%20in%20Strategy%20Representations.PDF},
  year      = {2002},
  date      = {2002-08-01},
  booktitle = {Proceedings of the Twenty-fourth Annual Meeting of the Cognitive Science Society (CogSci)},
  publisher = {Lawrence Erlbaum Associates},
  address   = {George Mason University},
  abstract  = {Many scientific fields continue to explore cognition related to Theory of Mind abilities, where people reason about the mental states of themselves and others. Experimental and theoretical approaches to this problem have largely avoided issues concerning the contents of representations employed in this class of reasoning. In this paper, we describe a new approach to the investigation of representations related to Theory of Mind abilities that is based on the analysis of commonsense strategies. We argue that because the mental representations of strategies must include concepts of mental states and processes, the large-scale analysis of strategies can be informative of the representational scope of Theory of Mind abilities. The results of an analysis of this sort are presented as a description of thirty representational areas that organize the breadth of Theory of Mind concepts. Implications for Theory Theories and Simulation Theories of Theory of Mind reasoning are discussed.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Angros, Richard Jr.; Johnson, W. Lewis; Rickel, Jeff; Scholer, Andrew
Learning Domain Knowledge for Teaching Procedural Skills Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Bologna, Italy, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{angros_learning_2002,
  title     = {Learning Domain Knowledge for Teaching Procedural Skills},
  author    = {Angros, Jr., Richard and W. Lewis Johnson and Jeff Rickel and Andrew Scholer},
  url       = {http://ict.usc.edu/pubs/Learning%20Domain%20Knowledge%20for%20Teaching%20Procedural%20Skills.pdf},
  year      = {2002},
  date      = {2002-07-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  address   = {Bologna, Italy},
  abstract  = {This paper describes a method for acquiring procedural knowledge for use by pedagogical agents in interactive simulation-based learning environments. Such agents need to be able to adapt their behavior to the changing conditions of the simulated world, and respond appropriately in mixed-initiative interactions with learners. This requires a good understanding of the goals and causal dependencies in the procedures being taught. Our method, inspired by human tutorial dialog, combines direct specification, demonstration, and experimentation. The human instructor demonstrates the skill being taught, while the agent observes the effects of the procedure on the simulated world. The agent then autonomously experiments with the procedure, making modifications to it, in order to understand the role of each step in the procedure. At various points the instructor can provide clarifications, and modify the developing procedural description as needed. This method is realized in a system called Diligent, which acquires procedural knowledge for the STEVE animated pedagogical agent.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David; Rickel, Jeff
Embodied Agents for Multi-party Dialogue in Immersive Virtual Worlds Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), Bologna, Italy, 2002.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_embodied_2002,
  title     = {Embodied Agents for Multi-party Dialogue in Immersive Virtual Worlds},
  author    = {David Traum and Jeff Rickel},
  url       = {http://ict.usc.edu/pubs/Embodied%20Agents%20for%20Multi-party%20Dialogue%20in%20Immersive%20%20Virtual%20Worlds.pdf},
  year      = {2002},
  date      = {2002-07-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  address   = {Bologna, Italy},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Kovar, Lucas; Gleicher, Michael; Pighin, Frédéric
Motion Graphs Proceedings Article
In: Proceedings of SIGGRAPH '02, San Antonio, TX, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{kovar_motion_2002,
  title     = {Motion Graphs},
  author    = {Lucas Kovar and Michael Gleicher and Frédéric Pighin},
  url       = {http://ict.usc.edu/pubs/Motion%20Graphs.pdf},
  year      = {2002},
  date      = {2002-07-01},
  booktitle = {Proceedings of SIGGRAPH '02},
  address   = {San Antonio, TX},
  abstract  = {In this paper we present a novel method for creating realistic, controllable motion. Given a corpus of motion capture data, we automatically construct a directed graph called a motion graph that encapsulates connections among the database. The motion graph consists both of pieces of original motion and automatically generated transitions. Motion can be generated simply by building walks on the graph. We present a general framework for extracting particular graph walks that meet a user's specifications. We then show how this framework can be applied to the specific problem of generating different styles of locomotion along arbitrary paths.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Debevec, Paul; Wenger, Andreas; Tchou, Chris; Gardner, Andrew; Waese, Jamie; Hawkins, Tim
A Lighting Reproduction Approach to Live-Action Compositing Proceedings Article
In: SIGGRAPH 2002, pp. 547–556, San Antonio, TX, 2002.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{debevec_lighting_2002,
  title     = {A Lighting Reproduction Approach to Live-Action Compositing},
  author    = {Paul Debevec and Andreas Wenger and Chris Tchou and Andrew Gardner and Jamie Waese and Tim Hawkins},
  url       = {http://ict.usc.edu/pubs/A%20Lighting%20Reproduction%20Approach%20to%20Live-Action%20Compositing.pdf},
  year      = {2002},
  date      = {2002-07-01},
  booktitle = {SIGGRAPH 2002},
  pages     = {547–556},
  address   = {San Antonio, TX},
  abstract  = {We describe a process for compositing a live performance of an actor into a virtual set wherein the actor is consistently illuminated by the virtual environment. The Light Stage used in this work is a two-meter sphere of inward-pointing RGB light emitting diodes focused on the actor, where each light can be set to an arbitrary color and intensity to replicate a real-world or virtual lighting environment. We implement a digital two-camera infrared matting system to composite the actor into the background plate of the environment without affecting the visible-spectrum illumination on the actor. The color response of the system is calibrated to produce correct color renditions of the actor as illuminated by the environment. We demonstrate moving-camera composites of actors into real-world environments and virtual sets such that the actor is properly illuminated by the environment into which they are composited.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
Modeling the Influence of Emotion on Belief for Virtual Training Simulations Proceedings Article
In: Proceedings of the 11th Conference on Computer Generated Forces and Behavioral Simulation, Orlando, FL, 2002.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_modeling_2002,
  title     = {Modeling the Influence of Emotion on Belief for Virtual Training Simulations},
  author    = {Stacy C. Marsella and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Modeling%20the%20influence%20of%20emotion.pdf},
  year      = {2002},
  date      = {2002-06-01},
  booktitle = {Proceedings of the 11th Conference on Computer Generated Forces and Behavioral Simulation},
  address   = {Orlando, FL},
  abstract  = {Recognizing and managing emotion in oneself and in those under one's command is an important component of leadership training. Most computational models of emotion have focused on the problem of identifying emotional features of the physical environment and mapping that into motivations to act in the world. But emotions also influence how we perceive the world and how we communicate that perception to others. This paper outlines an initial computational foray into this more vexing problem.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.; Lent, Michael
Virtual Humans as Participants vs. Virtual Humans as Actors Proceedings Article
In: AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment, Stanford University, 2002.
Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_virtual_2002,
  title     = {Virtual Humans as Participants vs. Virtual Humans as Actors},
  author    = {Andrew S. Gordon and Michael Lent},
  url       = {http://ict.usc.edu/pubs/Virtual%20Humans%20as%20Participants%20vs%20Virtual%20Humans%20as%20Actors.PDF},
  year      = {2002},
  date      = {2002-03-01},
  booktitle = {AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment},
  address   = {Stanford University},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gordon, Andrew S.
Enabling and recognizing strategic play in strategy games: Lessons from Sun Tzu Proceedings Article
In: The 2002 AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment, Stanford University, 2002.
Abstract | Links | BibTeX | Tags: The Narrative Group
@inproceedings{gordon_enabling_2002,
  title     = {Enabling and recognizing strategic play in strategy games: Lessons from {Sun Tzu}},
  author    = {Andrew S. Gordon},
  url       = {http://ict.usc.edu/pubs/Enabling%20and%20recognizing%20strategic%20play%20in%20strategy%20games-%20Lessons%20from%20Sun%20Tzu.PDF},
  year      = {2002},
  date      = {2002-03-01},
  booktitle = {The 2002 AAAI Spring Symposium on Artificial Intelligence and Interactive Entertainment},
  address   = {Stanford University},
  abstract  = {The interactive entertainment genre of the strategy game entertains users by allowing them to engage in strategic play, which should encourage game designers to devote development efforts toward facilitating users that wish to employ commonsense strategies, and to recognize and react to specific user strategies during game play. This paper attempts to facilitate these development efforts by identifying and analyzing 43 strategies from Sun Tzu's The Art of War, which are broadly applicable across games in the strategy game genre. For each strategy, a set of specific actions are identified that should be provided to users to enable their execution, along with generalized recognition rules that can facilitate the design of entertaining responses to users' strategic behavior. Consideration of how the enabling actions could be incorporated into an existing strategy game is provided.},
  keywords  = {The Narrative Group},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Fleischman, Michael; Hovy, Eduard
Emotional Variation in Speech-Based Natural Language Generation Proceedings Article
In: International Natural Language Generation Conference, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{fleischman_emotional_2002,
  title     = {Emotional Variation in Speech-Based Natural Language Generation},
  author    = {Michael Fleischman and Eduard Hovy},
  url       = {http://ict.usc.edu/pubs/Emotional%20Variation%20in%20Speech-Based%20Natural%20Language%20Generation.pdf},
  year      = {2002},
  date      = {2002-01-01},
  booktitle = {International Natural Language Generation Conference},
  abstract  = {We present a framework for handling emotional variations in a speech-based natural language system for use in the MRE virtual training environment. The system is a first step toward addressing issues in emotion-based modeling of verbal communicative behavior. We cast the problem of emotional generation as a distance minimization task, in which the system chooses between multiple valid realizations for a given input based on the emotional distance of each realization from the speaker's attitude toward that input.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Debevec, Paul
A Tutorial on Image-Based Lighting Journal Article
In: IEEE Computer Graphics and Applications, 2002.
Links | BibTeX | Tags: Graphics
@article{debevec_tutorial_2002,
  title     = {A Tutorial on Image-Based Lighting},
  author    = {Paul Debevec},
  url       = {http://ict.usc.edu/pubs/Image-Based%20Lighting.pdf},
  year      = {2002},
  date      = {2002-01-01},
  journal   = {IEEE Computer Graphics and Applications},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {article}
}
Hill, Randall W.; Han, Changhee; Lent, Michael
Applying Perceptually Driven Cognitive Mapping To Virtual Urban Environments Proceedings Article
In: Proceedings of 14th Innovative Applications of Artificial Intelligence Conference, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{hill_applying_2002,
  title     = {Applying Perceptually Driven Cognitive Mapping To Virtual Urban Environments},
  author    = {Randall W. Hill and Changhee Han and Michael Lent},
  url       = {http://ict.usc.edu/pubs/Applying%20Perceptually%20Driven%20Cognitive%20Mapping%20To%20Virtual%20Urban%20Environments.pdf},
  year      = {2002},
  date      = {2002-01-01},
  booktitle = {Proceedings of 14th Innovative Applications of Artificial Intelligence Conference},
  abstract  = {This paper describes a method for building a cognitive map of a virtual urban environment. Our routines enable virtual humans to map their environment using a realistic model of perception. We based our implementation on a computational framework proposed by Yeap and Jefferies (Yeap \& Jefferies 1999) for representing a local environment as a structure called an Absolute Space Representation (ASR). Their algorithms compute and update ASRs from a 2-1/2D sketch of the local environment, and then connect the ASRs together to form a raw cognitive map. Our work extends the framework developed by Yeap and Jefferies in three important ways. First, we implemented the framework in a virtual training environment, the Mission Rehearsal Exercise (Swartout et al. 2001). Second, we describe a method for acquiring a 2-1/2D sketch in a virtual world, a step omitted from their framework, but which is essential for computing an ASR. Third, we extend the ASR algorithm to map regions that are partially visible through exits of the local space. Together, the implementation of the ASR algorithm along with our extensions will be useful in a wide variety of applications involving virtual humans and agents who need to perceive and reason about spatial concepts in urban environments.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Traum, David
Ideas on Multi-layer Dialogue Management for Multi-party, Multi-conversation, Multi-modal Communication Proceedings Article
In: Computational Linguistics in the Netherlands 2001: Selected Papers from the Twelfth CLIN Meeting, 2002.
Links | BibTeX | Tags: Virtual Humans
@inproceedings{traum_ideas_2002,
  title     = {Ideas on Multi-layer Dialogue Management for Multi-party, Multi-conversation, Multi-modal Communication},
  author    = {David Traum},
  url       = {http://ict.usc.edu/pubs/Ideas%20on%20Multi-layer%20Dialogue%20Management%20for%20Multi-party,%20Multi-conversation,%20Multi-modal%20Communication.pdf},
  year      = {2002},
  date      = {2002-01-01},
  booktitle = {Computational Linguistics in the Netherlands 2001: Selected Papers from the Twelfth CLIN Meeting},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Marsella, Stacy C.; Gratch, Jonathan
A step toward irrationality: using emotion to change belief Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 334–341, Bologna, Italy, 2002.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_step_2002,
  title     = {A step toward irrationality: using emotion to change belief},
  author    = {Stacy C. Marsella and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/A%20step%20toward%20irrationality-%20using%20emotion%20to%20change%20belief.pdf},
  year      = {2002},
  date      = {2002-01-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  volume    = {1},
  pages     = {334–341},
  address   = {Bologna, Italy},
  abstract  = {Emotions have a powerful impact on behavior and beliefs. The goal of our research is to create general computational models of this interplay of emotion, cognition and behavior to inform the design of virtual humans. Here, we address an aspect of emotional behavior that has been studied extensively in the psychological literature but largely ignored by computational approaches, emotion-focused coping. Rather than motivating external action, emotion-focused coping strategies alter beliefs in response to strong emotions. For example an individual may alter beliefs about the importance of a goal that is being threatened, thereby reducing their distress. We present a preliminary model of emotion-focused coping and discuss how coping processes, in general, can be coupled to emotions and behavior. The approach is illustrated within a virtual reality training environment where the models are used to create virtual human characters in high-stress social situations.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pighin, Frédéric; Szeliski, Richard; Salesin, David H.
Modeling and Animating Realistic Faces from Images Journal Article
In: International Journal on Computer Vision, vol. 50, pp. 143–169, 2002.
Abstract | Links | BibTeX | Tags:
@article{pighin_modeling_2002,
  title     = {Modeling and Animating Realistic Faces from Images},
  author    = {Frédéric Pighin and Richard Szeliski and David H. Salesin},
  url       = {http://ict.usc.edu/pubs/Modeling%20and%20Animating%20Realistic%20Faces%20from%20Images.pdf},
  year      = {2002},
  date      = {2002-01-01},
  journal   = {International Journal on Computer Vision},
  volume    = {50},
  pages     = {143–169},
  abstract  = {We present a new set of techniques for modeling and animating realistic faces from photographs and videos. Given a set of face photographs taken simultaneously, our modeling technique allows the interactive recovery of a textured 3D face model. By repeating this process for several facial expressions, we acquire a set of face models that can be linearly combined to express a wide range of expressions. Given a video sequence, this linear face model can be used to estimate the face position, orientation, and facial expression at each frame. We illustrate these techniques on several datasets and demonstrate robust estimations of detailed face geometry and motion.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Gratch, Jonathan
Details of the CFOR Planner Technical Report
University of Southern California Institute for Creative Technologies Marina del Rey, CA, no. ICT TR 01.2002, 2002.
Links | BibTeX | Tags: Virtual Humans
@techreport{gratch_details_2002,
  title       = {Details of the {CFOR} Planner},
  author      = {Jonathan Gratch},
  url         = {http://ict.usc.edu/pubs/Details%20of%20the%20CFOR%20Planner.pdf},
  year        = {2002},
  date        = {2002-01-01},
  number      = {ICT TR 01.2002},
  address     = {Marina del Rey, CA},
  institution = {University of Southern California Institute for Creative Technologies},
  keywords    = {Virtual Humans},
  pubstate    = {published},
  tppubtype   = {techreport}
}
Hill, Randall W.; Kim, Youngjun; Gratch, Jonathan
Anticipating where to look: predicting the movements of mobile agents in complex terrain Proceedings Article
In: International Conference on Autonomous Agents and Multiagent Systems (AAMAS), pp. 821–827, Bologna, Italy, 2002.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{hill_anticipating_2002,
  title     = {Anticipating where to look: predicting the movements of mobile agents in complex terrain},
  author    = {Randall W. Hill and Youngjun Kim and Jonathan Gratch},
  url       = {http://ict.usc.edu/pubs/Anticipating%20Where%20to%20Look-%20Predicting%20the%20Movements%20of%20Mobile%20Agents%20in%20Complex%20Terrain.pdf},
  year      = {2002},
  date      = {2002-01-01},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems (AAMAS)},
  volume    = {2},
  pages     = {821–827},
  address   = {Bologna, Italy},
  abstract  = {This paper describes a method for making short-term predictions about the movement of mobile agents in complex terrain. Virtual humans need this ability in order to shift their visual attention between dynamic objects-predicting where an object will be located a few seconds in the future facilitates the visual reacquisition of the target object. Our method takes into account environmental cues in making predictions and it also indicates how long the prediction is valid, which varies depending on the context. We implemented this prediction technique in a virtual pilot that flies a helicopter in a synthetic environment.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Rickel, Jeff; Marsella, Stacy C.; Gratch, Jonathan; Hill, Randall W.; Traum, David; Swartout, William
Toward a New Generation of Virtual Humans for Interactive Experiences Journal Article
In: IEEE Intelligent Systems, 2002.
Links | BibTeX | Tags: Social Simulation, Virtual Humans
@article{rickel_toward_2002,
  title     = {Toward a New Generation of Virtual Humans for Interactive Experiences},
  author    = {Jeff Rickel and Stacy C. Marsella and Jonathan Gratch and Randall W. Hill and David Traum and William Swartout},
  url       = {http://ict.usc.edu/pubs/Toward%20a%20New%20Generation%20of%20Virtual%20Humans%20for%20Interactive%20Experiences.pdf},
  year      = {2002},
  date      = {2002-01-01},
  journal   = {IEEE Intelligent Systems},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
Rickel, Jeff; Johnson, W. Lewis
Extending Virtual Humans to Support Team Training in Virtual Reality Book Section
In: Lakemeyer, G.; Nebel, B. (Ed.): Exploring Artificial Intelligence in the New Millennium, Morgan Kaufmann Publishers, San Francisco, CA, 2002.
Abstract | Links | BibTeX | Tags:
@incollection{rickel_extending_2002,
  title     = {Extending Virtual Humans to Support Team Training in Virtual Reality},
  author    = {Jeff Rickel and W. Lewis Johnson},
  editor    = {G. Lakemeyer and B. Nebel},
  url       = {http://ict.usc.edu/pubs/Extending%20Virtual%20Humans%20to%20Support%20Team%20Training%20in%20Virtual%20Reality.pdf},
  year      = {2002},
  date      = {2002-01-01},
  booktitle = {Exploring Artificial Intelligence in the New Millennium},
  publisher = {Morgan Kaufmann Publishers},
  address   = {San Francisco, CA},
  abstract  = {This paper describes the use of virtual humans and distributed virtual reality to support team training, where students must learn their individual role in the team as well as how to coordinate their actions with their teammates. Students, instructors, and virtual humans cohabit a three-dimensional, interactive, simulated mock-up of their work environment, where they can practice together in realistic situations. The virtual humans can serve as instructors for individual students, and they can substitute for missing team members, allowing students to practice team tasks when some or all human instructors and teammates are unavailable. The paper describes our learning environment, the issues that arise in developing virtual humans for team training, and our design for the virtual humans, which is an extension of our Steve agent previously used for one-on-one tutoring.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {incollection}
}
Morie, Jacquelyn; Iyer, Kumar; Valanejad, R.; Sadek, Ramy; Miraglia, D.; Milam, D.
Emotionally Evocative Environments for Training Proceedings Article
In: Proceedings of the 23rd Army Science Conference, Orlando, FL, 2002.
Abstract | Links | BibTeX | Tags:
@inproceedings{morie_emotionally_2002,
  title     = {Emotionally Evocative Environments for Training},
  author    = {Jacquelyn Morie and Kumar Iyer and R. Valanejad and Ramy Sadek and D. Miraglia and D. Milam},
  url       = {http://ict.usc.edu/pubs/EMOTIONALLY%20EVOCATIVE%20ENVIRONMENTS%20FOR%20TRAINING.pdf},
  year      = {2002},
  date      = {2002-01-01},
  booktitle = {Proceedings of the 23rd Army Science Conference},
  address   = {Orlando, FL},
  abstract  = {This paper describes a project currently in progress at the University of Southern California's Institute for Creative Technologies (ICT). Much of the research at ICT involves developing better graphics, sound and artificial intelligence to be used in creating the next generation of training tools for the United States Army. Our project focuses on the use of emotional responses as an enhancement for training. Research indicates that an emotional connection is a strong factor in how and what we remember. In addition, real world situations often evoke surprising and significant emotional reactions that soldiers must deal with. Few current immersive training scenarios, however, focus on the emotional state of the trainee, limiting training scenarios to basic objective elements. The Sensory Environments Evaluation (SEE) Project at ICT is investigating the potential of emotionally compelling environments for more effective training. We do this by skillfully combining the sensory inputs available in virtual environments. Our current efforts concentrate on sight and sound; smell will be included as scent delivery methods improve. Evaluation studies are planned to determine the effectiveness of the techniques we are developing.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Rickel, Jeff; André, Elisabeth; Cassell, Justine; Petajan, Eric; Badler, Norman
Creating Interactive Virtual Humans: Some Assembly Required Journal Article
In: IEEE Intelligent Systems, pp. 54–63, 2002.
Abstract | Links | BibTeX | Tags: Virtual Humans
@article{gratch_creating_2002,
  title     = {Creating Interactive Virtual Humans: Some Assembly Required},
  author    = {Jonathan Gratch and Jeff Rickel and Elisabeth André and Justine Cassell and Eric Petajan and Norman Badler},
  url       = {http://ict.usc.edu/pubs/Creating%20Interactive%20Virtual%20Humans-%20Some%20Assembly%20Required.pdf},
  year      = {2002},
  date      = {2002-01-01},
  journal   = {IEEE Intelligent Systems},
  pages     = {54–63},
  abstract  = {Science fiction has long imagined a future populated with artificial humans–human-looking devices with human-like intelligence. Although Asimov's benevolent robots and the Terminator movies' terrible war machines are still a distant fantasy, researchers across a wide range of disciplines are beginning to work together toward a more modest goal–building virtual humans. These software entities look and act like people and can engage in conversation and collaborative tasks, but they live in simulated environments. With the untidy problems of sensing and acting in the physical world thus dispensed, the focus of virtual human research is on capturing the richness and dynamics of human behavior.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {article}
}
2001
Bharitkar, Sunil; Kyriakakis, Chris
Robustness of the Eigenfilter for Variations in Listener Responses for Selective Signal Cancellation Proceedings Article
In: IEEE Workshop on Applications of Signal Processing to Audio and Acoustics, New Paltz, New York, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_robustness_2001,
  title     = {Robustness of the Eigenfilter for Variations in Listener Responses for Selective Signal Cancellation},
  author    = {Sunil Bharitkar and Chris Kyriakakis},
  url       = {http://ict.usc.edu/pubs/ROBUSTNESS%20OF%20THE%20EIGENFILTER%20FOR%20VARIATIONS%20IN%20LISTENER%20RESPONSES%20FOR%20SELECTIVE%20SIGNAL%20CANCELLATION.pdf},
  year      = {2001},
  date      = {2001-10-01},
  booktitle = {IEEE Workshop on Applications of Signal Processing to Audio and Acoustics},
  address   = {New Paltz, New York},
  abstract  = {Selectively cancelling signals at specific locations within an acoustical environment with multiple listeners is of significant importance for home theater, automobile, teleconferencing, office, industrial and other applications. We have proposed the eigenfilter for selectively cancelling signals in one direction, while attempting to retain them at unintentional directions. In this paper we investigate the behaviour of the performance measure (i.e., the gain) for a vowel and an unvoiced fricative, when the listener moves his head, in an automobile type environment. We show that in such a situation, a large energy in the difference between the impulse responses at a listener's location may affect the gain substantially. listeners in which only a subset wish to listen to the audio signal.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Gratch, Jonathan; Douglas, Jay
Adaptive narrative: How autonomous agents, Hollywood, and multiprocessing operating systems can live happily ever after Proceedings Article
In: Proceedings of International Conference on Virtual Storytelling, pp. 100–112, Avignon, France, 2001, ISBN: 3-540-42611-6.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_adaptive_2001,
  title     = {Adaptive Narrative: How Autonomous Agents, {Hollywood}, and Multiprocessing Operating Systems Can Live Happily Ever After},
  author    = {Gratch, Jonathan and Douglas, Jay},
  url       = {http://ict.usc.edu/pubs/Adaptive%20Narrative-%20How%20Autonomous%20Agents,%20Hollywood,%20and%20Multiprocessing%20Operating%20Systems%20Can%20Live%20Happily%20Ever%20After.pdf},
  doi       = {10.1007/3-540-45420-9_12},
  isbn      = {3-540-42611-6},
  year      = {2001},
  date      = {2001-10-01},
  booktitle = {Proceedings of International Conference on Virtual Storytelling},
  pages     = {100--112},
  address   = {Avignon, France},
  series    = {LNCS},
  abstract  = {Interacting Storytelling systems integrate AI techniques such as planning with narrative representations to generate stories. In this paper, we discuss the use of planning formalisms in Interactive Storytelling from the perspective of story generation and authoring. We compare two different planning formalisms, Hierarchical Task Network (HTN) planning and Heuristic Search Planning (HSP). While HTN provide a strong basis for narrative coherence in the context of interactivity, HSP offer additional flexibility and the generation of stories and the mechanisms for generating comic situations.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Rickel, Jeff
Intelligent Virtual Agents for Education and Training: Opportunities and Challenges Proceedings Article
In: Intelligent Virtual Agents: The 3rd International Workshop, Madrid, Spain, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{rickel_intelligent_2001,
  title     = {Intelligent Virtual Agents for Education and Training: Opportunities and Challenges},
  author    = {Rickel, Jeff},
  url       = {http://ict.usc.edu/pubs/Intelligent%20Virtual%20Agents%20for%20Education%20and%20Training-%20Opportunities%20and%20Challenges.pdf},
  year      = {2001},
  date      = {2001-09-01},
  booktitle = {Intelligent Virtual Agents: The 3rd International Workshop},
  address   = {Madrid, Spain},
  abstract  = {Interactive virtual worlds provide a powerful medium for experiential learning. Intelligent virtual agents can cohabit virtual worlds with people and facilitate such learning as guides, mentors, and teammates. This paper reviews the main pedagogical advantages of animated agents in virtual worlds, discusses two key research challenges, and outlines an ambitious new project addressing those challenges.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Bharitkar, Sunil; Kyriakakis, Chris
New Factors in Room Equalization Using a Fuzzy Logic Approach Proceedings Article
In: Proceedings of the Audio Engineering Society Convention, New York, NY, 2001.
Abstract | Links | BibTeX | Tags:
@inproceedings{bharitkar_new_2001,
  title     = {New Factors in Room Equalization Using a Fuzzy Logic Approach},
  author    = {Bharitkar, Sunil and Kyriakakis, Chris},
  url       = {http://ict.usc.edu/pubs/New%20Factors%20in%20Room%20Equalization%20Using%20a%20Fuzzy%20Logic%20Approach.pdf},
  year      = {2001},
  date      = {2001-09-01},
  booktitle = {Proceedings of the Audio Engineering Society Convention},
  address   = {New York, NY},
  abstract  = {Room acoustical modes, particularly in small rooms, cause a significant variation in the room responses measured at different locations. Responses measured only a few cm apart can vary by up to 15-20 dB at certain frequencies. This makes it difficult to equalize an audio system for multiple simultaneous listeners. Previous methods have utilized multiple microphones and spatial averaging with equal weighting. In this paper we present a different multiple point equalization method. We first determine representative prototypical room responses derived from several room responses that share similar characteristics, using the fuzzy unsupervised learning method. These prototypical responses can then be combined to form a general point response. When we use the inverse of the general point response as an equalizing filter, our results show a significant improvement in equalization performance over the spatial averaging methods. This simultaneous equalization is achieved by suppressing the peaks in the room magnitude spectrums. Applications of this method thus include equalization and multiple point sound control at home and in automobiles.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Marsella, Stacy C.; Gratch, Jonathan; Rickel, Jeff
The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans Proceedings Article
In: Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{marsella_effect_2001,
  title     = {The Effect of Affect: Modeling the Impact of Emotional State on the Behavior of Interactive Virtual Humans},
  author    = {Marsella, Stacy C. and Gratch, Jonathan and Rickel, Jeff},
  url       = {http://ict.usc.edu/pubs/The%20Effect%20of%20Affect-%20Modeling%20the%20Impact%20of%20Emotional%20State%20on%20the%20Behavior%20of%20Interactive%20Virtual%20Humans.pdf},
  year      = {2001},
  date      = {2001-06-01},
  booktitle = {Workshop on Representing, Annotating, and Evaluating Non-Verbal and Verbal Communicative Acts to Achieve Contextual Embodied Agents},
  address   = {Montreal, Canada},
  abstract  = {A person's behavior provides significant information about their emotional state, attitudes, and attention. Our goal is to create virtual humans that convey such information to people while interacting with them in virtual worlds. The virtual humans must respond dynamically to the events surrounding them, which are fundamentally influenced by users' actions, while providing an illusion of human-like behavior. A user must be able to interpret the dynamic cognitive and emotional state of the virtual humans using the same nonverbal cues that people use to understand one another. Towards these goals, we are integrating and extending components from three prior systems: a virtual human architecture with a range of cognitive and motor capabilities, a model of emotional appraisal, and a model of the impact of emotional state on physical behavior. We describe the key research issues, our approach, and an initial implementation in an Army peacekeeping scenario.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Douglas, Jay; Gratch, Jonathan
Adaptive Narrative: How Autonomous Agents, Hollywood, and Multiprocessing Operating Systems Can Live Happily Ever After Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{douglas_adaptive_2001,
  title     = {Adaptive Narrative: How Autonomous Agents, {Hollywood}, and Multiprocessing Operating Systems Can Live Happily Ever After},
  author    = {Douglas, Jay and Gratch, Jonathan},
  url       = {http://ict.usc.edu/pubs/Adaptive%20Narrative-%20How%20Autonomous%20Agents,%20Hollywood,%20and%20Multiprocessing%20Operating%20Systems%20Can%20Live%20Happily%20Ever%20After.pdf},
  year      = {2001},
  date      = {2001-06-01},
  booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
  address   = {Montreal, Canada},
  abstract  = {Creating dramatic narratives for real-time virtual reality environments is complicated by the lack of temporal distance between the occurrence of an event and its telling in the narrative. This paper describes the application of a multiprocessing operating system architecture to the creation of adaptive narratives, narratives that use autonomous actors or agents to create real-time dramatic experiences for human interactors. We also introduce the notion of dramatic acts and dramatic functions and indicate their use in constructing this real-time drama.},
  keywords  = {Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Cohen, Jonathan; Tchou, Chris; Hawkins, Tim; Debevec, Paul
Real-Time High-Dynamic Range Texture Mapping Proceedings Article
In: Eurographics Rendering Workshop, 2001.
Abstract | Links | BibTeX | Tags: Graphics
@inproceedings{cohen_real-time_2001,
  author    = {Jonathan Cohen and Chris Tchou and Tim Hawkins and Paul Debevec},
  title     = {Real-Time High-Dynamic Range Texture Mapping},
  booktitle = {Eurographics Rendering Workshop},
  year      = {2001},
  date      = {2001-06-01},
  url       = {http://ict.usc.edu/pubs/Real-Time%20High-Dynamic%20Range%20Texture%20Mapping.pdf},
  abstract  = {This paper presents a technique for representing and displaying high dynamic-range texture maps (HDRTMs) using current graphics hardware. Dynamic range in real-world environments often far exceeds the range representable in 8-bit per-channel texture maps. The increased realism afforded by a high-dynamic range representation provides improved fidelity and expressiveness for interactive visualization of image-based models. Our technique allows for real-time rendering of scenes with arbitrary dynamic range, limited only by available texture memory. In our technique, high-dynamic range textures are decomposed into sets of 8- bit textures. These 8-bit textures are dynamically reassembled by the graphics hardware's programmable multitexturing system or using multipass techniques and framebuffer image processing. These operations allow the exposure level of the texture to be adjusted continuously and arbitrarily at the time of rendering, correctly accounting for the gamma curve and dynamic range restrictions of the display device. Further, for any given exposure only two 8-bit textures must be resident in texture memory simultaneously. We present implementation details of this technique on various 3D graphics hardware architectures. We demonstrate several applications, including high-dynamic range panoramic viewing with simulated auto-exposure, real-time radiance environment mapping, and simulated Fresnel reflection.},
  keywords  = {Graphics},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Swartout, William; Hill, Randall W.; Gratch, Jonathan; Johnson, W. Lewis; Kyriakakis, Chris; Labore, Catherine; Lindheim, Richard; Marsella, Stacy C.; Miraglia, D.; Moore, Bridget; Morie, Jacquelyn; Rickel, Jeff; Thiebaux, Marcus; Tuch, L.; Whitney, Richard; Douglas, Jay
Toward the Holodeck: Integrating Graphics, Sound, Character and Story Proceedings Article
In: Proceedings of the 5th International Conference on Autonomous Agents, Montreal, Canada, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans, Virtual Worlds
@inproceedings{swartout_toward_2001,
  author    = {William Swartout and Randall W. Hill and Jonathan Gratch and W. Lewis Johnson and Chris Kyriakakis and Catherine Labore and Richard Lindheim and Stacy C. Marsella and D. Miraglia and Bridget Moore and Jacquelyn Morie and Jeff Rickel and Marcus Thiebaux and L. Tuch and Richard Whitney and Jay Douglas},
  title     = {Toward the Holodeck: Integrating Graphics, Sound, Character and Story},
  booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
  address   = {Montreal, Canada},
  year      = {2001},
  date      = {2001-06-01},
  url       = {http://ict.usc.edu/pubs/Toward%20the%20Holodeck-%20Integrating%20Graphics,%20Sound,%20Character%20and%20Story.pdf},
  abstract  = {We describe an initial prototype of a holodeck-like environment that we have created for the Mission Rehearsal Exercise Project. The goal of the project is to create an experience learning system where the participants are immersed in an environment where they can encounter the sights, sounds, and circumstances of realworld scenarios. Virtual humans act as characters and coaches in an interactive story with pedagogical goals.},
  keywords  = {Social Simulation, Virtual Humans, Virtual Worlds},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Gratch, Jonathan; Marsella, Stacy C.
Modeling Emotions in the Mission Rehearsal Exercise Proceedings Article
In: Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation, pp. 457–466, Orlando, FL, 2001.
Abstract | Links | BibTeX | Tags: Social Simulation, Virtual Humans
@inproceedings{gratch_modeling_2001,
  title     = {Modeling Emotions in the Mission Rehearsal Exercise},
  author    = {Gratch, Jonathan and Marsella, Stacy C.},
  url       = {http://ict.usc.edu/pubs/Modeling%20Emotions%20in%20the%20Mission%20Rehearsal%20Exercise.pdf},
  year      = {2001},
  date      = {2001-05-01},
  booktitle = {Proceedings of the 10th Conference on Computer Generated Forces and Behavioral Representation},
  pages     = {457--466},
  address   = {Orlando, FL},
  abstract  = {This paper discusses our attempts to model realistic human behavior in the context of the Mission Rehearsal Exercise system (MRE), a high-end virtual training environment designed to support dismounted infantry training between a human participant and elements of his command. The system combines immersive graphics, sound, and interactive characters controlled by artificial intelligence programs. Our goal in this paper is to show how some of the daunting subtlety in human behavior can be modeled by intelligent agents and in particular to focus on the role of modeling typical human emotional responses to environmental stimuli.},
  keywords  = {Social Simulation, Virtual Humans},
  pubstate  = {published},
  tppubtype = {inproceedings},
}
Ligorio, M. Beatrice; Mininni, Giuseppe; Traum, David
Interlocution Scenarios for Problem Solving in an Educational MUD Environment Proceedings Article
In: 1st European Conference on Computer-Supported Collaborative Learning, 2001.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{ligorio_interlocution_2001,
title = {Interlocution Scenarios for Problem Solving in an Educational MUD Environment},
author = {M. Beatrice Ligorio and Giuseppe Mininni and David Traum},
url = {http://ict.usc.edu/pubs/INTERLOCUTION%20SCENARIOS%20FOR%20PROBLEM%20SOLVING%20IN%20AN%20EDUCATIONAL%20MUD%20ENVIRONMENT.pdf},
year = {2001},
date = {2001-03-01},
booktitle = {1st European Conference on Computer-Supported Collaborative Learning},
abstract = {This paper presents an analysis of computer mediated collaboration on a problem-solving task in a virtual world. The theoretical framework of this research combines research in Computer Mediated Communication with a social psychology theory of conflict. An experiment was conducted involving university students performing a problem solving task with a peer in an Educational MUD. Each performance was guided by a predefined script, designed based on the 'common speech' concepts. All the performances were analyzed in terms of identity perception, conflict perception and cooperation. By looking at the relationship among the CMC environment features, the social influence activated on this environment, the conflict elaboration, and the problem solving strategies, a distinctive 'interlocution scenario' emerged. The results are discussed using contributions from the two theoretical approaches embraced.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}