Publications
Search
Ustun, Volkan; Rosenbloom, Paul S.
Towards Adaptive, Interactive Virtual Humans in Sigma Proceedings Article
In: Intelligent Virtual Agents, pp. 98–108, Springer, Delft, Netherlands, 2015, ISBN: 978-3-319-21995-0.
@inproceedings{ustun_towards_2015,
title = {Towards Adaptive, Interactive Virtual Humans in Sigma},
author = {Volkan Ustun and Paul S. Rosenbloom},
url = {http://ict.usc.edu/pubs/Towards%20Adaptive,%20Interactive%20Virtual%20Humans%20in%20Sigma.pdf},
doi = {10.1007/978-3-319-21996-7_10},
isbn = {978-3-319-21995-0},
year = {2015},
date = {2015-08-01},
booktitle = {Intelligent Virtual Agents},
volume = {9238},
pages = {98--108},
publisher = {Springer},
address = {Delft, Netherlands},
abstract = {Sigma is a nascent cognitive architecture/system that combines concepts from graphical models with traditional symbolic architectures. Here an initial Sigma-based virtual human (VH) is introduced that combines probabilistic reasoning, rule-based decision-making, Theory of Mind, Simultaneous Localization and Mapping and reinforcement learning in a unified manner. This non-modular unification of diverse cognitive, robotic and VH capabilities provides an important first step towards fully adaptive and interactive VHs in Sigma.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; DeVault, David; Lucas, Gale M.; Marsella, Stacy
Negotiation as a Challenge Problem for Virtual Humans Proceedings Article
In: Brinkman, Willem-Paul; Broekens, Joost; Heylen, Dirk (Ed.): Intelligent Virtual Agents, pp. 201–215, Springer International Publishing, Delft, Netherlands, 2015, ISBN: 978-3-319-21995-0 978-3-319-21996-7.
@inproceedings{gratch_negotiation_2015,
title = {Negotiation as a Challenge Problem for Virtual Humans},
author = {Jonathan Gratch and David DeVault and Gale M. Lucas and Stacy Marsella},
editor = {Willem-Paul Brinkman and Joost Broekens and Dirk Heylen},
url = {http://ict.usc.edu/pubs/Negotiation%20as%20a%20Challenge%20Problem%20for%20Virtual%20Humans.pdf},
doi = {10.1007/978-3-319-21996-7_21},
isbn = {978-3-319-21995-0 978-3-319-21996-7},
year = {2015},
date = {2015-08-01},
booktitle = {Intelligent Virtual Agents},
volume = {9238},
pages = {201--215},
publisher = {Springer International Publishing},
address = {Delft, Netherlands},
abstract = {We argue for the importance of negotiation as a challenge problem for virtual human research, and introduce a virtual conversational agent that allows people to practice a wide range of negotiation skills. We describe the multi-issue bargaining task, which has become a de facto standard for teaching and research on negotiation in both the social and computer sciences. This task is popular as it allows scientists or instructors to create a variety of distinct situations that arise in real-life negotiations, simply by manipulating a small number of mathematical parameters. We describe the development of a virtual human that will allow students to practice the interpersonal skills they need to recognize and navigate these situations. An evaluation of an early wizard-controlled version of the system demonstrates the promise of this technology for teaching negotiation and supporting scientific research on social intelligence.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Hill, Susan; Morency, Louis-Philippe; Pynadath, David; Traum, David
Exploring the Implications of Virtual Human Research for Human-Robot Teams Proceedings Article
In: Virtual, Augmented and Mixed Reality, pp. 186–196, Springer International Publishing, Los Angeles, CA, 2015, ISBN: 978-3-319-21066-7 978-3-319-21067-4.
@inproceedings{gratch_exploring_2015,
title = {Exploring the Implications of Virtual Human Research for Human-Robot Teams},
author = {Jonathan Gratch and Susan Hill and Louis-Philippe Morency and David Pynadath and David Traum},
url = {http://ict.usc.edu/pubs/Exploring%20the%20Implications%20of%20Virtual%20Human%20Research%20for%20Human-Robot%20Teams.pdf},
doi = {10.1007/978-3-319-21067-4_20},
isbn = {978-3-319-21066-7 978-3-319-21067-4},
year = {2015},
date = {2015-08-01},
booktitle = {Virtual, Augmented and Mixed Reality},
volume = {9179},
pages = {186--196},
publisher = {Springer International Publishing},
address = {Los Angeles, CA},
abstract = {This article briefly explores potential synergies between the fields of virtual human and human-robot interaction research. We consider challenges in advancing the effectiveness of human-robot teams makes recommendations for enhancing this by facilitating synergies between robotics and virtual human research.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hoegen, Rens; Stratou, Giota; Lucas, Gale M.; Gratch, Jonathan
Comparing Behavior Towards Humans and Virtual Humans in a Social Dilemma Proceedings Article
In: Intelligent Virtual Agents, pp. 452–460, Springer International Publishing, Delft, Netherlands, 2015, ISBN: 978-3-319-21995-0 978-3-319-21996-7.
@inproceedings{hoegen_comparing_2015,
title = {Comparing Behavior Towards Humans and Virtual Humans in a Social Dilemma},
author = {Rens Hoegen and Giota Stratou and Gale M. Lucas and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Comparing%20Behavior%20Towards%20Humans%20and%20Virtual%20Humans%20in%20a%20Social%20Dilemma.pdf},
doi = {10.1007/978-3-319-21996-7_48},
isbn = {978-3-319-21995-0 978-3-319-21996-7},
year = {2015},
date = {2015-08-01},
booktitle = {Intelligent Virtual Agents},
volume = {9238},
pages = {452--460},
publisher = {Springer International Publishing},
address = {Delft, Netherlands},
abstract = {The difference of shown social behavior towards virtual humans and real humans has been subject to much research. Many of these studies compare virtual humans (VH) that are presented as either virtual agents controlled by a computer or as avatars controlled by real humans. In this study we directly compare VHs with real humans. Participants played an economic game against a computer-controlled VH or a visible human opponent. Decisions made throughout the game were logged, additionally participants’ faces were filmed during the study and analyzed with expression recognition software. The analysis of choices showed participants are far more willing to violate social norms with VHs: they are more willing to steal and less willing to forgive. Facial expressions show trends that suggest they are treating VHs less socially. The results highlight, that even in impoverished social interactions, VHs have a long way to go before they can evoke truly human-like responses.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Pynadath, David V.; Unnikrishnan, K. V.; Shankar, Santosh; Merchant, Chirag
Intelligent Agents for Virtual Simulation of Human-Robot Interaction Proceedings Article
In: Virtual, Augmented and Mixed Reality, pp. 228–239, Springer International Publishing, Los Angeles, CA, 2015, ISBN: 978-3-319-21066-7 978-3-319-21067-4.
@inproceedings{wang_intelligent_2015,
title = {Intelligent Agents for Virtual Simulation of Human-Robot Interaction},
author = {Ning Wang and David V. Pynadath and K. V. Unnikrishnan and Santosh Shankar and Chirag Merchant},
url = {http://ict.usc.edu/pubs/Intelligent%20Agents%20for%20Virtual%20Simulation%20of%20Human-Robot%20Interaction.pdf},
doi = {10.1007/978-3-319-21067-4_24},
isbn = {978-3-319-21066-7 978-3-319-21067-4},
year = {2015},
date = {2015-08-01},
booktitle = {Virtual, Augmented and Mixed Reality},
volume = {9179},
pages = {228--239},
publisher = {Springer International Publishing},
address = {Los Angeles, CA},
series = {Lecture Notes in Computer Science},
abstract = {To study how robots can work better with humans as a team, we have designed an agent-based online testbed that supports virtual simulation of domain-independent human-robot interaction. The simulation is implemented as an online game where humans and virtual robots work together in simulated scenarios. This testbed allows researchers to carry out human-robot interaction studies and gain better understanding of, for example, how a robot’s communication can improve human-robot team performance by fostering better trust relationships among humans and their robot teammates. In this paper, we discuss the requirements, challenges and the design of such human-robot simulation. We illustrate its operation with an example human-robot joint reconnaissance task.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bruijnes, Merijn; Akker, Rieks; Hartholt, Arno; Heylen, Dirk
Virtual Suspect William Proceedings Article
In: Intelligent Virtual Agents, pp. 67–76, Springer, 2015.
@inproceedings{bruijnes_virtual_2015,
title = {Virtual Suspect William},
author = {Merijn Bruijnes and Rieks op den Akker and Arno Hartholt and Dirk Heylen},
url = {http://ict.usc.edu/pubs/Virtual%20Suspect%20William.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {Intelligent Virtual Agents},
pages = {67--76},
publisher = {Springer},
abstract = {We evaluate an algorithm which computes the responses of an agent that plays the role of a suspect in simulations of police interrogations. The algorithm is based on a cognitive model - the response model - that is centred around keeping track of interpersonal relations. The model is parametrized in such a way that different personalities of the virtual suspect can be defined. In the evaluation we defined three different personalities and had participants guess the personality based on the responses the model provided in an interaction with the participant. We investigate what factors contributed to the ability of a virtual agent to show behaviour that was recognized by participants as belonging to a persona.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Furbach, Ulrich; Gordon, Andrew S.; Schon, Claudia
Tackling Benchmark Problems of Commonsense Reasoning Proceedings Article
In: Proceedings of the Workshop on Bridging the Gap between Human and Automated Reasoning, pp. 47–59, Berlin, Germany, 2015.
@inproceedings{furbach_tackling_2015,
title = {Tackling Benchmark Problems of Commonsense Reasoning},
author = {Ulrich Furbach and Andrew S. Gordon and Claudia Schon},
url = {http://ict.usc.edu/pubs/Tackling%20Benchmark%20Problems%20of%20Commonsense%20Reasoning.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of the Workshop on Bridging the Gap between Human and Automated Reasoning},
volume = {1412},
pages = {47--59},
address = {Berlin, Germany},
abstract = {There is increasing interest in the field of automated commonsense reasoning to find real world benchmarks to challenge and to further develop reasoning systems. One interesting example is the Triangle Choice of Plausible Alternatives (Triangle-COPA), which is a set of problems presented in first-order logic. The setting of these problems stems from the famous Heider-Simmel film used in early experiments in social psychology. This paper illustrates with two logical approaches—abductive logic programming and deontic logic—how these problems can be solved. Furthermore, we propose an idea of how to use background knowledge to support the reasoning process.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nagano, Koki; Fyffe, Graham
Skin Stretch - Simulating Dynamic Skin Microgeometry Proceedings Article
In: ACM SIGGRAPH 2015 Computer Animation Festival, pp. 133, Los Angeles, CA, 2015.
@inproceedings{nagano_skin_2015-1,
  title     = {Skin Stretch - Simulating Dynamic Skin Microgeometry},
  author    = {Koki Nagano and Graham Fyffe},
  url       = {http://ict.usc.edu/pubs/Skin%20Stretch%20-%20Simulating%20Dynamic%20Skin%20Microgeometry.pdf},
  doi       = {10.1145/2766894},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {ACM SIGGRAPH 2015 Computer Animation Festival},
  volume    = {34},
  number    = {4},
  pages     = {133},
  address   = {Los Angeles, CA},
  abstract  = {This demonstration of the effects of skin microstructure deformation on high-resolution dynamic facial rendering features the state-of-the-art skin in microstructure simulation, facial scanning, and rendering. Facial animations made with the technique show more realistic and expressive skin under facial expression.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Graham, Paul; Fyffe, Graham; Tonwattanapong, Borom; Ghosh, Abhijeet; Debevec, Paul
Near-Instant Capture of High-Resolution Facial Geometry and Reflectance Proceedings Article
In: Proceedings of ACM SIGGRAPH 2015 Talks, pp. 1–1, ACM Press, 2015, ISBN: 978-1-4503-3636-9.
@inproceedings{graham_near-instant_2015,
title = {Near-Instant Capture of High-Resolution Facial Geometry and Reflectance},
author = {Paul Graham and Graham Fyffe and Borom Tunwattanapong and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Near-Instant%20Capture%20of%20High-Resolution%20Facial%20Geometry%20and%20Reflectance.pdf},
doi = {10.1145/2775280.2792561},
isbn = {978-1-4503-3636-9},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of ACM SIGGRAPH 2015 Talks},
pages = {1--1},
publisher = {ACM Press},
abstract = {Modeling realistic human characters is frequently done using 3D recordings of the shape and appearance of real people, often across a set of different facial expressions to build blendshape facial models. Believable characters that cross the "Uncanny Valley" require high-quality geometry, texture maps, reflectance properties, and surface detail at the level of skin pores and fine wrinkles. Unfortunately, there has not yet been a technique for recording such datasets that is near-instantaneous and low-cost. While some facial capture techniques are instantaneous and inexpensive [Beeler et al. 2010], these do not generally provide lighting-independent texture maps, specular reflectance information, or high-resolution surface normal detail for relighting. In contrast, techniques which use multiple photographs from spherical lighting setups [Ghosh et al. 2011] do capture such reflectance properties, at the expense of longer capture times and complicated custom equipment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; Paetzel, Maike; DeVault, David
Reducing the Cost of Dialogue System Training and Evaluation with Online, Crowd-Sourced Dialogue Data Collection Proceedings Article
In: Proceedings of SEMDIAL 2015 goDIAL, pp. 113–121, Gothenburg, Sweden, 2015.
@inproceedings{manuvinakurike_reducing_2015,
title = {Reducing the Cost of Dialogue System Training and Evaluation with Online, Crowd-Sourced Dialogue Data Collection},
author = {Ramesh Manuvinakurike and Maike Paetzel and David DeVault},
url = {http://ict.usc.edu/pubs/Reducing%20the%20Cost%20of%20Dialogue%20System%20Training%20and%20Evaluation%20with%20Online,%20Crowd-Sourced%20Dialogue%20Data%20Collection.pdf},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of SEMDIAL 2015 goDIAL},
pages = {113--121},
address = {Gothenburg, Sweden},
abstract = {This paper presents and analyzes an approach to crowd-sourced spoken dialogue data collection. Our approach enables low cost collection of browser-based spoken dialogue interactions between two remote human participants (human-human condition) as well as one remote human participant and an automated dialogue system (human-agent condition). We present a case study in which 200 remote participants were recruited to participate in a fast-paced image matching game, and which included both human-human and human-agent conditions. We discuss several technical challenges encountered in achieving this crowd-sourced data collection, and analyze the costs in time and money of carrying out the study. Our results suggest the potential of crowdsourced spoken dialogue data to lower costs and facilitate a range of research in dialogue modeling, dialogue system design, and system evaluation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M.; Gratch, Jonathan; Cheng, Lin; Marsella, Stacy
When the going gets tough: Grit predicts costly perseverance Journal Article
In: Journal of Research in Personality, vol. 59, pp. 15–22, 2015, ISSN: 00926566.
@article{lucas_when_2015,
title = {When the going gets tough: Grit predicts costly perseverance},
author = {Gale M. Lucas and Jonathan Gratch and Lin Cheng and Stacy Marsella},
url = {http://ict.usc.edu/pubs/When%20the%20going%20gets%20tough-Grit%20predicts%20costly%20perseverance.pdf},
doi = {10.1016/j.jrp.2015.08.004},
issn = {0092-6566},
year = {2015},
date = {2015-08-01},
journal = {Journal of Research in Personality},
volume = {59},
pages = {15--22},
abstract = {In this research, we investigate how grittier individuals might incur some costs by persisting when they could move on. Grittier participants were found to be less willing to give up when failing even though they were likely to incur a cost for their persistence. First, grittier participants are more willing to risk failing to complete a task by persisting on individual items. Second, when they are losing, they expend more effort and persist longer in a game rather than quit. Gritty participants have more positive emotions and expectations toward the task, which mediates the relationship between grit and staying to persist when they are losing. Results show gritty individuals are more willing to risk suffering monetary loss to persist.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kang, Sin-Hwa; Feng, Andrew; Leuski, Anton; Casas, Dan; Shapiro, Ari
Smart Mobile Virtual Humans: “Chat with Me!” Proceedings Article
In: Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA), pp. 475–478, Springer, Delft, Netherlands, 2015.
@inproceedings{kang_smart_2015,
title = {Smart Mobile Virtual Humans: “Chat with Me!”},
author = {Sin-Hwa Kang and Andrew Feng and Anton Leuski and Dan Casas and Ari Shapiro},
url = {http://ict.usc.edu/pubs/Smart%20Mobile%20Virtual%20Humans%20-%20Chat%20with%20Me.pdf},
doi = {10.1007/978-3-319-21996-7},
year = {2015},
date = {2015-08-01},
booktitle = {Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA)},
pages = {475--478},
publisher = {Springer},
address = {Delft, Netherlands},
abstract = {In this study, we are interested in exploring whether people would talk with 3D animated virtual humans using a smartphone for a longer amount of time as a sign of feeling rapport [5], compared to non-animated or audio-only characters in everyday life. Based on previous studies [2, 7, 10], users prefer animated characters in emotionally engaged interactions when the characters were displayed on mobile devices, yet in a lab setting. We aimed to reach a broad range of users outside of the lab in natural settings to investigate the potential of our virtual human on smartphones to facilitate casual, yet emotionally engaging conversation. We also found that the literature has not reached a consensus regarding the ideal gaze patterns for a virtual human, one thing researchers agree on is that inappropriate gaze could negatively impact conversations at times, even worse than receiving no visual feedback at all [1, 4]. Everyday life may bring the experience of awkwardness or uncomfortable sentiments in reaction to continuous mutual gaze. On the other hand, gaze aversion could also make a speaker think their partner is not listening. Our work further aims to address this question of what constitutes appropriate eye gaze in emotionally engaged interactions. We developed a 3D animated and chat-based virtual human which presented emotionally expressive nonverbal behaviors such as facial expressions, head gestures, gaze, and other upper body movements (see Figure 1). The virtual human displayed appropriate gaze that was either consisted of constant mutual gaze or gaze aversion based on a statistical model of saccadic eye movement [8] while listening. Both gaze patterns were accompanied by other forms of appropriate nonverbal feedback. To explore the question of optimal communicative medium, we distributed our virtual human application to users via an app store for Android-powered phones (i.e. Google Play Store) in order to target users who owned a smartphone and could use our application in various natural settings.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Feng, Andrew; Leuski, Anton; Marsella, Stacy; Casas, Dan; Kang, Sin-Hwa; Shapiro, Ari
A Platform for Building Mobile Virtual Humans Proceedings Article
In: Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA), pp. 310–319, Springer, Delft, Netherlands, 2015.
@inproceedings{feng_platform_2015,
  title     = {A Platform for Building Mobile Virtual Humans},
  author    = {Andrew Feng and Anton Leuski and Stacy Marsella and Dan Casas and Sin-Hwa Kang and Ari Shapiro},
  url       = {http://ict.usc.edu/pubs/A%20Platform%20for%20Building%20Mobile%20Virtual%20Humans.pdf},
  doi       = {10.1007/978-3-319-21996-7},
  year      = {2015},
  date      = {2015-08-01},
  booktitle = {Proceedings of the 15th International Conference on Intelligent Virtual Agents (IVA)},
  pages     = {310--319},
  publisher = {Springer},
  address   = {Delft, Netherlands},
  abstract  = {We describe an authoring framework for developing virtual humans on mobile applications. The framework abstracts many elements needed for virtual human generation and interaction, such as the rapid development of nonverbal behavior, lip syncing to speech, dialogue management, access to speech transcription services, and access to mobile sensors such as the microphone, gyroscope and location components.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Bolas, Mark; Debevec, Paul
An Automultiscopic Projector Array for Interactive Digital Humans Proceedings Article
In: SIGGRAPH 2015, pp. 1–1, ACM Press, Los Angeles, CA, 2015, ISBN: 978-1-4503-3635-2.
@inproceedings{jones_automultiscopic_2015,
title = {An Automultiscopic Projector Array for Interactive Digital Humans},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Mark Bolas and Paul Debevec},
url = {http://ict.usc.edu/pubs/An%20Automultiscopic%20Projector%20Array%20for%20Interactive%20Digital%20Humans.pdf},
doi = {10.1145/2782782.2792494},
isbn = {978-1-4503-3635-2},
year = {2015},
date = {2015-08-01},
booktitle = {SIGGRAPH 2015},
pages = {1--1},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {Automultiscopic 3D displays allow a large number of viewers to experience 3D content simultaneously without the hassle of special glasses or head gear. Our display uses a dense array of video projectors to generate many images with high-angular density over a wide-field of view. As each user moves around the display, their eyes smoothly transition from one view to the next. The display is ideal for displaying life-size human subjects as it allows for natural personal interactions with 3D cues such as eye gaze and spatial hand gestures. In this installation, we will explore ”time-offset” interactions with recorded 3D human subjects.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S.; Gratch, Jonathan; Ustun, Volkan
Towards Emotion in Sigma: From Appraisal to Attention Proceedings Article
In: Proceedings of AGI 2015, pp. 142–151, Springer International Publishing, Berlin, Germany, 2015.
@inproceedings{rosenbloom_towards_2015,
title = {Towards Emotion in Sigma: From Appraisal to Attention},
author = {Paul S. Rosenbloom and Jonathan Gratch and Volkan Ustun},
url = {http://ict.usc.edu/pubs/Towards%20Emotion%20in%20Sigma%20-%20From%20Appraisal%20to%20Attention.pdf},
year = {2015},
date = {2015-07-01},
booktitle = {Proceedings of AGI 2015},
volume = {9205},
pages = {142--151},
publisher = {Springer International Publishing},
address = {Berlin, Germany},
abstract = {A first step is taken towards incorporating emotional processing into Sigma, a cognitive architecture that is grounded in graphical models, with the addition of appraisal variables for expectedness and desirability plus their initial implications for attention at two levels of the control hierarchy. The results leverage many of Sigma's existing capabilities but with a few key additions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Demski, Abram
Expression Graphs Unifying Factor Graphs and Sum-Product Networks Proceedings Article
In: Artificial General Intelligence, pp. 241–250, Springer, Berlin, Germany, 2015.
@inproceedings{demski_expression_2015,
title = {Expression Graphs Unifying Factor Graphs and Sum-Product Networks},
author = {Abram Demski},
url = {http://ict.usc.edu/pubs/Expression%20Graphs%20Unifying%20Factor%20Graphs%20and%20Sum-Product%20Networks.pdf},
year = {2015},
date = {2015-07-01},
booktitle = {Artificial General Intelligence},
pages = {241--250},
publisher = {Springer},
address = {Berlin, Germany},
abstract = {Factor graphs are a very general knowledge representation, subsuming many existing formalisms in AI. Sum-product networks are a more recent representation, inspired by studying cases where factor graphs are tractable. Factor graphs emphasize expressive power, while sum-product networks restrict expressiveness to get strong guarantees on speed of inference. A sum-product network is not simply a restricted factor graph, however. Although the inference algorithms for the two structures are very similar, translating a sum-product network into factor graph representation can result in an exponential slowdown. We propose a formalism which generalizes factor graphs and sum-product networks, such that inference is fast in cases whose structure is close to a sum-product network.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nagano, Koki; Fyffe, Graham; Alexander, Oleg; Barbiç, Jernej; Li, Hao; Ghosh, Abhijeet; Debevec, Paul
Skin Microstructure Deformation with Displacement Map Convolution Journal Article
In: ACM Transactions on Graphics, vol. 34, no. 4, pp. 1–10, 2015, ISSN: 07300301.
@article{nagano_skin_2015,
title = {Skin Microstructure Deformation with Displacement Map Convolution},
author = {Koki Nagano and Graham Fyffe and Oleg Alexander and Jernej Barbič and Hao Li and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Skin%20Microstructure%20Deformation%20with%20Displacement%20Map%20Convolution.pdf},
doi = {10.1145/2766894},
issn = {0730-0301},
year = {2015},
date = {2015-07-01},
booktitle = {ACM SIGGRAPH 2015 Computer Animation Festival},
journal = {ACM Transactions on Graphics},
volume = {34},
number = {4},
pages = {1--10},
address = {Los Angeles, CA},
abstract = {We present a technique for synthesizing the effects of skin microstructure deformation by anisotropically convolving a high-resolution displacement map to match normal distribution changes in measured skin samples. We use a 10-micron resolution scanning technique to measure several in vivo skin samples as they are stretched and compressed in different directions, quantifying how stretching smooths the skin and compression makes it rougher. We tabulate the resulting surface normal distributions, and show that convolving a neutral skin microstructure displacement map with blurring and sharpening filters can mimic normal distribution changes and microstructure deformations. We implement the spatially-varying displacement map filtering on the GPU to interactively render the effects of dynamic microgeometry on animated faces obtained from high-resolution facial scans.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Nouri, Elnaz; Traum, David
Cross cultural report of values and decisions in the multi round ultimatum game and the centipede game Proceedings Article
In: Proceedings of AHFE 2015, Las Vegas, NV, 2015.
@inproceedings{nouri_cross_2015,
title = {Cross cultural report of values and decisions in the multi round ultimatum game and the centipede game},
author = {Elnaz Nouri and David Traum},
url = {http://ict.usc.edu/pubs/Cross%20cultural%20report%20of%20values%20and%20decisions%20in%20the%20multi%20round%20ultimatum%20game%20and%20the%20centipede%20game.pdf},
year = {2015},
date = {2015-07-01},
booktitle = {Proceedings of AHFE 2015},
address = {Las Vegas, NV},
abstract = {This paper investigates the cultural differences in decision making behavior of people from the US and India. We study players from these cultures playing the Multi Round Ultimatum Game and the Centipede Game online. In order to study how people from different cultures evaluate decisions we use criteria from the Multi Attribute Relational Values (MARV) survey. Our results confirm the existence of cultural differences in how people from US and India make decisions in the Ultimatum and Centipede games. We also observe differences in responses to survey questions implying differences in the amount of importance that the two cultures assign to the MARV decision making criteria.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jon; Lucas, Gale; Malandrakis, Nikolaos; Szablowski, Evan; Fessler, Eli
To tweet or not to tweet: The question of emotion and excitement about sporting events Proceedings Article
In: Proceedings of the Bi-Annual Conference of the International Society for Research on Emotion, Geneva, Switzerland, 2015.
@inproceedings{gratch_tweet_2015,
  title     = {To tweet or not to tweet: The question of emotion and excitement about sporting events},
  author    = {Jon Gratch and Gale Lucas and Nikolaos Malandrakis and Evan Szablowski and Eli Fessler},
  url       = {http://ict.usc.edu/pubs/To%20tweet%20or%20not%20to%20tweet%20-The%20question%20of%20emotion%20and%20excitement%20about%20sporting%20events.pdf},
  year      = {2015},
  date      = {2015-07-01},
  booktitle = {Proceedings of the Bi-Annual Conference of the International Society for Research on Emotion},
  address   = {Geneva, Switzerland},
  abstract  = {Sporting events can serve as laboratories to explore emotion and computational tools provide new ways to examine emotional processes “in the wild”. Moreover, emotional processes are assumed -but untested- in sports economics. For example, according to the well-studied uncertainty of outcome hypothesis (UOH), “close” games are more exciting and therefore better attended. If one team were certain to win, it would take away a major source of excitement, reducing positive affect, and therefore decreasing attendance. The role of emotion here is assumed but has not been tested; furthermore, the measures used (ticket sales, attendance, TV-viewership) do not allow for such a test because they are devoid of emotional content. To address this problem, we use tweets per minute (specifically, tweets posted during 2014 World Cup with official game hashtags). Sentiment analysis of these tweets can give interesting insights into what emotional processes are involved. Another benefit of tweets is that they are dynamic, and novel results from dynamic analyses (of TV-viewership) suggest that the UOH effect can actually reverse as games unfold (people switch channels away from close games). We therefore also reconsider the UOH, specifically, extending it by both examining sentiment and dynamic changes during the game. To consider such changes, we focus on games that could have been close (high in uncertainty), but ended up being lower in uncertainty. We operationalize such unexpected certainty of outcome as the extent to which games are predicted to be “close” (based on betting odds), but ended up with a bigger difference between the teams’ scores than was expected. Statistical analyses revealed that, contrary to the UOH, games with a bigger difference in score between teams than expected had higher tweets per minute. We also performed sentiment analysis, categorizing each tweet as positive, negative or neutral, and found that games with higher tweets per minute also have a higher percentage of negative tweets. Furthermore, games that have a bigger difference than expected have a higher percentage of negative tweets (compared to games closer to what is expected). This analysis seems to suggest that, contrary to assumptions in sports economics, excitement relates to expressions of negative emotion (and not positive emotion). The results are discussed in terms of innovations in methodology and understanding the role of emotion for “tuning in” to real world events. Further research could explore the specific mechanisms that link negative sentiment to excitement, such as worry or out-group derogation.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Cummins, Nicholas; Scherer, Stefan; Krajewski, Jarek; Schnieder, Sebastian; Epps, Julien; Quatieri, Thomas F.
A Review of Depression and Suicide Risk Assessment Using Speech Analysis Journal Article
In: Speech Communication, vol. 71, pp. 10–49, 2015, ISSN: 0167-6393.
@article{cummins_review_2015,
title = {A Review of Depression and Suicide Risk Assessment Using Speech Analysis},
author = {Nicholas Cummins and Stefan Scherer and Jarek Krajewski and Sebastian Schnieder and Julien Epps and Thomas F. Quatieri},
url = {http://www.sciencedirect.com/science/article/pii/S0167639315000369},
doi = {10.1016/j.specom.2015.03.004},
issn = {0167-6393},
year = {2015},
date = {2015-07-01},
journal = {Speech Communication},
volume = {71},
pages = {10--49},
abstract = {This paper is the first review into the automatic analysis of speech for use as an objective predictor of depression and suicidality. Both conditions are major public health concerns; depression has long been recognised as a prominent cause of disability and burden worldwide, whilst suicide is a misunderstood and complex course of death that strongly impacts the quality of life and mental health of the families and communities left behind. Despite this prevalence the diagnosis of depression and assessment of suicide risk, due to their complex clinical characterisations, are difficult tasks, nominally achieved by the categorical assessment of a set of specific symptoms. However many of the key symptoms of either condition, such as altered mood and motivation, are not physical in nature; therefore assigning a categorical score to them introduces a range of subjective biases to the diagnostic procedure. Due to these difficulties, research into finding a set of biological, physiological and behavioural markers to aid clinical assessment is gaining in popularity. This review starts by building the case for speech to be considered a key objective marker for both conditions; reviewing current diagnostic and assessment methods for depression and suicidality including key non-speech biological, physiological and behavioural markers and highlighting the expected cognitive and physiological changes associated with both conditions which affect speech production. We then review the key characteristics; size, associated clinical scores and collection paradigm, of active depressed and suicidal speech databases. The main focus of this paper is on how common paralinguistic speech characteristics are affected by depression and suicidality and the application of this information in classification and prediction systems. 
The paper concludes with an in-depth discussion on the key challenges – improving the generalisability through greater research collaboration and increased standardisation of data collection, and the mitigating unwanted sources of variability – that will shape the future research directions of this rapidly growing field of speech processing research.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Filter
1999
Gratch, Jonathan
Why You Should Buy an Emotional Planner Proceedings Article
In: Proceedings of the Agents '99 Workshop on Emotion-Based Agent Architectures, 1999.
Abstract | Links | BibTeX | Tags: Virtual Humans
@inproceedings{gratch_why_1999,
title = {Why You Should Buy an Emotional Planner},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Why%20You%20Should%20Buy%20an%20Emotional%20Planner.pdf},
year = {1999},
date = {1999-01-01},
booktitle = {Proceedings of the Agents '99 Workshop on Emotion-Based Agent Architectures},
abstract = {Computation models of emotion have begun to address the problem of how agents arrive at a given emotional state, and how that state might alter their reactions to the environment. Existing work has focused on reactive models of behavior and does not, as of yet, provide much insight on how emotion might relate to the construction and execution of complex plans. This article focuses on this later question. I present a model of how agents appraise the emotion significance of events that illustrates a complementary relationship between classical planning methods and models of emotion processing. By building on classical planning methods, the model clarifies prior accounts of emotional appraisal and extends these accounts to handle the generation and execution of complex multi-agent plans.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
2024
Bosnak, Robert E.; Bosnak, David E.; Rizzo, Albert
Systems and methods for ai driven generation of content attuned to a user Patent
US20240005583A1, 2024.
Abstract | Links | BibTeX | Tags:
@patent{bosnak_systems_369,
title = {Systems and methods for ai driven generation of content attuned to a user},
author = {Robert E. Bosnak and David E. Bosnak and Albert Rizzo},
url = {https://patentimages.storage.googleapis.com/2a/a6/76/2607333241cd11/US20240005583A1.pdf},
year = {2024},
date = {2024-01-01},
number = {US20240005583A1},
internal-note = {year corrected from garbled value "0369"; the publication number US20240005583A1 indicates a 2024 publication -- verify the exact publication date},
abstract = {Systems and methods enabling rendering an avatar attuned to a user. The systems and methods include receiving audio-visual data of user communications of a user. Using the audio-visual data, the systems and methods may determine vocal characteristics of the user, facial action units representative of facial features of the user, and speech of the user based on a speech recognition model and/or natural language understanding model. Based on the vocal characteristics, an acoustic emotion metric can be determined. Based on the speech recognition data, a speech emotion metric may be determined. Based on the facial action units, a facial emotion metric may be determined. An emotional complex signature may be determined to represent an emotional state of the user for rendering the avatar attuned to the emotional state based on a combination of the acoustic emotion metric, the speech emotion metric and the facial emotion metric.},
keywords = {},
pubstate = {published},
tppubtype = {patent}
}
0000
Bosnak, David E.; Bosnak, Robert E.; Rizzo, Albert
Systems and methods for automated real-time generation of an interactive avatar utilizing short-term and long-term computer memory structures Patent
US11798217B2, 0000.
Abstract | Links | BibTeX | Tags:
@patent{bosnak_systems_nodate,
title = {Systems and methods for automated real-time generation of an interactive avatar utilizing short-term and long-term computer memory structures},
author = {David E. Bosnak and Robert E. Bosnak and Albert Rizzo},
url = {https://patentimages.storage.googleapis.com/8f/a5/ad/3e30e0837c20ee/US11798217.pdf},
number = {US11798217B2},
internal-note = {year and date fields are missing; the B2 number indicates a granted patent (appears to have been granted in 2023) -- verify against the USPTO record before adding},
abstract = {Systems and methods enabling rendering an avatar attuned to a user. The systems and methods include receiving audio-visual data of user communications of a user. Using the audio-visual data, the systems and methods may determine vocal characteristics of the user, facial action units representative of facial features of the user, and speech of the user based on a speech recognition model and/or natural language understanding model. Based on the vocal characteristics, an acoustic emotion metric can be determined. Based on the speech recognition data, a speech emotion metric may be determined. Based on the facial action units, a facial emotion metric may be determined. An emotional complex signature may be determined to represent an emotional state of the user for rendering the avatar attuned to the emotional state based on a combination of the acoustic emotion metric, the speech emotion metric and the facial emotion metric.},
keywords = {},
pubstate = {published},
tppubtype = {patent}
}
Gratch, Jonathan
Emotion recognition ≠ Emotion Understanding: Challenges Confronting the Field of Affective Computing Journal Article
In: pp. 9, 0000.
BibTeX | Tags: Emotions, Virtual Humans
@article{gratch_emotion_nodate,
title = {Emotion recognition ≠ Emotion Understanding: Challenges Confronting the Field of Affective Computing},
author = {Jonathan Gratch},
internal-note = {journal and year are missing (both required for @article) -- locate the venue; also note the Unicode character ≠ in the title renders only with biblatex/Biber, for classic BibTeX it should be {$\neq$} -- confirm the toolchain},
pages = {9},
keywords = {Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gervits, Felix; Leuski, Anton; Bonial, Claire; Gordon, Carla; Traum, David
A Classification-Based Approach to Automating Human-Robot Dialogue Journal Article
In: pp. 13, 0000.
Abstract | Links | BibTeX | Tags: ARL, Dialogue, UARC, Virtual Humans
@article{gervits_classication-based_nodate,
title = {A Classification-Based Approach to Automating Human-Robot Dialogue},
author = {Felix Gervits and Anton Leuski and Claire Bonial and Carla Gordon and David Traum},
url = {https://link.springer.com/chapter/10.1007/978-981-15-9323-9_10},
doi = {10.1007/978-981-15-9323-9_10},
pages = {13},
internal-note = {journal and year are missing (required for @article); the URL points to a Springer book chapter, so @incollection may be the better entry type -- verify},
abstract = {We present a dialogue system based on statistical classification which was used to automate human-robot dialogue in a collaborative navigation domain. The classifier was trained on a small corpus of multi-floor Wizard-of-Oz dialogue including two wizards: one standing in for dialogue capabilities and another for navigation. Below, we describe the implementation details of the classifier and show how it was used to automate the dialogue wizard. We evaluate our system on several sets of source data from the corpus and find that response accuracy is generally high, even with very limited training data. Another contribution of this work is the novel demonstration of a dialogue manager that uses the classifier to engage in multifloor dialogue with two different human roles. Overall, this approach is useful for enabling spoken dialogue systems to produce robust and accurate responses to natural language input, and for robots that need to interact with humans in a team setting.},
keywords = {ARL, Dialogue, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; McCullough, Kyle; Mozgai, Sharon; Ustun, Volkan; Gordon, Andrew S
Introducing RIDE: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration & Development Environment Journal Article
In: pp. 11, 0000.
@article{hartholt_introducing_nodate,
title = {Introducing RIDE: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration & Development Environment},
author = {Arno Hartholt and Kyle McCullough and Sharon Mozgai and Volkan Ustun and Andrew S Gordon},
internal-note = {journal and year are missing (required for @article); this entry duplicates hartholt_introducing_nodate-1 later in the file -- merge the two and keep a single key; also the raw & in the title should be \& for LaTeX -- verify},
pages = {11},
abstract = {This paper describes the design, development, and philosophy of the Rapid Integration & Development Environment (RIDE). RIDE is a simulation platform that unites many Department of Defense (DoD) and Army simulation efforts to provide an accelerated development foundation and prototyping sandbox that provides direct benefit to the U.S. Army’s Synthetic Training Environment (STE) as well as the larger DoD and Army simulation communities. RIDE integrates a range of capabilities, including One World Terrain, Non-Player Character AI behaviors, xAPI logging, multiplayer networking, scenario creation, destructibility, machine learning approaches, and multi-platform support. The goal of RIDE is to create a simple, drag-and-drop development environment usable by people across all technical levels. RIDE leverages robust game engine technology while designed to be agnostic to any specific game or simulation engine. It provides decision makers with the tools needed to better define requirements and identify potential solutions in much less time and at much reduced costs. RIDE is available through Government Purpose Rights. We aim for RIDE to lower the barrier of entry to research and development efforts within the simulation community in order to reduce required time and effort for simulation and training prototyping. This paper provides an overview of our objective, overall approach, and next steps, in pursuit of these goals.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; McCullough, Kyle; Mozgai, Sharon; Ustun, Volkan; Gordon, Andrew S
Introducing RIDE: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration & Development Environment Journal Article
In: pp. 11, 0000.
@article{hartholt_introducing_nodate-1,
title = {Introducing RIDE: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration & Development Environment},
author = {Arno Hartholt and Kyle McCullough and Sharon Mozgai and Volkan Ustun and Andrew S Gordon},
internal-note = {duplicate of hartholt_introducing_nodate earlier in the file (identical title, authors, pages, abstract) -- consolidate to one entry; journal and year are also missing},
pages = {11},
abstract = {This paper describes the design, development, and philosophy of the Rapid Integration & Development Environment (RIDE). RIDE is a simulation platform that unites many Department of Defense (DoD) and Army simulation efforts to provide an accelerated development foundation and prototyping sandbox that provides direct benefit to the U.S. Army’s Synthetic Training Environment (STE) as well as the larger DoD and Army simulation communities. RIDE integrates a range of capabilities, including One World Terrain, Non-Player Character AI behaviors, xAPI logging, multiplayer networking, scenario creation, destructibility, machine learning approaches, and multi-platform support. The goal of RIDE is to create a simple, drag-and-drop development environment usable by people across all technical levels. RIDE leverages robust game engine technology while designed to be agnostic to any specific game or simulation engine. It provides decision makers with the tools needed to better define requirements and identify potential solutions in much less time and at much reduced costs. RIDE is available through Government Purpose Rights. We aim for RIDE to lower the barrier of entry to research and development efforts within the simulation community in order to reduce required time and effort for simulation and training prototyping. This paper provides an overview of our objective, overall approach, and next steps, in pursuit of these goals.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon
From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality Journal Article
In: pp. 35, 0000.
Abstract | BibTeX | Tags: DTIC, MedVR, Virtual Humans, VR
@article{hartholt_combat_nodate,
title = {From Combat to {COVID-19} -- Managing the Impact of Trauma Using Virtual Reality},
author = {Arno Hartholt and Sharon Mozgai},
internal-note = {journal and year are missing (required for @article) -- locate the venue before publishing this database},
pages = {35},
abstract = {Research has documented the efficacy of clinical applications that leverage Virtual Reality (VR) for assessment and treatment purposes across a wide range of domains, including pain, phobias, and posttraumatic stress disorder (PTSD). As the field of Clinical VR matures, it is important to review its origins and examine how these initial explorations have progressed, what gaps remain, and what opportunities the community can pursue. We do this by reflecting on our personal scientific journey against the backdrop of the field in general. In particular, this paper discusses how a clinical research program that was initially designed to deliver trauma-focused VR exposure therapy (VRET) for combat-related PTSD has been evolved to expand its impact and address a wider range of trauma sources. Such trauma sources include sexual trauma and the needs of first responders and healthcare professionals serving on the frontlines of the COVID-19 pandemic. We provide an overview of the field and its general trends, discuss the genesis of our research agenda and its current status, and summarize upcoming opportunities, together with common challenges and lessons learned.},
keywords = {DTIC, MedVR, Virtual Humans, VR},
pubstate = {published},
tppubtype = {article}
}
The Interservice Industry, Training, Simulation, and Education Conference Miscellaneous
0000.
@misc{noauthor_interservice_nodate,
  title     = {The Interservice Industry, Training, Simulation, and Education Conference},
  url       = {https://www.xcdsystem.com/iitsec/proceedings/index.cfm?Year=2021&AbID=97189&CID=862},
  urldate   = {2022-09-22},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {misc}
}
APA PsycNet Miscellaneous
0000.
@misc{noauthor_apa_nodate,
title = {{APA} PsycNet},
url = {https://psycnet.apa.org/fulltext/2022-19957-001.html},
urldate = {2022-09-13},
keywords = {},
pubstate = {published},
tppubtype = {misc}
}
Chen, Haiwei; Zhao, Yajie
Don't Look into the Dark: Latent Codes for Pluralistic Image Inpainting Proceedings Article
In: pp. 7591–7600, 0000.
Abstract | Links | BibTeX | Tags: DTIC, Graphics, VGL
@inproceedings{chen_dont_nodate,
title = {Don't Look into the Dark: Latent Codes for Pluralistic Image Inpainting},
author = {Haiwei Chen and Yajie Zhao},
url = {https://openaccess.thecvf.com/content/CVPR2024/html/Chen_Dont_Look_into_the_Dark_Latent_Codes_for_Pluralistic_Image_CVPR_2024_paper.html},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition ({CVPR})},
year = {2024},
internal-note = {booktitle and year added from the openaccess.thecvf.com CVPR2024 URL (both were missing and are required for @inproceedings) -- verify against the published proceedings},
pages = {7591--7600},
abstract = {We present a method for large-mask pluralistic image inpainting based on the generative framework of discrete latent codes. Our method learns latent priors discretized as tokens by only performing computations at the visible locations of the image. This is realized by a restrictive partial encoder that predicts the token label for each visible block a bidirectional transformer that infers the missing labels by only looking at these tokens and a dedicated synthesis network that couples the tokens with the partial image priors to generate coherent and pluralistic complete image even under extreme mask settings. Experiments on public benchmarks validate our design choices as the proposed method outperforms strong baselines in both visual quality and diversity metrics.},
keywords = {DTIC, Graphics, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Chen, Elizabeth
Augmenting Training Data for a Virtual Character Using GPT-3.5 Proceedings Article
In: The Florida Artificial Intelligence Research Society, 0000.
Abstract | Links | BibTeX | Tags: Dialogue, DTIC, Natural Language
@inproceedings{artstein_augmenting_nodate,
title = {Augmenting Training Data for a Virtual Character Using {GPT-3.5}},
author = {Ron Artstein and Elizabeth Chen},
url = {https://journals.flvc.org/FLAIRS/article/view/135552},
volume = {37},
publisher = {The Florida Artificial Intelligence Research Society},
internal-note = {publisher typo "Tyhe" corrected; year and booktitle are missing (required for @inproceedings) -- FLAIRS-37 appears to be 2024, verify before adding},
abstract = {This paper compares different methods of using a large language model (GPT-3.5) for creating synthetic training data for a retrieval-based conversational character. The training data are in the form of linked questions and answers, which allow a classifier to retrieve a pre-recorded answer to an unseen question; the intuition is that a large language model could predict what human users might ask, thus saving the effort of collecting real user questions as training data. Results show small improvements in test performance for all synthetic datasets. However, a classifier trained on only small amounts of collected user data resulted in a higher F-score than the classifiers trained on much larger amounts of synthetic data generated using GPT-3.5. Based on these results, we see a potential in using large language models for generating training data, but at this point it is not as valuable as collecting actual user data for training.},
keywords = {Dialogue, DTIC, Natural Language},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Rizzo, Albert A; Hartholt, Arno
Persuasive Technology for Suicide Prevention: A Virtual Human mHealth Application Proceedings Article
In: 0000.
Abstract | BibTeX | Tags: Virtual Humans, VR
@inproceedings{mozgai_persuasive_nodate,
title = {Persuasive Technology for Suicide Prevention: A Virtual Human mHealth Application},
author = {Sharon Mozgai and Albert A Rizzo and Arno Hartholt},
internal-note = {booktitle and year are missing (both required for @inproceedings); the abstract indicates this is a demo paper -- locate the venue and date before publishing this database},
abstract = {We are demoing Battle Buddy, an mHealth application designed to support access to physical and mental wellness content as well as safety planning for U.S. military veterans. This virtual human interface will collect multimodal data through passive sensors native to popular wearables (e.g., Apple Watch) and deliver adaptive multimedia content specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health. Battle Buddy can deliver health interventions matched to the individual user via novel adaptive logic-based algorithms while employing various behavior change techniques (e.g., goal-setting, barrier identification, rewards, modeling, etc.). All interactions were specifically designed to engage and motivate by employing the persuasive strategies of (1) personalization, (2) self-monitoring, (3) tunneling, (4) suggestion, and (5) expertise.},
keywords = {Virtual Humans, VR},
pubstate = {published},
tppubtype = {inproceedings}
}