Publications
Mozgai, Sharon; Hartholt, Arno; Rizzo, Albert “Skip”
Systematic Representative Design and Clinical Virtual Reality Journal Article
In: Psychological Inquiry, vol. 30, no. 4, pp. 231–245, 2019, ISSN: 1047-840X, 1532-7965.
@article{mozgai_systematic_2019,
title = {Systematic Representative Design and Clinical Virtual Reality},
author = {Sharon Mozgai and Arno Hartholt and Albert “Skip” Rizzo},
url = {https://www.tandfonline.com/doi/full/10.1080/1047840X.2019.1693873},
doi = {10.1080/1047840X.2019.1693873},
issn = {1047-840X, 1532-7965},
year = {2019},
date = {2019-10-01},
journal = {Psychological Inquiry},
volume = {30},
number = {4},
pages = {231–245},
abstract = {The authors of the article, “Causal Inference in Generalizable Environments: Systematic Representative Design”, boldly announce their core point in the opening line of the abstract stating that, “Causal inference and generalizability both matter.” While a surface glance might suggest this to be a simple notion, a closer examination reveals the complexity of what they are proposing. This complexity is apparent when one considers that the bulk of human experimental research has always been challenged in its inability to concurrently deliver on both of these aims. This is no slight on the tens of thousands of human researchers and behavioral scientists who have devoted long careers to highly controlled human psychological and social science laboratory research. Rather, it reflects the sheer enormity of the challenges for conducting human studies designed to specify human function with physics-informed lab methods, while at the same time producing results that lead to enhanced understanding and prediction of how people will operate in the complex and ever-changing contexts that make up everyday life. At the core of this issue is a methodological and philosophical challenge that is relevant to all areas of human subjects’ research, beyond the social science focus of the Miller et al. (this issue) article. It is our aim to discuss the central topics in their article through the lens of our own work using Virtual/Augmented Reality and Virtual Human simulation technologies for clinical and training applications.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Tavabi, Leili; Stefanov, Kalin; Gilani, Setareh Nasihati; Traum, David; Soleymani, Mohammad
Multimodal Learning for Identifying Opportunities for Empathetic Responses Proceedings Article
In: Proceedings of the 2019 International Conference on Multimodal Interaction, pp. 95–104, ACM, Suzhou, China, 2019, ISBN: 978-1-4503-6860-5.
@inproceedings{tavabi_multimodal_2019,
title = {Multimodal Learning for Identifying Opportunities for Empathetic Responses},
author = {Leili Tavabi and Kalin Stefanov and Setareh Nasihati Gilani and David Traum and Mohammad Soleymani},
url = {https://dl.acm.org/doi/10.1145/3340555.3353750},
doi = {10.1145/3340555.3353750},
isbn = {978-1-4503-6860-5},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 2019 International Conference on Multimodal Interaction},
pages = {95–104},
publisher = {ACM},
address = {Suzhou, China},
abstract = {Embodied interactive agents possessing emotional intelligence and empathy can create natural and engaging social interactions. Providing appropriate responses by interactive virtual agents requires the ability to perceive users’ emotional states. In this paper, we study and analyze behavioral cues that indicate an opportunity to provide an empathetic response. Emotional tone in language in addition to facial expressions are strong indicators of dramatic sentiment in conversation that warrant an empathetic response. To automatically recognize such instances, we develop a multimodal deep neural network for identifying opportunities when the agent should express positive or negative empathetic responses. We train and evaluate our model using audio, video and language from human-agent interactions in a Wizard-of-Oz setting, using the wizard’s empathetic responses and annotations collected on Amazon Mechanical Turk as ground-truth labels. Our model outperforms a text-based baseline, achieving an F1-score of 0.71 on a three-class classification. We further investigate the results and evaluate the capability of such a model to be deployed for real-world human-agent interactions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
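A minimal late-fusion sketch of the kind of multimodal classifier this abstract describes, assuming PyTorch; the feature dimensions, layer sizes, and three-way label set are illustrative assumptions, not the paper's actual architecture or features:

import torch
import torch.nn as nn

class EmpathyOpportunityClassifier(nn.Module):
    def __init__(self, audio_dim=40, video_dim=35, text_dim=300, hidden=64):
        super().__init__()
        # One small encoder per modality.
        self.audio = nn.Sequential(nn.Linear(audio_dim, hidden), nn.ReLU())
        self.video = nn.Sequential(nn.Linear(video_dim, hidden), nn.ReLU())
        self.text = nn.Sequential(nn.Linear(text_dim, hidden), nn.ReLU())
        # Fused representation -> {negative empathy, none, positive empathy}.
        self.head = nn.Linear(3 * hidden, 3)

    def forward(self, a, v, t):
        fused = torch.cat([self.audio(a), self.video(v), self.text(t)], dim=-1)
        return self.head(fused)

model = EmpathyOpportunityClassifier()
logits = model(torch.randn(8, 40), torch.randn(8, 35), torch.randn(8, 300))
print(logits.shape)  # torch.Size([8, 3])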
Ringeval, Fabien; Messner, Eva-Maria; Song, Siyang; Liu, Shuo; Zhao, Ziping; Mallol-Ragolta, Adria; Ren, Zhao; Soleymani, Mohammad; Pantic, Maja; Schuller, Björn; Valstar, Michel; Cummins, Nicholas; Cowie, Roddy; Tavabi, Leili; Schmitt, Maximilian; Alisamir, Sina; Amiriparian, Shahin
AVEC 2019 Workshop and Challenge: State-of-Mind, Detecting Depression with AI, and Cross-Cultural Affect Recognition Proceedings Article
In: Proceedings of the 9th International on Audio/Visual Emotion Challenge and Workshop - AVEC '19, pp. 3–12, ACM Press, Nice, France, 2019, ISBN: 978-1-4503-6913-8.
@inproceedings{ringeval_avec_2019,
title = {AVEC 2019 Workshop and Challenge: State-of-Mind, Detecting Depression with AI, and Cross-Cultural Affect Recognition},
author = {Fabien Ringeval and Eva-Maria Messner and Siyang Song and Shuo Liu and Ziping Zhao and Adria Mallol-Ragolta and Zhao Ren and Mohammad Soleymani and Maja Pantic and Björn Schuller and Michel Valstar and Nicholas Cummins and Roddy Cowie and Leili Tavabi and Maximilian Schmitt and Sina Alisamir and Shahin Amiriparian},
url = {http://dl.acm.org/citation.cfm?doid=3347320.3357688},
doi = {10.1145/3347320.3357688},
isbn = {978-1-4503-6913-8},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 9th International on Audio/Visual Emotion Challenge and Workshop - AVEC '19},
pages = {3–12},
publisher = {ACM Press},
address = {Nice, France},
abstract = {The Audio/Visual Emotion Challenge and Workshop (AVEC 2019) 'State-of-Mind, Detecting Depression with AI, and Cross-cultural Affect Recognition' is the ninth competition event aimed at the comparison of multimedia processing and machine learning methods for automatic audiovisual health and emotion analysis, with all participants competing strictly under the same conditions. The goal of the Challenge is to provide a common benchmark test set for multimodal information processing and to bring together the health and emotion recognition communities, as well as the audiovisual processing communities, to compare the relative merits of various approaches to health and emotion recognition from real-life data. This paper presents the major novelties introduced this year, the challenge guidelines, the data used, and the performance of the baseline systems on the three proposed tasks: state-of-mind recognition, depression assessment with AI, and cross-cultural affect sensing, respectively.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pynadath, David V.; Wang, Ning; Kamireddy, Sreekar
A Markovian Method for Predicting Trust Behavior in Human-Agent Interaction Proceedings Article
In: Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19, pp. 171–178, ACM Press, Kyoto, Japan, 2019, ISBN: 978-1-4503-6922-0.
@inproceedings{pynadath_markovian_2019,
title = {A Markovian Method for Predicting Trust Behavior in Human-Agent Interaction},
author = {David V. Pynadath and Ning Wang and Sreekar Kamireddy},
url = {http://dl.acm.org/citation.cfm?doid=3349537.3351905},
doi = {10.1145/3349537.3351905},
isbn = {978-1-4503-6922-0},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 7th International Conference on Human-Agent Interaction - HAI '19},
pages = {171–178},
publisher = {ACM Press},
address = {Kyoto, Japan},
abstract = {Trust calibration is critical to the success of human-agent interaction (HAI). However, individual differences are ubiquitous in people’s trust relationships with autonomous systems. To assist its heterogeneous human teammates calibrate their trust in it, an agent must first dynamically model them as individuals, rather than communicating with them all in the same manner. It can then generate expectations of its teammates’ behavior and optimize its own communication based on the current state of the trust relationship it has with them. In this work, we examine how an agent can generate accurate expectations given observations of only the teammate’s trust-related behaviors (e.g., did the person follow or ignore its advice?). In addition to this limited input, we also seek a specific output: accurately predicting its human teammate’s future trust behavior (e.g., will the person follow or ignore my next suggestion?). In this investigation, we construct a model capable of generating such expectations using data gathered in a human-subject study of behavior in a simulated human-robot interaction (HRI) scenario. We first analyze the ability of measures from a presurvey on trust-related traits to accurately predict subsequent trust behaviors. However, as the interaction progresses, this effect is dwarfed by the direct experience. We therefore analyze the ability of sequences of prior behavior by the teammate to accurately predict subsequent trust behaviors. Such behavioral sequences have been shown to be indicative of the subjective beliefs of other teammates, and we show here that they have predictive power as well.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
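The paper's input is a sequence of trust-related behaviors (follow or ignore advice). A toy first-order Markov predictor in that spirit, with fabricated example sequences, might look like this:

from collections import Counter

def fit_transitions(sequences):
    # Estimate P(next behavior | current behavior) from observed sequences.
    counts = Counter()
    for seq in sequences:
        for cur, nxt in zip(seq, seq[1:]):
            counts[(cur, nxt)] += 1
    probs = {}
    for cur in ("follow", "ignore"):
        total = sum(counts[(cur, nxt)] for nxt in ("follow", "ignore"))
        if total:
            probs[cur] = {nxt: counts[(cur, nxt)] / total
                          for nxt in ("follow", "ignore")}
    return probs

demo = [["follow", "follow", "ignore", "follow"],
        ["ignore", "ignore", "follow"]]
model = fit_transitions(demo)
print(model["follow"])  # {'follow': 0.5, 'ignore': 0.5} on this toy data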
Xing, Jun; Nagano, Koki; Chen, Weikai; Xu, Haotian; Wei, Li-yi; Zhao, Yajie; Lu, Jingwan; Kim, Byungmoon; Li, Hao
HairBrush for Immersive Data-Driven Hair Modeling Proceedings Article
In: Proceedings of the 32nd Annual ACM Symposium on User Interface Software and Technology - UIST '19, pp. 263–279, ACM Press, New Orleans, LA, USA, 2019, ISBN: 978-1-4503-6816-2.
@inproceedings{xing_hairbrush_2019,
title = {HairBrush for Immersive Data-Driven Hair Modeling},
author = {Jun Xing and Koki Nagano and Weikai Chen and Haotian Xu and Li-yi Wei and Yajie Zhao and Jingwan Lu and Byungmoon Kim and Hao Li},
url = {http://dl.acm.org/citation.cfm?doid=3332165.3347876},
doi = {10.1145/3332165.3347876},
isbn = {978-1-4503-6816-2},
year = {2019},
date = {2019-10-01},
booktitle = {Proceedings of the 32nd Annual ACM Symposium on User Interface Software and Technology - UIST '19},
pages = {263–279},
publisher = {ACM Press},
address = {New Orleans, LA, USA},
abstract = {While hair is an essential component of virtual humans, it is also one of the most challenging digital assets to create. Existing automatic techniques lack the generality and flexibility to create rich hair variations, while manual authoring interfaces often require considerable artistic skills and efforts, especially for intricate 3D hair structures that can be difficult to navigate. We propose an interactive hair modeling system that can help create complex hairstyles in minutes or hours that would otherwise take much longer with existing tools. Modelers, including novice users, can focus on the overall hairstyles and local hair deformations, as our system intelligently suggests the desired hair parts. Our method combines the flexibility of manual authoring and the convenience of data-driven automation. Since hair contains intricate 3D structures such as buns, knots, and strands, these structures are inherently challenging to create using traditional 2D interfaces. Our system provides a new 3D hair authoring interface for immersive interaction in virtual reality (VR). Users can draw high-level guide strips, from which our system predicts the most plausible hairstyles via a deep neural network trained from a professionally curated dataset. Each hairstyle in our dataset is composed of multiple variations, serving as blend-shapes to fit the user drawings via global blending and local deformation. The fitted hair models are visualized as interactive suggestions that the user can select, modify, or ignore. We conducted a user study to confirm that our system can significantly reduce manual labor while improving the output quality for modeling a variety of head and facial hairstyles that are challenging to create via existing techniques.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
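The "global blending" step the abstract mentions can be illustrated with an ordinary least-squares fit of blendshape weights; the 6-dimensional vectors below stand in for flattened hair-strip geometry and are not the paper's representation:

import numpy as np

def fit_blend_weights(base, blendshapes, target):
    # Solve min_w || base + (B - base) w - target ||^2 for weights w.
    deltas = np.stack([b - base for b in blendshapes], axis=1)  # (d, k)
    w, *_ = np.linalg.lstsq(deltas, target - base, rcond=None)
    return w

base = np.zeros(6)
shapes = [np.eye(6)[i] for i in range(3)]   # three toy variation shapes
target = 0.7 * shapes[0] + 0.2 * shapes[2]  # synthetic user drawing
print(fit_blend_weights(base, shapes, target))  # approx [0.7, 0.0, 0.2]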
Gordon, Carla; Yanov, Volodymyr; Traum, David; Georgila, Kallirroi
A Wizard of Oz Data Collection Framework for Internet of Things Dialogues Proceedings Article
In: Proceedings of the 23rd Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts, pp. 3, SEMDIAL, London, UK, 2019.
@inproceedings{gordon_wizard_2019,
title = {A Wizard of Oz Data Collection Framework for Internet of Things Dialogues},
author = {Carla Gordon and Volodymyr Yanov and David Traum and Kallirroi Georgila},
url = {http://semdial.org/anthology/papers/Z/Z19/Z19-4024/},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 23rd Workshop on the Semantics and Pragmatics of Dialogue - Poster Abstracts},
pages = {3},
publisher = {SEMDIAL},
address = {London, UK},
abstract = {We describe a novel Wizard of Oz dialogue data collection framework in the Internet of Things domain. Our tool is designed for collecting dialogues between a human user and 8 different system profiles, each with a different communication strategy. We then describe the data collection conducted with this tool, as well as the dialogue corpus that was generated.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lei, Su; Gratch, Jonathan
Smiles Signal Surprise in a Social Dilemma Proceedings Article
In: Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII), IEEE, Cambridge, UK, 2019.
@inproceedings{lei_smiles_2019,
title = {Smiles Signal Surprise in a Social Dilemma},
author = {Su Lei and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/xpl/conhome/8911251/proceeding},
year = {2019},
date = {2019-09-01},
booktitle = {Proceedings of the 8th International Conference on Affective Computing and Intelligent Interaction (ACII)},
publisher = {IEEE},
address = {Cambridge, UK},
abstract = {This study examines spontaneous facial expressions in an iterated prisoner’s dilemma with financial stakes. Our goal was to identify typical facial expressions associated with key events during the interaction (e.g., cooperation or exploitation) and contrast these reactions with alternative theories of the meaning of facial expressions. Specifically, we examined if expressions reflect individual self-interest (e.g., winning) or social motives (e.g., promoting fairness) and the extent to which surprise might moderate the intensity of facial displays. In contrast to predictions of scientific and folk theories of expression, smiles were the only expressions consistently elicited, regardless of the reward or fairness of outcomes. Further, these smiles serve as a reliable indicator of the surprisingness of the event, but not its pleasure (contradicting research on both the meaning of smiles and indicators of surprise). To our knowledge, this is the first study to indicate that smiles signal surprise.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M.; Rizzo, Albert; Gratch, Jonathan; Scherer, Stefan; Stratou, Giota; Boberg, Jill; Morency, Louis-Philippe
Reporting Mental Health Symptoms: Breaking Down Barriers to Care with Virtual Human Interviewers Book Section
In: The Impact of Virtual and Augmented Reality on Individuals and Society, pp. 256–264, Frontiers Media SA, 2019.
@incollection{lucas_reporting_2019,
title = {Reporting Mental Health Symptoms: Breaking Down Barriers to Care with Virtual Human Interviewers},
author = {Gale M. Lucas and Albert Rizzo and Jonathan Gratch and Stefan Scherer and Giota Stratou and Jill Boberg and Louis-Philippe Morency},
url = {https://books.google.com/books?hl=en&lr=&id=N724DwAAQBAJ&oi=fnd&pg=PP1&dq=The+Impact+of+Virtual+and+Augmented+Reality+on+Individuals+and+Society&ots=ZMD1P9T-K5&sig=Qqh7iHZ4Xq2iRyYecrECHwNNE38#v=onepage&q=The%20Impact%20of%20Virtual%20and%20Augmented%20Reality%20on%20Individuals%20and%20Society&f=false},
year = {2019},
date = {2019-09-01},
booktitle = {The Impact of Virtual and Augmented Reality on Individuals and Society},
pages = {256–264},
publisher = {Frontiers Media SA},
abstract = {A common barrier to healthcare for psychiatric conditions is the stigma associated with these disorders. Perceived stigma prevents many from reporting their symptoms. Stigma is a particularly pervasive problem among military service members, preventing them from reporting symptoms of combat-related conditions like posttraumatic stress disorder (PTSD). However, research shows increased reporting by service members when anonymous assessments are used. For example, service members report more symptoms of PTSD when they anonymously answer the Post-Deployment Health Assessment (PDHA) symptom checklist compared to the official PDHA, which is identifiable and linked to their military records. To investigate the factors that influence reporting of psychological symptoms by service members, we used a transformative technology: automated virtual humans that interview people about their symptoms. Such virtual human interviewers allow simultaneous use of two techniques for eliciting disclosure that would otherwise be incompatible; they afford anonymity while also building rapport. We examined whether virtual human interviewers could increase disclosure of mental health symptoms among active-duty service members who had just returned from a year-long deployment in Afghanistan. Service members reported more symptoms during a conversation with a virtual human interviewer than on the official PDHA. They also reported more to a virtual human interviewer than on an anonymized PDHA. A second, larger sample of active-duty and former service members found a similar effect that approached statistical significance. Because respondents in both studies shared more with virtual human interviewers than an anonymized PDHA—even though both conditions control for stigma and ramifications for service members’ military records—virtual human interviewers that build rapport may provide a superior option to encourage reporting.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Slotwiner, David J.; Tarakji, Khaldoun G.; Al-Khatib, Sana M.; Passman, Rod S.; Saxon, Leslie A.; Peters, Nicholas S.; McCall, Debbe; Turakhia, Mintu P.; Schaeffer, Jill; Mendenhall, G. Stuart; Hindricks, Gerhard; Narayan, Sanjiv M.; Davenport, Elizabeth E.; Marrouche, Nassir F.
Transparent sharing of digital health data: A call to action Journal Article
In: Heart Rhythm, vol. 16, no. 9, pp. e95–e106, 2019, ISSN: 1547-5271, 1556-3871, (Publisher: Elsevier).
@article{slotwiner_transparent_2019,
title = {Transparent sharing of digital health data: A call to action},
author = {David J. Slotwiner and Khaldoun G. Tarakji and Sana M. Al-Khatib and Rod S. Passman and Leslie A. Saxon and Nicholas S. Peters and Debbe McCall and Mintu P. Turakhia and Jill Schaeffer and G. Stuart Mendenhall and Gerhard Hindricks and Sanjiv M. Narayan and Elizabeth E. Davenport and Nassir F. Marrouche},
url = {https://www.heartrhythmjournal.com/article/S1547-5271(19)30371-6/fulltext#%20},
doi = {10.1016/j.hrthm.2019.04.042},
issn = {1547-5271, 1556-3871},
year = {2019},
date = {2019-09-01},
urldate = {2023-03-31},
journal = {Heart Rhythm},
volume = {16},
number = {9},
pages = {e95–e106},
note = {Publisher: Elsevier},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lycan, Bethany; Artstein, Ron
Direct and Mediated Interaction with a Holocaust Survivor Proceedings Article
In: Proceedings of the Advanced Social Interaction with Agents: 8th International Workshop on Spoken Dialog Systems, pp. 161–167, Springer, Cham, Switzerland, 2019.
@inproceedings{lycan_direct_2019,
title = {Direct and Mediated Interaction with a Holocaust Survivor},
author = {Bethany Lycan and Ron Artstein},
url = {https://doi.org/10.1007/978-3-319-92108-2_17},
doi = {10.1007/978-3-319-92108-2_17},
year = {2019},
date = {2019-08-01},
booktitle = {Proceedings of the Advanced Social Interaction with Agents: 8th International Workshop on Spoken Dialog Systems},
volume = {510},
pages = {161–167},
publisher = {Springer},
address = {Cham, Switzerland},
series = {Lecture Notes in Electrical Engineering},
abstract = {The New Dimensions in Testimony dialogue system was placed in two museums under two distinct conditions: docent-led group interaction, and free interaction with visitors. Analysis of the resulting conversations shows that docent-led interactions have a lower vocabulary and a higher proportion of user utterances that directly relate to the system’s subject matter, while free interaction is more personal in nature. Under docent-led interaction the system gives a higher proportion of direct appropriate responses, but overall correct system behavior is about the same in both conditions because the free interaction condition has more instances where the correct system behavior is to avoid a direct response.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Talbot, Thomas; Rizzo, Albert “Skip”
Virtual Human Standardized Patients for Clinical Training Book Section
In: Virtual Reality for Psychological and Neurocognitive Interventions, pp. 387–405, Springer New York, New York, NY, 2019, ISBN: 978-1-4939-9480-9 978-1-4939-9482-3.
@incollection{talbot_virtual_2019-1,
title = {Virtual Human Standardized Patients for Clinical Training},
author = {Thomas Talbot and Albert “Skip” Rizzo},
url = {http://link.springer.com/10.1007/978-1-4939-9482-3_17},
doi = {10.1007/978-1-4939-9482-3_17},
isbn = {978-1-4939-9480-9 978-1-4939-9482-3},
year = {2019},
date = {2019-08-01},
booktitle = {Virtual Reality for Psychological and Neurocognitive Interventions},
pages = {387–405},
publisher = {Springer New York},
address = {New York, NY},
abstract = {Since Dr. Howard Barrows (1964) introduced the human standardized patient in 1963, there have been attempts to create a computer-based simulacrum of a patient encounter; the first being a heart attack simulation using the online PLATO system (Bitzer M, Nursing Research 15:144–150, 1966). With the now ubiquitous use of computers in medicine, interest and effort have been expended in the area of Virtual Patients (VPs). There are excellent summaries in the literature (Talbot TB, International Journal of Gaming and Computer Mediated Simulations 4:1–19, 2012) that explain the different types of virtual patients along with their best case applications, strengths and limitations.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Bonial, Claire; Donatelli, Lucia; Lukin, Stephanie M.; Tratz, Stephen; Artstein, Ron; Traum, David; Voss, Clare R.
Augmenting Abstract Meaning Representation for Human-Robot Dialogue Proceedings Article
In: Proceedings of the First International Workshop on Designing Meaning Representations (DMR), pp. 199–210, Association of Computational Linguistics, Florence, Italy, 2019.
@inproceedings{bonial_augmenting_2019,
title = {Augmenting Abstract Meaning Representation for Human-Robot Dialogue},
author = {Claire Bonial and Lucia Donatelli and Stephanie M. Lukin and Stephen Tratz and Ron Artstein and David Traum and Clare R. Voss},
url = {https://www.aclweb.org/anthology/W19-3322},
year = {2019},
date = {2019-08-01},
booktitle = {Proceedings of the First International Workshop on Designing Meaning Representations (DMR)},
pages = {199–210},
publisher = {Association of Computational Linguistics},
address = {Florence, Italy},
abstract = {We detail refinements made to Abstract Meaning Representation (AMR) that make the representation more suitable for supporting a situated dialogue system, where a human remotely controls a robot for purposes of search and rescue and reconnaissance. We propose 36 augmented AMRs that capture speech acts, tense and aspect, and spatial information. This linguistic information is vital for representing important distinctions, for example whether the robot has moved, is moving, or will move. We evaluate two existing AMR parsers for their performance on dialogue data. We also outline a model for graph-to-graph conversion, in which output from AMR parsers is converted into our refined AMRs. The design scheme presented here, though task-specific, is extendable for broad coverage of speech acts using AMR in future task-independent work.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
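For readers unfamiliar with graph-to-graph augmentation of AMR, a small illustration using the penman library; the :tense role here is a hypothetical stand-in for the paper's tense/aspect refinements, not the authors' exact notation:

import penman

g = penman.decode("(m / move-01 :ARG0 (r / robot))")
# Append one extra attribute triple, then re-serialize the graph.
augmented = penman.Graph(g.triples + [("m", ":tense", "future")])
print(penman.encode(augmented))
# (m / move-01
#    :ARG0 (r / robot)
#    :tense future)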
Rosenbloom, Paul S.; Forbus, Kenneth D.
Expanding and Repositioning Cognitive Science Journal Article
In: Topics in Cognitive Science, 2019, ISSN: 1756-8757, 1756-8765.
@article{rosenbloom_expanding_2019,
title = {Expanding and Repositioning Cognitive Science},
author = {Paul S. Rosenbloom and Kenneth D. Forbus},
url = {https://onlinelibrary.wiley.com/doi/abs/10.1111/tops.12468},
doi = {10.1111/tops.12468},
issn = {1756-8757, 1756-8765},
year = {2019},
date = {2019-08-01},
journal = {Topics in Cognitive Science},
abstract = {Cognitive science has converged in many ways with cognitive psychology, while also maintaining a distinctive interdisciplinary nature. Here we further characterize this existing state of the field before proposing how it might be reconceptualized toward a broader and more distinct, and thus more stable, position in the realm of sciences.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Stocco, Andrea; Steine-Hanson, Zoe; Koh, Natalie; Laird, John E.; Lebiere, Christian J.; Rosenbloom, Paul
Analysis of the Human Connectome Data Supports the Notion of A “Common Model of Cognition” for Human and Human-Like Intelligence Technical Report
Neuroscience 2019.
@techreport{stocco_analysis_2019,
title = {Analysis of the Human Connectome Data Supports the Notion of A “Common Model of Cognition” for Human and Human-Like Intelligence},
author = {Andrea Stocco and Zoe Steine-Hanson and Natalie Koh and John E. Laird and Christian J. Lebiere and Paul Rosenbloom},
url = {http://biorxiv.org/lookup/doi/10.1101/703777},
doi = {10.1101/703777},
year = {2019},
date = {2019-07-01},
pages = {38},
institution = {Neuroscience},
abstract = {The Common Model of Cognition (CMC) is a recently proposed, consensus architecture intended to capture decades of progress in cognitive science on modeling human and human-like intelligence. Because of the broad agreement around it and preliminary mappings of its components to specific brain areas, we hypothesized that the CMC could be a candidate model of the large-scale functional architecture of the human brain. To test this hypothesis, we analyzed functional MRI data from 200 participants and seven different tasks that cover a broad range of cognitive domains. The CMC components were identified with functionally homologous brain regions through canonical fMRI analysis, and their communication pathways were translated into predicted patterns of effective connectivity between regions. The resulting dynamic linear model was implemented and fitted using Dynamic Causal Modeling, and compared against four alternative brain architectures that had been previously proposed in the field of neuroscience (two hierarchical architectures and two hub-and-spoke architectures) using a Bayesian approach. The results show that, in all cases, the CMC vastly outperforms all other architectures, both within each domain and across all tasks. The results suggest that a common, general architecture that could be used for artificial intelligence effectively underpins all aspects of human cognition, from the overall functional architecture of the human brain to higher level thought processes.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
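The study's architecture comparison uses Dynamic Causal Modeling; as a drastically simplified stand-in for that kind of model comparison, the sketch below scores two candidate linear models of synthetic data by BIC, where the lower score wins:

import numpy as np

def bic(y, y_hat, n_params):
    # Bayesian Information Criterion for a Gaussian-error model.
    n = len(y)
    rss = np.sum((y - y_hat) ** 2)
    return n * np.log(rss / n) + n_params * np.log(n)

rng = np.random.default_rng(0)
x = rng.normal(size=200)
y = 2.0 * x + rng.normal(scale=0.5, size=200)

# Model A: y depends on x; Model B: intercept only.
coef = np.polyfit(x, y, 1)
print("A:", bic(y, np.polyval(coef, x), n_params=2))
print("B:", bic(y, np.full_like(y, y.mean()), n_params=1))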
Mell, Johnathan; Beissinger, Markus; Gratch, Jonathan
An Expert-Model & Machine Learning Hybrid Approach to Predicting Human-Agent Negotiation Outcomes Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 212–214, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
@inproceedings{mell_expert-model_2019,
title = {An Expert-Model & Machine Learning Hybrid Approach to Predicting Human-Agent Negotiation Outcomes},
author = {Johnathan Mell and Markus Beissinger and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329433},
doi = {10.1145/3308532.3329433},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {212–214},
publisher = {ACM Press},
address = {Paris, France},
abstract = {We present the results of a machine-learning approach to the analysis of several human-agent negotiation studies. By combining expert knowledge of negotiating behavior compiled over a series of empirical studies with neural networks, we show that a hybrid approach to parameter selection yields promise for designing more effective and socially intelligent agents. Specifically, we show that a deep feedforward neural network using a theory-driven three-parameter model can be effective in predicting negotiation outcomes. Furthermore, it outperforms other expert-designed models that use more parameters, as well as those using other, more limited techniques (such as linear regression models or boosted decision trees). We anticipate these results will have impact for those seeking to combine extensive domain knowledge with more automated approaches in human-computer negotiation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
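A minimal sketch of a deep feedforward network over a three-parameter, theory-driven input, as the abstract describes; the parameter names and layer sizes are illustrative assumptions, not the paper's model:

import torch
import torch.nn as nn

# Inputs: e.g. three theory-driven features of a negotiation.
model = nn.Sequential(
    nn.Linear(3, 32), nn.ReLU(),
    nn.Linear(32, 32), nn.ReLU(),
    nn.Linear(32, 1),  # predicted negotiation outcome (points)
)

features = torch.tensor([[0.8, 0.2, 0.5]])
print(model(features))  # untrained prediction, for shape-checking only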
Lee, Minha; Lucas, Gale; Mell, Johnathan; Johnson, Emmanuel; Gratch, Jonathan
What's on Your Virtual Mind?: Mind Perception in Human-Agent Negotiations Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 38–45, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
@inproceedings{lee_whats_2019,
title = {What's on Your Virtual Mind?: Mind Perception in Human-Agent Negotiations},
author = {Minha Lee and Gale Lucas and Johnathan Mell and Emmanuel Johnson and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329465},
doi = {10.1145/3308532.3329465},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {38–45},
publisher = {ACM Press},
address = {Paris, France},
abstract = {Recent research shows that how we respond to other social actors depends on what sort of mind we ascribe to them. In this article we examine how perceptions of a virtual agent’s mind shape behavior in human-agent negotiations. We varied descriptions and communicative behavior of virtual agents on two dimensions according to the mind perception theory: agency (cognitive aptitude) and patiency (affective aptitude). Participants then engaged in negotiations with the different agents. People scored more points and engaged in shorter negotiations with agents described to be cognitively intelligent, and got lower points and had longer negotiations with agents that were described to be cognitively unintelligent. Accordingly, agents described as having low-agency ended up earning more points than those with high-agency. Within the negotiations themselves, participants sent more happy and surprise emojis and emotionally valenced messages to agents described to be emotional. This high degree of described patiency also affected perceptions of the agent’s moral standing and relatability. In short, manipulating the perceived mind of agents affects how people negotiate with them. We discuss these results, which show that agents are perceived not only as social actors, but as intentional actors through negotiations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S; Ustun, Volkan
An Architectural Integration of Temporal Motivation Theory for Decision Making Proceedings Article
In: Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling, pp. 6, Montreal, Canada, 2019.
@inproceedings{rosenbloom_architectural_2019,
title = {An Architectural Integration of Temporal Motivation Theory for Decision Making},
author = {Paul S Rosenbloom and Volkan Ustun},
url = {https://iccm-conference.neocities.org/2019/proceedings/papers/ICCM2019_paper_7.pdf},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling},
pages = {6},
address = {Montreal, Canada},
abstract = {Temporal Motivation Theory (TMT) is incorporated into the Sigma cognitive architecture to explore the ability of this combination to yield human-like decision making. In conjunction with Lazy Reinforcement Learning (LRL), which provides the inputs required for this form of decision making, experiments are run on a simple reinforcement learning task, a preference reversal task, and an uncertain two-choice task.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
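Temporal Motivation Theory's standard utility equation is U = (E × V) / (1 + I × D); how Sigma integrates it architecturally is not shown here, but the equation itself can be sketched directly, with made-up option values:

def tmt_utility(expectancy, value, impulsiveness, delay):
    # Temporal Motivation Theory: U = (E * V) / (1 + I * D).
    return (expectancy * value) / (1 + impulsiveness * delay)

options = {
    "small, immediate reward": tmt_utility(0.9, 10, impulsiveness=1.0, delay=0),
    "large, delayed reward": tmt_utility(0.9, 100, impulsiveness=1.0, delay=20),
}
# With this much delay the small immediate reward wins (9.0 vs. ~4.3),
# illustrating the temporal discounting at the heart of TMT.
print(max(options, key=options.get))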
Rosenbloom, Paul S
(A)symmetry × (Non)monotonicity: Towards a Deeper Understanding of Key Cognitive Di/Trichotomies and the Common Model of Cognition Proceedings Article
In: Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling, pp. 6, Montreal, Canada, 2019.
@inproceedings{rosenbloom_symmetry_2019,
title = {(A)symmetry × (Non)monotonicity: Towards a Deeper Understanding of Key Cognitive Di/Trichotomies and the Common Model of Cognition},
author = {Paul S Rosenbloom},
url = {https://iccm-conference.neocities.org/2019/proceedings/papers/ICCM2019_paper_6.pdf},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 17th Annual Meeting of the International Conference on Cognitive Modeling},
pages = {6},
address = {Montreal, Canada},
abstract = {A range of dichotomies from across the cognitive sciences are reduced to either (a)symmetry or (non)monotonicity. Taking the cross-product of these two elemental dichotomies then yields a deeper understanding of both two key trichotomies – based on control and content hierarchies – and the Common Model of Cognition, with results that bear on the structure of integrative cognitive architectures, models and systems, and on their commonalities, differences and gaps.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Johnson, Emmanuel; Roediger, Sarah; Lucas, Gale; Gratch, Jonathan
Assessing Common Errors Students Make When Negotiating Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 30–37, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
@inproceedings{johnson_assessing_2019,
title = {Assessing Common Errors Students Make When Negotiating},
author = {Emmanuel Johnson and Sarah Roediger and Gale Lucas and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329470},
doi = {10.1145/3308532.3329470},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {30–37},
publisher = {ACM Press},
address = {Paris, France},
abstract = {Research has shown that virtual agents can be effective tools for teaching negotiation. Virtual agents provide an opportunity for students to practice their negotiation skills which leads to better outcomes. However, these negotiation training agents often lack the ability to understand the errors students make when negotiating, thus limiting their effectiveness as training tools. In this article, we argue that automated opponent-modeling techniques serve as effective methods for diagnosing important negotiation mistakes. To demonstrate this, we analyze a large number of participant traces generated while negotiating with a set of automated opponents. We show that negotiators’ performance is closely tied to their understanding of an opponent’s preferences. We further show that opponent modeling techniques can diagnose specific errors including: failure to elicit diagnostic information from an opponent, failure to utilize the information that was elicited, and failure to understand the transparency of an opponent. These results show that opponent modeling techniques can be effective methods for diagnosing and potentially correcting crucial negotiation errors.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M.; Lehr, Janina; Krämer, Nicole; Gratch, Jonathan
The Effectiveness of Social Influence Tactics when Used by a Virtual Agent Proceedings Article
In: Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19, pp. 22–29, ACM Press, Paris, France, 2019, ISBN: 978-1-4503-6672-4.
@inproceedings{lucas_effectiveness_2019,
title = {The Effectiveness of Social Influence Tactics when Used by a Virtual Agent},
author = {Gale M. Lucas and Janina Lehr and Nicole Krämer and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3308532.3329464},
doi = {10.1145/3308532.3329464},
isbn = {978-1-4503-6672-4},
year = {2019},
date = {2019-07-01},
booktitle = {Proceedings of the 19th ACM International Conference on Intelligent Virtual Agents - IVA '19},
pages = {22–29},
publisher = {ACM Press},
address = {Paris, France},
abstract = {Research in social science distinguishes between two types of social influence: informational and normative. Informational social influence is driven by the desire to evaluate ambiguous situations correctly, whereas normative social influence is driven by the desire to be liked and gain social acceptance from another person. Although we know from research that humans can effectively use either of these techniques to persuade other humans, scholars have yet to examine the relative effectiveness of informational versus normative social influence when used by virtual agents. We report a study in which users interact with a system that persuades them either using informational or normative social influence. Furthermore, to compare agents to human interlocutors, users are told that the system is either teleoperated by a human (avatar) or fully-automated (agent). Using this design, we are able to compare the effectiveness of virtual agents (vs humans) in employing informational versus normative social influence. Participants interacted with the system, which employed a Wizard-of-Oz operated virtual agent that tried to persuade the user to agree with its rankings on a “survival task.” Controlling for initial divergence in rankings between user and the agent, there was a significant main effect such that informational social influence resulted in greater influence than normative influence. However, this was qualified by an interaction that approached significance; users were, if anything, more persuaded by informational influence when they believe the agent was AI (compared to a human), whereas there was no difference between the agent and avatar in the normative influence condition.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2015
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Debevec, Paul
Building a Life-Size Automultiscopic Display Using Consumer Hardware Proceedings Article
In: Proceedings of GPU Technology Conference, San Jose, CA, 2015.
@inproceedings{jones_building_2015,
title = {Building a Life-Size Automultiscopic Display Using Consumer Hardware},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Paul Debevec},
url = {http://ict.usc.edu/pubs/Building%20a%20Life-Size%20Automultiscopic%20Display%20Using%20Consumer%20Hardware.pdf},
year = {2015},
date = {2015-03-01},
booktitle = {Proceedings of GPU Technology Conference},
address = {San Jose, CA},
abstract = {Automultiscopic displays allow multiple users to experience 3D content without the hassle of special glasses or head gear. Such displays generate many simultaneous images with high-angular density, so that each eye perceives a distinct and different view. This presents a unique challenge for content acquisition and rendering. In this talk, we explain how to build an automultiscopic display using off-the-shelf projectors, video-splitters, and graphics cards. We also present a GPU-based algorithm for rendering a large number of views from a sparse array of video cameras.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Ward, Nigel G.; DeVault, David
Ten Challenges in Highly-Interactive Dialog Systems Proceedings Article
In: Proceedings of AAAI 2015 Spring Symposium, Palo Alto, CA, 2015.
@inproceedings{ward_ten_2015,
title = {Ten Challenges in Highly-Interactive Dialog Systems},
author = {Nigel G. Ward and David DeVault},
url = {http://ict.usc.edu/pubs/Ten%20Challenges%20in%20Highly-Interactive%20Dialog%20Systems.pdf},
year = {2015},
date = {2015-03-01},
booktitle = {Proceedings of AAAI 2015 Spring Symposium},
address = {Palo Alto, CA},
abstract = {Systems capable of highly-interactive dialog have recently been developed in several domains. This paper considers how to build on these successes to make systems more robust, easier to develop, more adaptable, and more scientifically significant.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
DeVault, David; Mell, Jonathan; Gratch, Jonathan
Toward Natural Turn-Taking in a Virtual Human Negotiation Agent Proceedings Article
In: AAAI Spring Symposium on Turn-taking and Coordination in Human-Machine Interaction, pp. 2–9, AAAI Press, Palo Alto, California, 2015.
@inproceedings{devault_toward_2015,
title = {Toward Natural Turn-Taking in a Virtual Human Negotiation Agent},
author = {David DeVault and Jonathan Mell and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Toward%20Natural%20Turn-Taking%20in%20a%20Virtual%20Human%20Negotiation%20Agent.pdf},
year = {2015},
date = {2015-03-01},
booktitle = {AAAI Spring Symposium on Turn-taking and Coordination in Human-Machine Interaction},
pages = {2–9},
publisher = {AAAI Press},
address = {Palo Alto, California},
abstract = {In this paper we assess our progress toward creating a virtual human negotiation agent with fluid turn-taking skills. To facilitate the design of this agent, we have collected a corpus of human-human negotiation roleplays as well as a corpus of Wizard-controlled human-agent negotiations in the same roleplay scenario. We compare the natural turn-taking behavior in our human-human corpus with that achieved in our Wizard-of-Oz corpus, and quantify our virtual human’s turn-taking skills using a combination of subjective and objective metrics. We also discuss our design for a Wizard user interface to support real-time control of the virtual human’s turn-taking and dialogue behavior, and analyze our wizard’s usage of this interface.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Casas, Dan; Alexander, Oleg; Feng, Andrew W.; Fyffe, Graham; Ichikari, Ryosuke; Debevec, Paul; Wang, Ruizhe; Suma, Evan; Shapiro, Ari
Rapid Photorealistic Blendshapes from Commodity RGB-D Sensors Proceedings Article
In: Proceedings of the 19th Symposium on Interactive 3D Graphics and Games, pp. 134–134, ACM Press, San Francisco, CA, 2015, ISBN: 978-1-4503-3392-4.
@inproceedings{casas_rapid_2015,
title = {Rapid Photorealistic Blendshapes from Commodity RGB-D Sensors},
author = {Dan Casas and Oleg Alexander and Andrew W. Feng and Graham Fyffe and Ryosuke Ichikari and Paul Debevec and Ruizhe Wang and Evan Suma and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2699276.2721398},
doi = {10.1145/2699276.2721398},
isbn = {978-1-4503-3392-4},
year = {2015},
date = {2015-02-01},
booktitle = {Proceedings of the 19th Symposium on Interactive 3D Graphics and Games},
pages = {134–134},
publisher = {ACM Press},
address = {San Francisco, CA},
abstract = {Creating and animating a realistic 3D human face has been an important task in computer graphics. The capability of capturing the 3D face of a human subject and reanimate it quickly will find many applications in games, training simulations, and interactive 3D graphics. In this paper, we propose a system to capture photorealistic 3D faces and generate the blendshape models automatically using only a single commodity RGB-D sensor. Our method can rapidly generate a set of expressive facial poses from a single Microsoft Kinect and requires no artistic expertise on the part of the capture subject. The system takes only a matter of seconds to capture and produce a 3D facial pose and only requires 4 minutes of processing time to transform it into a blendshape model. Our main contributions include an end-to-end pipeline for capturing and generating face blendshape models automatically, and a registration method that solves dense correspondences between two face scans by utilizing facial landmark detection and optical flow. We demonstrate the effectiveness of the proposed method by capturing 3D facial models of different human subjects and puppeteering their models in an animation system with real-time facial performance retargeting.},
keywords = {Graphics, MxR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
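One ingredient of the registration method this abstract mentions, landmark correspondence, can be illustrated with a rigid Procrustes (Kabsch) alignment of landmark sets; the landmark arrays below are synthetic, not from the paper's pipeline:

import numpy as np

def procrustes_align(src, dst):
    # Find rotation R and translation t minimizing ||R @ src_i + t - dst_i||.
    mu_s, mu_d = src.mean(axis=0), dst.mean(axis=0)
    u, _, vt = np.linalg.svd((src - mu_s).T @ (dst - mu_d))
    r = (u @ vt).T
    if np.linalg.det(r) < 0:  # avoid reflections
        vt[-1] *= -1
        r = (u @ vt).T
    return r, mu_d - r @ mu_s

src = np.random.rand(68, 3)  # e.g. 68 detected facial landmarks
angle = np.deg2rad(10)
rot = np.array([[np.cos(angle), -np.sin(angle), 0],
                [np.sin(angle), np.cos(angle), 0],
                [0, 0, 1]])
dst = src @ rot.T + np.array([0.1, 0.0, 0.2])  # rotated + translated copy
r, t = procrustes_align(src, dst)
print(np.allclose(src @ r.T + t, dst))  # True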
Talbot, Thomas
COMRADE: Methods for Adaptive Competency Management and Just-in-Time Clinical Acumen Augmentation Journal Article
In: MedSim Magazine, pp. 26–28, 2015.
@article{talbot_comrade_2015,
title = {COMRADE: Methods for Adaptive Competency Management and Just-in-Time Clinical Acumen Augmentation},
author = {Thomas Talbot},
url = {http://ict.usc.edu/pubs/COMRADE%20-%20Methods%20for%20Adaptive%20Competency%20Management%20and%20Just-in-Time%20Clinical%20Acumen%20Augmentation.pdf},
year = {2015},
date = {2015-01-01},
journal = {MedSim Magazine},
pages = {26–28},
abstract = {Dr. Thomas Talbot shares ideas for enhancing the electronic medical record to act as a didactic tool to support physician competency.},
keywords = {MedVR, UARC},
pubstate = {published},
tppubtype = {article}
}
Corbin, Carina; Morbini, Fabrizio; Traum, David
Creating a Virtual Neighbor Proceedings Article
In: Proceedings of International Workshop on Spoken Dialogue Systems, Busan, South Korea, 2015.
@inproceedings{corbin_creating_2015,
title = {Creating a Virtual Neighbor},
author = {Carina Corbin and Fabrizio Morbini and David Traum},
url = {http://ict.usc.edu/pubs/Creating%20a%20Virtual%20Neighbor.pdf},
year = {2015},
date = {2015-01-01},
booktitle = {Proceedings of International Workshop on Spoken Dialogue Systems},
address = {Busan, South Korea},
abstract = {We present the first version of our Virtual Neighbor, who can talk with users about people employed in the same institution. The Virtual Neighbor can discuss information about employees in a medium-sized company or institute with users. The system acquires information from three sources: a personnel directory database, public web pages, and through dialogue interaction. Users can interact through face to face spoken dialogue, using components from the ICT Virtual Human Toolkit, or via a chat interface.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Morency, Louis-Philippe; Stratou, Giota; DeVault, David; Hartholt, Arno; Lhommet, Margaux; Lucas, Gale; Morbini, Fabrizio; Georgila, Kallirroi; Scherer, Stefan; Gratch, Jonathan; Marsella, Stacy; Traum, David; Rizzo, Albert "Skip"
SimSensei Demonstration: A Perceptive Virtual Human Interviewer for Healthcare Applications Proceedings Article
In: Proceedings of the 29th AAAI Conference on Artificial Intelligence (AAAI), Austin, Texas, 2015.
@inproceedings{morency_simsensei_2015,
title = {SimSensei Demonstration: A Perceptive Virtual Human Interviewer for Healthcare Applications},
author = {Louis-Philippe Morency and Giota Stratou and David DeVault and Arno Hartholt and Margaux Lhommet and Gale Lucas and Fabrizio Morbini and Kallirroi Georgila and Stefan Scherer and Jonathan Gratch and Stacy Marsella and David Traum and Albert "Skip" Rizzo},
url = {http://ict.usc.edu/pubs/SimSensei%20Demonstration%20A%20Perceptive%20Virtual%20Human%20Interviewer%20for%20Healthcare%20Applications.pdf},
year = {2015},
date = {2015-01-01},
booktitle = {Proceedings of the 29th AAAI Conference on Artificial Intelligence (AAAI)},
address = {Austin, Texas},
abstract = {We present the SimSensei system, a fully automatic virtual agent that conducts interviews to assess indicators of psychological distress. We emphasize on the perception part of the system, a multimodal framework which captures and analyzes user state for both behavioral understanding and interactional purposes.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rosenbloom, Paul S.; Demski, Abram; Ustun, Volkan
Efficient message computation in Sigma’s graphical architecture Journal Article
In: Biologically Inspired Cognitive Architectures, vol. 11, pp. 1–9, 2015, ISSN: 2212683X.
@article{rosenbloom_efficient_2015,
title = {Efficient message computation in Sigma’s graphical architecture},
author = {Paul S. Rosenbloom and Abram Demski and Volkan Ustun},
url = {http://linkinghub.elsevier.com/retrieve/pii/S2212683X14000723},
doi = {10.1016/j.bica.2014.11.009},
issn = {2212683X},
year = {2015},
date = {2015-01-01},
journal = {Biologically Inspired Cognitive Architectures},
volume = {11},
pages = {1–9},
abstract = {Human cognition runs at ∼50 ms per cognitive cycle, implying that any biologically inspired cognitive architecture that strives for real-time performance needs to be able to run at this speed. Sigma is a cognitive architecture built upon graphical models – a broadly applicable state-of-the-art formalism for implementing cognitive capabilities – that are solved via message passing (with complex messages based on n-dimensional piecewise-linear functions). Earlier work explored optimizations to Sigma that reduced by an order of magnitude the number of messages sent per cycle. Here, optimizations are introduced that reduce by an order of magnitude the average time required per message sent.},
keywords = {CogArch, Cognitive Architecture, UARC},
pubstate = {published},
tppubtype = {article}
}
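Sigma's messages are n-dimensional piecewise-linear functions; as a simplified discrete stand-in for the message computation the abstract optimizes, the following computes one standard sum-product factor-to-variable message with numpy:

import numpy as np

def factor_to_variable_message(factor, incoming, axis):
    # Multiply incoming variable messages into `factor` (a joint table)
    # and sum out every axis except `axis`.
    table = factor.copy()
    for ax, msg in incoming.items():
        shape = [1] * table.ndim
        shape[ax] = -1
        table = table * msg.reshape(shape)
    other_axes = tuple(i for i in range(table.ndim) if i != axis)
    return table.sum(axis=other_axes)

# Factor over (X, Y), incoming message on Y, outgoing message to X.
f = np.array([[0.9, 0.1], [0.2, 0.8]])
print(factor_to_variable_message(f, {1: np.array([0.5, 0.5])}, axis=0))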
Park, Sunghyun; Scherer, Stefan; Gratch, Jonathan; Carnevale, Peter; Morency, Louis-Philippe
I Can Already Guess Your Answer: Predicting Respondent Reactions During Dyadic Negotiation Journal Article
In: IEEE Transactions on Affective Computing, vol. 6, no. 2, pp. 86 –96, 2015, ISSN: 1949-3045.
@article{park_i_2015,
title = {I Can Already Guess Your Answer: Predicting Respondent Reactions During Dyadic Negotiation},
author = {Sunghyun Park and Stefan Scherer and Jonathan Gratch and Peter Carnevale and Louis-Philippe Morency},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=7024926},
doi = {10.1109/TAFFC.2015.2396079},
issn = {1949-3045},
year = {2015},
date = {2015-01-01},
journal = {IEEE Transactions on Affective Computing},
volume = {6},
number = {2},
pages = {86 –96},
abstract = {Negotiation is a component deeply ingrained in our daily lives, and it can be challenging for a person to predict the respondent’s reaction (acceptance or rejection) to a negotiation offer. In this work, we focus on finding acoustic and visual behavioral cues that are predictive of the respondent’s immediate reactions using a face-to-face negotiation dataset, which consists of 42 dyadic interactions in a simulated negotiation setting. We show our results of exploring 4 different sources of information, namely nonverbal behavior of the proposer, that of the respondent, mutual behavior between the interactants related to behavioral symmetry and asymmetry, and past negotiation history between the interactants. Firstly, we show that considering other sources of information (other than the nonverbal behavior of the respondent) can also have comparable performance in predicting respondent reactions. Secondly, we show that automatically extracted mutual behavioral cues of symmetry and asymmetry are predictive partially due to their capturing information of the nature of the interaction itself, whether it is cooperative or competitive. Lastly, we identify audio-visual behavioral cues that are most predictive of the respondent’s immediate reactions.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
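A simple stand-in for this paper's prediction task: a logistic regression from audio-visual cue features to an accept/reject label. The features and labels below are synthetic placeholders; the paper's actual cues and models are richer:

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(42)
cues = rng.normal(size=(42, 5))  # e.g. smile, gaze, pitch, ... per dyad
# Synthetic accept/reject labels loosely driven by two of the cues.
accepted = (cues[:, 0] + 0.5 * cues[:, 2] + rng.normal(size=42) > 0).astype(int)

clf = LogisticRegression().fit(cues, accepted)
print(clf.score(cues, accepted))  # training accuracy on the toy data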
2014
Marsella, Stacy; Gratch, Jonathan
Computationally Modeling Human Emotion Journal Article
In: Communications of the ACM, vol. 57, no. 12, pp. 56–67, 2014.
@article{marsella_computationally_2014,
title = {Computationally Modeling Human Emotion},
author = {Stacy Marsella and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=2631912},
doi = {10.1145/2631912},
year = {2014},
date = {2014-12-01},
journal = {Communications of the ACM},
volume = {57},
number = {12},
pages = {56–67},
abstract = {Emotion’s role in human behavior is an old debate that has become increasingly relevant to the computational sciences. Two-and-a-half millennia ago, Aristotle espoused a view of emotion at times remarkably similar to modern psychological theories, arguing that emotions (such as anger), in moderation, play a useful role, especially in interactions with others. Those who express anger at appropriate times are praiseworthy, while those lacking in anger at appropriate times are treated as a fool. The Stoics took a different view; four centuries after Aristotle, Seneca considered emotions (such as anger) as a threat to reason, arguing, “reason … is only powerful so long as it remains isolated from emotions.” In the 18th century, David Hume radically departed from the Stoic perspective, arguing for the key motivating role of emotions, saying, “Reason is, and ought only to be the slave of the passions.” A similar dichotomy of views can be seen in the history of artificial intelligence (AI) and agent research.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Rizzo, Albert; Scherer, Stefan; DeVault, David; Gratch, Jonathan; Artstein, Ron; Hartholt, Arno; Lucas, Gale; Marsella, Stacy; Morbini, Fabrizio; Nazarian, Angela; Stratou, Giota; Traum, David; Wood, Rachel; Boberg, Jill; Morency, Louis-Philippe
Detection and Computational Analysis of Psychological Signals Using a Virtual Human Interviewing Agent Proceedings Article
In: Proceedings of ICDVRAT 2014, International Journal of Disability and Human Development, Gothenburg, Sweden, 2014.
@inproceedings{rizzo_detection_2014,
title = {Detection and Computational Analysis of Psychological Signals Using a Virtual Human Interviewing Agent},
author = {Albert Rizzo and Stefan Scherer and David DeVault and Jonathan Gratch and Ron Artstein and Arno Hartholt and Gale Lucas and Stacy Marsella and Fabrizio Morbini and Angela Nazarian and Giota Stratou and David Traum and Rachel Wood and Jill Boberg and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Detection%20and%20Computational%20Analysis%20of%20Psychological%20Signals%20Using%20a%20Virtual%20Human%20Interviewing%20Agent.pdf},
year = {2014},
date = {2014-12-01},
booktitle = {Proceedings of ICDVRAT 2014},
publisher = {International Journal of Disability and Human Development},
address = {Gothenburg, Sweden},
abstract = {It has long been recognized that facial expressions, body posture/gestures and vocal parameters play an important role in human communication and the implicit signalling of emotion. Recent advances in low cost computer vision and behavioral sensing technologies can now be applied to the process of making meaningful inferences as to user state when a person interacts with a computational device. Effective use of this additive information could serve to promote human interaction with virtual human (VH) agents that may enhance diagnostic assessment. This paper will focus on our current research in these areas within the DARPA-funded “Detection and Computational Analysis of Psychological Signals” project, with specific attention to the SimSensei application use case. SimSensei is a virtual human interaction platform that is able to sense and interpret real-time audiovisual behavioral signals from users interacting with the system. It is specifically designed for health care support and leverages years of virtual human research and development at USC-ICT. The platform enables an engaging face-to-face interaction where the virtual human automatically reacts to the state and inferred intent of the user through analysis of behavioral signals gleaned from facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human to human interaction and communication, SimSensei aims to capture and infer from user non-verbal communication to improve engagement between a VH and a user. The system can also quantify and interpret sensed behavioral signals.},
keywords = {MedVR, Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Andreatta, Pamela; Klotz, Jessica; Madsen, James M.; Hurst, Charles G.; Talbot, Thomas
Assessment instrument validation for critical clinical competencies - pediatric-neonatal intubation and cholinergic crisis management Proceedings Article
In: Proceedings of Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2014, Orlando, FL, 2014.
Abstract | Links | BibTeX | Tags: DoD, MedVR, UARC
@inproceedings{andreatta_assessment_2014,
title = {Assessment instrument validation for critical clinical competencies - pediatric-neonatal intubation and cholinergic crisis management},
author = {Pamela Andreatta and Jessica Klotz and James M. Madsen and Charles G. Hurst and Thomas Talbot},
url = {http://ict.usc.edu/pubs/Assessment%20instrument%20validation%20for%20critical%20clinical%20competencies%20-%20pediatricneonatal%20intubation%20and%20cholinergic%20crisis%20management.pdf},
year = {2014},
date = {2014-12-01},
booktitle = {Proceedings of Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2014},
address = {Orlando, FL},
abstract = {Military and civilian first-responders must be able to recognize and effectively manage casualties that necessitate immediate application of critical clinical competencies. Two examples of these critical competencies are the clinical management of injuries resulting from nerve agents and difficult intubation, especially for pediatric or neonatal patients. The opportunity to learn and practice the necessary skills for these rare, but urgent, situations is complicated by the limited ability to replicate essential situational factors that influence performance in the applied clinical environment. Simulation-based training may resolve some of these challenges; however, it is imperative that evidence be captured to document the achievement of performance competencies in the training environment that transfer to applied clinical care. The purpose of this study was to establish psychometric characteristics for competency assessment instruments associated with two such critical competencies: management of cholinergic crisis and pediatric-neonatal intubation. Methods: To inform the development of assessment instruments, we conducted comprehensive task analyses across each performance domain (knowledge, performance). Expert review confirmed content validity. Construct validity was established using the instruments to differentiate between the performance abilities of practitioners with variable experience (novice through expert). Purposively selected first-responder subjects for pediatric-neonatal intubation (N=214) and cholinergic crisis management (N=123) were stratified by level of experience performing the requisite clinical competencies. All subjects completed knowledge and performance assessments. Reliability was established using test-retest (Pearson correlation) and internal consistency (Cronbach’s alpha) for knowledge and performance assessments. Results: Significantly higher scores for subjects with greater levels of experience, compared to those with less experience, established construct validity for each assessment instrument (p < .01). Significant correlations between test-retest outcomes indicated measurement reliability (p < .01). Cronbach’s alpha for knowledge and performance scores demonstrated excellent internal consistency. Conclusions: Psychometric evidence establishes the value of assessment for identifying and remedying critical competency performance gaps.},
keywords = {DoD, MedVR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Klotz, Jessica; Madsen, James M.; Hurst, Charles G.; Talbot, Thomas
Training Effects for First-responder Competency in Cholinergic Crisis Management Proceedings Article
In: Proceedings of Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2014, Orlando, FL, 2014.
Abstract | Links | BibTeX | Tags: DoD, MedVR, UARC
@inproceedings{klotz_training_2014,
title = {Training Effects for First-responder Competency in Cholinergic Crisis Management},
author = {Jessica Klotz and James M. Madsen and Charles G. Hurst and Thomas Talbot},
url = {http://ict.usc.edu/pubs/Training%20Effects%20for%20First-responder%20Competency%20in%20Cholinergic%20Crisis%20Management.pdf},
year = {2014},
date = {2014-12-01},
booktitle = {Proceedings of Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2014},
address = {Orlando, FL},
abstract = {Military and civilian first-responders must be able to recognize and effectively manage mass disaster casualties. Clinical management of injuries resulting from nerve agents provides different challenges for first responders than those of conventional weapons. We evaluated the impact of a mixed-methods training program on competency acquisition in cholinergic crisis clinical management. Methods: We developed a multimedia and simulation-based training program based on the more comprehensive USAMRICD courses. The training program was designed to provide first-responders with the necessary abilities to recognize and manage a mass casualty cholinergic crisis event. Training included a learner-controlled multimedia iPad app and hands-on instruction using SimMan3G™ mannequin simulators. We evaluated the impact of the training through a purposively selected sample of 204 civilian and military first responders who had not previously completed either of the referenced USAMRICD courses. We assessed knowledge, performance, affect, and self-efficacy measures pre- and post-training using previously validated assessment instruments. We calculated results using analysis of variance with repeated measures, with statistical significance set at p < .05. Results: Analyses demonstrated a significant improvement (p = .000) across all domains (knowledge, performance, self-efficacy, and affect). Knowledge scores increased from 60% to 81% correct. Performance scores increased from 16% to 68% correct. Self-efficacy scores increased from 51% to 87% confidence in ability to effectively manage a cholinergic crisis event. Affect scores increased from 75% to 81% personal comfort during procedures. Conclusions: These findings could aid in the selection of instructional methodologies available to a broad community of first-responder personnel in military and civilian service. Although less comprehensive than the USAMRICD courses, training outcomes associated with this easily distributed instruction set demonstrated its value in increasing the competency of first responders in recognizing and managing a mass casualty cholinergic event. Retention outcomes are in process.},
keywords = {DoD, MedVR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Calvo, Rafael A.; D'Mello, Sidney; Gratch, Jonathan; Kappas, Arvid (Ed.)
The Oxford Handbook of Affective Computing Book
Oxford University Press, Oxford ; New York, 2014, ISBN: 978-0-19-994223-7.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@book{calvo_oxford_2014,
title = {The Oxford Handbook of Affective Computing},
editor = {Rafael A. Calvo and Sidney D'Mello and Jonathan Gratch and Arvid Kappas},
url = {https://global.oup.com/academic/product/the-oxford-handbook-of-affective-computing-9780199942237?cc=us&lang=en&},
isbn = {978-0-19-994223-7},
year = {2014},
date = {2014-12-01},
publisher = {Oxford University Press},
address = {Oxford ; New York},
abstract = {The Oxford Handbook of Affective Computing aims to be the definitive reference for research in the burgeoning field of affective computing—a field that turns 18 at the time of writing. This introductory chapter is intended to convey the motivations of the editors and the content of the chapters in order to orient readers to the handbook. It begins with a very high-level overview of the field of affective computing, along with a bit of reminiscence about its formation, short history, and major accomplishments. The five main sections of the handbook—history and theory, detection, generation, methodologies, and applications—are then discussed, along with a bird’s eye view of the 41 chapters covered in the book. The introduction is devoted to short descriptions of the chapters featured in the handbook. A brief description of the Glossary concludes the Introduction.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {book}
}
Venek, Verena; Scherer, Stefan; Morency, Louis-Philippe; Rizzo, Albert; Pestian, John
Adolescent Suicidal Risk Assessment in Clinician-Patient Interaction: A Study of Verbal and Acoustic Behaviors Proceedings Article
In: Spoken Language Technology Workshop (SLT), 2014 IEEE, pp. 277–282, IEEE, South Lake Tahoe, NV, 2014, ISBN: 978-1-4799-7129-9.
Abstract | Links | BibTeX | Tags: MedVR, UARC, Virtual Humans
@inproceedings{venek_adolescent_2014,
title = {Adolescent Suicidal Risk Assessment in Clinician-Patient Interaction: A Study of Verbal and Acoustic Behaviors},
author = {Verena Venek and Stefan Scherer and Louis-Philippe Morency and Albert Rizzo and John Pestian},
url = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=7078587},
doi = {10.1109/SLT.2014.7078587},
isbn = {978-1-4799-7129-9},
year = {2014},
date = {2014-12-01},
booktitle = {Spoken Language Technology Workshop (SLT), 2014 IEEE},
pages = {277–282},
publisher = {IEEE},
address = {South Lake Tahoe, NV},
abstract = {Suicide among adolescents is a major public health problem: it is the third leading cause of death in the US for ages 13-18. Up to now, there are no objective ways to assess suicidal risk, i.e. whether a patient is non-suicidal, a suicidal re-attempter (i.e. repeater) or a suicidal non-repeater (i.e. an individual with one suicide attempt or showing signs of suicidal gestures or ideation). Therefore, features of the conversation, including verbal information and nonverbal acoustic information, were investigated from 60 audio-recorded interviews of 30 suicidal (13 repeaters and 17 non-repeaters) and 30 non-suicidal adolescents interviewed by a social worker. The interaction between clinician and patients was statistically analyzed to reveal differences between suicidal vs. non-suicidal adolescents and to investigate suicidal repeaters' behaviors in comparison to suicidal non-repeaters. By using a hierarchical ensemble classifier, we were able to successfully discriminate non-suicidal patients, suicidal repeaters and suicidal non-repeaters.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
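The hierarchical ensemble classifier named in the abstract above can be pictured as a two-stage cascade: first separate suicidal from non-suicidal patients, then separate repeaters from non-repeaters within the suicidal group. The following is a minimal sketch of that structure, assuming generic per-interview feature vectors; the random-forest stages and label coding are illustrative choices, not the authors' implementation.

import numpy as np
from sklearn.ensemble import RandomForestClassifier

class HierarchicalRiskClassifier:
    """Stage 1: suicidal vs. non-suicidal; stage 2: repeater vs. non-repeater."""

    def __init__(self):
        self.stage1 = RandomForestClassifier(n_estimators=200, random_state=0)
        self.stage2 = RandomForestClassifier(n_estimators=200, random_state=0)

    def fit(self, X, y):
        # Labels: 0 = non-suicidal, 1 = suicidal non-repeater, 2 = suicidal repeater.
        X, y = np.asarray(X), np.asarray(y)
        self.stage1.fit(X, (y > 0).astype(int))
        mask = y > 0
        self.stage2.fit(X[mask], (y[mask] == 2).astype(int))
        return self

    def predict(self, X):
        X = np.asarray(X)
        pred = np.zeros(len(X), dtype=int)
        mask = self.stage1.predict(X).astype(bool)
        if mask.any():
            pred[mask] = 1 + self.stage2.predict(X[mask])
        return pred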
Fyffe, Graham; Jones, Andrew; Alexander, Oleg; Ichikari, Ryosuke; Debevec, Paul
Driving High-Resolution Facial Scans with Video Performance Capture Journal Article
In: ACM Transactions on Graphics (TOG), vol. 34, no. 1, pp. 1–13, 2014.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@article{fyffe_driving_2014,
title = {Driving High-Resolution Facial Scans with Video Performance Capture},
author = {Graham Fyffe and Andrew Jones and Oleg Alexander and Ryosuke Ichikari and Paul Debevec},
url = {http://ict.usc.edu/pubs/Driving%20High-Resolution%20Facial%20Scans%20with%20Video%20Performance%20Capture.pdf},
year = {2014},
date = {2014-11-01},
journal = {ACM Transactions on Graphics (TOG)},
volume = {34},
number = {1},
pages = {1–13},
abstract = {We present a process for rendering a realistic facial performance with control of viewpoint and illumination. The performance is based on one or more high-quality geometry and reflectance scans of an actor in static poses, driven by one or more video streams of a performance. We compute optical flow correspondences between neighboring video frames, and a sparse set of correspondences between static scans and video frames. The latter are made possible by leveraging the relightability of the static 3D scans to match the viewpoint(s) and appearance of the actor in videos taken in arbitrary environments. As optical flow tends to compute proper correspondence for some areas but not others, we also compute a smoothed, per-pixel confidence map for every computed flow, based on normalized cross-correlation. These flows and their confidences yield a set of weighted triangulation constraints among the static poses and the frames of a performance. Given a single artist-prepared face mesh for one static pose, we optimally combine the weighted triangulation constraints, along with a shape regularization term, into a consistent 3D geometry solution over the entire performance that is drift free by construction. In contrast to previous work, even partial correspondences contribute to drift minimization, for example, where a successful match is found in the eye region but not the mouth. Our shape regularization employs a differential shape term based on a spatially varying blend of the differential shapes of the static poses and neighboring dynamic poses, weighted by the associated flow confidences. These weights also permit dynamic reflectance maps to be produced for the performance by blending the static scan maps. Finally, as the geometry and maps are represented on a consistent artist-friendly mesh, we render the resulting high-quality animated face geometry and animated reflectance maps using standard rendering tools.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {article}
}
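The per-pixel flow confidence described in the abstract above is based on normalized cross-correlation (NCC). A minimal sketch of such a confidence map, assuming grayscale float images and a box-filter window (the paper's exact window size and weighting are not specified here):

import numpy as np
from scipy.ndimage import uniform_filter

def ncc_confidence(warped, target, win=7, eps=1e-6):
    """Per-pixel NCC between a flow-warped source frame and the target frame.

    warped, target: 2-D float arrays. Returns values in [0, 1], where higher
    means the local match (and hence the flow) is more trustworthy.
    """
    mu_w, mu_t = uniform_filter(warped, win), uniform_filter(target, win)
    dw, dt = warped - mu_w, target - mu_t
    cov = uniform_filter(dw * dt, win)
    var_w = uniform_filter(dw * dw, win)
    var_t = uniform_filter(dt * dt, win)
    ncc = cov / np.sqrt(var_w * var_t + eps)
    return np.clip(ncc, 0.0, 1.0)  # keep only positive correlation as confidence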
Ghosh, Sayan; Chatterjee, Moitreya; Morency, Louis-Philippe
A Multimodal Context-based Approach for Distress Assessment Proceedings Article
In: Proceedings of the 16th International Conference on Multimodal Interaction, pp. 240–246, ACM Press, Istanbul, Turkey, 2014, ISBN: 978-1-4503-2885-2.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{ghosh_multimodal_2014,
title = {A Multimodal Context-based Approach for Distress Assessment},
author = {Sayan Ghosh and Moitreya Chatterjee and Louis-Philippe Morency},
url = {http://dl.acm.org/citation.cfm?doid=2663204.2663274},
doi = {10.1145/2663204.2663274},
isbn = {978-1-4503-2885-2},
year = {2014},
date = {2014-11-01},
booktitle = {Proceedings of the 16th International Conference on Multimodal Interaction},
pages = {240–246},
publisher = {ACM Press},
address = {Istanbul, Turkey},
abstract = {The increasing prevalence of psychological distress disorders, such as depression and post-traumatic stress, necessitates a serious effort to create new tools and technologies to help with their diagnosis and treatment. In recent years, new computational approaches were proposed to objectively analyze patient non-verbal behaviors over the duration of the entire interaction between the patient and the clinician. In this paper, we go beyond non-verbal behaviors and propose a tri-modal approach which integrates verbal behaviors with acoustic and visual behaviors to analyze psychological distress during the course of the dyadic semi-structured interviews. Our approach exploits the advantages of the dyadic nature of these interactions to contextualize the participant responses based on the affective components (intimacy and polarity levels) of the questions. We validate our approach using one of the largest corpora of semi-structured interviews for distress assessment, which consists of 154 multimodal dyadic interactions. Our results show significant improvement on distress prediction performance when integrating verbal behaviors with acoustic and visual behaviors. In addition, our analysis shows that contextualizing the responses improves the prediction performance, most significantly with positive and intimate questions.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
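The contextualization idea above can be made concrete by concatenating per-response features from all three modalities with the affective attributes of the preceding question. A hedged sketch, with an invented feature layout and a logistic-regression classifier standing in for whatever model the authors used:

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

def fuse_response(acoustic, visual, verbal, is_intimate, is_positive):
    """Early fusion: modality feature vectors plus question-context flags."""
    context = np.array([float(is_intimate), float(is_positive)])
    return np.concatenate([acoustic, visual, verbal, context])

# Rows of X are fused response vectors; y holds binary distress labels.
distress_model = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000))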
Park, Sunghyun; Shim, Han Suk; Chatterjee, Moitreya; Sagae, Kenji; Morency, Louis-Philippe
Computational Analysis of Persuasiveness in Social Multimedia: A Novel Dataset and Multimodal Prediction Approach Proceedings Article
In: Proceedings of the 16th International Conference on Multimodal Interaction, pp. 50–57, ACM Press, 2014, ISBN: 978-1-4503-2885-2.
Abstract | Links | BibTeX | Tags: The Narrative Group, UARC, Virtual Humans
@inproceedings{park_computational_2014,
title = {Computational Analysis of Persuasiveness in Social Multimedia: A Novel Dataset and Multimodal Prediction Approach},
author = {Sunghyun Park and Han Suk Shim and Moitreya Chatterjee and Kenji Sagae and Louis-Philippe Morency},
url = {http://dl.acm.org/citation.cfm?doid=2663204.2663260},
doi = {10.1145/2663204.2663260},
isbn = {978-1-4503-2885-2},
year = {2014},
date = {2014-11-01},
booktitle = {Proceedings of the 16th International Conference on Multimodal Interaction},
pages = {50–57},
publisher = {ACM Press},
abstract = {Our lives are heavily influenced by persuasive communication, and it is essential in almost any type of social interaction, from business negotiation to conversation with our friends and family. With the rapid growth of social multimedia websites, it is becoming ever more important and useful to understand persuasiveness in the context of social multimedia content online. In this paper, we introduce our newly created multimedia corpus of 1,000 movie review videos obtained from a social multimedia website called ExpoTV.com, which will be made freely available to the research community. Our research results presented here revolve around the following three main research hypotheses. Firstly, we show that computational descriptors derived from verbal and nonverbal behavior can be predictive of persuasiveness. We further show that combining descriptors from multiple communication modalities (audio, text and visual) improves the prediction performance compared to using those from a single modality alone. Secondly, we investigate if having prior knowledge of a speaker expressing a positive or negative opinion helps better predict the speaker's persuasiveness. Lastly, we show that it is possible to make comparable predictions of persuasiveness by only looking at thin slices (shorter time windows) of a speaker's behavior.},
keywords = {The Narrative Group, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Scherer, Stefan; Hammal, Zakia; Yang, Ying; Morency, Louis-Philippe; Cohn, Jeffrey F.
Dyadic Behavior Analysis in Depression Severity Assessment Interviews Proceedings Article
In: Proceedings of the 16th International Conference on Multimodal Interaction, pp. 112–119, ACM Press, Istanbul, Turkey, 2014, ISBN: 978-1-4503-2885-2.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{scherer_dyadic_2014,
title = {Dyadic Behavior Analysis in Depression Severity Assessment Interviews},
author = {Stefan Scherer and Zakia Hammal and Ying Yang and Louis-Philippe Morency and Jeffrey F. Cohn},
url = {http://dl.acm.org/citation.cfm?doid=2663204.2663238},
doi = {10.1145/2663204.2663238},
isbn = {978-1-4503-2885-2},
year = {2014},
date = {2014-11-01},
booktitle = {Proceedings of the 16th International Conference on Multimodal Interaction},
pages = {112–119},
publisher = {ACM Press},
address = {Istanbul, Turkey},
abstract = {Previous literature suggests that depression impacts vocal timing of both participants and clinical interviewers but is mixed with respect to acoustic features. To investigate further, 57 middle-aged adults (men and women) with Major Depressive Disorder and their clinical interviewers (all women) were studied. Participants were interviewed for depression severity on up to four occasions over a 21 week period using the Hamilton Rating Scale for Depression (HRSD), which is a criterion measure for depression severity in clinical trials. Acoustic features were extracted for both participants and interviewers using the COVAREP toolbox. Missing data occurred due to missed appointments, technical problems, or insufficient vocal samples. Data from 36 participants and their interviewers met criteria and were included for analysis to compare high and low depression severity. Acoustic features for participants varied between men and women as expected, and failed to vary with depression severity for participants. For interviewers, acoustic characteristics strongly varied with severity of the interviewee's depression. Accommodation - the tendency of interactants to adapt their communicative behavior to each other - between interviewers and interviewees was inversely related to depression severity. These findings suggest that interviewers modify their acoustic features in response to depression severity, and depression severity strongly impacts interpersonal accommodation.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
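Accommodation, as used in the abstract above, can be approximated by correlating matched per-turn acoustic features of the two interactants; high positive correlation indicates convergent behavior. A small illustrative helper (a proxy, not necessarily the paper's exact statistic):

import numpy as np

def accommodation(interviewer_feat, participant_feat):
    """Pearson correlation of matched per-turn feature series (e.g., mean f0)."""
    a = np.asarray(interviewer_feat, dtype=float)
    b = np.asarray(participant_feat, dtype=float)
    a, b = a - a.mean(), b - b.mean()
    denom = np.sqrt((a * a).sum() * (b * b).sum())
    return float((a * b).sum() / denom) if denom else 0.0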
Nouri, Elnaz
Training Agents by Crowds Proceedings Article
In: Proceedings of HCOMP 2014, Pittsburgh, PA, 2014.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{nouri_training_2014,
title = {Training Agents by Crowds},
author = {Elnaz Nouri},
url = {http://ict.usc.edu/pubs/Training%20Agents%20by%20Crowds.pdf},
year = {2014},
date = {2014-11-01},
booktitle = {Proceedings of HCOMP 2014},
address = {Pittsburgh, PA},
abstract = {On-line learning algorithms are particularly suitable for developing interactive computational agents. These algorithms can be used to teach the agents the abilities needed for engaging in social interactions with humans. If humans are used as teachers in the context of on-line learning algorithms, a serious challenge arises: their lack of commitment and availability during the required extensive training. In this work we address this challenge by showing how “crowds of human workers” rather than “single users” can be recruited as teachers for training each learning agent. This paper proposes a framework for training agents by crowds. The focus of this proposal is narrowed by using Reinforcement Learning as the human guidance method for teaching agents how to engage in simple negotiation games (such as the Ultimatum Bargaining Game and the Dictator Game).},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Feng, Andrew; Lucas, Gale; Marsella, Stacy; Suma, Evan; Chiu, Chung-Cheng; Casas, Dan; Shapiro, Ari
Acting the Part: The Role of Gesture on Avatar Identity Proceedings Article
In: Proceedings of the Seventh International Conference on Motion in Games (MIG 2014), pp. 49–54, ACM Press, Playa Vista, CA, 2014, ISBN: 978-1-4503-2623-0.
Abstract | Links | BibTeX | Tags: MxR, Social Simulation, UARC, Virtual Humans
@inproceedings{feng_acting_2014,
title = {Acting the Part: The Role of Gesture on Avatar Identity},
author = {Andrew Feng and Gale Lucas and Stacy Marsella and Evan Suma and Chung-Cheng Chiu and Dan Casas and Ari Shapiro},
url = {http://dl.acm.org/citation.cfm?doid=2668064.2668102},
doi = {10.1145/2668064.2668102},
isbn = {978-1-4503-2623-0},
year = {2014},
date = {2014-11-01},
booktitle = {Proceedings of the Seventh International Conference on Motion in Games (MIG 2014)},
pages = {49–54},
publisher = {ACM Press},
address = {Playa Vista, CA},
abstract = {Recent advances in scanning technology have enabled the widespread capture of 3D character models based on human subjects. However, in order to generate a recognizable 3D avatar, the movement and behavior of the human subject should be captured and replicated as well. We present a method of generating a 3D model from a scan, as well as a method to incorporate a subject’s style of gesturing into a 3D character. We present a study which shows that 3D characters that used the same gestural style as their original human subjects were more recognizable as the original subject than those that did not.},
keywords = {MxR, Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Scherer, Stefan; Stratou, Giota; Lucas, Gale; Mahmoud, Marwa; Boberg, Jill; Gratch, Jonathan; Rizzo, Albert (Skip); Morency, Louis-Philippe
Automatic audiovisual behavior descriptors for psychological disorder analysis Journal Article
In: Image and Vision Computing Journal, vol. 32, no. 10, pp. 648–658, 2014, ISSN: 02628856.
Abstract | Links | BibTeX | Tags: MedVR, UARC, Virtual Humans
@article{scherer_automatic_2014,
title = {Automatic audiovisual behavior descriptors for psychological disorder analysis},
author = {Stefan Scherer and Giota Stratou and Gale Lucas and Marwa Mahmoud and Jill Boberg and Jonathan Gratch and Albert (Skip) Rizzo and Louis-Philippe Morency},
url = {http://linkinghub.elsevier.com/retrieve/pii/S0262885614001000},
doi = {10.1016/j.imavis.2014.06.001},
issn = {02628856},
year = {2014},
date = {2014-10-01},
journal = {Image and Vision Computing Journal},
volume = {32},
number = {10},
pages = {648–658},
abstract = {We investigate the capabilities of automatic audiovisual nonverbal behavior descriptors to identify indicators of psychological disorders such as depression, anxiety, and post-traumatic stress disorder. Due to strong correlations between these disorders, as measured with standard self-assessment questionnaires in this study, we focus our investigations in particular on a generic distress measure as identified using factor analysis. Within this work, we seek to confirm and enrich the present state of the art, predominantly based on qualitative manual annotations, with automatic quantitative behavior descriptors. We propose a number of nonverbal behavior descriptors that can be automatically estimated from audiovisual signals. Such automatic behavior descriptors could be used to support healthcare providers with quantified and objective observations that could ultimately improve clinical assessment. We evaluate our work on the dataset called the Distress Assessment Interview Corpus (DAIC) which comprises dyadic interactions between a confederate interviewer and a paid participant. Our evaluation on this dataset shows correlation of our automatic behavior descriptors with the derived general distress measure. Our analysis also includes a deeper study of self-adaptor and fidgeting behaviors based on detailed annotations of where these behaviors occur.},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Joshi, Himanshu; Rosenbloom, Paul S.; Ustun, Volkan
Isolated word recognition in the Sigma cognitive architecture Journal Article
In: Biologically Inspired Cognitive Architectures, vol. 10, pp. 1–9, 2014, ISSN: 2212683X.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, UARC
@article{joshi_isolated_2014,
title = {Isolated word recognition in the Sigma cognitive architecture},
author = {Himanshu Joshi and Paul S. Rosenbloom and Volkan Ustun},
url = {http://linkinghub.elsevier.com/retrieve/pii/S2212683X14000644},
doi = {10.1016/j.bica.2014.11.001},
issn = {2212683X},
year = {2014},
date = {2014-10-01},
journal = {Biologically Inspired Cognitive Architectures},
volume = {10},
pages = {1–9},
abstract = {Symbolic architectures are effective at complex cognitive reasoning, but typically are incapable of important forms of sub-cognitive processing – such as perception – without distinct modules connected to them via low-bandwidth interfaces. Neural architectures, in contrast, may be quite effective at the latter, but typically struggle with the former. Sigma has been designed to leverage the state-of-the-art hybrid (discrete + continuous) mixed (symbolic + probabilistic) capability of graphical models to provide in a uniform non-modular fashion effective forms of, and integration across, both cognitive and sub-cognitive behavior. Here it is shown that Sigma is not only capable of performing a simple variant of speech recognition via the same knowledge structures and reasoning algorithm used for cognitive processing, but also of leveraging its existing knowledge templates and learning algorithm to acquire automatically most of the structures and parameters needed for this recognition activity.},
keywords = {CogArch, Cognitive Architecture, UARC},
pubstate = {published},
tppubtype = {article}
}
Nazarian, Angela; Nouri, Elnaz; Traum, David
Initiative Patterns in Dialogue Genres Proceedings Article
In: Proceedings of Semdial 2014, Edinburgh, UK, 2014.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{nazarian_initiative_2014,
title = {Initiative Patterns in Dialogue Genres},
author = {Angela Nazarian and Elnaz Nouri and David Traum},
url = {http://ict.usc.edu/pubs/Initiative%20Patterns%20in%20Dialogue%20Genres.pdf},
year = {2014},
date = {2014-09-01},
booktitle = {Proceedings of Semdial 2014},
address = {Edinburgh, UK},
abstract = {One of the ways of distinguishing different dialogue genres is the differences in patterns of interaction between the participants. Morbini et al. (2013) informally define dialogue genres on the basis of features like user vs system initiative, amongst other criteria. In this paper, we apply the multi-label initiative annotation scheme and related features from (Nouri and Traum, 2014) to a set of dialogue corpora from different domains. In our initial study, we examine two question-answering domains, a “slot-filling” service application domain, and several human-human negotiation domains.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Baltrušaitis, Tadas; Robinson, Peter; Morency, Louis-Philippe
Continuous Conditional Neural Fields for Structured Regression Book Section
In: Computer Vision–ECCV 2014, pp. 593–608, Springer, 2014.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@incollection{baltrusaitis_continuous_2014,
title = {Continuous Conditional Neural Fields for Structured Regression},
author = {Tadas Baltrušaitis and Peter Robinson and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Continuous%20Conditional%20Neural%20Fields%20for%20Structured%20Regression.pdf},
year = {2014},
date = {2014-09-01},
booktitle = {Computer Vision–ECCV 2014},
pages = {593–608},
publisher = {Springer},
abstract = {An increasing number of computer vision and pattern recognition problems require structured regression techniques. Problems like human pose estimation, unsegmented action recognition, emotion prediction and facial landmark detection have temporal or spatial output dependencies that regular regression techniques do not capture. In this paper we present continuous conditional neural fields (CCNF), a novel structured regression model that can learn non-linear input-output dependencies, and model temporal and spatial output relationships of varying length sequences. We propose two instances of our CCNF framework: Chain-CCNF for time series modelling, and Grid-CCNF for spatial relationship modelling. We evaluate our model on five public datasets spanning three different regression problems: facial landmark detection in the wild, emotion prediction in music and facial action unit recognition. Our CCNF model demonstrates state-of-the-art performance on all of the datasets used.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Feng, Andrew; Shapiro, Ari; Lhommet, Margaux; Marsella, Stacy
Embodied Autonomous Agents Book Section
In: Handbook of Virtual Environments: Design, Implementation, and Applications, pp. 335–352, 2014.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC, Virtual Humans
@incollection{feng_embodied_2014,
title = {Embodied Autonomous Agents},
author = {Andrew Feng and Ari Shapiro and Margaux Lhommet and Stacy Marsella},
url = {http://books.google.com/books?hl=en&lr=&id=7zzSBQAAQBAJ&oi=fnd&pg=PP1&dq=+Handbook+of+Virtual+Environments&ots=Vx3ia0S2Uu&sig=LaVbSdoG3FahlbVYbuCxLmKgFIA#v=onepage&q=Handbook%20of%20Virtual%20Environments&f=false},
year = {2014},
date = {2014-09-01},
booktitle = {Handbook of Virtual Environments: Design, Implementation, and Applications},
pages = {335–352},
abstract = {Over the last decade, virtual environments have been used extensively for a wide range of applications, from training systems to video games. Virtual humans are animated characters that are designed to populate these environments and to interact with the objects of the world as well as with the user. A virtual agent must perceive the world in which it exists, reason about those perceptions, and decide how to act on them in pursuit of its own agenda.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Shivakumar, Prashanth Gurunath; Potamianos, Alexandros; Lee, Sungbok; Narayanan, Shrikanth
Improving Speech Recognition for Children using Acoustic Adaptation and Pronunciation Modeling Journal Article
In: Proceedings of Workshop on Child Computer Interaction, 2014.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{shivakumar_improving_2014,
title = {Improving Speech Recognition for Children using Acoustic Adaptation and Pronunciation Modeling},
author = {Prashanth Gurunath Shivakumar and Alexandros Potamianos and Sungbok Lee and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Improving%20Speech%20Recognition%20for%20Children%20using%20Acoustic%20Adaptation%20and%20Pronunciation%20Modeling.pdf},
year = {2014},
date = {2014-09-01},
journal = {Proceedings of Workshop on Child Computer Interaction},
abstract = {Developing a robust Automatic Speech Recognition (ASR) system for children is a challenging task because of increased variability in acoustic and linguistic correlates as a function of young age. The acoustic variability is mainly due to the developmental changes associated with vocal tract growth. On the linguistic side, the variability is associated with limited knowledge of vocabulary, pronunciations and other linguistic constructs. This paper presents a preliminary study towards better acoustic modeling, pronunciation modeling and front-end processing for children’s speech. Results are presented as a function of age. Speaker adaptation significantly reduces mismatch and variability, improving recognition results across age groups. In addition, the introduction of pronunciation modeling shows promising performance improvements.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Huang, Che-Wei; Xiao, Bo; Georgiou, Panayiotis G.; Narayanan, Shrikanth S.
Unsupervised Speaker Diarization Using Riemannian Manifold Clustering Proceedings Article
In: Fifteenth Annual Conference of the International Speech Communication Association, Singapore, 2014.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{huang_unsupervised_2014,
title = {Unsupervised Speaker Diarization Using Riemannian Manifold Clustering},
author = {Che-Wei Huang and Bo Xiao and Panayiotis G. Georgiou and Shrikanth S. Narayanan},
url = {http://ict.usc.edu/pubs/Unsupervised%20Speaker%20Diarization%20Using%20Riemannian%20Manifold%20Clustering.pdf},
year = {2014},
date = {2014-09-01},
booktitle = {Fifteenth Annual Conference of the International Speech Communication Association},
address = {Singapore},
abstract = {We address the problem of speaker clustering for robust unsupervised speaker diarization. We model each speaker-homogeneous segment as one single full multivariate Gaussian probability density function (pdf) and take into consideration the Riemannian property of Gaussian pdfs. By assuming that segments from different speakers lie on different (possibly intersected) sub-manifolds of the manifold of Gaussian pdfs, we formulate the original problem as a Riemannian manifold clustering problem. To apply the computationally simple Riemannian locally linear embedding (LLE) algorithm, we impose a constraint on the length of each segment so as to ensure the fitness of single-Gaussian modeling and to increase the chance that all k-nearest neighbors of a pdf are from the same submanifold (speaker). Experiments on the microphone-recorded conversational interviews from the NIST 2010 speaker recognition evaluation set demonstrate promising results of less than 1% DER.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
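To illustrate the segment-as-Gaussian representation: each fixed-length segment becomes a full-covariance Gaussian, and segments are grouped by a distance between pdfs. The paper uses Riemannian LLE; the sketch below substitutes a simpler symmetrized-KL affinity with spectral clustering to convey the idea, not to reproduce the method.

import numpy as np
from sklearn.cluster import SpectralClustering

def sym_kl(mu1, S1, mu2, S2):
    """Symmetrized KL divergence between two full-covariance Gaussians."""
    d = len(mu1)
    iS1, iS2 = np.linalg.inv(S1), np.linalg.inv(S2)
    dm = mu1 - mu2
    _, ld1 = np.linalg.slogdet(S1)
    _, ld2 = np.linalg.slogdet(S2)
    kl12 = 0.5 * (np.trace(iS2 @ S1) + dm @ iS2 @ dm - d + ld2 - ld1)
    kl21 = 0.5 * (np.trace(iS1 @ S2) + dm @ iS1 @ dm - d + ld1 - ld2)
    return kl12 + kl21

def cluster_segments(gaussians, n_speakers=2):
    """gaussians: list of (mean, covariance) pairs, one per segment."""
    n = len(gaussians)
    D = np.zeros((n, n))
    for i in range(n):
        for j in range(i + 1, n):
            D[i, j] = D[j, i] = sym_kl(*gaussians[i], *gaussians[j])
    A = np.exp(-D / (D.mean() + 1e-9))  # distances -> affinities
    return SpectralClustering(n_clusters=n_speakers, affinity="precomputed").fit_predict(A)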
Zadeh, AmirAli B.; Sagae, Kenji; Morency, Louis Philippe
Towards Learning Nonverbal Identities from the Web: Automatically Identifying Visually Accentuated Words Proceedings Article
In: Intelligent Virtual Agents, pp. 496–503, Springer, Boston, MA, 2014.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{zadeh_towards_2014,
title = {Towards Learning Nonverbal Identities from the Web: Automatically Identifying Visually Accentuated Words},
author = {AmirAli B. Zadeh and Kenji Sagae and Louis Philippe Morency},
url = {http://ict.usc.edu/pubs/Towards%20Learning%20Nonverbal%20Identities%20from%20the%20Web%20-%20Automatically%20Identifying%20Visually-Accentuated%20Words.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Intelligent Virtual Agents},
pages = {496–503},
publisher = {Springer},
address = {Boston, MA},
abstract = {This paper presents a novel long-term idea to learn automatically from online multimedia content, such as videos from YouTube channels, a portfolio of nonverbal identities in the form of computational representation of prototypical gestures of a speaker. As a first step towards this vision, this paper presents proof-of-concept experiments to automatically identify visually accentuated words from a collection of online videos of the same person. The experimental results are promising, with many accentuated words automatically identified and specific head motion patterns associated with these words.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Xu, Yuyu; Pelachaud, Catherine; Marsella, Stacy
Compound Gesture Generation: A Model Based on Ideational Units Proceedings Article
In: Intelligent Virtual Agents, pp. 477–491, Springer, Boston, MA, 2014.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC, Virtual Humans
@inproceedings{xu_compound_2014,
title = {Compound Gesture Generation: A Model Based on Ideational Units},
author = {Yuyu Xu and Catherine Pelachaud and Stacy Marsella},
url = {http://ict.usc.edu/pubs/Compound%20Gesture%20Generation%20-%20A%20Model%20Based%20on%20Ideational%20Units.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Intelligent Virtual Agents},
pages = {477–491},
publisher = {Springer},
address = {Boston, MA},
abstract = {This work presents a hierarchical framework that generates continuous gesture animation performance for virtual characters. As opposed to approaches that focus more on realizing individual gestures, the focus of this work is on the relation between gestures as part of an overall gesture performance. Following Calbris’ work [3], our approach is to structure the performance around ideational units and determine gestural features within and across these ideational units. Furthermore, we use Calbris’ work on the relation between form and meaning in gesture to help inform how an individual gesture’s expressivity is manipulated. Our framework takes in high-level communicative function descriptions, generates behavior descriptions and realizes them using our character animation engine. We define the specifications for these different levels of description. Finally, we show the general results as well as experiments illustrating the impacts of the key features.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lhommet, Margot; Marsella, Stacy
Metaphoric Gestures: Towards Grounded Mental Spaces Proceedings Article
In: Intelligent Virtual Agents, pp. 264–274, Springer, Boston, MA, 2014.
Abstract | Links | BibTeX | Tags: Social Simulation, UARC, Virtual Humans
@inproceedings{lhommet_metaphoric_2014,
title = {Metaphoric Gestures: Towards Grounded Mental Spaces},
author = {Margot Lhommet and Stacy Marsella},
url = {http://ict.usc.edu/pubs/Metaphoric%20Gestures%20-%20Towards%20Grounded%20Mental%20Spaces.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Intelligent Virtual Agents},
pages = {264–274},
publisher = {Springer},
address = {Boston, MA},
abstract = {Gestures are related to the mental states and unfolding processes of thought, reasoning and verbal language production. This is especially apparent in the case of metaphors and metaphoric gestures. For example, talking about the importance of an idea by calling it a big idea and gesturing to indicate large size is a manifestation of the use of metaphors in language and gesture. We propose a computational model of the influence of conceptual metaphors on gestures that maps from mental state representations of ideas to their expression in concrete, physical metaphoric gestures. This model relies on conceptual primary metaphors to map the abstract elements of the mental space to concrete physical elements that can be conveyed with gestures.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
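The mapping the model formalizes can be caricatured as a lookup from primary conceptual metaphors to concrete gesture parameters. A toy sketch; the metaphor inventory, property names and parameter names are all invented for illustration:

PRIMARY_METAPHORS = {
    "importance": ("SIGNIFICANT IS BIG", lambda v: {"hand_aperture": 0.2 + 0.8 * v}),
    "quantity":   ("MORE IS UP",         lambda v: {"hand_height": v}),
    "similarity": ("SIMILAR IS CLOSE",   lambda v: {"hand_distance": 1.0 - v}),
}

def metaphoric_gesture(mental_state):
    """Map abstract property values in [0, 1] to physical gesture parameters."""
    params, provenance = {}, []
    for prop, value in mental_state.items():
        if prop in PRIMARY_METAPHORS:
            name, mapping = PRIMARY_METAPHORS[prop]
            params.update(mapping(value))
            provenance.append(name)
    return params, provenance

# metaphoric_gesture({"importance": 0.9}) -> a wide, "big idea" gesture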
Lucas, Gale M.; Gratch, Jonathan; King, Aisha; Morency, Louis-Philippe
It’s only a computer: Virtual humans increase willingness to disclose Journal Article
In: Computers in Human Behavior, vol. 37, pp. 94–100, 2014, ISSN: 07475632.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@article{lucas_its_2014,
title = {It’s only a computer: Virtual humans increase willingness to disclose},
author = {Gale M. Lucas and Jonathan Gratch and Aisha King and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/It%27s%20Only%20a%20Computer%20-%20Virtual%20Humans%20Increase%20Willingness%20to%20Disclose.pdf},
doi = {10.1016/j.chb.2014.04.043},
issn = {07475632},
year = {2014},
date = {2014-08-01},
journal = {Computers in Human Behavior},
volume = {37},
pages = {94–100},
abstract = {Research has begun to explore the use of virtual humans (VHs) in clinical interviews (Bickmore, Gruber, & Picard, 2005). When designed as supportive and “safe” interaction partners, VHs may improve such screenings by increasing willingness to disclose information (Gratch, Wang, Gerten, & Fast, 2007). In health and mental health contexts, patients are often reluctant to respond honestly. In the context of health-screening interviews, we report a study in which participants interacted with a VH interviewer and were led to believe that the VH was controlled by either humans or automation. As predicted, compared to those who believed they were interacting with a human operator, participants who believed they were interacting with a computer reported lower fear of self-disclosure, lower impression management, displayed their sadness more intensely, and were rated by observers as more willing to disclose. These results suggest that automated VHs can help overcome a significant barrier to obtaining truthful patient information.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Azmandian, Mahdi; Bolas, Mark; Suma, Evan
Countering User Deviation During Redirected Walking Proceedings Article
In: Proceedings of the ACM Symposium on Applied Perception, Vancouver, British Columbia, Canada, 2014.
Abstract | Links | BibTeX | Tags: MxR, UARC
@inproceedings{azmandian_countering_2014,
title = {Countering User Deviation During Redirected Walking},
author = {Mahdi Azmandian and Mark Bolas and Evan Suma},
url = {http://ict.usc.edu/pubs/Countering%20User%20Deviation%20During%20Redirected%20Walking.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of the ACM Symposium on Applied Perception},
address = {Vancouver, British Columbia, Canada},
abstract = {Redirected Walking is a technique that leverages human perception characteristics to allow locomotion in virtual environments larger than the tracking area. Among the many redirection techniques, some strictly depend on the user’s current position and orientation, while more recent algorithms also depend on the user’s predicted behavior. This prediction serves as an input to a computationally expensive search to determine an optimal path. The search output is formulated as a series of gains to be applied at different stages along the path. For example, if a user is walking down a corridor, a natural prediction would be that she will continue along a straight line down the corridor and choose among the possible directions ahead with equal probability. In practice, deviations from the expected virtual path are inevitable, and as a result the real-world path traversed will differ from the original prediction. These deviations can not only force the search to select a less optimal path in the next iteration, but can also in some cases cause the user to go out of bounds, requiring resets that create a jarring experience for the user. We propose a method to account for these deviations by modifying the redirection gains at each update frame, aiming to keep the user on the intended predicted physical path.},
keywords = {MxR, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
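The per-frame gain modification proposed above can be pictured as a small feedback controller that biases the rotation gain against the user's heading error, clamped to perceptually tolerable bounds. This is an illustrative controller under assumed gain limits, not the paper's actual algorithm:

import math

ROT_GAIN_MIN, ROT_GAIN_MAX = 0.67, 1.24  # commonly cited detection thresholds

def corrected_rotation_gain(planned_heading, actual_heading, base_gain=1.0, k=0.5):
    """Return this frame's rotation gain, steering the heading error toward zero.

    planned_heading, actual_heading: physical-space headings in radians.
    """
    error = math.atan2(math.sin(planned_heading - actual_heading),
                       math.cos(planned_heading - actual_heading))
    gain = base_gain + k * error  # push rotation against the deviation
    return max(ROT_GAIN_MIN, min(ROT_GAIN_MAX, gain))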
Lubetich, Shannon; Sagae, Kenji
Data-driven Measurement of Child Language Development with Simple Syntactic Templates Proceedings Article
In: Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers, pp. 2151–2160, Dublin, Ireland, 2014.
Abstract | Links | BibTeX | Tags: The Narrative Group, UARC, Virtual Humans
@inproceedings{lubetich_data-driven_2014,
title = {Data-driven Measurement of Child Language Development with Simple Syntactic Templates},
author = {Shannon Lubetich and Kenji Sagae},
url = {http://ict.usc.edu/pubs/Data-driven%20Measurement%20of%20Child%20Language%20Development%20with%20Simple%20Syntactic%20Templates.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: Technical Papers},
pages = {2151–2160},
address = {Dublin, Ireland},
abstract = {When assessing child language development, researchers have traditionally had to choose between easily computable metrics focused on superficial aspects of language, and more expressive metrics that are carefully designed to cover specific syntactic structures and require substantial and tedious labor. Recent work has shown that existing expressive metrics for child language development can be automated and produce accurate results. We go a step further and propose that measurement of syntactic development can be performed automatically in a completely data-driven way without the need for definition of language-specific inventories of grammatical structures. As a crucial step in that direction, we show that four simple feature templates are as expressive of language development as a carefully crafted standard inventory of grammatical structures that is commonly used and has been validated empirically.},
keywords = {The Narrative Group, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
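The flavor of such data-driven templates can be shown with a small extractor over automatic dependency parses; the four templates below are plausible stand-ins rather than the paper's exact inventory:

from collections import Counter

def template_features(parse):
    """parse: list of (word, pos, head_index, dep_label); head_index -1 = root."""
    feats = Counter()
    for i, (_, pos, head, label) in enumerate(parse):
        feats[f"pos={pos}"] += 1                            # 1. POS unigram
        if i + 1 < len(parse):
            feats[f"pos2={pos}_{parse[i + 1][1]}"] += 1     # 2. POS bigram
        feats[f"dep={label}"] += 1                          # 3. dependency label
        head_pos = parse[head][1] if head >= 0 else "ROOT"
        feats[f"dep_pos={label}>{head_pos}"] += 1           # 4. label + head POS
    return feats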
Ustun, Volkan; Rosenbloom, Paul S.; Sagae, Kenji; Demski, Abram
Distributed Vector Representations of Words in the Sigma Cognitive Architecture Proceedings Article
In: Proceedings of the 7th Conference on Artificial General Intelligence 2014, Québec City, Canada, 2014.
Abstract | Links | BibTeX | Tags: CogArch, Cognitive Architecture, UARC, Virtual Humans
@inproceedings{ustun_distributed_2014,
title = {Distributed Vector Representations of Words in the Sigma Cognitive Architecture},
author = {Volkan Ustun and Paul S. Rosenbloom and Kenji Sagae and Abram Demski},
url = {http://ict.usc.edu/pubs/Distributed%20Vector%20Representations%20of%20Words%20in%20the%20Sigma%20Cognitive%20Architecture.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of the 7th Conference on Artificial General Intelligence 2014},
address = {Québec City, Canada},
abstract = {Recently reported results with distributed-vector word representations in natural language processing make them appealing for incorporation into a general cognitive architecture like Sigma. This paper describes a new algorithm for learning such word representations from large, shallow information resources, and how this algorithm can be implemented via small modifications to Sigma. The effectiveness and speed of the algorithm are evaluated via a comparison of an external simulation of it with state-of-the-art algorithms. The results from more limited experiments with Sigma are also promising, but more work is required for it to reach the effectiveness and speed of the simulation.},
keywords = {CogArch, Cognitive Architecture, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
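For orientation, a generic corpus-count route to distributed word vectors is PPMI weighting of a co-occurrence matrix followed by truncated SVD. Sigma's learning mechanism in the paper is different; this sketch only illustrates the kind of representation being learned:

import numpy as np

def word_vectors(sentences, window=2, dim=50):
    """sentences: list of token lists. Returns (word-to-index map, vectors)."""
    vocab = {w: i for i, w in enumerate(sorted({w for s in sentences for w in s}))}
    C = np.zeros((len(vocab), len(vocab)))
    for s in sentences:
        for i, w in enumerate(s):
            for j in range(max(0, i - window), min(len(s), i + window + 1)):
                if j != i:
                    C[vocab[w], vocab[s[j]]] += 1.0
    total = C.sum()
    pw = C.sum(axis=1, keepdims=True) / total  # row marginals
    pc = C.sum(axis=0, keepdims=True) / total  # column marginals
    with np.errstate(divide="ignore", invalid="ignore"):
        pmi = np.log((C / total) / (pw * pc))
    ppmi = np.maximum(np.nan_to_num(pmi, neginf=0.0), 0.0)
    U, S, _ = np.linalg.svd(ppmi, full_matrices=False)
    k = min(dim, len(S))
    return vocab, U[:, :k] * S[:k]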
Pahlen, Javier; Jimenez, Jorge; Danvoye, Etienne; Debevec, Paul; Fyffe, Graham; Alexander, Oleg
Digital Ira and Beyond: Creating Photoreal Real-Time Digital Characters Proceedings Article
In: SIGGRAPH '14 ACM SIGGRAPH 2014 Courses, pp. 1–384, ACM Press, Vancouver, British Columbia, Canada, 2014, ISBN: 978-1-4503-2962-0.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{von_der_pahlen_digital_2014,
title = {Digital Ira and Beyond: Creating Photoreal Real-Time Digital Characters},
author = {Javier Pahlen and Jorge Jimenez and Etienne Danvoye and Paul Debevec and Graham Fyffe and Oleg Alexander},
url = {http://ict.usc.edu/pubs/Digial%20Ira%20and%20Beyond%20-%20Creating%20Photoreal%20Real-Time%20Digital%20Characters%20(course%20notes).pdf},
doi = {10.1145/2614028.2615407},
isbn = {978-1-4503-2962-0},
year = {2014},
date = {2014-08-01},
booktitle = {SIGGRAPH '14 ACM SIGGRAPH 2014 Courses},
pages = {1–384},
publisher = {ACM Press},
address = {Vancouver, British Columbia, Canada},
abstract = {This course explains a complete process for creating next-generation real-time digital human characters, using the Digital Ira collaboration between USC ICT and Activision as an example, covering high-resolution facial scanning, blendshape rigging, video-based performance capture, animation compression, real-time skin and eye shading, hair, latest results, and future directions.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Hartholt, Arno; Traum, David; Marsella, Stacy; Morency, Louis-Philippe; Shapiro, Ari; Gratch, Jonathan
A Shared, Modular Architecture for Developing Virtual Humans Proceedings Article
In: Proceedings of the Workshop on Architectures and Standards for Intelligent Virtual Agents at IVA 2014, pp. 4–7, Boston, MA, 2014.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{hartholt_shared_2014,
title = {A Shared, Modular Architecture for Developing Virtual Humans},
author = {Arno Hartholt and David Traum and Stacy Marsella and Louis-Philippe Morency and Ari Shapiro and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/A%20Shared%20Modular%20Architecture%20for%20Developing%20Virtual%20Humans.pdf},
doi = {10.2390/biecoll-wasiva2014-02},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of the Workshop on Architectures and Standards for Intelligent Virtual Agents at IVA 2014},
pages = {4–7},
address = {Boston, MA},
abstract = {Realizing the full potential of intelligent virtual agents requires compelling characters that can engage users in meaningful and realistic social interactions, and an ability to develop these characters effectively and efficiently. Advances are needed in individual capabilities, but perhaps more importantly, fundamental questions remain as to how best to integrate these capabilities into a single framework that allows us to efficiently create characters that can engage users in meaningful and realistic social interactions. This integration requires in-depth, inter-disciplinary understanding that few individuals, or even teams of individuals, possess.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
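Architecturally, this kind of integration is typically achieved with loosely coupled modules exchanging messages over a bus, so that speech recognition, language understanding, and behavior realization can be developed and swapped independently. A minimal in-process publish/subscribe sketch of the pattern (module and topic names are invented; the toolkit's actual messaging layer is not reproduced here):

from collections import defaultdict

class MessageBus:
    """Tiny in-process stand-in for a networked publish/subscribe layer."""

    def __init__(self):
        self._subs = defaultdict(list)

    def subscribe(self, topic, handler):
        self._subs[topic].append(handler)

    def publish(self, topic, payload):
        for handler in self._subs[topic]:
            handler(payload)

bus = MessageBus()
# A toy NLU module reacts to recognized speech and emits a dialogue act.
bus.subscribe("asr.result",
              lambda text: bus.publish("nlu.act",
                                       "greet" if "hello" in text else "unknown"))
# A toy behavior realizer reacts to dialogue acts.
bus.subscribe("nlu.act", lambda act: print("realize behavior for act:", act))
bus.publish("asr.result", "hello there")  # -> realize behavior for act: greet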
Chatterjee, Moitreya; Park, Sunghyun; Shim, Han Suk; Sagae, Kenji; Morency, Louis-Philippe
Verbal Behaviors and Persuasiveness in Online Multimedia Content Proceedings Article
In: Proceedings of the Second Workshop on Natural Language Processing for Social Media (SocialNLP), pp. 50, Dublin, Ireland, 2014.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{chatterjee_verbal_2014,
title = {Verbal Behaviors and Persuasiveness in Online Multimedia Content},
author = {Moitreya Chatterjee and Sunghyun Park and Han Suk Shim and Kenji Sagae and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/Verbal%20Behaviors%20and%20Persuasiveness%20in%20Online%20Multimedia%20Content.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of the Second Workshop on Natural Language Processing for Social Media (SocialNLP)},
pages = {50},
address = {Dublin, Ireland},
abstract = {Persuasive communication is an essential component of our daily lives, whether it is negotiating, reviewing a product, or campaigning for the acceptance of a point of view. With the rapid expansion of social media websites such as YouTube, Vimeo and ExpoTV, it is becoming ever more important and useful to understand persuasiveness in social multimedia content. In this paper we present a novel analysis of verbal behavior, based on lexical usage and paraverbal markers of hesitation, in the context of predicting persuasiveness in online multimedia content. Toward the end goal of predicting perceived persuasion, this work also explores the potential differences in verbal behavior of people expressing a positive opinion (e.g., a positive movie review) versus a negative one. The analysis is performed on a multimedia corpus of 1,000 movie review videos annotated for persuasiveness. Our results show that verbal behavior can be a significant predictor of persuasiveness in such online multimedia content.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
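The paraverbal hesitation markers mentioned above can be approximated with simple disfluency counts over a transcript; the marker list and rate definitions below are illustrative assumptions:

import re

FILLED_PAUSES = {"um", "uh", "er", "hmm", "mm"}

def hesitation_features(transcript):
    """Rates of filled pauses and immediate word repeats ("I I think")."""
    tokens = re.findall(r"[a-z']+", transcript.lower())
    n = max(len(tokens), 1)
    filled = sum(t in FILLED_PAUSES for t in tokens)
    repeats = sum(a == b for a, b in zip(tokens, tokens[1:]))
    return {"filled_pause_rate": filled / n, "repeat_rate": repeats / n}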
Jones, Andrew; Unger, Jonas; Nagano, Koki; Busch, Jay; Yu, Xueming; Peng, Hsuan-Yueh; Alexander, Oleg; Debevec, Paul
Creating a life-sized automultiscopic Morgan Spurlock for CNN’s “Inside Man” Proceedings Article
In: SIGGRAPH 2014 The 41st International Conference and Exhibition on Computer Graphics and Interactive Techniques, Vancouver, Canada, 2014.
Abstract | Links | BibTeX | Tags: Graphics, UARC
@inproceedings{jones_creating_2014,
title = {Creating a life-sized automultiscopic Morgan Spurlock for CNN’s “Inside Man”},
author = {Andrew Jones and Jonas Unger and Koki Nagano and Jay Busch and Xueming Yu and Hsuan-Yueh Peng and Oleg Alexander and Paul Debevec},
url = {http://ict.usc.edu/pubs/Creating%20a%20life-sized%20automulitscopic%20Morgan%20Spurlock%20for%20CNNs%20%e2%80%9cInside%20Man%e2%80%9d%20(abstract).pdf},
year = {2014},
date = {2014-08-01},
booktitle = {SIGGRAPH 2014 The 41st International Conference and Exhibition on Computer Graphics and Interactive Techniques},
address = {Vancouver, Canada},
abstract = {We present a system for capturing and rendering life-size 3D human subjects on an automultiscopic display. Automultiscopic 3D displays allow a large number of viewers to experience 3D content simultaneously without the hassle of special glasses or head gear.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
Malandrakis, Nikolaos; Falcone, Michael; Vaz, Colin; Bisogni, Jesse; Potamianos, Alexandros; Narayanan, Shrikanth
SAIL: Sentiment analysis using semantic similarity and contrast features Proceedings Article
In: Proceedings of SemEval 2014, pp. 512–516, Dublin, Ireland, 2014.
Abstract | Links | BibTeX | Tags: UARC, Virtual Humans
@inproceedings{malandrakis_sail_2014,
title = {SAIL: Sentiment analysis using semantic similarity and contrast features},
author = {Nikolaos Malandrakis and Michael Falcone and Colin Vaz and Jesse Bisogni and Alexandros Potamianos and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/SAIL%20-%20Sentiment%20Analysis%20using%20Semantic%20Similarity%20and%20Contrast%20Features.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of SemEval 2014},
pages = {512–516},
address = {Dublin, Ireland},
abstract = {This paper describes our submission to SemEval2014 Task 9: Sentiment Analysis in Twitter. Our model is primarily a lexicon based one, augmented by some preprocessing, including detection of Multi-Word Expressions, negation propagation and hashtag expansion and by the use of pairwise semantic similarity at the tweet level. Feature extraction is repeated for sub-strings and contrasting sub-string features are used to better capture complex phenomena like sarcasm. The resulting supervised system, using a Naive Bayes model, achieved high performance in classifying entire tweets, ranking 7th on the main set and 2nd when applied to sarcastic tweets.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
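The lexicon-plus-negation core of such a system fits in a few lines: sum per-token polarity scores and flip polarity within a fixed window after a negator. The lexicon values and window size below are toy assumptions, not the SAIL resources:

NEGATORS = {"not", "no", "never", "n't", "cannot"}
LEXICON = {"good": 1.0, "great": 2.0, "bad": -1.0, "awful": -2.0, "love": 1.5}

def tweet_score(tokens, neg_window=3):
    """Lexicon polarity sum with negation propagation over a token window."""
    score, negated_until = 0.0, -1
    for i, tok in enumerate(tokens):
        if tok in NEGATORS:
            negated_until = i + neg_window
        polarity = LEXICON.get(tok, 0.0)
        score += -polarity if i <= negated_until else polarity
    return score

# tweet_score("this movie is not good".split()) -> -1.0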
Pynadath, David V.; Rosenbloom, Paul S.; Marsella, Stacy C.
Reinforcement Learning for Adaptive Theory of Mind in the Sigma Cognitive Architecture Proceedings Article
In: Proceedings of the 7th Annual Conference on Artificial General Intelligence, pp. 143–154, Springer International Publishing, Quebec City, Canada, 2014, ISBN: 978-3-319-09273-7.
@inproceedings{pynadath_reinforcement_2014,
title = {Reinforcement Learning for Adaptive Theory of Mind in the Sigma Cognitive Architecture},
author = {David V. Pynadath and Paul S. Rosenbloom and Stacy C. Marsella},
url = {http://ict.usc.edu/pubs/Reinforcement%20learning%20for%20adaptive%20Theory%20of%20Mind%20in%20the%20Sigma%20cognitive%20architecture.pdf},
doi = {10.1007/978-3-319-09274-4_14},
isbn = {978-3-319-09273-7},
year = {2014},
date = {2014-08-01},
booktitle = {Proceedings of the 7th Annual Conference on Artificial General Intelligence},
pages = {143–154},
publisher = {Springer International Publishing},
address = {Quebec City, Canada},
abstract = {One of the most common applications of human intelligence is social interaction, where people must make effective decisions despite uncertainty about the potential behavior of others around them. Reinforcement learning (RL) provides one method for agents to acquire knowledge about such interactions. We investigate different methods of multiagent reinforcement learning within the Sigma cognitive architecture. We leverage Sigma’s architectural mechanism for gradient descent to realize four different approaches to multiagent learning: (1) with no explicit model of the other agent, (2) with a model of the other agent as following an unknown stationary policy, (3) with prior knowledge of the other agent’s possible reward functions, and (4) through inverse reinforcement learning (IRL) of the other agent’s reward function. While the first three variations re-create existing approaches from the literature, the fourth represents a novel combination of RL and IRL for social decision-making. We show how all four styles of adaptive Theory of Mind are realized through Sigma’s same gradient descent algorithm, and we illustrate their behavior within an abstract negotiation task.},
keywords = {Social Simulation, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
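Variant (2) in the abstract above, modeling the other agent as following an unknown stationary policy, can be sketched outside of Sigma with simple frequency estimation. The toy negotiation payoffs, Laplace prior, and hidden opponent policy below are illustrative assumptions, not the architecture's gradient-descent realization.

# Estimate a stationary opponent policy from observed responses, then
# pick the offer with the highest expected reward under that model.
import random
from collections import Counter

PAYOFF = {("low", "accept"): 8, ("low", "reject"): 0,
          ("fair", "accept"): 5, ("fair", "reject"): 0}

observed = {"low": Counter(accept=1, reject=1),    # Laplace-smoothed counts
            "fair": Counter(accept=1, reject=1)}

def simulate_opponent(offer):
    p_accept = 0.2 if offer == "low" else 0.9      # hidden true policy
    return "accept" if random.random() < p_accept else "reject"

for _ in range(500):                               # gather observations
    offer = random.choice(["low", "fair"])
    observed[offer][simulate_opponent(offer)] += 1

def expected_reward(offer):
    n = sum(observed[offer].values())
    return sum(PAYOFF[offer, a] * c / n for a, c in observed[offer].items())

best = max(["low", "fair"], key=expected_reward)
print(best, {o: round(expected_reward(o), 2) for o in ("low", "fair")})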
Nagano, Koki; Alexander, Oleg; Barbic, Jernej; Debevec, Paul
Measurement and Modeling of Microfacet Distributions under Deformation Proceedings Article
In: ACM SIGGRAPH 2014 Talks, ACM, Vancouver, British Columbia, Canada, 2014, ISBN: 978-1-4503-2960-6.
@inproceedings{nagano_measurement_2014,
title = {Measurement and Modeling of Microfacet Distributions under Deformation},
author = {Koki Nagano and Oleg Alexander and Jernej Barbic and Paul Debevec},
url = {http://ict.usc.edu/pubs/Measurement%20and%20Modeling%20of%20Microfacet%20Distribution%20under%20Deformation%20(abstract%20for%20talk).pdf},
doi = {10.1145/2614106.2614124},
isbn = {978-1-4503-2960-6},
year = {2014},
date = {2014-08-01},
booktitle = {ACM SIGGRAPH 2014 Talks},
publisher = {ACM},
address = {Vancouver, British Columbia, Canada},
abstract = {We endeavor to model dynamic microfacet distributions of rough surfaces such as skin to simulate the changes in surface BRDF under stretching and compression. We begin by measuring microfacet distributions at 5-micron scale of several surface patches under controlled deformation. Generally speaking, rough surfaces become flatter and thus shinier as they are pulled tighter, and become rougher under compression. From this data, we build a model of how surface reflectance changes as the material deforms. We then simulate dynamic surface reflectance by modifying the anisotropic roughness parameters of a microfacet distribution model in accordance with animated surface deformations. Furthermore, we directly render such dynamic appearance by driving dynamic micro geometries to demonstrate how they influence the meso-scale surface reflectance.},
keywords = {Graphics, UARC},
pubstate = {published},
tppubtype = {inproceedings}
}
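The qualitative finding above, that pulling a rough surface tighter makes it flatter and shinier while compression roughens it, can be expressed as a per-axis scaling of anisotropic roughness. The inverse-stretch mapping below is a toy assumption for illustration, not the measured 5-micron model.

# Illustrative only: scale anisotropic roughness by per-axis stretch.
def deformed_roughness(alpha_x, alpha_y, stretch_x, stretch_y):
    # stretch > 1 pulls the surface tighter along that axis, flattening
    # microfacets (lower roughness); stretch < 1 compresses and roughens.
    return alpha_x / stretch_x, alpha_y / stretch_y

# Skin patch with base roughness 0.4, stretched 1.5x horizontally and
# compressed to 0.8x vertically:
print(deformed_roughness(0.4, 0.4, 1.5, 0.8))  # approx (0.267, 0.5)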
Nouri, Elnaz; Traum, David
Generative Models of Cultural Decision Making for Virtual Agents Based on User’s Reported Values Proceedings Article
In: Intelligent Virtual Agents, pp. 310–315, Springer International Publishing, Boston, MA, 2014, ISBN: 978-3-319-09766-4.
@inproceedings{nouri_generative_2014,
title = {Generative Models of Cultural Decision Making for Virtual Agents Based on User’s Reported Values},
author = {Elnaz Nouri and David Traum},
url = {http://link.springer.com/chapter/10.1007/978-3-319-09767-1_39},
doi = {10.1007/978-3-319-09767-1_39},
isbn = {978-3-319-09766-4},
year = {2014},
date = {2014-08-01},
booktitle = {Intelligent Virtual Agents},
pages = {310–315},
publisher = {Springer International Publishing},
address = {Boston, MA},
series = {8637},
abstract = {Building computational models of cultural decision making for virtual agents based on behavioral data is a challenge because finding a reasonable mapping between the statistical data and the computational model is a difficult task. This paper shows how the weights on a multi-attribute utility-based decision-making model can be set according to the values held by people elicited through a survey. If survey data from different cultures is available, then this can be done to simulate cultural decision-making behavior. We used the survey data of two sets of players from the US and India playing the Dictator Game and the Ultimatum Game on-line. Analyzing their reported values in the survey enabled us to set up our model’s parameters based on their culture and simulate their behavior in the Ultimatum Game.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
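The weight-setting idea in the abstract above can be made concrete with a small multi-attribute utility model for the Ultimatum Game; the two attributes and the weight values below are hypothetical stand-ins for the survey-derived parameters.

# Toy multi-attribute utility for a proposer in the Ultimatum Game.
def offer_utility(my_share, other_share, weights):
    attributes = {
        "self_interest": my_share,
        "fairness": -abs(my_share - other_share),  # penalize inequality
    }
    return sum(weights[k] * v for k, v in attributes.items())

survey_weights = {"self_interest": 0.6, "fairness": 0.4}  # toy values
total = 10
best = max(range(total + 1),
           key=lambda keep: offer_utility(keep, total - keep, survey_weights))
print(f"keep {best}, offer {total - best}")  # keep 5, offer 5

A culture whose reported values put more weight on self-interest would shift the maximizing split toward keeping more, which is the kind of behavioral difference the survey-derived weights are meant to reproduce.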
de Melo, Celso M.; Carnevale, Peter; Gratch, Jonathan
Using Virtual Confederates to Research Intergroup Bias and Conflict Proceedings Article
In: Best Paper Proceedings of the Annual Meeting of the Academy of Management (AOM’14), Philadelphia, PA, 2014.
@inproceedings{de_melo_using_2014,
title = {Using Virtual Confederates to Research Intergroup Bias and Conflict},
author = {Celso M. de Melo and Peter Carnevale and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Using%20Virtual%20Confederates%20to%20Research%20Intergroup%20Bias%20and%20Conflict.pdf},
year = {2014},
date = {2014-08-01},
booktitle = {Best Paper Proceedings of the Annual Meeting of the Academy of Management (AOM’14)},
address = {Philadelphia, PA},
abstract = {Virtual confederates–i.e., three-dimensional virtual characters that look and act like humans–have been gaining in popularity as a research method in the social and medical sciences. Interest in this research method stems from the potential for increased experimental control, ease of replication, facilitated access to broader samples and lower costs. We argue that virtual confederates are also a promising research tool for the study of intergroup behavior. To support this claim we replicate and extend key findings in the literature with virtual confederates. In Experiment 1 we demonstrate that people apply racial stereotypes to virtual confederates, and show a corresponding bias in terms of money offered in the dictator game. In Experiment 2 we show that people also show an in-group bias when group membership is artificially created and based on interdependence through shared payoffs in a nested social dilemma. Our results further demonstrate that social categorization and bias can occur not only when people believe confederates are controlled by humans (i.e., they are avatars), but also when confederates are believed to be controlled by computer algorithms (i.e., they are agents). The results, nevertheless, show a basic bias in favor of avatars (the in-group in the “human category”) over agents (the out-group). Finally, our results (Experiments 2 and 3) establish that people can combine, in additive fashion, the effects of these social categories; a mechanism that, accordingly, can be used to reduce intergroup bias. We discuss implications for research in social categorization, intergroup bias and conflict.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
DeVault, David; Stone, Matthew
Pursuing and Demonstrating Understanding in Dialogue Book Section
In: Natural Language Generation in Interactive Systems, pp. 34–62, Cambridge University Press, 2014.
@incollection{devault_pursuing_2014,
title = {Pursuing and Demonstrating Understanding in Dialogue},
author = {David DeVault and Matthew Stone},
url = {http://www.cs.rutgers.edu/~mdstone/pubs/dialogue11.pdf},
year = {2014},
date = {2014-07-01},
booktitle = {Natural Language Generation in Interactive Systems},
pages = {34–62},
publisher = {Cambridge University Press},
abstract = {The appeal of dialogue as an interface modality is its ability to support open-ended mixed-initiative interaction. Many systems offer rich and extensive capabilities, but must support infrequent and untrained users. In such cases, it's unreasonable to expect users to know the actions they need in advance, or to be able to specify them using a regimented scheme of commands or menu options. Dialogue offers the potential for the user to talk through their needs with the system and arrive collaboratively at a feasible solution.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Rosenbloom, Paul S.
Deconstructing Episodic Memory and Learning in Sigma Proceedings Article
In: Proceedings of the 36th Annual Conference of the Cognitive Science Society, Cognitive Science Society, Quebec City, Canada, 2014.
@inproceedings{rosenbloom_deconstructing_2014,
title = {Deconstructing Episodic Memory and Learning in Sigma},
author = {Paul S. Rosenbloom},
url = {http://ict.usc.edu/pubs/Deconstructing%20Reinforcement%20Learning%20in%20Sigma.pdf},
year = {2014},
date = {2014-07-01},
booktitle = {Proceedings of the 36th Annual Conference of the Cognitive Science Society},
publisher = {Cognitive Science Society},
address = {Quebec City, Canada},
abstract = {In an experiment in functional elegance, episodic memory and learning have been deconstructed in the Sigma cognitive architecture in terms of pre-existing memory and learning mechanisms plus a template-based structure generator. As a side effect, base-level activation also becomes deconstructed in terms of a learned temporal prior.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Hartholt, Arno; Grimani, Mario; Leeds, Andrew; Liewer, Matt
Virtual Reality Exposure Therapy for Combat-Related Posttraumatic Stress Disorder Journal Article
In: Computer, vol. 47, no. 7, pp. 31–37, 2014.
@article{rizzo_virtual_2014,
title = {Virtual Reality Exposure Therapy for Combat-Related Posttraumatic Stress Disorder},
author = {Albert Rizzo and Arno Hartholt and Mario Grimani and Andrew Leeds and Matt Liewer},
url = {http://ict.usc.edu/pubs/Virtual%20Reality%20Exposure%20Therapy%20for%20Treating%20Combat-Related%20PTSD.pdf},
year = {2014},
date = {2014-07-01},
journal = {Computer},
volume = {47},
number = {7},
pages = {31–37},
abstract = {Virtual reality (VR) technology is rapidly evolving to support prolonged exposure (PE) therapy, a proven treatment for combat-related posttraumatic stress disorder. Building on the successful 2007 Virtual Iraq/Afghanistan VRET system, a team of behavioral scientists, software engineers, and virtual artists has created Bravemind, a flexible VR system that offers significantly enhanced PE treatment possibilities. The first Web extra at http://youtu.be/EiYg-kMNMtQ is a video demonstration of an original early virtual reality exposure therapy (VRET) prototype that shows a small section of an Iraqi city with a landing helicopter (2004). The second Web extra at http://youtu.be/_cS-ynWZmeQ is a video demonstration of virtual reality exposure therapy (VRET) that simulates driving a Humvee in a rural part of Iraq, showcasing several encounters, including IED and road-side attacks (2007). The third Web extra at http://youtu.be/78QXX_F4mc8 is a video demonstration of virtual reality exposure therapy (VRET) that simulates an overview of several Iraqi city areas (2007). The fourth Web extra at http://youtu.be/_AnixslkVLU is a video demonstration of virtual reality exposure therapy (VRET) that simulates a patrol entering interior buildings in Iraq (2007). The fifth Web extra at http://youtu.be/S22aQ-DqKKU is a video demonstration of an original virtual reality exposure therapy (VRET) tablet interface that allows the clinician to change virtual reality settings and trigger encounters (2007). The sixth Web extra at http://youtu.be/C-fspuLo4vw is a video demonstration of the Bravemind virtual reality exposure therapy (VRET) prototype showing a variety of driving and dismounted scenarios with encounters in Iraq and Afghanistan (2013). The seventh Web extra at http://youtu.be/HSPDomDAigg is a video collection of Iraqi and Afghanistan virtual reality exposure therapy (VRET) scenarios within the Bravemind prototype (2013).},
keywords = {MedVR, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gandhe, Sudeep; Traum, David
SAWDUST: a Semi-Automated Wizard Dialogue Utterance Selection Tool for domain-independent large-domain dialogue Proceedings Article
In: SIGDIAL 2014 Conference, Association for Computational Linguistics, Philadelphia, PA, 2014.
@inproceedings{gandhe_sawdust_2014,
title = {SAWDUST: a Semi-Automated Wizard Dialogue Utterance Selection Tool for domain-independent large-domain dialogue},
author = {Sudeep Gandhe and David Traum},
url = {http://ict.usc.edu/pubs/SAWDUST%20-%20a%20Semi-Automated%20Wizard%20Dialogue%20Utterance%20Selection%20Tool%20for%20domain-independent%20large-domain%20dialogue.pdf},
year = {2014},
date = {2014-06-01},
booktitle = {SIGDIAL 2014 Conference},
publisher = {Association for Computational Linguistics},
address = {Philadelphia, PA},
abstract = {We present a tool that allows human wizards to select appropriate response utterances for a given dialogue context from a set of utterances observed in a dialogue corpus. Such a tool can be used in Wizard-of-Oz studies and for collecting data which can be used for training and/or evaluating automatic dialogue models. We also propose to incorporate such automatic dialogue models back into the tool as an aid in selecting utterances from a large dialogue corpus. The tool allows a user to rank candidate utterances for selection according to these automatic models.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
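The ranking aid proposed above can be illustrated with a deliberately simple stand-in for the automatic dialogue models: score every corpus utterance against the current dialogue context and show the wizard a sorted list. The token-overlap scorer and three-utterance corpus below are hypothetical.

# Rank candidate wizard responses against the dialogue context.
def score(context, candidate):
    ctx, cand = set(context.lower().split()), set(candidate.lower().split())
    return len(ctx & cand) / max(len(cand), 1)

corpus = ["where were you deployed",
          "tell me about your family",
          "how long was your deployment"]
context = "i was deployed to iraq for my second deployment"

for utterance in sorted(corpus, key=lambda u: score(context, u), reverse=True):
    print(round(score(context, utterance), 2), utterance)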
Nouri, Elnaz; Traum, David
Initiative Taking in Negotiation Proceedings Article
In: Proceedings of the 15th Annual Meeting of the Special Interest Group on Discourse and Dialogue (SIGDIAL), pp. 186–193, 2014.
@inproceedings{nouri_initiative_2014,
title = {Initiative Taking in Negotiation},
author = {Elnaz Nouri and David Traum},
url = {http://ict.usc.edu/pubs/Initiative%20Taking%20in%20Negotiation.pdf},
year = {2014},
date = {2014-06-01},
booktitle = {Proceedings of the 15th Annual Meeting of the Special Interest Group on Discourse and Dialogue (SIGDIAL)},
pages = {186–193},
abstract = {We examine the relationship between initiative behavior in negotiation dialogues and the goals and outcomes of the negotiation. We propose a novel annotation scheme for dialogue initiative, including four labels for initiative and response behavior in a dialogue turn. We annotate an existing human-human negotiation dataset, and use initiative-based features to try to predict both negotiation goal and outcome, comparing our results to prior work using other (non-initiative) features sets. Results show that combining initiative features with other features leads to improvements over either set and a majority class baseline.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Morbini, Fabrizio; DeVault, David; Georgila, Kallirroi; Artstein, Ron; Traum, David; Morency, Louis-Philippe
A Demonstration of Dialogue Processing in SimSensei Kiosk Proceedings Article
In: 15th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 254, 2014.
@inproceedings{morbini_demonstration_2014,
title = {A Demonstration of Dialogue Processing in SimSensei Kiosk},
author = {Fabrizio Morbini and David DeVault and Kallirroi Georgila and Ron Artstein and David Traum and Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/A%20Demonstration%20of%20Dialogue%20Processing%20in%20SimSensei%20Kiosk.pdf},
year = {2014},
date = {2014-06-01},
booktitle = {15th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
pages = {254},
abstract = {This demonstration highlights the dialogue processing in SimSensei Kiosk, a virtual human dialogue system that conducts interviews related to psychological distress conditions such as depression, anxiety, and post-traumatic stress disorder (PTSD). The dialogue processing in SimSensei Kiosk allows the system to conduct coherent spoken interviews of human users that are 15-25 minutes in length, and in which users feel comfortable talking and openly sharing information. We present the design of the individual dialogue components, and show examples of natural conversation flow between the system and users, including expressions of empathy, follow-up responses and continuation prompts, and turn-taking.},
keywords = {UARC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}