Publications
Chollet, Mathieu; Massachi, Talie; Scherer, Stefan
Racing Heart and Sweaty Palms: What Influences Users’ Self-Assessments and Physiological Signals When Interacting With Virtual Audiences? Proceedings Article
In: Proceedings of the International Conference on Intelligent Virtual Agents, pp. 83–86, Springer International Publishing, Stockholm, Sweden, 2017, ISBN: 978-3-319-67400-1 978-3-319-67401-8.
@inproceedings{chollet_racing_2017,
title = {Racing Heart and Sweaty Palms: What Influences Users’ Self-Assessments and Physiological Signals When Interacting With Virtual Audiences?},
author = {Mathieu Chollet and Talie Massachi and Stefan Scherer},
url = {http://link.springer.com/10.1007/978-3-319-67401-8_9},
doi = {10.1007/978-3-319-67401-8_9},
isbn = {978-3-319-67400-1 978-3-319-67401-8},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the International Conference on Intelligent Virtual Agents},
volume = {10498},
pages = {83–86},
publisher = {Springer International Publishing},
address = {Stockholm, Sweden},
abstract = {In psychotherapy, virtual audiences have been shown to promote successful outcomes when used to help treat public speaking anxiety. Additionally, early experiments have shown their potential to help improve public speaking ability. However, it is still unclear to what extent certain factors, such as audience non-verbal behaviors, impact users when interacting with a virtual audience. In this paper, we design an experimental study to investigate users’ self-assessments and physiological states when interacting with a virtual audience. Our results showed that virtual audience behaviors did not influence participants’ self-assessments or physiological responses, which were instead predominantly determined by participants’ prior anxiety levels.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mozgai, Sharon; Lucas, Gale; Gratch, Jonathan
To Tell the Truth: Virtual Agents and Morning Morality Proceedings Article
In: Proceedings of the 17th International Conference on Intelligent Virtual Agents, pp. 283–286, Springer International Publishing, Stockholm, Sweden, 2017, ISBN: 978-3-319-67400-1 978-3-319-67401-8.
@inproceedings{mozgai_tell_2017,
title = {To Tell the Truth: Virtual Agents and Morning Morality},
author = {Sharon Mozgai and Gale Lucas and Jonathan Gratch},
url = {http://link.springer.com/10.1007/978-3-319-67401-8_37},
doi = {10.1007/978-3-319-67401-8_37},
isbn = {978-3-319-67400-1 978-3-319-67401-8},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the 17th International Conference on Intelligent Virtual Agents},
pages = {283–286},
publisher = {Springer International Publishing},
address = {Stockholm, Sweden},
abstract = {This paper investigates the impact of time of day on truthfulness in human-agent interactions. Time of day has been found to have important implications for moral behavior in human-human interaction. Namely, the morning morality effect shows that people are more likely to act ethically (i.e., tell fewer lies) in the morning than in the afternoon. Based on previous work on disclosure and virtual agents, we propose that this effect will not bear out in human-agent interactions. Preliminary evaluation shows that individuals who lie when engaged in multi-issue bargaining tasks with the Conflict Resolution Agent, a semi-automated virtual human, tell more lies to human negotiation partners than virtual agent negotiation partners in the afternoon and are more likely to tell more lies in the afternoon than in the morning when they believe they are negotiating with a human. Time of day does not have a significant effect on the amount of lies told to the virtual agent during the multi-issue bargaining task.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nazari, Zahra; Lucas, Gale; Gratch, Jonathan
Fixed-pie Lie in Action Proceedings Article
In: Proceedings of the 17th International Conference on Intelligent Virtual Agents, pp. 287–300, Springer International Publishing, Stockholm, Sweden, 2017, ISBN: 978-3-319-67400-1 978-3-319-67401-8.
@inproceedings{nazari_fixed-pie_2017,
title = {Fixed-pie Lie in Action},
author = {Zahra Nazari and Gale Lucas and Jonathan Gratch},
url = {http://link.springer.com/10.1007/978-3-319-67401-8_38},
doi = {10.1007/978-3-319-67401-8_38},
isbn = {978-3-319-67400-1 978-3-319-67401-8},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the 17th International Conference on Intelligent Virtual Agents},
volume = {10498},
pages = {287–300},
publisher = {Springer International Publishing},
address = {Stockholm, Sweden},
abstract = {Negotiation is a crucial skill for socially intelligent agents. Sometimes negotiators lie to gain advantage. In particular, they can claim that they want the same thing as their opponents (i.e., use a “fixed-pie lie”) to gain an advantage while appearing fair. The current work is the first attempt to examine the effectiveness of this strategy when used by agents against humans in realistic negotiation settings. Using the IAGO platform, we show that the exploitative agent indeed wins more points while appearing fair and honest to its opponent. In a second study, we investigated how far the exploitative agents could push for more gain and examined their effect on people’s behavior. This study shows that even though exploitative agents gained high value in the short term, their long-term success remains questionable, as they left their opponents unhappy and unsatisfied.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
LeGendre, Chloe; Huynh, Loc; Wang, Shanhe; Debevec, Paul
Modeling vellus facial hair from asperity scattering silhouettes Proceedings Article
In: Proceedings of SIGGRAPH 2017, pp. 1–2, ACM Press, Los Angeles, CA, 2017, ISBN: 978-1-4503-5008-2.
@inproceedings{legendre_modeling_2017,
title = {Modeling vellus facial hair from asperity scattering silhouettes},
author = {Chloe LeGendre and Loc Huynh and Shanhe Wang and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?doid=3084363.3085057},
doi = {10.1145/3084363.3085057},
isbn = {978-1-4503-5008-2},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of SIGGRAPH 2017},
pages = {1–2},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {We present a technique for modeling the vellus hair over the face based on observations of asperity scattering along a subject's silhouette. We photograph the backlit subject in profile and three-quarters views with a high-resolution DSLR camera to observe the vellus hair on the side and front of the face and separately acquire a 3D scan of the face geometry and texture. We render a library of backlit vellus hair patch samples with different geometric parameters such as density, orientation, and curvature, and we compute image statistics for each set of parameters. We trace the silhouette contour in each face image and straighten the backlit hair silhouettes using image resampling. We compute image statistics for each section of the facial silhouette and determine which set of hair modeling parameters best matches the statistics. We then generate a complete set of vellus hairs for the face by interpolating and extrapolating the matched parameters over the skin. We add the modeled vellus hairs to the 3D facial scan and generate renderings under novel lighting conditions, generally matching the appearance of real photographs.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mell, Johnathan; Lucas, Gale; Gratch, Jonathan
Prestige Questions, Online Agents, and Gender-Driven Differences in Disclosure Proceedings Article
In: Proceedings of the 17th International Conference on Intelligent Virtual Agents, pp. 273–282, Springer International Publishing, Stockholm, Sweden, 2017, ISBN: 978-3-319-67400-1 978-3-319-67401-8.
@inproceedings{mell_prestige_2017,
title = {Prestige Questions, Online Agents, and Gender-Driven Differences in Disclosure},
author = {Johnathan Mell and Gale Lucas and Jonathan Gratch},
url = {http://link.springer.com/10.1007/978-3-319-67401-8_36},
doi = {10.1007/978-3-319-67401-8_36},
isbn = {978-3-319-67400-1 978-3-319-67401-8},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the 17th International Conference on Intelligent Virtual Agents},
pages = {273–282},
publisher = {Springer International Publishing},
address = {Stockholm, Sweden},
abstract = {This work considers the possibility of using virtual agents to encourage disclosure of sensitive information. In particular, this research used “prestige questions”, which asked participants to disclose information relevant to their socioeconomic status, such as credit limit, as well as university attendance, and mortgage or rent payments they could afford. We explored the potential for agents to enhance disclosure compared to conventional web-forms, due to their ability to serve as relational agents by creating rapport. To consider this possibility, agents were framed as artificially intelligent versus avatars controlled by a real human, and we compared these conditions to a version of the financial questionnaire with no agent. In this way, both the perceived agency of the agent and its ability to generate rapport were tested. Additionally, we examined the differences in disclosure between men and women in these conditions. Analyses revealed that agents (either AI- or human-framed) evoked greater disclosure compared to the no agent condition. However, there was some evidence that human-framed agents evoked greater lying. Thus, users in general responded more socially to the presence of a human- or AI-framed agent, and the benefits and costs of this approach were made apparent. The results are discussed in terms of rapport and anonymity.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Leuski, Anton; Artstein, Ron
Lessons in Dialogue System Deployment Proceedings Article
In: Proceedings of the SIGDIAL 2017 Conference: the 18th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 352–355, Association for Computational Linguistics, Saarbruecken, Germany, 2017.
@inproceedings{leuski_lessons_2017,
title = {Lessons in Dialogue System Deployment},
author = {Anton Leuski and Ron Artstein},
url = {http://www.sigdial.org/workshops/conference18/proceedings/index.html},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the SIGDIAL 2017 Conference: the 18th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
pages = {352–355},
publisher = {Association for Computational Linguistics},
address = {Saarbruecken, Germany},
abstract = {We analyze deployment of an interactive dialogue system in an environment where deep technical expertise might not be readily available. The initial version was created using a collection of research tools. We summarize a number of challenges with its deployment at two museums and describe a new system that simplifies the installation and user interface; reduces reliance on 3rd-party software; and provides a robust data collection mechanism.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marge, Matthew; Bonial, Claire; Foots, Ashley; Hayes, Cory; Henry, Cassidy; Pollard, Kimberly; Artstein, Ron; Voss, Clare; Traum, David
Exploring Variation of Natural Human Commands to a Robot in a Collaborative Navigation Task Proceedings Article
In: Proceedings of the First Workshop on Language Grounding for Robotics, pp. 58–66, Association for Computational Linguistics, Vancouver, Canada, 2017.
@inproceedings{marge_exploring_2017,
title = {Exploring Variation of Natural Human Commands to a Robot in a Collaborative Navigation Task},
author = {Matthew Marge and Claire Bonial and Ashley Foots and Cory Hayes and Cassidy Henry and Kimberly Pollard and Ron Artstein and Clare Voss and David Traum},
url = {http://www.aclweb.org/anthology/W17-2808},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the First Workshop on Language Grounding for Robotics},
pages = {58–66},
publisher = {Association for Computational Linguistics},
address = {Vancouver, Canada},
abstract = {Robot-directed communication is variable, and may change based on human perception of robot capabilities. To collect training data for a dialogue system and to investigate possible communication changes over time, we developed a Wizard-of-Oz study that (a) simulates a robot’s limited understanding, and (b) collects dialogues where human participants build a progressively better mental model of the robot’s understanding. With ten participants, we collected ten hours of human-robot dialogue. We analyzed the structure of instructions that participants gave to a remote robot before it responded. Our findings show a general initial preference for including metric information (e.g., move forward 3 feet) over landmarks (e.g., move to the desk) in motion commands, but this decreased over time, suggesting changes in perception.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Brixey, Jacqueline; Hoegen, Rens; Lan, Wei; Rusow, Joshua; Singla, Karan; Yin, Xusen; Artstein, Ron; Leuski, Anton
SHIHbot: A Facebook chatbot for Sexual Health Information on HIV/AIDS Proceedings Article
In: Proceedings of the SIGDIAL 2017 Conference: the 18th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pp. 370–373, Association for Computational Linguistics, Saarbruecken, Germany, 2017.
@inproceedings{brixey_shihbot_2017,
title = {SHIHbot: A Facebook chatbot for Sexual Health Information on HIV/AIDS},
author = {Jacqueline Brixey and Rens Hoegen and Wei Lan and Joshua Rusow and Karan Singla and Xusen Yin and Ron Artstein and Anton Leuski},
url = {http://www.sigdial.org/workshops/conference18/proceedings/index.html},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the SIGDIAL 2017 Conference: the 18th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
pages = {370–373},
publisher = {Association for Computational Linguistics},
address = {Saarbruecken, Germany},
abstract = {We present the implementation of an autonomous chatbot, SHIHbot, deployed on Facebook, which answers a wide variety of sexual health questions on HIV/AIDS. The chatbot's response database is compiled from professional medical and public health resources in order to provide reliable information to users. The system's backend is NPCEditor, a response selection platform trained on linked questions and answers; to our knowledge this is the first retrieval-based chatbot deployed on a large public social network.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
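The NPCEditor response-selection approach described in the SHIHbot abstract above can be illustrated with a minimal retrieval sketch: match an incoming question against a set of linked question-answer pairs and return the answer of the best-scoring question. NPCEditor itself uses a more sophisticated statistical relevance model; the TF-IDF scoring, the sample pairs, and the threshold below are illustrative assumptions only.

    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.metrics.pairwise import cosine_similarity

    # Hypothetical linked question-answer pairs standing in for the
    # curated SHIHbot response database.
    qa_pairs = [
        ("what is hiv", "HIV is the virus that causes AIDS ..."),
        ("how is hiv transmitted", "HIV can be transmitted through ..."),
    ]
    questions = [q for q, _ in qa_pairs]
    vectorizer = TfidfVectorizer().fit(questions)
    question_vectors = vectorizer.transform(questions)

    def respond(user_question, threshold=0.2):
        # Score the user question against every stored question and
        # return the linked answer if the best match is good enough.
        sims = cosine_similarity(vectorizer.transform([user_question]),
                                 question_vectors)[0]
        best = sims.argmax()
        return qa_pairs[best][1] if sims[best] >= threshold else "I don't have an answer for that."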
Pincus, Eli; Traum, David
An Incremental Response Policy in an Automatic Word-Game Proceedings Article
In: Proceedings of IVA 2017 Workshop on Conversational Interruptions in Human-Agent Interactions, Stockholm, Sweden, 2017.
@inproceedings{pincus_incremental_2017,
title = {An Incremental Response Policy in an Automatic Word-Game},
author = {Eli Pincus and David Traum},
url = {http://people.ict.usc.edu/~traum/Papers/pincus_traum-cihai2017.pdf},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of IVA 2017 Workshop on Conversational Interruptions in Human-Agent Interactions},
address = {Stockholm, Sweden},
abstract = {Turn-taking is an important aspect of human-human and human-computer interaction. Rapid turn-taking is a feature of human-human interaction that is difficult for today’s dialogue systems to emulate. For example, typical human-human interactions can involve an original sending interlocutor changing or stopping their speech mid-utterance as a result of overlapping speech from the other interlocutor. The overlapping utterances from the other interlocutor are typically called barge-in utterances. An example of this phenomenon is seen in the two turns of dialogue in the top half of Figure 1. In this dialogue segment Student A first reveals his test score in the original utterance. Student A then begins to tell Student B that he had heard Student B got a perfect score. Student B interrupts Student A with a barge-in utterance that contains new information (that actually he had not performed well on the test), causing Student A to halt his speech and not finish his original utterance. We call the unspoken part of Student A’s original utterance Student A’s originally intended utterance. Student A then makes a decision based on the new information to not say his originally intended utterance. This is likely due to the originally intended utterance no longer being appropriate considering the new information made available to Student A. Student A then makes an intelligent next choice of what to say, which can be seen in Student A’s updated utterance, which takes into account the new information contained in Student B’s barge-in utterance. In this work we refer to Student A’s dialogue move as an intelligent update.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Shapiro, Ari; Schwartz, David; Lewine, Gabrielle; Feng, Andrew Wei-Wen
Virtual Role-Play with Rapid Avatars Book Section
In: Intelligent Virtual Agents, vol. 10498, pp. 463–466, Springer International Publishing, Cham, Switzerland, 2017, ISBN: 978-3-319-67400-1 978-3-319-67401-8.
@incollection{wang_virtual_2017,
title = {Virtual Role-Play with Rapid Avatars},
author = {Ning Wang and Ari Shapiro and David Schwartz and Gabrielle Lewine and Andrew Wei-Wen Feng},
url = {http://link.springer.com/10.1007/978-3-319-67401-8_59},
isbn = {978-3-319-67400-1 978-3-319-67401-8},
year = {2017},
date = {2017-08-01},
booktitle = {Intelligent Virtual Agents},
volume = {10498},
pages = {463–466},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Digital doppelgangers possess great potential to serve as powerful models for behavioral change. An emerging technology, the Rapid Avatar Capture and Simulation (RACAS), enables low-cost and high-speed scanning of a human user and creation of a digital doppelganger that is a fully animatable virtual 3D model of the user. We designed a virtual role-playing game, DELTA, with digital doppelgangers to influence a human user’s attitude towards sexism on college campuses. In this demonstration, we will showcase the RACAS system and the DELTA game.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Nye, Benjamin D.; Mitros, Piotr; Schunn, Christian; Foltz, Peter W.; Gasevic, Dragan; Katz, Irvin R.
Why Assess? The Role of Assessment in Learning Science and Society Book Section
In: Design Recommendations for Intelligent Tutoring Systems: Volume 5 - Assessment, vol. 5, pp. 189–202, US Army Research Laboratory, Orlando, FL, 2017, ISBN: 978-0-9977257-2-8.
@incollection{benjamin_d_nye_why_2017,
title = {Why Assess? The Role of Assessment in Learning Science and Society},
author = {Benjamin D. Nye and Piotr Mitros and Christian Schunn and Peter W. Foltz and Dragan Gasevic and Irvin R. Katz},
url = {https://books.google.com/books?id=5tsyDwAAQBAJ&pg=PA189&source=gbs_toc_r&cad=4#v=onepage&q&f=false},
isbn = {978-0-9977257-2-8},
year = {2017},
date = {2017-08-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems: Volume 5 - Assessment},
volume = {5},
pages = {189–202},
publisher = {US Army Research Laboratory},
address = {Orlando, FL},
abstract = {Even though assessment often is imperfect, it provides valuable input to the process of teaching, learning, and educational resource design. However, narrow assessment, especially used in high-stakes settings, can lead to worse educational outcomes (e.g., performance in later courses, workplace, or social settings; Hout & Elliott, 2011). Teachers may have a strong incentive to teach to the test, leading to a strong focus on memorization and rote procedural knowledge, while compromising key skills such as empathy, groupwork, mathematical maturity, and analytical reasoning. These are thorny problems – education shapes the skills that shape society, so these questions have broad implications. With that said, by constraining the discussion to the kinds of constructs considered when building learning experiences, the goals of assessment become more tractable.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Roemmele, Melissa; Gordon, Andrew S.; Swanson, Reid
Evaluating Story Generation Systems Using Automated Linguistic Analyses Proceedings Article
In: Proceedings of the SIGKDD-2017 Workshop on Machine Learning for Creativity, ACM, Halifax, Nova Scotia, Canada, 2017.
@inproceedings{roemmele_evaluating_2017,
title = {Evaluating Story Generation Systems Using Automated Linguistic Analyses},
author = {Melissa Roemmele and Andrew S. Gordon and Reid Swanson},
url = {http://people.ict.usc.edu/~roemmele/publications/fiction_generation.pdf},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the SIGKDD-2017 Workshop on Machine Learning for Creativity},
publisher = {ACM},
address = {Halifax, Nova Scotia, Canada},
abstract = {Story generation is a well-recognized task in computational creativity research, but one that can be difficult to evaluate empirically. It is often inefficient and costly to rely solely on human feedback for judging the quality of generated stories. We address this by examining the use of linguistic analyses for automated evaluation, using metrics from existing work on predicting writing quality. We apply these metrics specifically to story continuation, where a model is given the beginning of a story and generates the next sentence, which is useful for systems that interactively support authors' creativity in writing. We compare sentences generated by different existing models to human-authored ones according to the analyses. The results show some meaningful differences between the models, suggesting that this evaluation approach may be advantageous for future research.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
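To make concrete the kind of automated linguistic analysis the entry above refers to, here is a small sketch computing simple proxies for writing quality (length, lexical diversity, mean word frequency) for a generated sentence. The metric set and the word_freq table are illustrative assumptions, not the paper's exact battery of measures.

    import nltk  # assumes the punkt tokenizer data has been downloaded

    def sentence_stats(sentence, word_freq):
        # word_freq: hypothetical dict mapping a word to its log corpus frequency
        tokens = [t.lower() for t in nltk.word_tokenize(sentence)]
        n = max(len(tokens), 1)
        return {
            "length": len(tokens),                     # sentence length
            "type_token_ratio": len(set(tokens)) / n,  # lexical diversity
            "mean_log_freq": sum(word_freq.get(t, 0.0) for t in tokens) / n,  # word rarity
        }

Statistics like these can then be compared between model-generated continuations and human-authored next sentences.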
Roemmele, Melissa; Mardo, Paola; Gordon, Andrew S.
Natural-language Interactive Narratives in Imaginal Exposure Therapy for Obsessive-Compulsive Disorder Proceedings Article
In: Proceedings of the Computational Linguistics and Clinical Psychology Workshop (CLPsych), pp. 48–57, Association for Computational Linguistics, Vancouver, Canada, 2017.
@inproceedings{roemmele_natural-language_2017,
title = {Natural-language Interactive Narratives in Imaginal Exposure Therapy for Obsessive-Compulsive Disorder},
author = {Melissa Roemmele and Paola Mardo and Andrew S. Gordon},
url = {http://www.aclweb.org/anthology/W17-31#page=58},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the Computational Linguistics and Clinical Psychology Workshop (CLPsych)},
pages = {48–57},
publisher = {Association for Computational Linguistics},
address = {Vancouver, Canada},
abstract = {Obsessive-compulsive disorder (OCD) is an anxiety-based disorder that affects around 2.5% of the population. A common treatment for OCD is exposure therapy, where the patient repeatedly confronts a feared experience, which has the long-term effect of decreasing their anxiety. Some exposures consist of reading and writing stories about an imagined anxiety-provoking scenario. In this paper, we present a technology that enables patients to interactively contribute to exposure stories by supplying natural language input (typed or spoken) that advances a scenario. This interactivity could potentially increase the patient’s sense of immersion in an exposure and contribute to its success. We introduce the NLP task behind processing inputs to predict new events in the scenario, and describe our initial approach. We then illustrate the future possibility of this work with an example of an exposure scenario authored with our application.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Manuvinakurike, Ramesh; DeVault, David; Georgila, Kallirroi
Using Reinforcement Learning to Model Incrementality in a Fast-Paced Dialogue Game Proceedings Article
In: Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue, SIGDIAL, Saarbruecken, Germany, 2017.
@inproceedings{manuvinakurike_using_2017,
title = {Using Reinforcement Learning to Model Incrementality in a Fast-Paced Dialogue Game},
author = {Ramesh Manuvinakurike and David DeVault and Kallirroi Georgila},
url = {http://www.manuvinakurike.com/papers/eve-2017.pdf},
year = {2017},
date = {2017-08-01},
booktitle = {Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue},
publisher = {SIGDIAL},
address = {Saarbruecken, Germany},
abstract = {We apply Reinforcement Learning (RL) to the problem of incremental dialogue policy learning in the context of a fast-paced dialogue game. We compare the policy learned by RL with a high performance baseline policy which has been shown to perform very efficiently (nearly as well as humans) in this dialogue game. The RL policy outperforms the baseline policy in offline simulations (based on real user data). We provide a detailed comparison of the RL policy and the baseline policy, including information about how much effort and time it took to develop each one of them. We also highlight the cases where the RL policy performs better, and show that understanding the RL policy can provide valuable insights which can inform the creation of an even better rule-based policy.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
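As a rough illustration of the reinforcement-learning setup described above, the sketch below shows tabular Q-learning over a toy incremental dialogue policy, where the agent repeatedly decides whether to keep listening or commit to a guess as partial ASR results arrive. The states, actions, and constants are hypothetical simplifications; the paper learns its policy in offline simulations based on real user data.

    import random
    from collections import defaultdict

    ACTIONS = ["WAIT", "GUESS"]          # keep listening vs. commit now
    ALPHA, GAMMA, EPSILON = 0.1, 0.95, 0.2

    q = defaultdict(float)               # (state, action) -> estimated value

    def choose(state):
        # epsilon-greedy exploration over the incremental actions
        if random.random() < EPSILON:
            return random.choice(ACTIONS)
        return max(ACTIONS, key=lambda a: q[(state, a)])

    def update(state, action, reward, next_state, done):
        # one-step Q-learning backup
        target = reward if done else reward + GAMMA * max(
            q[(next_state, a)] for a in ACTIONS)
        q[(state, action)] += ALPHA * (target - q[(state, action)])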
Ghosh, Sayan; Chollet, Mathieu; Laksana, Eugene; Morency, Louis-Philippe; Scherer, Stefan
Affect-LM: A Neural Language Model for Customizable Affective Text Generation Proceedings Article
In: Proceedings of the Annual Meeting of the Association for Computational Linguistics 2017, Association for Computational Linguistics, Vancouver, Canada, 2017.
@inproceedings{ghosh_affect-lm_2017,
title = {Affect-LM: A Neural Language Model for Customizable Affective Text Generation},
author = {Sayan Ghosh and Mathieu Chollet and Eugene Laksana and Louis-Philippe Morency and Stefan Scherer},
url = {https://arxiv.org/pdf/1704.06851.pdf},
year = {2017},
date = {2017-07-01},
booktitle = {Proceedings of the Annual Meeting of the Association for Computational Linguistics 2017},
publisher = {Association for Computational Linguistics},
address = {Vancouver, Canada},
abstract = {Human verbal communication includes affective messages which are conveyed through use of emotionally colored words. There has been a lot of research in this direction but the problem of integrating state-of-the-art neural language models with affective information remains an area ripe for exploration. In this paper, we propose an extension to an LSTM (Long Short-Term Memory) language model for generating conversational text, conditioned on affect categories. Our proposed model, Affect-LM, enables us to customize the degree of emotional content in generated sentences through an additional design parameter. Perception studies conducted using Amazon Mechanical Turk show that Affect-LM generates naturally looking emotional sentences without sacrificing grammatical correctness. Affect-LM also learns affect-discriminative word representations, and perplexity experiments show that additional affective information in conversational text can improve language model prediction.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
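The Affect-LM formulation lends itself to a compact sketch: a standard LSTM language model whose next-word logits receive an additional affect-dependent term scaled by a strength parameter beta. The layer sizes and names below are assumptions for illustration, not the published implementation.

    import torch
    import torch.nn as nn

    class AffectLM(nn.Module):
        def __init__(self, vocab_size, n_affects=5, emb=128, hidden=256):
            super().__init__()
            self.embed = nn.Embedding(vocab_size, emb)
            self.lstm = nn.LSTM(emb, hidden, batch_first=True)
            self.word_head = nn.Linear(hidden, vocab_size)        # standard LM term
            self.affect_head = nn.Linear(n_affects, vocab_size)   # affective bias term

        def forward(self, tokens, affect_onehot, beta=1.0):
            h, _ = self.lstm(self.embed(tokens))                  # (B, T, hidden)
            # beta controls the degree of emotional coloring at generation time
            return self.word_head(h) + beta * self.affect_head(affect_onehot).unsqueeze(1)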
LeGendre, Chloe; Krissman, David; Debevec, Paul
Improved Chromakey of Hair Strands via Orientation Filter Convolution Proceedings Article
In: Proceedings of SIGGRAPH '17, ACM SIGGRAPH 2017, pp. 1–2, ACM Press, Los Angeles, CA, 2017, ISBN: 978-1-4503-5015-0.
@inproceedings{legendre_improved_2017,
title = {Improved Chromakey of Hair Strands via Orientation Filter Convolution},
author = {Chloe LeGendre and David Krissman and Paul Debevec},
url = {http://dl.acm.org/citation.cfm?id=3102200},
doi = {10.1145/3102163.3102200},
isbn = {978-1-4503-5015-0},
year = {2017},
date = {2017-07-01},
booktitle = {Proceedings of SIGGRAPH '17, ACM SIGGRAPH 2017},
pages = {1–2},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {We present a technique for improving the alpha matting of challenging green-screen video sequences involving hair strands. As hair strands are thin and can be semi-translucent, they are especially hard to separate from a background. However, they appear as extended lines and thus have a strong response when convolved with oriented filters, even in the presence of noise. We leverage this oriented filter response to robustly locate hair strands within each frame of an actor’s performance filmed in front of a green-screen. We demonstrate using production video footage that individual hair fibers excluded from a coarse artist’s matte can be located and then added to the foreground element, qualitatively improving the composite result without added manual labor.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
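The oriented-filter idea in the chromakey entry above can be sketched with a Gabor filter bank: thin, line-like structures such as backlit hair fibers respond strongly to at least one orientation, so the per-pixel maximum over orientations gives a strand-response map that can be thresholded and merged into the matte. The kernel parameters and threshold are assumptions chosen for illustration.

    import cv2
    import numpy as np

    def hair_response(gray, n_orientations=16):
        # Maximum response over a bank of oriented Gabor kernels.
        responses = []
        for i in range(n_orientations):
            theta = i * np.pi / n_orientations
            kernel = cv2.getGaborKernel((21, 21), sigma=3.0, theta=theta,
                                        lambd=8.0, gamma=0.3, psi=0)
            responses.append(cv2.filter2D(gray.astype(np.float32), -1, kernel))
        return np.max(responses, axis=0)

    gray = cv2.imread("greenscreen_frame.png", cv2.IMREAD_GRAYSCALE)  # hypothetical frame
    strand_mask = hair_response(gray) > 40.0  # threshold tuned by inspection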
Laine, Samuli; Karras, Tero; Aila, Timo; Herva, Antti; Saito, Shunsuke; Yu, Ronald; Li, Hao; Lehtinen, Jaakko
Production-level facial performance capture using deep convolutional neural networks Proceedings Article
In: Proceedings of the ACM SIGGRAPH / Eurographics Symposium on Computer Animation, pp. 1–10, ACM Press, Los Angeles, CA, 2017, ISBN: 978-1-4503-5091-4.
@inproceedings{laine_production-level_2017,
title = {Production-level facial performance capture using deep convolutional neural networks},
author = {Samuli Laine and Tero Karras and Timo Aila and Antti Herva and Shunsuke Saito and Ronald Yu and Hao Li and Jaakko Lehtinen},
url = {http://dl.acm.org/citation.cfm?doid=3099564.3099581},
doi = {10.1145/3099564.3099581},
isbn = {978-1-4503-5091-4},
year = {2017},
date = {2017-07-01},
booktitle = {Proceedings of the ACM SIGGRAPH / Eurographics Symposium on Computer Animation},
pages = {1–10},
publisher = {ACM Press},
address = {Los Angeles, CA},
abstract = {We present a real-time deep learning framework for video-based facial performance capture—the dense 3D tracking of an actor's face given a monocular video. Our pipeline begins with accurately capturing a subject using a high-end production facial capture pipeline based on multi-view stereo tracking and artist-enhanced animations. With 5–10 minutes of captured footage, we train a convolutional neural network to produce high-quality output, including self-occluded regions, from a monocular video sequence of that subject. Since this 3D facial performance capture is fully automated, our system can drastically reduce the amount of labor involved in the development of modern narrative-driven video games or films involving realistic digital doubles of actors and potentially hours of animated dialogue per character. We compare our results with several state-of-the-art monocular real-time facial capture techniques and demonstrate compelling animation inference in challenging areas such as eyes and lips.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
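The core of the capture pipeline described above is a network that maps a single video frame to a dense facial mesh. A deliberately small sketch of that regression setup follows; the real system's architecture, input processing, and training data are far more elaborate, and all sizes here are assumptions.

    import torch
    import torch.nn as nn

    class FrameToMesh(nn.Module):
        # Regresses n_vertices 3D positions from a grayscale face crop.
        def __init__(self, n_vertices=5000):
            super().__init__()
            self.n_vertices = n_vertices
            self.features = nn.Sequential(
                nn.Conv2d(1, 32, 5, stride=2), nn.ReLU(),
                nn.Conv2d(32, 64, 3, stride=2), nn.ReLU(),
                nn.Conv2d(64, 96, 3, stride=2), nn.ReLU(),
                nn.AdaptiveAvgPool2d(4),
            )
            self.regress = nn.Linear(96 * 4 * 4, n_vertices * 3)

        def forward(self, frame):                       # frame: (B, 1, H, W)
            f = self.features(frame).flatten(1)
            return self.regress(f).view(-1, self.n_vertices, 3)

    # Training would minimize, e.g., nn.MSELoss() against vertex positions
    # produced by the multi-view capture pipeline for the same frames.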
Powell, Wendy; Sharkey, Paul; Rizzo, Albert; Merrick, Joav (Ed.)
Virtual reality: recent advances for health and wellbeing Book
Nova Science Publishers, New York, NY, 2017, ISBN: 978-1-5361-2454-5.
@book{powell_virtual_2017,
title = {Virtual reality: recent advances for health and wellbeing},
editor = {Wendy Powell and Paul Sharkey and Albert Rizzo and Joav Merrick},
url = {https://researchportal.port.ac.uk/portal/en/publications/virtual-reality(f56cf1d4-6f04-4cda-84f3-b8bfacf585af)/export.html},
isbn = {978-1-5361-2454-5},
year = {2017},
date = {2017-07-01},
publisher = {Nova Science Publishers},
address = {New York, NY},
abstract = {Virtual reality and human interaction with it is a complex topic, and certainly not one which will be mastered overnight; but across the world, there is excellent research being carried out for all of these important domains. As humanity extends its understanding of the interplay with these system components, developers will be well-positioned to design better and more effective virtual reality interventions and come closer to realising the full potential of virtual reality for health and well-being. In this book, the authors present a number of short papers from research groups around the world working in this important and complex field. The chapters explore a range of issues, suggesting routes forward and offering insights into both the potential and the challenges of this rapidly maturing technology.},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
Henry, Cassidy; Moolchandani, Pooja; Pollard, Kimberly A.; Bonial, Claire; Foots, Ashley; Artstein, Ron; Hayes, Cory; Voss, Clare R.; Traum, David; Marge, Matthew
Towards Efficient Human-Robot Dialogue Collection: Moving Fido into the Virtual World Proceedings Article
In: Proceedings of the WiNLP workshop, Vancouver, Canada, 2017.
@inproceedings{cassidy_towards_2017,
title = {Towards Efficient Human-Robot Dialogue Collection: Moving Fido into the Virtual World},
author = {Cassidy Henry and Pooja Moolchandani and Kimberly A. Pollard and Claire Bonial and Ashley Foots and Ron Artstein and Cory Hayes and Clare R. Voss and David Traum and Matthew Marge},
url = {http://www.winlp.org/wp-content/uploads/2017/final_papers_2017/52_Paper.pdf},
year = {2017},
date = {2017-07-01},
booktitle = {Proceedings of the WiNLP workshop},
address = {Vancouver, Canada},
abstract = {Our research aims to develop a natural dialogue interface between robots and humans. We describe two focused efforts to increase data collection efficiency towards this end: creation of an annotated corpus of interaction data, and a robot simulation, allowing greater flexibility in when and where we can run experiments.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Saito, Shunsuke; Wei, Lingyu; Hu, Liwen; Nagano, Koki; Li, Hao
Photorealistic Facial Texture Inference Using Deep Neural Networks Proceedings Article
In: Proceedings of the 30th IEEE International Conference on Computer Vision and Pattern Recognition 2017 (CVPR 2017), IEEE, Honolulu, HI, 2017.
@inproceedings{saito_photorealistic_2017,
title = {Photorealistic Facial Texture Inference Using Deep Neural Networks},
author = {Shunsuke Saito and Lingyu Wei and Liwen Hu and Koki Nagano and Hao Li},
url = {https://arxiv.org/abs/1612.00523},
year = {2017},
date = {2017-07-01},
booktitle = {Proceedings of the 30th IEEE International Conference on Computer Vision and Pattern Recognition 2017 (CVPR 2017)},
publisher = {IEEE},
address = {Honolulu, HI},
abstract = {We present a data-driven inference method that can synthesize a photorealistic texture map of a complete 3D face model given a partial 2D view of a person in the wild. After an initial estimation of shape and low-frequency albedo, we compute a high-frequency partial texture map, without the shading component, of the visible face area. To extract the fine appearance details from this incomplete input, we introduce a multi-scale detail analysis technique based on mid-layer feature correlations extracted from a deep convolutional neural network. We demonstrate that fitting a convex combination of feature correlations from a high-resolution face database can yield a semantically plausible facial detail description of the entire face. A complete and photorealistic texture map can then be synthesized by iteratively optimizing for the reconstructed feature correlations. Using these high-resolution textures and a commercial rendering framework, we can produce high-fidelity 3D renderings that are visually comparable to those obtained with state-of-the-art multi-view face capture systems. We demonstrate successful face reconstructions from a wide range of low resolution input images, including those of historical figures. In addition to extensive evaluations, we validate the realism of our results using a crowdsourced user study.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
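The "feature correlations" matched by the texture-inference method above are commonly computed as Gram matrices over convolutional feature maps. A minimal sketch, assuming a pretrained feature extractor (e.g., a VGG layer) supplies the activations:

    import torch

    def gram_matrix(features):
        # features: (channels, height, width) activation map from one layer
        c, h, w = features.shape
        f = features.reshape(c, h * w)
        return (f @ f.t()) / (h * w)   # (channels, channels) channel correlations

Matching such matrices between a partial observation and a database of high-resolution faces is what lets the method transfer plausible fine detail to unseen regions.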
2009
Parsons, Thomas D.; Kenny, Patrick G.; Cosand, Louise; Iyer, Arvind; Courtney, Chris; Rizzo, Albert
A Virtual Human Agent for Assessing Bias in Novice Therapists Journal Article
In: Medicine Meets Virtual Reality, vol. 17, pp. 253–258, 2009.
@article{parsons_virtual_2009,
title = {A Virtual Human Agent for Assessing Bias in Novice Therapists},
author = {Thomas D. Parsons and Patrick G. Kenny and Louise Cosand and Arvind Iyer and Chris Courtney and Albert Rizzo},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Human%20Agent%20for%20Assessing%20Bias%20in%20Novice%20Therapists.pdf},
doi = {10.3233/978-1-58603-964-6-253},
year = {2009},
date = {2009-01-01},
journal = {Medicine Meets Virtual Reality},
volume = {17},
pages = {253–258},
abstract = {Monitoring the psychological and physiological activity of persons interacting with virtual humans poses exacting measurement challenges. Three experiments are reported in this paper. In these experiments we made use of Virtual Human Agent technology to assess persons' psychological and physiological responses to Virtual Standardized Patients. The first experiment provided support for the usability of the Virtual Standardized Patients through the use of a virtual character emulating an adolescent male with conduct disorder. In the second experiment we further developed the technology and aimed at assessing whether novice mental health clinicians could conduct an interview with a virtual character that emulates an adolescent female who has recently been physically traumatized. The third experiment looked at the usability of Virtual Standardized Patients for eliciting psychophysiological responses following exposure to virtual humans representing different ethnicities.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Melo, Celso M. de; Gratch, Jonathan
Expression of Emotions using Wrinkles, Blushing, Sweating and Tears Proceedings Article
In: Proceedings of the 9th International Conference on Intelligent Virtual Agents (IVA), Amsterdam, The Netherlands, 2009, ISBN: 978-3-642-04379-6.
@inproceedings{de_melo_expression_2009,
title = {Expression of Emotions using Wrinkles, Blushing, Sweating and Tears},
author = {Celso M. de Melo and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Expression%20of%20Emotions%20using%20Wrinkles%20Blushing%20Sweating%20and%20Tears.pdf},
doi = {10.1007/978-3-642-04380-2_23},
isbn = {978-3-642-04379-6},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the 9th International Conference on Intelligent Virtual Agents (IVA)},
address = {Amsterdam, The Netherlands},
abstract = {Wrinkles, blushing, sweating and tears are physiological manifestations of emotions in humans. Therefore, the simulation of these phenomena is important for the goal of building believable virtual humans which interact naturally and effectively with humans. This paper describes a real-time model for the simulation of wrinkles, blushing, sweating and tears. A study is also conducted to assess the influence of the model on the perception of surprise, sadness, anger, shame, pride and fear. The study follows a repeated-measures design where subjects compare how well each emotion is expressed by virtual humans with or without these phenomena. The results reveal a significant positive effect on the perception of surprise, sadness, anger, shame and fear. The relevance of these results is discussed for the fields of virtual humans and expression of emotions.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Chu, Selina; Narayanan, Shrikanth; Kuo, C.-C. Jay
A Semi-Supervised Learning Approach to Online Audio Background Detection Proceedings Article
In: Proceedings of ICASSP 2009, Taipei, Taiwan, 2009.
@inproceedings{chu_semi-supervised_2009,
title = {A Semi-Supervised Learning Approach to Online Audio Background Detection},
author = {Selina Chu and Shrikanth Narayanan and C.-C. Jay Kuo},
url = {http://ict.usc.edu/pubs/A%20Semi-Supervised%20Learning%20Appproach%20to%20Online%20Audio%20Background%20Detection.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of ICASSP 2009},
address = {Taipei, Taiwan},
abstract = {We present a framework for audio background modeling of complex and unstructured audio environments. The determination of background audio is important for understanding and predicting the ambient context surrounding an agent, both human and machine. Our method extends the online adaptive Gaussian Mixture model technique to model variations in the background audio. We propose a method for learning the initial background model using a semi-supervised learning approach. This information is then integrated into the online background determination process, providing us with a more complete background model. We show that we can utilize both labeled and unlabeled data to improve audio classification performance. By incorporating prediction models in the determination process, we can improve the background detection performance even further. Experimental results on real data sets demonstrate the effectiveness of our proposed method.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
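A single update step of the online adaptive Gaussian mixture idea referenced above might look like the sketch below, operating on a per-frame audio feature vector (e.g., MFCCs) with diagonal covariances. The update rule is a simplified Stauffer-Grimson-style adaptation; the constants and matching test are assumptions, not the paper's exact formulation.

    import numpy as np

    def update_gmm(x, means, variances, weights, lr=0.01, match_thresh=2.5):
        # x: (D,) feature vector; means/variances: (K, D); weights: (K,)
        d = np.sqrt(((x - means) ** 2 / variances).sum(axis=1))  # per-component distance
        k = int(np.argmin(d))
        matched = bool(d[k] < match_thresh)
        weights *= (1 - lr)
        if matched:
            weights[k] += lr
            rho = lr / max(weights[k], 1e-8)
            means[k] += rho * (x - means[k])
            variances[k] += rho * ((x - means[k]) ** 2 - variances[k])
        else:
            # replace the weakest component with one centered on the new frame
            j = int(np.argmin(weights))
            means[j], variances[j], weights[j] = x.copy(), np.ones_like(x), lr
        weights /= weights.sum()
        return matched  # True when the frame fits the current background model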
Benotti, Luciana; Traum, David
A computational account of comparative implicatures for a spoken dialogue agent Proceedings Article
In: Proceedings of the 8th International Conference on Computational Semantics, Tilburg, The Netherlands, 2009.
@inproceedings{benotti_computational_2009,
title = {A computational account of comparative implicatures for a spoken dialogue agent},
author = {Luciana Benotti and David Traum},
url = {http://ict.usc.edu/pubs/A%20computational%20account%20of%20comparative%20implicatures%20for%20a%20spoken%20dialogue%20agent.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the 8th International Conference on Computational Semantics},
address = {Tilburg, The Netherlands},
abstract = {Comparative constructions are common in dialogue, especially in negotiative dialogue where a choice must be made between different options, and options must be evaluated using multiple metrics. Comparatives explicitly assert a relationship between two elements along a scale, but they may also implicate positions on the scale especially if constraints on the possible values are present. Dialogue systems must often understand more from a comparative than the explicit assertion in order to understand why the comparative was uttered. In this paper we examine the pragmatic meaning of comparative constructions from a computational perspective.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Alexander, Oleg; Rogers, Mike; Lambeth, William; Chiang, Matt; Debevec, Paul
Creating a Photoreal Digital Actor: The Digital Emily Project Technical Report
University of Southern California Institute for Creative Technologies, London, UK, no. ICT TR 04 2009, 2009.
@techreport{alexander_creating_2009-1,
title = {Creating a Photoreal Digital Actor: The Digital Emily Project},
author = {Oleg Alexander and Mike Rogers and William Lambeth and Matt Chiang and Paul Debevec},
url = {http://ict.usc.edu/pubs/ICT%20TR%2004%202009.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {IEEE European Conference on Visual Media Production (CVMP)},
number = {ICT TR 04 2009},
address = {London, UK},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {The Digital Emily Project is a collaboration between facial animation company Image Metrics and the Graphics Laboratory at the University of Southern California's Institute for Creative Technologies to achieve one of the world's first photorealistic digital facial performances. The project leverages latest-generation techniques in high-resolution face scanning, character rigging, video-based facial animation, and compositing. An actress was first filmed on a studio set speaking emotive lines of dialog in high definition. The lighting on the set was captured as a high dynamic range light probe image. The actress' face was then three-dimensionally scanned in thirty-three facial expressions showing different emotions and mouth and eye movements using a high-resolution facial scanning process accurate to the level of skin pores and fine wrinkles. Lighting-independent diffuse and specular reflectance maps were also acquired as part of the scanning process. Correspondences between the 3D expression scans were formed using a semi-automatic process, allowing a blendshape facial animation rig to be constructed whose expressions closely mirrored the shapes observed in the rich set of facial scans; animated eyes and teeth were also added to the model. Skin texture detail showing dynamic wrinkling was converted into multiresolution displacement maps also driven by the blend shapes. A semi-automatic video-based facial animation system was then used to animate the 3D face rig to match the performance seen in the original video, and this performance was tracked onto the facial motion in the studio video. The final face was illuminated by the captured studio illumination and shading using the acquired reflectance maps with a skin translucency shading algorithm. Using this process, the project was able to render a synthetic facial performance which was generally accepted as being a real face.},
keywords = {},
pubstate = {published},
tppubtype = {techreport}
}
Morency, Louis-Philippe; Kok, Iwan de; Gratch, Jonathan
A Probabilistic Multimodal Approach for Predicting Listener Backchannels Journal Article
In: Journal of Autonomous Agents and Multi-Agent Systems, vol. 20, no. 1, pp. 70–84, 2009.
@article{morency_probabilistic_2009,
title = {A Probabilistic Multimodal Approach for Predicting Listener Backchannels},
author = {Louis-Philippe Morency and Iwan de Kok and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/A%20Probabilistic%20Multimodal%20Approach%20for%20Predicting%20Listener%20Backchannels.pdf},
year = {2009},
date = {2009-01-01},
journal = {Journal of Autonomous Agents and Multi-Agent Systems},
volume = {20},
number = {1},
pages = {70–84},
abstract = {During face-to-face interactions, listeners use backchannel feedback such as head nods as a signal to the speaker that the communication is working and that they should continue speaking. Predicting these backchannel opportunities is an important milestone for building engaging and natural virtual humans. In this paper we show how sequential probabilistic models (e.g., Hidden Markov Model or Conditional Random Fields) can automatically learn from a database of human-to-human interactions to predict listener backchannels using the speaker multimodal output features (e.g., prosody, spoken words and eye gaze). The main challenges addressed in this paper are automatic selection of the relevant features and optimal feature representation for probabilistic models. For prediction of visual backchannel cues (i.e., head nods), our prediction model shows a statistically significant improvement over a previously published approach based on hand-crafted rules.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
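The sequential probabilistic models named in the abstract above (HMMs, Conditional Random Fields) can be exercised with off-the-shelf tooling. The sketch below trains a linear-chain CRF to label time frames where a listener head nod is an appropriate response; the feature names are illustrative stand-ins for the paper's multimodal speaker features (prosody, words, gaze), and the toy data is an assumption.

    import sklearn_crfsuite  # pip install sklearn-crfsuite

    sequences = [  # toy stand-in for preprocessed interaction sequences
        [{"pitch_slope": -0.8, "word": "right", "gaze": 1, "pause": 1, "nod": 1},
         {"pitch_slope": 0.2, "word": "so", "gaze": 0, "pause": 0, "nod": 0}],
    ]

    def frame_features(frame):
        # Hypothetical per-frame multimodal features of the speaker.
        return {
            "pitch_slope": frame["pitch_slope"],   # prosody
            "word": frame["word"],                 # lexical
            "gaze_at_listener": frame["gaze"],     # eye gaze
            "pause": frame["pause"],
        }

    X_train = [[frame_features(f) for f in seq] for seq in sequences]
    y_train = [["NOD" if f["nod"] else "NONE" for f in seq] for seq in sequences]

    crf = sklearn_crfsuite.CRF(algorithm="lbfgs", c1=0.1, c2=0.1, max_iterations=100)
    crf.fit(X_train, y_train)
    predictions = crf.predict(X_train[:1])  # per-frame backchannel opportunities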
Morie, Jacquelyn; Antonisse, Jamie; Bouchard, Sean; Chance, Eric
Virtual Worlds as a Healing Modality for Returning Soldiers and Veterans Proceedings Article
In: Annual Review of CyberTherapy and Telemedicine; Studies in Health Technology and Informatics, IOS Press, 2009.
@inproceedings{morie_virtual_2009,
title = {Virtual Worlds as a Healing Modality for Returning Soldiers and Veterans},
author = {Jacquelyn Morie and Jamie Antonisse and Sean Bouchard and Eric Chance},
url = {http://ict.usc.edu/pubs/Virtual%20Worlds%20as%20a%20Healing%20Modality%20for%20Returning%20Soldiers%20and%20Veterans.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Annual Review of CyberTherapy and Telemedicine; Studies in Health Technology and Informatics},
volume = {144},
publisher = {IOS Press},
abstract = {Those who have served in recent conflicts face many challenges as they reintegrate into society. In addition to recovering from physical wounds, traumatic brain injury and post-traumatic stress disorders, many soldiers also face basic psychological issues about who they are and how to find their place in a society that has not shared their experiences. To address these challenges, we have created a space that provides ongoing opportunities for healing activities, personal exploration and social camaraderie in an online virtual world, Second Life. In such worlds, where each avatar is controlled by a live individual, experiences can be unintuitive, uninviting, considered boring or difficult to control. To counter this, we are implementing autonomous intelligent agent avatars that can be "on duty" 24/7, serving as guides and information repositories, making the space and activities easy to find and even personalized to the visitor's needs. We report the results of usability testing with an in-world veterans' group. Tests comparing soldiers who use this space as part of their reintegration regimen compared to those who do not are being scheduled as part of the Army's Warriors in Transition program.},
keywords = {Virtual Humans, Virtual Worlds},
pubstate = {published},
tppubtype = {inproceedings}
}
Kenny, Patrick G.; Parsons, Thomas D.; Gratch, Jonathan; Rizzo, Albert
Evaluation of Novice and Expert Interpersonal Interaction Skills with a Virtual Patient Proceedings Article
In: Lecture Notes in Artificial Intelligence; Proceedings of the 9th International Conference on Intelligent Virtual Agents (IVA), pp. 511–512, Amsterdam, 2009.
@inproceedings{kenny_evaluation_2009,
title = {Evaluation of Novice and Expert Interpersonal Interaction Skills with a Virtual Patient},
author = {Patrick G. Kenny and Thomas D. Parsons and Jonathan Gratch and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Evaluation%20of%20Novice%20and%20Expert%20Interpersonal%20Interaction%20Skills%20with%20a%20Virtual%20Patient.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Lecture Notes in Artificial Intelligence; Proceedings of the 9th International Conference on Intelligent Virtual Agents (IVA)},
volume = {5773},
pages = {511–512},
address = {Amsterdam},
abstract = {Interactive Virtual Standardized Patients (VP) can provide meaningful training for clinicians. These VPs portray interactive embodied conversational characters with realistic representations of a mental or physical problem to be diagnosed or discussed. This research continues the evaluation of our VP "Justina", who suffers from Posttraumatic Stress Disorder (PTSD) from a sexual attack, and presents the results of comparing novices, test subjects without medical training, and experts interacting with "Justina" to find out if they could elicit the proper responses to make a diagnosis, and to investigate the topics and questions the novices asked for coverage of the categories and criteria of PTSD as defined in the DSM-IV. It is assumed that novices will perform better than experts; however, the main investigation is to gather empirical data and understand why this is true and how this can be used to improve the system. There have not been, to the authors' knowledge, any studies evaluating experts and non-experts with virtual human characters in the psychological domain.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Lamond, Bruce; Peers, Pieter; Ghosh, Abhijeet; Debevec, Paul
Image-based Separation of Diffuse and Specular Reflections using Environmental Structured Illumination Proceedings Article
In: IEEE International Conference on Computational Photography, 2009.
@inproceedings{lamond_image-based_2009,
title = {Image-based Separation of Diffuse and Specular Reflections using Environmental Structured Illumination},
author = {Bruce Lamond and Pieter Peers and Abhijeet Ghosh and Paul Debevec},
url = {http://ict.usc.edu/pubs/Image-based%20Separation%20of%20Diffuse%20and%20Specular%20Reflections%20using%20Environmental%20Structured%20Illumination.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {IEEE International Conference on Computational Photography},
number = {ICT TR 01 2009},
institution = {University of Southern California Institute for Creative Technologies},
abstract = {We present an image-based method for separating diffuse and specular reflections using environmental structured illumination. Two types of structured illumination are discussed: phase-shifted sine wave patterns, and phase-shifted binary stripe patterns. In both cases the low-pass filtering nature of diffuse reflections is utilized to separate the reflection components. We illustrate our method on a wide range of example scenes and applications.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Audhkhasi, Kartik; Georgiou, Panayiotis G.; Narayanan, Shrikanth
Lattice-based Lexical Cues for Word Fragment Detection in Conversational Speech Proceedings Article
In: Proceedings of the IEEE Workshop on Automatic Speech Recognition and Understanding, 2009.
@inproceedings{audhkhasi_lattice-based_2009,
title = {Lattice-based Lexical Cues for Word Fragment Detection in Conversational Speech},
author = {Kartik Audhkhasi and Panayiotis G. Georgiou and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Lattice-based%20Lexical%20Cues%20for%20Word%20Fragment%20Detection%20in%20Conversational%20Speech.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the IEEE Workshop on Automatic Speech Recognition and Understanding},
abstract = {Previous approaches to the problem of word fragment detection in speech have focussed primarily on acoustic-prosodic features [1], [2]. This paper proposes that the output of a continuous Automatic Speech Recognition (ASR) system can also be used to derive robust lexical features for the task. We hypothesize that the confusion in the word lattice generated by the ASR system can be exploited for detecting word fragments. Two sets of lexical features are proposed: one which is based on the word confusion, and the other based on the pronunciation confusion between the word hypotheses in the lattice. Classification experiments with a Support Vector Machine (SVM) classifier show that these lexical features perform better than the previously proposed acoustic-prosodic features by around 5.20% (relative) on a corpus chosen from the DARPA Transtac Iraqi-English (San Diego) corpus [3]. A combination of both these feature sets improves the word fragment detection accuracy by 11.50% relative to using just the acoustic-prosodic features.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rushforth, Michael; Gandhe, Sudeep; Artstein, Ron; Roque, Antonio; Ali, Sarrah; Whitman, Nicolle; Traum, David
Varying Personality in Spoken Dialogue with a Virtual Human Proceedings Article
In: Proceedings of the Intelligent Virtual Humans Conference (IVA-09), 2009.
@inproceedings{rushforth_varying_2009-1,
title = {Varying Personality in Spoken Dialogue with a Virtual Human},
author = {Michael Rushforth and Sudeep Gandhe and Ron Artstein and Antonio Roque and Sarrah Ali and Nicolle Whitman and David Traum},
url = {http://ict.usc.edu/pubs/Varying%20Personality%20in%20Spoken%20Dialogue%20with%20a%20Virtual%20Human.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the Intelligent Virtual Humans Conference (IVA-09)},
abstract = {We extend a virtual human architecture that has been used to build tactical questioning characters with a parameterizable personality model, allowing characters to be designed with different personalities, allowing a richer set of possible user interactions in a training environment. Two experiments were carried out to evaluate the framework. In the first, it was determined that personality models do have an impact on user perception of several aspects of the personality of the character. In the second, a model of assertiveness was evaluated and found to have a small but significant impact on the users who interacted with the full virtual human, and larger differences in judgement of annotators who examined only the verbal transcripts of the interaction.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Difede, JoAnn; Rothbaum, Barbara O.; Johnston, Scott; McLay, Robert N.; Reger, Greg; Gahm, Greg; Parsons, Thomas D.; Graap, Ken; Pair, Jarrell
VR PTSD Exposure Therapy Results with Active Duty OIF/OEF Combatants Journal Article
In: Medicine Meets Virtual Reality, vol. 17, 2009.
@article{rizzo_vr_2009,
title = {VR PTSD Exposure Therapy Results with Active Duty OIF/OEF Combatants},
author = {Albert Rizzo and JoAnn Difede and Barbara O. Rothbaum and Scott Johnston and Robert N. McLay and Greg Reger and Greg Gahm and Thomas D. Parsons and Ken Graap and Jarrell Pair},
url = {http://ict.usc.edu/pubs/VR%20PTSD%20Exposure%20Therapy%20Results%20with%20Active%20Duty%20OIF%20OEF%20Combatants.pdf},
year = {2009},
date = {2009-01-01},
journal = {Medicine Meets Virtual Reality},
volume = {17},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Reports indicate that at least 1 out of 6 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality exposure therapy has been previously used for PTSD with reports of positive outcomes. This paper will present a brief description of the USC/ICT Virtual Iraq/Afghanistan PTSD therapy application and present clinical outcome data from active duty patients treated at the Naval Medical Center-San Diego (NMCSD) as of October 2009. Initial outcomes from the first twenty patients to complete treatment indicate that 16 no longer meet diagnostic criteria for PTSD at post treatment. Research and clinical tests using the Virtual Iraq/Afghanistan software are also currently underway at Weill Cornell Medical College, Emory University, Fort Lewis and WRAMC along with 20 other test sites.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Jan, Dusan; Roque, Antonio; Leuski, Anton; Morie, Jacquelyn; Traum, David
A Virtual Tour Guide for Virtual Worlds Proceedings Article
In: Intelligent Virtual Agents Conference (IVA), Amsterdam, The Netherlands, 2009.
@inproceedings{jan_virtual_2009,
title = {A Virtual Tour Guide for Virtual Worlds},
author = {Dusan Jan and Antonio Roque and Anton Leuski and Jacquelyn Morie and David Traum},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Tour%20Guide%20for%20Virtual%20Worlds.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Intelligent Virtual Agents Conference (IVA)},
address = {Amsterdam, The Netherlands},
abstract = {In this paper we present an implementation of an embodied conversational agent that serves as a virtual tour guide in Second Life. We show how we combined the abilities of a conversational agent with navigation in the world and present some preliminary evaluation results.},
keywords = {Virtual Humans, Virtual Worlds},
pubstate = {published},
tppubtype = {inproceedings}
}
Morie, Jacquelyn
Re-Entry: Online virtual worlds as a healing space for veterans Proceedings Article
In: Proceedings of SPIE, 2009.
@inproceedings{morie_re-entry_2009,
title = {Re-Entry: Online virtual worlds as a healing space for veterans},
author = {Jacquelyn Morie},
url = {http://ict.usc.edu/pubs/Re-Entry-%20Online%20virtual%20worlds%20as%20a%20healing%20space%20for%20veterans.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of SPIE},
volume = {7238},
abstract = {We describe a project designed to use the power of online virtual worlds as a place of camaraderie and healing for returning United States military veterans–a virtual space that can help them deal with problems related to their time of service and also assist in their reintegration into society. This veterans' space is being built in Second Life®, a popular immersive world, under consultation with medical experts and psychologists, with several types of both social and healing activities planned. In addition, we address several barrier issues with virtual worlds, including lack of guides or helpers to ensure the participants have a quality experience. To solve some of these issues, we are porting the advanced intelligence of the ICT’s virtual human characters to avatars in Second Life®, so they will be able to greet the veterans, converse with them, guide them to relevant activities, and serve as informational agents for healing options. In this way such "avatar agents" will serve as autonomous intelligent characters that bring maximum engagement and functionality to the veterans' space. This part of the effort expands online worlds beyond their existing capabilities, as currently a human being must operate each avatar in the virtual world; few autonomous characters exist. As this project progresses we will engage in an iterative design process with veteran participants who will be able to advise us, along with the medical community, on what efforts are well suited to, and most effective within, the virtual world.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.
Story-Based Learning Environments Book Section
In: The PSI Handbook of Virtual Environments for Training and Education: Developments for the Military and Beyond, Volume 2: Components and Training Technologies, vol. 2, Praeger Security International, Westport, CT, 2009.
@incollection{gordon_story-based_2009,
title = {Story-Based Learning Environments},
author = {Andrew S. Gordon},
url = {http://www.ict.usc.edu/pubs/Story%20based%20Learning%20Environments.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {The PSI Handbook of Virtual Environments for Training and Education: Developments for the Military and Beyond, Volume 2: Components and Training Technologies},
volume = {2},
publisher = {Praeger Security International},
address = {Westport, CT},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {incollection}
}
2008
McAlinden, Ryan; Gordon, Andrew S.; Lane, H. Chad; Hart, John; Durlach, Paula
UrbanSim: A game-based instructional package for conducting counterinsurgency operations Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
@inproceedings{mcalinden_urbansim_2008,
title = {UrbanSim: A game-based instructional package for conducting counterinsurgency operations},
author = {Ryan McAlinden and Andrew S. Gordon and H. Chad Lane and John Hart and Paula Durlach},
url = {http://ict.usc.edu/pubs/UrbanSim-%20A%20game-based%20instructional%20package%20for%20conducting%20counterinsurgency%20operations.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {Operation Iraqi Freedom and Operation Enduring Freedom have identified the need for instructional and training solutions that develop the skills of Battalion and Brigade Commanders in formulating situational understanding in order to successfully lead operations in a counterinsurgency environment. In this paper we describe the UrbanSim Learning Package, a game-based instructional software suite for Commanders and their staffs for directing and coordinating full-spectrum operations where the stability component is predominant. We describe a formal instructional design approach to the development of this instructional software, which consists of a component that introduces key concepts in counterinsurgency operations and a component that allows students to develop their skills in a simulated counterinsurgency environment. We describe how intelligent automated tutoring is used to provide formative feedback to students in the practice environment, and discuss our approach to student performance assessment.},
keywords = {Learning Sciences, The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Poesio, Massimo; Artstein, Ron
Introduction to the Special Issue on Ambiguity and Semantic Judgments Journal Article
In: Research on Language and Computation, vol. 6, no. 3-4, pp. 241–245, 2008.
@article{poesio_introduction_2008,
title = {Introduction to the Special Issue on Ambiguity and Semantic Judgments},
author = {Massimo Poesio and Ron Artstein},
url = {http://ict.usc.edu/pubs/Introduction%20to%20the%20Special%20Issue%20on%20Ambiguity%20and%20Semantic%20Judgments.pdf},
year = {2008},
date = {2008-12-01},
journal = {Research on Language and Computation},
volume = {6},
number = {3-4},
pages = {241–245},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Sridhar, Vivek Kumar Rangarajan; Bangalore, Srinivas
Incorporating Discourse Context in Spoken Language Translation Through Dialog Acts Proceedings Article
In: Proceedings of IEEE Spoken Language Technology Workshop, Goa, India, 2008.
@inproceedings{sridhar_incorporating_2008,
title = {Incorporating Discourse Context in Spoken Language Translation Through Dialog Acts},
author = {Vivek Kumar Rangarajan Sridhar and Srinivas Bangalore},
url = {http://ict.usc.edu/pubs/incorporating%20discourse%20context%20in%20spoken%20language%20translation.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of IEEE Spoken Language Technology Workshop},
address = {Goa, India},
abstract = {Current statistical speech translation approaches predominantly rely on just text transcripts and are limited in their use of rich contextual information such as prosody and discourse function. In this paper, we explore the role of discourse context characterized through dialog acts (DAs) in statistical translation. We present a bag-of-words (BOW) model that exploits DA tags in translation and contrast it with a phrase table interpolation approach presented in previous work. In addition to producing interpretable DA-annotated target language translations through our framework, we also obtain consistent improvements in terms of automatic evaluation metrics such as lexical selection accuracy and BLEU score using both the models. We also analyze the performance improvements per DA tag. Our experiments indicate that questions, acknowledgments, agreements and appreciations contribute to more improvement in comparison to statements.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Miyao, Yusuke; Sagae, Kenji; Sætre, Rune; Matsuzaki, Takuya; Tsujii, Jun'ichi
Evaluating contributions of natural language parsers to protein-protein interaction extraction Journal Article
In: Bioinformatics, vol. 25, no. 3, pp. 394–400, 2008.
@article{miyao_evaluating_2008,
title = {Evaluating contributions of natural language parsers to protein-protein interaction extraction},
author = {Yusuke Miyao and Kenji Sagae and Rune Sætre and Takuya Matsuzaki and Jun'ichi Tsujii},
url = {http://ict.usc.edu/pubs/Evaluating%20contributions%20of%20natural%20language%20parsers%20to%20protein–protein%20interaction%20extraction.pdf},
year = {2008},
date = {2008-12-01},
journal = {Bioinformatics},
volume = {25},
number = {3},
pages = {394–400},
abstract = {Motivation: While text mining technologies for biomedical research have gained popularity as a way to take advantage of the explosive growth of information in text form in biomedical papers, selecting appropriate natural language processing (NLP) tools is still difficult for researchers who are not familiar with recent advances in NLP. This article provides a comparative evaluation of several state-of-the-art natural language parsers, focusing on the task of extracting protein–protein interaction (PPI) from biomedical papers. We measure how each parser, and its output representation, contributes to accuracy improvement when the parser is used as a component in a PPI system. Results: All the parsers attained improvements in accuracy of PPI extraction. The levels of accuracy obtained with these different parsers vary slightly, while differences in parsing speed are larger. The best accuracy in this work was obtained when we combined Miyao and Tsujii's Enju parser and Charniak and Johnson's reranking parser, and the accuracy is better than the state-of-the-art results on the same data. Availability: The PPI extraction system used in this work (AkanePPI) is available online at http://www-tsujii.is.s.u-tokyo.ac.jp/downloads/downloads.cgi. The evaluated parsers are also available online from each developer's site.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Leuski, Anton; Traum, David
A Statistical Approach for Text Processing in Virtual Humans Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
@inproceedings{leuski_statistical_2008,
title = {A Statistical Approach for Text Processing in Virtual Humans},
author = {Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/A%20STATISTICAL%20APPROACH%20FOR%20TEXT%20PROCESSING%20IN%20VIRTUAL%20HUMANS.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {We describe a text classification approach based on statistical language modeling. We show how this approach can be used for several natural language processing tasks in a virtual human system. Specifically, we show it can be applied to language understanding, language generation, and character response selection tasks. We illustrate these applications with some experimental results.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
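As a rough illustration of the statistical language modeling idea, the sketch below ranks stored question-answer pairs by the KL divergence between smoothed unigram models of the user's utterance and of each stored question. This is a deliberately simplified stand-in for the paper's model, and all data is invented.

    # Minimal sketch, not the paper's actual algorithm: pick the answer whose
    # linked question's unigram language model is closest to the query's.
    import math
    from collections import Counter

    def unigram(text, vocab, alpha=0.1):
        # Additively smoothed unigram distribution over a shared vocabulary.
        counts = Counter(text.lower().split())
        total = sum(counts.values()) + alpha * len(vocab)
        return {w: (counts[w] + alpha) / total for w in vocab}

    def kl(p, q):
        # Kullback-Leibler divergence D(p || q); smoothing keeps q[w] > 0.
        return sum(p[w] * math.log(p[w] / q[w]) for w in p)

    qa_pairs = [("what is your name", "My name is Hassan."),
                ("where do you live", "I live in this village.")]
    query = "tell me your name"

    vocab = set(query.lower().split())
    for question, _ in qa_pairs:
        vocab |= set(question.lower().split())

    p = unigram(query, vocab)
    _, best_answer = min(qa_pairs, key=lambda qa: kl(p, unigram(qa[0], vocab)))
    print(best_answer)  # "My name is Hassan."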
Metallinou, Angeliki; Lee, Sungbok; Narayanan, Shrikanth
Audio-Visual Emotion Recognition using Gaussian Mixture Models for Face and Voice Proceedings Article
In: Proceedings of the IEEE International Symposium on Multimedia, pp. 250–257, Berkeley, CA, 2008.
@inproceedings{metallinou_audio-visual_2008,
title = {Audio-Visual Emotion Recognition using Gaussian Mixture Models for Face and Voice},
author = {Angeliki Metallinou and Sungbok Lee and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Audio-Visual%20Emotion%20Recognition%20using%20Gaussian%20Mixture%20Models%20for%20Face%20and%20Voice.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the IEEE International Symposium on Multimedia},
pages = {250–257},
address = {Berkeley, CA},
abstract = {Emotion expression associated with human communication is known to be a multimodal process. In this work, we investigate the way that emotional information is conveyed by facial and vocal modalities, and how these modalities can be effectively combined to achieve improved emotion recognition accuracy. In particular, the behaviors of different facial regions are studied in detail. We analyze an emotion database recorded from ten speakers (five female, five male), which contains speech and facial marker data. Each individual modality is modeled by Gaussian Mixture Models (GMMs). Multiple modalities are combined using two different methods: a Bayesian classifier weighting scheme and support vector machines that use post classification accuracies as features. Individual modality recognition performances indicate that anger and sadness have comparable accuracies for facial and vocal modalities, while happiness seems to be more accurately transmitted by facial expressions than voice. The neutral state has the lowest performance, possibly due to the vague definition of neutrality. Cheek regions achieve better emotion recognition accuracy compared to other facial regions. Moreover, classifier combination leads to significantly higher performance, which confirms that training detailed single modality classifiers and combining them at a later stage is an effective approach.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
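A minimal sketch of the per-modality GMM scheme on synthetic data: one GMM per (emotion, modality) pair, fused by a weighted sum of log-likelihoods. The fusion weights, feature dimensions, and data are assumptions for illustration, not the paper's.

    # Sketch: per-(emotion, modality) GMMs with weighted log-likelihood fusion.
    import numpy as np
    from sklearn.mixture import GaussianMixture

    rng = np.random.default_rng(1)
    emotions = ["anger", "happiness", "sadness"]

    # Synthetic training data: "facial marker" and "vocal" feature blocks per
    # emotion, separated by shifting the mean.
    train = {e: {"face": rng.normal(loc=i, size=(100, 6)),
                 "voice": rng.normal(loc=i, size=(100, 13))}
             for i, e in enumerate(emotions)}

    models = {(e, m): GaussianMixture(n_components=2, random_state=0).fit(X)
              for e, feats in train.items() for m, X in feats.items()}

    def classify(face_vec, voice_vec, w_face=0.6, w_voice=0.4):
        # Weighted sum of per-modality log-likelihoods; weights are invented.
        def score(e):
            return (w_face * models[(e, "face")].score(face_vec[None, :]) +
                    w_voice * models[(e, "voice")].score(voice_vec[None, :]))
        return max(emotions, key=score)

    print(classify(rng.normal(loc=2, size=6), rng.normal(loc=2, size=13)))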
Morency, Louis-Philippe
Real-time Head Pose Estimation Using a Webcam: Monocular Adaptive View-based Appearance Model Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
@inproceedings{morency_real-time_2008,
title = {Real-time Head Pose Estimation Using a Webcam: Monocular Adaptive View-based Appearance Model},
author = {Louis-Philippe Morency},
url = {http://ict.usc.edu/pubs/REAL-TIME%20HEAD%20POSE%20ESTIMATION%20USING%20A%20WEBCAM-%20MONOCULAR%20ADAPTIVE%20VIEW-BASED%20APPEARANCE%20MODEL.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {Accurately estimating the person's head position and orientation is an important task for a wide range of applications such as driver awareness and human-robot interaction. Over the past two decades, many approaches have been suggested to solve this problem, each with its own advantages and disadvantages. In this paper, we present a probabilistic framework called Monocular Adaptive View-based Appearance Model (MAVAM) which integrates the advantages from two of these approaches: (1) the relative precision and user-independence of differential registration, and (2) the robustness and bounded drift of keyframe tracking. In our experiments, we show how the MAVAM model can be used to estimate head position and orientation in real-time using a simple monocular camera. Our experiments on two previously published datasets show that the MAVAM framework can accurately track for a long period of time (>2 minutes) with an average accuracy of 3.9 degrees and 1.2 inches when compared with an inertial sensor and a 3D magnetic sensor.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Artstein, Ron; Poesio, Massimo
Inter-Coder Agreement for Computational Linguistics Journal Article
In: Computational Linguistics, vol. 34, no. 4, pp. 555–596, 2008.
@article{artstein_inter-coder_2008,
title = {Inter-Coder Agreement for Computational Linguistics},
author = {Ron Artstein and Massimo Poesio},
url = {http://ict.usc.edu/pubs/Inter-Coder%20Agreement%20for%20Computational%20Linguistics.pdf},
year = {2008},
date = {2008-12-01},
journal = {Computational Linguistics},
volume = {34},
number = {4},
pages = {555–596},
abstract = {This article is a survey of methods for measuring agreement among corpus annotators. It exposes the mathematics and underlying assumptions of agreement coefficients, covering Krippendorff's alpha as well as Scott's pi and Cohen's kappa; discusses the use of coefficients in several annotation tasks; and argues that weighted, alpha-like coefficients, traditionally less used than kappa-like measures in Computational Linguistics, may be more appropriate for many corpus annotation tasks – but that their use makes the interpretation of the value of the coefficient even harder.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
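For readers new to these measures, the kappa-like coefficients surveyed share a single chance-corrected form, while Krippendorff's alpha is usually stated in terms of disagreements. With observed agreement A_o, expected agreement A_e, observed disagreement D_o, and expected disagreement D_e:

    \pi, \kappa \;=\; \frac{A_o - A_e}{1 - A_e}, \qquad \alpha \;=\; 1 - \frac{D_o}{D_e}

The coefficients differ only in how the expected quantity is estimated from the annotators' label distributions (a single pooled distribution for pi, per-coder distributions for kappa).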
Artstein, Ron; Cannon, Jacob; Gandhe, Sudeep; Gerten, Jillian; Henderer, Joe; Leuski, Anton; Traum, David
Coherence of Off-Topic Responses for a Virtual Character Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
@inproceedings{artstein_coherence_2008,
title = {Coherence of Off-Topic Responses for a Virtual Character},
author = {Ron Artstein and Jacob Cannon and Sudeep Gandhe and Jillian Gerten and Joe Henderer and Anton Leuski and David Traum},
url = {http://ict.usc.edu/pubs/COHERENCE%20OF%20OFF-TOPIC%20RESPONSES%20FOR%20A%20VIRTUAL%20CHARACTER.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {We demonstrate three classes of off-topic responses which allow a virtual question-answering character to handle cases where it does not understand the user's input: ask for clarification, indicate misunderstanding, and move on with the conversation. While falling short of full dialogue management, a combination of such responses together with prompts to change the topic can improve overall dialogue coherence.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Leuski, Anton; Roque, Antonio; Gandhe, Sudeep; DeVault, David; Gerten, Jillian; Robinson, Susan; Martinovski, Bilyana
Natural Language Dialogue Architectures for Tactical Questioning Characters Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
@inproceedings{traum_natural_2008,
title = {Natural Language Dialogue Architectures for Tactical Questioning Characters},
author = {David Traum and Anton Leuski and Antonio Roque and Sudeep Gandhe and David DeVault and Jillian Gerten and Susan Robinson and Bilyana Martinovski},
url = {http://ict.usc.edu/pubs/Natural%20Language%20Dialogue%20Architectures.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {In this paper we contrast three architectures for natural language questioning characters. We contrast the relative costs and benefits of each approach in building characters for tactical questioning. The first architecture works purely at the textual level, using cross-language information retrieval techniques to learn the best output for any input from a training set of linked questions and answers. The second architecture adds a global emotional model and computes a compliance model, which can result in different outputs for different levels, given the same inputs. The third architecture works at a semantic level and allows authoring of different policies for response for different kinds of information. We describe these architectures and their strengths and weaknesses with respect to expressive capacity, performance, and authoring demands.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Parsons, Thomas D.; Pair, Jarrell; McLay, Robert N.; Johnston, Scott; Perlman, Karen; Deal, Robert; Reger, Greg; Gahm, Greg; Roy, Michael; Shilling, Russell; Rothbaum, Barbara O.; Graap, Ken; Spitalnick, Josh; Bordnick, Patrick; Difede, JoAnn
Clinical Results from the Virtual Iraq Exposure Therapy Application for PTSD Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
@inproceedings{rizzo_clinical_2008,
title = {Clinical Results from the Virtual Iraq Exposure Therapy Application for PTSD},
author = {Albert Rizzo and Thomas D. Parsons and Jarrell Pair and Robert N. McLay and Scott Johnston and Karen Perlman and Robert Deal and Greg Reger and Greg Gahm and Michael Roy and Russell Shilling and Barbara O. Rothbaum and Ken Graap and Josh Spitalnick and Patrick Bordnick and JoAnn Difede},
url = {http://ict.usc.edu/pubs/Clinical%20Results%20from%20the%20Virtual%20Iraq%20Esposure%20Therapy%20Application%20for%20PTSD.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {Post Traumatic Stress Disorder (PTSD) is reported to be caused by traumatic events that are outside the range of usual human experience including (but not limited to) military combat, violent personal assault, being kidnapped or taken hostage and terrorist attacks. Initial data suggests that at least 1 out of 5 Iraq War veterans are exhibiting symptoms of depression, anxiety and PTSD. Virtual Reality (VR) delivered exposure therapy for PTSD has been previously used with reports of positive outcomes. The current paper will present the rationale and description of a VR PTSD therapy application (Virtual Iraq) and present initial findings from its use with active duty service members. Virtual Iraq consists of a series of customizable virtual scenarios designed to represent relevant Middle Eastern VR contexts for exposure therapy, including a city and desert road convoy environment. User-centered design feedback needed to iteratively evolve the system was gathered from returning Iraq War veterans in the USA and from a system deployed in Iraq and tested by an Army Combat Stress Control Team. Results from an open clinical trial using Virtual Iraq at the Naval Medical Center-San Diego with 20 treatment completers indicate that 16 no longer met PTSD diagnostic criteria at post-treatment, with only one not maintaining treatment gains at 3 month follow-up.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Abhijeet; Hawkins, Tim; Peers, Pieter; Frederiksen, Sune; Debevec, Paul
Practical Modeling and Acquisition of Layered Facial Reflectance Journal Article
In: ACM Transactions on Graphics, vol. 27, no. 5, 2008.
@article{ghosh_practical_2008,
title = {Practical Modeling and Acquisition of Layered Facial Reflectance},
author = {Abhijeet Ghosh and Tim Hawkins and Pieter Peers and Sune Frederiksen and Paul Debevec},
url = {http://ict.usc.edu/pubs/Practical%20Modeling%20and%20Acquisition%20of%20Layered%20Facial%20Reflectance.pdf},
year = {2008},
date = {2008-12-01},
journal = {ACM Transactions on Graphics},
volume = {27},
number = {5},
abstract = {We present a practical method for modeling layered facial reflectance consisting of specular reflectance, single scattering, and shallow and deep subsurface scattering. We estimate parameters of appropriate reflectance models for each of these layers from just 20 photographs recorded in a few seconds from a single viewpoint. We extract spatially-varying specular reflectance and single-scattering parameters from polarization-difference images under spherical and point source illumination. Next, we employ direct-indirect separation to decompose the remaining multiple scattering observed under cross-polarization into shallow and deep scattering components to model the light transport through multiple layers of skin. Finally, we match appropriate diffusion models to the extracted shallow and deep scattering components for different regions on the face. We validate our technique by comparing renderings of subjects to reference photographs recorded from novel viewpoints and under novel illumination conditions.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {article}
}
McAlinden, Ryan; Bosack, Matthew; Macha, Adrian; Vargas, Esau; Walker, Tim; Mann, John; Cruz, Julio
Towards an Automated Pipeline for the Translation and Optimization of Geospatial Data for Virtual Environments Proceedings Article
In: Proceedings of the 26th Army Science Conference, Orlando, FL, 2008.
@inproceedings{mcalinden_towards_2008,
title = {Towards an Automated Pipeline for the Translation and Optimization of Geospatial Data for Virtual Environments},
author = {Ryan McAlinden and Matthew Bosack and Adrian Macha and Esau Vargas and Tim Walker and John Mann and Julio Cruz},
url = {http://ict.usc.edu/pubs/Towards%20an%20Automated%20Pipeline%20for%20the%20Translation%20and%20Optimization%20of%20Geospatial%20Data%20for%20Virtual%20Environments.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the 26th Army Science Conference},
address = {Orlando, FL},
abstract = {The infusion of commercial game technology into U.S. Army training, simulation, and instructional domains has resulted in more immersive and engaging experiences for Soldiers to hone their skills. However, the influx of such technology comes at a significant cost, specifically in the creation of virtual environments in which these skills are simulated and practiced. Today's typical commercial triple-A game title costs upwards of $40-$60M and takes four to six years to develop, much of which is spent on producing the digital assets used to populate the scene (models, animations, etc.). Additionally, this content is often suited for a custom type of rendering technology, and often cannot be reused without significant manual modification. Unfortunately, the Army has neither the financial nor the personnel resources available to create such highly immersive, reusable virtual content, nor the time to invest when current operations call for training or simulation data in a matter of hours, not months or years. In this paper, we discuss a research initiative aimed at significantly reducing the time and cost for converting, optimizing, and enhancing existing geospatial data for today's virtual environments. The goal is a completely automated process for ingesting existing military terrain data and outputting a technology-agnostic representation in less than 24 hours.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Treskunov, Anton; Sherstyuk, Andrei; Wang, Kin Lik; Pair, Jarrell
Real Binoculars with Virtual Functions for Mixed Environments Proceedings Article
In: International Conference on Advances in Computer Entertainment Technology 2008, Yokohama, Japan, 2008.
@inproceedings{treskunov_real_2008,
title = {Real Binoculars with Virtual Functions for Mixed Environments},
author = {Anton Treskunov and Andrei Sherstyuk and Kin Lik Wang and Jarrell Pair},
url = {http://ict.usc.edu/pubs/Real%20Binoculars%20with%20Virtual%20Functions%20for%20Mixed%20Environments.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {International Conference on Advances in Computer Entertainment Technology 2008},
address = {Yokohama, Japan},
abstract = {Though often desirable, the integration of real and virtual elements in mixed reality environments can be difficult. We propose a number of techniques to facilitate scene exploration and object selection by giving users real instruments as props while implementing their functionality in a virtual part of the environment. Specifically, we present a family of tools built upon the idea of using real binoculars for viewing virtual content. This approach matches user expectations with the tool's capabilities, enhancing the sense of presence and increasing the depth of interaction between the real and virtual components of the scene. We also discuss possible applications of these tools and the results of our user study. This paper is an extended version of earlier work presented at the 4th International Workshop on the Tangible Space Initiative [5].},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mower, Emily; Mataric, Maja J.; Narayanan, Shrikanth
Selection of Emotionally Salient Audio-Visual Features for Modeling Human Evaluations of Synthetic Character Emotion Displays Proceedings Article
In: Proceedings of the IEEE International Symposium on Multimedia, Berkeley, CA, 2008.
@inproceedings{mower_selection_2008,
title = {Selection of Emotionally Salient Audio-Visual Features for Modeling Human Evaluations of Synthetic Character Emotion Displays},
author = {Emily Mower and Maja J. Mataric and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Selection%20of%20Emotionally%20Salient%20Audio-Visual%20Features%20for%20Modeling%20Human%20Evaluations%20of%20Synthetic%20Character%20Emotion%20Displays.pdf},
year = {2008},
date = {2008-12-01},
booktitle = {Proceedings of the IEEE International Symposium on Multimedia},
address = {Berkeley, CA},
abstract = {Computer simulated avatars and humanoid robots have an increasingly prominent place in today's world. Acceptance of these synthetic characters depends on their ability to properly and recognizably convey basic emotion states to a user population. This study presents an analysis of audio-visual features that can be used to predict user evaluations of synthetic character emotion displays. These features include prosodic, spectral, and semantic properties of audio signals in addition to FACS-inspired video features [11]. The goal of this paper is to identify the audio-visual features that explain the variance in the emotional evaluations of naïve listeners through the utilization of information gain feature selection in conjunction with support vector machines. These results suggest that there exists an emotionally salient subset of the audio-visual feature space. The features that contribute most to the explanation of evaluator variance are the prior knowledge audio statistics (e.g., average valence rating), the high energy band spectral components, and the quartile pitch range. This feature subset should be correctly modeled and implemented in the design of synthetic expressive displays to convey the desired emotions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
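The selection-then-classification pipeline the abstract describes can be sketched as follows, with mutual information standing in for information gain; the feature matrix, class count, and k are invented for illustration.

    # Sketch: rank features by mutual information with the emotion labels,
    # keep the top k, then train an SVM on the reduced feature set.
    import numpy as np
    from sklearn.feature_selection import SelectKBest, mutual_info_classif
    from sklearn.model_selection import cross_val_score
    from sklearn.pipeline import make_pipeline
    from sklearn.svm import SVC

    rng = np.random.default_rng(2)
    X = rng.normal(size=(300, 40))    # stand-in audio-visual feature matrix
    y = rng.integers(0, 4, size=300)  # four invented emotion classes

    pipe = make_pipeline(SelectKBest(mutual_info_classif, k=10), SVC())
    print("CV accuracy: %.3f" % cross_val_score(pipe, X, y, cv=5).mean())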
Swanson, Reid; Gordon, Andrew S.
Say Anything: A Massively Collaborative Open Domain Story Writing Companion Proceedings Article
In: First International Conference on Interactive Digital Storytelling, Erfurt, Germany, 2008.
@inproceedings{swanson_say_2008,
title = {Say Anything: A Massively Collaborative Open Domain Story Writing Companion},
author = {Reid Swanson and Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Say%20Anything-%20A%20Massively%20collaborative%20Open%20Domain%20Story%20Writing%20Companion.pdf},
year = {2008},
date = {2008-11-01},
booktitle = {First International Conference on Interactive Digital Storytelling},
address = {Erfurt, Germany},
abstract = {Interactive storytelling is an interesting cross-disciplinary area that has importance in research as well as entertainment. In this paper we explore a new area of interactive storytelling that blurs the line between traditional interactive fiction and collaborative writing. We present a system where the user and computer take turns in writing sentences of a fictional narrative. Sentences contributed by the computer are selected from a collection of millions of stories extracted from Internet weblogs. By leveraging the large amounts of personal narrative content available on the web, we show that even with a simple approach our system can produce compelling stories with our users.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Han, Kyu J.; Georgiou, Panayiotis G.; Narayanan, Shrikanth
The SAIL Speaker Diarization System for Analysis of Spontaneous Meetings Proceedings Article
In: Proceedings of IEEE International Workshop on Multimedia Signal Processing (MMSP), Cairns, Australia, 2008.
@inproceedings{han_sail_2008,
title = {The SAIL Speaker Diarization System for Analysis of Spontaneous Meetings},
author = {Kyu J. Han and Panayiotis G. Georgiou and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/The%20SAIL%20Speaker%20Diarization%20System%20for%20Analysis%20of%20Spontaneous%20Meetings.pdf},
year = {2008},
date = {2008-10-01},
booktitle = {Proceedings of IEEE International Workshop on Multimedia Signal Processing (MMSP)},
address = {Cairns, Australia},
abstract = {In this paper, we propose a novel approach to speaker diarization of spontaneous meetings in our own multimodal SmartRoom environment. The proposed speaker diarization system first applies a sequential clustering concept to segmentation of a given audio data source, and then performs agglomerative hierarchical clustering for speaker-specific classification (or speaker clustering) of speech segments. The speaker clustering algorithm utilizes an incremental Gaussian mixture cluster modeling strategy, and a stopping point estimation method based on information change rate. Through experiments on various meeting conversation data of approximately 200 minutes total length, this system is demonstrated to provide a diarization error rate of 18.90% on average.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
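A much-simplified stand-in for the speaker-clustering stage described above: agglomerative clustering of per-segment feature averages, with a plain distance threshold replacing the paper's incremental GMM cluster modeling and information-change-rate stopping criterion. Speakers, features, and the threshold are all synthetic assumptions.

    # Sketch: agglomerate segment-level vectors until clusters are farther
    # apart than a distance threshold; surviving clusters ~ speakers.
    import numpy as np
    from sklearn.cluster import AgglomerativeClustering

    rng = np.random.default_rng(3)
    # Synthetic "segments": mean MFCC-like vectors from three fake speakers.
    segments = np.vstack([rng.normal(loc=c, scale=0.3, size=(20, 12))
                          for c in (0.0, 2.0, 4.0)])

    clusterer = AgglomerativeClustering(n_clusters=None,
                                        distance_threshold=4.0,
                                        linkage="average")
    labels = clusterer.fit_predict(segments)
    print("estimated speakers:", len(set(labels)))  # expected: 3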
Pataki, Caroly; Sugar, Jeff; Kenny, Patrick G.; Parsons, Thomas D.; Rizzo, Albert; Pato, Michele; St. George, Cheryl
A Virtual Adolescent Patient with PTSD for Training Psychiatrists Proceedings Article
In: Proceedings of the 55th Annual Meeting of the American Academy of Child and Adolescent Psychiatry, Chicago, IL, 2008.
@inproceedings{pataki_virtual_2008,
title = {A Virtual Adolescent Patient with PTSD for Training Psychiatrists},
author = {Caroly Pataki and Jeff Sugar and Patrick G. Kenny and Thomas D. Parsons and Albert Rizzo and Michele Pato and Cheryl St. George},
url = {http://ict.usc.edu/pubs/A%20Virtual%20Adolescent%20Patient%20with%20PTSD%20for%20Training%20Psychiatrists.pdf},
year = {2008},
date = {2008-10-01},
booktitle = {Proceedings of the 55th Annual Meeting of the American Academy of Child and Adolescent Psychiatry},
address = {Chicago, IL},
keywords = {MedVR},
pubstate = {published},
tppubtype = {inproceedings}
}
Morie, Jacquelyn; Pearce, Celia
Uses of Digital Enchantment: Computer Games as the New Fairy Tales Proceedings Article
In: Proceedings of the Vienna Games Conference 2008: The Future of Reality and Gaming (FROG), Vienna, Austria, 2008.
@inproceedings{morie_uses_2008,
title = {Uses of Digital Enchantment: Computer Games as the New Fairy Tales},
author = {Jacquelyn Morie and Celia Pearce},
url = {http://ict.usc.edu/pubs/The_uses_of_digital_enchantment.pdf},
year = {2008},
date = {2008-10-01},
booktitle = {Proceedings of the Vienna Games Conference 2008: The Future of Reality and Gaming (FROG)},
address = {Vienna, Austria},
abstract = {In this paper we argue that digital games have come to fill the cultural niche traditionally occupied by fairytales, and that they are ideally suited to realize some of the unique characteristics of this genre of folklore and literature. Arguably one of the most influential authors on game narrative and genre, J.R.R. Tolkien wrote extensively about fairytales, authored fairytales and considered his great epic work of high fantasy, "The Trilogy of the Ring," to be a fairy tale of sorts. He argued that fairytales were not about fairies per se but took place in the "realm of faerie," the magical world that fairies inhabit. "The realm of fairy-story is wide and deep and high and filled with many things: all manner of beasts and birds are found there; shoreless seas and stars uncounted; beauty that is an enchantment, and ever-present peril; both joy and sorrow as sharp as swords." [1] The "realm of faerie" provides a context for archetypal characters and narratives that express the inner life of the child and the process of transitioning to adulthood, a universal theme with has equal resonance with adults. In The Uses of Enchantment, controversial psychologist Bruno Betttelheim argues that "The motifs of fairy tales are experienced as wondrous because the child feels understood and appreciated deep down in his feelings, hopes, and anxieties, without these all having to be dragged up and investigated in the harsh light of a rationality that is still beyond him." [2] "...the internal processes are externalized and become comprehensible as represented by the figures of the story and its events." [3] These externalized processes can be seen in a wide range of digital games that put the player in the role of fairytale heroine, or more often, hero. Single-player adventure-style games such as the Zelda and Final Fantasy series, Ico, Shadow of the Collosus, Beyond Good and Evil, Okami and the Longest Journey series bring the unique affordances of the computer as a purveyor of magic to bear on this classic literary genre. Science fiction author Arthur C. Clark famously asserted that "Any sufficiently advanced technology is indistinguishable from magic." [4] Frederick Brooks, in The Mythical Man-Month [5], brings another level of refinement to this by describing the alchemic conjuring qualities of the computer thusly: "One types the correct incantation on a keyboard and a display screen comes to life, showing things that never were nor could be." Indeed even the nomenclature of MUDs, in which programmers are referred to as "wizards," seems to confer this quality of magical enchantment to the very creators of games themselves. Given its propensity for magic, the computer is particularly well-suited as a means of expression for the fairytale genre, shifting the focus from empathy with a central character engaged in an epic journey, to endowing a player with the agency to fulfill his or her destiny. We see the trajectory of the "realm of faerie" in the tradition from Tolkien's literary masterworks to the contemporary MMOG. Tolkien's world formed the inspiration for the tabletop role-playing games of the seventies, particularly Dungeons and Dragons, which gave rise to the MUDs of the 1980s and finally the fully realized multiplayer 3D computer fantasy worlds of the 1990s to the present, and the recent release of Lord of the Rings Online. 
This instrumentalization of fantasy environments through mathematical constructs provided a vital transition for the fairytale genre from the world of words to the world of numbers, and hence the world of computers. Today, the fantasy worlds of Tolkien, as well as the new fairy tales of game developers, have been rendered in their full glory via the "correct incantation on a keyboard." While it remains to be seen how or if these new digital fairytales will stand the tests of time as their literary counterparts have done, we argue that they fulfill a similar and vital role in providing today's children a sense of ritual and power in their own hero's journey from child to adulthood. References [1] Tolkien, J.R.R. (1966). The Tolkien Reader. New York: Ballantine. [2] Bettelheim, Bruno. (1975). The Uses of Enchantment: The Meaning and Importance of Fairy Tales. New York: Alfred K. Knopf. [3] Ibid [4] Clark, Arthur C. (1962). Profiles of the Future; an Inquiry into the Limits of the Possible. New York: Harper & Row. [5] Brooks, Frederick P. (1975). The mythical man month: Essays on software engineering. Reading, MA: Addison-Wesley.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Morie, Jacquelyn
The Performance of the Self and Its Effect on Presence in Virtual Worlds Proceedings Article
In: Proceedings of the 11th Annual International Workshop on Presence, pp. 265–269, Padova, Italy, 2008.
@inproceedings{morie_performance_2008,
title = {The Performance of the Self and Its Effect on Presence in Virtual Worlds},
author = {Jacquelyn Morie},
url = {http://ict.usc.edu/pubs/The%20Performance%20of%20the%20Self%20and%20Its%20Effect%20on%20Presence%20in%20Virtual%20Worlds.pdf},
year = {2008},
date = {2008-10-01},
booktitle = {Proceedings of the 11th Annual International Workshop on Presence},
pages = {265–269},
address = {Padova, Italy},
abstract = {This paper addresses the many types of roles that people adopt within digital arenas such as online virtual worlds, and how those authored selves can enhance the sense of Self presence. Erving Goffman maintains that we play many roles in our everyday lives and that our identity is constantly being redefined by both aspects of a situation and the other people with whom we interact. With the explosion of online virtual worlds, the possibilities for such performances of self have multiplied. We now have more opportunities to explore aspects of our personalities including those that we might be reluctant to expose in real life situations. This is a new development for virtual reality: participants can create their appearance in online virtual worlds and become extremely connected to it. The potential for these personas to affect and enhance the sense of Presence should be addressed, and both quantitative and qualitative methods developed to measure their effects.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gandhe, Sudeep; DeVault, David; Roque, Antonio; Martinovski, Bilyana; Artstein, Ron; Leuski, Anton; Gerten, Jillian; Traum, David
From Domain Specification to Virtual Humans: An integrated approach to authoring tactical questioning characters Proceedings Article
In: Proceedings of InterSpeech, 2008.
@inproceedings{gandhe_domain_2008,
title = {From Domain Specification to Virtual Humans: An integrated approach to authoring tactical questioning characters},
author = {Sudeep Gandhe and David DeVault and Antonio Roque and Bilyana Martinovski and Ron Artstein and Anton Leuski and Jillian Gerten and David Traum},
url = {http://ict.usc.edu/pubs/From%20Domain%20Specification%20to%20Virtual%20Humans.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {Proceedings of InterSpeech},
abstract = {We present a new approach for rapidly developing dialogue capabilities for virtual humans. Starting from a domain specification, an integrated authoring interface automatically generates dialogue acts with all possible contents. These dialogue acts are linked to example utterances in order to provide training data for natural language understanding and generation. The virtual human dialogue system contains a dialogue manager following the information-state approach, using finite-state machines and SCXML to manage local coherence, as well as explicit modeling of emotions and compliance level and a grounding component based on evidence of understanding. Using the authoring tools, we design and implement a version of the virtual human Hassan and compare to previous architectures for the character.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
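The finite-state side of the dialogue manager can be illustrated with a toy transition table; the actual system uses SCXML within an information-state approach, and the states, dialogue acts, and responses below are invented.

    # Toy finite-state dialogue manager: (state, dialogue act) -> (state, response).
    class FSMDialogueManager:
        TRANSITIONS = {
            ("start", "greeting"): ("engaged", "Hello."),
            ("engaged", "question"): ("engaged", "Let me think about that."),
            ("engaged", "closing"): ("done", "Goodbye."),
        }

        def __init__(self):
            self.state = "start"

        def handle(self, dialogue_act):
            key = (self.state, dialogue_act)
            if key not in self.TRANSITIONS:
                return "I don't understand."  # fallback keeps local coherence
            self.state, response = self.TRANSITIONS[key]
            return response

    dm = FSMDialogueManager()
    for act in ["greeting", "question", "closing"]:
        print(dm.handle(act))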
Gordon, Andrew S.
Story Management Technologies for Organizational Learning Proceedings Article
In: International Conference on Knowledge Management, Special Track on Intelligent Assistance for Self-Directed and Organizational Learning, Graz, Austria, 2008.
@inproceedings{gordon_story_2008,
title = {Story Management Technologies for Organizational Learning},
author = {Andrew S. Gordon},
url = {http://ict.usc.edu/pubs/Story%20Management%20Technologies%20for%20Organizational%20Learning.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {International Conference on Knowledge Management, Special Track on Intelligent Assistance for Self-Directed and Organizational Learning},
address = {Graz, Austria},
abstract = {The stories told among members of an organization are an effective instrument for knowledge socialization, the sharing of experiences through social mechanisms. However, the utility of stories for organizational learning is limited due to the difficulties in acquiring stories that are relevant to the practices of an organization, identifying the learning goals that these stories serve, and delivering these stories to the right people at the right time in a manner that best facilitates learning. In this paper we outline a vision for story-based organizational learning in the future, and describe three areas where intelligent technologies can be applied to automate story management practices in support of organizational learning. First, we describe automated story capture technologies that identify narratives of people's experiences within the context of a larger discourse. Second, we describe automated retrieval technologies that identify stories that are relevant to specific educational needs. Third, we describe how stories can be transformed into effective story-based learning environments with minimal development costs.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Melo, Celso M.; Gratch, Jonathan
Evolving Expression of Emotions in Virtual Humans Using Lights and Pixels Journal Article
In: Lecture Notes in Computer Science, vol. 5208, pp. 484–485, 2008.
@article{de_melo_evolving_2008,
title = {Evolving Expression of Emotions in Virtual Humans Using Lights and Pixels},
author = {Celso M. Melo and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Evolving%20Expression%20of%20Emotions%20in%20Virtual%20Humans%20Using%20Lights%20and%20Pixels.pdf},
year = {2008},
date = {2008-09-01},
journal = {Lecture Notes in Computer Science},
volume = {5208},
pages = {484–485},
abstract = {Inspired by the arts, this paper addresses the challenge of expressing emotions in virtual humans using the environment's lights and the screen's pixels. An evolutionary approach is proposed which relies on genetic algorithms to learn how to map emotions into these forms of expression. The algorithm evolves populations of hypotheses, where each hypothesis represents a configuration of lighting and screen expression. Hypotheses are evaluated by a critic ensemble composed of artificial and human critics. The need for human critics is motivated by a study which reveals the limitations of an approach that relies only on artificial critics that follow principles from art literature. We also address the need for the model to improve with experience and to adapt to the individual, social and cultural values in the arts. Finally, a second study is described where subjects successfully evolved mappings for joy and sadness.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
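The evolutionary loop the abstract outlines, reduced to a schematic: each hypothesis is a lighting configuration, and a fixed numeric target stands in for the critic ensemble's judgment. The genome, fitness function, and operators are all assumptions for illustration.

    # Schematic genetic algorithm over invented lighting parameters.
    import random

    TARGET = (0.6, 0.4, 0.7)  # made-up "sadness" (hue, brightness, saturation)

    def fitness(h):
        # Stand-in critic: negative squared distance to the target.
        return -sum((a - b) ** 2 for a, b in zip(h, TARGET))

    def mutate(h, rate=0.1):
        return tuple(min(1.0, max(0.0, v + random.uniform(-rate, rate)))
                     for v in h)

    pop = [tuple(random.random() for _ in range(3)) for _ in range(30)]
    for generation in range(100):
        pop.sort(key=fitness, reverse=True)
        parents = pop[:10]  # truncation selection
        pop = parents + [mutate(random.choice(parents)) for _ in range(20)]

    print("best configuration:", max(pop, key=fitness))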
Parsons, Thomas D.; Rizzo, Albert
Virtual Human Patients for Training of Clinical Interview and Communication Skills Proceedings Article
In: Proceedings of the 2008 International Conference on Disability, Virtual Reality and Associated Technology, Maia, Portugal, 2008, ISBN: 07 049 15 00 6.
@inproceedings{parsons_virtual_2008,
title = {Virtual Human Patients for Training of Clinical Interview and Communication Skills},
author = {Thomas D. Parsons and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Virtual%20Human%20Patients%20for%20Training%20of%20Clinical%20Interview%20and%20Communication%20Skills.pdf},
isbn = {07 049 15 00 6},
year = {2008},
date = {2008-09-01},
booktitle = {Proceedings of the 2008 International Conference on Disability, Virtual Reality and Associated Technology},
address = {Maia, Portugal},
abstract = {Although schools commonly make use of standardized patients to teach interview skills, the diversity of the scenarios standardized patients can characterize is limited by the availability of human actors. Virtual Human Agent technology has evolved to a point where researchers may begin developing mental health applications that make use of virtual reality patients. The work presented here is a preliminary attempt at what we believe to be a large application area. Herein we describe an ongoing study of our virtual patients. We present an approach that allows novice mental health clinicians to conduct an interview with a virtual character that emulates 1) an adolescent male with conduct disorder; and 2) an adolescent female who has recently been physically traumatized.},
keywords = {MedVR, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gordon, Andrew S.; Swanson, Reid
Envisioning With Weblogs Proceedings Article
In: International Conference on New Media Technology, Special Track on Knowledge Acquisition From the Social Web, Graz, Austria, 2008.
@inproceedings{gordon_envisioning_2008,
title = {Envisioning With Weblogs},
author = {Andrew S. Gordon and Reid Swanson},
url = {http://ict.usc.edu/pubs/Envisioning%20With%20Weblogs.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {International Conference on New Media Technology, Special Track on Knowledge Acquisition From the Social Web},
address = {Graz, Austria},
abstract = {In this position paper we present a vision of how the stories that people tell in Internet weblogs can be used directly for automated commonsense reasoning, specifically to support the core envisionment functions of event prediction, explanation, and imagination.},
keywords = {The Narrative Group},
pubstate = {published},
tppubtype = {inproceedings}
}
Traum, David; Gratch, Jonathan; Hartholt, Arno; Marsella, Stacy C.; Lee, Jina
Multi-party, Multi-issue, Multi-strategy Negotiation for Multi-modal Virtual Agents Proceedings Article
In: Proceedings of the 8th International Conference on Intelligent Virtual Agents, pp. 117–130, Tokyo, Japan, 2008.
@inproceedings{traum_multi-party_2008,
title = {Multi-party, Multi-issue, Multi-strategy Negotiation for Multi-modal Virtual Agents},
author = {David Traum and Jonathan Gratch and Arno Hartholt and Stacy C. Marsella and Jina Lee},
url = {http://ict.usc.edu/pubs/Multi-party,%20Multi-issue,%20Multi-strategy%20Negotiation.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {Proceedings of the 8th International Conference on Intelligent Virtual Agents},
pages = {117–130},
address = {Tokyo, Japan},
abstract = {We present a model of negotiation for virtual agents that extends previous work to be more human-like and applicable to a broader range of situations, including more than two negotiators with different goals, and negotiating over multiple options. The agents can dynamically change their negotiating strategies based on the current values of several parameters and factors that can be updated in the course of the negotiation. We have implemented this model and done preliminary evaluation within a prototype training system and a three-party negotiation with two virtual humans and one human.},
keywords = {Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Sherstyuk, Andrei; Treskunov, Anton; Berg, Benjamin
Fast Geometry Acquisition for Mixed Reality Applications Using Motion Tracking Proceedings Article
In: 7th IEEE and ACM International Symposium on Mixed and Augmented Reality - ISMAR 2008, Cambridge, UK, 2008.
@inproceedings{sherstyuk_fast_2008,
title = {Fast Geometry Acquisition for Mixed Reality Applications Using Motion Tracking},
author = {Andrei Sherstyuk and Anton Treskunov and Benjamin Berg},
url = {http://ict.usc.edu/pubs/Fast%20Geometry%20Acquisition%20for%20Mixed%20Reality%20Applications%20Using%20Motion%20Tracking.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {7th IEEE and ACM International Symposium on Mixed and Augmented Reality - ISMAR 2008},
address = {Cambridge, UK},
abstract = {Mixing real and virtual elements into one environment often involves creating geometry models of physical objects. Traditional approaches include manual modeling by 3D artists or use of dedicated devices. Both approaches require special skills or special hardware and may be costly. We propose a new method for fast semi-automatic 3D geometry acquisition, based upon unconventional use of motion tracking equipment. The proposed method is intended for quick surface prototyping for Virtual, Augmented and Mixed reality applications where quality of visualization of objects is not required or is of low priority.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Velson, Martin
Towards Real-time Authoring of Believable Agents in Interactive Narrative Proceedings Article
In: 8th International Conference on Intelligent Virtual Agents, Tokyo, Japan, 2008.
@inproceedings{van_velson_towards_2008,
title = {Towards Real-time Authoring of Believable Agents in Interactive Narrative},
author = {Martin Velson},
url = {http://ict.usc.edu/pubs/Towards%20real%20time%20authoring%20of%20believable%20agents.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {8th International Conference on Intelligent Virtual Agents},
address = {Tokyo, Japan},
abstract = {In this paper we present an authoring tool called Narratoria that allows non-technical experts in the field of digital entertainment to create interactive narratives with 3D graphics and multimedia. Narratoria allows experts in digital entertainment to participate in the generation of story-based military training applications. Users of the tools can create story-arcs, screenplays, pedagogical goals and AI models using a single software application. Using commercial game engines, which provide direct visual output in a real-time feedback-loop, users can view the final product as they edit.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Parsons, Thomas D.; Cosand, Louise; Courtney, Chris; Iyer, Arvind; Rizzo, Albert
Neuropsychological Assessment using the Virtual Reality Cognitive Performance Assessment Test Proceedings Article
In: Proceedings of the 2008 International Conference on Disability, Virtual Reality and Associated Technology, 2008.
@inproceedings{parsons_neuropsychological_2008,
title = {Neuropsychological Assessment using the Virtual Reality Cognitive Performance Assessment Test},
author = {Thomas D. Parsons and Louise Cosand and Chris Courtney and Arvind Iyer and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Neurocognitive%20Workload%20Assessment%20Using%20the%20Virtual%20Reality%20Cognitive%20Performance%20Assessment%20Test.pdf},
year = {2008},
date = {2008-09-01},
booktitle = {Proceedings of the 2008 International Conference on Disability, Virtual Reality and Associated Technologies},
abstract = {The traditional approach to assessing neurocognitive performance makes use of paper-and-pencil neuropsychological assessments. This received approach has been criticized as limited in the area of ecological validity. The newly developed Virtual Reality Cognitive Performance Assessment Test (VRCPAT) focuses upon enhanced ecological validity, using virtual environment scenarios to assess neurocognitive processing. The VRCPAT battery and a neuropsychological assessment were conducted with a sample of healthy adults. Findings suggest 1) good construct validity for the Memory Module; and 2) that increases in stimulus complexity and stimulus intensity can manipulate attention performance within the Attention Module.},
keywords = {MedVR},
pubstate = {published},
tppubtype = {inproceedings}
}
Bolas, Mark; Lange, Belinda; Dallas, I.; Rizzo, Albert
Engaging breathing exercises: developing an interactive XNA-based air flow sensing and control system Proceedings Article
In: Virtual Rehabilitation, pp. 72, Vancouver, CA, 2008.
@inproceedings{bolas_engaging_2008,
title = {Engaging breathing exercises: developing an interactive XNA-based air flow sensing and control system},
author = {Mark Bolas and Belinda Lange and I. Dallas and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Engaging%20breathing%20exercises-%20developing%20an%20interactive%20XNA-based%20air%20flow%20sensing%20and%20control%20system.jpg},
year = {2008},
date = {2008-08-01},
booktitle = {Virtual Rehabilitation},
pages = {72},
address = {Vancouver, CA},
abstract = {The aim of this project was to make breathing exercises fun for children with Cystic Fibrosis. We developed a prototype device that uses breathing to control specially designed video games.},
keywords = {MedVR, MxR},
pubstate = {published},
tppubtype = {inproceedings}
}
Morency, Louis-Philippe; Sun, Xu; Okanohara, Daisuke; Tsujii, Jun'ichi
Modeling Latent-Dynamic in Shallow Parsing: A Latent Conditional Model with Improved Inference Proceedings Article
In: The 22nd International Conference on Computational Linguistics (COLING 2008), Manchester, UK, 2008.
@inproceedings{morency_modeling_2008,
title = {Modeling Latent-Dynamic in Shallow Parsing: A Latent Conditional Model with Improved Inference},
author = {Louis-Philippe Morency and Xu Sun and Daisuke Okanohara and Jun'ichi Tsujii},
url = {http://www.ict.usc.edu/pubs/Modeling%20Latent-Dynamic%20in%20Shallow%20Parsing.pdf},
year = {2008},
date = {2008-08-01},
booktitle = {The 22nd International Conference on Computational Linguistics (COLING 2008)},
address = {Manchester, UK},
abstract = {Shallow parsing is one of many NLP tasks that can be reduced to a sequence labeling problem. In this paper we show that latent dynamics (i.e., the hidden substructure of shallow phrases) constitute a problem in shallow parsing, and that modeling this intermediate structure is useful. By analyzing the automatically learned hidden states, we show how the latent conditional model explicitly learns latent dynamics. We propose the Best Label Path (BLP) inference algorithm, which is able to produce the most probable label sequence on latent conditional models, and which outperforms two existing inference algorithms. With BLP inference, the LDCRF model significantly outperforms CRF models on word features, and achieves performance comparable to the most successful shallow parsers on the CoNLL data when part-of-speech features are added.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ma, Wan-Chun; Jones, Andrew; Hawkins, Tim; Chiang, Jen-Yuan; Debevec, Paul
A high-resolution geometry capture system for facial performance Proceedings Article
In: SIGGRAPH, Los Angeles, CA, 2008.
@inproceedings{ma_high-resolution_2008,
title = {A high-resolution geometry capture system for facial performance},
author = {Wan-Chun Ma and Andrew Jones and Tim Hawkins and Jen-Yuan Chiang and Paul Debevec},
url = {http://ict.usc.edu/pubs/A%20high-resolution%20geometry%20capture%20system%20for%20facial%20performance.pdf},
year = {2008},
date = {2008-08-01},
booktitle = {SIGGRAPH},
address = {Los Angeles, CA},
abstract = {The two cameras capture data at a resolution of 2400×1800 (Bayer pattern). With an internal RAM storage of 12 GB, the maximum recording time is around 5 seconds. The result of each scan contains a high-resolution mesh that usually consists of 1M triangles, a smoothed medium-resolution mesh, a color texture, a world-space normal map, and a displacement map representing the difference between the high-resolution mesh and the smoothed mesh.},
keywords = {Graphics},
pubstate = {published},
tppubtype = {inproceedings}
}
Sagae, Kenji; Tsujii, Jun'ichi
Shift-reduce dependency DAG parsing Proceedings Article
In: 22nd International Conference on Computational Linguistics (Coling 2008), Manchester, UK, 2008.
@inproceedings{sagae_shift-reduce_2008,
title = {Shift-reduce dependency DAG parsing},
author = {Kenji Sagae and Jun'ichi Tsujii},
url = {http://www.ict.usc.edu/pubs/Shift-reduce%20dependency%20DAG%20parsing.pdf},
year = {2008},
date = {2008-08-01},
booktitle = {22nd International Conference on Computational Linguistics (Coling 2008)},
address = {Manchester, UK},
abstract = {Most data-driven dependency parsing approaches assume that sentence structure is represented as trees. Although trees have several desirable properties from both computational and linguistic perspectives, the structure of linguistic phenomena that goes beyond shallow syntax often cannot be fully captured by tree representations. We present a parsing approach that is nearly as simple as current data-driven transition-based dependency parsing frameworks, but outputs directed acyclic graphs (DAGs). We demonstrate the benefits of DAG parsing in two experiments where its advantages over dependency tree parsing can be clearly observed: predicate-argument analysis of English and syntactic analysis of Danish with a representation that includes long-distance dependencies and anaphoric reference links.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kenny, Patrick G.; Parsons, Thomas D.; Gratch, Jonathan; Rizzo, Albert
Virtual Humans for Assisted Health Care Proceedings Article
In: Pervasive Technologies for Assistive Environments (PETRA) Conference Proceedings, ACM, Athens, Greece, 2008.
@inproceedings{kenny_virtual_2008-1,
title = {Virtual Humans for Assisted Health Care},
author = {Patrick G. Kenny and Thomas D. Parsons and Jonathan Gratch and Albert Rizzo},
url = {http://ict.usc.edu/pubs/Virtual%20Humans%20for%20Assisted%20Health%20Care.pdf},
year = {2008},
date = {2008-07-01},
booktitle = {Pervasive Technologies for Assistive Environments (PETRA) Conference Proceedings},
publisher = {ACM},
address = {Athens, Greece},
abstract = {There is a growing need for applications that can dynamically interact with aging populations to gather information, monitor their health care, provide information, or even act as companions. Virtual human agents or virtual characters offer a technology that can enable human users to overcome the confusing interfaces found in current human-computer interactions. These artificially intelligent virtual characters have speech recognition, natural language and vision capabilities that allow human users to interact with their computers in a more natural way. Additionally, sensors may be used to monitor the environment for specific behaviors that can be fused into a virtual human system. As a result, the virtual human may respond to a patient or elderly person in a manner that will have a powerful effect on their living situation. This paper describes the virtual human technology developed and some current applications that apply the technology to virtual patients for mental health diagnosis and clinician training. Additionally, the paper discusses possible ways in which virtual humans may be utilized for assisted health care and for the integration of multi-modal input to enhance the virtual human system.},
keywords = {MedVR, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Solomon, Steve; Gratch, Jonathan; Bulitko, Vadim; van Lent, Michael
Modeling Culturally and Emotionally Affected Behavior Proceedings Article
In: The 10th International Conference on the Simulation of Adaptive Behavior (SAB); Workshop on the role of emotion in adaptive behavior and cognitive robotics., Osaka, Japan, 2008.
@inproceedings{solomon_modeling_2008,
title = {Modeling Culturally and Emotionally Affected Behavior},
author = {Steve Solomon and Jonathan Gratch and Vadim Bulitko and Michael van Lent},
url = {http://www.ict.usc.edu/pubs/Modeling%20Culturally%20and%20Emotionally%20Affected%20Behavior.pdf},
year = {2008},
date = {2008-07-01},
booktitle = {The 10th International Conference on the Simulation of Adaptive Behavior (SAB); Workshop on the role of emotion in adaptive behavior and cognitive robotics.},
address = {Osaka, Japan},
abstract = {Culture and emotions have a profound impact on human behavior. Consequently, high-fidelity simulated interactive environments (e.g., trainers and computer games) that involve virtual humans must model socio-cultural and emotional effects on agent behavior. In this paper we discuss two recently fielded systems that do so independently: Culturally Affected Behavior (CAB) and EMotion and Adaptation (EMA). We then propose a simple language that combines the two systems in a natural way, thereby enabling simultaneous simulation of culturally and emotionally affected behavior. The proposed language is based on matrix algebra and can be easily implemented on single- or multi-core hardware with a standard matrix package (e.g., MATLAB or a C++ library). We then show how to extend the combined culture and emotion model with an explicit representation of religion and personality profiles.},
keywords = {Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}