Publications
de Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
"Do As I Say, Not As I Do:” Challenges in Delegating Decisions to Automated Agents Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 949–956, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016.
@inproceedings{de_melo_as_2016,
title = {"Do As I Say, Not As I Do:” Challenges in Delegating Decisions to Automated Agents},
author = {Celso M. de Melo and Stacy Marsella and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=2937063},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems},
pages = {949–956},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {There has been growing interest, across various domains, in computer agents that can decide on behalf of humans. These agents have the potential to save considerable time and help humans reach better decisions. One implicit assumption, however, is that, as long as the algorithms that simulate decision-making are correct and capture how humans make decisions, humans will treat these agents similarly to other humans. Here we show that interaction with agents that act on our behalf or on behalf of others is richer and more interesting than initially expected. Our results show that, on the one hand, people are more selfish with agents acting on behalf of others than when interacting directly with others. We propose that agents increase the social distance with others, which subsequently leads to increased demand. On the other hand, when people task an agent to interact with others, people show more concern for fairness than when interacting directly with others. In this case, higher psychological distance leads people to consider their social image and the long-term consequences of their actions and, thus, behave more fairly. To support these findings, we present an experiment where people engaged in the ultimatum game, either directly or via an agent, with others or agents representing others. We show that these patterns of behavior also occur in a variant of the ultimatum game – the impunity game – where others have minimal power over the final outcome. Finally, we study how social value orientation – i.e., people’s propensity for cooperation – impacts these effects. These results have important implications for our understanding of the psychological mechanisms underlying interaction with agents, as well as practical implications for the design of successful agents that act on our behalf or on behalf of others.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Uryupina, Olga; Artstein, Ron; Bristot, Antonella; Cavicchio, Federica; Rodriguez, Kepa; Poesio, Massimo
ARRAU: Linguistically-Motivated Annotation of Anaphoric Descriptions Proceedings Article
In: Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016), pp. 2058–2062, European Language Resources Association (ELRA), Portorož, Slovenia, 2016.
@inproceedings{uryupina_arrau_2016,
title = {ARRAU: Linguistically-Motivated Annotation of Anaphoric Descriptions},
author = {Olga Uryupina and Ron Artstein and Antonella Bristot and Federica Cavicchio and Kepa Rodriguez and Massimo Poesio},
url = {http://www.lrec-conf.org/proceedings/lrec2016/summaries/1121.html},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)},
pages = {2058–2062},
publisher = {European Language Resources Association (ELRA)},
address = {Portorož, Slovenia},
abstract = {This paper presents a second release of the ARRAU dataset: a multi-domain corpus with thorough linguistically motivated annotation of anaphora and related phenomena. Building upon the first release almost a decade ago, considerable effort has been invested in improving the data both quantitatively and qualitatively. Thus, we have doubled the corpus size, expanded the selection of covered phenomena to include referentiality and genericity, and designed and implemented a methodology for enforcing the consistency of the manual annotation. We believe that the new release of ARRAU provides valuable material for ongoing research in complex cases of coreference as well as for a variety of related tasks. The corpus is publicly available through LDC.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Zarrieß, Sina; Hough, Julian; Kennington, Casey; Manuvinakurike, Ramesh; DeVault, David; Fernández, Raquel; Schlangen, David
PentoRef: A Corpus of Spoken References in Task-oriented Dialogues Proceedings Article
In: 10th edition of the Language Resources and Evaluation Conference, ELRA, Portorož, Slovenia, 2016.
@inproceedings{zarrieb_pentoref_2016,
title = {PentoRef: A Corpus of Spoken References in Task-oriented Dialogues},
author = {Sina Zarrieß and Julian Hough and Casey Kennington and Ramesh Manuvinakurike and David DeVault and Raquel Fernández and David Schlangen},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/563_Paper.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {10th edition of the Language Resources and Evaluation Conference},
publisher = {ELRA},
address = {Portorož, Slovenia},
abstract = {PentoRef is a corpus of task-oriented dialogues collected in systematically manipulated settings. The corpus is multilingual, with English and German sections, and overall comprises more than 20000 utterances. The dialogues are fully transcribed and annotated with referring expressions mapped to objects in corresponding visual scenes, which makes the corpus a rich resource for research on spoken referring expressions in generation and resolution. The corpus includes several sub-corpora that correspond to different dialogue situations where parameters related to interactivity, visual access, and verbal channel have been manipulated in systematic ways. The corpus thus lends itself to very targeted studies of reference in spontaneous dialogue.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ghosh, Sayan; Laksana, Eugene; Morency, Louis-Philippe; Scherer, Stefan
Learning Representations of Affect from Speech Proceedings Article
In: ICLR 2016, ICLR, San Juan, Puerto Rico, 2016.
@inproceedings{ghosh_eugene_laksana_satan_learning_2016,
title = {Learning Representations of Affect from Speech},
author = {Sayan Ghosh and Eugene Laksana and Louis-Philippe Morency and Stefan Scherer},
url = {http://arxiv.org/pdf/1511.04747.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {ICLR 2016},
publisher = {ICLR},
address = {San Juan, Puerto Rico},
abstract = {There has been a lot of prior work on representation learning for speech recognition applications, but not much emphasis has been given to an investigation of effective representations of affect from speech, where the paralinguistic elements of speech are separated out from the verbal content. In this paper, we explore denoising autoencoders for learning paralinguistic attributes, i.e. categorical and dimensional affective traits from speech. We show that the representations learnt by the bottleneck layer of the autoencoder are highly discriminative of activation intensity and at separating out negative valence (sadness and anger) from positive valence (happiness). We experiment with different input speech features (such as FFT and log-mel spectrograms with temporal context windows), and different autoencoder architectures (such as stacked and deep autoencoders). We also learn utterance specific representations by a combination of denoising autoencoders and BLSTM based recurrent autoencoders. Emotion classification is performed with the learnt temporal/dynamic representations to evaluate the quality of the representations. Experiments on a well-established real-life speech dataset (IEMOCAP) show that the learnt representations are comparable to state of the art feature extractors (such as voice quality features and MFCCs) and are competitive with state-of-the-art approaches at emotion and dimensional affect recognition.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
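The bottleneck representations described in this abstract can be illustrated with a minimal denoising autoencoder. The sketch below is a toy stand-in, not the paper's architecture: it assumes 40-dimensional log-mel frames, random data, and a small fully connected encoder, whereas the paper evaluates stacked/deep autoencoders and BLSTM recurrent autoencoders on IEMOCAP.

import torch
import torch.nn as nn

class DenoisingAutoencoder(nn.Module):
    def __init__(self, n_in=40, n_bottleneck=8):
        super().__init__()
        self.encoder = nn.Sequential(nn.Linear(n_in, 64), nn.ReLU(), nn.Linear(64, n_bottleneck))
        self.decoder = nn.Sequential(nn.Linear(n_bottleneck, 64), nn.ReLU(), nn.Linear(64, n_in))

    def forward(self, x):
        return self.decoder(self.encoder(x))

model = DenoisingAutoencoder()
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
loss_fn = nn.MSELoss()
frames = torch.randn(256, 40)                          # stand-in for log-mel frames
for _ in range(100):
    noisy = frames + 0.1 * torch.randn_like(frames)    # corrupt the input
    opt.zero_grad()
    loss = loss_fn(model(noisy), frames)               # reconstruct the clean frames
    loss.backward()
    opt.step()
with torch.no_grad():
    codes = model.encoder(frames)   # bottleneck features for downstream emotion classification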
Konovalov, Vasily; Artstein, Ron; Melamud, Oren; Dagan, Ido
The Negochat Corpus of Human-agent Negotiation Dialogues Proceedings Article
In: Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016), pp. 3141–3145, European Language Resources Association (ELRA), Portorož, Slovenia, 2016.
@inproceedings{konovalov_negochat_2016,
title = {The Negochat Corpus of Human-agent Negotiation Dialogues},
author = {Vasily Konovalov and Ron Artstein and Oren Melamud and Ido Dagan},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/240_Paper.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)},
pages = {3141–3145},
publisher = {European Language Resources Association (ELRA)},
address = {Portorož, Slovenia},
abstract = {Annotated in-domain corpora are crucial to the successful development of dialogue systems of automated agents, and in particular for developing natural language understanding (NLU) components of such systems. Unfortunately, such important resources are scarce. In this work, we introduce an annotated natural language human-agent dialogue corpus in the negotiation domain. The corpus was collected using Amazon Mechanical Turk following the ‘Wizard-Of-Oz’ approach, where a ‘wizard’ human translates the participants’ natural language utterances in real time into a semantic language. Once dialogue collection was completed, utterances were annotated with intent labels by two independent annotators, achieving high inter-annotator agreement. Our initial experiments with an SVM classifier show that automatically inferring such labels from the utterances is far from trivial. We make our corpus publicly available to serve as an aid in the development of dialogue systems for negotiation agents, and suggest that analogous corpora can be created following our methodology and using our available source code. To the best of our knowledge this is the first publicly available negotiation dialogue corpus.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
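As a rough illustration of the intent-labeling task the abstract mentions, a TF-IDF plus linear SVM pipeline in scikit-learn is one standard baseline; the utterances and intent labels below are invented placeholders, not taken from the Negochat corpus.

from sklearn.pipeline import make_pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC

# Toy stand-ins for annotated utterances and intent labels (invented, not from the corpus)
utterances = ["I want a salary of 20000", "I can offer you the programmer job",
              "No pension fund", "Let's agree on 8 percent pension"]
intents = ["Offer(salary)", "Offer(job)", "Reject(pension)", "Offer(pension)"]

clf = make_pipeline(TfidfVectorizer(ngram_range=(1, 2)), LinearSVC())
clf.fit(utterances, intents)
print(clf.predict(["what about a 20000 salary"]))   # -> ['Offer(salary)']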
de Melo, Celso M.; Marsella, Stacy; Gratch, Jonathan
People Don’t Feel Guilty About Exploiting Machines Journal Article
In: ACM Transactions on Computer-Human Interaction (TOCHI), vol. 23, no. 2, pp. 1–17, 2016, ISSN: 1073-0516.
@article{melo_people_2016,
title = {People Don’t Feel Guilty About Exploiting Machines},
author = {Celso M. de Melo and Stacy Marsella and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?id=2890495},
doi = {10.1145/2890495},
issn = {1073-0516},
year = {2016},
date = {2016-05-01},
journal = {ACM Transactions on Computer-Human Interaction (TOCHI)},
volume = {23},
number = {2},
pages = {1–17},
abstract = {Guilt and envy play an important role in social interaction. Guilt occurs when individuals cause harm to others or break social norms. Envy occurs when individuals compare themselves unfavorably to others and desire to benefit from the others’ advantage. In both cases, these emotions motivate people to act and change the status quo: following guilt, people try to make amends for the perceived transgression and, following envy, people try to harm envied others. In this paper, we present two experiments that study participants' experience of guilt and envy when engaging in social decision making with machines and humans. The results showed that, though experiencing the same level of envy, people felt considerably less guilt with machines than with humans. These effects occurred both with subjective and behavioral measures of guilt and envy, and in three different economic games: public goods, ultimatum, and dictator game. This poses an important challenge for human-computer interaction because, as shown here, it leads people to systematically exploit machines, when compared to humans. We discuss theoretical and practical implications for the design of human-machine interaction systems that hope to achieve the kind of efficiency – cooperation, fairness, reciprocity, etc. – we see in human-human interaction.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Pincus, Eli; Traum, David
Towards Automatic Identification of Effective Clues for Team Word-Guessing Games Proceedings Article
In: Proceedings of the Language Resources and Evaluation Conference (LREC), pp. 2741–2747, European Language Resources Association, Portorož, Slovenia, 2016.
@inproceedings{pincus_towards_2016,
title = {Towards Automatic Identification of Effective Clues for Team Word-Guessing Games},
author = {Eli Pincus and David Traum},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/762_Paper.pdf},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Language Resources and Evaluation Conference (LREC)},
pages = {2741–2747},
publisher = {European Language Resources Association},
address = {Portorož, Slovenia},
abstract = {Team word-guessing games where one player, the clue-giver, gives clues attempting to elicit a target-word from another player, the receiver, are a popular form of entertainment and also used for educational purposes. Creating an engaging computational agent capable of emulating a talented human clue-giver in a timed word-guessing game depends on the ability to provide effective clues (clues able to elicit a correct guess from a human receiver). There are many available web resources and databases that can be mined for the raw material for clues for target-words; however, a large number of those clues are unlikely to be able to elicit a correct guess from a human guesser. In this paper, we propose a method for automatically filtering a clue corpus for effective clues for an arbitrary target-word from a larger set of potential clues, using machine learning on a set of features of the clues, including point-wise mutual information between a clue’s constituent words and a clue’s target-word. The results of the experiments significantly improve the average clue quality over previous approaches, and bring quality rates in-line with measures of human clue quality derived from a corpus of human-human interactions. The paper also introduces the data used to develop this method; audio recordings of people making guesses after having heard the clues being spoken by a synthesized voice (Pincus and Traum, 2016).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
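The point-wise mutual information feature named in the abstract can be sketched from simple document co-occurrence counts. The toy documents, clue, and target below are hypothetical; a real implementation would mine the web resources and corpus the paper describes.

import math
from collections import Counter

# Toy document collection (stand-in for a real corpus)
docs = [{"lion", "mane"}, {"lion", "mane", "africa"}, {"lion", "roar"}, {"horse", "hair"}]
N = len(docs)
df = Counter(w for d in docs for w in d)   # document frequency per word

def pmi(word, target):
    """PMI from document co-occurrence counts; -inf if the pair never co-occurs."""
    co = sum(1 for d in docs if word in d and target in d)
    if co == 0:
        return float("-inf")
    return math.log((co / N) / ((df[word] / N) * (df[target] / N)))

# Score a candidate clue for target 'lion' by its best word-target PMI
clue = ["it", "has", "a", "mane"]
scores = [pmi(w, "lion") for w in clue if w in df]
print(max(scores))   # positive PMI marks words that co-occur with the target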
Gilani, Setareh Nasihati; Sheetz, Kraig; Lucas, Gale; Traum, David
What Kind of Stories Should a Virtual Human Swap? Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 1437–1438, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{nasihati_gilani_what_2016,
title = {What Kind of Stories Should a Virtual Human Swap?},
author = {Setareh Nasihati Gilani and Kraig Sheetz and Gale Lucas and David Traum},
url = {http://dl.acm.org/citation.cfm?id=2937198},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems},
pages = {1437–1438},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Stories are pervasive in conversation between people [5]. They are used to establish identity, pass on cultural heritage, and build rapport. Often stories are swapped, where one conversational participant replies to a story with a story. Stories are also told by virtual humans [1, 6, 2]. In creating or mining stories for a virtual human (VH) to tell, a number of considerations come up about what kinds of stories should be told and how the stories should be related to the virtual human's identity, such as whether the identity should be human or artificial, and whether the stories should be about the virtual human or about someone else. We designed a set of virtual human characters who can engage in a simple form of story-swapping. Each of the characters can engage in simple interactions such as greetings and closings and can respond to a set of “ice-breaker” questions that might be used on a first date or similar “get to know you” encounter. For these questions the character's answer includes a story. We created 4 character response sets, to have all combinations of identity (human or artificial) and perspective (first person stories about the narrator, or third person stories about someone else). We also designed an experiment to explore the collective impact of the above principles on people who interact with the characters. Participants interact with two of the above characters in a “get to know you” scenario. We investigate the degree of reciprocity, where people respond to the character with their own stories, and also compare participants' rapport with the characters as well as their impressions of the characters' personality.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Collins, Kathryn J.; Traum, David
Towards A Multi-Dimensional Taxonomy Of Stories In Dialogue Proceedings Article
In: Proceedings of the Language Resources and Evaluation Conference (LREC), pp. 118–124, European Language Resources Association, Portorož, Slovenia, 2016, ISBN: 978-2-9517408-9-1.
@inproceedings{collins_towards_2016,
title = {Towards A Multi-Dimensional Taxonomy Of Stories In Dialogue},
author = {Kathryn J. Collins and David Traum},
url = {http://www.lrec-conf.org/proceedings/lrec2016/pdf/354_Paper.pdf},
isbn = {978-2-9517408-9-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Language Resources and Evaluation Conference (LREC)},
pages = {118–124},
publisher = {European Language Resources Association},
address = {Portorož, Slovenia},
abstract = {In this paper, we present a taxonomy of stories told in dialogue. We based our scheme on prior work analyzing narrative structure and method of telling, relation to storyteller identity, as well as some categories particular to dialogue, such as how the story gets introduced. Our taxonomy currently has 5 major dimensions, with most having sub-dimensions; each dimension has an associated set of dimension-specific labels. We adapted an annotation tool for this taxonomy and have annotated portions of two different dialogue corpora, Switchboard and the Distress Analysis Interview Corpus. We present examples of some of the tags and concepts with stories from Switchboard, and some initial statistics of frequencies of the tags.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rizzo, Albert; Lucas, Gale; Gratch, Jonathan; Stratou, Giota; Morency, Louis-Philippe; Chavez, Kenneth; Shilling, Russ; Scherer, Stefan
Automatic Behavior Analysis During a Clinical Interview with a Virtual Human. Journal Article
In: Medicine Meets Virtual Reality 22: NextMed/MMVR22, vol. 220, pp. 316–322, 2016.
@article{rizzo_automatic_2016,
title = {Automatic Behavior Analysis During a Clinical Interview with a Virtual Human.},
author = {Albert Rizzo and Gale Lucas and Jonathan Gratch and Giota Stratou and Louis-Philippe Morency and Kenneth Chavez and Russ Shilling and Stefan Scherer},
url = {http://books.google.com/books?hl=en&lr=&id=sLgtDAAAQBAJ&oi=fnd&pg=PA316&dq=%22captured+across+a+20+minute+interview.+Results+from+of+sample+of+service%22+%22technology+for+clinical+purposes.+Recent+shifts+in+the+social+and%22+%22needed+to+create+VH+systems+is+now+driving+application+development+across%22+&ots=Ej8M4iuPfb&sig=Ad6Z3DPSwN3qA2gMDKWPe1YTPhg},
year = {2016},
date = {2016-04-01},
journal = {Medicine Meets Virtual Reality 22: NextMed/MMVR22},
volume = {220},
pages = {316–322},
abstract = {SimSensei is a Virtual Human (VH) interviewing platform that uses off-the-shelf sensors (i.e., webcams, Microsoft Kinect and a microphone) to capture and interpret real-time audiovisual behavioral signals from users interacting with the VH system. The system was specifically designed for clinical interviewing and health care support by providing a face-to-face interaction between a user and a VH that can automatically react to the inferred state of the user through analysis of behavioral signals gleaned from the user’s facial expressions, body gestures and vocal parameters. Akin to how non-verbal behavioral signals have an impact on human-to-human interaction and communication, SimSensei aims to capture and infer user state from signals generated from user non-verbal communication to improve engagement between a VH and a user and to quantify user state from the data captured across a 20 minute interview. Results from a sample of service members (SMs) who were interviewed before and after a deployment to Afghanistan indicate that SMs reveal more PTSD symptoms to the VH than they report on the Post Deployment Health Assessment. Pre/post-deployment facial expression analysis indicated more sad expressions and fewer happy expressions at post-deployment.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Talbot, Thomas B.; Kalisch, Nicolai; Christoffersen, Kelly; Lucas, Gale; Forbell, Eric
Natural Language Understanding Performance & Use Considerations in Virtual Medical Encounters. Journal Article
In: Medicine Meets Virtual Reality 22: NextMed/MMVR22, vol. 220, pp. 407–413, 2016.
@article{talbot_natural_2016,
title = {Natural Language Understanding Performance & Use Considerations in Virtual Medical Encounters.},
author = {Thomas B. Talbot and Nicolai Kalisch and Kelly Christoffersen and Gale Lucas and Eric Forbell},
url = {http://books.google.com/books?hl=en&lr=&id=sLgtDAAAQBAJ&oi=fnd&pg=PA407&dq=%22through+regular+web+browsers+and+is+capable+of+multiple+types+of%22+%22practice+targeting+diagnostic+interviews.+A+natural+language+interview%22+%22narrative+statement+based+upon+dialog+context.+The+dialog+manager%27s%22+&ots=Ej8L8hxLlb&sig=GMnqEb5n7CB9x1lWE4gfe5_4n8o},
doi = {10.3233/978-1-61499-625-5-407},
year = {2016},
date = {2016-04-01},
journal = {Medicine Meets Virtual Reality 22: NextMed/MMVR22},
volume = {220},
pages = {407–413},
abstract = {A virtual standardized patient (VSP) prototype was tested for natural language understanding (NLU) performance. The conversational VSP was evaluated in a controlled 61-subject study over four repetitions of a patient case. The prototype achieved a more than 92% appropriate response rate from naïve users on their first attempt, and results were stable by their fourth case repetition. This level of performance exceeds prior efforts and is comparable in accuracy to human conversational patient training, with caveats. This performance was possible due to a unified medical taxonomy underpinning the system, which allows virtual patient language training to be applied to all cases in our system as opposed to benefiting a single patient case.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Gandhe, Sudeep; Traum, David
A Semi-automated Evaluation Metric for Dialogue Model Coherence Book Section
In: Situated Dialog in Speech-Based Human-Computer Interaction, pp. 217–225, Springer International Publishing, Cham, 2016, ISBN: 978-3-319-21833-5 978-3-319-21834-2.
@incollection{gandhe_semi-automated_2016,
title = {A Semi-automated Evaluation Metric for Dialogue Model Coherence},
author = {Sudeep Gandhe and David Traum},
url = {http://link.springer.com/10.1007/978-3-319-21834-2_19},
isbn = {978-3-319-21833-5 978-3-319-21834-2},
year = {2016},
date = {2016-04-01},
booktitle = {Situated Dialog in Speech-Based Human-Computer Interaction},
pages = {217–225},
publisher = {Springer International Publishing},
address = {Cham},
abstract = {We propose a new metric, Voted Appropriateness, which can be used to automatically evaluate dialogue policy decisions once some wizard data has been collected. We show that this metric outperforms a previously proposed metric, Weak Agreement. We also present a taxonomy for dialogue model evaluation schemas, and orient our new metric within this taxonomy.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
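A minimal reading of the Voted Appropriateness idea, reconstructed from this abstract alone and therefore only an assumption about the exact formula: score a dialogue policy by the share of wizard votes its chosen response receives in each context. The contexts, votes, and policy below are hypothetical.

from collections import Counter

# Hypothetical wizard data: responses several wizards chose for each dialogue context
wizard_votes = {
    "ctx1": ["greet", "greet", "ask_name"],
    "ctx2": ["answer_q", "clarify", "answer_q", "answer_q"],
}

def voted_appropriateness(policy):
    """Mean vote share earned by the policy's choice across wizard-annotated contexts."""
    total = 0.0
    for ctx, votes in wizard_votes.items():
        counts = Counter(votes)
        total += counts[policy(ctx)] / len(votes)
    return total / len(wizard_votes)

print(voted_appropriateness(lambda ctx: "greet" if ctx == "ctx1" else "answer_q"))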
Phan, Thai; Krum, David M.; Bolas, Mark
ShodanVR: Immersive Visualization of Text Records from the Shodan Database Proceedings Article
In: Proceedings of the 2016 Workshop on Immersive Analytics (IA), IEEE, Greenville, SC, 2016, ISBN: 978-1-5090-0834-6.
@inproceedings{phan_shodanvr_2016,
title = {ShodanVR: Immersive Visualization of Text Records from the Shodan Database},
author = {Thai Phan and David M. Krum and Mark Bolas},
url = {http://ieeexplore.ieee.org/document/7932379/?part=1},
doi = {10.1109/IMMERSIVE.2016.7932379},
isbn = {978-1-5090-0834-6},
year = {2016},
date = {2016-03-01},
booktitle = {Proceedings of the 2016 Workshop on Immersive Analytics (IA)},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {ShodanVR is an immersive visualization for querying and displaying text records from the Shodan database of Internet connected devices. Shodan provides port connection data retrieved from servers, routers, and other networked devices [2]. Cybersecurity professionals can glean this data for device populations, software versions, and potential security vulnerabilities [1].},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Azmandian, Mahdi; Grechkin, Timofey; Bolas, Mark; Suma, Evan
Automated Path Prediction for Redirected Walking Using Navigation Meshes Proceedings Article
In: 2016 IEEE Symposium on 3D User Interfaces (3DUI), pp. 63–66, IEEE, Greenville, SC, 2016.
@inproceedings{azmandian_automated_2016,
title = {Automated Path Prediction for Redirected Walking Using Navigation Meshes},
author = {Mahdi Azmandian and Timofey Grechkin and Mark Bolas and Evan Suma},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7460032},
doi = {10.1109/3DUI.2016.7460032},
year = {2016},
date = {2016-03-01},
booktitle = {2016 IEEE Symposium on 3D User Interfaces (3DUI)},
pages = {63–66},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {Redirected walking techniques have been introduced to overcome physical space limitations for natural locomotion in virtual reality. These techniques decouple real and virtual user trajectories by subtly steering the user away from the boundaries of the physical space while maintaining the illusion that the user follows the intended virtual path. The effectiveness of redirection algorithms can improve significantly when a reliable prediction of the user's future virtual path is available. In current solutions, the future user trajectory is predicted based on non-standardized manual annotations of the environment structure, which is both tedious and inflexible. We propose a method for automatically generating environment annotation graphs and predicting the user trajectory using navigation meshes. We discuss the integration of this method with existing redirected walking algorithms such as FORCE and MPCRed. Automated annotation of the virtual environment's structure enables simplified deployment of these algorithms in any virtual environment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
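The navigation-mesh path prediction can be illustrated with a shortest-path search over a graph of walkable polygons. The graph below is a hypothetical annotation graph with invented node names and edge lengths; Dijkstra's algorithm stands in for the trajectory-prediction step, not necessarily the search the FORCE/MPCRed integration uses.

import heapq

# Hypothetical annotation graph: navmesh polygon centers and traversable edges with distances
graph = {"A": {"B": 2.0}, "B": {"A": 2.0, "C": 1.5, "D": 3.0},
         "C": {"B": 1.5, "D": 1.0}, "D": {"B": 3.0, "C": 1.0}}

def predict_path(start, goal):
    """Dijkstra shortest path: a stand-in for predicting the user's future virtual trajectory."""
    dist, prev, pq = {start: 0.0}, {}, [(0.0, start)]
    while pq:
        d, u = heapq.heappop(pq)
        if u == goal:
            break
        if d > dist[u]:
            continue                      # stale queue entry
        for v, w in graph[u].items():
            nd = d + w
            if nd < dist.get(v, float("inf")):
                dist[v], prev[v] = nd, u
                heapq.heappush(pq, (nd, v))
    path = [goal]
    while path[-1] != start:
        path.append(prev[path[-1]])
    return path[::-1]

print(predict_path("A", "D"))   # -> ['A', 'B', 'C', 'D']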
Wang, Ning; Pynadath, David V.; Hill, Susan G.
Trust Calibration within a Human-Robot Team: Comparing Automatically Generated Explanations Proceedings Article
In: 2016 11th ACM/IEEE International Conference on Human-Robot Interaction (HRI), pp. 109–116, IEEE, New Zealand, 2016.
@inproceedings{wang_trust_2016,
title = {Trust Calibration within a Human-Robot Team: Comparing Automatically Generated Explanations},
author = {Ning Wang and David V. Pynadath and Susan G. Hill},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7451741},
doi = {10.1109/HRI.2016.7451741},
year = {2016},
date = {2016-03-01},
booktitle = {2016 11th ACM/IEEE International Conference on Human-Robot Interaction (HRI)},
pages = {109–116},
publisher = {IEEE},
address = {New Zealand},
abstract = {Trust is a critical factor for achieving the full potential of human-robot teams. Researchers have theorized that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies have shown that hand-crafted explanations can help maintain trust when the system is less than 100% reliable. In this work, we leverage existing agent algorithms to provide a domain-independent mechanism for robots to automatically generate such explanations. To measure the explanation mechanism's impact on trust, we collected self-reported survey data and behavioral data in an agent-based online testbed that simulates a human-robot team task. The results demonstrate that the added explanation capability led to improvement in transparency, trust, and team performance. Furthermore, by observing the different outcomes due to variations in the robot's explanation content, we gain valuable insight that can help lead to refinement of explanation algorithms to further improve human-robot trust calibration.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hutton, Courtney; Suma, Evan
A Realistic Walking Model for Enhancing Redirection in Virtual Reality Proceedings Article
In: 2016 IEEE Virtual Reality (VR), pp. 183–184, IEEE, Greenville, SC, 2016.
@inproceedings{hutton_realistic_2016,
title = {A Realistic Walking Model for Enhancing Redirection in Virtual Reality},
author = {Courtney Hutton and Evan Suma},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7504714},
doi = {10.1109/VR.2016.7504714},
year = {2016},
date = {2016-03-01},
booktitle = {2016 IEEE Virtual Reality (VR)},
pages = {183–184},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {Redirected walking algorithms require the prediction of human motion in order to effectively steer users away from the boundaries of the physical space. While a virtual walking trajectory may be represented using straight lines connecting waypoints of interest, this simple model does not accurately represent typical user behavior. In this poster we present a more realistic walking model for use in real-time virtual environments that employ redirection techniques. We implemented the model within a framework that can be used for simulation of redirected walking within different virtual and physical environments. Such simulations are useful for the evaluation of redirected walking algorithms and the tuning of parameters under varying conditions. Additionally, the model can also be used to animate an artificial humanoid “ghost walker” to provide a visual demonstration of redirected walking in virtual reality.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Krum, David M.; Kang, Sin-Hwa; Phan, Thai; Dukes, Lauren Cairco; Bolas, Mark
Head Mounted Projection for Enhanced Gaze in Social Interactions Proceedings Article
In: 2016 IEEE Virtual Reality (VR), pp. 209–210, IEEE, Greenville, SC, 2016.
@inproceedings{krum_head_2016,
title = {Head Mounted Projection for Enhanced Gaze in Social Interactions},
author = {David M. Krum and Sin-Hwa Kang and Thai Phan and Lauren Cairco Dukes and Mark Bolas},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7504727},
doi = {10.1109/VR.2016.7504727},
year = {2016},
date = {2016-03-01},
booktitle = {2016 IEEE Virtual Reality (VR)},
pages = {209–210},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {Projected displays can present life-sized imagery of a virtual human character that can be seen by multiple observers. However, typical projected displays can only render that virtual human from a single viewpoint, regardless of whether head tracking is employed. This results in the virtual human being rendered from an incorrect perspective for most individuals. This could cause perceptual miscues, such as the “Mona Lisa” effect, causing the virtual human to appear as if it is simultaneously gazing and pointing at all observers regardless of their location. This may be detrimental to training scenarios in which all trainees must accurately assess where the virtual human is looking or pointing a weapon. We discuss our investigations into the presentation of eye gaze using REFLCT, a previously introduced head mounted projective display. REFLCT uses head tracked, head mounted projectors and retroreflective screens to present personalized, perspective correct imagery to multiple users without the occlusion of a traditional head mounted display. We examined how head mounted projection for enhanced presentation of eye gaze might facilitate or otherwise affect social interactions during a multi-person guessing game of “Twenty Questions.”},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Nilsson, Niels; Suma, Evan; Nordahl, Rolf; Bolas, Mark; Serafin, Stefania
Estimation of Detection Thresholds for Audiovisual Rotation Gains Proceedings Article
In: IEEE Virtual Reality 2016, pp. ID: A22, IEEE, Greenville, SC, 2016.
@inproceedings{nilsson_estimation_2016,
title = {Estimation of Detection Thresholds for Audiovisual Rotation Gains},
author = {Niels Nilsson and Evan Suma and Rolf Nordahl and Mark Bolas and Stefania Serafin},
url = {http://ieeevr.org/2016/posters/},
year = {2016},
date = {2016-03-01},
booktitle = {IEEE Virtual Reality 2016},
pages = {ID: A22},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {Redirection techniques allow users to explore large virtual environments on foot while remaining within a limited physical space. However, research has primarily focused on redirection through manipulation of visual stimuli. We describe a within-subjects study (n=31) exploring if participants’ ability to detect differences between real and virtual rotations is influenced by the addition of sound that is spatially aligned with its virtual source. The results revealed similar detection thresholds for conditions involving moving audio, static audio, and no audio. This may be viewed as an indication of visual dominance during scenarios such as the one used for the current study.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
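Detection thresholds in studies like this one are commonly estimated by fitting a psychometric function to the proportion of "virtual rotation was larger" responses at each gain and reading off, for example, the 25% and 75% points. The sketch below uses invented response proportions and a logistic fit; it shows the general technique, not the authors' exact analysis.

import numpy as np
from scipy.optimize import curve_fit

# Invented data: proportion of "virtual rotation was larger" answers per rotation gain
gains = np.array([0.7, 0.85, 1.0, 1.15, 1.3])
p_larger = np.array([0.10, 0.25, 0.55, 0.80, 0.95])

def psychometric(g, g50, slope):
    """Logistic psychometric function centered at g50."""
    return 1.0 / (1.0 + np.exp(-slope * (g - g50)))

(g50, slope), _ = curve_fit(psychometric, gains, p_larger, p0=[1.0, 5.0])

def threshold(p):
    """Gain at which the fitted curve crosses probability p (inverse logistic)."""
    return g50 + np.log(p / (1.0 - p)) / slope

print(threshold(0.25), threshold(0.75))   # lower and upper detection thresholds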
Artstein, Ron; Silver, Kenneth
Ethics for a Combined Human-Machine Dialogue Agent Proceedings Article
In: Ethical and Moral Considerations in Non-Human Agents: Papers from the AAAI Spring Symposium, pp. 184–189, AAAI Press, Stanford, California, 2016.
@inproceedings{artstein_ethics_2016,
title = {Ethics for a Combined Human-Machine Dialogue Agent},
author = {Ron Artstein and Kenneth Silver},
url = {http://www.aaai.org/ocs/index.php/SSS/SSS16/paper/viewFile/12706/11948},
year = {2016},
date = {2016-03-01},
booktitle = {Ethical and Moral Considerations in Non-Human Agents: Papers from the AAAI Spring Symposium},
pages = {184–189},
publisher = {AAAI Press},
address = {Stanford, California},
abstract = {We discuss philosophical and ethical issues that arise from a dialogue system intended to portray a real person, using recordings of the person together with a machine agent that selects recordings during a synchronous conversation with a user. System output may count as actions of the speaker if the speaker intends to communicate with users and the outputs represent what the speaker would have chosen to say in context; in such cases the system can justifiably be said to be holding a conversation that is offset in time. The autonomous agent may at times misrepresent the speaker’s intentions, and such failures are analogous to good-faith misunderstandings. The user may or may not need to be informed that the speaker is not organically present, depending on the application.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Azmandian, Mahdi; Grechkin, Timofey; Bolas, Mark; Suma, Evan
The Redirected Walking Toolkit: A Unified Development Platform for Exploring Large Virtual Environments Proceedings Article
In: 2nd Workshop on Everyday Virtual Reality, IEEE, Greenville, SC, 2016.
@inproceedings{azmandian_redirected_2016,
title = {The Redirected Walking Toolkit: A Unified Development Platform for Exploring Large Virtual Environments},
author = {Mahdi Azmandian and Timofey Grechkin and Mark Bolas and Evan Suma},
url = {http://www.adalsimeone.me/papers/WEVR2016/WEVR2016_Azmandian.pdf},
year = {2016},
date = {2016-03-01},
booktitle = {2nd Workshop on Everyday Virtual Reality},
publisher = {IEEE},
address = {Greenville, SC},
abstract = {With the imminent emergence of low-cost tracking solutions, everyday VR users will soon experience the enhanced immersion of natural walking. Even with consumer-grade room-scale tracking, exploring large virtual environments can be made possible using a software solution known as redirected walking. Wide adoption of this technique has been hindered by the complexity and subtleties involved in successfully deploying redirection. To address this matter, we introduce the Redirected Walking Toolkit, to serve as a unified platform for developing, benchmarking, and deploying redirected walking algorithms. Our design enables seamless integration with standard virtual reality configurations, requiring minimal setup effort for content developers.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2000
Kim, Youngjun; Hill, Randall W.; Gratch, Jonathan
How Long Can an Agent Look Away From a Target? Proceedings Article
In: 9th Conference on Computer Generated Forces and Behavioral Representation, 2000.
@inproceedings{kim_how_2000,
title = {How Long Can an Agent Look Away From a Target?},
author = {Youngjun Kim and Randall W. Hill and Jonathan Gratch},
url = {http://ict.usc.edu/pubs/how%20long%20can%20you%20look%20away%20from%20a%20target.pdf},
year = {2000},
date = {2000-05-01},
booktitle = {9th Conference on Computer Generated Forces and Behavioral Representation},
abstract = {Situation awareness (SA) is the perception of the elements in the environment within a volume of time and space, the comprehension of their meaning, and the projection of their status in the near future [3]. Although the impact of situation awareness and assessment on humans in complex systems is clear, no single theory of SA has been developed. A critical aspect of the SA problem is that agents must construct an overall view of a dynamically changing world using limited sensor channels. For instance, a (virtual) pilot, who visually tracks the location and direction of several vehicles that he cannot see simultaneously, must shift his visual field of view to scan the environment and to sense the situation involved. How he directs his attention, for how long, and how he efficiently reacquires targets is the central question we address in this paper. We describe the perceptual coordination that helps a virtual pilot efficiently track one or more objects. In SA, it is important for a virtual pilot with a limited visual field of view to gather more information from its environment and to choose appropriate actions to take in the environment without losing the target.},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
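One way to make the paper's central question concrete, under a deliberately simplified geometric model of our own (not the authors'): if positional uncertainty about an unobserved target grows at the target's maximum speed, the agent must look back before that uncertainty outgrows the ground area a single glance can cover.

import math

def max_look_away_time(fov_half_angle_deg, distance, target_max_speed, position_error=0.0):
    """Upper bound (seconds) on how long a target can be ignored before it may leave
    the region one glance covers. Illustrative model only, not from the paper."""
    fov_radius = distance * math.tan(math.radians(fov_half_angle_deg))  # ground radius one glance covers
    slack = max(fov_radius - position_error, 0.0)                       # margin before the target could escape
    return slack / target_max_speed

# A vehicle 1000 m away, viewed with a 20-degree half-angle field of view, moving at most 15 m/s:
print(round(max_look_away_time(20, 1000, 15), 1), "seconds")   # ~24.3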
Georgiou, Panayiotis G.; Kyriakakis, Chris
A Multiple Input Single Output Model for Rendering Virtual Sound Sources in Real Time Proceedings Article
In: Proceedings of ICME 2000, New York, NY, 2000.
@inproceedings{georgiou_multiple_2000,
title = {A Multiple Input Single Output Model for Rendering Virtual Sound Sources in Real Time},
author = {Panayiotis G. Georgiou and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/A%20MULTIPLE%20INPUT%20SINGLE%20OUTPUT%20MODEL%20FOR%20RENDERING%20VIRTUAL%20SOUND%20SOURCES%20IN%20REAL%20TIME.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {Proceedings of ICME 2000},
address = {New York, NY},
abstract = {Accurate localization of sound in 3-D space is based on variations in the spectrum of sound sources. These variations arise mainly from reflection and diffraction effects caused by the pinnae and are described through a set of Head-Related Transfer Functions (HRTF's) that are unique for each azimuth and elevation angle. A virtual sound source can be rendered in the desired location by filtering with the corresponding HRTF for each ear. Previous work on HRTF modeling has mainly focused on methods that attempt to model each transfer function individually. These methods are generally computationally complex and cannot be used for real-time spatial rendering of multiple moving sources. In this work we provide an alternative approach, which uses a multiple input single output state space system to create a combined model of the HRTF's for all directions. This method exploits the similarities among the different HRTF's to achieve a significant reduction in the model size with a minimum loss of accuracy.},
keywords = {DTIC},
pubstate = {published},
tppubtype = {inproceedings}
}
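The combined multiple-input single-output model can be sketched as the standard state-space recursion x[n+1] = A x[n] + B u[n], y[n] = C x[n] + D u[n], with one input channel per rendered source direction and one such system per ear. The matrices below are random stand-ins, not fitted HRTF data.

import numpy as np

def miso_state_space(A, B, C, D, U):
    """Run y[n] = C x[n] + D u[n], x[n+1] = A x[n] + B u[n] over inputs U (n_samples, n_inputs)."""
    x = np.zeros(A.shape[0])
    y = np.empty(len(U))
    for n, u in enumerate(U):
        y[n] = C @ x + D @ u
        x = A @ x + B @ u
    return y

rng = np.random.default_rng(0)
n_state, n_inputs = 8, 4                     # one input per source direction (toy sizes)
A = 0.9 * np.eye(n_state) + 0.01 * rng.standard_normal((n_state, n_state))  # stable-ish dynamics
B = rng.standard_normal((n_state, n_inputs))
C = rng.standard_normal(n_state)
D = rng.standard_normal(n_inputs)
U = rng.standard_normal((512, n_inputs))     # stand-in source signals
left_ear = miso_state_space(A, B, C, D, U)   # one such model would be run per ear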
Gratch, Jonathan
Émile: Marshalling Passions in Training and Education Proceedings Article
In: Proceedings of the 4th International Conference on Autonomous Agents, pp. 325–332, Barcelona, Spain, 2000.
@inproceedings{gratch_emile_2000,
title = {Émile: Marshalling Passions in Training and Education},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Emile-%20Marshalling%20Passions%20in%20Training%20and%20Education.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {Proceedings of the 4th International Conference on Autonomous Agents},
pages = {325–332},
address = {Barcelona, Spain},
abstract = {Emotional reasoning can be an important contribution to automated tutoring and training systems. This paper describes Émile, a model of emotional reasoning that builds upon existing approaches and significantly generalizes and extends their capabilities. The main contribution is to show how an explicit planning model allows a more general treatment of several stages of the reasoning process. The model supports educational applications by allowing agents to appraise the emotional significance of events as they relate to students' (or their own) plans and goals, model and predict the emotional state of others, and alter behavior accordingly.},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan
Modeling the Interplay Between Emotion and Decision-Making Proceedings Article
In: Proceedings of the 9th Conference on Computer Generated Forces and Behavioral Representation, 2000.
@inproceedings{gratch_modeling_2000,
title = {Modeling the Interplay Between Emotion and Decision-Making},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Modeling%20the%20Interplay%20Between%20Emotion%20and%20Decision-Making.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {Proceedings of the 9th Conference on Computer Generated Forces and Behavioral Representation},
abstract = {Current models of computer-generated forces are limited by their inability to model many of the moderators that influence the performance of real troops in the field such as the effects of stress, emotion, and individual differences. This article discusses an extension to our command and control modeling architecture that begins to address how behavioral moderators influence the command decision-making process. Our Soar-Cfor command architecture was developed under the STOW and ASTT programs to support distributed command and control decision-making in the domain of army aviation planning. We have recently extended this architecture to model how people appraise the emotional significance of events and how these events influence decision making.},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Scholer, Andrew; Rickel, Jeff; Angros, Richard Jr.; Johnson, W. Lewis
Learning Domain Knowledge for Teaching Procedural Tasks Proceedings Article
In: AAAI-2000 Fall Symposium on Learning How to Do Things, 2000.
@inproceedings{scholer_learning_2000,
title = {Learning Domain Knowledge for Teaching Procedural Tasks},
author = {Andrew Scholer and Jeff Rickel and Richard Jr. Angros and W. Lewis Johnson},
url = {http://ict.usc.edu/pubs/Learning%20Domain%20Knowledge%20for%20Teaching%20Procedural%20Tasks.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {AAAI-2000 Fall Symposium on Learning How to Do Things},
abstract = {Providing domain knowledge needed by intelligent tutoring systems to teach a procedure to students is traditionally a difficult and time consuming task. This paper presents a system for making this process easier by allowing the automated tutor to acquire the knowledge it needs through a combination of programming by demonstration, autonomous experimentation, and direct instruction.},
keywords = {DTIC},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan
Socially Situated Planning Book Section
In: Socially Intelligent Agents, Multiagent Systems, Artificial Societies, and Simulated Organizations, vol. 3, pp. 181–188, AAAI Fall Symposium on Socially Intelligent Agents - The Human in the Loop, North Falmouth, MA, 2000.
@incollection{gratch_socially_2000,
title = {Socially Situated Planning},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Socially%20Situated%20Planning.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {Socially Intelligent Agents, Multiagent Systems, Artificial Societies, and Simulated Organizations},
volume = {3},
pages = {181–188},
address = {AAAI Fall Symposium on Socially Intelligent Agents - The Human in the Loop, North Falmouth, MA},
abstract = {Introduction: Virtual environments such as training simulators and video games do an impressive job at modeling the physical dynamics of synthetic worlds but fall short when modeling the social dynamics of anything but the most impoverished human encounters. Yet the social dimension is at least as important as good graphics for creating an engaging game or effective training tool. Commercial flight simulators accurately model the technical aspects of flight but many aviation disasters arise from social breakdowns: poor management skills in the cockpit, or the effects of stress and emotion. Perhaps the biggest consumer of simulation technology, the U.S. military, identifies unrealistic human and organizational behavior as a major limitation of existing simulation technology (NRC, 1998). And of course the entertainment industry has long recognized the importance of good character, emotional attachment and rich social interactions to "put butts in seats." This article describes a research effort to endow virtual training environments with richer models of social behavior. We have been developing autonomous and semi-autonomous software agents that plan and act while situated in a social network of other entities, human and synthetic (Hill et al., 1997; Tambe, 1997; Gratch and Hill, 1999). My work has focused on making agents act in an organization and obey social constraints, coordinate their behavior, negotiate conflicts, but also obey their own self-interest and show a range of individual differences in their behavior and willingness to violate social norms, albeit within the relatively narrow context of a specific training exercise.},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {incollection}
}
Bharitkar, Sunil; Kyriakakis, Chris
Eigenfilters for Signal Cancellation Proceedings Article
In: International Symposium on Intelligent Signal Processing and Communication Systems (ISPACS), Hawaii, 2000.
@inproceedings{bharitkar_eigenfilters_2000,
title = {Eigenfilters for Signal Cancellation},
author = {Sunil Bharitkar and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/EIGENFILTERS%20FOR%20SIGNAL%20CANCELLATION.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {International Symposium on Intelligent Signal Processing and Communication Systems (ISPACS)},
address = {Hawaii},
abstract = {Selectively canceling signals at specific locations within an acoustical environment with multiple listeners is of significant importance for home theater, automobile, teleconferencing, office, industrial and other applications. The traditional noise cancellation approach is impractical for such applications because it requires sensors that must be placed on the listeners. In this paper we investigate the theoretical properties of eigenfilters for signal cancellation proposed in [1]. We also investigate the sensitivity of the eigenfilter as a function of the room impulse response duration. Our results show that with the minimum phase model for the room impulse response, we obtain a better behaviour in the sensitivity of the filter to the duration of the room response.},
keywords = {DTIC},
pubstate = {published},
tppubtype = {inproceedings}
}
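A generic eigenfilter construction for signal cancellation, paraphrased from the cited idea [1] with invented room responses: choose filter taps that maximize the energy delivered through the "keep" response relative to the "cancel" response, i.e., the dominant generalized eigenvector of the two correlation matrices. This is one standard formulation, not necessarily the paper's exact one.

import numpy as np
from scipy.linalg import eigh

rng = np.random.default_rng(1)
L = 32   # filter length

def corr_matrix(h, L):
    """Correlation matrix of the filter's output through room response h: w.T R w = ||h * w||^2."""
    n = len(h) + L - 1
    T = np.zeros((n, L))          # convolution matrix: column k is h shifted by k samples
    for k in range(L):
        T[k:k + len(h), k] = h
    return T.T @ T

h_keep = rng.standard_normal(64)     # stand-in impulse response to the listener to preserve
h_cancel = rng.standard_normal(64)   # stand-in response to the location to silence
R_keep, R_cancel = corr_matrix(h_keep, L), corr_matrix(h_cancel, L)

# Generalized eigenvector with the largest preserved-to-cancelled energy ratio
vals, vecs = eigh(R_keep, R_cancel + 1e-6 * np.eye(L))   # eigenvalues in ascending order
w = vecs[:, -1]                                          # eigenfilter taps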
Srinivasamurthy, Naveen; Ortega, Antonio; Narayanan, Shrikanth
Efficient Scalable Speech Compression for Scalable Speech Recognition Proceedings Article
In: Proceedings of the IEEE Conference on Multimedia and Expo, 2000.
@inproceedings{srinivasamurthy_efficient_2000,
title = {Efficient Scalable Speech Compression for Scalable Speech Recognition},
author = {Naveen Srinivasamurthy and Antonio Ortega and Shrikanth Narayanan},
url = {http://ict.usc.edu/pubs/Efficient%20Scalable%20Speech%20Compression%20for%20Scalable%20Speech%20Recognition.pdf},
year = {2000},
date = {2000-01-01},
booktitle = {Proceedings of the IEEE Conference on Multimedia and Expo},
abstract = {We propose a scalable recognition system for reducing recognition complexity. Scalable recognition can be combined with scalable compression in a distributed speech recognition (DSR) application to reduce both the computational load and the bandwidth requirement at the server. A low complexity preprocessor is used to eliminate the unlikely classes so that the complex recognizer can use the reduced subset of classes to recognize the unknown utterance. It is shown that by using our system it is fairly straightforward to trade-off reductions in complexity for performance degradation. Results of preliminary experiments using the TI-46 word digit database show that the proposed scalable approach can provide a 40% speed up, while operating under 1.05 kbps, compared to the baseline recognition using uncompressed speech.},
keywords = {DTIC},
pubstate = {published},
tppubtype = {inproceedings}
}
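The two-stage idea (a low-complexity preprocessor eliminates unlikely classes so that the complex recognizer scores only the survivors) can be sketched with a nearest-centroid prefilter followed by exemplar matching. All data below are random placeholders, not the TI-46 setup.

import numpy as np

rng = np.random.default_rng(2)
n_classes, dim = 46, 20                                   # e.g., 46 word classes, toy feature size
centroids = rng.standard_normal((n_classes, dim))         # cheap model: one centroid per class
templates = rng.standard_normal((n_classes, 50, dim))     # costly model: 50 exemplars per class

def recognize(x, k=5):
    """Cheap preprocessor keeps the k nearest classes; full matching runs only on those."""
    coarse = np.linalg.norm(centroids - x, axis=1)        # fast scoring of all classes
    survivors = np.argsort(coarse)[:k]                    # pruned class subset
    fine = [np.min(np.linalg.norm(templates[c] - x, axis=1)) for c in survivors]
    return survivors[int(np.argmin(fine))]                # expensive scoring on survivors only

print(recognize(rng.standard_normal(dim)))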
1999
Georgiou, Panayiotis G.; Tsakalides, Panagiotis; Kyriakakis, Chris
Alpha-Stable Modeling of Noise and Robust Time-Delay Estimation in the Presence of Impulsive Noise Journal Article
In: IEEE Transactions on Multimedia, vol. 1, pp. 291–301, 1999.
@article{georgiou_alpha-stable_1999,
title = {Alpha-Stable Modeling of Noise and Robust Time-Delay Estimation in the Presence of Impulsive Noise},
author = {Panayiotis G. Georgiou and Panagiotis Tsakalides and Chris Kyriakakis},
url = {http://ict.usc.edu/pubs/Alpha-Stable%20Modeling%20of%20Noise%20and%20Robust%20Time-%20Delay%20Estimation%20in%20the%20Presence%20of%20Impulsive%20Noise.pdf},
year = {1999},
date = {1999-09-01},
journal = {IEEE Transactions on Multimedia},
volume = {1},
pages = {291–301},
abstract = {A new representation of audio noise signals is proposed, based on symmetric alpha-stable (SαS) distributions in order to better model the outliers that exist in real signals. This representation addresses a shortcoming of the Gaussian model, namely, the fact that it is not well suited for describing signals with impulsive behavior. The stable and Gaussian methods are used to model measured noise signals. It is demonstrated that the stable distribution, which has heavier tails than the Gaussian distribution, gives a much better approximation to real-world audio signals. The significance of these results is shown by considering the time delay estimation (TDE) problem for source localization in teleimmersion applications. In order to achieve robust sound source localization, a novel time delay estimation approach is proposed. It is based on fractional lower order statistics (FLOS), which mitigate the effects of heavy-tailed noise. An improvement in TDE performance is demonstrated using FLOS that is up to a factor of four better than what can be achieved with second-order statistics.},
keywords = {DTIC},
pubstate = {published},
tppubtype = {article}
}
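A time-delay estimator of the general FLOS family can be sketched as below; the exact estimator, order p, and signal model are our assumptions, not necessarily the paper's. Replacing plain products with signed fractional powers |z|^p·sign(z) keeps heavy-tailed (here Cauchy) outliers from dominating the lag search.

import numpy as np

def frac_power(z, p):
    """Signed fractional power z^<p> = |z|**p * sign(z), used in place of plain products."""
    return np.sign(z) * np.abs(z) ** p

def flos_delay(x, y, max_lag, p=1.2):
    """Estimate the delay of y relative to x via fractional lower-order correlation."""
    def floc(lag):
        if lag >= 0:
            a, b = x[:len(x) - lag], y[lag:]
        else:
            a, b = x[-lag:], y[:len(y) + lag]
        return np.sum(frac_power(a, p / 2) * frac_power(b, p / 2))
    return max(range(-max_lag, max_lag + 1), key=floc)

rng = np.random.default_rng(3)
s = rng.standard_normal(4000)
y = np.roll(s, 7) + 0.5 * rng.standard_cauchy(4000)   # delayed copy plus impulsive noise
print(flos_delay(s, y, max_lag=20))                   # typically recovers the true delay of 7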
Gratch, Jonathan; Hill, Randall W.
Continuous Planning and Collaboration for Command and Control in Joint Synthetic Battlespaces Proceedings Article
In: Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation, Orlando, FL, 1999.
@inproceedings{gratch_continuous_1999,
title = {Continuous Planning and Collaboration for Command and Control in Joint Synthetic Battlespaces},
author = {Jonathan Gratch and Randall W. Hill},
url = {http://ict.usc.edu/pubs/Continuous%20Planning%20and%20Collaboration%20for%20Command%20and%20Control%20in%20Joint%20Synthetic%20Battlespaces.pdf},
year = {1999},
date = {1999-05-01},
booktitle = {Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation},
address = {Orlando, FL},
abstract = {In this paper we describe our efforts to model command and control entities for Joint Synthetic Battlespaces. Command agents require a broader repertoire of capabilities than is typically modeled in simulation. They must develop mission plans involving multiple subordinate units, monitor execution, dynamically modify mission plans in response to situational contingencies, collaborate with other decision makers, and deal with a host of organizational issues. We describe our approach to command agent modeling that addresses a number of these issues through its continuous and collaborative approach to mission planning.},
keywords = {DTIC, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Gratch, Jonathan; Marsella, Stacy C.; Hill, Randall W.; Stone III, LTC George
Deriving Priority Intelligence Requirements for Synthetic Command Entities Proceedings Article
In: Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation, Orlando, FL, 1999.
@inproceedings{gratch_deriving_1999,
title = {Deriving Priority Intelligence Requirements for Synthetic Command Entities},
author = {Jonathan Gratch and Stacy C. Marsella and Randall W. Hill and LTC George Stone III},
url = {http://ict.usc.edu/pubs/Deriving%20Priority%20Intelligence%20Requirements%20for%20Synthetic%20Command%20Entities.pdf},
year = {1999},
date = {1999-05-01},
booktitle = {Proceedings of the 8th Conference on Computer Generated Forces and Behavioral Representation},
address = {Orlando, FL},
abstract = {Simulation-based training is using increasingly complex synthetic forces. As more complex multiechelon synthetic forces are employed in simulations, the need for a realistic model of their command and control behavior becomes more urgent. In this paper we discuss one key component of such a model, the autonomous generation and use of priority intelligence requirements within multi-echelon plans.},
keywords = {DTIC, Social Simulation, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
Kyriakakis, Chris; Tsakalides, Panagiotis; Holman, Tomlinson
Surrounded by Sound: Acquisition and Rendering Methods for Immersive Audio Journal Article
In: Signal Processing Magazine, IEEE, vol. 16, no. 1, pp. 55–66, 1999, ISSN: 1053-5888.
@article{kyriakakis_surrounded_1999,
title = {Surrounded by Sound: Acquisition and Rendering Methods for Immersive Audio},
author = {Chris Kyriakakis and Panagiotis Tsakalides and Tomlinson Holman},
url = {http://ict.usc.edu/pubs/Surrounded%20by%20Sound-%20Acquisition%20and%20Rendering%20Methods%20for%20Immersive%20Audio.pdf},
doi = {10.1109/79.743868},
issn = {1053-5888},
year = {1999},
date = {1999-01-01},
journal = {Signal Processing Magazine, IEEE},
volume = {16},
number = {1},
pages = {55–66},
abstract = {The authors discuss immersive audio systems and the signal processing issues that pertain to the acquisition and subsequent rendering of 3D sound fields over loudspeakers. On the acquisition side, recent advances in statistical methods for achieving acoustical arrays in audio applications are reviewed. Classical array signal processing addresses two major aspects of spatial filtering, namely localization of a signal of interest, and adaptation of the spatial response of an array of sensors to achieve steering in a given direction. The achieved spatial focusing in the direction of interest makes array signal processing a necessary component in immersive sound acquisition systems. On the rendering side, 3D audio signal processing methods are described that allow rendering of virtual sources around the listener using only two loudspeakers. Finally, the authors discuss the commercial implications of audio DSP.},
keywords = {DTIC},
pubstate = {published},
tppubtype = {article}
}
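The acquisition-side array processing this article surveys centers on adapting an array's spatial response to steer toward a direction of interest. A minimal frequency-domain delay-and-sum beamformer illustrates the idea; the array geometry, sample rate, and function name below are our assumptions, not the article's.

import numpy as np

def delay_and_sum(signals, mic_spacing, angle_deg, fs, c=343.0):
    """signals: (n_mics, n_samples) captures from a uniform linear array."""
    n_mics, n_samples = signals.shape
    # Per-element delays that align a plane wave from the look direction.
    delays = np.arange(n_mics) * mic_spacing * np.sin(np.radians(angle_deg)) / c
    freqs = np.fft.rfftfreq(n_samples, d=1.0 / fs)
    spectra = np.fft.rfft(signals, axis=1)
    spectra *= np.exp(-2j * np.pi * freqs * delays[:, None])  # phase-align channels
    return np.fft.irfft(spectra.sum(axis=0) / n_mics, n=n_samples)

# Steer a 4-microphone array (5 cm spacing, 16 kHz audio) toward 30 degrees:
x = np.random.default_rng(0).standard_normal((4, 1024))
y = delay_and_sum(x, mic_spacing=0.05, angle_deg=30.0, fs=16000)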
Traum, David; Andersen, Carl F.; Chong, Waiyian; Josyula, Darsana; Okamoto, Yoshi; Purang, Khemdut; O'Donovan-Anderson, Michael; Perlis, Don
Representations of Dialogue State for Domain and Task Independent Meta-Dialogue Journal Article
In: Electronic Transactions on Artificial Intelligence, vol. 3, pp. 125–152, 1999.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@article{traum_representations_1999,
title = {Representations of Dialogue State for Domain and Task Independent Meta-Dialogue},
author = {David Traum and Carl F. Andersen and Waiyian Chong and Darsana Josyula and Yoshi Okamoto and Khemdut Purang and Michael O'Donovan-Anderson and Don Perlis},
url = {http://ict.usc.edu/pubs/Representations%20of%20Dialogue%20State%20for%20Domain%20and%20Task%20Independent%20Meta-Dialogue.pdf},
year = {1999},
date = {1999-01-01},
journal = {Electronic Transactions on Artificial Intelligence},
volume = {3},
pages = {125–152},
abstract = {We propose a representation of local dialogue context motivated by the need to react appropriately to meta-dialogue, such as various sorts of corrections to the sequence of an instruction and response action. Such contexts include at least the following aspects: the words and linguistic structures uttered, the domain correlates of those linguistic structures, and plans and actions in response. Each of these is needed as part of the context in order to be able to correctly interpret the range of possible corrections. Partitioning knowledge of dialogue structure in this way may lead to an ability to represent generic dialogue structure (e.g., in the form of axioms), which can be particularized to the domain, topic and content of the dialogue.},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
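The layered context this abstract argues for (linguistic structures, their domain correlates, and the responsive plans/actions) suggests a data structure in which a correction can rewrite one layer while the others stay intact. The sketch below is illustrative only; the class and field names are ours, not the paper's.

from dataclasses import dataclass, field

@dataclass
class DialogueContext:
    utterances: list = field(default_factory=list)   # words and linguistic structures
    referents: list = field(default_factory=list)    # domain correlates of those structures
    actions: list = field(default_factory=list)      # plans/actions taken in response

    def record(self, utterance, referent, action):
        self.utterances.append(utterance)
        self.referents.append(referent)
        self.actions.append(action)

    def correct(self, layer, index, new_value):
        # A meta-dialogue correction rewrites one layer of one turn while
        # the other layers remain available for reinterpretation.
        getattr(self, layer)[index] = new_value

ctx = DialogueContext()
ctx.record("open the valve", referent="valve-1", action="open(valve-1)")
ctx.correct("referents", 0, "valve-2")   # "no, the other valve"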
Gratch, Jonathan
Why You Should Buy an Emotional Planner Proceedings Article
In: Proceedings of the Agents '99 Workshop on Emotion-Based Agent Architectures, 1999.
Abstract | Links | BibTeX | Tags: DTIC, Virtual Humans
@inproceedings{gratch_why_1999,
title = {Why You Should Buy an Emotional Planner},
author = {Jonathan Gratch},
url = {http://ict.usc.edu/pubs/Why%20You%20Should%20Buy%20an%20Emotional%20Planner.pdf},
year = {1999},
date = {1999-01-01},
booktitle = {Proceedings of the Agents '99 Workshop on Emotion-Based Agent Architectures},
abstract = {Computational models of emotion have begun to address the problem of how agents arrive at a given emotional state, and how that state might alter their reactions to the environment. Existing work has focused on reactive models of behavior and does not, as of yet, provide much insight on how emotion might relate to the construction and execution of complex plans. This article focuses on this latter question. I present a model of how agents appraise the emotional significance of events that illustrates a complementary relationship between classical planning methods and models of emotion processing. By building on classical planning methods, the model clarifies prior accounts of emotional appraisal and extends these accounts to handle the generation and execution of complex multi-agent plans.},
keywords = {DTIC, Virtual Humans},
pubstate = {published},
tppubtype = {inproceedings}
}
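A minimal sketch of plan-based appraisal in the spirit of this abstract: prospect emotions such as hope and fear scale with a goal's importance and the planner's current estimate that the plan will attain it. The formulas are illustrative stand-ins, not the paper's actual model.

def appraise(goal_importance, p_success):
    # Prospect-based appraisal: hope rises with expected attainment,
    # fear with expected failure (illustrative formulas only).
    return {"hope": goal_importance * p_success,
            "fear": goal_importance * (1.0 - p_success)}

# A critical goal the current plan will probably achieve:
print(appraise(goal_importance=0.9, p_success=0.7))   # hope 0.63, fear 0.27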
2024
Bosnak, Robert E.; Bosnak, David E.; Rizzo, Albert
Systems and methods for ai driven generation of content attuned to a user Patent
US20240005583A1, 2024.
Abstract | Links | BibTeX | Tags:
@patent{bosnak_systems_369,
title = {Systems and methods for ai driven generation of content attuned to a user},
author = {Robert E. Bosnak and David E. Bosnak and Albert Rizzo},
url = {https://patentimages.storage.googleapis.com/2a/a6/76/2607333241cd11/US20240005583A1.pdf},
year = {2024},
date = {2024-01-01},
number = {US20240005583A1},
abstract = {Systems and methods enabling rendering an avatar attuned to a user. The systems and methods include receiving audio-visual data of user communications of a user. Using the audio-visual data, the systems and methods may determine vocal characteristics of the user, facial action units representative of facial features of the user, and speech of the user based on a speech recognition model and/or natural language understanding model. Based on the vocal characteristics, an acoustic emotion metric can be determined. Based on the speech recognition data, a speech emotion metric may be determined. Based on the facial action units, a facial emotion metric may be determined. An emotional complex signature may be determined to represent an emotional state of the user for rendering the avatar attuned to the emotional state based on a combination of the acoustic emotion metric, the speech emotion metric and the facial emotion metric.},
keywords = {},
pubstate = {published},
tppubtype = {patent}
}
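The patent's central step, combining per-modality emotion metrics into an "emotional complex signature," can be illustrated with a simple convex combination. The weights and the combination rule below are our assumptions; the abstract does not specify them.

def emotional_complex_signature(acoustic, speech, facial,
                                weights=(1/3, 1/3, 1/3)):
    """Each argument maps emotion labels to scores in [0, 1]."""
    labels = set(acoustic) | set(speech) | set(facial)
    wa, ws, wf = weights
    # Weighted fusion of the acoustic, speech, and facial emotion metrics.
    return {lab: wa * acoustic.get(lab, 0.0)
                 + ws * speech.get(lab, 0.0)
                 + wf * facial.get(lab, 0.0)
            for lab in labels}

sig = emotional_complex_signature(
    {"joy": 0.6}, {"joy": 0.4, "anger": 0.2}, {"joy": 0.8})
# Avatar rendering could then be conditioned on the dominant entry of `sig`.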
0000
Bosnak, David E.; Bosnak, Robert E.; Rizzo, Albert
Systems and methods for automated real-time generation of an interactive avatar utilizing short-term and long-term computer memory structures Patent
US11798217B2, 0000.
Abstract | Links | BibTeX | Tags:
@patent{bosnak_systems_nodate,
title = {Systems and methods for automated real-time generation of an interactive avatar utilizing short-term and long-term computer memory structures},
author = {David E. Bosnak and Robert E. Bosnak and Albert Rizzo},
url = {https://patentimages.storage.googleapis.com/8f/a5/ad/3e30e0837c20ee/US11798217.pdf},
number = {US11798217B2},
abstract = {Systems and methods enabling rendering an avatar attuned to a user. The systems and methods include receiving audio-visual data of user communications of a user. Using the audio-visual data, the systems and methods may determine vocal characteristics of the user, facial action units representative of facial features of the user, and speech of the user based on a speech recognition model and/or natural language understanding model. Based on the vocal characteristics, an acoustic emotion metric can be determined. Based on the speech recognition data, a speech emotion metric may be determined. Based on the facial action units, a facial emotion metric may be determined. An emotional complex signature may be determined to represent an emotional state of the user for rendering the avatar attuned to the emotional state based on a combination of the acoustic emotion metric, the speech emotion metric and the facial emotion metric.},
keywords = {},
pubstate = {published},
tppubtype = {patent}
}
Gratch, Jonathan
Emotion recognition ≠ Emotion Understanding: Challenges Confronting the Field of Affective Computing Journal Article
In: pp. 9, 0000.
BibTeX | Tags: DTIC, Emotions, Virtual Humans
@article{gratch_emotion_nodate,
title = {Emotion recognition ≠ Emotion Understanding: Challenges Confronting the Field of Affective Computing},
author = {Jonathan Gratch},
pages = {9},
keywords = {DTIC, Emotions, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
Gervits, Felix; Leuski, Anton; Bonial, Claire; Gordon, Carla; Traum, David
A Classification-Based Approach to Automating Human-Robot Dialogue Journal Article
In: pp. 13, 0000.
Abstract | Links | BibTeX | Tags: ARL, Dialogue, UARC, Virtual Humans
@article{gervits_classication-based_nodate,
title = {A Classification-Based Approach to Automating Human-Robot Dialogue},
author = {Felix Gervits and Anton Leuski and Claire Bonial and Carla Gordon and David Traum},
url = {https://link.springer.com/chapter/10.1007/978-981-15-9323-9_10},
doi = {10.1007/978-981-15-9323-9_10},
pages = {13},
abstract = {We present a dialogue system based on statistical classification which was used to automate human-robot dialogue in a collaborative navigation domain. The classifier was trained on a small corpus of multi-floor Wizard-of-Oz dialogue including two wizards: one standing in for dialogue capabilities and another for navigation. Below, we describe the implementation details of the classifier and show how it was used to automate the dialogue wizard. We evaluate our system on several sets of source data from the corpus and find that response accuracy is generally high, even with very limited training data. Another contribution of this work is the novel demonstration of a dialogue manager that uses the classifier to engage in multifloor dialogue with two different human roles. Overall, this approach is useful for enabling spoken dialogue systems to produce robust and accurate responses to natural language input, and for robots that need to interact with humans in a team setting.},
keywords = {ARL, Dialogue, UARC, Virtual Humans},
pubstate = {published},
tppubtype = {article}
}
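A minimal stand-in for the classification-based approach described above: a text classifier trained on (utterance, response-class) pairs retrieves a canned response for unseen input. The TF-IDF + logistic regression pipeline and the toy navigation data are our choices for illustration; the paper trains its classifier on a multi-floor Wizard-of-Oz corpus.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

# Toy (utterance, response-class) pairs standing in for the corpus.
corpus = [("move to the second floor", "ack-move"),
          ("go up one floor",          "ack-move"),
          ("stop where you are",       "ack-stop"),
          ("take a photo",             "ack-photo")]
texts, labels = zip(*corpus)

clf = make_pipeline(TfidfVectorizer(), LogisticRegression(max_iter=1000))
clf.fit(list(texts), list(labels))
print(clf.predict(["stop right where you are"]))   # expected: ['ack-stop']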
Hartholt, Arno; McCullough, Kyle; Mozgai, Sharon; Ustun, Volkan; Gordon, Andrew S
Introducing RIDE: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration & Development Environment Journal Article
In: pp. 11, 0000.
Abstract | BibTeX | Tags:
@article{hartholt_introducing_nodate,
title = {Introducing RIDE: Lowering the Barrier of Entry to Simulation and Training through the Rapid Integration & Development Environment},
author = {Arno Hartholt and Kyle McCullough and Sharon Mozgai and Volkan Ustun and Andrew S Gordon},
pages = {11},
abstract = {This paper describes the design, development, and philosophy of the Rapid Integration & Development Environment (RIDE). RIDE is a simulation platform that unites many Department of Defense (DoD) and Army simulation efforts to provide an accelerated development foundation and prototyping sandbox that provides direct benefit to the U.S. Army’s Synthetic Training Environment (STE) as well as the larger DoD and Army simulation communities. RIDE integrates a range of capabilities, including One World Terrain, Non-Player Character AI behaviors, xAPI logging, multiplayer networking, scenario creation, destructibility, machine learning approaches, and multi-platform support. The goal of RIDE is to create a simple, drag-and-drop development environment usable by people across all technical levels. RIDE leverages robust game engine technology while designed to be agnostic to any specific game or simulation engine. It provides decision makers with the tools needed to better define requirements and identify potential solutions in much less time and at much reduced costs. RIDE is available through Government Purpose Rights. We aim for RIDE to lower the barrier of entry to research and development efforts within the simulation community in order to reduce required time and effort for simulation and training prototyping. This paper provides an overview of our objective, overall approach, and next steps, in pursuit of these goals.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hartholt, Arno; Mozgai, Sharon
From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality Journal Article
In: pp. 35, 0000.
Abstract | BibTeX | Tags: DTIC, MedVR, Virtual Humans, VR
@article{hartholt_combat_nodate,
title = {From Combat to COVID-19 – Managing the Impact of Trauma Using Virtual Reality},
author = {Arno Hartholt and Sharon Mozgai},
pages = {35},
abstract = {Research has documented the efficacy of clinical applications that leverage Virtual Reality (VR) for assessment and treatment purposes across a wide range of domains, including pain, phobias, and posttraumatic stress disorder (PTSD). As the field of Clinical VR matures, it is important to review its origins and examine how these initial explorations have progressed, what gaps remain, and what opportunities the community can pursue. We do this by reflecting on our personal scientific journey against the backdrop of the field in general. In particular, this paper discusses how a clinical research program that was initially designed to deliver trauma-focused VR exposure therapy (VRET) for combat-related PTSD has been evolved to expand its impact and address a wider range of trauma sources. Such trauma sources include sexual trauma and the needs of first responders and healthcare professionals serving on the frontlines of the COVID-19 pandemic. We provide an overview of the field and its general trends, discuss the genesis of our research agenda and its current status, and summarize upcoming opportunities, together with common challenges and lessons learned.},
keywords = {DTIC, MedVR, Virtual Humans, VR},
pubstate = {published},
tppubtype = {article}
}
The Interservice/Industry Training, Simulation, and Education Conference Miscellaneous
0000.
Links | BibTeX | Tags: DTIC
@misc{noauthor_interservice_nodate,
title = {The Interservice/Industry Training, Simulation, and Education Conference},
url = {https://www.xcdsystem.com/iitsec/proceedings/index.cfm?Year=2021&AbID=97189&CID=862},
urldate = {2022-09-22},
keywords = {DTIC},
pubstate = {published},
tppubtype = {misc}
}
Chen, Haiwei; Zhao, Yajie
Don't Look into the Dark: Latent Codes for Pluralistic Image Inpainting Proceedings Article
In: pp. 7591–7600, 0000.
Abstract | Links | BibTeX | Tags: DTIC, Graphics, VGL
@inproceedings{chen_dont_nodate,
title = {Don't Look into the Dark: Latent Codes for Pluralistic Image Inpainting},
author = {Haiwei Chen and Yajie Zhao},
url = {https://openaccess.thecvf.com/content/CVPR2024/html/Chen_Dont_Look_into_the_Dark_Latent_Codes_for_Pluralistic_Image_CVPR_2024_paper.html},
pages = {7591–7600},
abstract = {We present a method for large-mask pluralistic image inpainting based on the generative framework of discrete latent codes. Our method learns latent priors, discretized as tokens, by only performing computations at the visible locations of the image. This is realized by a restrictive partial encoder that predicts the token label for each visible block, a bidirectional transformer that infers the missing labels by only looking at these tokens, and a dedicated synthesis network that couples the tokens with the partial image priors to generate a coherent and pluralistic complete image even under extreme mask settings. Experiments on public benchmarks validate our design choices, as the proposed method outperforms strong baselines in both visual quality and diversity metrics.},
keywords = {DTIC, Graphics, VGL},
pubstate = {published},
tppubtype = {inproceedings}
}
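The decoding pattern implied by this abstract, inferring missing discrete token labels from the visible ones before synthesis, can be sketched as an iterative confidence-based fill. The stub predictor below stands in for the paper's bidirectional transformer; everything here illustrates the decoding loop only, not the authors' code.

import numpy as np

VOCAB, MASK = 512, -1

def predict_logits(tokens):
    # Stub: a real model would condition on all unmasked tokens bidirectionally.
    rng = np.random.default_rng(int(np.count_nonzero(tokens != MASK)))
    return rng.standard_normal((tokens.size, VOCAB))

def fill_tokens(tokens):
    tokens = tokens.copy()
    while True:
        masked = np.flatnonzero(tokens == MASK)
        if masked.size == 0:
            return tokens
        logits = predict_logits(tokens)
        confidence = logits.max(axis=1)
        # Commit the most confident half of the masked positions each pass.
        commit = masked[np.argsort(-confidence[masked])][: max(1, masked.size // 2)]
        tokens[commit] = logits[commit].argmax(axis=1)

grid = np.full(64, MASK)
grid[:32] = np.arange(32) % VOCAB   # tokens from the visible half of the image
print(fill_tokens(grid))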
Artstein, Ron; Chen, Elizabeth
Augmenting Training Data for a Virtual Character Using GPT-3.5 Proceedings Article
In: The Florida Artificial Intelligence Research Society, 0000.
Abstract | Links | BibTeX | Tags: Dialogue, DTIC, Natural Language
@inproceedings{artstein_augmenting_nodate,
title = {Augmenting Training Data for a Virtual Character Using GPT-3.5},
author = {Ron Artstein and Elizabeth Chen},
url = {https://journals.flvc.org/FLAIRS/article/view/135552},
volume = {37},
publisher = {The Florida Artificial Intelligence Research Society},
abstract = {This paper compares different methods of using a large language model (GPT-3.5) for creating synthetic training data for a retrieval-based conversational character. The training data are in the form of linked questions and answers, which allow a classifier to retrieve a pre-recorded answer to an unseen question; the intuition is that a large language model could predict what human users might ask, thus saving the effort of collecting real user questions as training data. Results show small improvements in test performance for all synthetic datasets. However, a classifier trained on only small amounts of collected user data resulted in a higher F-score than the classifiers trained on much larger amounts of synthetic data generated using GPT-3.5. Based on these results, we see a potential in using large language models for generating training data, but at this point it is not as valuable as collecting actual user data for training.},
keywords = {Dialogue, DTIC, Natural Language},
pubstate = {published},
tppubtype = {inproceedings}
}
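One plausible way to produce the kind of synthetic question paraphrases evaluated here, written against the OpenAI v1 Python client (our interface choice; the paper states only that GPT-3.5 generated the data). The prompt wording and answer identifier are hypothetical.

from openai import OpenAI

client = OpenAI()   # assumes OPENAI_API_KEY is set in the environment

def paraphrase_questions(seed_question, n=5):
    # Ask the model to predict how users might phrase the same question.
    resp = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user",
                   "content": (f"Give {n} different ways a user might ask the "
                               f"following question, one per line:\n{seed_question}")}],
    )
    return resp.choices[0].message.content.splitlines()

# Each paraphrase is linked to the same pre-recorded answer, growing the
# question -> answer training pairs for the retrieval classifier.
for q in paraphrase_questions("Where were you born?"):
    print(q, "->", "ANSWER_birthplace")   # ANSWER_birthplace is a made-up ID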
Mozgai, Sharon; Rizzo, Albert A; Hartholt, Arno
Persuasive Technology for Suicide Prevention: A Virtual Human mHealth Application Proceedings Article
In: 0000.
Abstract | BibTeX | Tags: Virtual Humans, VR
@inproceedings{mozgai_persuasive_nodate,
title = {Persuasive Technology for Suicide Prevention: A Virtual Human mHealth Application},
author = {Sharon Mozgai and Albert A Rizzo and Arno Hartholt},
abstract = {We are demoing Battle Buddy, an mHealth application designed to support access to physical and mental wellness content as well as safety planning for U.S. military veterans. This virtual human interface will collect multimodal data through passive sensors native to popular wearables (e.g., Apple Watch) and deliver adaptive multimedia content specifically tailored to the user in the interdependent domains of physical, cognitive, and emotional health. Battle Buddy can deliver health interventions matched to the individual user via novel adaptive logic-based algorithms while employing various behavior change techniques (e.g., goal-setting, barrier identification, rewards, modeling, etc.). All interactions were specifically designed to engage and motivate by employing the persuasive strategies of (1) personalization, (2) self-monitoring, (3) tunneling, (4) suggestion, and (5) expertise.},
keywords = {Virtual Humans, VR},
pubstate = {published},
tppubtype = {inproceedings}
}
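The "adaptive logic-based algorithms" this demo abstract mentions can be pictured as simple rules gating wellness content on wearable-derived signals. The signals, thresholds, and module names below are invented for illustration and map loosely onto the persuasive strategies listed above.

def select_intervention(sleep_hours, resting_hr, goal_progress):
    # Invented rules mapping wearable signals to persuasive strategies.
    if sleep_hours < 6:
        return "sleep-hygiene module"        # barrier identification
    if resting_hr > 100:
        return "guided breathing exercise"   # suggestion
    if goal_progress >= 1.0:
        return "reward and new goal"         # goal-setting / rewards
    return "progress check-in"               # self-monitoring

print(select_intervention(sleep_hours=5.5, resting_hr=72, goal_progress=0.4))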