Publications
Search
Stocco, Andrea; Laird, John; Lebiere, Christian; Rosenbloom, Paul
Empirical Evidence from Neuroimaging Data for a Standard Model of the Mind Proceedings Article
In: Proceedings of the 40th Annual Meeting of the Cognitive Science Society, Cognitive Science Society, Madison, WI, 2018.
@inproceedings{stocco_empirical_2018,
title = {Empirical Evidence from Neuroimaging Data for a {Standard Model of the Mind}},
author = {Andrea Stocco and John Laird and Christian Lebiere and Paul Rosenbloom},
url = {https://www.researchgate.net/publication/325106544_Empirical_Evidence_from_Neuroimaging_Data_for_a_Standard_Model_of_the_Mind},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 40th Annual Meeting of the Cognitive Science Society},
publisher = {Cognitive Science Society},
address = {Madison, WI},
abstract = {In a recent paper, Laird, Lebiere, and Rosenbloom (2017) highlight how 40 years of research on cognitive architectures has begun to yield a dramatic convergence of different approaches towards a set of basic assumptions that they called the “Standard Model of the Mind” (SMM), in analogy to the Standard Model of particle physics. The SMM was designed to capture a consensus view of “human-like minds”, whether from AI or cognitive science, which if valid must also be true of the human brain. Here, we provide a preliminary test of this hypothesis based on a re-analysis of fMRI data from four tasks that span a wide range of cognitive functions and cognitive complexity, and are representative of the specific form of intelligence and flexibility that is associated with higher-level human cognition. Using an established method (Dynamic Causal Modeling) to examine functional connectivity between brain regions, the SMM was compared against two alternative models that violate either functional or structural assumptions of the SMM. The results show that, in every dataset, the SMM significantly outperforms the other models, suggesting that the SMM best captures the functional requirements of brain dynamics in fMRI data among these alternatives.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Georgila, Kallirroi; Gordon, Carla; Choi, Hyungtak; Boberg, Jill; Jeon, Heesik; Traum, David
Toward Low-Cost Automated Evaluation Metrics for Internet of Things Dialogues Proceedings Article
In: Proceedings of the 9th International Workshop on Spoken Dialogue Systems Technology (IWSDS), IWSDS, Singapore, 2018.
@inproceedings{georgila_toward_2018,
title = {Toward Low-Cost Automated Evaluation Metrics for {Internet of Things} Dialogues},
author = {Kallirroi Georgila and Carla Gordon and Hyungtak Choi and Jill Boberg and Heesik Jeon and David Traum},
url = {http://www.colips.org/conferences/iwsds2018/wp/wp-content/uploads/2018/03/IWSDS-2018_paper_18.pdf},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 9th International Workshop on Spoken Dialogue Systems Technology (IWSDS)},
publisher = {IWSDS},
address = {Singapore},
abstract = {We analyze a corpus of system-user dialogues in the Internet of Things domain. Our corpus is automatically, semi-automatically, and manually annotated with a variety of features both on the utterance level and the full dialogue level. The corpus also includes human ratings of dialogue quality collected via crowdsourcing. We calculate correlations between features and human ratings to identify which features are highly associated with human perceptions about dialogue quality in this domain. We also perform linear regression and derive a variety of dialogue quality evaluation functions. These evaluation functions are then applied to a heldout portion of our corpus, and are shown to be highly predictive of human ratings and outperform standard reward-based evaluation functions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Xiao, Gang; Georgila, Kallirroi
A Comparison of Reinforcement Learning Methodologies in Two-Party and Three-Party Negotiation Dialogue Proceedings Article
In: Proceedings of the Thirty-First International Florida Artificial Intelligence Research Society Conference (FLAIRS-31), AAAI, Melbourne, FL, 2018.
@inproceedings{xiao_comparison_2018,
title = {A Comparison of Reinforcement Learning Methodologies in Two-Party and Three-Party Negotiation Dialogue},
author = {Gang Xiao and Kallirroi Georgila},
url = {https://aaai.org/ocs/index.php/FLAIRS/FLAIRS18/paper/view/17687},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the Thirty-First International Florida Artificial Intelligence Research Society Conference (FLAIRS-31)},
publisher = {AAAI},
address = {Melbourne, FL},
abstract = {We use reinforcement learning to learn dialogue policies in a collaborative furniture layout negotiation task. We employ a variety of methodologies (i.e., learning against a simulated user versus co-learning) and algorithms. Our policies achieve the best solution or a good solution to this problem for a variety of settings and initial conditions, including in the presence of noise (e.g., due to speech recognition or natural language understanding errors). Also, our policies perform well even in situations not observed during training. Policies trained against a simulated user perform well while interacting with policies trained through co-learning, and vice versa. Furthermore, policies trained in a two-party setting are successfully applied to a three-party setting, and vice versa.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Krum, David M.; Khooshabeh, Peter; Phan, Thai; Chang, Chien-Yen
Socio-Cultural Effects of Virtual Counseling Interviewers as Mediated by Smartphone Video Conferencing Proceedings Article
In: Proceedings of the 31st International Conference on Computer Animation and Social Agents - CASA 2018, pp. 17–22, ACM Press, Beijing, China, 2018, ISBN: 978-1-4503-6376-1.
@inproceedings{kang_socio-cultural_2018,
title = {Socio-Cultural Effects of Virtual Counseling Interviewers as Mediated by Smartphone Video Conferencing},
author = {Sin-Hwa Kang and David M. Krum and Peter Khooshabeh and Thai Phan and Chien-Yen Chang},
url = {http://dl.acm.org/citation.cfm?doid=3205326.3205348},
doi = {10.1145/3205326.3205348},
isbn = {978-1-4503-6376-1},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 31st International Conference on Computer Animation and Social Agents - CASA 2018},
pages = {17--22},
publisher = {ACM Press},
address = {Beijing, China},
abstract = {We explored how users perceive virtual characters that performed the role of a counseling interviewer, while presenting different levels of social class, as well as single or multi-tasking behavior. To investigate this subject, we designed a 2x2 experiment (tasking type and social class of the virtual counseling interviewer). In the experiment, participants experienced the counseling interview interactions over video conferencing on a smartphone. We measured user responses to and perceptions of the virtual human interviewer. The results demonstrate that the tasking types and social class of the virtual counselor affected user responses to and perceptions of the virtual counselor. The results offer insight into the design and development of effective, realistic, and believable virtual human counselors. Furthermore, the results also address current social questions about how smartphones might mediate social interactions, including human-agent interactions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
}
Artstein, Ron; Boberg, Jill; Gainer, Alesia; Gratch, Jonathan; Johnson, Emmanuel; Leuski, Anton; Lucas, Gale; Traum, David
The Niki and Julie Corpus: Collaborative Multimodal Dialogues between Humans, Robots, and Virtual Agents Proceedings Article
In: Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), European Language Resources Association (ELRA), Miyazaki, Japan, 2018, ISBN: 979-10-95546-00-9.
@inproceedings{artstein_niki_2018,
title = {The {Niki} and {Julie} Corpus: Collaborative Multimodal Dialogues between Humans, Robots, and Virtual Agents},
author = {Ron Artstein and Jill Boberg and Alesia Gainer and Jonathan Gratch and Emmanuel Johnson and Anton Leuski and Gale Lucas and David Traum},
url = {http://www.lrec-conf.org/proceedings/lrec2018/pdf/482.pdf},
isbn = {979-10-95546-00-9},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
publisher = {European Language Resources Association (ELRA)},
address = {Miyazaki, Japan},
abstract = {The Niki and Julie corpus contains more than 600 dialogues between human participants and a human-controlled robot or virtual agent, engaged in a series of collaborative item-ranking tasks designed to measure influence. Some of the dialogues contain deliberate conversational errors by the robot, designed to simulate the kinds of conversational breakdown that are typical of present-day automated agents. Data collected include audio and video recordings, the results of the ranking tasks, and questionnaire responses; some of the recordings have been transcribed and annotated for verbal and nonverbal feedback. The corpus has been used to study influence and grounding in dialogue. All the dialogues are in American English.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Vinkemeier, Doratha; Valstar, Michel; Gratch, Jonathan
Predicting Folds in Poker Using Action Unit Detectors and Decision Trees Proceedings Article
In: Proceedings of the 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pp. 504–511, IEEE, Xi'an, China, 2018, ISBN: 978-1-5386-2335-0.
@inproceedings{vinkemeier_predicting_2018,
title = {Predicting Folds in Poker Using Action Unit Detectors and Decision Trees},
author = {Doratha Vinkemeier and Michel Valstar and Jonathan Gratch},
url = {https://ieeexplore.ieee.org/document/8373874/},
doi = {10.1109/FG.2018.00081},
isbn = {978-1-5386-2335-0},
year = {2018},
date = {2018-05-01},
booktitle = {Proceedings of the 2018 13th IEEE International Conference on Automatic Face \& Gesture Recognition (FG 2018)},
pages = {504--511},
publisher = {IEEE},
address = {Xi'an, China},
abstract = {Predicting how a person will respond can be very useful, for instance when designing a strategy for negotiations. We investigate whether it is possible for machine learning and computer vision techniques to recognize a person's intentions and predict their actions based on their visually expressive behaviour, where in this paper we focus on the face. We have chosen as our setting pairs of humans playing a simplified version of poker, where the players are behaving naturally and spontaneously, albeit mediated through a computer connection. In particular, we ask if we can automatically predict whether a player is going to fold or not. We also try to answer the question of at what time point the signal for predicting if a player will fold is strongest. We use state-of-the-art FACS Action Unit detectors to automatically annotate the players facial expressions, which have been recorded on video. In addition, we use timestamps of when the player received their card and when they placed their bets, as well as the amounts they bet. Thus, the system is fully automated. We are able to predict whether a person will fold or not significantly better than chance based solely on their expressive behaviour starting three seconds before they fold.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wauck, Helen; Lucas, Gale; Shapiro, Ari; Feng, Andrew; Boberg, Jill; Gratch, Jonathan
Analyzing the Effect of Avatar Self-Similarity on Men and Women in a Search and Rescue Game Proceedings Article
In: Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems, pp. 1–12, ACM Press, Montreal, Canada, 2018, ISBN: 978-1-4503-5620-6.
@inproceedings{wauck_analyzing_2018,
title = {Analyzing the Effect of Avatar Self-Similarity on Men and Women in a Search and Rescue Game},
author = {Helen Wauck and Gale Lucas and Ari Shapiro and Andrew Feng and Jill Boberg and Jonathan Gratch},
url = {http://dl.acm.org/citation.cfm?doid=3173574.3174059},
doi = {10.1145/3173574.3174059},
isbn = {978-1-4503-5620-6},
year = {2018},
date = {2018-04-01},
booktitle = {Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems},
pages = {1--12},
publisher = {ACM Press},
address = {Montreal, Canada},
abstract = {A crucial aspect of virtual gaming experiences is the avatar: the player's virtual self-representation. While research has demonstrated benefits to using self-similar avatars in some virtual experiences, such avatars sometimes produce a more negative experience for women. To help researchers and game designers assess the cost-benefit tradeoffs of self-similar avatars, we compared players' performance and subjective experience in a search and rescue computer game when using two different photorealistic avatars: their own self or a friend, and when playing either a social (rescuing people) or a nonsocial (rescuing gems) version of the game. There was no effect of avatar appearance on players' performance or subjective experience in either game version, but we also found that women's experience with self-similar avatars was no more negative than men's. Our results suggest that avatar appearance may not make a difference to players in certain game contexts.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pincus, Eli; Lei, Su; Lucas, Gale; Johnson, Emmanuel; Tsang, Michael; Gratch, Jonathan; Traum, David
The Importance of Regulatory Fit & Early Success in a Human-Machine Game Proceedings Article
In: Proceedings of the first APA ACM Technology, Mind and Society Conference, pp. 1–6, ACM Press, Washington D.C., 2018, ISBN: 978-1-4503-5420-2.
@inproceedings{pincus_importance_2018,
title = {The Importance of Regulatory Fit \& Early Success in a Human-Machine Game},
author = {Eli Pincus and Su Lei and Gale Lucas and Emmanuel Johnson and Michael Tsang and Jonathan Gratch and David Traum},
url = {http://dl.acm.org/citation.cfm?doid=3183654.3183661},
doi = {10.1145/3183654.3183661},
isbn = {978-1-4503-5420-2},
year = {2018},
date = {2018-04-01},
booktitle = {Proceedings of the first APA ACM Technology, Mind and Society Conference},
pages = {1--6},
publisher = {ACM Press},
address = {Washington D.C.},
abstract = {In this paper, we explore the potential of regulatory focus theory as a framework for personalizing human-machine interactions. We manipulate framing (gain or loss) of a collaborative word-guessing game where a fully-automated virtual human gives clues. Consistent with previous work on regulatory focus, we find evidence of significantly higher perceived task-success when participants have regulatory fit. Inconsistent with previous work, however, fit did not increase task-enjoyment (nor performance). Participants with gain framing had marginally higher enjoyment, regardless of their regulatory focus. We operationalize motivation by number of optional rounds played but failed to find a "fit" effect. Instead, players who achieved early success (scoring more points in initial rounds) were more motivated. Early success was significantly correlated with number of optional rounds played. This finding calls to attention the need for the literature to more thoroughly investigate the relationship between success-timing and total player playtime in the game.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Pynadath, David V.; Rovira, Ericka; Barnes, Michael J.; Hill, Susan G.
Is It My Looks? Or Something I Said? The Impact of Explanations, Embodiment, and Expectations on Trust and Performance in Human-Robot Teams Book Section
In: Persuasive Technology, vol. 10809, pp. 56–69, Springer International Publishing, Cham, Switzerland, 2018, ISBN: 978-3-319-78977-4 978-3-319-78978-1.
@incollection{wang_is_2018,
title = {Is It My Looks? Or Something I Said? The Impact of Explanations, Embodiment, and Expectations on Trust and Performance in Human-Robot Teams},
author = {Ning Wang and David V. Pynadath and Ericka Rovira and Michael J. Barnes and Susan G. Hill},
url = {http://link.springer.com/10.1007/978-3-319-78978-1_5},
doi = {10.1007/978-3-319-78978-1_5},
isbn = {978-3-319-78977-4, 978-3-319-78978-1},
year = {2018},
date = {2018-04-01},
booktitle = {Persuasive Technology},
volume = {10809},
pages = {56--69},
publisher = {Springer International Publishing},
address = {Cham, Switzerland},
abstract = {Trust is critical to the success of human-robot interaction. Research has shown that people will more accurately trust a robot if they have an accurate understanding of its decision-making process. The Partially Observable Markov Decision Process (POMDP) is one such decision-making process, but its quantitative reasoning is typically opaque to people. This lack of transparency is exacerbated when a robot can learn, making its decision making better, but also less predictable. Recent research has shown promise in calibrating human-robot trust by automatically generating explanations of POMDP-based decisions. In this work, we explore factors that can potentially interact with such explanations in influencing human decision-making in human-robot teams. We focus on explanations with quantitative expressions of uncertainty and experiment with common design factors of a robot: its embodiment and its communication strategy in case of an error. Results help us identify valuable properties and dynamics of the human-robot trust relationship.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Trout, Theron; Russell, Stephen M.; Harrison, Andre V.; Spicer, Ryan; Dennison, Mark S.; Thomas, Jerald; Rosenberg, Evan Suma
Collaborative mixed reality (MxR) and networked decision making Proceedings Article
In: Next-Generation Analyst VI, pp. 21, SPIE, Orlando, Florida, 2018, ISBN: 978-1-5106-1817-6 978-1-5106-1818-3.
@inproceedings{trout_collaborative_2018,
title = {Collaborative mixed reality ({MxR}) and networked decision making},
author = {Theron Trout and Stephen M. Russell and Andre V. Harrison and Ryan Spicer and Mark S. Dennison and Jerald Thomas and Evan Suma Rosenberg},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/10653/2309959/Collaborative-mixed-reality-MxR-and-networked-decision-making/10.1117/12.2309959.full},
doi = {10.1117/12.2309959},
isbn = {978-1-5106-1817-6, 978-1-5106-1818-3},
year = {2018},
date = {2018-04-01},
booktitle = {Next-Generation Analyst VI},
pages = {21},
publisher = {SPIE},
address = {Orlando, Florida},
abstract = {Collaborative decision-making remains a significant research challenge that is made even more complicated in real-time or tactical problem-contexts. Advances in technology have dramatically assisted the ability for computers and networks to improve the decision-making process (i.e. intelligence, design, and choice). In the intelligence phase of decision making, mixed reality (MxR) has shown a great deal of promise through implementations of simulation and training. However little research has focused on an implementation of MxR to support the entire scope of the decision cycle, let alone collaboratively and in a tactical context. This paper presents a description of the design and initial implementation for the Defense Integrated Collaborative Environment (DICE), an experimental framework for supporting theoretical and empirical research on MxR for tactical decision-making support.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lucas, Gale M.; Boberg, Jill; Traum, David; Artstein, Ron; Gratch, Jonathan; Gainer, Alesia; Johnson, Emmanuel; Leuski, Anton; Nakano, Mikio
Getting to Know Each Other: The Role of Social Dialogue in Recovery from Errors in Social Robots Proceedings Article
In: Proceedings of the 2018 ACM/IEEE International Conference on Human-Robot Interaction, pp. 344–351, ACM Press, Chicago, IL, 2018, ISBN: 978-1-4503-4953-6.
@inproceedings{lucas_getting_2018,
title = {Getting to Know Each Other: The Role of Social Dialogue in Recovery from Errors in Social Robots},
author = {Gale M. Lucas and Jill Boberg and David Traum and Ron Artstein and Jonathan Gratch and Alesia Gainer and Emmanuel Johnson and Anton Leuski and Mikio Nakano},
url = {http://dl.acm.org/citation.cfm?doid=3171221.3171258},
doi = {10.1145/3171221.3171258},
isbn = {978-1-4503-4953-6},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of the 2018 ACM/IEEE International Conference on Human-Robot Interaction},
pages = {344--351},
publisher = {ACM Press},
address = {Chicago, IL},
abstract = {This work explores the extent to which social dialogue can mitigate (or exacerbate) the loss of trust caused when robots make conversational errors. Our study uses a NAO robot programmed to persuade users to agree with its rankings on two tasks. We perform two manipulations: (1) The timing of conversational errors - the robot exhibited errors either in the first task, the second task, or neither; (2) The presence of social dialogue - between the two tasks, users either engaged in a social dialogue with the robot or completed a control task. We found that the timing of the errors matters: replicating previous research, conversational errors reduce the robot's influence in the second task, but not on the first task. Social dialogue interacts with the timing of errors, acting as an intensifier: social dialogue helps the robot recover from prior errors, and actually boosts subsequent influence; but social dialogue backfires if it is followed by errors, because it extends the period of good performance, creating a stronger contrast effect with the subsequent errors. The design of social robots should therefore be more careful to avoid errors after periods of good performance than early on in a dialogue.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Pynadath, David V.; Barnes, Michael J.; Hill, Susan G.
Comparing Two Automatically Generated Explanations on the Perception of a Robot Teammate Proceedings Article
In: Proceedings of the 2018 ACM/IEEE International Conference on Human-Robot Interaction, ACM, Chicago, IL, 2018.
@inproceedings{wang_comparing_2018,
title = {Comparing Two Automatically Generated Explanations on the Perception of a Robot Teammate},
author = {Ning Wang and David V. Pynadath and Michael J. Barnes and Susan G. Hill},
url = {http://people.ict.usc.edu/~nwang/PDF/HRI-ERS-2018-Wang.pdf},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of the 2018 ACM/IEEE International Conference on Human-Robot Interaction},
publisher = {ACM},
address = {Chicago, IL},
abstract = {Trust is critical to the success of human-robot interaction (HRI). Research has shown that people will more accurately trust a robot if they have a more accurate understanding of its decisionmaking process. Recent research has shown promise in calibrating human-agent trust by automatically generating explanations of decision-making process such as POMDP-based ones. In this paper, we compare two automatically generated explanations, one with quantitative information on uncertainty and one based on sensor observations, and study the impact of such explanations on perception of a robot in human-robot team.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Gonzalez, Diego; Gordon, Andrew S.
Comparing Speech and Text Input in Interactive Narratives Proceedings Article
In: Proceedings of ACM Intelligent User Interfaces, pp. 141–145, ACM Press, Tokyo, Japan, 2018, ISBN: 978-1-4503-4945-1.
@inproceedings{gonzalez_comparing_2018,
title = {Comparing Speech and Text Input in Interactive Narratives},
author = {Diego Gonzalez and Andrew S. Gordon},
url = {http://dl.acm.org/citation.cfm?doid=3172944.3172999},
doi = {10.1145/3172944.3172999},
isbn = {978-1-4503-4945-1},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of ACM Intelligent User Interfaces},
pages = {141--145},
publisher = {ACM Press},
address = {Tokyo, Japan},
abstract = {Intelligent user interfaces are finding new applications in interactive narratives, where players take on the role of a character in a fictional storyline. A recent example is the interactive audio narrative "Traveler", in which a combination of technologies for speech recognition and unsupervised text classification allow players to navigate a branching storyline via open-vocabulary spoken input. We hypothesize that the affordances of audio-based interaction in interactive narratives are different than text-based interaction, and that these differences change the player experience and their understanding of their fictional role. To test this hypothesis, we conducted a controlled experiment (n=39) to compare player interaction in "Traveler" with a text-only variant of the same storyline. We found significant differences in the types of input provided by players, suggesting that interaction modality impacts how players conceive of their relation to narrators of fictional storylines.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Khooshabeh, Peter; Lucas, Gale
Virtual Human Role Players for Studying Social Factors in Organizational Decision Making Journal Article
In: Frontiers in Psychology, vol. 9, 2018, ISSN: 1664-1078.
@article{khooshabeh_virtual_2018,
  title     = {Virtual Human Role Players for Studying Social Factors in Organizational Decision Making},
  author    = {Peter Khooshabeh and Gale Lucas},
  url       = {http://journal.frontiersin.org/article/10.3389/fpsyg.2018.00194/full},
  doi       = {10.3389/fpsyg.2018.00194},
  issn      = {1664-1078},
  year      = {2018},
  date      = {2018-03-01},
  journal   = {Frontiers in Psychology},
  volume    = {9},
  abstract  = {The cyber domain of military operations presents many challenges. A unique element is the social dynamic between cyber operators and their leadership because of the novel subject matter expertise involved in conducting technical cyber tasks, so there will be situations where senior leaders might have much less domain knowledge or no experience at all relative to the warfighters who report to them. Nonetheless, it will be important for junior cyber operators to convey convincing information relevant to a mission in order to persuade or influence a leader to make informed decisions. The power dynamic will make it difficult for the junior cyber operator to successfully influence a higher ranking leader. Here we present a perspective with a sketch for research paradigm(s) to study how different factors (normative vs. informational social influence, degree of transparency, and perceived appropriateness of making suggestions) might interact with differential social power dynamics of individuals in cyber decision-making contexts. Finally, we contextualize this theoretical perspective for the research paradigms in viable training technologies.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Roemmele, Melissa; Gordon, Andrew S.
Automated Assistance for Creative Writing with an RNN Language Model Proceedings Article
In: Proceedings of ACM Intelligent User Interfaces, pp. 1–2, ACM Press, Tokyo, Japan, 2018, ISBN: 978-1-4503-5571-1.
@inproceedings{roemmele_automated_2018,
title = {Automated Assistance for Creative Writing with an {RNN} Language Model},
author = {Melissa Roemmele and Andrew S. Gordon},
url = {http://dl.acm.org/citation.cfm?doid=3180308.3180329},
doi = {10.1145/3180308.3180329},
isbn = {978-1-4503-5571-1},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of ACM Intelligent User Interfaces},
pages = {1--2},
publisher = {ACM Press},
address = {Tokyo, Japan},
abstract = {This work demonstrates an interface, Creative Help, that assists people with creative writing by automatically suggesting new sentences in a story. Authors can freely edit the generated suggestions, and the application tracks their modifications. We make use of a Recurrent Neural Network language model to generate suggestions in a simple probabilistic way. Motivated by the theorized role of unpredictability in creativity, we vary the degree of randomness in the probability distribution used to generate the sentences, and find that authors’ interactions with the suggestions are influenced by this randomness.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Phan, Thai; Ayanian, Nora; Honig, Wolfgang
Mixed Reality Collaboration Between Human-Agent Teams Proceedings Article
In: Proceedings of the IEEE VR 2018, the 25th IEEE Conference on Virtual Reality and 3D User Interfaces, IEEE, Reutlingen, Germany, 2018, ISBN: 978-1-5386-3365-6.
@inproceedings{phan_mixed_2018,
title = {Mixed Reality Collaboration Between Human-Agent Teams},
author = {Thai Phan and Nora Ayanian and Wolfgang Hönig},
url = {https://ieeexplore.ieee.org/document/8446542/},
doi = {10.1109/VR.2018.8446542},
isbn = {978-1-5386-3365-6},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of the IEEE VR 2018, the 25th IEEE Conference on Virtual Reality and 3D User Interfaces},
publisher = {IEEE},
address = {Reutlingen, Germany},
abstract = {Collaboration between two or more geographically dispersed teams has applications in research and training. In many cases specialized devices, such as robots, may need to be combined between the collaborating groups. However, it would be expensive or even impossible to collocate them at a single physical location. We describe the design of a mixed reality test bed which allows dispersed humans and physically embodied agents to collaborate within a single virtual environment. We demonstrate our approach using Unity’s networking architecture as well as open source robot software and hardware. In our scenario, a total of 3 humans and 6 drones must move through a narrow doorway while avoiding collisions in the physical spaces as well as virtual space.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Krum, David M; Kang, Sin-Hwa; Phan, Thai
Influences on the Elicitation of Interpersonal Space with Virtual Humans Proceedings Article
In: Proceedings of the 2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR), IEEE, Tuebingen/Reutlingen, Germany, 2018, ISBN: 978-1-5386-3365-6.
@inproceedings{krum_influences_2018,
title = {Influences on the Elicitation of Interpersonal Space with Virtual Humans},
author = {David M. Krum and Sin-Hwa Kang and Thai Phan},
url = {https://ieeexplore.ieee.org/document/8446235/},
doi = {10.1109/VR.2018.8446235},
isbn = {978-1-5386-3365-6},
year = {2018},
date = {2018-03-01},
booktitle = {Proceedings of the 2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)},
publisher = {IEEE},
address = {Tuebingen/Reutlingen, Germany},
abstract = {The emergence of low cost virtual and augmented reality systems has encouraged the development of immersive training applications for medical, military, and many other fields. Many of the training scenarios for these various fields may require the presentation of realistic interactions with virtual humans. It is thus vital to determine the critical factors of fidelity required in those interactions to elicit naturalistic behavior on the part of trainees. Negative training may occur if trainees are inadvertently influenced to react in ways that are unexpected and unnatural, hindering proper learning and transfer of skills and knowledge back into real world contexts. In this research, we examined whether haptic priming (presenting an illusion of virtual human touch at the beginning of the virtual experience) and different locomotion techniques (either joystick or physical walking) might affect proxemic behavior in human users. The results of our study suggest that locomotion techniques can alter proxemic behavior in significant ways. Haptic priming did not appear to impact proxemic behavior, but did increase rapport and other subjective social measures. The results suggest that designers and developers of immersive training systems should carefully consider the impact of even simple design and fidelity choices on trainee reactions in social interactions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Krämer, Nicole C.; Lucas, Gale; Schmitt, Lea; Gratch, Jonathan
Social snacking with a virtual agent – On the interrelation of need to belong and effects of social responsiveness when interacting with artificial entities Journal Article
In: International Journal of Human-Computer Studies, vol. 109, pp. 112–121, 2018, ISSN: 1071-5819.
@article{kramer_social_2018,
title = {Social snacking with a virtual agent – On the interrelation of need to belong and effects of social responsiveness when interacting with artificial entities},
author = {Nicole C. Krämer and Gale Lucas and Lea Schmitt and Jonathan Gratch},
url = {http://linkinghub.elsevier.com/retrieve/pii/S1071581917301271},
doi = {10.1016/j.ijhcs.2017.09.001},
issn = {1071-5819},
year = {2018},
date = {2018-01-01},
journal = {International Journal of Human-Computer Studies},
volume = {109},
pages = {112--121},
abstract = {Based on considerations that people´s need to belong can be temporarily satisfied by “social snacking” (Gardner et al., 2005) in the sense that in absence of social interactions which adequately satisfy belongingness needs surrogates can bridge lonely times, it was tested whether the interaction with a virtual agent can serve to ease the need for social contact. In a between subjects experimental setting, 79 participants interacted with a virtual agent who either displayed socially responsive nonverbal behavior or not. Results demonstrate that although there was no main effect of socially responsive behavior on participants´ subjective experience of rapport and on connectedness with the agent, those people with a high need to belong reported less willingness to engage in social activities after the interaction with a virtual agent – but only if the agent displayed socially responsive behavior.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Nilsson, Niels; Peck, Tabitha; Bruder, Gerd; Hodgson, Eric; Serafin, Stefania; Suma, Evan; Whitton, Mary; Steinicke, Frank
15 Years of Research on Redirected Walking in Immersive Virtual Environments Journal Article
In: IEEE Computer Graphics and Applications, 2018, ISSN: 0272-1716.
@article{nilsson_15_2018,
  title     = {15 Years of Research on Redirected Walking in Immersive Virtual Environments},
  author    = {Niels Nilsson and Tabitha Peck and Gerd Bruder and Eric Hodgson and Stefania Serafin and Evan Suma and Mary Whitton and Frank Steinicke},
  journal   = {IEEE Computer Graphics and Applications},
  year      = {2018},
  date      = {2018-01-01},
  doi       = {10.1109/MCG.2018.111125628},
  issn      = {0272-1716},
  url       = {http://ieeexplore.ieee.org/document/8255772/},
  abstract  = {Virtual reality users wearing head-mounted displays can experience the illusion of walking in any direction for infinite distance while, in reality, they are walking a curvilinear path in physical space. This is accomplished by introducing unnoticeable rotations to the virtual environment—a technique called redirected walking. This paper gives an overview of the research that has been performed since redirected walking was first practically demonstrated 15 years ago.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {article}
}
Rizzo, Albert ‘Skip’; Shilling, Russell
Clinical Virtual Reality tools to advance the prevention, assessment, and treatment of PTSD Journal Article
In: European Journal of Psychotraumatology, vol. 8, no. sup5, 2018, ISSN: 2000-8198, 2000-8066.
@article{rizzo_clinical_2018,
title = {Clinical Virtual Reality tools to advance the prevention, assessment, and treatment of PTSD},
author = {Albert ‘Skip’ Rizzo and Russell Shilling},
url = {https://www.tandfonline.com/doi/full/10.1080/20008198.2017.1414560},
doi = {10.1080/20008198.2017.1414560},
issn = {2000-8198, 2000-8066},
year = {2018},
date = {2018-01-01},
journal = {European Journal of Psychotraumatology},
volume = {8},
number = {sup5},
pages = {1414560},
abstract = {Numerous reports indicate that the incidence of posttraumatic stress disorder (PTSD) in Operation Enduring Freedom/Operation Iraqi Freedom/Operation New Dawn (OEF/OIF/ OND) military personnel has created a significant behavioural healthcare challenge. These findings have served to motivate research on how to better develop and disseminate evidence-based treatments for PTSD. The current article presents the use of Virtual Reality (VR) as a clinical tool to address the assessment, prevention, and treatment of PTSD, based on the VR projects that were evolved at the University of Southern California Institute for Creative Technologies since 2004. A brief discussion of the definition and rationale for the clinical use of VR is followed by a description of a VR application designed for the delivery of prolonged exposure (PE) for treating Service Members (SMs) and Veterans with combat- and sexual assault-related PTSD. The expansion of the virtual treatment simulations of Iraq and Afghanistan for PTSD assessment and prevention is then presented. This is followed by a forward-looking discussion that details early efforts to develop virtual human agent systems that serve the role of virtual patients for training the next generation of clinical providers, as healthcare guides that can be used to support anonymous access to trauma-relevant behavioural healthcare information, and as clinical interviewers capable of automated behaviour analysis of users to infer psychological state. The paper will conclude with a discussion of VR as a tool for breaking down barriers to care in addition to its direct application in assessment and intervention.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Filter
Sorry, no publications matched your criteria.