Publications
Search
Neubauer, Catherine; Scherer, Stefan
The Effects of Pre-task Team Collaboration on Facial Expression and Speech Entrainment Proceedings Article
In: Proceedings of the Preconference on Affective Computing at the Society for Affective Science, Boston, MA, 2017.
@inproceedings{neubauer_effects_2017,
title = {The Effects of Pre-task Team Collaboration on Facial Expression and Speech Entrainment},
author = {Neubauer, Catherine and Scherer, Stefan},
url = {http://ict.usc.edu/pubs/The%20Effects%20of%20Pre-task%20Team%20Collaboration%20on%20Facial%20Expression%20and%20Speech%20Entrainment.pdf},
year = {2017},
date = {2017-04-01},
booktitle = {Proceedings of the Preconference on Affective Computing at the Society for Affective Science},
address = {Boston, MA},
abstract = {Many everyday tasks are complex and require the coordination of one or more individuals. Such tasks can be relatively simple like passing a ball to a friend during a game of catch, while others are more complex such as performing a life-saving surgery where surgeons, anesthesiologists and nurses all work together in a multi-person team [1]. Such coordination requires the appropriate allocation of cognitive and behavioral effort to meet the changing demands of their environment and cannot be completed alone [1]. These mutually cooperative behaviors can include team communication, body position and even affective cues [2]. Some behaviors are explicitly controlled to be coordinated [3] (e.g., when an individual purposely attempts to follow the behaviors of their teammate or team leader), while others are implicit or unconscious. Presently, these shared behaviors have been referred to as entrainment [4] [5], mimicry [6] [7] and even action matching [8] [9]; however, the specific term used typically refers to the underlying theoretical cause for the phenomenon. Theoretically, entrainment can be explained as the spontaneous interpersonal coupling that occurs because the behavior of one or more individuals is affected by another’s behavior in a closed loop system. Additionally, such behavior is typically evident when working on a mutual, goal-directed task [10]. Therefore, for the purposes of this paper we will refer to the cooperative behaviors between teammates that support problem solving as entrainment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kang, Sin-Hwa; Krum, David M.; Khooshabeh, Peter; Phan, Thai; Chang, Chien-Yen; Amir, Ori; Lin, Rebecca
Social influence of humor in virtual human counselor's self-disclosure Journal Article
In: Computer Animation and Virtual Worlds, vol. 28, no. 3-4, 2017, ISSN: 1546-4261.
@article{kang_social_2017,
title = {Social influence of humor in virtual human counselor's self-disclosure},
author = {Kang, Sin-Hwa and Krum, David M. and Khooshabeh, Peter and Phan, Thai and Chang, Chien-Yen and Amir, Ori and Lin, Rebecca},
url = {http://doi.wiley.com/10.1002/cav.1763},
doi = {10.1002/cav.1763},
issn = {1546-4261},
year = {2017},
date = {2017-04-01},
journal = {Computer Animation and Virtual Worlds},
volume = {28},
number = {3-4},
abstract = {We explored the social influence of humor in a virtual human counselor's self-disclosure while also varying the ethnicity of the virtual counselor. In a 2 × 3 experiment (humor and ethnicity of the virtual human counselor), participants experienced counseling interview interactions via Skype on a smartphone. We measured user responses to and perceptions of the virtual human counselor. The results demonstrate that humor positively affects user responses to and perceptions of a virtual counselor. The results further suggest that matching styles of humor with a virtual counselor's ethnicity influences user responses and perceptions. The results offer insight into the effective design and development of realistic and believable virtual human counselors. Furthermore, they illuminate the potential use of humor to enhance self‐disclosure in human–agent interactions.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Khooshabeh, Peter; Choromanski, Igor; Neubauer, Catherine; Krum, David M.; Spicer, Ryan; Campbell, Julia
Mixed Reality Training for Tank Platoon Leader Communication Skills Proceedings Article
In: Proceedings of Virtual Reality (VR), 2017 IEEE, pp. 333–334, IEEE, Los Angeles, CA, 2017.
@inproceedings{khooshabeh_mixed_2017,
title = {Mixed Reality Training for Tank Platoon Leader Communication Skills},
author = {Khooshabeh, Peter and Choromanski, Igor and Neubauer, Catherine and Krum, David M. and Spicer, Ryan and Campbell, Julia},
url = {http://ieeexplore.ieee.org/document/7892312/#full-text-section},
doi = {10.1109/VR.2017.7892312},
year = {2017},
date = {2017-03-01},
booktitle = {Proceedings of Virtual Reality (VR), 2017 IEEE},
pages = {333--334},
publisher = {IEEE},
address = {Los Angeles, CA},
abstract = {Here we describe the design and usability evaluation of a mixed reality prototype to simulate the role of a tank platoon leader, who is an individual who not only is a tank commander, but also directs a platoon of three other tanks with their own respective tank commanders. The domain of tank commander training has relied on physical simulators of the actual Abrams tank and encapsulates the whole crew. The TALK-ON system we describe here focuses on training communication skills of the leader in a simulated tank crew. We report results from a usability evaluation and discuss how they will inform our future work for collective tank training.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
}
McAlinden, Ryan; Kang, Sin-Hwa; Nye, Benjamin; Phillips, Artemisa; Campbell, Julia; Goldberg, Stephan L.
Cost-Effective Strategies for Producing Engaging Online Courseware Proceedings Article
In: Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016, National Training and Simulation Association, Orlando, FL, 2016.
@inproceedings{mcalinden_cost-effective_2016,
title = {Cost-Effective Strategies for Producing Engaging Online Courseware},
author = {McAlinden, Ryan and Kang, Sin-Hwa and Nye, Benjamin and Phillips, Artemisa and Campbell, Julia and Goldberg, Stephan L.},
url = {http://www.iitsecdocs.com/search},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016},
publisher = {National Training and Simulation Association},
address = {Orlando, FL},
abstract = {As distributed learning (dL) and computer-based training (CBT) continue to proliferate, the methods of delivery often remain unengaging and bland for participants. Though many of the leaders in commercial online learning have improved their delivery style and quality in recent years, they continue to fall short in terms of user engagement and satisfaction. PowerPoint regurgitation and video lectures are commonplace and leave end users uninspired and wanting more. This paper discusses results from an ongoing research project, Captivating Virtual Instruction for Training (CVIT), which is aimed at understanding and improving dL through a series of recommendations and best practices for promoting and enhancing student engagement online. Though the central focus is on engagement, and how that translates to learning potential, a third variable (cost) has been examined to understand the financial and resource impacts on making content more interesting (i.e. the return on investment, or ROI). The paper presents findings from a 3-year long experiment comparing existing dL methods and techniques both within and outside of the Army. The project developed two dL versions of an existing Army course (Advanced Situational Awareness-Basic (ASA-B)) – the first was designed around producing material that was as engaging and as immersive as possible within a target budget; the second was a scaled-down version using more traditional, yet contemporary dL techniques (PowerPoint recital, video lectures). The two were then compared along three dimensions– engagement, learning and cost. The findings show that improved engagement in distributed courseware is possible without breaking the bank, though the returns on learning with these progressive approaches remain inconclusive. 
More importantly, it was determined that the quality and experience of the designers, production staff, writers, animators, programmers, and others cannot be underestimated, and that the familiar phrase – ‘you get what you pay for’ is as true with online learning as it is with other areas of content design and software development.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Neubauer, Catherine; Woolley, Joshua; Khooshabeh, Peter; Scherer, Stefan
Getting to know you: a multimodal investigation of team behavior and resilience to stress Proceedings Article
In: Proceedings of the 18th ACM International Conference on Multimodal Interaction, pp. 193–200, ACM Press, Tokyo, Japan, 2016, ISBN: 978-1-4503-4556-9.
@inproceedings{neubauer_getting_2016,
title = {Getting to know you: a multimodal investigation of team behavior and resilience to stress},
author = {Neubauer, Catherine and Woolley, Joshua and Khooshabeh, Peter and Scherer, Stefan},
url = {http://dl.acm.org/citation.cfm?doid=2993148.2993195},
doi = {10.1145/2993148.2993195},
isbn = {978-1-4503-4556-9},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings of the 18th ACM International Conference on Multimodal Interaction},
pages = {193--200},
publisher = {ACM Press},
address = {Tokyo, Japan},
abstract = {Team cohesion has been suggested to be a critical factor in emotional resilience following periods of stress. Team cohesion may depend on several factors including emotional state, communication among team members and even psychophysiological response. The present study sought to employ several multimodal techniques designed to investigate team behavior as a means of understanding resilience to stress. We recruited 40 subjects to perform a cooperative-task in gender-matched, two-person teams. They were responsible for working together to meet a common goal, which was to successfully disarm a simulated bomb. This high-workload task requires successful cooperation and communication among members. We assessed several behaviors that relate to facial expression, word choice and physiological responses (i.e., heart rate variability) within this scenario. A manipulation of an “ice breaker” condition was used to induce a level of comfort or familiarity within the team prior to the task. We found that individuals in the “ice breaker” condition exhibited better resilience to subjective stress following the task. These individuals also exhibited more insight and cognitive speech, more positive facial expressions and were also able to better regulate their emotional expression during the task, compared to the control.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Spicer, Ryan; McAlinden, Ryan; Conover, Damon
Producing Usable Simulation Terrain Data from UAS-Collected Imagery Proceedings Article
In: Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016, National Training and Simulation Association, Orlando, FL, 2016.
@inproceedings{spicer_producing_2016,
title = {Producing Usable Simulation Terrain Data from {UAS}-Collected Imagery},
author = {Spicer, Ryan and McAlinden, Ryan and Conover, Damon},
url = {http://ict.usc.edu/pubs/Producing%20Usable%20Simulation%20Terrain%20Data%20from%20UAS-Collected%20Imagery.pdf},
year = {2016},
date = {2016-11-01},
booktitle = {Proceedings from the Interservice/Industry Training, Simulation and Education Conference (I/ITSEC) 2016},
publisher = {National Training and Simulation Association},
address = {Orlando, FL},
abstract = {At I/ITSEC 2015, we presented an approach to produce geo-referenced, highly-detailed (10cm or better) 3D models for an area of interest using imagery collected from cheap, commercial-off-the-shelf, multirotor Unmanned Aerial Systems (UAS). This paper discusses the next steps in making this data usable for modern-day game and simulation engines, specifically how it may be visually rendered, used and reasoned with by the physics system, the artificial intelligence (AI), the simulation entities, and other components. The pipeline begins by segmenting the georeferenced point cloud created by the UAS imagery into terrain (elevation data) and structures or objects, including vegetation, structures, roads and other surface features. Attributes such as slope and edge detection and color matching are used to perform segmentation and clustering. After the terrain and objects are segmented, they are exported into engine-agnostic formats (georeferenced GeoTIFF digital elevation model (DEM) and ground textures, OBJ/FBX mesh files and JPG textures), which serves as the basis for their representation in-engine. The data is then attributed with metadata used in reasoning – collision surfaces, navigation meshes/networks, apertures, physics attributes (line-of-sight, ray-tracing), material surfaces, and others. Finally, it is loaded into the engine for real-time processing during runtime. The pipeline has been tested with several engines, including Unity, VBS, Unreal and TitanIM. The paper discusses the pipeline from collection to rendering, and as well as how other market/commercially-derived data can serve as the foundation for M&S terrain in the future. Examples of the output of this research are available online (McAlinden, 2016).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Marge, Matthew; Bonial, Claire; Pollard, Kimberly A.; Artstein, Ron; Byrne, Brendan; Hill, Susan G.; Voss, Clare; Traum, David
Assessing Agreement in Human-Robot Dialogue Strategies: A Tale of Two Wizards Proceedings Article
In: Proceedings of The Sixteenth International Conference on Intelligent Virtual Agents (IVA 2016), Springer, Los Angeles, CA, 2016.
@inproceedings{marge_assessing_2016,
title = {Assessing Agreement in Human-Robot Dialogue Strategies: A Tale of Two Wizards},
author = {Marge, Matthew and Bonial, Claire and Pollard, Kimberly A. and Artstein, Ron and Byrne, Brendan and Hill, Susan G. and Voss, Clare and Traum, David},
url = {http://iva2016.ict.usc.edu/wp-content/uploads/Papers/100110460.pdf},
year = {2016},
date = {2016-09-01},
booktitle = {Proceedings of The Sixteenth International Conference on Intelligent Virtual Agents (IVA 2016)},
publisher = {Springer},
address = {Los Angeles, CA},
abstract = {The Wizard-of-Oz (WOz) method is a common experimental technique in virtual agent and human-robot dialogue research for eliciting natural communicative behavior from human partners when full autonomy is not yet possible. For the first phase of our research reported here, wizards play the role of dialogue manager, acting as a robot’s dialogue processing. We describe a novel step within WOz methodology that incorporates two wizards and control sessions: the wizards function much like corpus annotators, being asked to make independent judgments on how the robot should respond when receiving the same verbal commands in separate trials. We show that inter-wizard discussion after the control sessions and the resolution with a reconciled protocol for the follow-on pilot sessions successfully impacts wizard behaviors and significantly aligns their strategies. We conclude that, without control sessions, we would have been unlikely to achieve both the natural diversity of expression that comes with multiple wizards and a better protocol for modeling an automated system.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Sottilare, Robert A.; Graesser, Arthur C.; Hu, Xiangen; Olney, Andrew; Nye, Benjamin; Sinatra, Anna M.
Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling Book
US Army Research Laboratory, Orlando, FL, 2016.
@book{sottilare_design_2016,
title = {Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling},
author = {Sottilare, Robert A. and Graesser, Arthur C. and Hu, Xiangen and Olney, Andrew and Nye, Benjamin and Sinatra, Anna M.},
url = {http://books.google.com/books?hl=en&lr=&id=0suvDAAAQBAJ&oi=fnd&pg=PA1&dq=%22Barnes,+Behrooz+Mostafavi,+and+Michael+J.%22+%22A.+Sottilare+and+Joseph%22+%2214+%E2%80%93+Exploring+the+Diversity+of+Domain+Modeling+for+Training%22+%2213+%E2%80%92+Mining+Expertise:+Learning+New+Tricks+from+an+Old%22+&ots=6MJgp2XEWV&sig=7CHZvZIllN3Xk8uFbMHmxN7gfLw},
year = {2016},
date = {2016-07-01},
volume = {4},
publisher = {US Army Research Laboratory},
address = {Orlando, FL},
internal-note = {NOTE(review): the listed persons are likely the editors of this edited volume rather than authors — verify against the publication and switch to an editor field if confirmed},
abstract = {Design Recommendations for Intelligent Tutoring Systems (ITSs) explores the impact of intelligent tutoring system design on education and training. Specifically, this volume examines “Authoring Tools and Expert Modeling Techniques”. The “Design Recommendations book series examines tools and methods to reduce the time and skill required to develop Intelligent Tutoring Systems with the goal of improving the Generalized Intelligent Framework for Tutoring (GIFT). GIFT is a modular, service-oriented architecture developed to capture simplified authoring techniques, promote reuse and standardization of ITSs along with automated instructional techniques and effectiveness evaluation capabilities for adaptive tutoring tools and methods.},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
Nye, Benjamin D.; Boyce, Michael W.; Sottilare, Robert
Defining the Ill-Defined: From Abstract Principles to Applied Pedagogy Book Section
In: Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling, vol. 4, pp. 19–37, US Army Research Laboratory, Orlando, FL, 2016, ISBN: 978-0-9893923-9-6.
@incollection{nye_defining_2016,
title = {Defining the Ill-Defined: From Abstract Principles to Applied Pedagogy},
author = {Nye, Benjamin D. and Boyce, Michael W. and Sottilare, Robert},
url = {https://gifttutoring.org/attachments/download/1736/Design%20Recommendations%20for%20ITS_Volume%204%20-%20Domain%20Modeling%20Book_web%20version_final.pdf},
isbn = {978-0-9893923-9-6},
year = {2016},
date = {2016-07-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling},
volume = {4},
pages = {19--37},
publisher = {US Army Research Laboratory},
address = {Orlando, FL},
abstract = {Attempts to define ill-defined domains in intelligent tutoring system (ITS) research has been approached a number of times (Fournier-Viger, Nkambou, & Nguifo, 2010; Lynch, Ashley, Pinkwart, & Aleven, 2009; Mitrovic & Weerasinghe, 2009; Jacovina, Snow, Dai, & McNamara, 2015; Woods, Stensrud, Wray, Haley, & Jones, 2015). Related research has tried to determine levels of ill-definedness for a domain (Le, Loll, & Pinkwart, 2013). Despite such attempts, the field has not yet converged on common guidelines to distinguish between well-defined versus ill-defined domains. We argue that such guidelines struggle to converge because a domain is too large to meaningfully categorize: every domain contains a mixture of well-defined and ill-defined tasks. While the co-existence of well-defined and ill-defined tasks in a single domain is nearly universally-agreed upon by researchers; this key point is often quickly buried by an extensive discussion about what makes certain domain tasks ill-defined (e.g., disagreement about ideal solutions, multiple solution paths). In this chapter, we first take a step back to consider what is meant by a domain in the context of learning. Next, based on this definition for a domain, we map out the components that are in a learning domain, since each component may have ill-defined parts. This leads into a discussion about the strategies that have been used to make ill-defined domains tractable for certain types of pedagogy. Examples of ITS research that applies these strategies are noted. Finally, we conclude with practical how-to considerations and open research questions for approaching ill-defined domains. This chapter should be considered a companion piece to our chapter in the prior volume of this series (Nye, Goldberg, & Hu, 2015). 
This chapter focuses on how to understand and transform ill-defined parts of domains, while the prior chapter discusses commonly-used learning tasks and authoring approaches for both well-defined and ill-defined tasks. As such, this chapter is intended to help the learner understand if and how different parts of the domain are ill-defined (and what to do about them). The companion piece in the authoring tools volume discusses different categories of well and ill-defined tasks, from the standpoint of attempting to author and maintain an ITS.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Wang, Ning; Pynadath, David V.; Hill, Susan G.
The Impact of POMDP-Generated Explanations on Trust and Performance in Human-Robot Teams Proceedings Article
In: Proceedings of the 2016 International Conference on Autonomous Agents & Multiagent Systems, pp. 997–1005, International Foundation for Autonomous Agents and Multiagent Systems, Singapore, 2016, ISBN: 978-1-4503-4239-1.
@inproceedings{wang_impact_2016,
title = {The Impact of {POMDP}-Generated Explanations on Trust and Performance in Human-Robot Teams},
author = {Wang, Ning and Pynadath, David V. and Hill, Susan G.},
url = {http://dl.acm.org/citation.cfm?id=2937071},
isbn = {978-1-4503-4239-1},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the 2016 International Conference on Autonomous Agents \& Multiagent Systems},
pages = {997--1005},
publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
address = {Singapore},
abstract = {Researchers have observed that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies have shown that hand-crafted explanations can help maintain effective team performance even when the system is less than 100% reliable. However, current explanation algorithms are not sufficient for making a robot's quantitative reasoning (in terms of both uncertainty and conflicting goals) transparent to human teammates. In this work, we develop a novel mechanism for robots to automatically generate explanations of reasoning based on Partially Observable Markov Decision Problems (POMDPs). Within this mechanism, we implement alternate natural-language templates and then measure their differential impact on trust and team performance within an agent-based online test-bed that simulates a human-robot team task. The results demonstrate that the added explanation capability leads to improvement in transparency, trust, and team performance. Furthermore, by observing the different outcomes due to variations in the robot's explanation content, we gain valuable insight that can help lead to refinement of explanation algorithms to further improve human-robot interaction.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Wang, Ning; Pynadath, David V.; Hill, Susan G.
Trust Calibration within a Human-Robot Team: Comparing Automatically Generated Explanations Proceedings Article
In: 2016 11th ACM/IEEE International Conference on Human-Robot Interaction (HRI), pp. 109–116, IEEE, New Zealand, 2016.
@inproceedings{wang_trust_2016,
title = {Trust Calibration within a Human-Robot Team: Comparing Automatically Generated Explanations},
author = {Wang, Ning and Pynadath, David V. and Hill, Susan G.},
url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7451741},
doi = {10.1109/HRI.2016.7451741},
year = {2016},
date = {2016-03-01},
booktitle = {2016 11th ACM/IEEE International Conference on Human-Robot Interaction (HRI)},
pages = {109--116},
publisher = {IEEE},
address = {New Zealand},
abstract = {Trust is a critical factor for achieving the full potential of human-robot teams. Researchers have theorized that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies have shown that hand-crafted explanations can help maintain trust when the system is less than 100% reliable. In this work, we leverage existing agent algorithms to provide a domain-independent mechanism for robots to automatically generate such explanations. To measure the explanation mechanism's impact on trust, we collected self-reported survey data and behavioral data in an agent-based online testbed that simulates a human-robot team task. The results demonstrate that the added explanation capability led to improvement in transparency, trust, and team performance. Furthermore, by observing the different outcomes due to variations in the robot's explanation content, we gain valuable insight that can help lead to refinement of explanation algorithms to further improve human-robot trust calibration.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Graesser, Arthur C; Hu, Xiangen; Nye, Benjamin D.; Sottilare, Robert A.
Intelligent Tutoring Systems, Serious Games, and the Generalized Intelligent Framework for Tutoring (GIFT) Book Section
In: Using Games and Simulations for Teaching and Assessment, pp. 58–79, Routledge, New York, NY, 2016, ISBN: 978-0-415-73787-6.
@incollection{graesser_intelligent_2016,
title = {Intelligent Tutoring Systems, Serious Games, and the Generalized Intelligent Framework for Tutoring ({GIFT})},
author = {Graesser, Arthur C. and Hu, Xiangen and Nye, Benjamin D. and Sottilare, Robert A.},
url = {https://www.researchgate.net/publication/304013322_Intelligent_Tutoring_Systems_Serious_Games_and_the_Generalized_Intelligent_Framework_for_Tutoring_GIFT},
isbn = {978-0-415-73787-6},
year = {2016},
date = {2016-01-01},
booktitle = {Using Games and Simulations for Teaching and Assessment},
pages = {58--79},
publisher = {Routledge},
address = {New York, NY},
abstract = {This chapter explores the prospects of integrating games with intelligent tutoring systems (ITSs). The hope is that there can be learning environments that optimize both motivation through games and deep learning through ITS technologies. Deep learning refers to the acquisition of knowledge, skills, strategies, and reasoning processes at the higher levels of Bloom’s (1956) taxonomy or the Knowledge-Learning-Instruction (KLI) framework (Koedinger, Corbett, & Perfetti, 2012), such as the application of knowledge to new cases, knowledge analysis and synthesis, problem solving, critical thinking, and other difficult cognitive processes. In contrast, shallow learning involves perceptual learning, memorization of explicit material, and mastery of simple rigid procedures. Shallow knowledge may be adequate for near transfer tests of knowledge/skills but not far transfer tests to new situations that have some modicum of complexity.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Wang, Ning; Pynadath, David V.; Hill, Susan G.
Building Trust in a Human-Robot Team with Automatically Generated Explanations Proceedings Article
In: Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015, National Training and Simulation Association, Orlando, Florida, 2015.
@inproceedings{wang_building_2015,
title = {Building Trust in a Human-Robot Team with Automatically Generated Explanations},
author = {Wang, Ning and Pynadath, David V. and Hill, Susan G.},
url = {http://www.iitsecdocs.com/search},
year = {2015},
date = {2015-12-01},
booktitle = {Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC) 2015},
publisher = {National Training and Simulation Association},
address = {Orlando, Florida},
abstract = {Technological advances offer the promise of robotic systems that work with people to form human-robot teams that are more capable than their individual members. Unfortunately, the increasing capability of such autonomous systems has often failed to increase the capability of the human-robot team. Studies have identified many causes underlying these failures, but one critical aspect of a successful human-machine interaction is trust. When robots are more suited than humans for a certain task, we want the humans to trust the robots to perform that task. When the robots are less suited, we want the humans to appropriately gauge the robots’ ability and have people perform the task manually. Failure to do so results in disuse of robots in the former case and misuse in the latter. Real-world case studies and laboratory experiments show that failures in both cases are common. Researchers have theorized that people will more accurately trust an autonomous system, such as a robot, if they have a more accurate understanding of its decision-making process. Studies show that explanations offered by an automated system can help maintain trust with the humans in case the system makes an error, indicating that the robot’s communication transparency can be an important factor in earning an appropriate level of trust. To study how robots can communicate their decisionmaking process to humans, we have designed an agent-based online test-bed that supports virtual simulation of domain-independent human-robot interaction. In the simulation, humans work together with virtual robots as a team. The test-bed allows researchers to conduct online human-subject studies and gain better understanding of how robot communication can improve human-robot team performance by fostering better trust relationships between humans and their robot teammates. In this paper, we describe the details of our design, and illustrate its operation with an example human-robot team reconnaissance task.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Khooshabeh, Peter; Scherer, Stefan; Oiumette, Brett; Ryan, William S.; Lance, Brent J.; Gratch, Jonathan
Computational-based behavior analysis and peripheral psychophysiology Journal Article
In: Advances in Computational Psychophysiology, pp. 34–36, 2015.
@article{khooshabeh_computational-based_2015,
title = {Computational-based behavior analysis and peripheral psychophysiology},
author = {Khooshabeh, Peter and Scherer, Stefan and Oiumette, Brett and Ryan, William S. and Lance, Brent J. and Gratch, Jonathan},
url = {http://www.sciencemag.org/sites/default/files/custom-publishing/documents/CP_Supplement_Final_100215.pdf},
year = {2015},
date = {2015-10-01},
journal = {Advances in Computational Psychophysiology},
pages = {34--36},
internal-note = {NOTE(review): surname "Oiumette" may be a misspelling of "Ouimette" — verify against the publication before correcting},
abstract = {Computational-based behavior analysis aims to automatically identify, characterize, model, and synthesize multimodal nonverbal behavior within both human–machine as well as machine-mediated human–human interaction. It uses state-of-the-art machine learning algorithms to track human nonverbal and verbal information, such as facial expressions, gestures, and posture, as well as what and how a person speaks. The emerging technology from this field of research is relevant for a wide range of interactive and social applications, including health care and education. The characterization and association of nonverbal behavior with underlying clinical conditions, such as depression or posttraumatic stress, could have significant benefits for treatments and the overall efficiency of the health care system.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Spicer, Ryan; Evangelista, Edgar; New, Raymond; Campbell, Julia; Richmond, Todd; McGroarty, Christopher; Vogt, Brian
Innovation and Rapid Evolutionary Design by Virtual Doing: Understanding Early Synthetic Prototyping Proceedings Article
In: Proceeding of 15 Simulation Interoperability Workshop, Orlando, FL, 2015.
@inproceedings{spicer_innovation_2015,
title = {Innovation and Rapid Evolutionary Design by Virtual Doing: Understanding Early Synthetic Prototyping},
author = {Spicer, Ryan and Evangelista, Edgar and New, Raymond and Campbell, Julia and Richmond, Todd and McGroarty, Christopher and Vogt, Brian},
url = {http://ict.usc.edu/pubs/Innovation%20and%20Rapid%20Evolutionary%20Design%20by%20Virtual%20Doing-Understanding%20Early%20Synthetic.pdf},
year = {2015},
date = {2015-09-01},
booktitle = {Proceeding of 15 Simulation Interoperability Workshop},
internal-note = {NOTE(review): booktitle "Proceeding of 15" looks garbled — presumably "Proceedings of the 2015 Simulation Interoperability Workshop"; confirm before changing},
address = {Orlando, FL},
abstract = {The proliferation and maturation of tools supporting virtual environments combined with emerging immersive capabilities (e.g. Oculus Rift and other head mounted displays) point towards the ability to take nascent ideas and realize them in engaging ways through an Early Synthetic Prototyping (ESP) system. In effect, “bend electrons before bending metal,” enabling Soldier (end-user) feedback early in the design process, while fostering an atmosphere of collaboration and innovation. Simulation has been used in a variety of ways for concept, design, and testing, but current methods do not put the user into the system in ways that provide deep feedback and enable a dialogue between Warfighter and Engineer (as well as other stakeholders) that can inform design. This paper will discuss how the process of ESP is teased out by using iterative rapid virtual prototyping based on an initial ESP schema, resulting in a rather organic design process – Innovation and Rapid Evolutionary Design by Virtual Doing. By employing canonical use cases, working through the draft schema allows the system to help design itself and inform the process evolution. This type of self-referential meta-design becomes increasingly powerful and relevant given the ability to rapidly create assets, capabilities and environments that immerse developers, stakeholders, and end users early and often in the process. Specific examples of using rapid virtual prototyping for teasing out the design and implications/applications of ESP will be presented, walking through the evolution of both schema and prototypes with specific use cases. In addition, this paper will cover more generalized concepts, approaches, analytics, and lessons-learned as well as implications for innovation throughout research, development, and industry.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lane, H. Chad; Core, Mark G.; Goldberg, Benjamin S.
Lowering the Technical Skill Requirements for Building Intelligent Tutors: A Review of Authoring Tools Book Section
In: Design Recommendations for Intelligent Tutoring Systems, vol. 3, pp. 303–318, U.S. Army Research Laboratory, 2015.
@incollection{lane_lowering_2015,
title = {Lowering the Technical Skill Requirements for Building Intelligent Tutors: A Review of Authoring Tools},
author = {Lane, H. Chad and Core, Mark G. and Goldberg, Benjamin S.},
url = {http://ict.usc.edu/pubs/Lowering%20the%20Technical%20Skill%20Requirements%20for%20Building%20Intelligent%20Tutors-A%20Review%20of%20Authoring%20Tools.pdf},
year = {2015},
date = {2015-06-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems},
volume = {3},
pages = {303--318},
publisher = {U.S. Army Research Laboratory},
abstract = {In this chapter, we focus on intelligent tutoring systems (ITSs), an instance of educational technology that is often criticized for not reaching its full potential (Nye, 2013). Researchers have debated why, given such strong empirical evidence in their favor (Anderson, Corbett, Koedinger & Pelletier, 1995; D’Mello & Graesser, 2012; VanLehn et al., 2005; Woolf, 2009), intelligent tutors are not in every classroom, on every device, providing educators with fine-grained assessment information about their students. Although many factors contribute to a lack of adoption (Nye, 2014), one widely agreed upon reason behind slow adoption and poor scalability of ITSs is that the engineering demands are simply too great. This is no surprise given that the effectiveness of ITSs is often attributable to the use of rich knowledge representations and cognitively plausible models of domain knowledge (Mark & Greer, 1995; Valerie J. Shute & Psotka, 1996; VanLehn, 2006; Woolf, 2009), which are inherently burdensome to build. To put it another way: the features that tend to make ITSs effective are also the hardest to build. The heavy reliance on cognitive scientists and artificial intelligence (AI) software engineers seems to be a bottleneck.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Nye, Benjamin D.; Goldberg, Ben; Hu, Xiangen
Generalizing the Genres for ITS: Authoring Considerations for Representative Learning Tasks Book Section
In: Sottilare, Robert A.; Graesser, Arthur C.; Hu, Xiangen; Brawner, Keith (Ed.): Design Recommendations for Intelligent Tutoring Systems: Volume 2: Authoring Tools and Expert Modeling Techniques, vol. 3, pp. 47–63, U.S. Army Research Laboratory, 2015, ISBN: 978-0-9893923-7-2.
@incollection{nye_generalizing_2015,
title = {Generalizing the Genres for ITS: Authoring Considerations for Representative Learning Tasks},
author = {Nye, Benjamin D. and Goldberg, Ben and Hu, Xiangen},
editor = {Sottilare, Robert A. and Graesser, Arthur C. and Hu, Xiangen and Brawner, Keith},
url = {http://ict.usc.edu/pubs/Generalizing%20the%20Genres%20for%20ITS%20-%20Authoring%20Considerations%20for%20Representative%20Learning%20Tasks.pdf},
isbn = {978-0-9893923-7-2},
year = {2015},
date = {2015-06-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems: Volume 2: Authoring Tools and Expert Modeling Techniques},
volume = {3},
internal-note = {NOTE(review): volume = 3 but booktitle says "Volume 2" — one of the two is likely wrong; verify against the published series},
pages = {47--63},
publisher = {U.S. Army Research Laboratory},
abstract = {Compared to many other learning technologies, intelligent tutoring systems (ITSs) have a distinct challenge: authoring an adaptive inner loop that provides pedagogical support on one or more learning tasks. This coupling of tutoring behavior to student interaction with a learning task means that authoring tools need to reflect both the learning task and the ITS pedagogy. To explore this issue, common learning activities in intelligent tutoring need to be categorized and analyzed for the information that is required to tutor each task. The types of learning activities considered cover a large range: step-by-step problem solving, bug repair, building generative functions (e.g., computer code), structured argumentation, self-reflection, short question answering, essay writing, classification, semantic matching, representation mapping (e.g., graph to equation), concept map revision, choice scenarios, simulated process scenarios, motor skills practice, collaborative discussion, collaborative design, and team coordination tasks. These different tasks imply a need for different authoring tools and processes used to create tutoring systems for each task. In this chapter, we consider three facets of authoring: 1) the minimum information required to create the task, 2) the minimum information needed to implement common pedagogical strategies, 3) the expertise required for each type of information. The goal of this analysis is to present a roadmap of effective practices in authoring tool interfaces for each tutoring task considered. A long-term vision for ITSs is to have generalizable authoring tools, which could be used to rapidly create content for a variety of ITSs. However, it is as-yet unclear if this goal is even attainable. Authoring tools have a number of serious challenges, from the standpoint of generalizability. These challenges include the domain, the data format, and the author. 
First, different ITS domains require different sets of authoring tools, because they have different learning tasks. Tools that are convenient for embedding tutoring in a 3D virtual world are completely different than ones that make it convenient to add tutoring to a system for practicing essay-writing, for example. Second, the data produced by an authoring tool needs to be consumed by an ITS that will make pedagogical decisions. As such, at least some of the data is specific to the pedagogy of the ITS, rather than directly reflecting domain content. As a simple example, if an ITS uses text hints, those hints need to be authored, but some systems may just highlight errors rather than providing text hints. As such, the first system actually needs more content authored and represented as data. With that said, typical ITSs use a relatively small and uniform set of authored content to interact with learners, such as correctness feedback, corrections, and hints (VanLehn, 2006). Third, different authors may need different tools (Nye, Rahman, Yang, Hays, Cai, Graesser, & Hu, 2014). This means that even the same content may need distinct authoring tools that match the expertise of different authors. In this chapter, we are focusing primarily on the first challenge: differences in domains. In particular, our stance is that the “content domain” is too coarse-grained to allow much reuse between authoring tools. This is because, to a significant extent, content domains are simply names for related content. However, the skills and pedagogy for the same domain can vary drastically across different topics and expertise levels. For example, Algebra and Geometry are both high-school level math domains. However, in geometry, graphical depictions (e.g., shapes, angles) are a central aspect of the pedagogy, while Algebra tends to use graphics very differently (e.g., coordinate plots). 
As such, some learning tasks tend to be shared between those subdomains (e.g., equation-solving) and other tasks are not (e.g., classifying shapes). This raises the central point of our paper: the learning tasks for a domain define how we author content for that domain. For example, while Algebra does not involve recognizing many shapes, understanding the elements of architecture involves recognizing a variety of basic and advanced shapes and forms. In total, this means that no single whole-cloth authoring tool will work well for any pair of Algebra, Geometry, and Architectural Forms. However, it also implies that a reasonable number of task-specific tools for each learning task might allow authoring for all three domains. To do this, we need to understand the common learning tasks for domains taught using ITS, and why those tasks are applied to those domains. In the following sections, we identify and categorize common learning tasks for different ITS domains. Then, we extract common principles for those learning tasks. Finally, we suggest a set of general learning activities that might be used to tutor a large number of domains.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Choi, Ahyoung; Melo, Celso M.; Khooshabeh, Peter; Woo, Woontack; Gratch, Jonathan
Physiological evidence for a dual process model of the social effects of emotion in computers Journal Article
In: International Journal of Human-Computer Studies, vol. 74, pp. 41–53, 2015, ISSN: 10715819.
@article{choi_physiological_2015,
title = {Physiological evidence for a dual process model of the social effects of emotion in computers},
author = {Choi, Ahyoung and Melo, Celso M. and Khooshabeh, Peter and Woo, Woontack and Gratch, Jonathan},
url = {http://linkinghub.elsevier.com/retrieve/pii/S1071581914001414},
doi = {10.1016/j.ijhcs.2014.10.006},
issn = {1071-5819},
year = {2015},
date = {2015-02-01},
journal = {International Journal of Human-Computer Studies},
volume = {74},
pages = {41--53},
abstract = {There has been recent interest on the impact of emotional expressions of computers on people's decision making. However, despite a growing body of empirical work, the mechanism underlying such effects is still not clearly understood. To address this issue the paper explores two kinds of processes studied by emotion theorists in human-human interaction: inferential processes, whereby people retrieve information from emotion expressions about other's beliefs, desires, and intentions; affective processes, whereby emotion expressions evoke emotions in others, which then influence their decisions. To tease apart these two processes as they occur in human-computer interaction, we looked at physiological measures (electrodermal activity and heart rate deceleration). We present two experiments where participants engaged in social dilemmas with embodied agents that expressed emotion. Our results show, first, that people's decisions were influenced by affective and cognitive processes and, according to the prevailing process, people behaved differently and formed contrasting subjective ratings of the agents; second we show that an individual trait known as electrodermal lability, which measures people's physiological sensitivity, predicted the extent to which affective or inferential processes dominated the interaction. We discuss implications for the design of embodied agents and decision making systems that use emotion expression to enhance interaction between humans and computers.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Dehghani, M.; Khooshabeh, P.; Nazarian, A.; Gratch, J.
The Subtlety of Sound: Accent as a Marker for Culture Journal Article
In: Journal of Language and Social Psychology, 2014, ISSN: 0261-927X, 1552-6526.
@article{dehghani_subtlety_2014,
title = {The Subtlety of Sound: Accent as a Marker for Culture},
author = {Dehghani, M. and Khooshabeh, P. and Nazarian, A. and Gratch, J.},
internal-note = {NOTE(review): only initials available here — full given names (Morteza, Peter, Angela, Jonathan) appear in the 2013 companion entry; consider expanding for consistency},
url = {http://jls.sagepub.com/cgi/doi/10.1177/0261927X14551095},
doi = {10.1177/0261927X14551095},
issn = {0261-927X, 1552-6526},
year = {2014},
date = {2014-09-01},
journal = {Journal of Language and Social Psychology},
abstract = {Aspects of language, such as accent, play a crucial role in the formation and categorization of one’s cultural identity. Recent work on accent emphasizes the role of accent in person perception and social categorization, demonstrating that accent also serves as a meaningful indicator of an ethnic category. In this article, we investigate whether the accent of an interaction partner, as a marker for culture, can induce cultural frame-shifts in biculturals. We report the results of three experiments, performed among bicultural and monocultural individuals, in which we test the above hypothesis. Our results demonstrate that accent alone can affect people’s cognition.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Khooshabeh, Peter; Dehghani, Morteza; Nazarian, Angela; Gratch, Jonathan
The Cultural Influence Model: When Accented Natural Language Spoken by Virtual Characters Matters Journal Article
In: Journal of Artificial Intelligence and Society, vol. 29, 2013.
@article{khooshabeh_cultural_2013,
title = {The Cultural Influence Model: When Accented Natural Language Spoken by Virtual Characters Matters},
author = {Khooshabeh, Peter and Dehghani, Morteza and Nazarian, Angela and Gratch, Jonathan},
url = {http://ict.usc.edu/pubs/The%20Cultural%20Influence%20Model-%20When%20Accented%20Natural%20Language%20Spoken%20by%20Virtual%20Characters%20Matters.pdf},
year = {2013},
date = {2013-09-01},
journal = {Journal of Artificial Intelligence and Society},
internal-note = {NOTE(review): journal name may be non-canonical — presumably the Springer journal "AI & Society"; verify before normalizing},
volume = {29},
abstract = {Advances in Artificial Intelligence (AI) and computer graphics digital technologies have contributed to a relative increase of realism in virtual characters. Preserving virtual characters’ communicative realism, in particular, joined the ranks of the improvements in natural language technology and animation algorithms. This paper focuses on culturally relevant paralinguistic cues in nonverbal communication. We model the effects of an English speaking digital character with different accents on human interactants (i.e., users). Our cultural influence model proposes that paralinguistic realism, in the form of accented speech, is effective in promoting culturally congruent cognition only when it is self-relevant to users. For example, a Chinese or Middle Eastern English accent may be perceived as foreign to individuals who do not share the same ethnic cultural background with members of those cultures. However, for individuals who are familiar and affiliate with those cultures (i.e., in-group members who are bicultural), accent not only serves as a motif of shared social identity, it also primes them to adopt culturally appropriate interpretive frames that influence their decision making.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Filter
Sorry, no publications matched your criteria.