Publications
Nye, Benjamin D.; Hu, Xiangen
Conceptualizing and Representing Domains to Guide Tutoring Book Section
In: Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling, vol. 4, pp. 15–18, US Army Research Laboratory, Orlando, FL, 2016.
@incollection{nye_conceptualizing_2016,
title = {Conceptualizing and Representing Domains to Guide Tutoring},
author = {Benjamin D. Nye and Xiangen Hu},
url = {http://books.google.com/books?hl=en&lr=&id=0suvDAAAQBAJ&oi=fnd&pg=PA15&dq=%22data.+This+chapter+presents+an+excellent+overview+of+current+research+on+Q-matrices%22+%22edge+work+on+ensemble+methods+that+achieve+state+of+the+art+performance+by+combining%22+&ots=6MJhm1XHVV&sig=i14eJyin69Cy-jms2lWIFF4K3CU},
year = {2016},
date = {2016-07-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling},
volume = {4},
pages = {15–18},
publisher = {US Army Research Laboratory},
address = {Orlando, FL},
abstract = {Any discussion of intelligent tutoring system (ITS) domains must begin by considering how ITSs conceptualize and represent domains. This process requires building formal, mathematically specifiable operationalizations of the often implicit knowledge about learning domains and their pedagogy. Across different domains and pedagogical approaches, a wide variety of methods have been taken: a scope that would be better covered by an encyclopedia than by a single book. Since this section could not possibly cover every approach to domain modeling, the chapters within it were instead chosen to cover a representative range of fundamentally different approaches.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Sottilare, Robert A.; Graesser, Arthur C.; Hu, Xiangen; Olney, Andrew; Nye, Benjamin; Sinatra, Anna M.
Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling Book
US Army Research Laboratory, Orlando, FL, 2016.
@book{sottilare_design_2016,
title = {Design Recommendations for Intelligent Tutoring Systems: Volume 4-Domain Modeling},
author = {Robert A. Sottilare and Arthur C. Graesser and Xiangen Hu and Andrew Olney and Benjamin Nye and Anna M. Sinatra},
url = {http://books.google.com/books?hl=en&lr=&id=0suvDAAAQBAJ&oi=fnd&pg=PA1&dq=%22Barnes,+Behrooz+Mostafavi,+and+Michael+J.%22+%22A.+Sottilare+and+Joseph%22+%2214+%E2%80%93+Exploring+the+Diversity+of+Domain+Modeling+for+Training%22+%2213+%E2%80%92+Mining+Expertise:+Learning+New+Tricks+from+an+Old%22+&ots=6MJgp2XEWV&sig=7CHZvZIllN3Xk8uFbMHmxN7gfLw},
year = {2016},
date = {2016-07-01},
volume = {4},
publisher = {US Army Research Laboratory},
address = {Orlando, FL},
abstract = {Design Recommendations for Intelligent Tutoring Systems (ITSs) explores the impact of intelligent tutoring system design on education and training. Specifically, this volume examines “Domain Modeling”. The “Design Recommendations” book series examines tools and methods to reduce the time and skill required to develop intelligent tutoring systems, with the goal of improving the Generalized Intelligent Framework for Tutoring (GIFT). GIFT is a modular, service-oriented architecture developed to capture simplified authoring techniques, promote reuse and standardization of ITSs, and provide automated instructional techniques and effectiveness evaluation capabilities for adaptive tutoring tools and methods.},
keywords = {},
pubstate = {published},
tppubtype = {book}
}
Nye, Benjamin D.
ITS, The End of the World as We Know It: Transitioning AIED into a Service-Oriented Ecosystem Journal Article
In: International Journal of Artificial Intelligence in Education, vol. 26, no. 2, pp. 756–770, 2016, ISSN: 1560-4292, 1560-4306.
@article{nye_its_2016,
title = {ITS, The End of the World as We Know It: Transitioning AIED into a Service-Oriented Ecosystem},
author = {Benjamin D. Nye},
url = {http://link.springer.com/10.1007/s40593-016-0098-8},
doi = {10.1007/s40593-016-0098-8},
issn = {1560-4292, 1560-4306},
year = {2016},
date = {2016-06-01},
journal = {International Journal of Artificial Intelligence in Education},
volume = {26},
number = {2},
pages = {756–770},
abstract = {Advanced learning technologies are reaching a new phase of their evolution where they are finally entering mainstream educational contexts, with persistent user bases. However, as AIED scales, it will need to follow recent trends in service-oriented and ubiquitous computing: breaking AIED platforms into distinct services that can be composed for different platforms (web, mobile, etc.) and distributed across multiple systems. This will represent a move from learning platforms to an ecosystem of interacting learning tools. Such tools will enable new opportunities for both user-adaptation and experimentation. Traditional macro-adaptation (problem selection) and step-based adaptation (hints and feedback) will be extended by meta-adaptation (adaptive system selection) and micro-adaptation (event-level optimization). The existence of persistent and widely-used systems will also support new paradigms for experimentation in education, allowing researchers to understand interactions and boundary conditions for learning principles. New central research questions for the field will also need to be answered due to these changes in the AIED landscape.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Swartout, William; Nye, Benjamin D.; Hartholt, Arno; Reilly, Adam; Graesser, Arthur C.; VanLehn, Kurt; Wetzel, Jon; Liewer, Matt; Morbini, Fabrizio; Morgan, Brent; Wang, Lijia; Benn, Grace; Rosenberg, Milton
Designing a Personal Assistant for Life-Long Learning (PAL3) Proceedings Article
In: Proceedings of the Twenty-Ninth International FLAIRS Conference, pp. 491–496, AAAI Press, Key Largo, FL, 2016, ISBN: 978-1-57735-756-8.
@inproceedings{swartout_designing_2016,
title = {Designing a Personal Assistant for Life-Long Learning (PAL3)},
author = {William Swartout and Benjamin D. Nye and Arno Hartholt and Adam Reilly and Arthur C. Graesser and Kurt VanLehn and Jon Wetzel and Matt Liewer and Fabrizio Morbini and Brent Morgan and Lijia Wang and Grace Benn and Milton Rosenberg},
url = {http://www.aaai.org/ocs/index.php/FLAIRS/FLAIRS16/paper/view/12793},
isbn = {978-1-57735-756-8},
year = {2016},
date = {2016-05-01},
booktitle = {Proceedings of the Twenty-Ninth International FLAIRS Conference},
pages = {491–496},
publisher = {AAAI Press},
address = {Key Largo, FL},
abstract = {Learners’ skills decay during gaps in instruction, since they lack the structure and motivation to continue studying. To meet this challenge, the PAL3 system was designed to accompany a learner throughout their career and mentor them to build and maintain skills through: 1) the use of an embodied pedagogical agent (Pal), 2) a persistent learning record that drives a student model which estimates forgetting, 3) an adaptive recommendation engine linking to both intelligent tutors and traditional learning resources, and 4) game-like mechanisms to promote engagement (e.g., leaderboards, effort-based point rewards, unlocking customizations). The design process for PAL3 is discussed, from the perspective of insights and revisions based on a series of formative feedback and evaluation sessions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
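The PAL3 abstract above mentions a student model that estimates forgetting and an adaptive recommendation engine. As a purely illustrative sketch (not the PAL3 implementation; the decay constant, field names, and skills below are hypothetical assumptions), such a component could decay each skill's last mastery estimate over time and recommend whichever skill has decayed the most:

import math
import time

DECAY_RATE = 0.1  # assumed decay constant per day without practice (hypothetical)

def estimated_retention(last_mastery, last_practiced, now):
    """Exponentially decay a stored mastery estimate as days pass without practice."""
    days_elapsed = (now - last_practiced) / 86400.0
    return last_mastery * math.exp(-DECAY_RATE * days_elapsed)

def recommend_skill(learning_record, now=None):
    """Return the skill whose estimated retention is currently lowest."""
    now = time.time() if now is None else now
    return min(
        learning_record,
        key=lambda skill: estimated_retention(
            learning_record[skill]["mastery"],
            learning_record[skill]["last_practiced"],
            now,
        ),
    )

# Toy example: an older, unrefreshed skill is surfaced before a recently practiced one.
record = {
    "electronics_basics": {"mastery": 0.9, "last_practiced": time.time() - 30 * 86400},
    "circuit_troubleshooting": {"mastery": 0.7, "last_practiced": time.time() - 2 * 86400},
}
print(recommend_skill(record))  # -> electronics_basics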
Graesser, Arthur C; Hu, Xiangen; Nye, Benjamin D.; Sottilare, Robert A.
Intelligent Tutoring Systems, Serious Games, and the Generalized Intelligent Framework for Tutoring (GIFT) Book Section
In: Using Games and Simulations for Teaching and Assessment, pp. 58–79, Routledge, New York, NY, 2016, ISBN: 978-0-415-73787-6.
@incollection{graesser_intelligent_2016,
title = {Intelligent Tutoring Systems, Serious Games, and the Generalized Intelligent Framework for Tutoring (GIFT)},
author = {Arthur C Graesser and Xiangen Hu and Benjamin D. Nye and Robert A. Sottilare},
url = {https://www.researchgate.net/publication/304013322_Intelligent_Tutoring_Systems_Serious_Games_and_the_Generalized_Intelligent_Framework_for_Tutoring_GIFT},
isbn = {978-0-415-73787-6},
year = {2016},
date = {2016-01-01},
booktitle = {Using Games and Simulations for Teaching and Assessment},
pages = {58–79},
publisher = {Routledge},
address = {New York, NY},
abstract = {This chapter explores the prospects of integrating games with intelligent tutoring systems (ITSs). The hope is that there can be learning environments that optimize both motivation through games and deep learning through ITS technologies. Deep learning refers to the acquisition of knowledge, skills, strategies, and reasoning processes at the higher levels of Bloom’s (1956) taxonomy or the Knowledge-Learning-Instruction (KLI) framework (Koedinger, Corbett, & Perfetti, 2012), such as the application of knowledge to new cases, knowledge analysis and synthesis, problem solving, critical thinking, and other difficult cognitive processes. In contrast, shallow learning involves perceptual learning, memorization of explicit material, and mastery of simple rigid procedures. Shallow knowledge may be adequate for near transfer tests of knowledge/skills but not far transfer tests to new situations that have some modicum of complexity.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Nye, Benjamin D.; Morrison, Donald M.; Samei, Borhan
Automated Session-Quality Assessment for Human Tutoring Based on Expert Ratings of Tutoring Success Proceedings Article
In: Proceedings of Educational Data Mining (EDM) 2015, pp. 195–202, Springer, Madrid, Spain, 2015.
@inproceedings{nye_automated_2015,
title = {Automated Session-Quality Assessment for Human Tutoring Based on Expert Ratings of Tutoring Success},
author = {Benjamin D. Nye and Donald M. Morrison and Borhan Samei},
url = {http://ict.usc.edu/pubs/Automated%20Session-Quality%20Assessment%20for%20Human%20Tutoring%20Based%20on%20Expert%20Ratings%20of%20Tutoring%20Success.pdf},
year = {2015},
date = {2015-06-01},
booktitle = {Proceedings of Educational Data Mining (EDM) 2015},
pages = {195–202},
publisher = {Springer},
address = {Madrid, Spain},
abstract = {Archived transcripts from tens of millions of online human tutoring sessions potentially contain important knowledge about how online tutors help, or fail to help, students learn. However, without ways of automatically analyzing these large corpora, any knowledge in this data will remain buried. One way to approach this issue is to train an estimator for the learning effectiveness of an online tutoring interaction. While significant work has been done on automated assessment of student responses and artifacts (e.g., essays), automated assessment has not traditionally been applied to human-to-human tutoring sessions. In this work, we trained a model for estimating tutoring session quality based on a corpus of 1438 online tutoring sessions rated by expert tutors. Each session was rated for evidence of learning (outcomes) and educational soundness (process). Session features for this model included dialog act classifications, mode classifications (e.g., Scaffolding), statistically distinctive subsequences of such classifications, dialog initiative (e.g., statements by tutor vs. student), and session length. The model correlated more highly with evidence of learning than educational soundness ratings, in part due to the greater difficulty of classifying tutoring modes. This model was then applied to a corpus of 242k online tutoring sessions to examine the relationships between automated assessments and other available metadata (e.g., the tutor's self-assessment). On this large corpus, the automated assessments followed patterns similar to the expert raters' assessments, but with lower overall correlation strength. Based on the analyses presented, the assessment model emulates expert human tutors' session quality ratings with a reasonable degree of accuracy.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
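The abstract above describes estimating session quality from features such as dialog act counts, dialog initiative, and session length, fit against expert ratings. A minimal sketch of that general idea, assuming scikit-learn and entirely made-up feature names and toy data (this is not the authors' feature set or model):

import numpy as np
from sklearn.linear_model import LinearRegression

# Each row is one session: [tutor questions, scaffolding moves, tutor turn fraction, total turns]
sessions = np.array([
    [12, 8, 0.55, 60],
    [3, 1, 0.80, 25],
    [9, 6, 0.50, 48],
    [1, 0, 0.90, 15],
])
expert_rating = np.array([4.0, 2.0, 3.5, 1.5])  # hypothetical "evidence of learning" ratings

model = LinearRegression().fit(sessions, expert_rating)
print(model.predict(np.array([[10, 7, 0.52, 55]])))  # estimated quality for a new session

In practice one would hold out sessions for validation and report correlation with the expert ratings, as the paper does at much larger scale.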
Nye, Benjamin D.; Goldberg, Ben; Hu, Xiangen
Generalizing the Genres for ITS: Authoring Considerations for Representative Learning Tasks Book Section
In: Sottilare, Robert A.; Graesser, Arthur C.; Hu, Xiangen; Brawner, Keith (Ed.): Design Recommendations for Intelligent Tutoring Systems: Volume 3: Authoring Tools and Expert Modeling Techniques, vol. 3, pp. 47–63, U.S. Army Research Laboratory, 2015, ISBN: 978-0-9893923-7-2.
@incollection{nye_generalizing_2015,
title = {Generalizing the Genres for ITS: Authoring Considerations for Representative Learning Tasks},
author = {Benjamin D. Nye and Ben Goldberg and Xiangen Hu},
editor = {Robert A. Sottilare and Arthur C. Graesser and Xiangen Hu and Keith Brawner},
url = {http://ict.usc.edu/pubs/Generalizing%20the%20Genres%20for%20ITS%20-%20Authoring%20Considerations%20for%20Representative%20Learning%20Tasks.pdf},
isbn = {978-0-9893923-7-2},
year = {2015},
date = {2015-06-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems: Volume 3: Authoring Tools and Expert Modeling Techniques},
volume = {3},
pages = {47–63},
publisher = {U.S. Army Research Laboratory},
abstract = {Compared to many other learning technologies, intelligent tutoring systems (ITSs) have a distinct challenge: authoring an adaptive inner loop that provides pedagogical support on one or more learning tasks. This coupling of tutoring behavior to student interaction with a learning task means that authoring tools need to reflect both the learning task and the ITS pedagogy. To explore this issue, common learning activities in intelligent tutoring need to be categorized and analyzed for the information that is required to tutor each task. The types of learning activities considered cover a large range: step-by-step problem solving, bug repair, building generative functions (e.g., computer code), structured argumentation, self-reflection, short question answering, essay writing, classification, semantic matching, representation mapping (e.g., graph to equation), concept map revision, choice scenarios, simulated process scenarios, motor skills practice, collaborative discussion, collaborative design, and team coordination tasks. These different tasks imply a need for different authoring tools and processes used to create tutoring systems for each task. In this chapter, we consider three facets of authoring: 1) the minimum information required to create the task, 2) the minimum information needed to implement common pedagogical strategies, 3) the expertise required for each type of information. The goal of this analysis is to present a roadmap of effective practices in authoring tool interfaces for each tutoring task considered. A long-term vision for ITSs is to have generalizable authoring tools, which could be used to rapidly create content for a variety of ITSs. However, it is as-yet unclear if this goal is even attainable. Authoring tools have a number of serious challenges, from the standpoint of generalizability. These challenges include the domain, the data format, and the author. First, different ITS domains require different sets of authoring tools, because they have different learning tasks. Tools that are convenient for embedding tutoring in a 3D virtual world are completely different than ones that make it convenient to add tutoring to a system for practicing essay-writing, for example. Second, the data produced by an authoring tool needs to be consumed by an ITS that will make pedagogical decisions. As such, at least some of the data is specific to the pedagogy of the ITS, rather than directly reflecting domain content. As a simple example, if an ITS uses text hints, those hints need to be authored, but some systems may just highlight errors rather than providing text hints. As such, the first system actually needs more content authored and represented as data. With that said, typical ITSs use a relatively small and uniform set of authored content to interact with learners, such as correctness feedback, corrections, and hints (VanLehn, 2006). Third, different authors may need different tools (Nye, Rahman, Yang, Hays, Cai, Graesser, & Hu, 2014). This means that even the same content may need distinct authoring tools that match the expertise of different authors. In this chapter, we are focusing primarily on the first challenge: differences in domains. In particular, our stance is that the “content domain” is too coarse-grained to allow much reuse between authoring tools. This is because, to a significant extent, content domains are simply names for related content. 
However, the skills and pedagogy for the same domain can vary drastically across different topics and expertise levels. For example, Algebra and Geometry are both high-school level math domains. However, in geometry, graphical depictions (e.g., shapes, angles) are a central aspect of the pedagogy, while Algebra tends to use graphics very differently (e.g., coordinate plots). As such, some learning tasks tend to be shared between those subdomains (e.g., equation-solving) and other tasks are not (e.g., classifying shapes). This raises the central point of our paper: the learning tasks for a domain define how we author content for that domain. For example, while Algebra does not involve recognizing many shapes, understanding the elements of architecture involves recognizing a variety of basic and advanced shapes and forms. In total, this means that no single whole-cloth authoring tool will work well for any pair of Algebra, Geometry, and Architectural Forms. However, it also implies that a reasonable number of task-specific tools for each learning task might allow authoring for all three domains. To do this, we need to understand the common learning tasks for domains taught using ITS, and why those tasks are applied to those domains. In the following sections, we identify and categorize common learning tasks for different ITS domains. Then, we extract common principles for those learning tasks. Finally, we suggest a set of general learning activities that might be used to tutor a large number of domains.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Lane, H. Chad; Core, Mark G.; Hays, Matthew J.; Auerbach, Daniel; Rosenberg, Milton
Situated Pedagogical Authoring: Authoring Intelligent Tutors from a Student’s Perspective Proceedings Article
In: Artificial Intelligence in Education, pp. 195–204, Springer International Publishing, Madrid, Spain, 2015, ISBN: 978-3-319-19772-2 978-3-319-19773-9.
@inproceedings{chad_lane_situated_2015,
title = {Situated Pedagogical Authoring: Authoring Intelligent Tutors from a Student’s Perspective},
author = {H. Chad Lane and Mark G. Core and Matthew J. Hays and Daniel Auerbach and Milton Rosenberg},
url = {http://ict.usc.edu/pubs/Situated%20Pedagogical%20Authoring-Authoring%20Intelligent.pdf},
isbn = {978-3-319-19772-2 978-3-319-19773-9},
year = {2015},
date = {2015-06-01},
booktitle = {Artificial Intelligence in Education},
volume = {9112},
pages = {195–204},
publisher = {Springer International Publishing},
address = {Madrid, Spain},
abstract = {We describe the Situated Pedagogical Authoring (SitPed) system that seeks to allow non-technical authors to create ITS content for soft-skills training, such as counseling skills. SitPed is built on the assertion that authoring tools should use the learner’s perspective to the greatest extent possible. SitPed provides tools for creating task lists, authoring assessment knowledge, and creating tutor messages. We present preliminary findings of a two-phase study comparing authoring in SitPed to an ablated version of the same system and a spreadsheet-based control. Findings suggest modest advantages for SitPed in terms of the quality of the authored content and student learning.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lane, H. Chad; Core, Mark G.; Goldberg, Benjamin S.
Lowering the Technical Skill Requirements for Building Intelligent Tutors: A Review of Authoring Tools Book Section
In: Design Recommendations for Intelligent Tutoring Systems, vol. 3, pp. 303–318, U.S. Army Research Laboratory, 2015.
@incollection{lane_lowering_2015,
title = {Lowering the Technical Skill Requirements for Building Intelligent Tutors: A Review of Authoring Tools},
author = {H. Chad Lane and Mark G. Core and Benjamin S. Goldberg},
url = {http://ict.usc.edu/pubs/Lowering%20the%20Technical%20Skill%20Requirements%20for%20Building%20Intelligent%20Tutors-A%20Review%20of%20Authoring%20Tools.pdf},
year = {2015},
date = {2015-06-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems},
volume = {3},
pages = {303–318},
publisher = {U.S. Army Research Laboratory},
abstract = {In this chapter, we focus on intelligent tutoring systems (ITSs), an instance of educational technology that is often criticized for not reaching its full potential (Nye, 2013). Researchers have debated why, given such strong empirical evidence in their favor (Anderson, Corbett, Koedinger & Pelletier, 1995; D’Mello & Graesser, 2012; VanLehn et al., 2005; Woolf, 2009), intelligent tutors are not in every classroom, on every device, providing educators with fine-grained assessment information about their students. Although many factors contribute to a lack of adoption (Nye, 2014), one widely agreed upon reason behind slow adoption and poor scalability of ITSs is that the engineering demands are simply too great. This is no surprise given that the effectiveness of ITSs is often attributable to the use of rich knowledge representations and cognitively plausible models of domain knowledge (Mark & Greer, 1995; Shute & Psotka, 1996; VanLehn, 2006; Woolf, 2009), which are inherently burdensome to build. To put it another way: the features that tend to make ITSs effective are also the hardest to build. The heavy reliance on cognitive scientists and artificial intelligence (AI) software engineers seems to be a bottleneck.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Nye, Benjamin D.; Hu, Xiangen
A Historical Perspective on Authoring and ITS: Reviewing Some Lessons Learned Book Section
In: Sottilare, Robert A.; Graesser, Arthur C.; Hu, Xiangen; Brawner, Keith (Ed.): Design Recommendations for Intelligent Tutoring Systems: Volume 3: Authoring Tools and Expert Modeling Techniques, pp. 67–70, U.S. Army Research Laboratory, 2015, ISBN: 978-0-9893923-7-2.
@incollection{nye_historical_2015,
title = {A Historical Perspective on Authoring and ITS: Reviewing Some Lessons Learned},
author = {Benjamin D. Nye and Xiangen Hu},
editor = {Robert A. Sottilare and Arthur C. Graesser and Xiangen Hu and Keith Brawner},
url = {http://ict.usc.edu/pubs/A%20Historical%20Perspective%20on%20Authoring%20and%20ITS%20-%20Reviewing%20Some%20Lessons%20Learned.pdf},
isbn = {978-0-9893923-7-2},
year = {2015},
date = {2015-06-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems: Volume 3: Authoring Tools and Expert Modeling Techniques},
pages = {67–70},
publisher = {U.S. Army Research Laboratory},
abstract = {This section discusses the practices and lessons learned from authoring tools that have been applied and revised through repeated use by researchers, content authors, and/or instructors. All of the tools noted in this section represent relatively mature applications that can be used to build and configure educationally-effective content. Each tool has been tailored to address both the tutoring content and the expected authors who will be using the tool. As such, even tools which support similar tutoring strategies may use very different interfaces to represent equivalent domain knowledge. In some cases, authoring tools even represent offshoots where different authoring goals led to divergent evolution of both the authoring tools and the intelligent tutoring systems (ITSs) from a common lineage. Understanding how these systems adapted their tools to their particular authoring challenges gives concrete examples of the tradeoffs involved for different types of authoring. By reviewing the successes and challenges of the past, these chapters provide lessons learned for the development of future systems.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Blumberg, Fran C.; Burke, Lauren C.; Hodent, Celia; Evans, Michael A.; Lane, H. Chad; Schell, Jesse
Serious Games for Health: Features, Challenges, Next Steps Journal Article
In: Games for Health Journal, vol. 3, no. 5, pp. 270–276, 2014, ISSN: 2161-783X, 2161-7856.
@article{blumberg_serious_2014,
title = {Serious Games for Health: Features, Challenges, Next Steps},
author = {Fran C. Blumberg and Lauren C. Burke and Celia Hodent and Michael A. Evans and H. Chad Lane and Jesse Schell},
url = {http://online.liebertpub.com/doi/abs/10.1089/g4h.2014.0079},
doi = {10.1089/g4h.2014.0079},
issn = {2161-783X, 2161-7856},
year = {2014},
date = {2014-10-01},
journal = {Games for Health Journal},
volume = {3},
number = {5},
pages = {270–276},
abstract = {As articles in this journal have demonstrated over the past 3 years, serious game development continues to flourish as a vehicle for formal and informal health education. How best to characterize a “serious” game remains somewhat elusive in the literature. Many researchers and practitioners view serious games as capitalizing on computer technology and state-of-the-art video graphics as an enjoyable means by which to provide and promote instruction and training, or to facilitate attitude change among its players. We invited four distinguished researchers and practitioners to further discuss with us how they view the characteristics of serious games for health, how those characteristics differ from those for academic purposes, the challenges posed for serious game development among players of different ages, and next steps for the development and empirical examination of the effectiveness of serious games for players' psychological and physical well-being.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Core, Mark; Lane, H. Chad; Traum, David
Intelligent Tutoring Support for Learners Interacting with Virtual Humans Book Section
In: Design Recommendations for Intelligent Tutoring Systems, vol. 2, pp. 249–257, 2014, ISBN: 978-0-9893923-2-7.
@incollection{core_intelligent_2014,
title = {Intelligent Tutoring Support for Learners Interacting with Virtual Humans},
author = {Mark Core and H. Chad Lane and David Traum},
url = {http://books.google.com/books?hl=en&lr=&id=BNWEBAAAQBAJ&oi=fnd&pg=PR2&dq=+Design+Recommendations+for+Intelligent+Tutoring+Systems,+volume+2&ots=jIk3zyGi4M&sig=qb_hc4KKE3-rMh2mrs8WkxBicG4#v=onepage&q&f=false},
isbn = {978-0-9893923-2-7},
year = {2014},
date = {2014-06-01},
booktitle = {Design Recommendations for Intelligent Tutoring Systems},
volume = {2},
pages = {249–257},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Gordon, Andrew; Core, Mark; Kang, Sin-Hwa; Wang, Catherine; Wienberg, Christopher
Civilian Analogs of Army Tasks: Supporting Pedagogical Storytelling Across Domains Journal Article
In: Proceedings of the 11th International Conference of the Learning Sciences, 2014.
@article{gordon_civilian_2014,
title = {Civilian Analogs of Army Tasks: Supporting Pedagogical Storytelling Across Domains},
author = {Andrew Gordon and Mark Core and Sin-Hwa Kang and Catherine Wang and Christopher Wienberg},
url = {http://ict.usc.edu/pubs/Civilian%20Analogs%20of%20Army%20Tasks%20-%20Supporting%20Pedagogical%20Storytelling%20Across%20Domains.pdf},
year = {2014},
date = {2014-06-01},
journal = {Proceedings of the 11th International Conference of the Learning Sciences},
abstract = {Storytelling is the most basic means by which people learn from the experiences of others. Advances in educational technologies offer new opportunities and experiences for learners, but risk losing the natural forms of pedagogical storytelling afforded by face-to-face teacher-student discussion. In this paper, we present a technology-supported solution to the problem of curating and algorithmically delivering relevant stories to learners in computer-based learning environments. Our approach is to mine public weblogs for textual narratives related to specific activity contexts, both inside and outside the domain of the target skillset. These stories are then linked directly to task representations in the learner model of an intelligent tutoring system, and delivered to learners along with other tutoring guidance. We demonstrate our approach to curating stories by creating collections of narratives that are analogous to tactical tasks of the U.S. Army, and evaluate the difficulty of incorporating these stories into intelligent tutoring systems.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hill, Randall W.
Virtual Reality and Leadership Development Book Section
In: Using Experience to Develop Leadership Talent: How Organizations Leverage On-The-Job Development, pp. 286–312, John Wiley & Sons, Inc., 2014, ISBN: 978-1-118-76783-2.
@incollection{hill_virtual_2014,
title = {Virtual Reality and Leadership Development},
author = {Randall W. Hill},
url = {http://www.amazon.com/dp/1118767837/ref=cm_sw_su_dp},
isbn = {978-1-118-76783-2},
year = {2014},
date = {2014-03-01},
booktitle = {Using Experience to Develop Leadership Talent: How Organizations Leverage On-The-Job Development},
pages = {286–312},
publisher = {John Wiley & Sons, Inc.},
series = {J-B SIOP Professional Practice Series (Book 1)},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Chaudhri, Vinay K.; Lane, H. Chad; Gunning, Dave; Roschelle, Jeremy
Intelligent Learning Technologies: Applications of Artificial Intelligence to Contemporary and Emerging Educational Challenges Journal Article
In: AI Magazine, vol. 34, no. 3, pp. 10–12, 2013.
@article{chaudhri_intelligent_2013,
title = {Intelligent Learning Technologies: Applications of Artificial Intelligence to Contemporary and Emerging Educational Challenges},
author = {Vinay K. Chaudhri and H. Chad Lane and Dave Gunning and Jeremy Roschelle},
url = {http://www.aaai.org/ojs/index.php/aimagazine/issue/view/203/showToc},
year = {2013},
date = {2013-12-01},
journal = {AI Magazine},
volume = {34},
number = {3},
pages = {10–12},
abstract = {This special issue of AI Magazine presents articles on some of the most interesting projects at the intersection of AI and Education. Included are articles on integrated systems such as virtual humans, an intelligent textbook, and a game-based learning environment, as well as technology-focused components such as student models and data mining. The issue concludes with an article summarizing the contemporary and emerging challenges at the intersection of AI and education.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lane, H. Chad; Hays, Matthew Jensen; Core, Mark G.; Auerbach, Daniel
Learning intercultural communication skills with virtual humans: Feedback and fidelity. Journal Article
In: Journal of Educational Psychology, vol. 105, no. 4, pp. 1026–1035, 2013, ISSN: 1939-2176, 0022-0663.
@article{lane_learning_2013,
title = {Learning intercultural communication skills with virtual humans: Feedback and fidelity.},
author = {H. Chad Lane and Matthew Jensen Hays and Mark G. Core and Daniel Auerbach},
url = {http://doi.apa.org/getdoi.cfm?doi=10.1037/a0031506},
doi = {10.1037/a0031506},
issn = {1939-2176, 0022-0663},
year = {2013},
date = {2013-11-01},
journal = {Journal of Educational Psychology},
volume = {105},
number = {4},
pages = {1026–1035},
abstract = {In the context of practicing intercultural communication skills, we investigated the role of fidelity in a game-based, virtual learning environment as well as the role of feedback delivered by an intelligent tutoring system. In 2 experiments, we compared variations on the game interface, use of the tutoring system, and the form of the feedback. Our findings suggest that for learning basic intercultural communicative skills, a 3-dimensional (3-D) interface with animation and sound produced equivalent learning to a more static 2-D interface. However, learners took significantly longer to analyze and respond to the actions of animated virtual humans, suggesting a deeper engagement. We found large gains in learning across conditions. There was no differential effect with the tutor engaged, but it was found to have a positive impact on learner success in a transfer task. This difference was most pronounced when the feedback was delivered in a more general form versus a concrete style.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lane, H. Chad; Cahill, Clara; Foutz, Susan; Auerbach, Daniel; Noren, Dan; Lussenhop, Catherine; Swartout, William
The Effects of a Pedagogical Agent for Informal Science Education on Learner Behaviors and Self-efficacy Proceedings Article
In: Artificial Intelligence in Education, pp. 309–318, Memphis, TN, 2013, ISBN: 978-3-642-39111-8.
@inproceedings{lane_effects_2013,
title = {The Effects of a Pedagogical Agent for Informal Science Education on Learner Behaviors and Self-efficacy},
author = {H. Chad Lane and Clara Cahill and Susan Foutz and Daniel Auerbach and Dan Noren and Catherine Lussenhop and William Swartout},
url = {http://ict.usc.edu/pubs/The%20Effects%20of%20a%20Pedagogical%20Agent%20for%20Informal%20Science%20Education%20on%20Learner%20Behaviors%20and%20Self-efficacy.pdf},
doi = {10.1007/978-3-642-39112-5_32},
isbn = {978-3-642-39111-8},
year = {2013},
date = {2013-07-01},
booktitle = {Artificial Intelligence in Education},
volume = {7926},
pages = {309–318},
address = {Memphis, TN},
series = {Lecture Notes in Computer Science},
abstract = {We describe Coach Mike, an animated pedagogical agent for informal computer science education, and report findings from two experiments that provide initial evidence for the efficacy of the system. In the first study, we found that Coach Mike’s presence led to 20% longer holding times, increased acceptance of programming challenges, and reduced misuse of the exhibit, but had limited cumulative impact on attitudes, awareness, and knowledge beyond what the host exhibit already achieved. In the second study, we compared two different versions of Coach Mike and found that the use of enthusiasm and self-regulatory feedback led to greater self-efficacy for programming.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hays, Matthew Jensen; Lane, H. Chad; Auerbach, Daniel
Must Feedback Disrupt Presence in Serious Games? Proceedings Article
In: Workshop on Formative Feedback in Interactive Learning Environments at the International Conference on Artificial Intelligence in Education, Memphis, TN, 2013.
@inproceedings{hays_must_2013,
title = {Must Feedback Disrupt Presence in Serious Games?},
author = {Matthew Jensen Hays and H. Chad Lane and Daniel Auerbach},
url = {http://ict.usc.edu/pubs/Must%20Feedback%20Disrupt%20Presence%20in%20Serious%20Games.pdf},
year = {2013},
date = {2013-07-01},
booktitle = {Workshop on Formative Feedback in Interactive Learning Environments at the International Conference on Artificial Intelligence in Education},
address = {Memphis, TN},
abstract = {Serious games are generally designed with two goals in mind: promoting learning and creating compelling and engaging experiences (sometimes termed a sense of presence). Presence itself is believed to promote learning, but serious games often attempt to further increase pedagogical value. One way to do so is to use an intelligent tutoring system (ITS) to provide feedback during gameplay. Some researchers have expressed concern that, because feedback from an ITS is often extrinsic (i.e., it operates outside of the primary game mechanic), attending to it disrupts players’ sense of presence. As a result, learning may be unintentionally hindered by an ITS. However, the most beneficial conditions of instruction are often counterintuitive; in this paper, we challenge the assumption that feedback during learning hinders sense of presence. Across three experiments, we examined how an ITS that provided extrinsic feedback during a serious game affected presence. Across different modalities and conditions, we found that feedback and other ITS features do not always affect presence. Our results suggest that it is possible to provide extrinsic feedback in a serious game without detracting from the immersive power of the game itself.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Woolf, Beverly Park; Lane, H. Chad; Chaudhri, Vinay K.; Kolodner, Janet L.
AI Grand Challenges for Education Journal Article
In: AI Magazine; Special Issue on Intelligent Learning Technologies, 2013.
@article{woolf_ai_2013,
title = {AI Grand Challenges for Education},
author = {Beverly Park Woolf and H. Chad Lane and Vinay K. Chaudhri and Janet L. Kolodner},
url = {http://ict.usc.edu/pubs/AI%20Grand%20Challenges%20for%20Education.pdf},
doi = {10.1609/aimag.v34i4.2490},
year = {2013},
date = {2013-06-01},
journal = {AI Magazine; Special Issue on Intelligent Learning Technologies},
abstract = {This article focuses on contributions that AI can make to address long-term educational goals. It describes five challenges that would support: (1) mentors for every learner; (2) learning twenty-first century skills; (3) interaction data to support learning; (4) universal access to global classrooms; and (5) lifelong and life-wide learning. A vision and brief research agenda are described for each challenge along with goals that lead to access to global educational resources and the reuse and sharing of digital educational resources. Instructional systems with AI technology are described that currently support richer experiences for learners and supply researchers with new opportunities to analyze vast data sets of instructional behavior from big databases, containing elements of learning, affect, motivation, and social interaction. Personalized learning is described using computational tools that enhance student and group experience, reflection, and analysis, and supply data for novel theory development.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Hays, Matthew Jensen; Kornell, Nate; Bjork, Robert A.
When and Why a Failed Test Potentiates the Effectiveness of Subsequent Study Journal Article
In: Journal of Experimental Psychology: Learning, Memory, and Cognition, vol. 39, no. 1, pp. 290–296, 2013.
@article{hays_when_2013,
title = {When and Why a Failed Test Potentiates the Effectiveness of Subsequent Study},
author = {Matthew Jensen Hays and Nate Kornell and Robert A. Bjork},
url = {http://ict.usc.edu/pubs/When%20and%20Why%20a%20Failed%20Test%20Potentiates%20the%20Effectiveness%20of%20Subsequent%20Study.pdf},
year = {2013},
date = {2013-01-01},
journal = {Journal of Experimental Psychology: Learning, Memory, and Cognition},
volume = {39},
number = {1},
pages = {290–296},
abstract = {Teachers and trainers often try to prevent learners from making errors, but recent findings (e.g., Kornell, Hays, & Bjork, 2009) have demonstrated that tests can potentiate subsequent learning even when the correct answer is difficult or impossible to generate (e.g., “What is Nate Kornell’s middle name?”). In three experiments, we explored when and why a failed test enhances learning. We found that failed tests followed by immediate feedback produced greater retention than did a presentation-only condition. Failed tests followed by delayed feedback, by contrast, did not produce such a benefit—except when the direction of the final test was reversed (i.e., the participants were provided with the target and had to produce the original cue). Our findings suggest that generating an incorrect response to a cue both activates the semantic network associated with the cue and suppresses the correct response. These processes appear to have two consequences: If feedback is presented immediately, the semantic activation enhances the mapping of the cue to the correct response; if feedback is presented at a delay, the prior suppression boosts the learning of the suppressed response.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}