@inproceedings{cf3a45924531414e8cdc589aa272bd62,
  title     = {Free Response Evaluation via Neural Network for an {IMathAS} System},
  abstract  = {A fully interactive class with mixed reality and simulation learning should provide many free response types for students to learn beyond numerical answers and multiple choice. Essay and string responses in the IMathAS homework system have to be manually graded, making the free response questions difficult to generate instant feedback. The ability to write questions with automatic feedback during active lecture offer improvements to the current systems and provide an opportunity for critical thinking to occur. The following study provides framework for an interpretive neural network to be implemented into any IMathAS system. These responses can be in the form of equations, words and sentences, or pictures. Findings show that correctly trained networks using manually graded artifacts can be more than 90\% accurate in providing feedback to a correct answer in student practice, allowing for lessons that guide students towards correct and well-phrased answers using their own words, and can even assign partial credit. The findings imply that Marzano's taxonomy level of analysis can be reached using the IMathAS system and that critical thinking methods can be directly applied for scoring. When integrated into the existing system, simulation-based or mixed reality homework can have free responses and the grades can be transferred via learning tool interoperability connection into the institutional learning management system for direct scoring in the gradebook.},
  keywords  = {critical thinking, mixed reality education application, neural networks, undergraduate education},
  author    = {Wiggins, Nathanial and Smith, Milton},
  editor    = {Harman, {Thomas L.} and Taqvi, Zafar},
  year      = {2019},
  month     = sep,
  doi       = {10.1109/ISMCR47492.2019.8955695},
  language  = {English},
  series    = {2019 22nd {IEEE} International Symposium on Measurement and Control in Robotics: Robotics for the Benefit of Humanity, {ISMCR} 2019},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  booktitle = {2019 22nd {IEEE} International Symposium on Measurement and Control in Robotics},
  note      = {Funding Information: The Critical Thinking Assessment Test (CAT) developed by the National Science Foundation research under the leadership of Tennessee Tech University has been validated and used nationally [6][7][8]. The test itself is comprised of open-ended responses that can be objectively scored. For the traditional scoring method, each exam was scored and cross-validated by groups of faculty. This time-consuming method ensures consistent grading is utilized across exams so that statistical comparisons are valid. To make the test more realistic for deployment, the CAT has been developed with a neural network for machine learning. The comparison with expert scoring to that of the machine learning yields an error of less than 1.39\% and found the same minimum and maximum values [9]. While this tool helps to measure critical thinking, it does not provide feedback to the student regarding critical thinking and the methods used cannot be practiced without free response feedback. Funding Information: Special thanks to San Jacinto College, Texas Tech University and Wolfram Mathematica for continued support. Publisher Copyright: {\textcopyright} 2019 IEEE.; Conference date: 19-09-2019 Through 21-09-2019},
}