@article{Yuan_Recker_2015,
  title        = {Not All Rubrics Are Equal: A Review of Rubrics for Evaluating the Quality of Open Educational Resources},
  author       = {Yuan, Min and Recker, Mimi},
  journal      = {The International Review of Research in Open and Distributed Learning},
  volume       = {16},
  number       = {5},
  year         = {2015},
  month        = sep,
  url          = {https://www.irrodl.org/index.php/irrodl/article/view/2389},
  doi          = {10.19173/irrodl.v16i5.2389},
  abstract     = {The rapid growth in Internet technologies has led to a proliferation in the number of Open Educational Resources (OER), making the evaluation of OER quality a pressing need. In response, a number of rubrics have been developed to help guide the evaluation of OER quality; these, however, have had little accompanying evaluation of their utility or usability. This article presents a systematic review of 14 existing quality rubrics developed for OER evaluation. These quality rubrics are described and compared in terms of content, development processes, and application contexts, as well as the kind of support they provide for users. Results from this research reveal a great diversity between these rubrics, providing users with a wide variety of options. Moreover, the widespread lack of rating scales, scoring guides, empirical testing, and iterative revisions for many of these rubrics raises reliability and validity concerns. Finally, rubrics implement varying amounts of user support, affecting their overall usability and educational utility.}
}