{ "id": "2010.11029", "version": "v1", "published": "2020-10-21T14:20:05.000Z", "updated": "2020-10-21T14:20:05.000Z", "title": "Learning Curves for Analysis of Deep Networks", "authors": [ "Derek Hoiem", "Tanmay Gupta", "Zhizhong Li", "Michal M. Shlapentokh-Rothman" ], "categories": [ "cs.LG", "cs.CV", "stat.ML" ], "abstract": "A learning curve models a classifier's test error as a function of the number of training samples. Prior works show that learning curves can be used to select model parameters and extrapolate performance. We investigate how to use learning curves to analyze the impact of design choices, such as pre-training, architecture, and data augmentation. We propose a method to robustly estimate learning curves, abstract their parameters into error and data-reliance, and evaluate the effectiveness of different parameterizations. We also provide several interesting observations based on learning curves for a variety of image classification models.", "revisions": [ { "version": "v1", "updated": "2020-10-21T14:20:05.000Z" } ], "analyses": { "keywords": [ "deep networks", "image classification models", "classifier's test error", "select model parameters", "robustly estimate learning curves" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }