{ "id": "2103.14987", "version": "v1", "published": "2021-03-27T20:27:47.000Z", "updated": "2021-03-27T20:27:47.000Z", "title": "Quadratic Convergence of Newton's Method for 0/1 Loss Optimization", "authors": [ "Shenglong Zhou", "Lili Pan", "Naihua Xiu", "Houduo Qi" ], "categories": [ "math.OC" ], "abstract": "It has been widely recognized that the 0/1 loss function is one of the most natural choices for modelling classification errors, and it has a wide range of applications including support vector machines and 1-bit compressed sensing. Due to the combinatorial nature of the 0/1 loss function, methods based on convex relaxations or smoothing approximations have dominated the existing research and are often able to provide approximate solutions of good quality. However, those methods are not optimizing the 0/1 loss function directly and hence no optimality has been established for the original problem. This paper aims to study the optimality conditions of the 0/1 function minimization, and for the first time to develop Newton's method that directly optimizes the 0/1 function with a local quadratic convergence under reasonable conditions. Extensive numerical experiments demonstrate its superior performance as one would expect from Newton-type methods.ions. Extensive numerical experiments demonstrate its superior performance as one would expect from Newton-type methods.", "revisions": [ { "version": "v1", "updated": "2021-03-27T20:27:47.000Z" } ], "analyses": { "keywords": [ "newtons method", "loss optimization", "loss function", "extensive numerical experiments demonstrate", "superior performance" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }