{ "id": "2203.03351", "version": "v1", "published": "2022-03-07T13:00:03.000Z", "updated": "2022-03-07T13:00:03.000Z", "title": "OFFO minimization algorithms for second-order optimality and their complexity", "authors": [ "S. Gratton", "Ph. L. Toint" ], "categories": [ "math.OC" ], "abstract": "An Adagrad-inspired class of algorithms for smooth unconstrained optimization is presented in which the objective function is never evaluated and yet the gradient norms decrease at least as fast as $\\calO(1/\\sqrt{k+1})$ while second-order optimality measures converge to zero at least as fast as $\\calO(1/(k+1)^{1/3})$. This latter rate of convergence is shown to be essentially sharp and is identical to that known for more standard algorithms (like trust-region or adaptive-regularization methods) using both function and derivatives' evaluations. A related \"divergent stepsize\" method is also described, whose essentially sharp rate of convergence is slighly inferior. It is finally discussed how to obtain weaker second-order optimality guarantees at a (much) reduced computional cost.", "revisions": [ { "version": "v1", "updated": "2022-03-07T13:00:03.000Z" } ], "analyses": { "subjects": [ "90C60", "90C30", "90C26", "90C15", "49N30", "F.2.1", "G.1.6" ], "keywords": [ "offo minimization algorithms", "complexity", "weaker second-order optimality guarantees", "second-order optimality measures converge", "gradient norms decrease" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }