{ "id": "1805.09077", "version": "v1", "published": "2018-05-23T12:04:30.000Z", "updated": "2018-05-23T12:04:30.000Z", "title": "Accelerating the Fast Gradient Method", "authors": [ "Ross Drummond", "Stephen Duncan" ], "comment": "8 pages. 6 figures", "categories": [ "math.OC" ], "abstract": "A set of accelerated first order optimisation algorithms for minimising strictly convex functions are proposed. Using only gradient information, these algorithms can achieve a worst-case convergence rate proportional to $ 1-\\left(\\frac{\\mu}{L}\\right)^{1/N}$ for $N = 1, 2, 3, \\dots$ and can exceed that of the fast gradient method. However, the improvement in the rate of convergence comes at the expense of a loss of robustness. Robustness is recovered by using an adaptive restarting controller that guarantees iterate convergence and recovers the faster convergence rates.", "revisions": [ { "version": "v1", "updated": "2018-05-23T12:04:30.000Z" } ], "analyses": { "keywords": [ "fast gradient method", "accelerated first order optimisation algorithms", "worst-case convergence rate proportional", "guarantees iterate convergence", "faster convergence rates" ], "note": { "typesetting": "TeX", "pages": 8, "language": "en", "license": "arXiv", "status": "editable" } } }