{ "id": "1511.01304", "version": "v1", "published": "2015-11-04T12:34:10.000Z", "updated": "2015-11-04T12:34:10.000Z", "title": "Dictionary descent in optimization", "authors": [ "Vladimir Temlyakov" ], "comment": "arXiv admin note: text overlap with arXiv:1206.0392", "categories": [ "stat.ML", "math.NA" ], "abstract": "The problem of convex optimization is studied. Usually in convex optimization the minimization is over a d-dimensional domain. Very often the convergence rate of an optimization algorithm depends on the dimension d. The algorithms studied in this paper utilize dictionaries instead of a canonical basis used in the coordinate descent algorithms. We show how this approach allows us to reduce dimensionality of the problem. Also, we investigate which properties of a dictionary are beneficial for the convergence rate of typical greedy-type algorithms.", "revisions": [ { "version": "v1", "updated": "2015-11-04T12:34:10.000Z" } ], "analyses": { "keywords": [ "dictionary descent", "convex optimization", "convergence rate", "coordinate descent algorithms", "reduce dimensionality" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }