{ "id": "2210.09665", "version": "v1", "published": "2022-10-18T08:07:07.000Z", "updated": "2022-10-18T08:07:07.000Z", "title": "On convergence of a $q$-random coordinate constrained algorithm for non-convex problems", "authors": [ "Alireza Ghaffari-Hadigheh", "Lennart Sinjorgo", "Renata Sotirov" ], "comment": "18 pages, 3 tables", "categories": [ "math.OC" ], "abstract": "We propose a random coordinate descent algorithm for optimizing a non-convex objective function subject to one linear constraint and simple bounds on the variables. Although it is common use to update only two random coordinates simultaneously in each iteration of a coordinate descent algorithm, our algorithm allows updating arbitrary number of coordinates. We provide a proof of convergence of the algorithm. The convergence rate of the algorithm improves when we update more coordinates per iteration. Numerical experiments on large scale instances of different optimization problems show the benefit of updating many coordinates simultaneously.", "revisions": [ { "version": "v1", "updated": "2022-10-18T08:07:07.000Z" } ], "analyses": { "subjects": [ "90C06", "90C30", "90C26" ], "keywords": [ "random coordinate constrained algorithm", "non-convex problems", "convergence", "random coordinate descent algorithm", "non-convex objective function subject" ], "note": { "typesetting": "TeX", "pages": 18, "language": "en", "license": "arXiv", "status": "editable" } } }