{ "id": "1702.05122", "version": "v1", "published": "2017-02-16T19:27:53.000Z", "updated": "2017-02-16T19:27:53.000Z", "title": "Exact Diffusion for Distributed Optimization and Learning --- Part I: Algorithm Development", "authors": [ "Kun Yuan", "Bicheng Ying", "Xiaochuan Zhao", "Ali H. Sayed" ], "comment": "13 pages; 9 figures; Submitted for publication", "categories": [ "math.OC" ], "abstract": "This work develops a distributed optimization strategy with guaranteed exact convergence for a broad class of left-stochastic combination policies. The resulting exact diffusion strategy is shown in Part II to have a wider stability range and superior convergence performance than the EXTRA strategy. The exact diffusion solution is applicable to non-symmetric left-stochastic combination matrices, while most earlier developments on exact consensus implementations are limited to doubly-stochastic matrices; these latter matrices impose stringent constraints on the network topology. Similar difficulties arise for implementations with right-stochastic policies, which are common in push-sum consensus solutions. The derivation of the exact diffusion strategy in this work relies on reformulating the aggregate optimization problem as a penalized problem and resorting to a diagonally-weighted incremental construction. Detailed stability and convergence analyses are pursued in Part II and are facilitated by examining the evolution of the error dynamics in a transformed domain. Numerical simulations illustrate the theoretical conclusions.", "revisions": [ { "version": "v1", "updated": "2017-02-16T19:27:53.000Z" } ], "analyses": { "keywords": [ "distributed optimization", "algorithm development", "matrices impose stringent constraints", "non-symmetric left-stochastic combination matrices", "left-stochastic combination policies" ], "note": { "typesetting": "TeX", "pages": 13, "language": "en", "license": "arXiv", "status": "editable" } } }