{ "id": "1806.00468", "version": "v1", "published": "2018-06-01T17:58:58.000Z", "updated": "2018-06-01T17:58:58.000Z", "title": "Implicit Bias of Gradient Descent on Linear Convolutional Networks", "authors": [ "Suriya Gunasekar", "Jason Lee", "Daniel Soudry", "Nathan Srebro" ], "categories": [ "cs.LG", "stat.ML" ], "abstract": "We show that gradient descent on full-width linear convolutional networks of depth $L$ converges to a linear predictor related to the $\\ell_{2/L}$ bridge penalty in the frequency domain. This is in contrast to linearly fully connected networks, where gradient descent converges to the hard margin linear support vector machine solution, regardless of depth.", "revisions": [ { "version": "v1", "updated": "2018-06-01T17:58:58.000Z" } ], "analyses": { "keywords": [ "gradient descent", "implicit bias", "linear support vector machine solution", "margin linear support vector machine", "hard margin linear support vector" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }