{ "id": "1706.03301", "version": "v1", "published": "2017-06-11T03:07:42.000Z", "updated": "2017-06-11T03:07:42.000Z", "title": "Neural networks and rational functions", "authors": [ "Matus Telgarsky" ], "comment": "To appear, ICML 2017", "categories": [ "cs.LG", "cs.NE", "stat.ML" ], "abstract": "Neural networks and rational functions efficiently approximate each other. In more detail, it is shown here that for any ReLU network, there exists a rational function of degree $O(\\text{polylog}(1/\\epsilon))$ which is $\\epsilon$-close, and similarly for any rational function there exists a ReLU network of size $O(\\text{polylog}(1/\\epsilon))$ which is $\\epsilon$-close. By contrast, polynomials need degree $\\Omega(\\text{poly}(1/\\epsilon))$ to approximate even a single ReLU. When converting a ReLU network to a rational function as above, the hidden constants depend exponentially on the number of layers, which is shown to be tight; in other words, a compositional representation can be beneficial even for rational functions.", "revisions": [ { "version": "v1", "updated": "2017-06-11T03:07:42.000Z" } ], "analyses": { "keywords": [ "neural networks", "relu network", "rational functions efficiently approximate", "polynomials need degree", "single relu" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }