{ "id": "1901.02104", "version": "v1", "published": "2019-01-07T23:33:14.000Z", "updated": "2019-01-07T23:33:14.000Z", "title": "On the effect of the activation function on the distribution of hidden nodes in a deep network", "authors": [ "Philip M. Long", "Hanie Sedghi" ], "categories": [ "cs.LG", "cs.AI", "cs.NE", "math.ST", "stat.ML", "stat.TH" ], "abstract": "We analyze the joint probability distribution on the lengths of the vectors of hidden variables in different layers of a fully connected deep network, when the weights and biases are chosen randomly according to Gaussian distributions, and the input is in $\\{ -1, 1\\}^N$. We show that, if the activation function $\\phi$ satisfies a minimal set of assumptions, satisfied by all activation functions that we know that are used in practice, then, as the width of the network gets large, the `length process' converges in probability to a length map that is determined as a simple function of the variances of the random weights and biases, and the activation function $\\phi$. We also show that this convergence may fail for $\\phi$ that violate our assumptions.", "revisions": [ { "version": "v1", "updated": "2019-01-07T23:33:14.000Z" } ], "analyses": { "keywords": [ "activation function", "hidden nodes", "joint probability distribution", "length map", "hidden variables" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }