{ "id": "1511.03243", "version": "v1", "published": "2015-11-10T20:02:48.000Z", "updated": "2015-11-10T20:02:48.000Z", "title": "Black-box $α$-divergence Minimization", "authors": [ "José Miguel Hernández-Lobato", "Yingzhen Li", "Daniel Hernández-Lobato", "Thang Bui", "Richard E. Turner" ], "comment": "To be presented at NIPS workshops on Advances in Approximate Bayesian Inference and Black Box Learning and Inference", "categories": [ "stat.ML" ], "abstract": "We present black-box alpha (BB-$\\alpha$), an approximate inference method based on the minimization of $\\alpha$-divergences between probability distributions. BB-$\\alpha$ scales to large datasets since it can be implemented using stochastic gradient descent. BB-$\\alpha$ can be applied to complex probabilistic models with little effort since it only requires as input the likelihood function and its gradients. These gradients can be easily obtained using automatic differentiation. By tuning the parameter $\\alpha$, we are able to interpolate between variational Bayes and an expectation propagation like algorithm. Experiments on probit and neural network regression problems illustrate the accuracy of the posterior approximations obtained with BB-$\\alpha$.", "revisions": [ { "version": "v1", "updated": "2015-11-10T20:02:48.000Z" } ], "analyses": { "keywords": [ "divergence minimization", "neural network regression problems illustrate", "complex probabilistic models", "stochastic gradient descent", "approximate inference method" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable", "adsabs": "2015arXiv151103243H" } } }