{ "id": "2207.08200", "version": "v1", "published": "2022-07-17T15:10:25.000Z", "updated": "2022-07-17T15:10:25.000Z", "title": "Uncertainty Calibration in Bayesian Neural Networks via Distance-Aware Priors", "authors": [ "Gianluca Detommaso", "Alberto Gasparin", "Andrew Wilson", "Cedric Archambeau" ], "categories": [ "stat.ML", "cs.AI", "cs.LG" ], "abstract": "As we move away from the data, the predictive uncertainty should increase, since a great variety of explanations are consistent with the little available information. We introduce Distance-Aware Prior (DAP) calibration, a method to correct overconfidence of Bayesian deep learning models outside of the training domain. We define DAPs as prior distributions over the model parameters that depend on the inputs through a measure of their distance from the training set. DAP calibration is agnostic to the posterior inference method, and it can be performed as a post-processing step. We demonstrate its effectiveness against several baselines in a variety of classification and regression problems, including benchmarks designed to test the quality of predictive distributions away from the data.", "revisions": [ { "version": "v1", "updated": "2022-07-17T15:10:25.000Z" } ], "analyses": { "keywords": [ "bayesian neural networks", "distance-aware prior", "uncertainty calibration", "bayesian deep learning models outside", "posterior inference method" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }