{ "id": "2101.02689", "version": "v1", "published": "2021-01-07T18:51:05.000Z", "updated": "2021-01-07T18:51:05.000Z", "title": "The Effect of Prior Lipschitz Continuity on the Adversarial Robustness of Bayesian Neural Networks", "authors": [ "Arno Blaas", "Stephen J. Roberts" ], "comment": "4 pages, 2 tables, AAAI 2021 Workshop Towards Robust, Secure and Efficient Machine Learning", "categories": [ "stat.ML", "cs.LG" ], "abstract": "It is desirable, and often a necessity, for machine learning models to be robust against adversarial attacks. This is particularly true for Bayesian models, as they are well-suited for safety-critical applications, in which adversarial attacks can have catastrophic outcomes. In this work, we take a deeper look at the adversarial robustness of Bayesian Neural Networks (BNNs). In particular, we consider whether the adversarial robustness of a BNN can be increased by model choices, particularly the Lipschitz continuity induced by the prior. Conducting in-depth analysis on the case of i.i.d., zero-mean Gaussian priors and posteriors approximated via mean-field variational inference, we find evidence that adversarial robustness is indeed sensitive to the prior variance.", "revisions": [ { "version": "v1", "updated": "2021-01-07T18:51:05.000Z" } ], "analyses": { "keywords": [ "bayesian neural networks", "adversarial robustness", "prior lipschitz continuity", "adversarial attacks", "zero-mean gaussian priors" ], "note": { "typesetting": "TeX", "pages": 4, "language": "en", "license": "arXiv", "status": "editable" } } }