{ "id": "1705.08475", "version": "v1", "published": "2017-05-23T18:48:20.000Z", "updated": "2017-05-23T18:48:20.000Z", "title": "Formal Guarantees on the Robustness of a Classifier against Adversarial Manipulation", "authors": [ "Matthias Hein", "Maksym Andriushchenko" ], "categories": [ "cs.LG", "cs.AI", "cs.CV", "stat.ML" ], "abstract": "Recent work has shown that state-of-the-art classifiers are quite brittle, in the sense that a small adversarial change of an originally with high confidence correctly classified input leads to a wrong classification again with high confidence. This raises concerns that such classifiers are vulnerable to attacks and calls into question their usage in safety-critical systems. We show in this paper for the first time formal guarantees on the robustness of a classifier by giving instance-specific lower bounds on the norm of the input manipulation required to change the classifier decision. Based on this analysis we propose the Cross-Lipschitz regularization functional. We show that using this form of regularization in kernel methods resp. neural networks improves the robustness of the classifier without any loss in prediction performance.", "revisions": [ { "version": "v1", "updated": "2017-05-23T18:48:20.000Z" } ], "analyses": { "keywords": [ "classifier", "adversarial manipulation", "robustness", "first time formal guarantees", "high confidence correctly classified input" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }