{ "id": "1905.11213", "version": "v1", "published": "2019-05-27T13:49:08.000Z", "updated": "2019-05-27T13:49:08.000Z", "title": "Provable robustness against all adversarial $l_p$-perturbations for $p\\geq 1$", "authors": [ "Francesco Croce", "Matthias Hein" ], "categories": [ "cs.LG", "cs.CR", "stat.ML" ], "abstract": "In recent years several adversarial attacks and defenses have been proposed. Often seemingly robust models turn out to be non-robust when more sophisticated attacks are used. One way out of this dilemma is provable robustness guarantees. While provably robust models for specific $l_p$-perturbation models have been developed, they are still vulnerable to other $l_q$-perturbations. We propose a new regularization scheme, MMR-Universal, for ReLU networks which enforces robustness wrt $l_1$- and $l_\\infty$-perturbations and show how that leads to provably robust models wrt any $l_p$-norm for $p\\geq 1$.", "revisions": [ { "version": "v1", "updated": "2019-05-27T13:49:08.000Z" } ], "analyses": { "keywords": [ "adversarial", "provably robust models wrt", "enforces robustness wrt", "seemingly robust models turn", "perturbation models" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }