{ "id": "1905.06455", "version": "v1", "published": "2019-05-15T22:07:19.000Z", "updated": "2019-05-15T22:07:19.000Z", "title": "On Norm-Agnostic Robustness of Adversarial Training", "authors": [ "Bai Li", "Changyou Chen", "Wenlin Wang", "Lawrence Carin" ], "comment": "4 pages, 2 figures, presented at the ICML 2019 Workshop on Uncertainty and Robustness in Deep Learning. arXiv admin note: text overlap with arXiv:1809.03113", "categories": [ "cs.LG", "cs.CR", "stat.ML" ], "abstract": "Adversarial examples are carefully perturbed in-puts for fooling machine learning models. A well-acknowledged defense method against such examples is adversarial training, where adversarial examples are injected into training data to increase robustness. In this paper, we propose a new attack to unveil an undesired property of the state-of-the-art adversarial training, that is it fails to obtain robustness against perturbations in $\\ell_2$ and $\\ell_\\infty$ norms simultaneously. We discuss a possible solution to this issue and its limitations as well.", "revisions": [ { "version": "v1", "updated": "2019-05-15T22:07:19.000Z" } ], "analyses": { "keywords": [ "adversarial training", "norm-agnostic robustness", "adversarial examples", "state-of-the-art adversarial", "increase robustness" ], "note": { "typesetting": "TeX", "pages": 4, "language": "en", "license": "arXiv", "status": "editable" } } }