{ "id": "1810.09619", "version": "v1", "published": "2018-10-23T01:05:41.000Z", "updated": "2018-10-23T01:05:41.000Z", "title": "Sparse DNNs with Improved Adversarial Robustness", "authors": [ "Yiwen Guo", "Chao Zhang", "Changshui Zhang", "Yurong Chen" ], "comment": "To appear in NIPS2018", "categories": [ "cs.LG", "cs.CR", "cs.CV", "cs.NE", "stat.ML" ], "abstract": "Deep neural networks (DNNs) are computationally/memory-intensive and vulnerable to adversarial attacks, making them prohibitive in some real-world applications. By converting dense models into sparse ones, pruning appears to be a promising solution to reducing the computation/memory cost. This paper studies classification models, especially DNN-based ones, to demonstrate that there exists intrinsic relationships between their sparsity and adversarial robustness. Our analyses reveal, both theoretically and empirically, that nonlinear DNN-based classifiers behave differently under $l_2$ attacks from some linear ones. We further demonstrate that an appropriately higher model sparsity implies better robustness of nonlinear DNNs, whereas over-sparsified models can be more difficult to resist adversarial examples.", "revisions": [ { "version": "v1", "updated": "2018-10-23T01:05:41.000Z" } ], "analyses": { "keywords": [ "adversarial robustness", "sparse dnns", "higher model sparsity implies better", "dnn-based classifiers behave", "model sparsity implies better robustness" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }