{ "id": "1805.07984", "version": "v1", "published": "2018-05-21T10:58:10.000Z", "updated": "2018-05-21T10:58:10.000Z", "title": "Adversarial Attacks on Classification Models for Graphs", "authors": [ "Daniel Zügner", "Amir Akbarnejad", "Stephan Günnemann" ], "comment": "Accepted as a full paper at KDD 2018 on May 6, 2018", "categories": [ "stat.ML", "cs.CR", "cs.LG" ], "abstract": "Deep learning models for graphs have achieved strong performance for the task of node classification. Despite their proliferation, currently there is no study of their robustness to adversarial attacks. Yet, in domains where they are likely to be used, e.g. the web, adversaries are common. Can deep learning models for graphs be easily fooled? In this work, we introduce the first study of adversarial attacks on attributed graphs, specifically focusing on models exploiting ideas of graph convolutions. We generate adversarial perturbations targeting the node's features and the graph structure, thus, taking the dependencies between instances in account. To cope with the underlying discrete domain we propose an efficient algorithm Nettack exploiting incremental computations. Our experimental study shows that accuracy of node classification significantly drops even when performing only few perturbations. Even more, our attacks are transferable: the learned attacks generalize to other state-of-the-art node classification models.", "revisions": [ { "version": "v1", "updated": "2018-05-21T10:58:10.000Z" } ], "analyses": { "keywords": [ "adversarial attacks", "deep learning models", "algorithm nettack exploiting incremental computations", "efficient algorithm nettack exploiting incremental", "state-of-the-art node classification models" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }