{ "id": "1901.08121", "version": "v1", "published": "2019-01-23T20:31:54.000Z", "updated": "2019-01-23T20:31:54.000Z", "title": "Sitatapatra: Blocking the Transfer of Adversarial Samples", "authors": [ "Ilia Shumailov", "Xitong Gao", "Yiren Zhao", "Robert Mullins", "Ross Anderson", "Cheng-Zhong Xu" ], "categories": [ "cs.LG", "cs.CR", "stat.ML" ], "abstract": "Convolutional Neural Networks (CNNs) are widely used to solve classification tasks in computer vision. However, they can be tricked into misclassifying specially crafted `adversarial' samples -- and samples built to trick one model often work alarmingly well against other models trained on the same task. In this paper we introduce Sitatapatra, a system designed to block the transfer of adversarial samples. It diversifies neural networks using a key, as in cryptography, and provides a mechanism for detecting attacks. What's more, when adversarial samples are detected they can typically be traced back to the individual device that was used to develop them. The run-time overheads are minimal permitting the use of Sitatapatra on constrained systems.", "revisions": [ { "version": "v1", "updated": "2019-01-23T20:31:54.000Z" } ], "analyses": { "keywords": [ "adversarial samples", "sitatapatra", "convolutional neural networks", "diversifies neural networks", "classification tasks" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }