{ "id": "1604.04428", "version": "v1", "published": "2016-04-15T11:07:45.000Z", "updated": "2016-04-15T11:07:45.000Z", "title": "The Artificial Mind's Eye: Resisting Adversarials for Convolutional Neural Networks using Internal Projection", "authors": [ "Harm Berntsen", "Wouter Kuijper", "Tom Heskes" ], "comment": "Under review as a conference paper at ECML PKDD 2016", "categories": [ "cs.LG", "cs.NE" ], "abstract": "We introduce a novel type of artificial neural network structure and training procedure that results in networks that are provably, quantitatively more robust to adversarial samples than classical, end-to-end trained classifiers. The main idea of our approach is to force the network to make predictions on what the given instance of the class under consideration would look like and subsequently test those predictions. By forcing the network to redraw the relevant parts of the image and subsequently comparing this new image to the original, we are having the network give a 'proof' of the presence of the object.", "revisions": [ { "version": "v1", "updated": "2016-04-15T11:07:45.000Z" } ], "analyses": { "keywords": [ "convolutional neural networks", "artificial mind's eye", "internal projection", "resisting adversarials", "artificial neural network structure" ], "tags": [ "conference paper" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable", "adsabs": "2016arXiv160404428B" } } }