{ "id": "2005.00130", "version": "v1", "published": "2020-04-30T22:34:37.000Z", "updated": "2020-04-30T22:34:37.000Z", "title": "Hide-and-Seek: A Template for Explainable AI", "authors": [ "Thanos Tagaris", "Andreas Stafylopatis" ], "comment": "24 pages, 14 figures. Submitted on a special issue for Explainable AI, on Elsevier's \"Artificial Intelligence\"", "categories": [ "cs.LG", "cs.AI", "stat.ML" ], "abstract": "Lack of transparency has been the Achilles heel of Neural Networks and their wider adoption in industry. Despite significant interest this shortcoming has not been adequately addressed. This study proposes a novel framework called Hide-and-Seek (HnS) for training Interpretable Neural Networks and establishes a theoretical foundation for exploring and comparing similar ideas. Extensive experimentation indicates that a high degree of interpretability can be imputed into Neural Networks, without sacrificing their predictive power.", "revisions": [ { "version": "v1", "updated": "2020-04-30T22:34:37.000Z" } ], "analyses": { "subjects": [ "62M45", "I.2.6" ], "keywords": [ "explainable ai", "hide-and-seek", "despite significant interest", "wider adoption", "achilles heel" ], "note": { "typesetting": "TeX", "pages": 24, "language": "en", "license": "arXiv", "status": "editable" } } }