{ "id": "1912.03133", "version": "v1", "published": "2019-12-05T04:24:14.000Z", "updated": "2019-12-05T04:24:14.000Z", "title": "Why Should we Combine Training and Post-Training Methods for Out-of-Distribution Detection?", "authors": [ "Aristotelis-Angelos Papadopoulos", "Nazim Shaikh", "Mohammad Reza Rajati" ], "comment": "Preprint, 9 pages. arXiv admin note: text overlap with arXiv:1906.03509", "categories": [ "cs.LG", "cs.CV", "stat.ML" ], "abstract": "Deep neural networks are known to achieve superior results in classification tasks. However, it has been recently shown that they are incapable to detect examples that are generated by a distribution which is different than the one they have been trained on since they are making overconfident prediction for Out-Of-Distribution (OOD) examples. OOD detection has attracted a lot of attention recently. In this paper, we review some of the most seminal recent algorithms in the OOD detection field, we divide those methods into training and post-training and we experimentally show how the combination of the former with the latter can achieve state-of-the-art results in the OOD detection task.", "revisions": [ { "version": "v1", "updated": "2019-12-05T04:24:14.000Z" } ], "analyses": { "keywords": [ "out-of-distribution detection", "post-training methods", "achieve superior results", "achieve state-of-the-art results", "ood detection field" ], "note": { "typesetting": "TeX", "pages": 9, "language": "en", "license": "arXiv", "status": "editable" } } }