{ "id": "1802.04865", "version": "v1", "published": "2018-02-13T21:31:36.000Z", "updated": "2018-02-13T21:31:36.000Z", "title": "Learning Confidence for Out-of-Distribution Detection in Neural Networks", "authors": [ "Terrance DeVries", "Graham W. Taylor" ], "categories": [ "stat.ML", "cs.LG" ], "abstract": "Modern neural networks are very powerful predictive models, but they are often incapable of recognizing when their predictions may be wrong. Closely related to this is the task of out-of-distribution detection, where a network must determine whether or not an input is outside of the set on which it is expected to safely perform. To jointly address these issues, we propose a method of learning confidence estimates for neural networks that is simple to implement and produces intuitively interpretable outputs. We demonstrate that on the task of out-of-distribution detection, our technique surpasses recently proposed techniques which construct confidence based on the network's output distribution, without requiring any additional labels or access to out-of-distribution examples. Additionally, we address the problem of calibrating out-of-distribution detectors, where we demonstrate that misclassified in-distribution examples can be used as a proxy for out-of-distribution examples.", "revisions": [ { "version": "v1", "updated": "2018-02-13T21:31:36.000Z" } ], "analyses": { "keywords": [ "out-of-distribution detection", "learning confidence", "out-of-distribution examples", "networks output distribution", "confidence estimates" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }