{ "id": "2211.13445", "version": "v1", "published": "2022-11-24T07:12:51.000Z", "updated": "2022-11-24T07:12:51.000Z", "title": "Delving into Out-of-Distribution Detection with Vision-Language Representations", "authors": [ "Yifei Ming", "Ziyang Cai", "Jiuxiang Gu", "Yiyou Sun", "Wei Li", "Yixuan Li" ], "comment": "36th Conference on Neural Information Processing Systems (NeurIPS 2022)", "categories": [ "cs.CV", "cs.AI", "cs.LG" ], "abstract": "Recognizing out-of-distribution (OOD) samples is critical for machine learning systems deployed in the open world. The vast majority of OOD detection methods are driven by a single modality (e.g., either vision or language), leaving the rich information in multi-modal representations untapped. Inspired by the recent success of vision-language pre-training, this paper enriches the landscape of OOD detection from a single-modal to a multi-modal regime. Particularly, we propose Maximum Concept Matching (MCM), a simple yet effective zero-shot OOD detection method based on aligning visual features with textual concepts. We contribute in-depth analysis and theoretical insights to understand the effectiveness of MCM. Extensive experiments demonstrate that MCM achieves superior performance on a wide variety of real-world tasks. MCM with vision-language features outperforms a common baseline with pure visual features on a hard OOD task with semantically similar classes by 13.1% (AUROC). Code is available at https://github.com/deeplearning-wisc/MCM.", "revisions": [ { "version": "v1", "updated": "2022-11-24T07:12:51.000Z" } ], "analyses": { "keywords": [ "out-of-distribution detection", "vision-language representations", "effective zero-shot ood detection method", "mcm achieves superior performance", "vision-language features outperforms" ], "tags": [ "conference paper" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }