{ "id": "1907.02893", "version": "v1", "published": "2019-07-05T15:26:26.000Z", "updated": "2019-07-05T15:26:26.000Z", "title": "Invariant Risk Minimization", "authors": [ "Martin Arjovsky", "Léon Bottou", "Ishaan Gulrajani", "David Lopez-Paz" ], "categories": [ "stat.ML", "cs.AI", "cs.LG" ], "abstract": "We introduce Invariant Risk Minimization (IRM), a learning paradigm to estimate invariant correlations across multiple training distributions. To achieve this goal, IRM learns a data representation such that the optimal classifier, on top of that data representation, matches for all training distributions. Through theory and experiments, we show how the invariances learned by IRM relate to the causal structures governing the data and enable out-of-distribution generalization.", "revisions": [ { "version": "v1", "updated": "2019-07-05T15:26:26.000Z" } ], "analyses": { "keywords": [ "invariant risk minimization", "data representation", "estimate invariant correlations", "irm learns", "multiple training distributions" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }