{ "id": "1406.6812", "version": "v1", "published": "2014-06-26T08:57:05.000Z", "updated": "2014-06-26T08:57:05.000Z", "title": "Online learning in MDPs with side information", "authors": [ "Yasin Abbasi-Yadkori", "Gergely Neu" ], "categories": [ "cs.LG", "stat.ML" ], "abstract": "We study online learning of finite Markov decision process (MDP) problems when a side information vector is available. The problem is motivated by applications such as clinical trials, recommendation systems, etc. Such applications have an episodic structure, where each episode corresponds to a patient/customer. Our objective is to compete with the optimal dynamic policy that can take side information into account. We propose a computationally efficient algorithm and show that its regret is at most $O(\\sqrt{T})$, where $T$ is the number of rounds. To best of our knowledge, this is the first regret bound for this setting.", "revisions": [ { "version": "v1", "updated": "2014-06-26T08:57:05.000Z" } ], "analyses": { "keywords": [ "online learning", "finite markov decision process", "first regret bound", "side information vector", "optimal dynamic policy" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable", "adsabs": "2014arXiv1406.6812A" } } }