{ "id": "1910.06407", "version": "v1", "published": "2019-10-14T20:19:15.000Z", "updated": "2019-10-14T20:19:15.000Z", "title": "FireNet: Real-time Segmentation of Fire Perimeter from Aerial Video", "authors": [ "Jigar Doshi", "Dominic Garcia", "Cliff Massey", "Pablo Llueca", "Nicolas Borensztein", "Michael Baird", "Matthew Cook", "Devaki Raj" ], "comment": "Published at NeurIPS 2019; Workshop on Artificial Intelligence for Humanitarian Assistance and Disaster Response(AI+HADR 2019)", "categories": [ "cs.CV", "cs.LG", "eess.IV" ], "abstract": "In this paper, we share our approach to real-time segmentation of fire perimeter from aerial full-motion infrared video. We start by describing the problem from a humanitarian aid and disaster response perspective. Specifically, we explain the importance of the problem, how it is currently resolved, and how our machine learning approach improves it. To test our models we annotate a large-scale dataset of 400,000 frames with guidance from domain experts. Finally, we share our approach currently deployed in production with inference speed of 20 frames per second and an accuracy of 92 (F1 Score).", "revisions": [ { "version": "v1", "updated": "2019-10-14T20:19:15.000Z" } ], "analyses": { "keywords": [ "real-time segmentation", "aerial video", "aerial full-motion infrared video", "humanitarian aid", "machine learning approach" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }