{ "id": "2102.08078", "version": "v1", "published": "2021-02-16T10:59:28.000Z", "updated": "2021-02-16T10:59:28.000Z", "title": "Restore from Restored: Single-image Inpainting", "authors": [ "Eun Hye Lee", "Jeong Mu Kim", "Ji Su Kim", "Tae Hyun Kim" ], "categories": [ "cs.CV" ], "abstract": "Recent image inpainting methods show promising results due to the power of deep learning, which can explore external information available from a large training dataset. However, many state-of-the-art inpainting networks are still limited in exploiting internal information available in the given input image at test time. To mitigate this problem, in this work we present a novel and efficient self-supervised fine-tuning algorithm that can adapt the parameters of fully pretrained inpainting networks without using a ground-truth clean image. We upgrade the parameters of the pretrained networks by utilizing existing self-similar patches within the given input image without changing network architectures. Qualitative and quantitative experimental results demonstrate the superiority of the proposed algorithm and we achieve state-of-the-art inpainting results on numerous publicly available benchmark datasets.", "revisions": [ { "version": "v1", "updated": "2021-02-16T10:59:28.000Z" } ], "analyses": { "keywords": [ "single-image inpainting", "input image", "quantitative experimental results demonstrate", "achieve state-of-the-art inpainting results", "ground-truth clean image" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }