{ "id": "2401.00370", "version": "v1", "published": "2023-12-31T02:16:29.000Z", "updated": "2023-12-31T02:16:29.000Z", "title": "UGPNet: Universal Generative Prior for Image Restoration", "authors": [ "Hwayoon Lee", "Kyoungkook Kang", "Hyeongmin Lee", "Seung-Hwan Baek", "Sunghyun Cho" ], "comment": "Accepted to WACV 2024", "categories": [ "cs.CV", "eess.IV" ], "abstract": "Recent image restoration methods can be broadly categorized into two classes: (1) regression methods that recover the rough structure of the original image without synthesizing high-frequency details and (2) generative methods that synthesize perceptually-realistic high-frequency details even though the resulting image deviates from the original structure of the input. While both directions have been extensively studied in isolation, merging their benefits with a single framework has been rarely studied. In this paper, we propose UGPNet, a universal image restoration framework that can effectively achieve the benefits of both approaches by simply adopting a pair of an existing regression model and a generative model. UGPNet first restores the image structure of a degraded input using a regression model and synthesizes a perceptually-realistic image with a generative model on top of the regressed output. UGPNet then combines the regressed output and the synthesized output, resulting in a final result that faithfully reconstructs the structure of the original image in addition to perceptually-realistic textures. Our extensive experiments on deblurring, denoising, and super-resolution demonstrate that UGPNet can successfully exploit both regression and generative methods for high-fidelity image restoration.", "revisions": [ { "version": "v1", "updated": "2023-12-31T02:16:29.000Z" } ], "analyses": { "keywords": [ "universal generative prior", "original image", "regression model", "universal image restoration framework", "generative model" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }