{ "id": "2001.06658", "version": "v1", "published": "2020-01-18T12:19:19.000Z", "updated": "2020-01-18T12:19:19.000Z", "title": "Text-to-Image Generation with Attention Based Recurrent Neural Networks", "authors": [ "Tehseen Zia", "Shahan Arif", "Shakeeb Murtaza", "Mirza Ahsan Ullah" ], "categories": [ "cs.CV" ], "abstract": "Conditional image modeling based on textual descriptions is a relatively new domain in unsupervised learning. Previous approaches use a latent variable model and generative adversarial networks. While the formers are approximated by using variational auto-encoders and rely on the intractable inference that can hamper their performance, the latter is unstable to train due to Nash equilibrium based objective function. We develop a tractable and stable caption-based image generation model. The model uses an attention-based encoder to learn word-to-pixel dependencies. A conditional autoregressive based decoder is used for learning pixel-to-pixel dependencies and generating images. Experimentations are performed on Microsoft COCO, and MNIST-with-captions datasets and performance is evaluated by using the Structural Similarity Index. Results show that the proposed model performs better than contemporary approaches and generate better quality images. Keywords: Generative image modeling, autoregressive image modeling, caption-based image generation, neural attention, recurrent neural networks.", "revisions": [ { "version": "v1", "updated": "2020-01-18T12:19:19.000Z" } ], "analyses": { "keywords": [ "recurrent neural networks", "text-to-image generation", "generate better quality images", "image modeling", "stable caption-based image generation model" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }