{ "id": "2401.15121", "version": "v1", "published": "2024-01-26T05:59:40.000Z", "updated": "2024-01-26T05:59:40.000Z", "title": "Expressive Power of ReLU and Step Networks under Floating-Point Operations", "authors": [ "Yeachan Park", "Geonho Hwang", "Wonyeol Lee", "Sejun Park" ], "categories": [ "cs.LG", "cs.AI" ], "abstract": "The study of the expressive power of neural networks has investigated the fundamental limits of neural networks. Most existing results assume real-valued inputs and parameters as well as exact operations during the evaluation of neural networks. However, neural networks are typically executed on computers that can only represent a tiny subset of the reals and apply inexact operations. In this work, we analyze the expressive power of neural networks under a more realistic setup: when we use floating-point numbers and operations. Our first set of results assumes floating-point operations where the significand of a float is represented by finite bits but its exponent can take any integer value. Under this setup, we show that neural networks using a binary threshold unit or ReLU can memorize any finite input/output pairs and can approximate any continuous function within a small error. We also show similar results on memorization and universal approximation when floating-point operations use finite bits for both significand and exponent; these results are applicable to many popular floating-point formats such as those defined in the IEEE 754 standard (e.g., 32-bit single-precision format) and bfloat16.", "revisions": [ { "version": "v1", "updated": "2024-01-26T05:59:40.000Z" } ], "analyses": { "keywords": [ "neural networks", "expressive power", "step networks", "results assumes floating-point operations", "finite bits" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }