{ "id": "2410.09615", "version": "v2", "published": "2024-10-12T18:36:07.000Z", "updated": "2025-02-04T01:30:52.000Z", "title": "SLiM: One-shot Quantization and Sparsity with Low-rank Approximation for LLM Weight Compression", "authors": [ "Mohammad Mozaffari", "Amir Yazdanbakhsh", "Maryam Mehri Dehnavi" ], "categories": [ "cs.LG", "cs.AI", "cs.PF" ], "abstract": "Conventional model compression techniques for LLMs address high memory consumption and slow inference challenges but typically require computationally expensive retraining to preserve accuracy. In contrast, one-shot compression methods eliminate retraining cost, but struggle to achieve accuracy comparable to dense models. This paper presents SLIM, a new one-shot compression framework that holistically integrates hardware-friendly quantization, sparsity, and low-rank approximation into a unified process. First, we formulate the quantization process using a probabilistic approach (SLIM-Quant) that enables us to apply uniform quantization. Then, we use an existing one-shot pruning method to apply semi-structured sparsity on top of the quantized weights. Finally, to compensate for the introduced aggregated quantization and sparsity error, we use a novel saliency function with unique invertible and additive features that enables us to mathematically compute the value of low-rank adapters. SLIM improves model accuracy by up to 5.66% (LLaMA-2-7B) for 2:4 sparsity with 4-bit weight quantization, outperforming prior methods. Models compressed with SLIM achieve up to 3.78x and 3.75x layer-wise speedup on Nvidia RTX3060 and A100 GPUs, respectively. We also propose an optional PEFT recipe that further improves accuracy by up to 1.66% (LLaMA-2-13B) compared to SLIM without fine-tuning", "revisions": [ { "version": "v2", "updated": "2025-02-04T01:30:52.000Z" } ], "analyses": { "keywords": [ "llm weight compression", "low-rank approximation", "one-shot quantization", "methods eliminate retraining cost", "compression methods eliminate retraining" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }