{ "id": "2502.01651", "version": "v1", "published": "2025-01-30T19:36:33.000Z", "updated": "2025-01-30T19:36:33.000Z", "title": "Fine-tuning LLaMA 2 interference: a comparative study of language implementations for optimal efficiency", "authors": [ "Sazzad Hossain", "Touhidul Alam Seyam", "Avijit Chowdhury", "Munis Xamidov", "Rajib Ghose", "Abhijit Pathak" ], "comment": "11 pages, conference paper. International conference on Artificial Intelligence and Future Civilization", "categories": [ "cs.LG", "cs.AI" ], "abstract": "This paper presents a comparative study aimed at optimizing Llama2 inference, a critical aspect of machine learning and natural language processing (NLP). We evaluate various programming languages and frameworks, including TensorFlow, PyTorch, Python, Mojo, C++, and Java, analyzing their performance in terms of speed, memory consumption, and ease of implementation through extensive benchmarking. Strengths and limitations of each approach are highlighted, along with proposed optimization strategies for parallel processing and hardware utilization. Furthermore, we investigate the Mojo SDK, a novel framework designed for large language model (LLM) inference on Apple Silicon, benchmarking its performance against implementations in C, C++, Rust, Zig, Go, and Julia. Our experiments, conducted on an Apple M1 Max, demonstrate Mojo SDK's competitive performance, ease of use, and seamless Python compatibility, positioning it as a strong alternative for LLM inference on Apple Silicon. We also discuss broader implications for LLM deployment on resource-constrained hardware and identify potential directions for future research.", "revisions": [ { "version": "v1", "updated": "2025-01-30T19:36:33.000Z" } ], "analyses": { "keywords": [ "comparative study", "language implementations", "optimal efficiency", "fine-tuning llama", "demonstrate mojo sdks competitive performance" ], "tags": [ "conference paper" ], "note": { "typesetting": "TeX", "pages": 11, "language": "en", "license": "arXiv", "status": "editable" } } }