{ "id": "2308.08614", "version": "v1", "published": "2023-08-16T18:13:27.000Z", "updated": "2023-08-16T18:13:27.000Z", "title": "Boosting Logical Reasoning in Large Language Models through a New Framework: The Graph of Thought", "authors": [ "Bin Lei", "Pei-Hung Lin", "Chunhua Liao", "Caiwen Ding" ], "categories": [ "cs.LG", "cs.AI", "cs.CL" ], "abstract": "Recent advancements in large-scale models, such as GPT-4, have showcased remarkable capabilities in addressing standard queries. However, when facing complex problems that require multi-step logical reasoning, their accuracy dramatically decreases. Current research has explored the realm of \\textit{prompting engineering} to bolster the inferential capacities of these models. Our paper unveils a pioneering prompting technique, dubbed \\textit{Graph of Thoughts (GoT)}. Through testing on a trio of escalating challenges: the 24-point game, resolution of high-degree polynomial equations, and derivation of formulas for recursive sequences, our method outperformed GPT-4, achieving accuracy improvements of $89.7\\%$, $86\\%$, and $56\\%$ for each respective task. Moreover, when juxtaposed with the state-of-the-art (SOTA) prompting method, \\textit{Tree of Thought (ToT)}, our approach registered an average accuracy boost of $23\\%$, $24\\%$, and $15\\%$.", "revisions": [ { "version": "v1", "updated": "2023-08-16T18:13:27.000Z" } ], "analyses": { "keywords": [ "large language models", "boosting logical reasoning", "average accuracy boost", "high-degree polynomial equations", "addressing standard queries" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }