| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/HfOption.9f04abd1.js"><!-- HEAD_svelte-u9bgzb_START --><meta name="hf:doc:metadata" content="{"title":"LoRA Without Regret","local":"lora-without-regret","sections":[{"title":"Benefits of LoRA over full fine-tuning","local":"benefits-of-lora-over-full-fine-tuning","sections":[],"depth":2},{"title":"Examples with TRL","local":"examples-with-trl","sections":[{"title":"Supervised Fine-Tuning (SFT)","local":"supervised-fine-tuning-sft","sections":[],"depth":3},{"title":"Reinforcement Learning (GRPO)","local":"reinforcement-learning-grpo","sections":[],"depth":3}],"depth":2},{"title":"Key findings in optimizing LoRA","local":"key-findings-in-optimizing-lora","sections":[{"title":"1. LoRA performs better when applied to all weight matrices","local":"1-lora-performs-better-when-applied-to-all-weight-matrices","sections":[],"depth":3},{"title":"2. The adapter needs sufficient capacity to learn from the dataset","local":"2-the-adapter-needs-sufficient-capacity-to-learn-from-the-dataset","sections":[],"depth":3},{"title":"3. “FullFT and high-rank LoRAs have similar learning curves”","local":"3-fullft-and-high-rank-loras-have-similar-learning-curves","sections":[],"depth":3},{"title":"4. “In some scenarios, LoRA is less tolerant of large batch sizes than full fine-tuning.”","local":"4-in-some-scenarios-lora-is-less-tolerant-of-large-batch-sizes-than-full-fine-tuning","sections":[],"depth":3}],"depth":2},{"title":"Takeaways","local":"takeaways","sections":[],"depth":2},{"title":"Citation","local":"citation","sections":[],"depth":2}],"depth":1}"><!-- HEAD_svelte-u9bgzb_END --> <p></p> <div class="items-center shrink-0 min-w-[100px] max-sm:min-w-[50px] justify-end ml-auto flex" style="float: right; margin-left: 10px; display: inline-flex; position: relative; z-index: 10;"><div class="inline-flex rounded-md max-sm:rounded-sm"><button class="inline-flex items-center gap-1 h-7 max-sm:h-7 px-2 max-sm:px-1.5 text-sm font-medium text-gray-800 border border-r-0 rounded-l-md max-sm:rounded-l-sm border-gray-200 bg-white hover:shadow-inner dark:border-gray-850 dark:bg-gray-950 dark:text-gray-200 dark:hover:bg-gray-800" aria-live="polite"><span class="inline-flex items-center justify-center rounded-md p-0.5 max-sm:p-0 hover:text-gray-800 dark:hover:text-gray-200"><svg class="sm:size-3.5 size-3" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg></span> <span>Copy page</span></button> <button class="inline-flex items-center justify-center w-6 max-sm:w-5 h-7 max-sm:h-7 disabled:pointer-events-none text-sm text-gray-500 hover:text-gray-700 dark:hover:text-white rounded-r-md max-sm:rounded-r-sm border border-l transition border-gray-200 bg-white hover:shadow-inner dark:border-gray-850 dark:bg-gray-950 dark:text-gray-200 dark:hover:bg-gray-800" aria-haspopup="menu" aria-expanded="false" aria-label="Open copy menu"><svg class="transition-transform text-gray-400 overflow-visible sm:size-3.5 size-3 rotate-0" width="1em" height="1em" viewBox="0 0 12 7" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M1 1L6 6L11 1" 
stroke="currentColor"></path></svg></button></div> </div> <h1 class="relative group"><a id="lora-without-regret" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#lora-without-regret"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LoRA Without Regret</span></h1> <p data-svelte-h="svelte-gx5fze">Recent research from the team at <a href="https://thinkingmachines.ai/blog/lora/" rel="nofollow">Thinking Machines Lab</a> (Schulman et al., 2025) shows that <strong>LoRA can match full fine-tuning performance</strong> when configured correctly, while using only ~67% of the compute. These findings are exciting to TRL users because they’re straightforward to implement and can improve model performance on smaller budgets.</p> <p data-svelte-h="svelte-lywk0l">This guide provides simple instructions to reproduce the results of the blog post in TRL.</p> <blockquote class="tip" data-svelte-h="svelte-8qn8ze"><p>It is recommended to read the blog post before following this guide, or to consult both resources in parallel for best results.</p></blockquote> <h2 class="relative group"><a id="benefits-of-lora-over-full-fine-tuning" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#benefits-of-lora-over-full-fine-tuning"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Benefits of LoRA over full fine-tuning</span></h2> <p data-svelte-h="svelte-16iolmm">First of all, let’s remind ourselves of the benefits of <a href="https://huggingface.co/docs/trl/en/peft_integration" rel="nofollow">LoRA over full fine-tuning</a>.</p> <p data-svelte-h="svelte-1sfgmfi">LoRA adds adapter layers on top of the base model, which contains significantly fewer parameters than the base model itself. This design reduces GPU memory requirements and enables more efficient training. 
As described in the [blog](https://thinkingmachines.ai/blog/lora/), this approach was originally thought to involve a performance trade-off, although careful configuration can overcome it and match full fine-tuning performance.

## Examples with TRL

Let's implement and train LoRA adapters in TRL scripts based on the core findings of the blog post. Afterwards, we'll revisit each finding in light of the TRL results.

### Supervised Fine-Tuning (SFT)

The blog post performs SFT on a range of models and datasets from the Hub, which we can reproduce in TRL.

| Model | Dataset |
| --- | --- |
| [Llama-3.2-1B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct) | [allenai/tulu-3-sft-mixture](https://huggingface.co/datasets/allenai/tulu-3-sft-mixture) |
| [Llama-3.2-1B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct) | [open-thoughts/OpenThoughts-114k](https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k) |
| [Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) | [allenai/tulu-3-sft-mixture](https://huggingface.co/datasets/allenai/tulu-3-sft-mixture) |
| [Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) | [open-thoughts/OpenThoughts-114k](https://huggingface.co/datasets/open-thoughts/OpenThoughts-114k) |

We can integrate these findings with the TRL Python API like so:

```python
| <span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset | |
| <span class="hljs-keyword">from</span> peft <span class="hljs-keyword">import</span> LoraConfig | |
| <span class="hljs-keyword">from</span> trl <span class="hljs-keyword">import</span> SFTTrainer, SFTConfig | |
| dataset = load_dataset(<span class="hljs-string">"open-thoughts/OpenThoughts-114k"</span>, split=<span class="hljs-string">"train"</span>) | |
| peft_config = LoraConfig(r=<span class="hljs-number">256</span>, lora_alpha=<span class="hljs-number">16</span>, target_modules=<span class="hljs-string">"all-linear"</span>) | |
| training_args = SFTConfig( | |
| learning_rate=<span class="hljs-number">2e-4</span>, | |
| per_device_train_batch_size=<span class="hljs-number">1</span>, | |
| gradient_accumulation_steps=<span class="hljs-number">4</span>, | |
| num_train_epochs=<span class="hljs-number">1</span>, | |
| report_to=[<span class="hljs-string">"trackio"</span>], | |
| ) | |
| trainer = SFTTrainer( | |
| model=<span class="hljs-string">"Qwen/Qwen2.5-3B-Instruct"</span>, | |
| train_dataset=dataset, | |
| peft_config=peft_config, | |
| args=training_args, | |
| ) | |
| trainer.train() | |
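Once training completes, only the adapter weights need to be stored, and they can later be attached to the base model or merged into it for deployment. A minimal sketch (the output path is illustrative):

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM

# Save only the LoRA adapter, which is much smaller than a full checkpoint
trainer.save_model("qwen-sft-lora")

# Later: attach the adapter to the base model and optionally merge it
base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-3B-Instruct")
model = PeftModel.from_pretrained(base, "qwen-sft-lora")
model = model.merge_and_unload()  # folds the adapter into the base weights
```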
### Reinforcement Learning (GRPO)

The blog post performs GRPO on a range of models and datasets from the Hub, and once again we can reproduce the results in TRL.

| Model | Dataset |
| --- | --- |
| [Llama-3.1-8B-Base](https://huggingface.co/meta-llama/Llama-3.1-8B) | [GSM8k](https://huggingface.co/datasets/openai/gsm8k) |
| [Llama-3.1-8B-Base](https://huggingface.co/meta-llama/Llama-3.1-8B) | [DeepMath-103K](https://huggingface.co/datasets/zwhe99/DeepMath-103K) |
| [Qwen3-8B-Base](https://huggingface.co/Qwen/Qwen3-8B-Base) | [DeepMath-103K](https://huggingface.co/datasets/zwhe99/DeepMath-103K) |

For reinforcement learning, the blog uses a math reasoning task that we can reproduce as a Python function.
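For illustration, here is a simplified sketch of such a reward function (hypothetical: it assumes the dataset provides a `solution` column, and it substring-matches where a real implementation would parse the final answer). TRL reward functions receive the generated completions plus any dataset columns as keyword arguments and return one score per completion:

```python
def accuracy_reward(completions, solution, **kwargs):
    """Hypothetical reward: 1.0 if the reference answer appears in the completion."""
    rewards = []
    for completion, answer in zip(completions, solution):
        # Conversational completions are lists of {"role", "content"} messages
        text = completion[0]["content"] if isinstance(completion, list) else completion
        rewards.append(1.0 if str(answer) in text else 0.0)
    return rewards
```

In the example below, we instead use `reasoning_accuracy_reward`, a ready-made reward shipped with TRL for this kind of task.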
focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --> | |
| <span class="hljs-keyword">from</span> datasets <span class="hljs-keyword">import</span> load_dataset | |
| <span class="hljs-keyword">from</span> peft <span class="hljs-keyword">import</span> LoraConfig | |
| <span class="hljs-keyword">from</span> trl <span class="hljs-keyword">import</span> GRPOConfig, GRPOTrainer | |
| <span class="hljs-keyword">from</span> trl.rewards <span class="hljs-keyword">import</span> reasoning_accuracy_reward | |
| dataset = load_dataset(<span class="hljs-string">"HuggingFaceH4/OpenR1-Math-220k-default-verified"</span>, split=<span class="hljs-string">"train"</span>) | |
| peft_config = LoraConfig( | |
| r=<span class="hljs-number">1</span>, | |
| lora_alpha=<span class="hljs-number">32</span>, | |
| target_modules=<span class="hljs-string">"all-linear"</span> | |
| ) | |
| training_args = GRPOConfig( | |
| learning_rate=<span class="hljs-number">5e-5</span>, | |
| per_device_train_batch_size=<span class="hljs-number">1</span>, | |
| gradient_accumulation_steps=<span class="hljs-number">4</span>, | |
| num_train_epochs=<span class="hljs-number">1</span>, | |
| num_generations=<span class="hljs-number">8</span>, | |
| generation_batch_size=<span class="hljs-number">8</span>, | |
| report_to=[<span class="hljs-string">"trackio"</span>], | |
| ) | |
| trainer = GRPOTrainer( | |
| model=<span class="hljs-string">"Qwen/Qwen3-0.6B"</span>, | |
| reward_funcs=reasoning_accuracy_reward, | |
| args=training_args, | |
| train_dataset=dataset, | |
| peft_config=peft_config, | |
| ) | |
| trainer.train() | |
```

> [!WARNING]
> To keep the example concise, this snippet uses TRL's built-in `reasoning_accuracy_reward` rather than redefining the custom reward function sketched above.

The reinforcement learning script with GRPO is implemented as a custom script in TRL, which uses the reward function shown above. You can review the script with LoRA best practices at [`grpo.py`](https://huggingface.co/datasets/burtenshaw/lora-without-regrets/blob/main/grpo.py).

## Key findings in optimizing LoRA

We were able to reproduce the results of the blog post using TRL and the SmolLM3 model. We trained the model for 500 steps on the [Math 220k dataset](https://huggingface.co/datasets/HuggingFaceH4/OpenR1-Math-220k-default-verified) with the reward function and configuration above.
As you can see in the figure below, the LoRA model's average train reward curve matches the full fine-tuning curve.

![train reward](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lora_without_regret/5.png)

And most importantly, the LoRA model uses significantly less memory than the full fine-tuning model, as we can see in the figure below.

![memory usage](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lora_without_regret/6.png)

Here are the parameters we used to train the models above:

| Parameter | LoRA | Full FT |
| --- | --- | --- |
| `--model_name_or_path` | HuggingFaceTB/SmolLM3-3B | HuggingFaceTB/SmolLM3-3B |
| `--dataset_name` | HuggingFaceH4/OpenR1-Math-220k-default-verified | HuggingFaceH4/OpenR1-Math-220k-default-verified |
| `--learning_rate` | 1.0e-5 | 1.0e-6 |
| `--max_completion_length` | 4096 | 4096 |
| `--lora_r` | 1 | - |
| `--lora_alpha` | 32 | - |
| `--lora_dropout` | 0.0 | - |
| `--lora_target_modules` | all-linear | - |
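The CLI flags in this table map directly onto the Python configuration classes. As an illustrative sketch, the LoRA run corresponds to settings like:

```python
from peft import LoraConfig
from trl import GRPOConfig

# The LoRA column from the table above
peft_config = LoraConfig(
    r=1,
    lora_alpha=32,
    lora_dropout=0.0,
    target_modules="all-linear",
)
training_args = GRPOConfig(
    learning_rate=1.0e-5,  # the full fine-tuning run uses 1.0e-6 instead
    max_completion_length=4096,
)
```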
Let's break down the key findings of the blog post and how we were able to reproduce them.

### 1. LoRA performs better when applied to all weight matrices

The authors recommend applying LoRA to all weight matrices rather than limiting it to attention layers, as increasing the rank does not compensate for this restriction.

![all layers](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lora_without_regret/1.png)

Attention-only LoRA underperforms even when using a higher rank to match parameter count. In TRL, this can be configured using `--lora_target_modules all-linear` to apply LoRA to all weight matrices. In Python, we can do this like so:

```python
from peft import LoraConfig

peft_config = LoraConfig(target_modules="all-linear")
```

### 2. The adapter needs sufficient capacity to learn from the dataset

The blog post recommends using a sufficient LoRA rank to learn from the dataset. The rank determines the number of trainable parameters in the LoRA adapter. Therefore, "For datasets that exceed LoRA capacity, LoRA underperforms FullFT".

![adapter capacity](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lora_without_regret/3.png)
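Since the rank sets the adapter's parameter count, a quick back-of-the-envelope sketch helps: for a weight matrix of shape `(d_out, d_in)`, LoRA adds `r * (d_in + d_out)` trainable parameters.

```python
def lora_param_count(d_in: int, d_out: int, r: int) -> int:
    # A has shape (r, d_in) and B has shape (d_out, r)
    return r * (d_in + d_out)

# For a single 4096 x 4096 projection:
print(lora_param_count(4096, 4096, r=1))    # 8192
print(lora_param_count(4096, 4096, r=256))  # 2097152
```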
In the TRL script, we could use `--lora_r` to set the rank and adapt it based on the task and dataset we're training on. The blog post recommends ranks based on the task and dataset size, summarized in the table below.

Reinforcement learning tasks typically require lower capacity, so smaller LoRA ranks can be used. This is because policy gradient algorithms extract roughly ~1 bit of information per episode, demanding minimal parameter capacity.

The blog post defines the ideal dataset size for LoRA to match full fine-tuning as "post-training scale", which we can use to determine the recommended rank for SFT and RL LoRAs:

| Task Type | Dataset Size | Recommended Rank |
| --- | --- | --- |
| **SFT** | Post-training scale | 256 |
| **RL** | Any size | 1-32 |
### 3. "FullFT and high-rank LoRAs have similar learning curves"

Counterintuitively, the blog post recommends using a higher learning rate than for full fine-tuning. In the table above, we used 1.0e-5 for LoRA and 1.0e-6 for full fine-tuning. In the TRL script, we could use `--learning_rate` to set the learning rate. The $\frac{1}{r}$ scaling in LoRA makes the optimal learning rate approximately rank-independent: the adapter's contribution to the forward pass is scaled by $\frac{\alpha}{r}$, so the effective size of an update does not grow with the rank.

![learning rate](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lora_without_regret/2.png)

### 4. "In some scenarios, LoRA is less tolerant of large batch sizes than full fine-tuning."

The blog post recommends using an effective batch size below 32, because the authors found LoRA to be less tolerant of large batch sizes and this could not be mitigated by increasing the LoRA rank. In the TRL script, we could use `--per_device_train_batch_size` and `--gradient_accumulation_steps` to set the batch size.
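For example, a minimal sketch (values illustrative) that keeps the effective batch size under 32, where the effective batch size is `per_device_train_batch_size * gradient_accumulation_steps * number of GPUs`:

```python
from trl import SFTConfig

training_args = SFTConfig(
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,  # effective batch: 4 * 4 * 1 GPU = 16 < 32
)
```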
![batch size](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lora_without_regret/4.png)

## Takeaways

Using TRL, you can efficiently implement LoRA adapters to match full fine-tuning performance, applying the core insights (targeting all weight matrices, choosing the right rank, and managing batch size and learning rate) without the heavy compute cost of FullFT.

## Citation

```bibtex
@article{schulman2025lora,
  title   = {{LoRA Without Regret}},
  author  = {John Schulman and Thinking Machines Lab},
  year    = 2025,
  journal = {Thinking Machines Lab: Connectionism},
  doi     = {10.64434/tml.20250929},
  note    = {https://thinkingmachines.ai/blog/lora/}
}
```