Buckets:
| <meta charset="utf-8" /><meta name="hf:doc:metadata" content="{"title":"PEFT Integration","local":"peft-integration","sections":[{"title":"Installation","local":"installation","sections":[],"depth":2},{"title":"Quick Start","local":"quick-start","sections":[],"depth":2},{"title":"Three Ways to Configure PEFT","local":"three-ways-to-configure-peft","sections":[{"title":"1. Using CLI Flags (Simplest)","local":"1-using-cli-flags-simplest","sections":[],"depth":3},{"title":"2. Passing peft_config to Trainer (Recommended)","local":"2-passing-peftconfig-to-trainer-recommended","sections":[],"depth":3},{"title":"3. Applying PEFT to Model Directly (Advanced)","local":"3-applying-peft-to-model-directly-advanced","sections":[],"depth":3}],"depth":2},{"title":"Learning Rate Considerations","local":"learning-rate-considerations","sections":[],"depth":2},{"title":"PEFT with Different Trainers","local":"peft-with-different-trainers","sections":[{"title":"Supervised Fine-Tuning (SFT)","local":"supervised-fine-tuning-sft","sections":[{"title":"With LoRA","local":"with-lora","sections":[],"depth":4},{"title":"Python Example","local":"python-example","sections":[],"depth":4}],"depth":3},{"title":"Direct Preference Optimization (DPO)","local":"direct-preference-optimization-dpo","sections":[{"title":"With LoRA","local":"with-lora","sections":[],"depth":4},{"title":"Python Example","local":"python-example","sections":[],"depth":4}],"depth":3},{"title":"Group Relative Policy Optimization (GRPO)","local":"group-relative-policy-optimization-grpo","sections":[{"title":"With LoRA","local":"with-lora","sections":[],"depth":4},{"title":"Python Example","local":"python-example","sections":[],"depth":4}],"depth":3},{"title":"Proximal Policy Optimization (PPO)","local":"proximal-policy-optimization-ppo","sections":[{"title":"Multi-Adapter RL Training","local":"multi-adapter-rl-training","sections":[],"depth":4}],"depth":3}],"depth":2},{"title":"QLoRA: Quantized Low-Rank 
Adaptation","local":"qlora-quantized-low-rank-adaptation","sections":[{"title":"How QLoRA Works","local":"how-qlora-works","sections":[],"depth":3},{"title":"Using QLoRA with TRL","local":"using-qlora-with-trl","sections":[{"title":"Command Line","local":"command-line","sections":[],"depth":4},{"title":"Python Example","local":"python-example","sections":[],"depth":4}],"depth":3},{"title":"QLoRA Configuration Options","local":"qlora-configuration-options","sections":[],"depth":3},{"title":"8-bit Quantization","local":"8-bit-quantization","sections":[],"depth":3}],"depth":2},{"title":"Prompt Tuning","local":"prompt-tuning","sections":[{"title":"How Prompt Tuning Works","local":"how-prompt-tuning-works","sections":[],"depth":3},{"title":"Using Prompt Tuning with TRL","local":"using-prompt-tuning-with-trl","sections":[],"depth":3},{"title":"Prompt Tuning Configuration","local":"prompt-tuning-configuration","sections":[],"depth":3},{"title":"Prompt Tuning vs LoRA","local":"prompt-tuning-vs-lora","sections":[],"depth":3}],"depth":2},{"title":"Advanced PEFT Configurations","local":"advanced-peft-configurations","sections":[{"title":"LoRA Configuration Parameters","local":"lora-configuration-parameters","sections":[],"depth":3},{"title":"Target Module Selection","local":"target-module-selection","sections":[],"depth":3},{"title":"Using Command-Line Arguments","local":"using-command-line-arguments","sections":[],"depth":3}],"depth":2},{"title":"Saving and Loading PEFT Models","local":"saving-and-loading-peft-models","sections":[{"title":"Saving","local":"saving","sections":[],"depth":3},{"title":"Loading","local":"loading","sections":[],"depth":3},{"title":"Pushing to Hub","local":"pushing-to-hub","sections":[],"depth":3}],"depth":2},{"title":"Multi-GPU Training","local":"multi-gpu-training","sections":[{"title":"Naive Pipeline Parallelism (NPP) for Large 
Models","local":"naive-pipeline-parallelism-npp-for-large-models","sections":[],"depth":3}],"depth":2},{"title":"Resources","local":"resources","sections":[{"title":"TRL Examples and Notebooks","local":"trl-examples-and-notebooks","sections":[],"depth":3},{"title":"Documentation","local":"documentation","sections":[],"depth":3},{"title":"Research Papers","local":"research-papers","sections":[],"depth":3}],"depth":2}],"depth":1}"> | |
| <link href="/docs/trl/pr_5607/en/_app/immutable/assets/0.e3b0c442.css" rel="modulepreload"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/entry/start.151d81bd.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/scheduler.7b731bd4.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/singletons.2cf51804.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/index.ac28c20f.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/paths.ba01f37d.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/entry/app.3d9a91c0.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/preload-helper.e1689b3a.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/index.cc268345.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/nodes/0.cd288160.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/each.e59479a4.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/nodes/41.04af0ad3.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/MermaidChart.svelte_svelte_type_style_lang.f0d99f98.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/CodeBlock.169a125f.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/HfOption.9f04abd1.js"><!-- HEAD_svelte-u9bgzb_START --><meta name="hf:doc:metadata" content="{"title":"PEFT Integration","local":"peft-integration","sections":[{"title":"Installation","local":"installation","sections":[],"depth":2},{"title":"Quick Start","local":"quick-start","sections":[],"depth":2},{"title":"Three Ways to Configure PEFT","local":"three-ways-to-configure-peft","sections":[{"title":"1. Using CLI Flags (Simplest)","local":"1-using-cli-flags-simplest","sections":[],"depth":3},{"title":"2. Passing peft_config to Trainer (Recommended)","local":"2-passing-peftconfig-to-trainer-recommended","sections":[],"depth":3},{"title":"3. Applying PEFT to Model Directly (Advanced)","local":"3-applying-peft-to-model-directly-advanced","sections":[],"depth":3}],"depth":2},{"title":"Learning Rate Considerations","local":"learning-rate-considerations","sections":[],"depth":2},{"title":"PEFT with Different Trainers","local":"peft-with-different-trainers","sections":[{"title":"Supervised Fine-Tuning (SFT)","local":"supervised-fine-tuning-sft","sections":[{"title":"With LoRA","local":"with-lora","sections":[],"depth":4},{"title":"Python Example","local":"python-example","sections":[],"depth":4}],"depth":3},{"title":"Direct Preference Optimization (DPO)","local":"direct-preference-optimization-dpo","sections":[{"title":"With LoRA","local":"with-lora","sections":[],"depth":4},{"title":"Python Example","local":"python-example","sections":[],"depth":4}],"depth":3},{"title":"Group Relative Policy Optimization (GRPO)","local":"group-relative-policy-optimization-grpo","sections":[{"title":"With LoRA","local":"with-lora","sections":[],"depth":4},{"title":"Python Example","local":"python-example","sections":[],"depth":4}],"depth":3},{"title":"Proximal Policy Optimization (PPO)","local":"proximal-policy-optimization-ppo","sections":[{"title":"Multi-Adapter RL 
Training","local":"multi-adapter-rl-training","sections":[],"depth":4}],"depth":3}],"depth":2},{"title":"QLoRA: Quantized Low-Rank Adaptation","local":"qlora-quantized-low-rank-adaptation","sections":[{"title":"How QLoRA Works","local":"how-qlora-works","sections":[],"depth":3},{"title":"Using QLoRA with TRL","local":"using-qlora-with-trl","sections":[{"title":"Command Line","local":"command-line","sections":[],"depth":4},{"title":"Python Example","local":"python-example","sections":[],"depth":4}],"depth":3},{"title":"QLoRA Configuration Options","local":"qlora-configuration-options","sections":[],"depth":3},{"title":"8-bit Quantization","local":"8-bit-quantization","sections":[],"depth":3}],"depth":2},{"title":"Prompt Tuning","local":"prompt-tuning","sections":[{"title":"How Prompt Tuning Works","local":"how-prompt-tuning-works","sections":[],"depth":3},{"title":"Using Prompt Tuning with TRL","local":"using-prompt-tuning-with-trl","sections":[],"depth":3},{"title":"Prompt Tuning Configuration","local":"prompt-tuning-configuration","sections":[],"depth":3},{"title":"Prompt Tuning vs LoRA","local":"prompt-tuning-vs-lora","sections":[],"depth":3}],"depth":2},{"title":"Advanced PEFT Configurations","local":"advanced-peft-configurations","sections":[{"title":"LoRA Configuration Parameters","local":"lora-configuration-parameters","sections":[],"depth":3},{"title":"Target Module Selection","local":"target-module-selection","sections":[],"depth":3},{"title":"Using Command-Line Arguments","local":"using-command-line-arguments","sections":[],"depth":3}],"depth":2},{"title":"Saving and Loading PEFT Models","local":"saving-and-loading-peft-models","sections":[{"title":"Saving","local":"saving","sections":[],"depth":3},{"title":"Loading","local":"loading","sections":[],"depth":3},{"title":"Pushing to Hub","local":"pushing-to-hub","sections":[],"depth":3}],"depth":2},{"title":"Multi-GPU Training","local":"multi-gpu-training","sections":[{"title":"Naive Pipeline Parallelism 
(NPP) for Large Models","local":"naive-pipeline-parallelism-npp-for-large-models","sections":[],"depth":3}],"depth":2},{"title":"Resources","local":"resources","sections":[{"title":"TRL Examples and Notebooks","local":"trl-examples-and-notebooks","sections":[],"depth":3},{"title":"Documentation","local":"documentation","sections":[],"depth":3},{"title":"Research Papers","local":"research-papers","sections":[],"depth":3}],"depth":2}],"depth":1}"><!-- HEAD_svelte-u9bgzb_END --> <p></p> <div class="items-center shrink-0 min-w-[100px] max-sm:min-w-[50px] justify-end ml-auto flex" style="float: right; margin-left: 10px; display: inline-flex; position: relative; z-index: 10;"><div class="inline-flex rounded-md max-sm:rounded-sm"><button class="inline-flex items-center gap-1 h-7 max-sm:h-7 px-2 max-sm:px-1.5 text-sm font-medium text-gray-800 border border-r-0 rounded-l-md max-sm:rounded-l-sm border-gray-200 bg-white hover:shadow-inner dark:border-gray-850 dark:bg-gray-950 dark:text-gray-200 dark:hover:bg-gray-800" aria-live="polite"><span class="inline-flex items-center justify-center rounded-md p-0.5 max-sm:p-0 hover:text-gray-800 dark:hover:text-gray-200"><svg class="sm:size-3.5 size-3" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg></span> <span>Copy page</span></button> <button class="inline-flex items-center justify-center w-6 max-sm:w-5 h-7 max-sm:h-7 disabled:pointer-events-none text-sm text-gray-500 hover:text-gray-700 dark:hover:text-white rounded-r-md max-sm:rounded-r-sm border border-l transition border-gray-200 bg-white hover:shadow-inner dark:border-gray-850 
dark:bg-gray-950 dark:text-gray-200 dark:hover:bg-gray-800" aria-haspopup="menu" aria-expanded="false" aria-label="Open copy menu"><svg class="transition-transform text-gray-400 overflow-visible sm:size-3.5 size-3 rotate-0" width="1em" height="1em" viewBox="0 0 12 7" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M1 1L6 6L11 1" stroke="currentColor"></path></svg></button></div> </div> <h1 class="relative group"><a id="peft-integration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#peft-integration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PEFT Integration</span></h1> <p data-svelte-h="svelte-1aa4si9">TRL supports <a href="https://github.com/huggingface/peft" rel="nofollow">PEFT</a> (Parameter-Efficient Fine-Tuning) methods for memory-efficient model training. 
PEFT enables fine-tuning large language models by training only a small number of additional parameters while keeping the base model frozen, significantly reducing computational costs and memory requirements.</p> <p data-svelte-h="svelte-eyysuf">This guide covers how to use PEFT with different TRL trainers, including LoRA, QLoRA, and prompt tuning techniques.</p> <p data-svelte-h="svelte-vdlhef">For a complete working example, see the <a href="https://github.com/huggingface/trl/blob/main/examples/notebooks/sft_trl_lora_qlora.ipynb" rel="nofollow">SFT with LoRA/QLoRA notebook</a>.</p> <h2 class="relative group"><a id="installation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#installation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Installation</span></h2> <p data-svelte-h="svelte-1a7j66l">To use PEFT with TRL, install the required dependencies:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" 
type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->pip install trl[peft]<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-140s00u">For QLoRA support (4-bit and 8-bit quantization), also install:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform 
-translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->pip install bitsandbytes<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="quick-start" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#quick-start"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Quick Start</span></h2> <p data-svelte-h="svelte-145s0q3">All TRL trainers support PEFT through the <code>peft_config</code> argument. 
The simplest way to enable PEFT is by using the command-line interface with the <code>--use_peft</code> flag:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->python trl/scripts/sft.py \ | |
| --model_name_or_path Qwen/Qwen2-0.5B \ | |
| --dataset_name trl-lib/Capybara \ | |
| --use_peft \ | |
| --lora_r 32 \ | |
| --lora_alpha 16 \ | |
| --output_dir Qwen2-0.5B-SFT-LoRA<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-ubc45n">Alternatively, you can pass a PEFT config directly in your Python code:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> peft <span class="hljs-keyword">import</span> LoraConfig | |
| <span class="hljs-keyword">from</span> trl <span class="hljs-keyword">import</span> SFTTrainer | |
| <span class="hljs-comment"># Configure LoRA</span> | |
| peft_config = LoraConfig( | |
| r=<span class="hljs-number">32</span>, | |
| lora_alpha=<span class="hljs-number">16</span>, | |
| lora_dropout=<span class="hljs-number">0.05</span>, | |
| bias=<span class="hljs-string">"none"</span>, | |
| task_type=<span class="hljs-string">"CAUSAL_LM"</span>, | |
| ) | |
| <span class="hljs-comment"># Configure training - note the higher learning rate for LoRA (10x base rate)</span> | |
| training_args = SFTConfig( | |
| learning_rate=<span class="hljs-number">2.0e-4</span>, <span class="hljs-comment"># 10x the base rate (2.0e-5) for LoRA</span> | |
| ... | |
| ) | |
| <span class="hljs-comment"># Create trainer with PEFT</span> | |
| trainer = SFTTrainer( | |
| model=model, | |
| train_dataset=dataset, | |
| peft_config=peft_config, | |
| )<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="three-ways-to-configure-peft" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#three-ways-to-configure-peft"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Three Ways to Configure PEFT</span></h2> <p data-svelte-h="svelte-195hlna">TRL provides three different methods to configure PEFT, each suited for different use cases:</p> <h3 class="relative group"><a id="1-using-cli-flags-simplest" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#1-using-cli-flags-simplest"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 
79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>1. Using CLI Flags (Simplest)</span></h3> <p data-svelte-h="svelte-ytojdr">The easiest way to enable PEFT is to use the <code>--use_peft</code> flag with the command-line interface. This method is ideal for quick experiments and standard configurations:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->python trl/scripts/sft.py \ | |
| --model_name_or_path Qwen/Qwen2-0.5B \ | |
| --dataset_name trl-lib/Capybara \ | |
| --use_peft \ | |
| --lora_r 32 \ | |
| --lora_alpha 16 \ | |
| --lora_dropout 0.05 \ | |
| --output_dir Qwen2-0.5B-SFT-LoRA<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1h44p9a"><strong>Pros</strong>: Quick setup, no code required</p> <p data-svelte-h="svelte-6pnbnx"><strong>Cons</strong>: Limited to LoRA, fewer customization options</p> <h3 class="relative group"><a id="2-passing-peftconfig-to-trainer-recommended" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#2-passing-peftconfig-to-trainer-recommended"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>2. Passing peft_config to Trainer (Recommended)</span></h3> <p data-svelte-h="svelte-19rmu4f">For more control, pass a PEFT configuration directly to the trainer. 
This is the recommended approach for most use cases:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> peft <span class="hljs-keyword">import</span> LoraConfig | |
| <span class="hljs-keyword">from</span> trl <span class="hljs-keyword">import</span> SFTConfig, SFTTrainer | |
| peft_config = LoraConfig( | |
| r=<span class="hljs-number">32</span>, | |
| lora_alpha=<span class="hljs-number">16</span>, | |
| lora_dropout=<span class="hljs-number">0.05</span>, | |
| bias=<span class="hljs-string">"none"</span>, | |
| task_type=<span class="hljs-string">"CAUSAL_LM"</span>, | |
| target_modules=[<span class="hljs-string">"q_proj"</span>, <span class="hljs-string">"v_proj"</span>, <span class="hljs-string">"k_proj"</span>, <span class="hljs-string">"o_proj"</span>], | |
| ) | |
| trainer = SFTTrainer( | |
| model=model, | |
| args=training_args, | |
| train_dataset=dataset, | |
| peft_config=peft_config, <span class="hljs-comment"># Pass config here</span> | |
| )<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1q9st0j"><strong>Pros</strong>: Full control, supports all PEFT methods (LoRA, Prompt Tuning, etc.)</p> <p data-svelte-h="svelte-piihuf"><strong>Cons</strong>: Requires Python code</p> <h3 class="relative group"><a id="3-applying-peft-to-model-directly-advanced" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#3-applying-peft-to-model-directly-advanced"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>3. 
Applying PEFT to Model Directly (Advanced)</span></h3> <p data-svelte-h="svelte-gkqfv">For maximum flexibility, you can apply PEFT to your model before passing it to the trainer:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> peft <span class="hljs-keyword">import</span> LoraConfig, get_peft_model | |
| <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForCausalLM | |
| <span class="hljs-keyword">from</span> trl <span class="hljs-keyword">import</span> SFTConfig, SFTTrainer | |
| <span class="hljs-comment"># Load base model</span> | |
| model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">"Qwen/Qwen2-0.5B"</span>) | |
| <span class="hljs-comment"># Apply PEFT configuration</span> | |
| peft_config = LoraConfig( | |
| r=<span class="hljs-number">32</span>, | |
| lora_alpha=<span class="hljs-number">16</span>, | |
| lora_dropout=<span class="hljs-number">0.05</span>, | |
| bias=<span class="hljs-string">"none"</span>, | |
| task_type=<span class="hljs-string">"CAUSAL_LM"</span>, | |
| ) | |
| model = get_peft_model(model, peft_config) | |
| <span class="hljs-comment"># Pass PEFT-wrapped model to trainer</span> | |
| trainer = SFTTrainer( | |
| model=model, <span class="hljs-comment"># Already has PEFT applied</span> | |
| args=training_args, | |
| train_dataset=dataset, | |
| <span class="hljs-comment"># Note: no peft_config needed here</span> | |
| )<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-7gz8pq"><strong>Pros</strong>: Maximum control, useful for custom model architectures or complex setups</p> <p data-svelte-h="svelte-uvcbqp"><strong>Cons</strong>: More verbose, requires understanding of PEFT internals</p> <h2 class="relative group"><a id="learning-rate-considerations" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#learning-rate-considerations"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Learning Rate Considerations</span></h2> <p data-svelte-h="svelte-19gln0l">When using LoRA or other PEFT methods, you typically need to use a <strong>higher learning rate</strong> (approximately 10x) compared to full fine-tuning. 
This is because PEFT methods train only a small fraction of parameters, requiring a larger learning rate to achieve similar parameter updates.</p> <p data-svelte-h="svelte-56wxxv"><strong>Recommended learning rates:</strong></p> <table data-svelte-h="svelte-vcu64p"><thead><tr><th>Trainer</th> <th>Full Fine-Tuning</th> <th>With LoRA (10x)</th></tr></thead> <tbody><tr><td><strong>SFT</strong></td> <td><code>2.0e-5</code></td> <td><code>2.0e-4</code></td></tr> <tr><td><strong>DPO</strong></td> <td><code>5.0e-7</code></td> <td><code>5.0e-6</code></td></tr> <tr><td><strong>GRPO</strong></td> <td><code>1.0e-6</code></td> <td><code>1.0e-5</code></td></tr> <tr><td><strong>Prompt Tuning</strong></td> <td>N/A</td> <td><code>1.0e-2</code> to <code>3.0e-2</code></td></tr></tbody></table> <blockquote data-svelte-h="svelte-8pdduk"><p><strong>Why 10x?</strong> LoRA adapters have significantly fewer trainable parameters than the full model. A higher learning rate compensates for this reduced parameter count, ensuring effective training. 
For a detailed explanation, see <a href="https://thinkingmachines.ai/blog/lora/" rel="nofollow">the Thinking Machines blog post on LoRA learning rates</a>.</p></blockquote> <p data-svelte-h="svelte-1eq8vqg">For additional best practices on using LoRA effectively, refer to the <a href="lora_without_regret">LoRA Without Regret</a> documentation.</p> <h2 class="relative group"><a id="peft-with-different-trainers" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#peft-with-different-trainers"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>PEFT with Different Trainers</span></h2> <p data-svelte-h="svelte-lhxbnr">TRL’s trainers support PEFT configurations for various training paradigms. 
Below are detailed examples for each major trainer.</p> <div class="flex space-x-2 items-center my-1.5 mr-8 h-7 !pl-0 -mx-3 md:mx-0"><div class="flex items-center border rounded-lg px-1.5 py-1 leading-none select-none text-smd border-gray-800 bg-black dark:bg-gray-700 text-white">sft </div><div class="flex items-center border rounded-lg px-1.5 py-1 leading-none select-none text-smd text-gray-500 cursor-pointer opacity-90 hover:text-gray-700 dark:hover:text-gray-200 hover:shadow-sm">dpo </div><div class="flex items-center border rounded-lg px-1.5 py-1 leading-none select-none text-smd text-gray-500 cursor-pointer opacity-90 hover:text-gray-700 dark:hover:text-gray-200 hover:shadow-sm">grpo </div></div> <div class="language-select"> <h3 class="relative group"><a id="supervised-fine-tuning-sft" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#supervised-fine-tuning-sft"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Supervised Fine-Tuning (SFT)</span></h3> <p data-svelte-h="svelte-jcnl34">The <code>SFTTrainer</code> is used for supervised fine-tuning on instruction datasets.</p> <h4 class="relative group"><a id="with-lora" 
class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#with-lora"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>With LoRA</span></h4> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform 
-translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->python trl/scripts/sft.py \ | |
| --model_name_or_path Qwen/Qwen2-0.5B \ | |
| --dataset_name trl-lib/Capybara \ | |
| --learning_rate 2.0e-4 \ | |
| --num_train_epochs 1 \ | |
| --per_device_train_batch_size 2 \ | |
| --gradient_accumulation_steps 8 \ | |
| --use_peft \ | |
| --lora_r 32 \ | |
| --lora_alpha 16 \ | |
| --output_dir Qwen2-0.5B-SFT-LoRA<!-- HTML_TAG_END --></pre></div> <h4 class="relative group"><a id="python-example" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#python-example"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Python Example</span></h4> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow 
left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> peft <span class="hljs-keyword">import</span> LoraConfig | |
| <span class="hljs-keyword">from</span> trl <span class="hljs-keyword">import</span> SFTConfig, SFTTrainer | |
| <span class="hljs-comment"># Configure LoRA</span> | |
| peft_config = LoraConfig( | |
| r=<span class="hljs-number">32</span>, | |
| lora_alpha=<span class="hljs-number">16</span>, | |
| lora_dropout=<span class="hljs-number">0.05</span>, | |
| bias=<span class="hljs-string">"none"</span>, | |
| task_type=<span class="hljs-string">"CAUSAL_LM"</span>, | |
| target_modules=[<span class="hljs-string">"q_proj"</span>, <span class="hljs-string">"v_proj"</span>], <span class="hljs-comment"># optional: specify target modules</span> | |
| ) | |
| <span class="hljs-comment"># Configure training with higher learning rate for LoRA</span> | |
| training_args = SFTConfig( | |
| learning_rate=<span class="hljs-number">2.0e-4</span>, <span class="hljs-comment"># 10x the base rate for LoRA</span> | |
| ... | |
| ) | |
| <span class="hljs-comment"># Create trainer with PEFT config</span> | |
| trainer = SFTTrainer( | |
| model=<span class="hljs-string">"Qwen/Qwen2-0.5B"</span>, <span class="hljs-comment"># can pass model name or loaded model</span> | |
| args=training_args, | |
| train_dataset=dataset, | |
| peft_config=peft_config, <span class="hljs-comment"># pass PEFT config here</span> | |
| ) | |
| trainer.train()<!-- HTML_TAG_END --></pre></div> </div> <h3 class="relative group"><a id="proximal-policy-optimization-ppo" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#proximal-policy-optimization-ppo"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Proximal Policy Optimization (PPO)</span></h3> <h4 class="relative group"><a id="multi-adapter-rl-training" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#multi-adapter-rl-training"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 
28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Multi-Adapter RL Training</span></h4> <p data-svelte-h="svelte-1a94gbn">You can use a single base model with multiple PEFT adapters for the entire PPO algorithm - including retrieving reference logits, computing active logits, and calculating rewards. This approach is useful for memory-efficient RL training.</p> <blockquote class="warning" data-svelte-h="svelte-1vy4ntf"><p>This feature is experimental and convergence has not been extensively tested. We encourage the community to share feedback and report any issues.</p></blockquote> <p data-svelte-h="svelte-ioylk5"><strong>Requirements</strong></p> <p data-svelte-h="svelte-1gs5c9v">Install PEFT and optionally bitsandbytes for 8-bit models:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: 
transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->pip install peft bitsandbytes<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1q7scme"><strong>Training Workflow</strong></p> <p data-svelte-h="svelte-1cwfc7z">The multi-adapter approach requires three stages:</p> <ol data-svelte-h="svelte-170dem6"><li><strong>Supervised Fine-Tuning (SFT)</strong>: Train a base model on your target domain (e.g., IMDB dataset) using <code>SFTTrainer</code></li> <li><strong>Reward Model Training</strong>: Train a reward model adapter using PEFT and <code>RewardTrainer</code> (see <a href="https://github.com/huggingface/trl/tree/main/examples/scripts/reward_modeling.py" rel="nofollow">reward modeling example</a>)</li> <li><strong>PPO Training</strong>: Fine-tune new adapters using PPO with the reward adapter</li></ol> <blockquote class="important" data-svelte-h="svelte-1r7qnul"><p>Use the same base model (architecture and weights) for stages 2 & 3.</p></blockquote> <p data-svelte-h="svelte-1h24w70"><strong>Basic Usage</strong></p> <p data-svelte-h="svelte-1826sco">After training your reward adapter and pushing it to the Hub:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none 
transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> peft <span class="hljs-keyword">import</span> LoraConfig | |
| <span class="hljs-keyword">from</span> trl.experimental.ppo <span class="hljs-keyword">import</span> PPOTrainer, AutoModelForCausalLMWithValueHead | |
| model_name = <span class="hljs-string">"huggyllama/llama-7b"</span> | |
| rm_adapter_id = <span class="hljs-string">"trl-lib/llama-7b-hh-rm-adapter"</span> | |
| <span class="hljs-comment"># Configure PPO adapter</span> | |
| lora_config = LoraConfig( | |
| r=<span class="hljs-number">16</span>, | |
| lora_alpha=<span class="hljs-number">32</span>, | |
| lora_dropout=<span class="hljs-number">0.05</span>, | |
| bias=<span class="hljs-string">"none"</span>, | |
| task_type=<span class="hljs-string">"CAUSAL_LM"</span>, | |
| ) | |
| <span class="hljs-comment"># Load model with reward adapter</span> | |
| model = AutoModelForCausalLMWithValueHead.from_pretrained( | |
| model_name, | |
| peft_config=lora_config, | |
| reward_adapter=rm_adapter_id, | |
| ) | |
| trainer = PPOTrainer(model=model, ...)<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-m3b0ez">In your training loop, compute rewards using:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->rewards = trainer.model.compute_reward_score(**inputs)<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-iimns0"><strong>Advanced Features</strong></p> <p data-svelte-h="svelte-s0v8y9"><strong>Quantized Base Models</strong></p> <p data-svelte-h="svelte-1u4rolx">For memory-efficient training, load the base model in 8-bit or 4-bit while keeping adapters in float32:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " 
title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BitsAndBytesConfig | |
| model = AutoModelForCausalLMWithValueHead.from_pretrained( | |
| model_name, | |
| peft_config=lora_config, | |
| reward_adapter=rm_adapter_id, | |
| quantization_config=BitsAndBytesConfig(load_in_8bit=<span class="hljs-literal">True</span>), | |
| )<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="qlora-quantized-low-rank-adaptation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#qlora-quantized-low-rank-adaptation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>QLoRA: Quantized Low-Rank Adaptation</span></h2> <p data-svelte-h="svelte-18m242a">QLoRA combines 4-bit quantization with LoRA to enable fine-tuning of very large models on consumer hardware. 
This technique can reduce memory requirements by up to 4x compared to standard LoRA.</p> <h3 class="relative group"><a id="how-qlora-works" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#how-qlora-works"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>How QLoRA Works</span></h3> <ol data-svelte-h="svelte-1q5rstb"><li><strong>4-bit Quantization</strong>: The base model is loaded in 4-bit precision using <code>bitsandbytes</code></li> <li><strong>Frozen Weights</strong>: The quantized model weights remain frozen during training</li> <li><strong>LoRA Adapters</strong>: Only the LoRA adapter parameters are trained in higher precision</li> <li><strong>Memory Efficiency</strong>: Enables fine-tuning of models like Llama-70B on a single consumer GPU</li></ol> <h3 class="relative group"><a id="using-qlora-with-trl" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#using-qlora-with-trl"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" 
role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Using QLoRA with TRL</span></h3> <p data-svelte-h="svelte-2emjj2">Simply combine <code>load_in_4bit=True</code> with PEFT configuration:</p> <h4 class="relative group"><a id="command-line" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#command-line"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Command Line</span></h4> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 
cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->python trl/scripts/sft.py \ | |
| --model_name_or_path meta-llama/Llama-2-7b-hf \ | |
| --dataset_name trl-lib/Capybara \ | |
| --load_in_4bit \ | |
| --use_peft \ | |
| --lora_r 32 \ | |
| --lora_alpha 16 \ | |
| --per_device_train_batch_size 1 \ | |
| --gradient_accumulation_steps 16 \ | |
| --output_dir Llama-2-7b-QLoRA<!-- HTML_TAG_END --></pre></div> <h4 class="relative group"><a id="python-example" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#python-example"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Python Example</span></h4> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 
top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> peft <span class="hljs-keyword">import</span> LoraConfig | |
| <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForCausalLM, BitsAndBytesConfig | |
| <span class="hljs-keyword">from</span> trl <span class="hljs-keyword">import</span> SFTConfig, SFTTrainer | |
| <span class="hljs-comment"># Configure 4-bit quantization</span> | |
| bnb_config = BitsAndBytesConfig( | |
| load_in_4bit=<span class="hljs-literal">True</span>, | |
| bnb_4bit_quant_type=<span class="hljs-string">"nf4"</span>, | |
| bnb_4bit_compute_dtype=torch.bfloat16, | |
| bnb_4bit_use_double_quant=<span class="hljs-literal">True</span>, | |
| ) | |
| <span class="hljs-comment"># Load model with quantization</span> | |
| model = AutoModelForCausalLM.from_pretrained( | |
| <span class="hljs-string">"meta-llama/Llama-2-7b-hf"</span>, | |
| quantization_config=bnb_config, | |
| device_map=<span class="hljs-string">"auto"</span>, | |
| ) | |
| <span class="hljs-comment"># Configure LoRA</span> | |
| peft_config = LoraConfig( | |
| r=<span class="hljs-number">32</span>, | |
| lora_alpha=<span class="hljs-number">16</span>, | |
| lora_dropout=<span class="hljs-number">0.05</span>, | |
| bias=<span class="hljs-string">"none"</span>, | |
| task_type=<span class="hljs-string">"CAUSAL_LM"</span>, | |
| ) | |
| <span class="hljs-comment"># Configure training with higher learning rate for LoRA</span> | |
| training_args = SFTConfig( | |
| learning_rate=<span class="hljs-number">2.0e-4</span>, <span class="hljs-comment"># 10x the base rate for QLoRA</span> | |
| ... | |
| ) | |
| <span class="hljs-comment"># Create trainer with PEFT config</span> | |
| trainer = SFTTrainer( | |
| model=model, | |
| args=training_args, | |
| train_dataset=dataset, | |
| peft_config=peft_config, | |
| ) | |
| trainer.train()<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="qlora-configuration-options" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#qlora-configuration-options"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>QLoRA Configuration Options</span></h3> <p data-svelte-h="svelte-1tzfl0e">The <code>BitsAndBytesConfig</code> provides several options to optimize memory and performance:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" 
height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">import</span> torch | |
| <span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BitsAndBytesConfig | |
| bnb_config = BitsAndBytesConfig( | |
| load_in_4bit=<span class="hljs-literal">True</span>, | |
| bnb_4bit_quant_type=<span class="hljs-string">"nf4"</span>, <span class="hljs-comment"># or "fp4"</span> | |
| bnb_4bit_compute_dtype=torch.bfloat16, <span class="hljs-comment"># Compute dtype for 4-bit base models</span> | |
| bnb_4bit_use_double_quant=<span class="hljs-literal">True</span>, <span class="hljs-comment"># Nested quantization for additional memory savings</span> | |
| )<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1sihssd"><strong>Configuration Parameters:</strong></p> <ul data-svelte-h="svelte-vp4ki5"><li><code>bnb_4bit_quant_type</code>: Quantization data type (<code>"nf4"</code> or <code>"fp4"</code>). NF4 is recommended.</li> <li><code>bnb_4bit_compute_dtype</code>: The dtype used for computation. Use <code>bfloat16</code> for better training stability.</li> <li><code>bnb_4bit_use_double_quant</code>: Enable nested quantization to save an additional ~0.4 bits per parameter.</li></ul> <h3 class="relative group"><a id="8-bit-quantization" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#8-bit-quantization"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>8-bit Quantization</span></h3> <p data-svelte-h="svelte-1xizljh">For slightly higher precision with reduced memory savings, you can use 8-bit quantization:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code 
excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> BitsAndBytesConfig, AutoModelForCausalLM | |
| bnb_config = BitsAndBytesConfig(load_in_8bit=<span class="hljs-literal">True</span>) | |
| model = AutoModelForCausalLM.from_pretrained( | |
| <span class="hljs-string">"meta-llama/Llama-2-7b-hf"</span>, | |
| quantization_config=bnb_config, | |
| device_map=<span class="hljs-string">"auto"</span>, | |
| )<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-131mx14">Or via command line:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->python trl/scripts/sft.py \ | |
| --model_name_or_path meta-llama/Llama-2-7b-hf \ | |
| --load_in_8bit \ | |
| --use_peft \ | |
| --lora_r 32 \ | |
| --lora_alpha 16<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="prompt-tuning" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#prompt-tuning"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Prompt Tuning</span></h2> <p data-svelte-h="svelte-1t6iq03">Prompt tuning is another PEFT technique that learns soft prompts (continuous embeddings) prepended to the input, while keeping the entire model frozen. 
This is particularly effective for large models.</p> <h3 class="relative group"><a id="how-prompt-tuning-works" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#how-prompt-tuning-works"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>How Prompt Tuning Works</span></h3> <ol data-svelte-h="svelte-8qfcaq"><li><strong>Virtual Tokens</strong>: Adds learnable continuous embeddings (virtual tokens) to the input</li> <li><strong>Frozen Model</strong>: The entire base model remains frozen</li> <li><strong>Task-Specific Prompts</strong>: Each task learns its own prompt embeddings</li> <li><strong>Extreme Efficiency</strong>: Only the prompt embeddings are trained (typically 8-20 tokens)</li></ol> <h3 class="relative group"><a id="using-prompt-tuning-with-trl" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#using-prompt-tuning-with-trl"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Using Prompt Tuning with TRL</span></h3> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> peft <span class="hljs-keyword">import</span> PromptTuningConfig, PromptTuningInit, 
TaskType | |
| <span class="hljs-keyword">from</span> trl <span class="hljs-keyword">import</span> SFTConfig, SFTTrainer | |
| <span class="hljs-comment"># Configure Prompt Tuning</span> | |
| peft_config = PromptTuningConfig( | |
| task_type=TaskType.CAUSAL_LM, | |
| prompt_tuning_init=PromptTuningInit.TEXT, | |
| num_virtual_tokens=<span class="hljs-number">8</span>, | |
| prompt_tuning_init_text=<span class="hljs-string">"Classify if the tweet is a complaint or not:"</span>, | |
| tokenizer_name_or_path=<span class="hljs-string">"Qwen/Qwen2-0.5B"</span>, | |
| ) | |
| <span class="hljs-comment"># Configure training with higher learning rate for Prompt Tuning</span> | |
| training_args = SFTConfig( | |
| learning_rate=<span class="hljs-number">2.0e-2</span>, <span class="hljs-comment"># Prompt Tuning typically uses 1e-2 to 3e-2</span> | |
| ... | |
| ) | |
| <span class="hljs-comment"># Create trainer with PEFT config</span> | |
| trainer = SFTTrainer( | |
| model=model, | |
| args=training_args, | |
| train_dataset=dataset, | |
| peft_config=peft_config, <span class="hljs-comment"># pass PEFT config here</span> | |
| ) | |
| trainer.train()<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="prompt-tuning-configuration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#prompt-tuning-configuration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Prompt Tuning Configuration</span></h3> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded 
font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> peft <span class="hljs-keyword">import</span> PromptTuningConfig, PromptTuningInit, TaskType | |
| peft_config = PromptTuningConfig( | |
| task_type=TaskType.CAUSAL_LM, <span class="hljs-comment"># Task type</span> | |
| prompt_tuning_init=PromptTuningInit.TEXT, <span class="hljs-comment"># Initialize from text</span> | |
| num_virtual_tokens=<span class="hljs-number">8</span>, <span class="hljs-comment"># Number of virtual tokens</span> | |
| prompt_tuning_init_text=<span class="hljs-string">"Your initialization text here"</span>, | |
| tokenizer_name_or_path=<span class="hljs-string">"model_name"</span>, | |
| )<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1sihssd"><strong>Configuration Parameters:</strong></p> <ul data-svelte-h="svelte-1aukhjc"><li><code>task_type</code>: The task type (<code>TaskType.CAUSAL_LM</code> for language modeling)</li> <li><code>prompt_tuning_init</code>: Initialization method (<code>TEXT</code>, <code>RANDOM</code>)</li> <li><code>num_virtual_tokens</code>: Number of virtual tokens to prepend (typically 8-20)</li> <li><code>prompt_tuning_init_text</code>: Text to initialize the virtual tokens (when using <code>TEXT</code> init)</li> <li><code>tokenizer_name_or_path</code>: Tokenizer for initializing from text</li></ul> <h3 class="relative group"><a id="prompt-tuning-vs-lora" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#prompt-tuning-vs-lora"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Prompt Tuning vs LoRA</span></h3> <table data-svelte-h="svelte-esrk6j"><thead><tr><th>Feature</th> <th>Prompt Tuning</th> <th>LoRA</th></tr></thead> <tbody><tr><td><strong>Parameters Trained</strong></td> <td>~0.001%</td> <td>~0.1-1%</td></tr> <tr><td><strong>Memory Usage</strong></td> 
<td>Minimal</td> <td>Low</td></tr> <tr><td><strong>Training Speed</strong></td> <td>Fastest</td> <td>Fast</td></tr> <tr><td><strong>Model Modification</strong></td> <td>None</td> <td>Adapter layers</td></tr> <tr><td><strong>Best For</strong></td> <td>Large models, many tasks</td> <td>General fine-tuning</td></tr> <tr><td><strong>Learning Rate</strong></td> <td>Higher (1e-2 to 3e-2)</td> <td>Standard (1e-4 to 3e-4)</td></tr></tbody></table> <h2 class="relative group"><a id="advanced-peft-configurations" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#advanced-peft-configurations"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Advanced PEFT Configurations</span></h2> <h3 class="relative group"><a id="lora-configuration-parameters" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#lora-configuration-parameters"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" 
viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>LoRA Configuration Parameters</span></h3> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> peft <span class="hljs-keyword">import</span> LoraConfig | |
| peft_config = LoraConfig( | |
| r=<span class="hljs-number">16</span>, <span class="hljs-comment"># LoRA rank</span> | |
| lora_alpha=<span class="hljs-number">32</span>, <span class="hljs-comment"># LoRA scaling factor</span> | |
| lora_dropout=<span class="hljs-number">0.05</span>, <span class="hljs-comment"># Dropout probability</span> | |
| bias=<span class="hljs-string">"none"</span>, <span class="hljs-comment"># Bias training strategy</span> | |
| task_type=<span class="hljs-string">"CAUSAL_LM"</span>, <span class="hljs-comment"># Task type</span> | |
| target_modules=[<span class="hljs-string">"q_proj"</span>, <span class="hljs-string">"v_proj"</span>], <span class="hljs-comment"># Modules to apply LoRA</span> | |
| modules_to_save=<span class="hljs-literal">None</span>, <span class="hljs-comment"># Additional modules to train</span> | |
| )<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-11zdjd0"><strong>Key Parameters:</strong></p> <ul data-svelte-h="svelte-heuyrh"><li><code>r</code>: LoRA rank (typical values: 8, 16, 32, 64). Higher rank = more parameters but potentially better performance.</li> <li><code>lora_alpha</code>: Scaling factor (typically 2x the rank). Controls the magnitude of LoRA updates.</li> <li><code>lora_dropout</code>: Dropout probability for LoRA layers (typical: 0.05-0.1).</li> <li><code>target_modules</code>: Which modules to apply LoRA to. Common choices: | |
| <ul><li><code>["q_proj", "v_proj"]</code>: Attention query and value (memory efficient)</li> <li><code>["q_proj", "k_proj", "v_proj", "o_proj"]</code>: All attention projections</li> <li><code>["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]</code>: All linear layers</li></ul></li> <li><code>modules_to_save</code>: Additional modules to fully train (e.g., <code>["embed_tokens", "lm_head"]</code>)</li></ul> <h3 class="relative group"><a id="target-module-selection" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#target-module-selection"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Target Module Selection</span></h3> <p data-svelte-h="svelte-ey0006">You can specify which modules to apply LoRA to. 
Common patterns:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-comment"># Minimal (most memory efficient)</span> | |
| target_modules=[<span class="hljs-string">"q_proj"</span>, <span class="hljs-string">"v_proj"</span>] | |
| <span class="hljs-comment"># Attention only</span> | |
| target_modules=[<span class="hljs-string">"q_proj"</span>, <span class="hljs-string">"k_proj"</span>, <span class="hljs-string">"v_proj"</span>, <span class="hljs-string">"o_proj"</span>] | |
| <span class="hljs-comment"># All linear layers (best performance, more memory)</span> | |
| target_modules=[<span class="hljs-string">"q_proj"</span>, <span class="hljs-string">"k_proj"</span>, <span class="hljs-string">"v_proj"</span>, <span class="hljs-string">"o_proj"</span>, <span class="hljs-string">"gate_proj"</span>, <span class="hljs-string">"up_proj"</span>, <span class="hljs-string">"down_proj"</span>]<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="using-command-line-arguments" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#using-command-line-arguments"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Using Command-Line Arguments</span></h3> <p data-svelte-h="svelte-zcediw">TRL scripts accept PEFT parameters via command line:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid 
meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->python trl/scripts/sft.py \ | |
| --model_name_or_path Qwen/Qwen2-0.5B \ | |
| --dataset_name trl-lib/Capybara \ | |
| --use_peft \ | |
| --lora_r 32 \ | |
| --lora_alpha 16 \ | |
| --lora_dropout 0.05 \ | |
| --lora_target_modules q_proj v_proj \ | |
| --output_dir output<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1oizey2">Available flags:</p> <ul data-svelte-h="svelte-1g0a0nd"><li><code>--use_peft</code>: Enable PEFT</li> <li><code>--lora_r</code>: LoRA rank (default: 16)</li> <li><code>--lora_alpha</code>: LoRA alpha (default: 32)</li> <li><code>--lora_dropout</code>: LoRA dropout (default: 0.05)</li> <li><code>--lora_target_modules</code>: Target modules (space-separated)</li> <li><code>--lora_modules_to_save</code>: Additional modules to train</li> <li><code>--use_rslora</code>: Enable Rank-Stabilized LoRA</li> <li><code>--use_dora</code>: Enable Weight-Decomposed LoRA (DoRA)</li> <li><code>--load_in_4bit</code>: Enable 4-bit quantization (QLoRA)</li> <li><code>--load_in_8bit</code>: Enable 8-bit quantization</li></ul> <h2 class="relative group"><a id="saving-and-loading-peft-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#saving-and-loading-peft-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Saving and Loading PEFT Models</span></h2> <h3 class="relative group"><a id="saving" class="header-link block pr-1.5 text-lg no-hover:hidden 
with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#saving"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Saving</span></h3> <p data-svelte-h="svelte-1cfqg9t">After training, save your PEFT adapters:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 
transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-comment"># Save the adapters</span> | |
| trainer.save_model(<span class="hljs-string">"path/to/adapters"</span>) | |
| <span class="hljs-comment"># Or manually</span> | |
| model.save_pretrained(<span class="hljs-string">"path/to/adapters"</span>)<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-dbw3jd">This saves only the adapter weights (~few MB) rather than the full model (~several GB).</p> <h3 class="relative group"><a id="loading" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#loading"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Loading</span></h3> <p data-svelte-h="svelte-y2qx63">Load a PEFT model for inference:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" 
transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForCausalLM | |
| <span class="hljs-keyword">from</span> peft <span class="hljs-keyword">import</span> PeftModel | |
| <span class="hljs-comment"># Load base model</span> | |
| base_model = AutoModelForCausalLM.from_pretrained(<span class="hljs-string">"Qwen/Qwen2-0.5B"</span>) | |
| <span class="hljs-comment"># Load PEFT adapters</span> | |
| model = PeftModel.from_pretrained(base_model, <span class="hljs-string">"path/to/adapters"</span>) | |
| <span class="hljs-comment"># Optionally merge adapters into base model for faster inference</span> | |
| model = model.merge_and_unload()<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="pushing-to-hub" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#pushing-to-hub"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Pushing to Hub</span></h3> <p data-svelte-h="svelte-mqk2d0">You can easily share your PEFT adapters on the Hugging Face Hub:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute 
pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-comment"># Push adapters to Hub</span> | |
| model.push_to_hub(<span class="hljs-string">"username/model-name-lora"</span>) | |
| <span class="hljs-comment"># Load from Hub</span> | |
| <span class="hljs-keyword">from</span> peft <span class="hljs-keyword">import</span> PeftModel | |
| model = PeftModel.from_pretrained(base_model, <span class="hljs-string">"username/model-name-lora"</span>)<!-- HTML_TAG_END --></pre></div> <h2 class="relative group"><a id="multi-gpu-training" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#multi-gpu-training"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Multi-GPU Training</span></h2> <p data-svelte-h="svelte-gkn6hn">PEFT works seamlessly with TRL’s multi-GPU support through <code>accelerate</code>:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" 
transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-comment"># Configure accelerate</span> | |
| accelerate config | |
| <span class="hljs-comment"># Launch training</span> | |
| accelerate launch trl/scripts/sft.py \ | |
| --model_name_or_path Qwen/Qwen2-0.5B \ | |
| --dataset_name trl-lib/Capybara \ | |
| --use_peft \ | |
| --lora_r 32 \ | |
| --lora_alpha 16<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1jh5w4w">For QLoRA with multiple GPUs, the base model is automatically sharded:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->accelerate launch trl/scripts/sft.py \ | |
| --model_name_or_path meta-llama/Llama-2-70b-hf \ | |
| --load_in_4bit \ | |
| --use_peft \ | |
| --lora_r 32<!-- HTML_TAG_END --></pre></div> <h3 class="relative group"><a id="naive-pipeline-parallelism-npp-for-large-models" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#naive-pipeline-parallelism-npp-for-large-models"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Naive Pipeline Parallelism (NPP) for Large Models</span></h3> <p data-svelte-h="svelte-1lawuya">For very large models (>60B parameters), TRL supports Naive Pipeline Parallelism (NPP), which distributes the model and adapters across multiple GPUs. 
The activations and gradients are communicated across GPUs, supporting both <code>int8</code> and other data types.</p> <p data-svelte-h="svelte-1ot7jbs"><img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/trl-npp.png" alt="NPP"></p> <p data-svelte-h="svelte-lyjvon"><strong>How to Use NPP</strong></p> <p data-svelte-h="svelte-oib2x8">Load your model with a custom <code>device_map</code> to split it across multiple devices:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> transformers <span class="hljs-keyword">import</span> AutoModelForCausalLM | |
| <span class="hljs-keyword">from</span> peft <span class="hljs-keyword">import</span> LoraConfig | |
| <span class="hljs-comment"># Create custom device map (see accelerate documentation)</span> | |
| device_map = { | |
| <span class="hljs-string">"model.embed_tokens"</span>: <span class="hljs-number">0</span>, | |
| <span class="hljs-string">"model.layers.0"</span>: <span class="hljs-number">0</span>, | |
| <span class="hljs-comment"># ... distribute layers across GPUs</span> | |
| <span class="hljs-string">"lm_head"</span>: <span class="hljs-number">0</span>, <span class="hljs-comment"># Must be on GPU 0</span> | |
| } | |
| model = AutoModelForCausalLM.from_pretrained( | |
| <span class="hljs-string">"meta-llama/Llama-2-70b-hf"</span>, | |
| device_map=device_map, | |
| peft_config=lora_config, | |
| )<!-- HTML_TAG_END --></pre></div> <blockquote class="important" data-svelte-h="svelte-1ngz8y5"><ul><li>Keep the <code>lm_head</code> module on the first GPU (device 0) to avoid errors</li> <li>See this <a href="https://github.com/huggingface/blog/blob/main/accelerate-large-models.md" rel="nofollow">tutorial on device maps</a> for proper configuration</li> <li>Run training scripts directly (not with <code>accelerate launch</code>): <code>python script.py</code></li> <li>Data Parallelism is not yet supported with NPP</li></ul></blockquote> <h2 class="relative group"><a id="resources" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#resources"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Resources</span></h2> <h3 class="relative group"><a id="trl-examples-and-notebooks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#trl-examples-and-notebooks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>TRL Examples and Notebooks</span></h3> <ul data-svelte-h="svelte-1wuy9tc"><li><strong><a href="https://github.com/huggingface/trl/blob/main/examples/notebooks/sft_trl_lora_qlora.ipynb" rel="nofollow">SFT with LoRA/QLoRA Notebook</a></strong> - Complete working example showing both LoRA and QLoRA implementations</li> <li><strong><a href="https://github.com/huggingface/trl/tree/main/examples" rel="nofollow">TRL Examples Directory</a></strong> - Collection of training scripts demonstrating PEFT with different trainers</li> <li><strong><a href="https://github.com/huggingface/cookbook/tree/main/notebooks/transformers" rel="nofollow">TRL Cookbook Recipes</a></strong> - Step-by-step guides for common PEFT training scenarios</li></ul> <h3 class="relative group"><a id="documentation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#documentation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Documentation</span></h3> <ul data-svelte-h="svelte-1ru624y"><li><a href="https://huggingface.co/docs/peft" rel="nofollow">PEFT Documentation</a> - Official PEFT library documentation</li> <li><a href="https://huggingface.co/docs/trl" rel="nofollow">TRL Documentation</a> - Complete TRL documentation with trainer guides</li> <li><a href="lora_without_regret">LoRA Without Regret</a> - Best practices for using LoRA effectively</li></ul> <h3 class="relative group"><a id="research-papers" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#research-papers"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Research Papers</span></h3> <ul data-svelte-h="svelte-17ay4ku"><li><a href="https://huggingface.co/papers/2106.09685" rel="nofollow">LoRA Paper</a> - 
Original LoRA methodology and results</li> <li><a href="https://huggingface.co/papers/2305.14314" rel="nofollow">QLoRA Paper</a> - Efficient finetuning with 4-bit quantization</li> <li><a href="https://huggingface.co/papers/2104.08691" rel="nofollow">Prompt Tuning Paper</a> - The Power of Scale for Parameter-Efficient Prompt Tuning</li></ul> <a class="!text-gray-400 !no-underline text-sm flex items-center not-prose mt-4" href="https://github.com/huggingface/trl/blob/main/docs/source/peft_integration.md" target="_blank"><svg class="mr-1" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M31,16l-7,7l-1.41-1.41L28.17,16l-5.58-5.59L24,9l7,7z"></path><path d="M1,16l7-7l1.41,1.41L3.83,16l5.58,5.59L8,23l-7-7z"></path><path d="M12.419,25.484L17.639,6.552l1.932,0.518L14.351,26.002z"></path></svg> <span data-svelte-h="svelte-zjs2n5"><span class="underline">Update</span> on GitHub</span></a> <p></p> | |
| <script> | |
// NOTE(review): auto-generated SvelteKit client bootstrap emitted by the docs
// build — do not hand-edit; the hashed entry filenames below are build-specific.
| { | |
// Global runtime config read by the SvelteKit client (asset/base URL paths).
// The `__sveltekit_1hqaf25` suffix is generated per build.
| __sveltekit_1hqaf25 = { | |
| assets: "/docs/trl/pr_5607/en", | |
| base: "/docs/trl/pr_5607/en", | |
| env: {} | |
| }; | |
// The parent element of this inline script is used as the hydration target.
| const element = document.currentScript.parentElement; | |
// Per-route serialized data payload (null entries for this static docs page).
| const data = [null,null]; | |
// Load the kit runtime and the app bundle in parallel, then start the client
// on the mount element.
| Promise.all([ | |
| import("/docs/trl/pr_5607/en/_app/immutable/entry/start.151d81bd.js"), | |
| import("/docs/trl/pr_5607/en/_app/immutable/entry/app.3d9a91c0.js") | |
| ]).then(([kit, app]) => { | |
| kit.start(app, element, { | |
// node_ids: presumably indexes into the generated route/node manifest for
// this page — TODO confirm against the SvelteKit build output.
| node_ids: [0, 41], | |
| data, | |
| form: null, | |
| error: null | |
| }); | |
| }); | |
| } | |
| </script> | |
Xet Storage Details
- Size:
- 131 kB
- Xet hash:
- de2b57f0c00dee44659b2cf4612f0f95c709edf6f60686550c7cfbba324b82bb
·
Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.