Buckets:
| <meta charset="utf-8" /><meta name="hf:doc:metadata" content="{"title":"Examples","local":"examples","sections":[{"title":"Notebooks","local":"notebooks","sections":[{"title":"OpenEnv Notebooks","local":"openenv-notebooks","sections":[],"depth":3}],"depth":2},{"title":"Scripts","local":"scripts","sections":[{"title":"OpenEnv Scripts","local":"openenv-scripts","sections":[],"depth":3}],"depth":2},{"title":"Distributed Training (for scripts)","local":"distributed-training-for-scripts","sections":[],"depth":2}],"depth":1}"> | |
| <link href="/docs/trl/pr_5607/en/_app/immutable/assets/0.e3b0c442.css" rel="modulepreload"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/entry/start.151d81bd.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/scheduler.7b731bd4.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/singletons.2cf51804.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/index.ac28c20f.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/paths.ba01f37d.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/entry/app.3d9a91c0.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/preload-helper.e1689b3a.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/index.cc268345.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/nodes/0.cd288160.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/each.e59479a4.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/nodes/17.07eb5b96.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/MermaidChart.svelte_svelte_type_style_lang.f0d99f98.js"> | |
| <link rel="modulepreload" href="/docs/trl/pr_5607/en/_app/immutable/chunks/CodeBlock.169a125f.js"><!-- HEAD_svelte-u9bgzb_START --><meta name="hf:doc:metadata" content="{"title":"Examples","local":"examples","sections":[{"title":"Notebooks","local":"notebooks","sections":[{"title":"OpenEnv Notebooks","local":"openenv-notebooks","sections":[],"depth":3}],"depth":2},{"title":"Scripts","local":"scripts","sections":[{"title":"OpenEnv Scripts","local":"openenv-scripts","sections":[],"depth":3}],"depth":2},{"title":"Distributed Training (for scripts)","local":"distributed-training-for-scripts","sections":[],"depth":2}],"depth":1}"><!-- HEAD_svelte-u9bgzb_END --> <p></p> <div class="items-center shrink-0 min-w-[100px] max-sm:min-w-[50px] justify-end ml-auto flex" style="float: right; margin-left: 10px; display: inline-flex; position: relative; z-index: 10;"><div class="inline-flex rounded-md max-sm:rounded-sm"><button class="inline-flex items-center gap-1 h-7 max-sm:h-7 px-2 max-sm:px-1.5 text-sm font-medium text-gray-800 border border-r-0 rounded-l-md max-sm:rounded-l-sm border-gray-200 bg-white hover:shadow-inner dark:border-gray-850 dark:bg-gray-950 dark:text-gray-200 dark:hover:bg-gray-800" aria-live="polite"><span class="inline-flex items-center justify-center rounded-md p-0.5 max-sm:p-0 hover:text-gray-800 dark:hover:text-gray-200"><svg class="sm:size-3.5 size-3" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg></span> <span>Copy page</span></button> <button class="inline-flex items-center justify-center w-6 max-sm:w-5 h-7 max-sm:h-7 disabled:pointer-events-none text-sm 
text-gray-500 hover:text-gray-700 dark:hover:text-white rounded-r-md max-sm:rounded-r-sm border border-l transition border-gray-200 bg-white hover:shadow-inner dark:border-gray-850 dark:bg-gray-950 dark:text-gray-200 dark:hover:bg-gray-800" aria-haspopup="menu" aria-expanded="false" aria-label="Open copy menu"><svg class="transition-transform text-gray-400 overflow-visible sm:size-3.5 size-3 rotate-0" width="1em" height="1em" viewBox="0 0 12 7" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M1 1L6 6L11 1" stroke="currentColor"></path></svg></button></div> </div> <h1 class="relative group"><a id="examples" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#examples"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Examples</span></h1> <p data-svelte-h="svelte-k5mfpg">This directory contains a collection of examples that demonstrate how to use the TRL library for various applications. 
We provide both <strong>scripts</strong> for advanced use cases and <strong>notebooks</strong> for an easy start and interactive experimentation.</p> <p data-svelte-h="svelte-1wqwut1">The notebooks are self-contained and can run on <strong>free Colab</strong>, while the scripts can run on <strong>single GPU, multi-GPU, or DeepSpeed</strong> setups.</p> <p data-svelte-h="svelte-qtgy0c"><strong>Getting Started</strong></p> <p data-svelte-h="svelte-1dkoh1m">Install TRL and additional dependencies as follows:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->pip install --upgrade trl[quantization]<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1mvzre">Check for additional optional dependencies <a href="https://github.com/huggingface/trl/blob/main/pyproject.toml" rel="nofollow">here</a>.</p> <p 
data-svelte-h="svelte-1v3luk0">For scripts, you will also need an 🤗 Accelerate config (recommended for multi-gpu settings):</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->accelerate config <span class="hljs-comment"># will prompt you to define the training configuration</span><!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1lneeeh">This allows you to run scripts with <code>accelerate launch</code> in single or multi-GPU settings.</p> <h2 class="relative group"><a id="notebooks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#notebooks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" 
height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Notebooks</span></h2> <p data-svelte-h="svelte-5rbss">These notebooks are easier to run and are designed for quick experimentation with TRL. The list of notebooks can be found in the <a href="https://github.com/huggingface/trl/tree/main/examples/notebooks/" rel="nofollow"><code>trl/examples/notebooks/</code></a> directory.</p> <table data-svelte-h="svelte-l52ede"><thead><tr><th>Notebook</th> <th>Description</th> <th>Open in Colab</th></tr></thead> <tbody><tr><td><a href="https://github.com/huggingface/trl/tree/main/examples/notebooks/grpo_trl_lora_qlora.ipynb" rel="nofollow"><code>grpo_trl_lora_qlora.ipynb</code></a></td> <td>GRPO using QLoRA on free Colab</td> <td><a href="https://colab.research.google.com/github/huggingface/trl/blob/main/examples/notebooks/grpo_trl_lora_qlora.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></td></tr> <tr><td><a href="https://github.com/huggingface/trl/tree/main/examples/notebooks/grpo_agent.ipynb" rel="nofollow"><code>grpo_agent.ipynb</code></a></td> <td>GRPO for agent training</td> <td>Not available due to OOM with Colab GPUs</td></tr> <tr><td><a href="https://github.com/huggingface/trl/tree/main/examples/notebooks/grpo_rnj_1_instruct.ipynb" rel="nofollow"><code>grpo_rnj_1_instruct.ipynb</code></a></td> <td>GRPO rnj-1-instruct with QLoRA 
using TRL on Colab to add reasoning capabilities</td> <td><a href="https://colab.research.google.com/github/huggingface/trl/blob/main/examples/notebooks/grpo_rnj_1_instruct.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></td></tr> <tr><td><a href="https://github.com/huggingface/trl/tree/main/examples/notebooks/sft_ministral3_vl.ipynb" rel="nofollow"><code>sft_ministral3_vl.ipynb</code></a></td> <td>Supervised Fine-Tuning (SFT) Ministral 3 with QLoRA using TRL on free Colab</td> <td><a href="https://colab.research.google.com/github/huggingface/trl/blob/main/examples/notebooks/sft_ministral3_vl.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></td></tr> <tr><td><a href="https://github.com/huggingface/trl/tree/main/examples/notebooks/grpo_ministral3_vl.ipynb" rel="nofollow"><code>grpo_ministral3_vl.ipynb</code></a></td> <td>GRPO Ministral 3 with QLoRA using TRL on free Colab</td> <td><a href="https://colab.research.google.com/github/huggingface/trl/blob/main/examples/notebooks/grpo_ministral3_vl.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></td></tr> <tr><td><a href="https://github.com/huggingface/trl/tree/main/examples/notebooks/sft_nemotron_3.ipynb" rel="nofollow"><code>sft_nemotron_3.ipynb</code></a></td> <td>SFT with LoRA on NVIDIA Nemotron 3 models</td> <td><a href="https://colab.research.google.com/github/huggingface/trl/blob/main/examples/notebooks/sft_nemotron_3.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></td></tr> <tr><td><a href="https://github.com/huggingface/trl/tree/main/examples/notebooks/sft_trl_lora_qlora.ipynb" rel="nofollow"><code>sft_trl_lora_qlora.ipynb</code></a></td> <td>Supervised Fine-Tuning (SFT) using QLoRA on free Colab</td> <td><a 
href="https://colab.research.google.com/github/huggingface/trl/blob/main/examples/notebooks/sft_trl_lora_qlora.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></td></tr> <tr><td><a href="https://github.com/huggingface/trl/tree/main/examples/notebooks/sft_qwen_vl.ipynb" rel="nofollow"><code>sft_qwen_vl.ipynb</code></a></td> <td>Supervised Fine-Tuning (SFT) Qwen3-VL with QLoRA using TRL on free Colab</td> <td><a href="https://colab.research.google.com/github/huggingface/trl/blob/main/examples/notebooks/sft_qwen_vl.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></td></tr> <tr><td><a href="https://github.com/huggingface/trl/tree/main/examples/notebooks/sft_tool_calling.ipynb" rel="nofollow"><code>sft_tool_calling.ipynb</code></a></td> <td>Teaching tool calling to a model without native tool-calling support using SFT with QLoRA</td> <td><a href="https://colab.research.google.com/github/huggingface/trl/blob/main/examples/notebooks/sft_tool_calling.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></td></tr> <tr><td><a href="https://github.com/huggingface/trl/tree/main/examples/notebooks/grpo_qwen3_vl.ipynb" rel="nofollow"><code>grpo_qwen3_vl.ipynb</code></a></td> <td>GRPO Qwen3-VL with QLoRA using TRL on free Colab</td> <td><a href="https://colab.research.google.com/github/huggingface/trl/blob/main/examples/notebooks/grpo_qwen3_vl.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></td></tr></tbody></table> <h3 class="relative group"><a id="openenv-notebooks" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#openenv-notebooks"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>OpenEnv Notebooks</span></h3> <p data-svelte-h="svelte-ra53gr">These notebooks demonstrate how to train models with <a href="openenv">OpenEnv</a> environments using <a href="/docs/trl/pr_5607/en/gspo_token#trl.GRPOTrainer">GRPOTrainer</a>’s <code>environment_factory</code>. The BrowserGym notebook uses the lower-level <code>rollout_func</code> API instead. 
See the <a href="openenv">OpenEnv Integration</a> guide for more details.</p> <table data-svelte-h="svelte-15a5xeu"><thead><tr><th>Notebook</th> <th>Description</th> <th>Open in Colab</th></tr></thead> <tbody><tr><td><a href="https://github.com/huggingface/trl/tree/main/examples/notebooks/openenv_wordle_grpo.ipynb" rel="nofollow"><code>openenv_wordle_grpo.ipynb</code></a></td> <td>GRPO to play Wordle on an OpenEnv environment</td> <td><a href="https://colab.research.google.com/github/huggingface/trl/blob/main/examples/notebooks/openenv_wordle_grpo.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></td></tr> <tr><td><a href="https://github.com/huggingface/trl/tree/main/examples/notebooks/openenv_sudoku_grpo.ipynb" rel="nofollow"><code>openenv_sudoku_grpo.ipynb</code></a></td> <td>GRPO to play Sudoku on an OpenEnv environment</td> <td><a href="https://colab.research.google.com/github/huggingface/trl/blob/main/examples/notebooks/openenv_sudoku_grpo.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></td></tr> <tr><td><a href="https://github.com/huggingface/trl/tree/main/examples/notebooks/grpo_functiongemma_browsergym_openenv.ipynb" rel="nofollow"><code>grpo_functiongemma_browsergym_openenv.ipynb</code></a></td> <td>GRPO on FunctionGemma in the BrowserGym environment</td> <td><a href="https://colab.research.google.com/github/huggingface/trl/blob/main/examples/notebooks/grpo_functiongemma_browsergym_openenv.ipynb" rel="nofollow"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a></td></tr></tbody></table> <h2 class="relative group"><a id="scripts" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#scripts"><span><svg class="" xmlns="http://www.w3.org/2000/svg" 
xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Scripts</span></h2> <p data-svelte-h="svelte-12sunt7">Scripts are maintained in the <a href="https://github.com/huggingface/trl/blob/main/trl/scripts" rel="nofollow"><code>trl/scripts</code></a> and <a href="https://github.com/huggingface/trl/blob/main/examples/scripts" rel="nofollow"><code>examples/scripts</code></a> directories. 
They show how to use different trainers such as <a href="/docs/trl/pr_5607/en/sft_trainer#trl.SFTTrainer">SFTTrainer</a>, <code>PPOTrainer</code>, <a href="/docs/trl/pr_5607/en/bema_for_reference_model#trl.DPOTrainer">DPOTrainer</a>, <a href="/docs/trl/pr_5607/en/gspo_token#trl.GRPOTrainer">GRPOTrainer</a>, and more.</p> <table data-svelte-h="svelte-1q9shcc"><thead><tr><th>File</th> <th>Description</th></tr></thead> <tbody><tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/bco.py" rel="nofollow"><code>examples/scripts/bco.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/kto_trainer#trl.KTOTrainer">experimental.kto.KTOTrainer</a> with the BCO loss to fine-tune a model to increase instruction-following, truthfulness, honesty, and helpfulness using the <a href="https://huggingface.co/datasets/openbmb/UltraFeedback" rel="nofollow">openbmb/UltraFeedback</a> dataset.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/cpo.py" rel="nofollow"><code>examples/scripts/cpo.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/cpo_trainer#trl.experimental.cpo.CPOTrainer">experimental.cpo.CPOTrainer</a> to fine-tune a model to increase helpfulness and harmlessness using the <a href="https://huggingface.co/datasets/Anthropic/hh-rlhf" rel="nofollow">Anthropic/hh-rlhf</a> dataset.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/trl/scripts/dpo.py" rel="nofollow"><code>trl/scripts/dpo.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/bema_for_reference_model#trl.DPOTrainer">DPOTrainer</a> to fine-tune a model.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/dpo_vlm.py" rel="nofollow"><code>examples/scripts/dpo_vlm.py</code></a></td> <td>This script shows how to use the <a 
href="/docs/trl/pr_5607/en/bema_for_reference_model#trl.DPOTrainer">DPOTrainer</a> to fine-tune a Vision Language Model to reduce hallucinations using the <a href="https://huggingface.co/datasets/openbmb/RLAIF-V-Dataset" rel="nofollow">openbmb/RLAIF-V-Dataset</a> dataset.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/gkd.py" rel="nofollow"><code>examples/scripts/gkd.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/gkd_trainer#trl.experimental.gkd.GKDTrainer">experimental.gkd.GKDTrainer</a> to fine-tune a model.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/trl/scripts/grpo.py" rel="nofollow"><code>trl/scripts/grpo.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/gspo_token#trl.GRPOTrainer">GRPOTrainer</a> to fine-tune a model.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/trl/scripts/grpo_agent.py" rel="nofollow"><code>trl/scripts/grpo_agent.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/gspo_token#trl.GRPOTrainer">GRPOTrainer</a> to fine-tune a model to enable agentic usage.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/grpo_vlm.py" rel="nofollow"><code>examples/scripts/grpo_vlm.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/gspo_token#trl.GRPOTrainer">GRPOTrainer</a> to fine-tune a multimodal model for reasoning using the <a href="https://huggingface.co/datasets/lmms-lab/multimodal-open-r1-8k-verified" rel="nofollow">lmms-lab/multimodal-open-r1-8k-verified</a> dataset.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/gspo.py" rel="nofollow"><code>examples/scripts/gspo.py</code></a></td> <td>This script shows how to use GSPO via the <a href="/docs/trl/pr_5607/en/gspo_token#trl.GRPOTrainer">GRPOTrainer</a> to fine-tune a model for reasoning 
using the <a href="https://huggingface.co/datasets/AI-MO/NuminaMath-TIR" rel="nofollow">AI-MO/NuminaMath-TIR</a> dataset.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/gspo_vlm.py" rel="nofollow"><code>examples/scripts/gspo_vlm.py</code></a></td> <td>This script shows how to use GSPO via the <a href="/docs/trl/pr_5607/en/gspo_token#trl.GRPOTrainer">GRPOTrainer</a> to fine-tune a multimodal model for reasoning using the <a href="https://huggingface.co/datasets/lmms-lab/multimodal-open-r1-8k-verified" rel="nofollow">lmms-lab/multimodal-open-r1-8k-verified</a> dataset.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/kto.py" rel="nofollow"><code>examples/scripts/kto.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/kto_trainer#trl.KTOTrainer">experimental.kto.KTOTrainer</a> to fine-tune a model.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/mpo_vlm.py" rel="nofollow"><code>examples/scripts/mpo_vlm.py</code></a></td> <td>This script shows how to use MPO via the <a href="/docs/trl/pr_5607/en/bema_for_reference_model#trl.DPOTrainer">DPOTrainer</a> to align a model based on preferences using the <a href="https://huggingface.co/datasets/HuggingFaceH4/rlaif-v_formatted" rel="nofollow">HuggingFaceH4/rlaif-v_formatted</a> dataset and a set of loss weights.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/nash_md.py" rel="nofollow"><code>examples/scripts/nash_md.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/nash_md_trainer#trl.experimental.nash_md.NashMDTrainer">experimental.nash_md.NashMDTrainer</a> to fine-tune a model.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/nemo_gym/train_multi_environment.py" 
rel="nofollow"><code>examples/scripts/nemo_gym/train_multi_environment.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/gspo_token#trl.GRPOTrainer">GRPOTrainer</a> to train language models in NVIDIA NeMo-Gym environments. Supports multi-turn and tool calling environments, and multi-environment training. See the <a href="nemo_gym">NeMo-Gym Integration</a> guide for setup and usage.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/online_dpo.py" rel="nofollow"><code>examples/scripts/online_dpo.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/online_dpo_trainer#trl.experimental.online_dpo.OnlineDPOTrainer">experimental.online_dpo.OnlineDPOTrainer</a> to fine-tune a model.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/online_dpo_vlm.py" rel="nofollow"><code>examples/scripts/online_dpo_vlm.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/online_dpo_trainer#trl.experimental.online_dpo.OnlineDPOTrainer">experimental.online_dpo.OnlineDPOTrainer</a> to fine-tune a Vision Language Model.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/orpo.py" rel="nofollow"><code>examples/scripts/orpo.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/orpo_trainer#trl.experimental.orpo.ORPOTrainer">experimental.orpo.ORPOTrainer</a> to fine-tune a model to increase helpfulness and harmlessness using the <a href="https://huggingface.co/datasets/Anthropic/hh-rlhf" rel="nofollow">Anthropic/hh-rlhf</a> dataset.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/ppo/ppo.py" rel="nofollow"><code>examples/scripts/ppo/ppo.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/ppo_trainer#trl.experimental.ppo.PPOTrainer">experimental.ppo.PPOTrainer</a> to fine-tune 
a model to improve its ability to continue text with positive sentiment or physically descriptive language.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/ppo/ppo_tldr.py" rel="nofollow"><code>examples/scripts/ppo/ppo_tldr.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/ppo_trainer#trl.experimental.ppo.PPOTrainer">experimental.ppo.PPOTrainer</a> to fine-tune a model to improve its ability to generate TL;DR summaries.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/prm.py" rel="nofollow"><code>examples/scripts/prm.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/prm_trainer#trl.experimental.prm.PRMTrainer">experimental.prm.PRMTrainer</a> to fine-tune a Process-supervised Reward Model (PRM).</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/reward_modeling.py" rel="nofollow"><code>examples/scripts/reward_modeling.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/reward_trainer#trl.RewardTrainer">RewardTrainer</a> to train an Outcome Reward Model (ORM) on your own dataset.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/rloo.py" rel="nofollow"><code>examples/scripts/rloo.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/rloo_trainer#trl.RLOOTrainer">RLOOTrainer</a> to fine-tune a model to improve its ability to solve math questions.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/trl/scripts/sft.py" rel="nofollow"><code>trl/scripts/sft.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/sft_trainer#trl.SFTTrainer">SFTTrainer</a> to fine-tune a model.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/sft_gemma3.py" 
rel="nofollow"><code>examples/scripts/sft_gemma3.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/sft_trainer#trl.SFTTrainer">SFTTrainer</a> to fine-tune a Gemma 3 model.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/sft_nemotron_3.py" rel="nofollow"><code>examples/scripts/sft_nemotron_3.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/sft_trainer#trl.SFTTrainer">SFTTrainer</a> to fine-tune an NVIDIA Nemotron 3 model.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/sft_tiny_aya_tool_calling.py" rel="nofollow"><code>examples/scripts/sft_tiny_aya_tool_calling.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/sft_trainer#trl.SFTTrainer">SFTTrainer</a> to teach tool calling to a model without native tool-calling support using the <a href="https://huggingface.co/datasets/bebechien/SimpleToolCalling" rel="nofollow">bebechien/SimpleToolCalling</a> dataset.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/sft_video_llm.py" rel="nofollow"><code>examples/scripts/sft_video_llm.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/sft_trainer#trl.SFTTrainer">SFTTrainer</a> to fine-tune a Video Language Model.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/sft_vlm.py" rel="nofollow"><code>examples/scripts/sft_vlm.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/sft_trainer#trl.SFTTrainer">SFTTrainer</a> to fine-tune a Vision Language Model in a chat setting. 
The script has only been tested with <a href="https://huggingface.co/llava-hf/llava-1.5-7b-hf" rel="nofollow">LLaVA 1.5</a>, <a href="https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf" rel="nofollow">LLaVA 1.6</a>, and <a href="https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct" rel="nofollow">Llama-3.2-11B-Vision-Instruct</a> models, so users may see unexpected behaviour in other model architectures.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/sft_vlm_gemma3.py" rel="nofollow"><code>examples/scripts/sft_vlm_gemma3.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/sft_trainer#trl.SFTTrainer">SFTTrainer</a> to fine-tune a Gemma 3 model on vision to text tasks.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/sft_vlm_smol_vlm.py" rel="nofollow"><code>examples/scripts/sft_vlm_smol_vlm.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/sft_trainer#trl.SFTTrainer">SFTTrainer</a> to fine-tune a SmolVLM model.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/xpo.py" rel="nofollow"><code>examples/scripts/xpo.py</code></a></td> <td>This script shows how to use the <a href="/docs/trl/pr_5607/en/xpo_trainer#trl.experimental.xpo.XPOTrainer">experimental.xpo.XPOTrainer</a> to fine-tune a model.</td></tr></tbody></table> <h3 class="relative group"><a id="openenv-scripts" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#openenv-scripts"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 
1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>OpenEnv Scripts</span></h3> <p data-svelte-h="svelte-xzepzw">These scripts demonstrate how to train models with <a href="openenv">OpenEnv</a> environments using <a href="/docs/trl/pr_5607/en/gspo_token#trl.GRPOTrainer">GRPOTrainer</a>’s <code>environment_factory</code>. See the <a href="openenv">OpenEnv Integration</a> guide for more details.</p> <table data-svelte-h="svelte-cs0cqt"><thead><tr><th>File</th> <th>Description</th></tr></thead> <tbody><tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/openenv/echo.py" rel="nofollow"><code>examples/scripts/openenv/echo.py</code></a></td> <td>GRPO training with the Echo environment (minimal example).</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/openenv/wordle.py" rel="nofollow"><code>examples/scripts/openenv/wordle.py</code></a></td> <td>GRPO training with the Wordle (TextArena) environment.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/openenv/catch.py" rel="nofollow"><code>examples/scripts/openenv/catch.py</code></a></td> <td>GRPO training with the Catch (OpenSpiel) environment.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/openenv/sudoku.py" rel="nofollow"><code>examples/scripts/openenv/sudoku.py</code></a></td> <td>GRPO training with the Sudoku environment.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/openenv/multi_env.py" 
rel="nofollow"><code>examples/scripts/openenv/multi_env.py</code></a></td> <td>Multi-environment GRPO training: Wordle + Catch in the same training run.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/openenv/browsergym.py" rel="nofollow"><code>examples/scripts/openenv/browsergym.py</code></a></td> <td>GRPO training with the BrowserGym environment for VLMs.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/openenv/browsergym_llm.py" rel="nofollow"><code>examples/scripts/openenv/browsergym_llm.py</code></a></td> <td>GRPO training with the BrowserGym environment for LLMs.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/openenv/carla.py" rel="nofollow"><code>examples/scripts/openenv/carla.py</code></a></td> <td>GRPO training with the CARLA environment for autonomous driving.</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/openenv/carla_vlm.py" rel="nofollow"><code>examples/scripts/openenv/carla_vlm.py</code></a></td> <td>GRPO training with CARLA for VLMs with multimodal tool responses (camera images).</td></tr> <tr><td><a href="https://github.com/huggingface/trl/blob/main/examples/scripts/openenv/carla_vlm_gemma.py" rel="nofollow"><code>examples/scripts/openenv/carla_vlm_gemma.py</code></a></td> <td>GRPO training with CARLA for Gemma 3 with multimodal tool responses (camera images).</td></tr></tbody></table> <h2 class="relative group"><a id="distributed-training-for-scripts" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#distributed-training-for-scripts"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 
88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Distributed Training (for scripts)</span></h2> <p data-svelte-h="svelte-1b16zas">You can run scripts on multiple GPUs with 🤗 Accelerate:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->accelerate launch --config_file=examples/accelerate_configs/multi_gpu.yaml --num_processes {NUM_GPUS} 
path_to_script.py --all_arguments_of_the_script<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-142p8w8">For DeepSpeed ZeRO-{1,2,3}:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->accelerate launch --config_file=examples/accelerate_configs/deepspeed_zero{1,2,3}.yaml --num_processes {NUM_GPUS} path_to_script.py --all_arguments_of_the_script<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-cgbim7">Adjust <code>NUM_GPUS</code> and <code>--all_arguments_of_the_script</code> as needed.</p> <a class="!text-gray-400 !no-underline text-sm flex items-center not-prose mt-4" href="https://github.com/huggingface/trl/blob/main/docs/source/example_overview.md" target="_blank"><svg class="mr-1" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" 
preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M31,16l-7,7l-1.41-1.41L28.17,16l-5.58-5.59L24,9l7,7z"></path><path d="M1,16l7-7l1.41,1.41L3.83,16l5.58,5.59L8,23l-7-7z"></path><path d="M12.419,25.484L17.639,6.552l1.932,0.518L14.351,26.002z"></path></svg> <span data-svelte-h="svelte-zjs2n5"><span class="underline">Update</span> on GitHub</span></a> <p></p> | |
<!-- SvelteKit client bootstrap: registers asset/base paths, then hydrates
     this page by loading the start/app entry modules. Generated by the docs
     build; the pipe-table extraction artifacts that corrupted this block
     have been removed so the markup is valid again. -->
<script>
	{
		// Global runtime config read by the SvelteKit client entry.
		__sveltekit_1hqaf25 = {
			assets: "/docs/trl/pr_5607/en",
			base: "/docs/trl/pr_5607/en",
			env: {}
		};

		// Hydration root: the element containing this inline script.
		const element = document.currentScript.parentElement;

		// Serialized load-function data for the matched route nodes (none here).
		const data = [null,null];

		// Load the kit runtime and the app manifest, then start hydration
		// for route nodes 0 (layout) and 17 (this page).
		Promise.all([
			import("/docs/trl/pr_5607/en/_app/immutable/entry/start.151d81bd.js"),
			import("/docs/trl/pr_5607/en/_app/immutable/entry/app.3d9a91c0.js")
		]).then(([kit, app]) => {
			kit.start(app, element, {
				node_ids: [0, 17],
				data,
				form: null,
				error: null
			});
		});
	}
</script>
Xet Storage Details
- Size:
- 42.8 kB
- Xet hash:
- b42c06d25af7df7284c89bd4f19ed7f88bec5951712683656262b8c655d5a1f9
·
Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.