Buckets:

rtrm's picture
download
raw
109 kB
<meta charset="utf-8" /><meta name="hf:doc:metadata" content="{&quot;title&quot;:&quot;Optimized Inference Deployment&quot;,&quot;local&quot;:&quot;optimized-inference-deployment&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;Framework Selection Guide&quot;,&quot;local&quot;:&quot;framework-selection-guide&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;Memory Management and Performance&quot;,&quot;local&quot;:&quot;memory-management-and-performance&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3},{&quot;title&quot;:&quot;Deployment and Integration&quot;,&quot;local&quot;:&quot;deployment-and-integration&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3}],&quot;depth&quot;:2},{&quot;title&quot;:&quot;Getting Started&quot;,&quot;local&quot;:&quot;getting-started&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;Installation and Basic Setup&quot;,&quot;local&quot;:&quot;installation-and-basic-setup&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3},{&quot;title&quot;:&quot;Basic Text Generation&quot;,&quot;local&quot;:&quot;basic-text-generation&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3}],&quot;depth&quot;:2},{&quot;title&quot;:&quot;Advanced Generation Control&quot;,&quot;local&quot;:&quot;advanced-generation-control&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;Token Selection and Sampling&quot;,&quot;local&quot;:&quot;token-selection-and-sampling&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3},{&quot;title&quot;:&quot;Controlling Repetition&quot;,&quot;local&quot;:&quot;controlling-repetition&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3},{&quot;title&quot;:&quot;Length Control and Stop Sequences&quot;,&quot;local&quot;:&quot;length-control-and-stop-sequences&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3}],&quot;depth&quot;:2},{&quot;title&quot;:&quot;Memory 
Management&quot;,&quot;local&quot;:&quot;memory-management&quot;,&quot;sections&quot;:[],&quot;depth&quot;:2},{&quot;title&quot;:&quot;Resources&quot;,&quot;local&quot;:&quot;resources&quot;,&quot;sections&quot;:[],&quot;depth&quot;:2}],&quot;depth&quot;:1}">
<link rel="modulepreload" href="/docs/course/pr_1069/en/_app/immutable/assets/0.e3b0c442.css">
<link rel="modulepreload" href="/docs/course/pr_1069/en/_app/immutable/entry/start.c5306bb2.js">
<link rel="modulepreload" href="/docs/course/pr_1069/en/_app/immutable/chunks/scheduler.37c15a92.js">
<link rel="modulepreload" href="/docs/course/pr_1069/en/_app/immutable/chunks/singletons.bc78d867.js">
<link rel="modulepreload" href="/docs/course/pr_1069/en/_app/immutable/chunks/index.18351ede.js">
<link rel="modulepreload" href="/docs/course/pr_1069/en/_app/immutable/chunks/paths.76894643.js">
<link rel="modulepreload" href="/docs/course/pr_1069/en/_app/immutable/entry/app.4264f5f8.js">
<link rel="modulepreload" href="/docs/course/pr_1069/en/_app/immutable/chunks/index.7cb9c9b8.js">
<link rel="modulepreload" href="/docs/course/pr_1069/en/_app/immutable/nodes/0.f5347c47.js">
<link rel="modulepreload" href="/docs/course/pr_1069/en/_app/immutable/chunks/each.e59479a4.js">
<link rel="modulepreload" href="/docs/course/pr_1069/en/_app/immutable/nodes/43.ec377b59.js">
<link rel="modulepreload" href="/docs/course/pr_1069/en/_app/immutable/chunks/Tip.d10b3fc9.js">
<link rel="modulepreload" href="/docs/course/pr_1069/en/_app/immutable/chunks/CodeBlock.abae2786.js">
<link rel="modulepreload" href="/docs/course/pr_1069/en/_app/immutable/chunks/getInferenceSnippets.f9350a3f.js">
<link rel="modulepreload" href="/docs/course/pr_1069/en/_app/immutable/chunks/stores.cb4752a8.js"><!-- HEAD_svelte-u9bgzb_START --><meta name="hf:doc:metadata" content="{&quot;title&quot;:&quot;Optimized Inference Deployment&quot;,&quot;local&quot;:&quot;optimized-inference-deployment&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;Framework Selection Guide&quot;,&quot;local&quot;:&quot;framework-selection-guide&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;Memory Management and Performance&quot;,&quot;local&quot;:&quot;memory-management-and-performance&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3},{&quot;title&quot;:&quot;Deployment and Integration&quot;,&quot;local&quot;:&quot;deployment-and-integration&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3}],&quot;depth&quot;:2},{&quot;title&quot;:&quot;Getting Started&quot;,&quot;local&quot;:&quot;getting-started&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;Installation and Basic Setup&quot;,&quot;local&quot;:&quot;installation-and-basic-setup&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3},{&quot;title&quot;:&quot;Basic Text Generation&quot;,&quot;local&quot;:&quot;basic-text-generation&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3}],&quot;depth&quot;:2},{&quot;title&quot;:&quot;Advanced Generation Control&quot;,&quot;local&quot;:&quot;advanced-generation-control&quot;,&quot;sections&quot;:[{&quot;title&quot;:&quot;Token Selection and Sampling&quot;,&quot;local&quot;:&quot;token-selection-and-sampling&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3},{&quot;title&quot;:&quot;Controlling Repetition&quot;,&quot;local&quot;:&quot;controlling-repetition&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3},{&quot;title&quot;:&quot;Length Control and Stop Sequences&quot;,&quot;local&quot;:&quot;length-control-and-stop-sequences&quot;,&quot;sections&quot;:[],&quot;depth&quot;:3}],&quot;depth&quot;:2},{&quot;title&quot;:&quot;Memory 
Management&quot;,&quot;local&quot;:&quot;memory-management&quot;,&quot;sections&quot;:[],&quot;depth&quot;:2},{&quot;title&quot;:&quot;Resources&quot;,&quot;local&quot;:&quot;resources&quot;,&quot;sections&quot;:[],&quot;depth&quot;:2}],&quot;depth&quot;:1}"><!-- HEAD_svelte-u9bgzb_END --> <p></p> <h1 class="relative group"><a id="optimized-inference-deployment" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#optimized-inference-deployment"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Optimized Inference Deployment</span></h1> <p data-svelte-h="svelte-1c3ix4n">In this section, we’ll explore advanced frameworks for optimizing LLM deployments: Text Generation Inference (TGI), vLLM, and llama.cpp. These applications are primarily used in production environments to serve LLMs to users. 
This section focuses on how to deploy these frameworks in production rather than how to use them for inference on a single machine.</p> <p data-svelte-h="svelte-1xh6xvz">We’ll cover how these tools maximize inference efficiency and simplify production deployments of Large Language Models.</p> <h2 class="relative group"><a id="framework-selection-guide" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#framework-selection-guide"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Framework Selection Guide</span></h2> <p data-svelte-h="svelte-xxm0i8">TGI, vLLM, and llama.cpp serve similar purposes but have distinct characteristics that make them better suited for different use cases. 
Let’s look at the key differences between them, focusing on performance and integration.</p> <h3 class="relative group"><a id="memory-management-and-performance" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#memory-management-and-performance"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Memory Management and Performance</span></h3> <p data-svelte-h="svelte-ms8d3b"><strong>TGI</strong> is designed to be stable and predictable in production, using fixed sequence lengths to keep memory usage consistent. TGI manages memory using Flash Attention 2 and continuous batching techniques. This means it can process attention calculations very efficiently and keep the GPU busy by constantly feeding it work. 
The system can move parts of the model between CPU and GPU when needed, which helps handle larger models.</p> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/flash-attn.png" alt="Flash Attention"> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p data-svelte-h="svelte-1rkqssk">Flash Attention is a technique that optimizes the attention mechanism in transformer models by addressing memory bandwidth bottlenecks. As discussed earlier in <a href="/course/chapter1/8">Chapter 1.8</a>, the attention mechanism has quadratic complexity and memory usage, making it inefficient for long sequences.</p> <p data-svelte-h="svelte-jnwo3v">The key innovation is in how it manages memory transfers between High Bandwidth Memory (HBM) and faster SRAM cache. Traditional attention repeatedly transfers data between HBM and SRAM, creating bottlenecks by leaving the GPU idle. Flash Attention loads data once into SRAM and performs all calculations there, minimizing expensive memory transfers.</p> <p data-svelte-h="svelte-9afbfq">While the benefits are most significant during training, Flash Attention’s reduced VRAM usage and improved efficiency make it valuable for inference as well, enabling faster and more scalable LLM serving.</p></div> <p data-svelte-h="svelte-11etipa"><strong>vLLM</strong> takes a different approach by using PagedAttention. Just like how a computer manages its memory in pages, vLLM splits the model’s memory into smaller blocks. This clever system means it can handle different-sized requests more flexibly and doesn’t waste memory space. 
It’s particularly good at sharing memory between different requests and reduces memory fragmentation, which makes the whole system more efficient.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p data-svelte-h="svelte-121sini">PagedAttention is a technique that addresses another critical bottleneck in LLM inference: KV cache memory management. As discussed in <a href="/course/chapter1/8">Chapter 1.8</a>, during text generation, the model stores attention keys and values (KV cache) for each generated token to reduce redundant computations. The KV cache can become enormous, especially with long sequences or multiple concurrent requests.</p> <p data-svelte-h="svelte-1dh20ya">vLLM’s key innovation lies in how it manages this cache:</p> <ol data-svelte-h="svelte-af7t9r"><li><strong>Memory Paging</strong>: Instead of treating the KV cache as one large block, it’s divided into fixed-size “pages” (similar to virtual memory in operating systems).</li> <li><strong>Non-contiguous Storage</strong>: Pages don’t need to be stored contiguously in GPU memory, allowing for more flexible memory allocation.</li> <li><strong>Page Table Management</strong>: A page table tracks which pages belong to which sequence, enabling efficient lookup and access.</li> <li><strong>Memory Sharing</strong>: For operations like parallel sampling, pages storing the KV cache for the prompt can be shared across multiple sequences.</li></ol> <p data-svelte-h="svelte-15futu5">The PagedAttention approach can lead to up to 24x higher throughput compared to traditional methods, making it a game-changer for production LLM deployments. 
If you want to go really deep into how PagedAttention works, you can read <a href="https://docs.vllm.ai/en/latest/design/kernel/paged_attention.html" rel="nofollow">the guide from the vLLM documentation</a>.</p></div> <p data-svelte-h="svelte-19a6l8z"><strong>llama.cpp</strong> is a highly optimized C/C++ implementation originally designed for running LLaMA models on consumer hardware. It focuses on CPU efficiency with optional GPU acceleration and is ideal for resource-constrained environments. llama.cpp uses quantization techniques to reduce model size and memory requirements while maintaining good performance. It implements optimized kernels for various CPU architectures and supports basic KV cache management for efficient token generation.</p> <div class="course-tip bg-gradient-to-br dark:bg-gradient-to-r before:border-green-500 dark:before:border-green-800 from-green-50 dark:from-gray-900 to-white dark:to-gray-950 border border-green-50 text-green-700 dark:text-gray-400"><p data-svelte-h="svelte-7sflxc">Quantization in llama.cpp reduces the precision of model weights from 32-bit or 16-bit floating point to lower precision formats like 8-bit integers (INT8), 4-bit, or even lower. 
This significantly reduces memory usage and improves inference speed with minimal quality loss.</p> <p data-svelte-h="svelte-wl8vzm">Key quantization features in llama.cpp include:</p> <ol data-svelte-h="svelte-m0ejls"><li><strong>Multiple Quantization Levels</strong>: Supports 8-bit, 4-bit, 3-bit, and even 2-bit quantization</li> <li><strong>GGML/GGUF Format</strong>: Uses custom tensor formats optimized for quantized inference</li> <li><strong>Mixed Precision</strong>: Can apply different quantization levels to different parts of the model</li> <li><strong>Hardware-Specific Optimizations</strong>: Includes optimized code paths for various CPU architectures (AVX2, AVX-512, NEON)</li></ol> <p data-svelte-h="svelte-gau5ug">This approach enables running billion-parameter models on consumer hardware with limited memory, making it perfect for local deployments and edge devices.</p></div> <h3 class="relative group"><a id="deployment-and-integration" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#deployment-and-integration"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Deployment and Integration</span></h3> <p 
data-svelte-h="svelte-o1p1av">Let’s move on to the deployment and integration differences between the frameworks.</p> <p data-svelte-h="svelte-l34lqu"><strong>TGI</strong> excels in enterprise-level deployment with its production-ready features. It comes with built-in Kubernetes support and includes everything you need for running in production, like monitoring through Prometheus and Grafana, automatic scaling, and comprehensive safety features. The system also includes enterprise-grade logging and various protective measures like content filtering and rate limiting to keep your deployment secure and stable.</p> <p data-svelte-h="svelte-1isy26s"><strong>vLLM</strong> takes a more flexible, developer-friendly approach to deployment. It’s built with Python at its core and can easily replace OpenAI’s API in your existing applications. The framework focuses on delivering raw performance and can be customized to fit your specific needs. It works particularly well with Ray for managing clusters, making it a great choice when you need high performance and adaptability.</p> <p data-svelte-h="svelte-1gsq6x1"><strong>llama.cpp</strong> prioritizes simplicity and portability. Its server implementation is lightweight and can run on a wide range of hardware, from powerful servers to consumer laptops and even some high-end mobile devices. With minimal dependencies and a simple C/C++ core, it’s easy to deploy in environments where installing Python frameworks would be challenging. 
The server provides an OpenAI-compatible API while maintaining a much smaller resource footprint than other solutions.</p> <h2 class="relative group"><a id="getting-started" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#getting-started"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Getting Started</span></h2> <p data-svelte-h="svelte-1k0ewbc">Let’s explore how to use these frameworks for deploying LLMs, starting with installation and basic setup.</p> <h3 class="relative group"><a id="installation-and-basic-setup" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#installation-and-basic-setup"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 
1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Installation and Basic Setup</span></h3> <div class="flex space-x-2 items-center my-1.5 mr-8 h-7 !pl-0 -mx-3 md:mx-0"></div> <div class="language-select">&lt;hfoption value=&quot;tgi&quot; label=&quot;TGI&quot;&gt;
<p data-svelte-h="svelte-1xscn0n">TGI is easy to install and use, with deep integration into the Hugging Face ecosystem.</p> <p data-svelte-h="svelte-1g20rmd">First, launch the TGI server using Docker:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->docker run --gpus all \
--shm-size 1g \
-p 8080:80 \
-v ~/.cache/huggingface:/data \
ghcr.io/huggingface/text-generation-inference:latest \
--model-id HuggingFaceTB/SmolLM2-360M-Instruct<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-168g8u0">Then interact with it using Hugging Face’s InferenceClient:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> InferenceClient
<span class="hljs-comment"># Initialize client pointing to TGI endpoint</span>
client = InferenceClient(
model=<span class="hljs-string">&quot;http://localhost:8080&quot;</span>, <span class="hljs-comment"># URL to the TGI server</span>
)
<span class="hljs-comment"># Text generation</span>
response = client.text_generation(
<span class="hljs-string">&quot;Tell me a story&quot;</span>,
max_new_tokens=<span class="hljs-number">100</span>,
temperature=<span class="hljs-number">0.7</span>,
top_p=<span class="hljs-number">0.95</span>,
details=<span class="hljs-literal">True</span>,
stop_sequences=[],
)
<span class="hljs-built_in">print</span>(response.generated_text)
<span class="hljs-comment"># For chat format</span>
response = client.chat_completion(
messages=[
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;system&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;You are a helpful assistant.&quot;</span>},
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;user&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;Tell me a story&quot;</span>},
],
max_tokens=<span class="hljs-number">100</span>,
temperature=<span class="hljs-number">0.7</span>,
top_p=<span class="hljs-number">0.95</span>,
)
<span class="hljs-built_in">print</span>(response.choices[<span class="hljs-number">0</span>].message.content)<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1vrpxju">Alternatively, you can use the OpenAI client:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> openai <span class="hljs-keyword">import</span> OpenAI
<span class="hljs-comment"># Initialize client pointing to TGI endpoint</span>
client = OpenAI(
base_url=<span class="hljs-string">&quot;http://localhost:8080/v1&quot;</span>, <span class="hljs-comment"># Make sure to include /v1</span>
api_key=<span class="hljs-string">&quot;not-needed&quot;</span>, <span class="hljs-comment"># TGI doesn&#x27;t require an API key by default</span>
)
<span class="hljs-comment"># Chat completion</span>
response = client.chat.completions.create(
model=<span class="hljs-string">&quot;HuggingFaceTB/SmolLM2-360M-Instruct&quot;</span>,
messages=[
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;system&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;You are a helpful assistant.&quot;</span>},
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;user&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;Tell me a story&quot;</span>},
],
max_tokens=<span class="hljs-number">100</span>,
temperature=<span class="hljs-number">0.7</span>,
top_p=<span class="hljs-number">0.95</span>,
)
<span class="hljs-built_in">print</span>(response.choices[<span class="hljs-number">0</span>].message.content)<!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;
&lt;hfoption value=&quot;llama.cpp&quot; label=&quot;llama.cpp&quot;&gt;
<p data-svelte-h="svelte-1uzfhnr">llama.cpp is easy to install and use, requiring minimal dependencies and supporting both CPU and GPU inference.</p> <p data-svelte-h="svelte-1xrr3cs">First, install and build llama.cpp:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-comment"># Clone the repository</span>
git <span class="hljs-built_in">clone</span> https://github.com/ggerganov/llama.cpp
<span class="hljs-built_in">cd</span> llama.cpp
<span class="hljs-comment"># Build the project</span>
make
<span class="hljs-comment"># Download the SmolLM2-1.7B-Instruct-GGUF model</span>
curl -L -O https://huggingface.co/HuggingFaceTB/SmolLM2-1.7B-Instruct-GGUF/resolve/main/smollm2-1.7b-instruct.Q4_K_M.gguf<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-9xyw1x">Then, launch the server (with OpenAI API compatibility):</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-comment"># Start the server</span>
./server \
-m smollm2-1.7b-instruct.Q4_K_M.gguf \
--host 0.0.0.0 \
--port 8080 \
-c 4096 \
--n-gpu-layers 0 <span class="hljs-comment"># Set to a higher number to use GPU</span><!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-5g6wzi">Interact with the server using Hugging Face’s InferenceClient:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> InferenceClient
<span class="hljs-comment"># Initialize client pointing to llama.cpp server</span>
client = InferenceClient(
model=<span class="hljs-string">&quot;http://localhost:8080/v1&quot;</span>, <span class="hljs-comment"># URL to the llama.cpp server</span>
token=<span class="hljs-string">&quot;sk-no-key-required&quot;</span>, <span class="hljs-comment"># llama.cpp server requires this placeholder</span>
)
<span class="hljs-comment"># Text generation</span>
response = client.text_generation(
<span class="hljs-string">&quot;Tell me a story&quot;</span>,
max_new_tokens=<span class="hljs-number">100</span>,
temperature=<span class="hljs-number">0.7</span>,
top_p=<span class="hljs-number">0.95</span>,
details=<span class="hljs-literal">True</span>,
)
<span class="hljs-built_in">print</span>(response.generated_text)
<span class="hljs-comment"># For chat format</span>
response = client.chat_completion(
messages=[
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;system&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;You are a helpful assistant.&quot;</span>},
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;user&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;Tell me a story&quot;</span>},
],
max_tokens=<span class="hljs-number">100</span>,
temperature=<span class="hljs-number">0.7</span>,
top_p=<span class="hljs-number">0.95</span>,
)
<span class="hljs-built_in">print</span>(response.choices[<span class="hljs-number">0</span>].message.content)<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1vrpxju">Alternatively, you can use the OpenAI client:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> openai <span class="hljs-keyword">import</span> OpenAI
<span class="hljs-comment"># Initialize client pointing to llama.cpp server</span>
client = OpenAI(
base_url=<span class="hljs-string">&quot;http://localhost:8080/v1&quot;</span>,
api_key=<span class="hljs-string">&quot;sk-no-key-required&quot;</span>, <span class="hljs-comment"># llama.cpp server requires this placeholder</span>
)
<span class="hljs-comment"># Chat completion</span>
response = client.chat.completions.create(
model=<span class="hljs-string">&quot;smollm2-1.7b-instruct&quot;</span>, <span class="hljs-comment"># Model identifier can be anything as server only loads one model</span>
messages=[
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;system&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;You are a helpful assistant.&quot;</span>},
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;user&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;Tell me a story&quot;</span>},
],
max_tokens=<span class="hljs-number">100</span>,
temperature=<span class="hljs-number">0.7</span>,
top_p=<span class="hljs-number">0.95</span>,
)
<span class="hljs-built_in">print</span>(response.choices[<span class="hljs-number">0</span>].message.content)<!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;
&lt;hfoption value=&quot;vllm&quot; label=&quot;vLLM&quot;&gt;
<p data-svelte-h="svelte-1sx76te">vLLM is easy to install and use, with both OpenAI API compatibility and a native Python interface.</p> <p data-svelte-h="svelte-9ezhmp">First, launch the vLLM OpenAI-compatible server:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->python -m vllm.entrypoints.openai.api_server \
--model HuggingFaceTB/SmolLM2-360M-Instruct \
--host 0.0.0.0 \
--port 8000<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-168g8u0">Then interact with it using Hugging Face’s InferenceClient:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> InferenceClient
<span class="hljs-comment"># Initialize client pointing to vLLM endpoint</span>
client = InferenceClient(
model=<span class="hljs-string">&quot;http://localhost:8000/v1&quot;</span>, <span class="hljs-comment"># URL to the vLLM server</span>
)
<span class="hljs-comment"># Text generation</span>
response = client.text_generation(
<span class="hljs-string">&quot;Tell me a story&quot;</span>,
max_new_tokens=<span class="hljs-number">100</span>,
temperature=<span class="hljs-number">0.7</span>,
top_p=<span class="hljs-number">0.95</span>,
details=<span class="hljs-literal">True</span>,
)
<span class="hljs-built_in">print</span>(response.generated_text)
<span class="hljs-comment"># For chat format</span>
response = client.chat_completion(
messages=[
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;system&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;You are a helpful assistant.&quot;</span>},
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;user&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;Tell me a story&quot;</span>},
],
max_tokens=<span class="hljs-number">100</span>,
temperature=<span class="hljs-number">0.7</span>,
top_p=<span class="hljs-number">0.95</span>,
)
<span class="hljs-built_in">print</span>(response.choices[<span class="hljs-number">0</span>].message.content)<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1vrpxju">Alternatively, you can use the OpenAI client:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> openai <span class="hljs-keyword">import</span> OpenAI
<span class="hljs-comment"># Initialize client pointing to vLLM endpoint</span>
client = OpenAI(
base_url=<span class="hljs-string">&quot;http://localhost:8000/v1&quot;</span>,
api_key=<span class="hljs-string">&quot;not-needed&quot;</span>, <span class="hljs-comment"># vLLM doesn&#x27;t require an API key by default</span>
)
<span class="hljs-comment"># Chat completion</span>
response = client.chat.completions.create(
model=<span class="hljs-string">&quot;HuggingFaceTB/SmolLM2-360M-Instruct&quot;</span>,
messages=[
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;system&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;You are a helpful assistant.&quot;</span>},
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;user&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;Tell me a story&quot;</span>},
],
max_tokens=<span class="hljs-number">100</span>,
temperature=<span class="hljs-number">0.7</span>,
top_p=<span class="hljs-number">0.95</span>,
)
<span class="hljs-built_in">print</span>(response.choices[<span class="hljs-number">0</span>].message.content)<!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;</div> <h3 class="relative group"><a id="basic-text-generation" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#basic-text-generation"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Basic Text Generation</span></h3> <p data-svelte-h="svelte-yy57k8">Let’s look at examples of text generation with the frameworks:</p> <div class="flex space-x-2 items-center my-1.5 mr-8 h-7 !pl-0 -mx-3 md:mx-0"></div> <div class="language-select">&lt;hfoption value=&quot;tgi&quot; label=&quot;TGI&quot;&gt;
<p data-svelte-h="svelte-p4nmid">First, deploy TGI with advanced parameters:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->docker run --gpus all \
--shm-size 1g \
-p 8080:80 \
-v ~/.cache/huggingface:/data \
ghcr.io/huggingface/text-generation-inference:latest \
--model-id HuggingFaceTB/SmolLM2-360M-Instruct \
--max-total-tokens 4096 \
--max-input-length 3072 \
--max-batch-total-tokens 8192 \
--waiting-served-ratio 1.2<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-8cq2xt">Use the InferenceClient for flexible text generation:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> InferenceClient
client = InferenceClient(model=<span class="hljs-string">&quot;http://localhost:8080&quot;</span>)
<span class="hljs-comment"># Advanced parameters example</span>
response = client.chat_completion(
messages=[
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;system&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;You are a creative storyteller.&quot;</span>},
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;user&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;Write a creative story&quot;</span>},
],
temperature=<span class="hljs-number">0.8</span>,
max_tokens=<span class="hljs-number">200</span>,
top_p=<span class="hljs-number">0.95</span>,
)
<span class="hljs-built_in">print</span>(response.choices[<span class="hljs-number">0</span>].message.content)
<span class="hljs-comment"># Raw text generation</span>
response = client.text_generation(
<span class="hljs-string">&quot;Write a creative story about space exploration&quot;</span>,
max_new_tokens=<span class="hljs-number">200</span>,
temperature=<span class="hljs-number">0.8</span>,
top_p=<span class="hljs-number">0.95</span>,
repetition_penalty=<span class="hljs-number">1.1</span>,
do_sample=<span class="hljs-literal">True</span>,
details=<span class="hljs-literal">True</span>,
)
<span class="hljs-built_in">print</span>(response.generated_text)<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-gwduza">Or use the OpenAI client:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> openai <span class="hljs-keyword">import</span> OpenAI
client = OpenAI(base_url=<span class="hljs-string">&quot;http://localhost:8080/v1&quot;</span>, api_key=<span class="hljs-string">&quot;not-needed&quot;</span>)
<span class="hljs-comment"># Advanced parameters example</span>
response = client.chat.completions.create(
model=<span class="hljs-string">&quot;HuggingFaceTB/SmolLM2-360M-Instruct&quot;</span>,
messages=[
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;system&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;You are a creative storyteller.&quot;</span>},
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;user&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;Write a creative story&quot;</span>},
],
temperature=<span class="hljs-number">0.8</span>, <span class="hljs-comment"># Higher for more creativity</span>
)
<span class="hljs-built_in">print</span>(response.choices[<span class="hljs-number">0</span>].message.content)<!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;
&lt;hfoption value=&quot;llama.cpp&quot; label=&quot;llama.cpp&quot;&gt;
<p data-svelte-h="svelte-b43ol">For llama.cpp, you can set advanced parameters when launching the server:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->./server \
-m smollm2-1.7b-instruct.Q4_K_M.gguf \
--host 0.0.0.0 \
--port 8080 \
-c 4096 `<span class="hljs-comment"># Context size</span>` \
--threads 8 `<span class="hljs-comment"># CPU threads to use</span>` \
--batch-size 512 `<span class="hljs-comment"># Batch size for prompt evaluation</span>` \
--n-gpu-layers 0 <span class="hljs-comment"># GPU layers (0 = CPU only)</span><!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-u8i4ra">Use the InferenceClient:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> InferenceClient
client = InferenceClient(model=<span class="hljs-string">&quot;http://localhost:8080/v1&quot;</span>, token=<span class="hljs-string">&quot;sk-no-key-required&quot;</span>)
<span class="hljs-comment"># Advanced parameters example</span>
response = client.chat_completion(
messages=[
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;system&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;You are a creative storyteller.&quot;</span>},
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;user&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;Write a creative story&quot;</span>},
],
temperature=<span class="hljs-number">0.8</span>,
max_tokens=<span class="hljs-number">200</span>,
top_p=<span class="hljs-number">0.95</span>,
)
<span class="hljs-built_in">print</span>(response.choices[<span class="hljs-number">0</span>].message.content)
<span class="hljs-comment"># For direct text generation</span>
response = client.text_generation(
<span class="hljs-string">&quot;Write a creative story about space exploration&quot;</span>,
max_new_tokens=<span class="hljs-number">200</span>,
temperature=<span class="hljs-number">0.8</span>,
top_p=<span class="hljs-number">0.95</span>,
repetition_penalty=<span class="hljs-number">1.1</span>,
details=<span class="hljs-literal">True</span>,
)
<span class="hljs-built_in">print</span>(response.generated_text)<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1jxmijw">Or use the OpenAI client for generation with control over the sampling parameters:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> openai <span class="hljs-keyword">import</span> OpenAI
client = OpenAI(base_url=<span class="hljs-string">&quot;http://localhost:8080/v1&quot;</span>, api_key=<span class="hljs-string">&quot;sk-no-key-required&quot;</span>)
<span class="hljs-comment"># Advanced parameters example</span>
response = client.chat.completions.create(
model=<span class="hljs-string">&quot;smollm2-1.7b-instruct&quot;</span>,
messages=[
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;system&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;You are a creative storyteller.&quot;</span>},
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;user&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;Write a creative story&quot;</span>},
],
temperature=<span class="hljs-number">0.8</span>, <span class="hljs-comment"># Higher for more creativity</span>
top_p=<span class="hljs-number">0.95</span>, <span class="hljs-comment"># Nucleus sampling probability</span>
frequency_penalty=<span class="hljs-number">0.5</span>, <span class="hljs-comment"># Reduce repetition of frequent tokens</span>
presence_penalty=<span class="hljs-number">0.5</span>, <span class="hljs-comment"># Reduce repetition by penalizing tokens already present</span>
max_tokens=<span class="hljs-number">200</span>, <span class="hljs-comment"># Maximum generation length</span>
)
<span class="hljs-built_in">print</span>(response.choices[<span class="hljs-number">0</span>].message.content)<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1yadfem">You can also use llama.cpp’s native library for even more control:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-comment"># Using llama-cpp-python package for direct model access</span>
<span class="hljs-keyword">from</span> llama_cpp <span class="hljs-keyword">import</span> Llama
<span class="hljs-comment"># Load the model</span>
llm = Llama(
model_path=<span class="hljs-string">&quot;smollm2-1.7b-instruct.Q4_K_M.gguf&quot;</span>,
n_ctx=<span class="hljs-number">4096</span>, <span class="hljs-comment"># Context window size</span>
n_threads=<span class="hljs-number">8</span>, <span class="hljs-comment"># CPU threads</span>
n_gpu_layers=<span class="hljs-number">0</span>, <span class="hljs-comment"># GPU layers (0 = CPU only)</span>
)
<span class="hljs-comment"># Format prompt according to the model&#x27;s expected format</span>
prompt = <span class="hljs-string">&quot;&quot;&quot;&lt;|im_start|&gt;system
You are a creative storyteller.
&lt;|im_end|&gt;
&lt;|im_start|&gt;user
Write a creative story
&lt;|im_end|&gt;
&lt;|im_start|&gt;assistant
&quot;&quot;&quot;</span>
<span class="hljs-comment"># Generate response with precise parameter control</span>
output = llm(
prompt,
max_tokens=<span class="hljs-number">200</span>,
temperature=<span class="hljs-number">0.8</span>,
top_p=<span class="hljs-number">0.95</span>,
frequency_penalty=<span class="hljs-number">0.5</span>,
presence_penalty=<span class="hljs-number">0.5</span>,
stop=[<span class="hljs-string">&quot;&lt;|im_end|&gt;&quot;</span>],
)
<span class="hljs-built_in">print</span>(output[<span class="hljs-string">&quot;choices&quot;</span>][<span class="hljs-number">0</span>][<span class="hljs-string">&quot;text&quot;</span>])<!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;
&lt;hfoption value=&quot;vllm&quot; label=&quot;vLLM&quot;&gt;
<p data-svelte-h="svelte-jbmhsw">For advanced usage with vLLM, you can use the InferenceClient:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> huggingface_hub <span class="hljs-keyword">import</span> InferenceClient
client = InferenceClient(model=<span class="hljs-string">&quot;http://localhost:8000/v1&quot;</span>)
<span class="hljs-comment"># Advanced parameters example</span>
response = client.chat_completion(
messages=[
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;system&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;You are a creative storyteller.&quot;</span>},
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;user&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;Write a creative story&quot;</span>},
],
temperature=<span class="hljs-number">0.8</span>,
max_tokens=<span class="hljs-number">200</span>,
top_p=<span class="hljs-number">0.95</span>,
)
<span class="hljs-built_in">print</span>(response.choices[<span class="hljs-number">0</span>].message.content)
<span class="hljs-comment"># For direct text generation</span>
response = client.text_generation(
<span class="hljs-string">&quot;Write a creative story about space exploration&quot;</span>,
max_new_tokens=<span class="hljs-number">200</span>,
temperature=<span class="hljs-number">0.8</span>,
top_p=<span class="hljs-number">0.95</span>,
details=<span class="hljs-literal">True</span>,
)
<span class="hljs-built_in">print</span>(response.generated_text)<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-m9xqnx">You can also use the OpenAI client:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> openai <span class="hljs-keyword">import</span> OpenAI
client = OpenAI(base_url=<span class="hljs-string">&quot;http://localhost:8000/v1&quot;</span>, api_key=<span class="hljs-string">&quot;not-needed&quot;</span>)
<span class="hljs-comment"># Advanced parameters example</span>
response = client.chat.completions.create(
model=<span class="hljs-string">&quot;HuggingFaceTB/SmolLM2-360M-Instruct&quot;</span>,
messages=[
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;system&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;You are a creative storyteller.&quot;</span>},
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;user&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;Write a creative story&quot;</span>},
],
temperature=<span class="hljs-number">0.8</span>,
top_p=<span class="hljs-number">0.95</span>,
max_tokens=<span class="hljs-number">200</span>,
)
<span class="hljs-built_in">print</span>(response.choices[<span class="hljs-number">0</span>].message.content)<!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1xs95kf">vLLM also provides a native Python interface with fine-grained control:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> vllm <span class="hljs-keyword">import</span> LLM, SamplingParams
<span class="hljs-comment"># Initialize the model with advanced parameters</span>
llm = LLM(
model=<span class="hljs-string">&quot;HuggingFaceTB/SmolLM2-360M-Instruct&quot;</span>,
gpu_memory_utilization=<span class="hljs-number">0.85</span>,
max_num_batched_tokens=<span class="hljs-number">8192</span>,
max_num_seqs=<span class="hljs-number">256</span>,
block_size=<span class="hljs-number">16</span>,
)
<span class="hljs-comment"># Configure sampling parameters</span>
sampling_params = SamplingParams(
temperature=<span class="hljs-number">0.8</span>, <span class="hljs-comment"># Higher for more creativity</span>
top_p=<span class="hljs-number">0.95</span>, <span class="hljs-comment"># Consider top 95% probability mass</span>
max_tokens=<span class="hljs-number">100</span>, <span class="hljs-comment"># Maximum length</span>
presence_penalty=<span class="hljs-number">1.1</span>, <span class="hljs-comment"># Reduce repetition</span>
frequency_penalty=<span class="hljs-number">1.1</span>, <span class="hljs-comment"># Reduce repetition</span>
stop=[<span class="hljs-string">&quot;\n\n&quot;</span>, <span class="hljs-string">&quot;###&quot;</span>], <span class="hljs-comment"># Stop sequences</span>
)
<span class="hljs-comment"># Generate text</span>
prompt = <span class="hljs-string">&quot;Write a creative story&quot;</span>
outputs = llm.generate(prompt, sampling_params)
<span class="hljs-built_in">print</span>(outputs[<span class="hljs-number">0</span>].outputs[<span class="hljs-number">0</span>].text)
<span class="hljs-comment"># For chat-style interactions</span>
chat_prompt = [
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;system&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;You are a creative storyteller.&quot;</span>},
{<span class="hljs-string">&quot;role&quot;</span>: <span class="hljs-string">&quot;user&quot;</span>, <span class="hljs-string">&quot;content&quot;</span>: <span class="hljs-string">&quot;Write a creative story&quot;</span>},
]
formatted_prompt = llm.get_tokenizer().apply_chat_template(chat_prompt, tokenize=<span class="hljs-literal">False</span>, add_generation_prompt=<span class="hljs-literal">True</span>) <span class="hljs-comment"># Uses model&#x27;s chat template</span>
outputs = llm.generate(formatted_prompt, sampling_params)
<span class="hljs-built_in">print</span>(outputs[<span class="hljs-number">0</span>].outputs[<span class="hljs-number">0</span>].text)<!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;</div> <h2 class="relative group"><a id="advanced-generation-control" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#advanced-generation-control"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Advanced Generation Control</span></h2> <h3 class="relative group"><a id="token-selection-and-sampling" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#token-selection-and-sampling"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 
40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Token Selection and Sampling</span></h3> <p data-svelte-h="svelte-1jdaj55">The process of generating text involves selecting the next token at each step. This selection process can be controlled through various parameters:</p> <ol data-svelte-h="svelte-1j60hyx"><li><strong>Raw Logits</strong>: The initial output probabilities for each token</li> <li><strong>Temperature</strong>: Controls randomness in selection (higher = more creative)</li> <li><strong>Top-p (Nucleus) Sampling</strong>: Filters to top tokens making up X% of probability mass</li> <li><strong>Top-k Filtering</strong>: Limits selection to k most likely tokens</li></ol> <p data-svelte-h="svelte-nakymk">Here’s how to configure these parameters:</p> <div class="flex space-x-2 items-center my-1.5 mr-8 h-7 !pl-0 -mx-3 md:mx-0"></div> <div class="language-select">&lt;hfoption value=&quot;tgi&quot; label=&quot;TGI&quot;&gt;
<div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->client.generate(
<span class="hljs-string">&quot;Write a creative story&quot;</span>,
temperature=<span class="hljs-number">0.8</span>, <span class="hljs-comment"># Higher for more creativity</span>
top_p=<span class="hljs-number">0.95</span>, <span class="hljs-comment"># Consider top 95% probability mass</span>
top_k=<span class="hljs-number">50</span>, <span class="hljs-comment"># Consider top 50 tokens</span>
max_new_tokens=<span class="hljs-number">100</span>, <span class="hljs-comment"># Maximum length</span>
repetition_penalty=<span class="hljs-number">1.1</span>, <span class="hljs-comment"># Reduce repetition</span>
)<!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;
&lt;hfoption value=&quot;llama.cpp&quot; label=&quot;llama.cpp&quot;&gt;
<div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-comment"># Via OpenAI API compatibility</span>
response = client.completions.create(
model=<span class="hljs-string">&quot;smollm2-1.7b-instruct&quot;</span>, <span class="hljs-comment"># Model name (can be any string for llama.cpp server)</span>
prompt=<span class="hljs-string">&quot;Write a creative story&quot;</span>,
temperature=<span class="hljs-number">0.8</span>, <span class="hljs-comment"># Higher for more creativity</span>
top_p=<span class="hljs-number">0.95</span>, <span class="hljs-comment"># Consider top 95% probability mass</span>
frequency_penalty=<span class="hljs-number">1.1</span>, <span class="hljs-comment"># Reduce repetition</span>
presence_penalty=<span class="hljs-number">0.1</span>, <span class="hljs-comment"># Reduce repetition</span>
max_tokens=<span class="hljs-number">100</span>, <span class="hljs-comment"># Maximum length</span>
)
<span class="hljs-comment"># Via llama-cpp-python direct access</span>
output = llm(
<span class="hljs-string">&quot;Write a creative story&quot;</span>,
temperature=<span class="hljs-number">0.8</span>,
top_p=<span class="hljs-number">0.95</span>,
top_k=<span class="hljs-number">50</span>,
max_tokens=<span class="hljs-number">100</span>,
repeat_penalty=<span class="hljs-number">1.1</span>,
)<!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;
&lt;hfoption value=&quot;vllm&quot; label=&quot;vLLM&quot;&gt;
<div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->params = SamplingParams(
temperature=<span class="hljs-number">0.8</span>, <span class="hljs-comment"># Higher for more creativity</span>
top_p=<span class="hljs-number">0.95</span>, <span class="hljs-comment"># Consider top 95% probability mass</span>
top_k=<span class="hljs-number">50</span>, <span class="hljs-comment"># Consider top 50 tokens</span>
max_tokens=<span class="hljs-number">100</span>, <span class="hljs-comment"># Maximum length</span>
presence_penalty=<span class="hljs-number">0.1</span>, <span class="hljs-comment"># Reduce repetition</span>
)
llm.generate(<span class="hljs-string">&quot;Write a creative story&quot;</span>, sampling_params=params)<!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;</div> <h3 class="relative group"><a id="controlling-repetition" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#controlling-repetition"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Controlling Repetition</span></h3> <p data-svelte-h="svelte-euetng">Both frameworks provide ways to prevent repetitive text generation:</p> <div class="flex space-x-2 items-center my-1.5 mr-8 h-7 !pl-0 -mx-3 md:mx-0"></div> <div class="language-select">&lt;hfoption value=&quot;tgi&quot; label=&quot;TGI&quot;&gt;
<div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->client.generate(
<span class="hljs-string">&quot;Write a varied text&quot;</span>,
repetition_penalty=<span class="hljs-number">1.1</span>, <span class="hljs-comment"># Penalize repeated tokens</span>
no_repeat_ngram_size=<span class="hljs-number">3</span>, <span class="hljs-comment"># Prevent 3-gram repetition</span>
)<!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;
&lt;hfoption value=&quot;llama.cpp&quot; label=&quot;llama.cpp&quot;&gt;
<div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-comment"># Via OpenAI API</span>
response = client.completions.create(
model=<span class="hljs-string">&quot;smollm2-1.7b-instruct&quot;</span>,
prompt=<span class="hljs-string">&quot;Write a varied text&quot;</span>,
frequency_penalty=<span class="hljs-number">1.1</span>, <span class="hljs-comment"># Penalize frequent tokens</span>
presence_penalty=<span class="hljs-number">0.8</span>, <span class="hljs-comment"># Penalize tokens already present</span>
)
<span class="hljs-comment"># Via direct library</span>
output = llm(
<span class="hljs-string">&quot;Write a varied text&quot;</span>,
repeat_penalty=<span class="hljs-number">1.1</span>, <span class="hljs-comment"># Penalize repeated tokens</span>
frequency_penalty=<span class="hljs-number">0.5</span>, <span class="hljs-comment"># Additional frequency penalty</span>
presence_penalty=<span class="hljs-number">0.5</span>, <span class="hljs-comment"># Additional presence penalty</span>
)<!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;
&lt;hfoption value=&quot;vllm&quot; label=&quot;vLLM&quot;&gt;
<div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->params = SamplingParams(
presence_penalty=<span class="hljs-number">0.1</span>, <span class="hljs-comment"># Penalize token presence</span>
frequency_penalty=<span class="hljs-number">0.1</span>, <span class="hljs-comment"># Penalize token frequency</span>
)<!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;</div> <h3 class="relative group"><a id="length-control-and-stop-sequences" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#length-control-and-stop-sequences"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Length Control and Stop Sequences</span></h3> <p data-svelte-h="svelte-1ut8dfv">You can control generation length and specify when to stop:</p> <div class="flex space-x-2 items-center my-1.5 mr-8 h-7 !pl-0 -mx-3 md:mx-0"></div> <div class="language-select">&lt;hfoption value=&quot;tgi&quot; label=&quot;TGI&quot;&gt;
<div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->client.generate(
<span class="hljs-string">&quot;Generate a short paragraph&quot;</span>,
max_new_tokens=<span class="hljs-number">100</span>,
min_new_tokens=<span class="hljs-number">10</span>,
stop_sequences=[<span class="hljs-string">&quot;\n\n&quot;</span>, <span class="hljs-string">&quot;###&quot;</span>],
)<!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;
&lt;hfoption value=&quot;llama.cpp&quot; label=&quot;llama.cpp&quot;&gt;
<div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-comment"># Via OpenAI API</span>
response = client.completions.create(
model=<span class="hljs-string">&quot;smollm2-1.7b-instruct&quot;</span>,
prompt=<span class="hljs-string">&quot;Generate a short paragraph&quot;</span>,
max_tokens=<span class="hljs-number">100</span>,
stop=[<span class="hljs-string">&quot;\n\n&quot;</span>, <span class="hljs-string">&quot;###&quot;</span>],
)
<span class="hljs-comment"># Via direct library</span>
output = llm(<span class="hljs-string">&quot;Generate a short paragraph&quot;</span>, max_tokens=<span class="hljs-number">100</span>, stop=[<span class="hljs-string">&quot;\n\n&quot;</span>, <span class="hljs-string">&quot;###&quot;</span>])<!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;
&lt;hfoption value=&quot;vllm&quot; label=&quot;vLLM&quot;&gt;
<div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->params = SamplingParams(
max_tokens=<span class="hljs-number">100</span>,
min_tokens=<span class="hljs-number">10</span>,
stop=[<span class="hljs-string">&quot;###&quot;</span>, <span class="hljs-string">&quot;\n\n&quot;</span>],
ignore_eos=<span class="hljs-literal">False</span>,
skip_special_tokens=<span class="hljs-literal">True</span>,
)<!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;</div> <h2 class="relative group"><a id="memory-management" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#memory-management"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Memory Management</span></h2> <p data-svelte-h="svelte-b9x09v">Both frameworks implement advanced memory management techniques for efficient inference.</p> <div class="flex space-x-2 items-center my-1.5 mr-8 h-7 !pl-0 -mx-3 md:mx-0"></div> <div class="language-select">&lt;hfoption value=&quot;tgi&quot; label=&quot;TGI&quot;&gt;
<p data-svelte-h="svelte-1uwkewc">TGI uses Flash Attention 2 and continuous batching:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-comment"># Docker deployment with memory optimization</span>
docker run --gpus all -p 8080:80 \
--shm-size 1g \
ghcr.io/huggingface/text-generation-inference:latest \
--model-id HuggingFaceTB/SmolLM2-1.7B-Instruct \
--max-batch-total-tokens 8192 \
--max-input-length 4096<!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;
&lt;hfoption value=&quot;llama.cpp&quot; label=&quot;llama.cpp&quot;&gt;
<p data-svelte-h="svelte-f66vqu">llama.cpp uses quantization and optimized memory layout:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-comment"># Server with memory optimizations</span>
./server \
-m smollm2-1.7b-instruct.Q4_K_M.gguf \
--host 0.0.0.0 \
--port 8080 \
-c 2048 <span class="hljs-comment">`# Context size`</span> \
--threads 4 <span class="hljs-comment">`# CPU threads`</span> \
--n-gpu-layers 32 <span class="hljs-comment">`# Use more GPU layers for larger models`</span> \
--mlock <span class="hljs-comment">`# Lock memory to prevent swapping`</span> \
--cont-batching <span class="hljs-comment"># Enable continuous batching</span><!-- HTML_TAG_END --></pre></div> <p data-svelte-h="svelte-1m87csn">For models too large for your GPU, you can use CPU offloading:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START -->./server \
-m smollm2-1.7b-instruct.Q4_K_M.gguf \
--n-gpu-layers 20 <span class="hljs-comment">`# Keep first 20 layers on GPU`</span> \
--threads 8 <span class="hljs-comment"># Use more CPU threads for CPU layers</span><!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;
&lt;hfoption value=&quot;vllm&quot; label=&quot;vLLM&quot;&gt;
<p data-svelte-h="svelte-6lsf9z">vLLM uses PagedAttention for optimal memory management:</p> <div class="code-block relative "><div class="absolute top-2.5 right-4"><button class="inline-flex items-center relative text-sm focus:text-green-500 cursor-pointer focus:outline-none transition duration-200 ease-in-out opacity-0 mx-0.5 text-gray-600 " title="code excerpt" type="button"><svg class="" xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"><path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)"></path><path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)"></path><rect fill="none" width="32" height="32"></rect></svg> <div class="absolute pointer-events-none transition-opacity bg-black text-white py-1 px-2 leading-tight rounded font-normal shadow left-1/2 top-full transform -translate-x-1/2 translate-y-2 opacity-0"><div class="absolute bottom-full left-1/2 transform -translate-x-1/2 w-0 h-0 border-black border-4 border-t-0" style="border-left-color: transparent; border-right-color: transparent; "></div> Copied</div></button></div> <pre class=""><!-- HTML_TAG_START --><span class="hljs-keyword">from</span> vllm.engine.arg_utils <span class="hljs-keyword">import</span> AsyncEngineArgs
engine_args = AsyncEngineArgs(
model=<span class="hljs-string">&quot;HuggingFaceTB/SmolLM2-1.7B-Instruct&quot;</span>,
gpu_memory_utilization=<span class="hljs-number">0.85</span>,
max_num_batched_tokens=<span class="hljs-number">8192</span>,
block_size=<span class="hljs-number">16</span>,
)
engine = AsyncLLMEngine.from_engine_args(engine_args)  <span class="hljs-comment"># requires: from vllm.engine.async_llm_engine import AsyncLLMEngine</span><!-- HTML_TAG_END --></pre></div>
&lt;/hfoption&gt;</div> <h2 class="relative group"><a id="resources" class="header-link block pr-1.5 text-lg no-hover:hidden with-hover:absolute with-hover:p-1.5 with-hover:opacity-0 with-hover:group-hover:opacity-100 with-hover:right-full" href="#resources"><span><svg class="" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 256"><path d="M167.594 88.393a8.001 8.001 0 0 1 0 11.314l-67.882 67.882a8 8 0 1 1-11.314-11.315l67.882-67.881a8.003 8.003 0 0 1 11.314 0zm-28.287 84.86l-28.284 28.284a40 40 0 0 1-56.567-56.567l28.284-28.284a8 8 0 0 0-11.315-11.315l-28.284 28.284a56 56 0 0 0 79.196 79.197l28.285-28.285a8 8 0 1 0-11.315-11.314zM212.852 43.14a56.002 56.002 0 0 0-79.196 0l-28.284 28.284a8 8 0 1 0 11.314 11.314l28.284-28.284a40 40 0 0 1 56.568 56.567l-28.285 28.285a8 8 0 0 0 11.315 11.314l28.284-28.284a56.065 56.065 0 0 0 0-79.196z" fill="currentColor"></path></svg></span></a> <span>Resources</span></h2> <ul data-svelte-h="svelte-15h1dzu"><li><a href="https://huggingface.co/docs/text-generation-inference" rel="nofollow">Text Generation Inference Documentation</a></li> <li><a href="https://github.com/huggingface/text-generation-inference" rel="nofollow">TGI GitHub Repository</a></li> <li><a href="https://vllm.readthedocs.io/" rel="nofollow">vLLM Documentation</a></li> <li><a href="https://github.com/vllm-project/vllm" rel="nofollow">vLLM GitHub Repository</a></li> <li><a href="https://arxiv.org/abs/2309.06180" rel="nofollow">PagedAttention Paper</a></li> <li><a href="https://github.com/ggerganov/llama.cpp" rel="nofollow">llama.cpp GitHub Repository</a></li> <li><a href="https://github.com/abetlen/llama-cpp-python" rel="nofollow">llama-cpp-python Repository</a></li></ul> <a class="!text-gray-400 !no-underline text-sm flex items-center not-prose mt-4" href="https://github.com/huggingface/course/blob/main/chapters/en/chapter2/8.mdx" 
target="_blank"><span data-svelte-h="svelte-1kd6by1">&lt;</span> <span data-svelte-h="svelte-x0xyl0">&gt;</span> <span data-svelte-h="svelte-1dajgef"><span class="underline ml-1.5">Update</span> on GitHub</span></a> <p></p>
<script>
{
__sveltekit_1y0degu = {
assets: "/docs/course/pr_1069/en",
base: "/docs/course/pr_1069/en",
env: {}
};
const element = document.currentScript.parentElement;
const data = [null,null];
Promise.all([
import("/docs/course/pr_1069/en/_app/immutable/entry/start.c5306bb2.js"),
import("/docs/course/pr_1069/en/_app/immutable/entry/app.4264f5f8.js")
]).then(([kit, app]) => {
kit.start(app, element, {
node_ids: [0, 43],
data,
form: null,
error: null
});
});
}
</script>

Xet Storage Details

Size:
109 kB
·
Xet hash:
91592a3d5159f4dae1209acef75b670e34c070613ba3e84f4494284f01eba852

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.