| <!DOCTYPE html> |
| <html lang="en" class="scroll-smooth"> |
| <head> |
| <meta charset="UTF-8"> |
| <meta name="viewport" content="width=device-width, initial-scale=1.0"> |
| <title>llama.cpp - Complete Guide, Tips & Forks</title> |
| <script src="https://cdn.tailwindcss.com"></script> |
| <script src="https://unpkg.com/lucide@latest"></script> |
| <link rel="preconnect" href="https://fonts.googleapis.com"> |
| <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin> |
| <link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700;800&family=JetBrains+Mono:wght@400;500;600&display=swap" rel="stylesheet"> |
| <script> |
| tailwind.config = { |
| theme: { |
| extend: { |
| fontFamily: { |
| sans: ['Inter', 'sans-serif'], |
| mono: ['JetBrains Mono', 'monospace'], |
| }, |
| colors: { |
| primary: '#10b981', |
| secondary: '#3b82f6', |
| accent: '#f59e0b', |
| dark: '#0f172a', |
| darker: '#020617', |
| code: '#1e293b', |
| } |
| } |
| } |
| } |
| </script> |
| <style> |
| body { |
| background-color: #020617; |
| color: #e2e8f0; |
| } |
| .glass { |
| background: rgba(30, 41, 59, 0.7); |
| backdrop-filter: blur(12px); |
| border: 1px solid rgba(255, 255, 255, 0.1); |
| } |
| .glass-hover:hover { |
| background: rgba(51, 65, 85, 0.8); |
| border-color: rgba(16, 185, 129, 0.5); |
| transform: translateY(-4px); |
| box-shadow: 0 20px 40px -15px rgba(16, 185, 129, 0.3); |
| } |
| .code-block { |
| background: #0d1117; |
| border: 1px solid #30363d; |
| border-radius: 0.5rem; |
| overflow-x: auto; |
| } |
| .copy-btn { |
| position: absolute; |
| top: 0.5rem; |
| right: 0.5rem; |
| opacity: 0; |
| transition: opacity 0.2s; |
| } |
| .code-block:hover .copy-btn { |
| opacity: 1; |
| } |
| .pre-wrap { |
| white-space: pre-wrap; |
| word-wrap: break-word; |
| } |
| .hero-gradient { |
| background: linear-gradient(135deg, rgba(16, 185, 129, 0.1) 0%, rgba(59, 130, 246, 0.1) 100%); |
| } |
| </style> |
| </head> |
| <body class="antialiased"> |
|
|
| |
| <nav class="fixed w-full z-50 glass border-b border-slate-800"> |
| <div class="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8"> |
| <div class="flex justify-between items-center h-16"> |
| <div class="flex items-center space-x-2"> |
| <i data-lucide="brain-circuit" class="w-8 h-8 text-primary"></i> |
| <span class="text-xl font-bold text-white">llama.cpp</span> |
| </div> |
| <div class="hidden md:flex space-x-8"> |
| <a href="#overview" class="text-slate-300 hover:text-primary transition">Overview</a> |
| <a href="#installation" class="text-slate-300 hover:text-primary transition">Installation</a> |
| <a href="#usage" class="text-slate-300 hover:text-primary transition">Usage</a> |
| <a href="#forks" class="text-slate-300 hover:text-primary transition">Forks</a> |
| <a href="#optimization" class="text-slate-300 hover:text-primary transition">Tips</a> |
| </div> |
| <div class="md:hidden"> |
| <button id="mobile-menu-btn" class="text-slate-300 hover:text-white"> |
| <i data-lucide="menu" class="w-6 h-6"></i> |
| </button> |
| </div> |
| </div> |
| </div> |
| <div id="mobile-menu" class="hidden md:hidden glass border-t border-slate-800"> |
| <div class="px-2 pt-2 pb-3 space-y-1"> |
| <a href="#overview" class="block px-3 py-2 text-slate-300 hover:text-primary">Overview</a> |
| <a href="#installation" class="block px-3 py-2 text-slate-300 hover:text-primary">Installation</a> |
| <a href="#usage" class="block px-3 py-2 text-slate-300 hover:text-primary">Usage</a> |
| <a href="#forks" class="block px-3 py-2 text-slate-300 hover:text-primary">Forks</a> |
| <a href="#optimization" class="block px-3 py-2 text-slate-300 hover:text-primary">Tips</a> |
| </div> |
| </div> |
| </nav> |
|
|
| |
| <section class="relative pt-32 pb-20 lg:pt-48 lg:pb-32 hero-gradient overflow-hidden"> |
| <div class="absolute inset-0 bg-[radial-gradient(ellipse_at_center,_var(--tw-gradient-stops))] from-primary/20 via-darker to-darker"></div> |
| <div class="relative max-w-7xl mx-auto px-4 sm:px-6 lg:px-8 text-center"> |
| <h1 class="text-5xl md:text-7xl font-extrabold mb-6 bg-gradient-to-r from-primary to-secondary bg-clip-text text-transparent"> |
| llama.cpp |
| </h1> |
| <p class="text-2xl md:text-3xl text-slate-300 mb-8 font-light"> |
| Your Ultimate Guide to Running LLMs Locally |
| </p> |
| <p class="text-lg text-slate-400 max-w-3xl mx-auto mb-10"> |
| Complete documentation, installation guides, optimization tips, and forks comparison for the most efficient C++ implementation of LLaMA and other large language models. |
| </p> |
| <div class="flex flex-col sm:flex-row gap-4 justify-center"> |
| <a href="#installation" class="px-8 py-4 bg-primary hover:bg-primary/90 text-white font-semibold rounded-lg transition transform hover:scale-105 flex items-center justify-center gap-2"> |
| <i data-lucide="download" class="w-5 h-5"></i> |
| Quick Start |
| </a> |
| <a href="https://github.com/ggerganov/llama.cpp" target="_blank" class="px-8 py-4 glass hover:bg-slate-800 text-white font-semibold rounded-lg transition flex items-center justify-center gap-2"> |
| <i data-lucide="github" class="w-5 h-5"></i> |
| View on GitHub |
| </a> |
| </div> |
| </div> |
| </section> |
|
|
| |
| <section id="overview" class="py-20 bg-slate-900/50"> |
| <div class="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8"> |
| <div class="text-center mb-16"> |
| <h2 class="text-3xl md:text-4xl font-bold mb-4 text-white">What is llama.cpp?</h2> |
| <p class="text-slate-400 text-lg max-w-2xl mx-auto"> |
A high-performance C/C++ implementation of LLM inference, originally built around Meta's LLaMA models, that runs efficiently on consumer hardware.
| </p> |
| </div> |
|
|
| <div class="grid md:grid-cols-3 gap-8"> |
| <div class="glass p-6 rounded-xl glass-hover transition duration-300"> |
| <div class="w-12 h-12 bg-primary/20 rounded-lg flex items-center justify-center mb-4"> |
| <i data-lucide="cpu" class="w-6 h-6 text-primary"></i> |
| </div> |
| <h3 class="text-xl font-semibold mb-2 text-white">Zero Dependencies</h3> |
| <p class="text-slate-400">Pure C++ implementation with no dependencies. Just compile and run on CPU or GPU.</p> |
| </div> |
|
|
| <div class="glass p-6 rounded-xl glass-hover transition duration-300"> |
| <div class="w-12 h-12 bg-secondary/20 rounded-lg flex items-center justify-center mb-4"> |
| <i data-lucide="zap" class="w-6 h-6 text-secondary"></i> |
| </div> |
| <h3 class="text-xl font-semibold mb-2 text-white">Optimized Inference</h3> |
| <p class="text-slate-400">Highly optimized for 4-bit and 5-bit quantized models. Supports GPU acceleration via CUDA, Metal, and Vulkan.</p> |
| </div> |
|
|
| <div class="glass p-6 rounded-xl glass-hover transition duration-300"> |
| <div class="w-12 h-12 bg-accent/20 rounded-lg flex items-center justify-center mb-4"> |
| <i data-lucide="layers" class="w-6 h-6 text-accent"></i> |
| </div> |
| <h3 class="text-xl font-semibold mb-2 text-white">Multiple Models</h3> |
| <p class="text-slate-400">Supports LLaMA, LLaMA 2, Falcon, Wizard, Vicuna, and many more GGUF models.</p> |
| </div> |
| </div> |
|
|
| |
| <div class="mt-16 grid grid-cols-2 md:grid-cols-4 gap-8"> |
| <div class="text-center p-6 glass rounded-xl"> |
| <div class="text-3xl font-bold text-primary mb-1">60k+</div> |
| <div class="text-sm text-slate-400">GitHub Stars</div> |
| </div> |
| <div class="text-center p-6 glass rounded-xl"> |
| <div class="text-3xl font-bold text-secondary mb-1">Q4_K_M</div> |
| <div class="text-sm text-slate-400">Recommended Quant</div> |
| </div> |
| <div class="text-center p-6 glass rounded-xl"> |
| <div class="text-3xl font-bold text-accent mb-1">CUDA</div> |
| <div class="text-sm text-slate-400">GPU Support</div> |
| </div> |
| <div class="text-center p-6 glass rounded-xl"> |
| <div class="text-3xl font-bold text-purple-400 mb-1">8GB+</div> |
| <div class="text-sm text-slate-400">RAM Required</div> |
| </div> |
| </div> |
| </div> |
| </section> |
|
|
| |
| <section id="installation" class="py-20"> |
| <div class="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8"> |
| <div class="text-center mb-12"> |
| <h2 class="text-3xl md:text-4xl font-bold mb-4 text-white">Installation Guide</h2> |
| <p class="text-slate-400">Multiple ways to get llama.cpp running on your machine.</p> |
| </div> |
|
|
| <div class="space-y-8"> |
| |
| <div class="glass p-6 rounded-xl"> |
| <h3 class="text-xl font-semibold mb-4 text-white flex items-center gap-2"> |
| <i data-lucide="package" class="w-5 h-5 text-primary"></i> |
Option 1: Pre-built Releases (Windows, macOS, Linux)
| </h3> |
| <div class="relative code-block p-4 mb-4"> |
| <button class="copy-btn p-2 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)"> |
| <i data-lucide="copy" class="w-4 h-4"></i> |
| </button> |
| <code class="text-slate-300 font-mono text-sm"> |
| # Download from:<br> |
| https://github.com/ggerganov/llama.cpp/releases<br><br> |
| # Look for: llama-b[BUILD]-bin-win-[ARCH]-[BUILD_TYPE].zip<br> |
| # llama-b[BUILD]-macOS-[ARCH].zip<br> |
| # llama-b[BUILD]-bin-ubuntu-[ARCH].[EXT] |
| </code> |
| </div> |
| </div> |
|
|
| |
| <div class="glass p-6 rounded-xl"> |
| <h3 class="text-xl font-semibold mb-4 text-white flex items-center gap-2"> |
| <i data-lucide="terminal" class="w-5 h-5 text-secondary"></i> |
| Option 2: Build from Source |
| </h3> |
| |
| <div class="space-y-4"> |
| <div> |
| <h4 class="text-sm font-semibold text-slate-300 mb-2">Mac/Linux</h4> |
| <div class="relative code-block p-4"> |
| <button class="copy-btn p-2 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)"> |
| <i data-lucide="copy" class="w-4 h-4"></i> |
| </button> |
| <pre class="font-mono text-sm text-slate-300 pre-wrap">git clone https://github.com/ggerganov/llama.cpp.git |
| cd llama.cpp |
| make</pre> |
| </div> |
| </div> |
|
|
| <div> |
| <h4 class="text-sm font-semibold text-slate-300 mb-2">Windows (CMake)</h4> |
| <div class="relative code-block p-4"> |
| <button class="copy-btn p-2 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)"> |
| <i data-lucide="copy" class="w-4 h-4"></i> |
| </button> |
| <pre class="font-mono text-sm text-slate-300 pre-wrap">git clone https://github.com/ggerganov/llama.cpp.git |
| cd llama.cpp |
| cmake . |
| cmake --build . --config Release</pre> |
| </div> |
| </div> |
|
|
| <div> |
| <h4 class="text-sm font-semibold text-slate-300 mb-2">With CUDA Support (GPU)</h4> |
| <div class="relative code-block p-4"> |
| <button class="copy-btn p-2 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)"> |
| <i data-lucide="copy" class="w-4 h-4"></i> |
| </button> |
| <pre class="font-mono text-sm text-slate-300 pre-wrap">make LLAMA_CUDA=1 |
| # or for Windows |
| cmake -DLLAMA_CUDA=ON . |
| cmake --build . --config Release</pre> |
| </div> |
| </div> |
| </div> |
| </div> |
|
|
| |
| <div class="glass p-6 rounded-xl"> |
| <h3 class="text-xl font-semibold mb-4 text-white flex items-center gap-2"> |
| <i data-lucide="container" class="w-5 h-5 text-accent"></i> |
| Option 3: Docker |
| </h3> |
| <div class="relative code-block p-4"> |
| <button class="copy-btn p-2 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)"> |
| <i data-lucide="copy" class="w-4 h-4"></i> |
| </button> |
| <pre class="font-mono text-sm text-slate-300 pre-wrap">docker pull ghcr.io/ggerganov/llama.cpp:latest |
| docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:latest --api -m /models/your-model.gguf</pre> |
| </div> |
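<p class="text-slate-400 text-sm mt-4 mb-2">A server image is also published; a sketch of running it (paths and port are illustrative):</p>
<div class="relative code-block p-4">
<button class="copy-btn p-2 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)">
<i data-lucide="copy" class="w-4 h-4"></i>
</button>
<pre class="font-mono text-sm text-slate-300 pre-wrap">docker run -v /path/to/models:/models -p 8080:8080 \
  ghcr.io/ggerganov/llama.cpp:server \
  -m /models/your-model.gguf --host 0.0.0.0 --port 8080</pre>
</div>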
| </div> |
| </div> |
| </div> |
| </section> |
|
|
| |
| <section id="usage" class="py-20 bg-slate-900/50"> |
| <div class="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8"> |
| <div class="text-center mb-12"> |
| <h2 class="text-3xl md:text-4xl font-bold mb-4 text-white">Usage & Commands</h2> |
| <p class="text-slate-400">Master the CLI and server modes.</p> |
| </div> |
|
|
| <div class="grid lg:grid-cols-2 gap-8"> |
| |
| <div class="glass p-6 rounded-xl"> |
| <h3 class="text-xl font-semibold mb-4 text-white">Basic Commands</h3> |
| <div class="space-y-4"> |
| <div> |
| <h4 class="text-sm font-semibold text-slate-300 mb-2">Simple Inference</h4> |
| <div class="relative code-block p-3"> |
| <button class="copy-btn p-1.5 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)"> |
| <i data-lucide="copy" class="w-3 h-3"></i> |
| </button> |
| <code class="text-xs font-mono text-slate-300">./main -m models/model.gguf -p "Your prompt here"</code> |
| </div> |
| </div> |
| <div> |
| <h4 class="text-sm font-semibold text-slate-300 mb-2">Interactive Chat</h4> |
| <div class="relative code-block p-3"> |
| <button class="copy-btn p-1.5 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)"> |
| <i data-lucide="copy" class="w-3 h-3"></i> |
| </button> |
| <code class="text-xs font-mono text-slate-300">./main -m models/model.gguf --interactive-server</code> |
| </div> |
| </div> |
| <div> |
| <h4 class="text-sm font-semibold text-slate-300 mb-2">Server Mode (API)</h4> |
| <div class="relative code-block p-3"> |
| <button class="copy-btn p-1.5 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)"> |
| <i data-lucide="copy" class="w-3 h-3"></i> |
| </button> |
| <code class="text-xs font-mono text-slate-300">./server -m models/model.gguf -c 2048 --port 8080</code> |
| </div> |
| </div> |
| </div> |
| </div> |
|
|
| |
| <div class="glass p-6 rounded-xl"> |
| <h3 class="text-xl font-semibold mb-4 text-white">Important Flags</h3> |
| <div class="space-y-3"> |
| <div class="flex items-start gap-3"> |
| <code class="text-primary font-mono text-sm">-c, --ctx_size</code> |
| <span class="text-slate-400 text-sm">Context size (e.g., 2048, 4096, 8192)</span> |
| </div> |
| <div class="flex items-start gap-3"> |
| <code class="text-primary font-mono text-sm">-n, --n_predict</code> |
| <span class="text-slate-400 text-sm">Number of tokens to predict (-1 = infinity)</span> |
| </div> |
| <div class="flex items-start gap-3"> |
| <code class="text-primary font-mono text-sm">-t, --threads</code> |
| <span class="text-slate-400 text-sm">Number of CPU threads (recommend: physical cores)</span> |
| </div> |
| <div class="flex items-start gap-3"> |
| <code class="text-primary font-mono text-sm">--temp</code> |
| <span class="text-slate-400 text-sm">Temperature (0.8 is standard)</span> |
| </div> |
| <div class="flex items-start gap-3"> |
| <code class="text-primary font-mono text-sm">--gpu_layers/-ngl</code> |
| <span class="text-slate-400 text-sm">Number of layers to offload to GPU</span> |
| </div> |
| </div> |
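<p class="text-slate-400 text-sm mt-4 mb-2">Putting these together, a typical invocation might look like this (model path and values are illustrative):</p>
<div class="relative code-block p-3">
<button class="copy-btn p-1.5 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)">
<i data-lucide="copy" class="w-3 h-3"></i>
</button>
<code class="text-xs font-mono text-slate-300">./main -m models/model.gguf -c 4096 -n 256 -t 8 --temp 0.8 -ngl 33 -p "Your prompt here"</code>
</div>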
| </div> |
|
|
| |
| <div class="glass p-6 rounded-xl lg:col-span-2"> |
| <h3 class="text-xl font-semibold mb-4 text-white">API Example (OpenAI-compatible)</h3> |
| <div class="relative code-block p-4"> |
| <button class="copy-btn p-2 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)"> |
| <i data-lucide="copy" class="w-4 h-4"></i> |
| </button> |
| <pre class="font-mono text-sm text-slate-300 pre-wrap"># Start server |
| ./server -m models/llama-2-7b-chat.Q4_K_M.gguf -c 4096 |
|
|
| # Send request |
| curl -X POST http://localhost:8080/v1/chat/completions \ |
| -H "Content-Type: application/json" \ |
| -d '{ |
| "messages": [ |
| {"role": "system", "content": "You are a helpful assistant."}, |
| {"role": "user", "content": "Hello!"} |
| ] |
| }'</pre> |
| </div> |
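<p class="text-slate-400 text-sm mt-4 mb-2">Besides the OpenAI-compatible route, the server exposes a native <code class="text-primary">/completion</code> endpoint; a minimal request (values are illustrative):</p>
<div class="relative code-block p-4">
<button class="copy-btn p-2 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)">
<i data-lucide="copy" class="w-4 h-4"></i>
</button>
<pre class="font-mono text-sm text-slate-300 pre-wrap">curl -X POST http://localhost:8080/completion \
  -H "Content-Type: application/json" \
  -d '{"prompt": "Building a website can be done in 10 simple steps:", "n_predict": 128}'</pre>
</div>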
| </div> |
| </div> |
| </div> |
| </section> |
|
|
| |
| <section id="forks" class="py-20"> |
| <div class="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8"> |
| <div class="text-center mb-12"> |
| <h2 class="text-3xl md:text-4xl font-bold mb-4 text-white">Popular Forks & Projects</h2> |
| <p class="text-slate-400">Specialized versions and wrappers built on llama.cpp</p> |
| </div> |
|
|
| <div class="grid md:grid-cols-2 lg:grid-cols-3 gap-6"> |
| |
| <div class="glass p-6 rounded-xl glass-hover transition duration-300 border-l-4 border-primary"> |
| <div class="flex items-center justify-between mb-3"> |
| <h3 class="text-xl font-bold text-white">Ollama</h3> |
| <span class="px-2 py-1 bg-primary/20 text-primary text-xs rounded font-mono">Top Pick</span> |
| </div> |
| <p class="text-slate-400 mb-4 text-sm">The easiest way to run LLaMA, Mistral, and other models locally. Provides a CLI and API for running models.</p> |
| <ul class="text-xs text-slate-500 space-y-1 mb-4"> |
| <li>• macOS, Linux, Windows</li> |
| <li>• Model library available</li> |
| <li>• REST API included</li> |
| <li>• One-liner install</li> |
| </ul> |
| <a href="https://ollama.ai" target="_blank" class="text-primary hover:text-primary/80 text-sm flex items-center gap-1"> |
| ollama.ai <i data-lucide="external-link" class="w-3 h-3"></i> |
| </a> |
| </div> |
|
|
| |
| <div class="glass p-6 rounded-xl glass-hover transition duration-300 border-l-4 border-secondary"> |
| <div class="flex items-center justify-between mb-3"> |
| <h3 class="text-xl font-bold text-white">TextGen WebUI</h3> |
| <span class="px-2 py-1 bg-secondary/20 text-secondary text-xs rounded font-mono">GUI</span> |
| </div> |
| <p class="text-slate-400 mb-4 text-sm">A web interface for LLMs. Supports multiple backends including llama.cpp with extensive character/persona features.</p> |
| <ul class="text-xs text-slate-500 space-y-1 mb-4"> |
| <li>• Web-based interface</li> |
| <li>• Chat/Completions modes</li> |
| <li>• LoRA support</li> |
| <li>• Extensions support</li> |
| </ul> |
| <a href="https://github.com/oobabooga/text-generation-webui" target="_blank" class="text-secondary hover:text-secondary/80 text-sm flex items-center gap-1"> |
| GitHub <i data-lucide="external-link" class="w-3 h-3"></i> |
| </a> |
| </div> |
|
|
| |
| <div class="glass p-6 rounded-xl glass-hover transition duration-300 border-l-4 border-pink-500"> |
| <div class="flex items-center justify-between mb-3"> |
| <h3 class="text-xl font-bold text-white">KoboldCpp</h3> |
| <span class="px-2 py-1 bg-pink-500/20 text-pink-500 text-xs rounded font-mono">Gaming</span> |
| </div> |
| <p class="text-slate-400 mb-4 text-sm">A user-friendly wrapper for llama.cpp optimized for story writing and text adventure gaming.</p> |
| <ul class="text-xs text-slate-500 space-y-1 mb-4"> |
| <li>• Kobold AI compatibility</li> |
| <li>• Streamlined UI</li> |
| <li>• Adventure mode</li> |
| <li>• Easy single-binary setup</li> |
| </ul> |
| <a href="https://github.com/LostRuins/koboldcpp" target="_blank" class="text-pink-500 hover:text-pink-400 text-sm flex items-center gap-1"> |
| GitHub <i data-lucide="external-link" class="w-3 h-3"></i> |
| </a> |
| </div> |
|
|
| |
| <div class="glass p-6 rounded-xl glass-hover transition duration-300 border-l-4 border-accent"> |
| <div class="flex items-center justify-between mb-3"> |
| <h3 class="text-xl font-bold text-white">llamafile</h3> |
| <span class="px-2 py-1 bg-accent/20 text-accent text-xs rounded font-mono">Portable</span> |
| </div> |
| <p class="text-slate-400 mb-4 text-sm">Mozilla's project. LLMs packaged as single executable files that run on most computers without dependencies.</p> |
| <ul class="text-xs text-slate-500 space-y-1 mb-4"> |
| <li>• Single-file executables</li> |
| <li>• No installation needed</li> |
| <li>• Cross-platform</li> |
| <li>• Embeddable</li> |
| </ul> |
| <a href="https://github.com/Mozilla-Ocho/llamafile" target="_blank" class="text-accent hover:text-accent/80 text-sm flex items-center gap-1"> |
| GitHub <i data-lucide="external-link" class="w-3 h-3"></i> |
| </a> |
| </div> |
|
|
| |
| <div class="glass p-6 rounded-xl glass-hover transition duration-300 border-l-4 border-purple-500"> |
| <div class="flex items-center justify-between mb-3"> |
| <h3 class="text-xl font-bold text-white">LocalAI</h3> |
| <span class="px-2 py-1 bg-purple-500/20 text-purple-500 text-xs rounded font-mono">API</span> |
| </div> |
| <p class="text-slate-400 mb-4 text-sm">Drop-in OpenAI API replacement. Self-hosted with llama.cpp backend. Supports text generation, images, and audio.</p> |
| <ul class="text-xs text-slate-500 space-y-1 mb-4"> |
| <li>• OpenAI API compatible</li> |
| <li>• Docker ready</li> |
| <li>• Model hot-reloading</li> |
| <li>• Multiple backends</li> |
| </ul> |
| <a href="https://localai.io" target="_blank" class="text-purple-500 hover:text-purple-400 text-sm flex items-center gap-1"> |
| localai.io <i data-lucide="external-link" class="w-3 h-3"></i> |
| </a> |
| </div> |
|
|
| |
| <div class="glass p-6 rounded-xl glass-hover transition duration-300 border-l-4 border-cyan-500"> |
| <div class="flex items-center justify-between mb-3"> |
| <h3 class="text-xl font-bold text-white">LM Studio</h3> |
| <span class="px-2 py-1 bg-cyan-500/20 text-cyan-500 text-xs rounded font-mono">App</span> |
| </div> |
| <p class="text-slate-400 mb-4 text-sm">Desktop application for running local LLMs with a beautiful interface. Easy model downloading and chatting.</p> |
| <ul class="text-xs text-slate-500 space-y-1 mb-4"> |
| <li>• Desktop GUI</li> |
| <li>• Chat interface</li> |
| <li>• HuggingFace integration</li> |
| <li>• macOS/Windows</li> |
| </ul> |
| <a href="https://lmstudio.ai" target="_blank" class="text-cyan-500 hover:text-cyan-400 text-sm flex items-center gap-1"> |
| lmstudio.ai <i data-lucide="external-link" class="w-3 h-3"></i> |
| </a> |
| </div> |
| </div> |
| </div> |
| </section> |
|
|
| |
| <section id="optimization" class="py-20 bg-slate-900/50"> |
| <div class="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8"> |
| <div class="text-center mb-12"> |
| <h2 class="text-3xl md:text-4xl font-bold mb-4 text-white">Optimization Tips</h2> |
| <p class="text-slate-400">Get the best performance from your models.</p> |
| </div> |
|
|
| <div class="grid md:grid-cols-2 gap-8"> |
| |
| <div class="glass p-6 rounded-xl"> |
| <h3 class="text-xl font-semibold mb-4 text-white flex items-center gap-2"> |
| <i data-lucide="scale" class="w-5 h-5 text-primary"></i> |
| Quantization Guide (GGUF) |
| </h3> |
| <div class="space-y-3"> |
| <div class="p-3 bg-slate-800/50 rounded-lg border border-slate-700"> |
| <div class="flex justify-between mb-1"> |
| <span class="font-semibold text-primary font-mono text-sm">Q4_K_M</span> |
| <span class="text-slate-400 text-xs">Best balance</span> |
| </div> |
| <p class="text-slate-400 text-xs">Recommended for most models. ~4.7GB for 7B model. Quality slightly better than Q4_0.</p> |
| </div> |
| <div class="p-3 bg-slate-800/50 rounded-lg border border-slate-700"> |
| <div class="flex justify-between mb-1"> |
| <span class="font-semibold text-slate-300 font-mono text-sm">Q5_K_M</span> |
| <span class="text-slate-400 text-xs">High quality</span> |
| </div> |
| <p class="text-slate-400 text-xs">For quality-critical tasks. ~5.8GB for 7B model.</p> |
| </div> |
| <div class="p-3 bg-slate-800/50 rounded-lg border border-slate-700"> |
| <div class="flex justify-between mb-1"> |
| <span class="font-semibold text-slate-300 font-mono text-sm">Q8_0</span> |
| <span class="text-slate-400 text-xs">Maximum quality</span> |
| </div> |
| <p class="text-slate-400 text-xs">Almost unnoticeable loss. ~7GB for 7B model.</p> |
| </div> |
| </div> |
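<p class="text-slate-400 text-sm mt-4 mb-2">To produce one of these yourself from a full-precision GGUF, the bundled <code class="text-primary">quantize</code> tool can be used (file names are illustrative):</p>
<div class="relative code-block p-3">
<button class="copy-btn p-1.5 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)">
<i data-lucide="copy" class="w-3 h-3"></i>
</button>
<code class="text-xs font-mono text-slate-300">./quantize model-f16.gguf model-Q4_K_M.gguf Q4_K_M</code>
</div>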
| </div> |
|
|
| |
| <div class="glass p-6 rounded-xl"> |
| <h3 class="text-xl font-semibold mb-4 text-white flex items-center gap-2"> |
| <i data-lucide="gauge" class="w-5 h-5 text-secondary"></i> |
| Performance Tips |
| </h3> |
| <ul class="space-y-3 text-slate-300 text-sm"> |
| <li class="flex items-start gap-2"> |
| <span class="text-primary">•</span> |
<span><strong>GPU Offloading:</strong> Use <code class="bg-slate-800 px-1 rounded text-primary">-ngl 33</code> to offload all layers of a 7B model (values above the model's layer count are simply capped); requires a CUDA, Metal, or Vulkan build</span>
| </li> |
| <li class="flex items-start gap-2"> |
| <span class="text-primary">•</span> |
| <span><strong>Context Size:</strong> Start with 2048, increase based on your needs (uses more VRAM)</span> |
| </li> |
| <li class="flex items-start gap-2"> |
| <span class="text-primary">•</span> |
| <span><strong>Threads:</strong> Set to your physical CPU cores count with <code class="bg-slate-800 px-1 rounded text-primary">-t [cores]</code></span> |
| </li> |
| <li class="flex items-start gap-2"> |
| <span class="text-primary">•</span> |
<span><strong>Memory Locking:</strong> Memory mapping is on by default; add <code class="bg-slate-800 px-1 rounded text-primary">--mlock</code> to pin the model in RAM and prevent swapping</span>
| </li> |
| <li class="flex items-start gap-2"> |
| <span class="text-primary">•</span> |
<span><strong>Batch Size:</strong> Raise the prompt-processing batch size, e.g. <code class="bg-slate-800 px-1 rounded text-primary">-b 512</code>, for faster prompt ingestion</span>
| </li> |
| </ul> |
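<p class="text-slate-400 text-sm mt-4 mb-2">To measure the effect of these settings, the bundled <code class="text-primary">llama-bench</code> tool reports prompt-processing and generation throughput (values are illustrative):</p>
<div class="relative code-block p-3">
<button class="copy-btn p-1.5 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)">
<i data-lucide="copy" class="w-3 h-3"></i>
</button>
<code class="text-xs font-mono text-slate-300">./llama-bench -m models/model.gguf -p 512 -n 128 -t 8 -ngl 33</code>
</div>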
| </div> |
|
|
| |
| <div class="glass p-6 rounded-xl md:col-span-2"> |
| <h3 class="text-xl font-semibold mb-4 text-white flex items-center gap-2"> |
| <i data-lucide="hard-drive" class="w-5 h-5 text-accent"></i> |
| Hardware Requirements |
| </h3> |
| <div class="grid grid-cols-2 md:grid-cols-4 gap-4"> |
| <div class="text-center p-4 bg-slate-800/50 rounded-lg"> |
| <div class="text-lg font-bold text-white mb-1">7B Model</div> |
| <div class="text-xs text-slate-400">~4-8GB RAM</div> |
| <div class="text-xs text-slate-500 mt-1">M1 Mac Minimum</div> |
| </div> |
| <div class="text-center p-4 bg-slate-800/50 rounded-lg"> |
| <div class="text-lg font-bold text-white mb-1">13B Model</div> |
| <div class="text-xs text-slate-400">~8-12GB RAM</div> |
| <div class="text-xs text-slate-500 mt-1">Requires GPU</div> |
| </div> |
| <div class="text-center p-4 bg-slate-800/50 rounded-lg"> |
| <div class="text-lg font-bold text-white mb-1">30B Model</div> |
| <div class="text-xs text-slate-400">~20GB RAM</div> |
| <div class="text-xs text-slate-500 mt-1">32GB System + GPU</div> |
| </div> |
| <div class="text-center p-4 bg-slate-800/50 rounded-lg"> |
| <div class="text-lg font-bold text-white mb-1">70B Model</div> |
| <div class="text-xs text-slate-400">~40GB+ RAM</div> |
| <div class="text-xs text-slate-500 mt-1">High-end GPU</div> |
| </div> |
| </div> |
| </div> |
| </div> |
| </div> |
| </section> |
|
|
| |
| <section class="py-20"> |
| <div class="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8"> |
| <div class="glass p-8 rounded-xl border border-slate-700"> |
| <h2 class="text-2xl font-bold mb-4 text-white">Converting Models</h2> |
| <p class="text-slate-400 mb-4">Converting Safetensors/PyTorch models to GGUF format for llama.cpp:</p> |
| <div class="relative code-block p-4"> |
| <button class="copy-btn p-2 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)"> |
| <i data-lucide="copy" class="w-4 h-4"></i> |
| </button> |
| <pre class="font-mono text-sm text-slate-300 pre-wrap"># Install dependencies |
| python -m pip install gguf protobuf |
|
|
| # Convert HuggingFace model to GGUF |
| python convert-hf-to-gguf.py /path/to/model \ |
| --outfile /path/to/output/model.gguf \ |
| --outtype q4_k_m</pre> |
| </div> |
| <p class="text-xs text-slate-500 mt-3">Available outtypes: f32, f16, bf16, q8_0, q4_0, q4_1, q4_k_s, q4_k_m, q5_k_s, q5_k_m, q6_k</p> |
| </div> |
| </div> |
| </section> |
|
|
| |
| <section class="py-20 bg-slate-900/50"> |
| <div class="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8"> |
| <div class="text-center mb-12"> |
| <h2 class="text-3xl md:text-4xl font-bold mb-4 text-white">Where to Download Models</h2> |
| <p class="text-slate-400">Pre-converted GGUF models ready to use.</p> |
| </div> |
|
|
| <div class="grid md:grid-cols-3 gap-6"> |
| <div class="glass p-6 rounded-xl glass-hover transition duration-300"> |
| <div class="flex items-center gap-3 mb-4"> |
| <div class="w-10 h-10 bg-yellow-500/20 rounded-lg flex items-center justify-center"> |
| <i data-lucide="database" class="w-5 h-5 text-yellow-500"></i> |
| </div> |
| <h3 class="text-lg font-bold text-white">TheBloke</h3> |
| </div> |
| <p class="text-slate-400 text-sm mb-4">The most popular source for quantized GGUF models. Hundreds of models including Llama 2, Mistral, CodeLlama, and more.</p> |
| <a href="https://huggingface.co/TheBloke" target="_blank" class="text-primary hover:text-primary/80 text-sm flex items-center gap-1"> |
| View on HuggingFace <i data-lucide="external-link" class="w-3 h-3"></i> |
| </a> |
| </div> |
|
|
| <div class="glass p-6 rounded-xl glass-hover transition duration-300"> |
| <div class="flex items-center gap-3 mb-4"> |
| <div class="w-10 h-10 bg-blue-500/20 rounded-lg flex items-center justify-center"> |
| <i data-lucide="layers" class="w-5 h-5 text-blue-500"></i> |
| </div> |
| <h3 class="text-lg font-bold text-white">NousResearch</h3> |
| </div> |
| <p class="text-slate-400 text-sm mb-4">Research-focused models including Hermes, Synthia, and other fine-tuned versions with GGUF support.</p> |
| <a href="https://huggingface.co/NousResearch" target="_blank" class="text-blue-500 hover:text-blue-400 text-sm flex items-center gap-1"> |
| View on HuggingFace <i data-lucide="external-link" class="w-3 h-3"></i> |
| </a> |
| </div> |
|
|
| <div class="glass p-6 rounded-xl glass-hover transition duration-300"> |
| <div class="flex items-center gap-3 mb-4"> |
| <div class="w-10 h-10 bg-purple-500/20 rounded-lg flex items-center justify-center"> |
| <i data-lucide="cloud" class="w-5 h-5 text-purple-500"></i> |
| </div> |
| <h3 class="text-lg font-bold text-white">LWDW (RunPod)</h3> |
| </div> |
| <p class="text-slate-400 text-sm mb-4">Specialized in large model variants (70B+) and unique quantizations. Great for GPU cloud inference.</p> |
| <a href="https://huggingface.co/LWDW" target="_blank" class="text-purple-500 hover:text-purple-400 text-sm flex items-center gap-1"> |
| View on HuggingFace <i data-lucide="external-link" class="w-3 h-3"></i> |
| </a> |
| </div> |
| </div> |
|
|
| <div class="mt-8 glass p-6 rounded-xl"> |
| <h3 class="text-lg font-semibold mb-3 text-white">Popular Models to Try</h3> |
| <div class="grid md:grid-cols-2 lg:grid-cols-4 gap-4 text-sm"> |
| <div class="p-3 bg-slate-800/50 rounded border border-slate-700"> |
| <code class="text-primary font-mono">mistral-7b-instruct</code> |
| <p class="text-slate-500 text-xs mt-1">Fast, great quality</p> |
| </div> |
| <div class="p-3 bg-slate-800/50 rounded border border-slate-700"> |
| <code class="text-primary font-mono">llama-2-7b/13b-chat</code> |
| <p class="text-slate-500 text-xs mt-1">All-purpose, balanced</p> |
| </div> |
| <div class="p-3 bg-slate-800/50 rounded border border-slate-700"> |
| <code class="text-primary font-mono">codellama-7b/13b</code> |
| <p class="text-slate-500 text-xs mt-1">Code generation</p> |
| </div> |
| <div class="p-3 bg-slate-800/50 rounded border border-slate-700"> |
| <code class="text-primary font-mono">neural-chat-7b</code> |
| <p class="text-slate-500 text-xs mt-1">Conversations</p> |
| </div> |
| </div> |
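<p class="text-slate-400 text-sm mt-4 mb-2">A quick way to fetch a single GGUF file from the command line is <code class="text-primary">huggingface-cli</code> (the repository and file names below are illustrative):</p>
<div class="relative code-block p-3">
<button class="copy-btn p-1.5 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)">
<i data-lucide="copy" class="w-3 h-3"></i>
</button>
<pre class="font-mono text-xs text-slate-300 pre-wrap">pip install -U "huggingface_hub[cli]"
huggingface-cli download TheBloke/Mistral-7B-Instruct-v0.2-GGUF \
  mistral-7b-instruct-v0.2.Q4_K_M.gguf --local-dir ./models</pre>
</div>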
| </div> |
| </div> |
| </section> |
|
|
| |
| <section class="py-20"> |
| <div class="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8"> |
| <div class="text-center mb-12"> |
| <h2 class="text-3xl md:text-4xl font-bold mb-4 text-white">Advanced Features</h2> |
| <p class="text-slate-400">Unlock the full potential of llama.cpp.</p> |
| </div> |
|
|
| <div class="space-y-6"> |
| <div class="grid md:grid-cols-2 gap-6"> |
| <div class="glass p-6 rounded-xl"> |
| <h3 class="text-xl font-semibold mb-3 text-white flex items-center gap-2"> |
| <i data-lucide="zap" class="w-5 h-5 text-yellow-500"></i> |
| Speculative Decoding |
| </h3> |
| <p class="text-slate-400 text-sm mb-3">Use a smaller draft model to speed up token generation. Can achieve 2-3x speedup on supported hardware.</p> |
| <div class="relative code-block p-3"> |
| <button class="copy-btn p-1.5 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)"> |
| <i data-lucide="copy" class="w-3 h-3"></i> |
| </button> |
| <code class="text-xs font-mono text-slate-300">./main -m large_model.gguf --draft small_model.gguf -ngl 35 --draft 10</code> |
| </div> |
| </div> |
|
|
| <div class="glass p-6 rounded-xl"> |
| <h3 class="text-xl font-semibold mb-3 text-white flex items-center gap-2"> |
| <i data-lucide="terminal" class="w-5 h-5 text-green-500"></i> |
| Grammar-Based Sampling |
| </h3> |
| <p class="text-slate-400 text-sm mb-3">Force JSON output or specific formats using GBNF grammar files. Perfect for structured output.</p> |
| <div class="relative code-block p-3"> |
| <button class="copy-btn p-1.5 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)"> |
| <i data-lucide="copy" class="w-3 h-3"></i> |
| </button> |
| <code class="text-xs font-mono text-slate-300">./main -m model.gguf --grammar-file json.gbnf -p "Generate JSON:"</code> |
| </div> |
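<p class="text-slate-400 text-sm mt-3 mb-2">Custom grammars are plain text files in GBNF. A minimal sketch (hypothetical file <code class="text-primary">yesno.gbnf</code>) that restricts output to a yes/no answer:</p>
<div class="relative code-block p-3">
<button class="copy-btn p-1.5 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)">
<i data-lucide="copy" class="w-3 h-3"></i>
</button>
<pre class="font-mono text-xs text-slate-300 pre-wrap"># yesno.gbnf: constrain the model to "Yes." or "No."
root ::= ("Yes" | "No") "."</pre>
</div>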
| </div> |
| </div> |
|
|
| <div class="grid md:grid-cols-2 gap-6"> |
| <div class="glass p-6 rounded-xl"> |
| <h3 class="text-xl font-semibold mb-3 text-white flex items-center gap-2"> |
| <i data-lucide="cpu" class="w-5 h-5 text-blue-500"></i> |
| Continuous Batching |
| </h3> |
| <p class="text-slate-400 text-sm">Process multiple prompts simultaneously in server mode for higher throughput in production environments.</p> |
| </div> |
|
|
| <div class="glass p-6 rounded-xl"> |
| <h3 class="text-xl font-semibold mb-3 text-white flex items-center gap-2"> |
| <i data-lucide="layers" class="w-5 h-5 text-purple-500"></i> |
| LoRA Support |
| </h3> |
| <p class="text-slate-400 text-sm mb-3">Load LoRA adapters on top of base models without merging. Hot-swap adapters at runtime.</p> |
| <div class="relative code-block p-3"> |
| <button class="copy-btn p-1.5 bg-slate-700 rounded hover:bg-slate-600 text-white" onclick="copyCode(this)"> |
| <i data-lucide="copy" class="w-3 h-3"></i> |
| </button> |
| <code class="text-xs font-mono text-slate-300">./main -m base.gguf --lora adapter.bin --lora-scale 0.8</code> |
| </div> |
| </div> |
| </div> |
| </div> |
| </div> |
| </section> |
|
|
| |
| <section class="py-20 bg-slate-900/50"> |
| <div class="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8"> |
| <div class="text-center mb-12"> |
| <h2 class="text-3xl md:text-4xl font-bold mb-4 text-white">Troubleshooting</h2> |
| <p class="text-slate-400">Common issues and solutions.</p> |
| </div> |
|
|
| <div class="space-y-4"> |
| <div class="glass p-6 rounded-xl border-l-4 border-red-500"> |
| <h3 class="text-lg font-semibold text-white mb-2">CUDA Out of Memory</h3> |
| <p class="text-slate-400 text-sm mb-2">Error: <code class="bg-red-900/30 text-red-300 px-1 rounded">CUDA out of memory</code></p> |
| <p class="text-slate-400 text-sm">Reduce GPU layers or use a smaller model. Try <code class="text-primary">-ngl 20</code> instead of <code class="text-primary">-ngl 35</code>, or use a Q4_K_M quantized model instead of Q5.</p> |
| </div> |
|
|
| <div class="glass p-6 rounded-xl border-l-4 border-yellow-500"> |
| <h3 class="text-lg font-semibold text-white mb-2">Slow Token Generation</h3> |
| <p class="text-slate-400 text-sm mb-2">Model is running on CPU instead of GPU.</p> |
| <p class="text-slate-400 text-sm">Ensure you built with CUDA/Metal support. Check <code class="text-primary">nvidia-smi</code> or Activity Monitor to verify GPU usage. Increase <code class="text-primary">-ngl</code> to offload more layers.</p> |
| </div> |
|
|
| <div class="glass p-6 rounded-xl border-l-4 border-blue-500"> |
| <h3 class="text-lg font-semibold text-white mb-2">GGUF Format Errors</h3> |
| <p class="text-slate-400 text-sm mb-2">Error: <code class="bg-blue-900/30 text-blue-300 px-1 rounded">invalid magic</code></p> |
| <p class="text-slate-400 text-sm">Your llama.cpp version is too old for this GGUF file. Pull latest changes and rebuild, or download an older GGUF version (v1 or v2).</p> |
| </div> |
|
|
| <div class="glass p-6 rounded-xl border-l-4 border-green-500"> |
| <h3 class="text-lg font-semibold text-white mb-2">Model Output is Gibberish</h3> |
| <p class="text-slate-400 text-sm mb-2">Random characters or nonsensical output.</p> |
| <p class="text-slate-400 text-sm">Usually indicates wrong tokenizer or incompatible model. Ensure you're using the correct prompt template for the model (e.g., ChatML for Mistral).</p> |
| </div> |
| </div> |
| </div> |
| </section> |
|
|
| |
| <section class="py-20"> |
| <div class="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8"> |
| <div class="text-center mb-12"> |
| <h2 class="text-3xl md:text-4xl font-bold mb-4 text-white">Community & Resources</h2> |
| <p class="text-slate-400">Get help and stay updated.</p> |
| </div> |
|
|
| <div class="grid md:grid-cols-4 gap-6"> |
| <a href="https://github.com/ggerganov/llama.cpp/discussions" target="_blank" class="glass p-6 rounded-xl glass-hover transition duration-300 text-center"> |
| <i data-lucide="message-square" class="w-8 h-8 text-primary mx-auto mb-3"></i> |
| <h3 class="text-lg font-semibold text-white mb-1">GitHub Discussions</h3> |
| <p class="text-slate-400 text-sm">Community support</p> |
| </a> |
|
|
| <a href="https://discord.gg/llama-cpp" target="_blank" class="glass p-6 rounded-xl glass-hover transition duration-300 text-center"> |
| <i data-lucide="message-circle" class="w-8 h-8 text-indigo-500 mx-auto mb-3"></i> |
| <h3 class="text-lg font-semibold text-white mb-1">Discord</h3> |
| <p class="text-slate-400 text-sm">Real-time chat</p> |
| </a> |
|
|
| <a href="https://www.reddit.com/r/LocalLLaMA/" target="_blank" class="glass p-6 rounded-xl glass-hover transition duration-300 text-center"> |
| <i data-lucide="users" class="w-8 h-8 text-orange-500 mx-auto mb-3"></i> |
| <h3 class="text-lg font-semibold text-white mb-1">r/LocalLLaMA</h3> |
| <p class="text-slate-400 text-sm">Reddit community</p> |
| </a> |
|
|
| <a href="https://github.com/ggerganov/llama.cpp/blob/master/README.md" target="_blank" class="glass p-6 rounded-xl glass-hover transition duration-300 text-center"> |
| <i data-lucide="book" class="w-8 h-8 text-blue-500 mx-auto mb-3"></i> |
| <h3 class="text-lg font-semibold text-white mb-1">Documentation</h3> |
| <p class="text-slate-400 text-sm">Official wiki</p> |
| </a> |
| </div> |
| </div> |
| </section> |
|
|
| |
| <footer class="bg-slate-900 border-t border-slate-800 py-12"> |
| <div class="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8"> |
| <div class="flex flex-col md:flex-row justify-between items-center"> |
| <div class="mb-4 md:mb-0"> |
| <div class="flex items-center space-x-2 mb-2"> |
| <i data-lucide="brain-circuit" class="w-6 h-6 text-primary"></i> |
| <span class="text-lg font-bold text-white">llama.cpp</span> |
| </div> |
| <p class="text-slate-400 text-sm">The ultimate guide to running LLMs locally</p> |
| </div> |
| <div class="flex space-x-6"> |
| <a href="https://github.com/ggerganov/llama.cpp" target="_blank" class="text-slate-400 hover:text-white transition"> |
| <i data-lucide="github" class="w-6 h-6"></i> |
| </a> |
| <a href="https://huggingface.co/TheBloke" target="_blank" class="text-slate-400 hover:text-white transition"> |
| <i data-lucide="database" class="w-6 h-6"></i> |
| </a> |
| <a href="#" class="text-slate-400 hover:text-white transition"> |
| <i data-lucide="twitter" class="w-6 h-6"></i> |
| </a> |
| </div> |
| </div> |
| <div class="mt-8 pt-8 border-t border-slate-800 text-center text-slate-500 text-sm"> |
<p>Not affiliated with Meta or Facebook. llama.cpp was created by Georgi Gerganov and is maintained by the community.</p>
| </div> |
| </div> |
| </footer> |
|
|
| <script> |
| |
// Render Lucide icons
lucide.createIcons();
| |
| |
// Mobile menu toggle
const mobileMenuBtn = document.getElementById('mobile-menu-btn');
| const mobileMenu = document.getElementById('mobile-menu'); |
| |
| mobileMenuBtn.addEventListener('click', () => { |
| mobileMenu.classList.toggle('hidden'); |
| }); |
| |
| |
| mobileMenu.querySelectorAll('a').forEach(link => { |
| link.addEventListener('click', () => { |
| mobileMenu.classList.add('hidden'); |
| }); |
| }); |
| |
| |
// Copy the nearest code block's text to the clipboard and flash a check icon
function copyCode(btn) {
| const codeBlock = btn.parentElement.querySelector('code, pre'); |
| const text = codeBlock.textContent; |
| |
| navigator.clipboard.writeText(text).then(() => { |
| const originalIcon = btn.innerHTML; |
| btn.innerHTML = '<i data-lucide="check" class="w-4 h-4"></i>'; |
| lucide.createIcons(); |
| |
| setTimeout(() => { |
| btn.innerHTML = originalIcon; |
| lucide.createIcons(); |
| }, 2000); |
| }); |
| } |
| |
| |
| document.querySelectorAll('a[href^="#"]').forEach(anchor => { |
| anchor.addEventListener('click', function (e) { |
| e.preventDefault(); |
| const target = document.querySelector(this.getAttribute('href')); |
| if (target) { |
| target.scrollIntoView({ behavior: 'smooth', block: 'start' }); |
| } |
| }); |
| }); |
| </script> |
| <script src="https://deepsite.hf.co/deepsite-badge.js"></script> |
| </body> |
| </html> |