{ "Debug": { "Guaranteed-Working": { "TinyLlama-1.1B-Chat-v1.0": { "id": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "description": "🧪 Reliable small chat model ideal for debugging, testing, and low-resource environments.", "link": "https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0", "emoji": "🧪" }, "Phi-3-Mini-128k-Instruct": { "id": "microsoft/Phi-3-mini-128k-instruct", "description": "🧪 Reliable compact instruction model with broad availability and excellent stability for debugging.", "link": "https://huggingface.co/microsoft/Phi-3-mini-128k-instruct", "emoji": "📎" } } }, "Instruct": { "Llama-Instruct": { "Llama-3.1-8B-Instruct": { "id": "meta-llama/Llama-3.1-8B-Instruct", "description": "General-purpose instruction-tuned Llama 3.1 model for chat, reasoning, and everyday assistance.", "link": "https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct", "emoji": "🦙" }, "Llama-3.1-70B-Instruct": { "id": "meta-llama/Llama-3.1-70B-Instruct", "description": "🔒 Gated model — requires accepting the Meta license. High-quality instruction model with strong reasoning and chat performance.", "link": "https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct", "emoji": "🦙" }, "Llama-3.1-405B-Instruct": { "id": "meta-llama/Llama-3.1-405B-Instruct", "description": "⚠️ Extremely large model — requires paid HF Inference API or strong hardware. 🔒 Gated model — requires accepting the Meta license. Ultra-high-performance instruction model with exceptional reasoning.", "link": "https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct", "emoji": "🦙" }, "Llama-3-8B-Instruct": { "id": "meta-llama/Llama-3-8B-Instruct", "description": "🔒 Gated model — requires accepting the Meta license. Instruction-tuned Llama 3 model suitable for general chat and reasoning.", "link": "https://huggingface.co/meta-llama/Llama-3-8B-Instruct", "emoji": "🦙" }, "Llama-3-70B-Instruct": { "id": "meta-llama/Llama-3-70B-Instruct", "description": "⚠️ Large model — may require paid HF Inference API. 🔒 Gated model — requires accepting the Meta license. High-quality Llama 3 instruction model.", "link": "https://huggingface.co/meta-llama/Llama-3-70B-Instruct", "emoji": "🦙" }, "Llama-Guard-3-8B": { "id": "meta-llama/Llama-Guard-3-8B", "description": "🧪 Safety moderation model — useful for filtering or classifying chat content.", "link": "https://huggingface.co/meta-llama/Llama-Guard-3-8B", "emoji": "🛡️" } }, "Mistral-Instruct": { "Mistral-7B-Instruct-v0.3": { "id": "mistralai/Mistral-7B-Instruct-v0.3", "description": "Compact instruction-tuned Mistral model suitable for general chat and reasoning.", "link": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3", "emoji": "🌬️" }, "Mistral-7B-Instruct-v0.2": { "id": "mistralai/Mistral-7B-Instruct-v0.2", "description": "🧪 Earlier version of the Mistral instruction model — stable and widely supported.", "link": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2", "emoji": "🌬️" } }, "Mixtral-Instruct": { "Mixtral-8x7B-Instruct-v0.1": { "id": "mistralai/Mixtral-8x7B-Instruct-v0.1", "description": "High-quality mixture-of-experts instruction model offering strong reasoning and chat performance.", "link": "https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1", "emoji": "🧩" }, "Mixtral-8x22B-Instruct": { "id": "mistralai/Mixtral-8x22B-Instruct", "description": "⚠️ Extremely large model — requires paid HF Inference API or strong hardware. Advanced mixture-of-experts instruction model with exceptional performance.", "link": "https://huggingface.co/mistralai/Mixtral-8x22B-Instruct", "emoji": "🧩" } } }, "Coding": { "Qwen-Coder": { "Qwen2.5-Coder-0.5B-Instruct": { "id": "Qwen/Qwen2.5-Coder-0.5B-Instruct", "description": "Sehr leichtes Coding‑Modell für schnelle Inferenz und einfache Aufgaben.", "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-0.5B-Instruct", "emoji": "💻" }, "Qwen2.5-Coder-1.5B-Instruct": { "id": "Qwen/Qwen2.5-Coder-1.5B-Instruct", "description": "Kleines instruction‑tuned Coding‑Modell mit besserer Logik als die 0.5B‑Variante.", "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-1.5B-Instruct", "emoji": "💻" }, "Qwen2.5-Coder-3B-Instruct": { "id": "Qwen/Qwen2.5-Coder-3B-Instruct", "description": "Mittelgroßes Coding‑Modell mit guter Performance bei Codegenerierung und Debugging.", "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-3B-Instruct", "emoji": "💻" }, "Qwen2.5-Coder-7B-Instruct": { "id": "Qwen/Qwen2.5-Coder-7B-Instruct", "description": "Beliebter Allround‑Coder mit starker Fähigkeit für Codegenerierung, -bearbeitung und -erklärung.", "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct", "emoji": "💻" }, "Qwen2.5-Coder-14B-Instruct": { "id": "Qwen/Qwen2.5-Coder-14B-Instruct", "description": "Großes Modell mit verbesserter Logik und langen Kontexten — geeignet für umfangreiche Projekte.", "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-14B-Instruct", "emoji": "💻" }, "Qwen2.5-Coder-32B-Instruct": { "id": "Qwen/Qwen2.5-Coder-32B-Instruct", "description": "Leistungsstarker Coding‑Assistent mit sehr hoher Genauigkeit — große Modelle benötigen viel VRAM.", "link": "https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct", "emoji": "💻" } }, "CodeLlama": { "CodeLlama-7B-Instruct": { "id": "codellama/CodeLlama-7b-Instruct-hf", "description": "Leichtes, instruction‑tuned Modell — beliebt für Coding‑Chat und kleinere Projekte.", "link": 
"https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf", "emoji": "🧑‍💻" }, "CodeLlama-13B-Instruct": { "id": "codellama/CodeLlama-13b-Instruct-hf", "description": "Mittelklasse mit guter Codegenerierung und Reasoning — vielseitig einsetzbar.", "link": "https://huggingface.co/codellama/CodeLlama-13b-Instruct-hf", "emoji": "🧑‍💻" }, "CodeLlama-34B-Instruct": { "id": "codellama/CodeLlama-34b-Instruct-hf", "description": "Leistungsstarkes Modell für größere sowie komplexere Projekte und umfangreiche Codebasen.", "link": "https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf", "emoji": "🧑‍💻" }, "CodeLlama-70B-Instruct": { "id": "meta-llama/CodeLlama-70B-Instruct-hf", "description": "Sehr großes Modell — maximale Kapazität für komplexe Tasks, aber hoher Ressourcenbedarf.", "link": "https://huggingface.co/meta-llama/CodeLlama-70B-Instruct-hf", "emoji": "🧑‍💻" } }, "Llama3.1-Code": { "Llama-3.1-8B-Instruct-Code": { "id": "meta-llama/Llama-3.1-8B-Instruct", "description": "Kompaktes Modell für vielseitige Aufgaben — unterstützt auch Code, gute Balance aus Größe und Leistung.", "link": "https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct", "emoji": "🦙" }, "Llama-3.1-70B-Instruct-Code": { "id": "meta-llama/Llama-3.1-70B-Instruct", "description": "High‑End Modell für professionelle und komplexe Coding‑Workflows, inklusive Multilanguage.", "link": "https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct", "emoji": "🦙" } }, "DeepSeek-Coder": { "DeepSeek-Coder-1.3B": { "id": "deepseek-ai/DeepSeek-Coder-1.3B-instruct", "description": "Kleines, schnelles Modell — geeignet für einfache Aufgaben und schnelles Prototyping.", "link": "https://huggingface.co/deepseek-ai/DeepSeek-Coder-1.3B-instruct", "emoji": "🚀" }, "DeepSeek-Coder-6.7B": { "id": "deepseek-ai/DeepSeek-Coder-6.7B-instruct", "description": "Mittelklasse mit guter Multisprachen‑ und Code‑Performance — guter Kompromiss zwischen Ressourcen und Leistung.", "link": 
"https://huggingface.co/deepseek-ai/DeepSeek-Coder-6.7B-instruct", "emoji": "🚀" }, "DeepSeek-Coder-33B": { "id": "deepseek-ai/DeepSeek-Coder-33B-instruct", "description": "Stark in großen Projekten und komplexem Code — beliebt für umfangreiche Codebasen und generelle Multi‑Language‑Projekte.", "link": "https://huggingface.co/deepseek-ai/DeepSeek-Coder-33B-instruct", "emoji": "🚀" } }, "StarCoder": { "StarCoder-1B": { "id": "bigcode/starcoder", "description": "Leichtes Basis‑Code‑Modell — gut für kleinere Skripte und simple Aufgaben.", "link": "https://huggingface.co/bigcode/starcoder", "emoji": "⭐" }, "StarCoder2-7B": { "id": "bigcode/starcoder2-7b", "description": "Aktuelle Generation mit guter Performance für viele Sprachen und mittelgroße Projekte.", "link": "https://huggingface.co/bigcode/starcoder2-7b", "emoji": "⭐" }, "StarCoder2-15B": { "id": "bigcode/starcoder2-15b", "description": "Leistungsfähiges Modell mit breiter Sprachabdeckung und guter Codegenerierung — beliebt für Open-Source‑Projekte.", "link": "https://huggingface.co/bigcode/starcoder2-15b", "emoji": "⭐" } }, "Phind-CodeLlama-v2": { "Phind-CodeLlama-34B-v2": { "id": "phind/Phind-CodeLlama-34B-v2", "description": "Sehr starkes Code‑LLM, oft an der Spitze der Open‑Source‑Leaderboards — gute Leistung in vielen Sprachen inklusive C#, C++, Python.", "link": "https://huggingface.co/phind/Phind-CodeLlama-34B-v2", "emoji": "🔥" } }, "WizardCoder": { "WizardCoder-15B": { "id": "WizardLM/WizardCoder-15B-V1.0", "description": "Beliebtes Chat‑Coding‑Modell — gut für kreative Aufgaben, Erklärungen und Code‑Generierung in vielen Sprachen.", "link": "https://huggingface.co/WizardLM/WizardCoder-15B-V1.0", "emoji": "🧙‍♂️" } }, "CodeGemma": { "CodeGemma-2B": { "id": "google/codegemma-2b-code", "description": "Kompaktes Modell — gut für Mobile oder leichtes Coding, Java & Kotlin Fokus.", "link": "https://huggingface.co/google/codegemma-2b-code", "emoji": "💎" }, "CodeGemma-7B": { "id": "google/codegemma-7b-code", 
"description": "Stabil und solide — gute Performance für vielfältige Sprachen und Projekte.", "link": "https://huggingface.co/google/codegemma-7b-code", "emoji": "💎" } }, "CodeGen": { "CodeGen-2B": { "id": "Salesforce/codegen-2B-multi", "description": "Multilingual Code‑Generation — solide Grundausstattung für viele Sprachen.", "link": "https://huggingface.co/Salesforce/codegen-2B-multi", "emoji": "📘" }, "CodeGen-6B": { "id": "Salesforce/codegen-6B-multi", "description": "Gute Balance zwischen Leistungsfähigkeit und Ressourcenbedarf.", "link": "https://huggingface.co/Salesforce/codegen-6B-multi", "emoji": "📘" }, "CodeGen-16B": { "id": "Salesforce/codegen-16B-multi", "description": "Leistungsstark für große und komplexe Code‑Generierungsaufgaben.", "link": "https://huggingface.co/Salesforce/codegen-16B-multi", "emoji": "📘" } }, "InCoder": { "InCoder-1B": { "id": "facebook/incoder-1B", "description": "Gutes Infill‑Modell — praktisch für Code-Ergänzungen und Code‑Refactoring.", "link": "https://huggingface.co/facebook/incoder-1B", "emoji": "✨" }, "InCoder-6B": { "id": "facebook/incoder-6B", "description": "Robustere Version mit besserer Codequalität und Kontextverarbeitung.", "link": "https://huggingface.co/facebook/incoder-6B", "emoji": "✨" } }, "PolyCoder": { "PolyCoder-2.7B": { "id": "NinedayWang/PolyCoder-2.7B", "description": "Besonders stark in C, C++, System‑ und Performance‑Sprache — nützlich für low‑level oder cross‑platform Code.", "link": "https://huggingface.co/NinedayWang/PolyCoder-2.7B", "emoji": "🧩" } }, "Replit-Code-LLM": { "Replit-3B": { "id": "replit/Replit-code-v1-3B", "description": "Beliebt bei Web‑ und API‑Projekten — schneller Ladevorgang, gute Performance für kleine & mittlere Projekte.", "link": "https://huggingface.co/replit/Replit-code-v1-3B", "emoji": "🔧" } } }, "Reasoning": { "DeepSeek-R1": { "DeepSeek-R1-Distill-Qwen-7B": { "id": "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B", "description": "Reasoning-focused distilled model based on DeepSeek-R1, optimized for chain-of-thought tasks.", "link": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B", "emoji": "🧠" }, "DeepSeek-R1-Distill-Llama-8B": { "id": "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "description": "Reasoning-focused distilled Llama model with strong performance on structured reasoning tasks.", "link": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "emoji": "🧠" }, "DeepSeek-R1-Distill-Llama-70B": { "id": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B", "description": "⚠️ Large model — may require paid HF Inference API. High-performance reasoning model distilled from DeepSeek-R1.", "link": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-70B", "emoji": "🧠" } }, "Qwen-Reasoning": { "Qwen2.5-Math-7B-Instruct": { "id": "Qwen/Qwen2.5-Math-7B-Instruct", "description": "Math and logic-focused Qwen model optimized for structured reasoning and problem solving.", "link": "https://huggingface.co/Qwen/Qwen2.5-Math-7B-Instruct", "emoji": "🧮" } } }, "Multilingual": { "Qwen-Multi": { "Qwen2.5-7B-Instruct": { "id": "Qwen/Qwen2.5-7B-Instruct", "description": "Multilingual instruction model with strong performance across many languages.", "link": "https://huggingface.co/Qwen/Qwen2.5-7B-Instruct", "emoji": "🌍" }, "Qwen2.5-72B-Instruct": { "id": "Qwen/Qwen2.5-72B-Instruct", "description": "⚠️ Extremely large model — requires paid HF Inference API. Multilingual instruction model with advanced reasoning.", "link": "https://huggingface.co/Qwen/Qwen2.5-72B-Instruct", "emoji": "🌍" } }, "Aya-Multilingual": { "Aya-23-8B": { "id": "CohereForAI/aya-23-8b", "description": "Multilingual chat model trained across many languages with strong general-purpose performance.", "link": "https://huggingface.co/CohereForAI/aya-23-8b", "emoji": "🗣️" }, "Aya-23-35B": { "id": "CohereForAI/aya-23-35b", "description": "⚠️ Large multilingual model — may require paid HF Inference API. High-quality multilingual chat and reasoning.", "link": "https://huggingface.co/CohereForAI/aya-23-35b", "emoji": "🗣️" } } }, "Lightweight": { "Phi-Mini": { "Phi-3-Small-Instruct": { "id": "microsoft/Phi-3-small-8k-instruct", "description": "Lightweight instruction model suitable for fast inference and general chat.", "link": "https://huggingface.co/microsoft/Phi-3-small-8k-instruct", "emoji": "📎" }, "Phi-3-Medium-Instruct": { "id": "microsoft/Phi-3-medium-128k-instruct", "description": "Medium-sized Phi-3 instruction model with strong performance and broad availability.", "link": "https://huggingface.co/microsoft/Phi-3-medium-128k-instruct", "emoji": "📎" } }, "TinyLlama": { "TinyLlama-1.1B-Chat-v1.0": { "id": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "description": "🧪 Reliable small chat model ideal for debugging, testing, and low-resource environments.", "link": "https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0", "emoji": "🧪" } } }, "Exotic": { "Mamba": { "Mamba-130M-HF": { "id": "state-spaces/mamba-130m-hf", "description": "🧪 Experimental model — HF-converted Mamba variant. Limited chat capabilities but usable for experimentation.", "link": "https://huggingface.co/state-spaces/mamba-130m-hf", "emoji": "🐍" } } } }