diff --git "a/docs/refs/ref_anycoder.py" "b/docs/refs/ref_anycoder.py"
new file mode 100644
--- /dev/null
+++ "b/docs/refs/ref_anycoder.py"
@@ -0,0 +1,10123 @@
+import os
+import re
+from http import HTTPStatus
+from typing import Dict, List, Optional, Tuple
+import base64
+import mimetypes
+import PyPDF2
+import docx
+import cv2
+import numpy as np
+from PIL import Image
+import pytesseract
+import requests
+from urllib.parse import urlparse, urljoin
+from bs4 import BeautifulSoup
+import html2text
+import json
+import time
+import webbrowser
+import urllib.parse
+import copy
+import html
+
+import gradio as gr
+from huggingface_hub import InferenceClient
+from tavily import TavilyClient
+from huggingface_hub import HfApi
+import tempfile
+from openai import OpenAI
+import uuid
+from mistralai import Mistral
+import shutil
+import threading
+import atexit
+import asyncio
+from datetime import datetime, timedelta
+import dashscope
+from dashscope.utils.oss_utils import check_and_upload_local
+
+# Gradio supported languages for syntax highlighting
+GRADIO_SUPPORTED_LANGUAGES = [
+ "python", "c", "cpp", "markdown", "latex", "json", "html", "css", "javascript", "jinja2", "typescript", "yaml", "dockerfile", "shell", "r", "sql", "sql-msSQL", "sql-mySQL", "sql-mariaDB", "sql-sqlite", "sql-cassandra", "sql-plSQL", "sql-hive", "sql-pgSQL", "sql-gql", "sql-gpSQL", "sql-sparkSQL", "sql-esper", None
+]
+
+def get_gradio_language(language):
+ # Map composite options to a supported syntax highlighting
+ if language == "streamlit":
+ return "python"
+ if language == "gradio":
+ return "python"
+ return language if language in GRADIO_SUPPORTED_LANGUAGES else None
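+# Illustrative mapping (assumed inputs): get_gradio_language("streamlit") -> "python",
+# get_gradio_language("html") -> "html", get_gradio_language("cobol") -> None.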
+
+# Search/Replace Constants
+SEARCH_START = "<<<<<<< SEARCH"
+DIVIDER = "======="
+REPLACE_END = ">>>>>>> REPLACE"
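+# Assembled, a block built from these markers looks like (illustrative):
+#   <<<<<<< SEARCH
+#   exact lines copied from the current file
+#   =======
+#   replacement lines
+#   >>>>>>> REPLACE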
+
+# Gradio Documentation Auto-Update System
+GRADIO_LLMS_TXT_URL = "https://www.gradio.app/llms.txt"
+GRADIO_DOCS_CACHE_FILE = ".gradio_docs_cache.txt"
+GRADIO_DOCS_LAST_UPDATE_FILE = ".gradio_docs_last_update.txt"
+GRADIO_DOCS_UPDATE_ON_APP_UPDATE = True # Only update when app is updated, not on a timer
+
+# Global variable to store the current Gradio documentation
+_gradio_docs_content: str | None = None
+_gradio_docs_last_fetched: Optional[datetime] = None
+
+# ComfyUI Documentation Auto-Update System
+COMFYUI_LLMS_TXT_URL = "https://docs.comfy.org/llms.txt"
+COMFYUI_DOCS_CACHE_FILE = ".comfyui_docs_cache.txt"
+COMFYUI_DOCS_LAST_UPDATE_FILE = ".comfyui_docs_last_update.txt"
+COMFYUI_DOCS_UPDATE_ON_APP_UPDATE = True # Only update when app is updated, not on a timer
+
+# Global variable to store the current ComfyUI documentation
+_comfyui_docs_content: str | None = None
+_comfyui_docs_last_fetched: Optional[datetime] = None
+
+def fetch_gradio_docs() -> str | None:
+ """Fetch the latest Gradio documentation from llms.txt"""
+ try:
+ response = requests.get(GRADIO_LLMS_TXT_URL, timeout=10)
+ response.raise_for_status()
+ return response.text
+ except Exception as e:
+ print(f"Warning: Failed to fetch Gradio docs from {GRADIO_LLMS_TXT_URL}: {e}")
+ return None
+
+def fetch_comfyui_docs() -> str | None:
+ """Fetch the latest ComfyUI documentation from llms.txt"""
+ try:
+ response = requests.get(COMFYUI_LLMS_TXT_URL, timeout=10)
+ response.raise_for_status()
+ return response.text
+ except Exception as e:
+ print(f"Warning: Failed to fetch ComfyUI docs from {COMFYUI_LLMS_TXT_URL}: {e}")
+ return None
+
+def filter_problematic_instructions(content: str) -> str:
+ """Filter out problematic instructions that cause LLM to stop generation prematurely"""
+ if not content:
+ return content
+
+ # List of problematic phrases that cause early termination when LLM encounters ``` in user code
+ problematic_patterns = [
+ r"Output ONLY the code inside a ``` code block, and do not include any explanations or extra text",
+ r"output only the code inside a ```.*?``` code block",
+ r"Always output only the.*?code.*?inside.*?```.*?```.*?block",
+ r"Return ONLY the code inside a.*?```.*?``` code block",
+ r"Do NOT add the language name at the top of the code output",
+ r"do not include any explanations or extra text",
+ r"Always output only the.*?code blocks.*?shown above, and do not include any explanations",
+ r"Output.*?ONLY.*?code.*?inside.*?```.*?```",
+ r"Return.*?ONLY.*?code.*?inside.*?```.*?```",
+ r"Generate.*?ONLY.*?code.*?inside.*?```.*?```",
+ r"Provide.*?ONLY.*?code.*?inside.*?```.*?```",
+ ]
+
+ # Remove problematic patterns
+ filtered_content = content
+ for pattern in problematic_patterns:
+ # Use case-insensitive matching
+ filtered_content = re.sub(pattern, "", filtered_content, flags=re.IGNORECASE | re.DOTALL)
+
+ # Clean up any double newlines or extra whitespace left by removals
+ filtered_content = re.sub(r'\n\s*\n\s*\n', '\n\n', filtered_content)
+ filtered_content = re.sub(r'^\s+', '', filtered_content, flags=re.MULTILINE)
+
+ return filtered_content
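+
+# Illustrative behavior (assumed input, not executed at import time): a line such as
+#   "Output ONLY the code inside a ``` code block, and do not include any explanations or extra text"
+# is stripped from the fetched docs, and the leftover blank runs are collapsed to a single blank line.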
+
+def load_cached_gradio_docs() -> str | None:
+ """Load cached Gradio documentation from file"""
+ try:
+ if os.path.exists(GRADIO_DOCS_CACHE_FILE):
+ with open(GRADIO_DOCS_CACHE_FILE, 'r', encoding='utf-8') as f:
+ return f.read()
+ except Exception as e:
+ print(f"Warning: Failed to load cached Gradio docs: {e}")
+ return None
+
+def save_gradio_docs_cache(content: str):
+ """Save Gradio documentation to cache file"""
+ try:
+ with open(GRADIO_DOCS_CACHE_FILE, 'w', encoding='utf-8') as f:
+ f.write(content)
+ with open(GRADIO_DOCS_LAST_UPDATE_FILE, 'w', encoding='utf-8') as f:
+ f.write(datetime.now().isoformat())
+ except Exception as e:
+ print(f"Warning: Failed to save Gradio docs cache: {e}")
+
+def load_comfyui_docs_cache() -> str | None:
+ """Load ComfyUI documentation from cache file"""
+ try:
+ if os.path.exists(COMFYUI_DOCS_CACHE_FILE):
+ with open(COMFYUI_DOCS_CACHE_FILE, 'r', encoding='utf-8') as f:
+ return f.read()
+ except Exception as e:
+ print(f"Warning: Failed to load cached ComfyUI docs: {e}")
+ return None
+
+def save_comfyui_docs_cache(content: str):
+ """Save ComfyUI documentation to cache file"""
+ try:
+ with open(COMFYUI_DOCS_CACHE_FILE, 'w', encoding='utf-8') as f:
+ f.write(content)
+ with open(COMFYUI_DOCS_LAST_UPDATE_FILE, 'w', encoding='utf-8') as f:
+ f.write(datetime.now().isoformat())
+ except Exception as e:
+ print(f"Warning: Failed to save ComfyUI docs cache: {e}")
+
+def get_last_update_time() -> Optional[datetime]:
+ """Get the last update time from file"""
+ try:
+ if os.path.exists(GRADIO_DOCS_LAST_UPDATE_FILE):
+ with open(GRADIO_DOCS_LAST_UPDATE_FILE, 'r', encoding='utf-8') as f:
+ return datetime.fromisoformat(f.read().strip())
+ except Exception as e:
+ print(f"Warning: Failed to read last update time: {e}")
+ return None
+
+def should_update_gradio_docs() -> bool:
+ """Check if Gradio documentation should be updated"""
+ # Only update if we don't have cached content (first run or cache deleted)
+ return not os.path.exists(GRADIO_DOCS_CACHE_FILE)
+
+def should_update_comfyui_docs() -> bool:
+ """Check if ComfyUI documentation should be updated"""
+ # Only update if we don't have cached content (first run or cache deleted)
+ return not os.path.exists(COMFYUI_DOCS_CACHE_FILE)
+
+def force_update_gradio_docs():
+ """
+ Force an update of Gradio documentation (useful when app is updated).
+
+ To manually refresh docs, you can call this function or simply delete the cache file:
+ rm .gradio_docs_cache.txt && restart the app
+ """
+ global _gradio_docs_content, _gradio_docs_last_fetched
+
+ print("🔄 Forcing Gradio documentation update...")
+ latest_content = fetch_gradio_docs()
+
+ if latest_content:
+ # Filter out problematic instructions that cause early termination
+ filtered_content = filter_problematic_instructions(latest_content)
+ _gradio_docs_content = filtered_content
+ _gradio_docs_last_fetched = datetime.now()
+ save_gradio_docs_cache(filtered_content)
+ update_gradio_system_prompts()
+ print("✅ Gradio documentation updated successfully")
+ return True
+ else:
+ print("❌ Failed to update Gradio documentation")
+ return False
+
+def force_update_comfyui_docs():
+ """
+ Force an update of ComfyUI documentation (useful when app is updated).
+
+ To manually refresh docs, you can call this function or simply delete the cache file:
+ rm .comfyui_docs_cache.txt && restart the app
+ """
+ global _comfyui_docs_content, _comfyui_docs_last_fetched
+
+ print("🔄 Forcing ComfyUI documentation update...")
+ latest_content = fetch_comfyui_docs()
+
+ if latest_content:
+ # Filter out problematic instructions that cause early termination
+ filtered_content = filter_problematic_instructions(latest_content)
+ _comfyui_docs_content = filtered_content
+ _comfyui_docs_last_fetched = datetime.now()
+ save_comfyui_docs_cache(filtered_content)
+ update_json_system_prompts()
+ print("✅ ComfyUI documentation updated successfully")
+ return True
+ else:
+ print("❌ Failed to update ComfyUI documentation")
+ return False
+
+def get_gradio_docs_content() -> str:
+ """Get the current Gradio documentation content, updating if necessary"""
+ global _gradio_docs_content, _gradio_docs_last_fetched
+
+ # Check if we need to update
+ if (_gradio_docs_content is None or
+ _gradio_docs_last_fetched is None or
+ should_update_gradio_docs()):
+
+ print("Updating Gradio documentation...")
+
+ # Try to fetch latest content
+ latest_content = fetch_gradio_docs()
+
+ if latest_content:
+ # Filter out problematic instructions that cause early termination
+ filtered_content = filter_problematic_instructions(latest_content)
+ _gradio_docs_content = filtered_content
+ _gradio_docs_last_fetched = datetime.now()
+ save_gradio_docs_cache(filtered_content)
+ print("✅ Gradio documentation updated successfully")
+ else:
+ # Fallback to cached content
+ cached_content = load_cached_gradio_docs()
+ if cached_content:
+ _gradio_docs_content = cached_content
+ _gradio_docs_last_fetched = datetime.now()
+ print("⚠️ Using cached Gradio documentation (network fetch failed)")
+ else:
+ # Fallback to minimal content
+ _gradio_docs_content = """
+ # Gradio API Reference (Offline Fallback)
+
+ This is a minimal fallback when documentation cannot be fetched.
+ Please check your internet connection for the latest API reference.
+
+ Basic Gradio components: Button, Textbox, Slider, Image, Audio, Video, File, etc.
+ Use gr.Blocks() for custom layouts and gr.Interface() for simple apps.
+ """
+ print("❌ Using minimal fallback documentation")
+
+ return _gradio_docs_content or ""
+
+def get_comfyui_docs_content() -> str:
+ """Get the current ComfyUI documentation content, updating if necessary"""
+ global _comfyui_docs_content, _comfyui_docs_last_fetched
+
+ # Check if we need to update
+ if (_comfyui_docs_content is None or
+ _comfyui_docs_last_fetched is None or
+ should_update_comfyui_docs()):
+
+ print("Updating ComfyUI documentation...")
+
+ # Try to fetch latest content
+ latest_content = fetch_comfyui_docs()
+
+ if latest_content:
+ # Filter out problematic instructions that cause early termination
+ filtered_content = filter_problematic_instructions(latest_content)
+ _comfyui_docs_content = filtered_content
+ _comfyui_docs_last_fetched = datetime.now()
+ save_comfyui_docs_cache(filtered_content)
+ print("✅ ComfyUI documentation updated successfully")
+ else:
+ # Fallback to cached content
+ cached_content = load_comfyui_docs_cache()
+ if cached_content:
+ _comfyui_docs_content = cached_content
+ _comfyui_docs_last_fetched = datetime.now()
+ print("⚠️ Using cached ComfyUI documentation (network fetch failed)")
+ else:
+ # Fallback to minimal content
+ _comfyui_docs_content = """
+ # ComfyUI API Reference (Offline Fallback)
+
+ This is a minimal fallback when documentation cannot be fetched.
+ Please check your internet connection for the latest API reference.
+
+ Basic ComfyUI workflow structure: nodes, connections, inputs, outputs.
+ Use CheckpointLoaderSimple, CLIPTextEncode, KSampler for basic workflows.
+ """
+ print("❌ Using minimal fallback documentation")
+
+ return _comfyui_docs_content or ""
+
+def update_gradio_system_prompts():
+ """Update the global Gradio system prompts with latest documentation"""
+ global GRADIO_SYSTEM_PROMPT, GRADIO_SYSTEM_PROMPT_WITH_SEARCH
+
+ docs_content = get_gradio_docs_content()
+
+ # Base system prompt
+ base_prompt = """You are an expert Gradio developer. Create a complete, working Gradio application based on the user's request. Generate all necessary code to make the application functional and runnable.
+
+🚨 IMPORTANT: If the user is asking to use external APIs (like OpenRouter, OpenAI API, Hugging Face Inference API, etc.), DO NOT use @spaces.GPU decorators or any ZeroGPU features. External APIs handle the model inference remotely, so GPU allocation on the Spaces instance is not needed.
+
+🚨 CRITICAL REQUIREMENT: If the user provides ANY diffusion model code (FLUX, Stable Diffusion, etc.) that runs locally (not via API), you MUST implement ZeroGPU ahead-of-time (AoT) compilation. This is mandatory and provides 1.3x-1.8x performance improvements. Do not create basic Gradio apps without AoT optimization for diffusion models.
+
+## ZeroGPU Integration (MANDATORY)
+
+ALWAYS use ZeroGPU for GPU-dependent functions in Gradio apps:
+
+1. Import the spaces module: `import spaces`
+2. Decorate GPU-dependent functions with `@spaces.GPU`
+3. Specify appropriate duration based on expected runtime:
+ - Quick inference (< 30s): `@spaces.GPU(duration=30)`
+ - Standard generation (30-60s): `@spaces.GPU` (default 60s)
+ - Complex generation (60-120s): `@spaces.GPU(duration=120)`
+ - Heavy processing (120-180s): `@spaces.GPU(duration=180)`
+
+Example usage:
+```python
+import spaces
+from diffusers import DiffusionPipeline
+
+pipe = DiffusionPipeline.from_pretrained(...)
+pipe.to('cuda')
+
+@spaces.GPU(duration=120)
+def generate(prompt):
+ return pipe(prompt).images
+
+gr.Interface(
+ fn=generate,
+ inputs=gr.Text(),
+ outputs=gr.Gallery(),
+).launch()
+```
+
+Duration Guidelines:
+- Shorter durations improve queue priority for users
+- Text-to-image: typically 30-60 seconds
+- Image-to-image: typically 20-40 seconds
+- Video generation: typically 60-180 seconds
+- Audio/music generation: typically 30-90 seconds
+- Model loading + inference: add 10-30s buffer
+- AoT compilation during startup: use @spaces.GPU(duration=1500) for maximum allowed duration
+
+Functions that typically need @spaces.GPU:
+- Image generation (text-to-image, image-to-image)
+- Video generation
+- Audio/music generation
+- Model inference with transformers, diffusers
+- Any function using .to('cuda') or GPU operations
+
+## CRITICAL: Use ZeroGPU AoT Compilation for ALL Diffusion Models
+
+FOR ANY DIFFUSION MODEL (FLUX, Stable Diffusion, etc.), YOU MUST IMPLEMENT AHEAD-OF-TIME COMPILATION.
+This is NOT optional - it provides 1.3x-1.8x speedup and is essential for production ZeroGPU Spaces.
+
+ALWAYS implement this pattern for diffusion models:
+
+### MANDATORY: Basic AoT Compilation Pattern
+YOU MUST USE THIS EXACT PATTERN for any diffusion model (FLUX, Stable Diffusion, etc.):
+
+1. ALWAYS add AoT compilation function with @spaces.GPU(duration=1500)
+2. ALWAYS use spaces.aoti_capture to capture inputs
+3. ALWAYS use torch.export.export to export the transformer
+4. ALWAYS use spaces.aoti_compile to compile
+5. ALWAYS use spaces.aoti_apply to apply to pipeline
+
+### Required AoT Implementation
+```python
+import spaces
+import torch
+from diffusers import DiffusionPipeline
+
+MODEL_ID = 'black-forest-labs/FLUX.1-dev'
+pipe = DiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16)
+pipe.to('cuda')
+
+@spaces.GPU(duration=1500) # Maximum duration allowed during startup
+def compile_transformer():
+ # 1. Capture example inputs
+ with spaces.aoti_capture(pipe.transformer) as call:
+ pipe("arbitrary example prompt")
+
+ # 2. Export the model
+ exported = torch.export.export(
+ pipe.transformer,
+ args=call.args,
+ kwargs=call.kwargs,
+ )
+
+ # 3. Compile the exported model
+ return spaces.aoti_compile(exported)
+
+# 4. Apply compiled model to pipeline
+compiled_transformer = compile_transformer()
+spaces.aoti_apply(compiled_transformer, pipe.transformer)
+
+@spaces.GPU
+def generate(prompt):
+ return pipe(prompt).images
+```
+
+### Advanced Optimizations
+
+#### FP8 Quantization (Additional 1.2x speedup on H200)
+```python
+from torchao.quantization import quantize_, Float8DynamicActivationFloat8WeightConfig
+
+@spaces.GPU(duration=1500)
+def compile_transformer_with_quantization():
+ # Quantize before export for FP8 speedup
+ quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
+
+ with spaces.aoti_capture(pipe.transformer) as call:
+ pipe("arbitrary example prompt")
+
+ exported = torch.export.export(
+ pipe.transformer,
+ args=call.args,
+ kwargs=call.kwargs,
+ )
+ return spaces.aoti_compile(exported)
+```
+
+#### Dynamic Shapes (Variable input sizes)
+```python
+from torch.utils._pytree import tree_map
+
+@spaces.GPU(duration=1500)
+def compile_transformer_dynamic():
+ with spaces.aoti_capture(pipe.transformer) as call:
+ pipe("arbitrary example prompt")
+
+ # Define dynamic dimension ranges (model-dependent)
+ transformer_hidden_dim = torch.export.Dim('hidden', min=4096, max=8212)
+
+ # Map argument names to dynamic dimensions
+ transformer_dynamic_shapes = {
+ "hidden_states": {1: transformer_hidden_dim},
+ "img_ids": {0: transformer_hidden_dim},
+ }
+
+ # Create dynamic shapes structure
+ dynamic_shapes = tree_map(lambda v: None, call.kwargs)
+ dynamic_shapes.update(transformer_dynamic_shapes)
+
+ exported = torch.export.export(
+ pipe.transformer,
+ args=call.args,
+ kwargs=call.kwargs,
+ dynamic_shapes=dynamic_shapes,
+ )
+ return spaces.aoti_compile(exported)
+```
+
+#### Multi-Compile for Different Resolutions
+```python
+@spaces.GPU(duration=1500)
+def compile_multiple_resolutions():
+ compiled_models = {}
+ resolutions = [(512, 512), (768, 768), (1024, 1024)]
+
+ for width, height in resolutions:
+ # Capture inputs for specific resolution
+ with spaces.aoti_capture(pipe.transformer) as call:
+ pipe(f"test prompt {width}x{height}", width=width, height=height)
+
+ exported = torch.export.export(
+ pipe.transformer,
+ args=call.args,
+ kwargs=call.kwargs,
+ )
+ compiled_models[f"{width}x{height}"] = spaces.aoti_compile(exported)
+
+ return compiled_models
+
+# Usage with resolution dispatch
+compiled_models = compile_multiple_resolutions()
+
+@spaces.GPU
+def generate_with_resolution(prompt, width=1024, height=1024):
+ resolution_key = f"{width}x{height}"
+ if resolution_key in compiled_models:
+ # Temporarily apply the right compiled model
+ spaces.aoti_apply(compiled_models[resolution_key], pipe.transformer)
+ return pipe(prompt, width=width, height=height).images
+```
+
+#### FlashAttention-3 Integration
+```python
+from kernels import get_kernel
+
+# Load pre-built FA3 kernel compatible with H200
+try:
+ vllm_flash_attn3 = get_kernel("kernels-community/vllm-flash-attn3")
+ print("✅ FlashAttention-3 kernel loaded successfully")
+except Exception as e:
+ print(f"⚠️ FlashAttention-3 not available: {e}")
+
+# Custom attention processor example
+class FlashAttention3Processor:
+ def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None):
+ # Use FA3 kernel for attention computation
+ return vllm_flash_attn3(hidden_states, encoder_hidden_states, attention_mask)
+
+# Apply FA3 processor to model
+if 'vllm_flash_attn3' in locals():
+ for name, module in pipe.transformer.named_modules():
+ if hasattr(module, 'processor'):
+ module.processor = FlashAttention3Processor()
+```
+
+### Complete Optimized Example
+```python
+import spaces
+import torch
+from diffusers import DiffusionPipeline
+from torchao.quantization import quantize_, Float8DynamicActivationFloat8WeightConfig
+
+MODEL_ID = 'black-forest-labs/FLUX.1-dev'
+pipe = DiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16)
+pipe.to('cuda')
+
+@spaces.GPU(duration=1500)
+def compile_optimized_transformer():
+ # Apply FP8 quantization
+ quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
+
+ # Capture inputs
+ with spaces.aoti_capture(pipe.transformer) as call:
+ pipe("optimization test prompt")
+
+ # Export and compile
+ exported = torch.export.export(
+ pipe.transformer,
+ args=call.args,
+ kwargs=call.kwargs,
+ )
+ return spaces.aoti_compile(exported)
+
+# Compile during startup
+compiled_transformer = compile_optimized_transformer()
+spaces.aoti_apply(compiled_transformer, pipe.transformer)
+
+@spaces.GPU
+def generate(prompt):
+ return pipe(prompt).images
+```
+
+**Expected Performance Gains:**
+- Basic AoT: 1.3x-1.8x speedup
+- + FP8 Quantization: Additional 1.2x speedup
+- + FlashAttention-3: Additional attention speedup
+- Total potential: 2x-3x faster inference
+
+**Hardware Requirements:**
+- FP8 quantization requires CUDA compute capability ≥ 9.0 (H200 ✅)
+- FlashAttention-3 works on H200 hardware via kernels library
+- Dynamic shapes add flexibility for variable input sizes
+
+## Complete Gradio API Reference
+
+This reference is automatically synced from https://www.gradio.app/llms.txt to ensure accuracy.
+
+"""
+
+ # Search-enabled prompt
+ search_prompt = """You are an expert Gradio developer with access to real-time web search. Create a complete, working Gradio application based on the user's request. When needed, use web search to find current best practices or verify latest Gradio features. Generate all necessary code to make the application functional and runnable.
+
+🚨 IMPORTANT: If the user is asking to use external APIs (like OpenRouter, OpenAI API, Hugging Face Inference API, etc.), DO NOT use @spaces.GPU decorators or any ZeroGPU features. External APIs handle the model inference remotely, so GPU allocation on the Spaces instance is not needed.
+
+🚨 CRITICAL REQUIREMENT: If the user provides ANY diffusion model code (FLUX, Stable Diffusion, etc.) that runs locally (not via API), you MUST implement ZeroGPU ahead-of-time (AoT) compilation. This is mandatory and provides 1.3x-1.8x performance improvements. Do not create basic Gradio apps without AoT optimization for diffusion models.
+
+## ZeroGPU Integration (MANDATORY)
+
+ALWAYS use ZeroGPU for GPU-dependent functions in Gradio apps:
+
+1. Import the spaces module: `import spaces`
+2. Decorate GPU-dependent functions with `@spaces.GPU`
+3. Specify appropriate duration based on expected runtime:
+ - Quick inference (< 30s): `@spaces.GPU(duration=30)`
+ - Standard generation (30-60s): `@spaces.GPU` (default 60s)
+ - Complex generation (60-120s): `@spaces.GPU(duration=120)`
+ - Heavy processing (120-180s): `@spaces.GPU(duration=180)`
+
+Example usage:
+```python
+import spaces
+from diffusers import DiffusionPipeline
+
+pipe = DiffusionPipeline.from_pretrained(...)
+pipe.to('cuda')
+
+@spaces.GPU(duration=120)
+def generate(prompt):
+ return pipe(prompt).images
+
+gr.Interface(
+ fn=generate,
+ inputs=gr.Text(),
+ outputs=gr.Gallery(),
+).launch()
+```
+
+Duration Guidelines:
+- Shorter durations improve queue priority for users
+- Text-to-image: typically 30-60 seconds
+- Image-to-image: typically 20-40 seconds
+- Video generation: typically 60-180 seconds
+- Audio/music generation: typically 30-90 seconds
+- Model loading + inference: add 10-30s buffer
+- AoT compilation during startup: use @spaces.GPU(duration=1500) for maximum allowed duration
+
+Functions that typically need @spaces.GPU:
+- Image generation (text-to-image, image-to-image)
+- Video generation
+- Audio/music generation
+- Model inference with transformers, diffusers
+- Any function using .to('cuda') or GPU operations
+
+## CRITICAL: Use ZeroGPU AoT Compilation for ALL Diffusion Models
+
+FOR ANY DIFFUSION MODEL (FLUX, Stable Diffusion, etc.), YOU MUST IMPLEMENT AHEAD-OF-TIME COMPILATION.
+This is NOT optional - it provides 1.3x-1.8x speedup and is essential for production ZeroGPU Spaces.
+
+ALWAYS implement this pattern for diffusion models:
+
+### MANDATORY: Basic AoT Compilation Pattern
+YOU MUST USE THIS EXACT PATTERN for any diffusion model (FLUX, Stable Diffusion, etc.):
+
+1. ALWAYS add AoT compilation function with @spaces.GPU(duration=1500)
+2. ALWAYS use spaces.aoti_capture to capture inputs
+3. ALWAYS use torch.export.export to export the transformer
+4. ALWAYS use spaces.aoti_compile to compile
+5. ALWAYS use spaces.aoti_apply to apply to pipeline
+
+### Required AoT Implementation
+
+For production Spaces with heavy models, use ahead-of-time (AoT) compilation for 1.3x-1.8x speedups:
+
+### Basic AoT Compilation
+```python
+import spaces
+import torch
+from diffusers import DiffusionPipeline
+
+MODEL_ID = 'black-forest-labs/FLUX.1-dev'
+pipe = DiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16)
+pipe.to('cuda')
+
+@spaces.GPU(duration=1500) # Maximum duration allowed during startup
+def compile_transformer():
+ # 1. Capture example inputs
+ with spaces.aoti_capture(pipe.transformer) as call:
+ pipe("arbitrary example prompt")
+
+ # 2. Export the model
+ exported = torch.export.export(
+ pipe.transformer,
+ args=call.args,
+ kwargs=call.kwargs,
+ )
+
+ # 3. Compile the exported model
+ return spaces.aoti_compile(exported)
+
+# 4. Apply compiled model to pipeline
+compiled_transformer = compile_transformer()
+spaces.aoti_apply(compiled_transformer, pipe.transformer)
+
+@spaces.GPU
+def generate(prompt):
+ return pipe(prompt).images
+```
+
+### Advanced Optimizations
+
+#### FP8 Quantization (Additional 1.2x speedup on H200)
+```python
+from torchao.quantization import quantize_, Float8DynamicActivationFloat8WeightConfig
+
+@spaces.GPU(duration=1500)
+def compile_transformer_with_quantization():
+ # Quantize before export for FP8 speedup
+ quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
+
+ with spaces.aoti_capture(pipe.transformer) as call:
+ pipe("arbitrary example prompt")
+
+ exported = torch.export.export(
+ pipe.transformer,
+ args=call.args,
+ kwargs=call.kwargs,
+ )
+ return spaces.aoti_compile(exported)
+```
+
+#### Dynamic Shapes (Variable input sizes)
+```python
+from torch.utils._pytree import tree_map
+
+@spaces.GPU(duration=1500)
+def compile_transformer_dynamic():
+ with spaces.aoti_capture(pipe.transformer) as call:
+ pipe("arbitrary example prompt")
+
+ # Define dynamic dimension ranges (model-dependent)
+ transformer_hidden_dim = torch.export.Dim('hidden', min=4096, max=8212)
+
+ # Map argument names to dynamic dimensions
+ transformer_dynamic_shapes = {
+ "hidden_states": {1: transformer_hidden_dim},
+ "img_ids": {0: transformer_hidden_dim},
+ }
+
+ # Create dynamic shapes structure
+ dynamic_shapes = tree_map(lambda v: None, call.kwargs)
+ dynamic_shapes.update(transformer_dynamic_shapes)
+
+ exported = torch.export.export(
+ pipe.transformer,
+ args=call.args,
+ kwargs=call.kwargs,
+ dynamic_shapes=dynamic_shapes,
+ )
+ return spaces.aoti_compile(exported)
+```
+
+#### Multi-Compile for Different Resolutions
+```python
+@spaces.GPU(duration=1500)
+def compile_multiple_resolutions():
+ compiled_models = {}
+ resolutions = [(512, 512), (768, 768), (1024, 1024)]
+
+ for width, height in resolutions:
+ # Capture inputs for specific resolution
+ with spaces.aoti_capture(pipe.transformer) as call:
+ pipe(f"test prompt {width}x{height}", width=width, height=height)
+
+ exported = torch.export.export(
+ pipe.transformer,
+ args=call.args,
+ kwargs=call.kwargs,
+ )
+ compiled_models[f"{width}x{height}"] = spaces.aoti_compile(exported)
+
+ return compiled_models
+
+# Usage with resolution dispatch
+compiled_models = compile_multiple_resolutions()
+
+@spaces.GPU
+def generate_with_resolution(prompt, width=1024, height=1024):
+ resolution_key = f"{width}x{height}"
+ if resolution_key in compiled_models:
+ # Temporarily apply the right compiled model
+ spaces.aoti_apply(compiled_models[resolution_key], pipe.transformer)
+ return pipe(prompt, width=width, height=height).images
+```
+
+#### FlashAttention-3 Integration
+```python
+from kernels import get_kernel
+
+# Load pre-built FA3 kernel compatible with H200
+try:
+ vllm_flash_attn3 = get_kernel("kernels-community/vllm-flash-attn3")
+ print("✅ FlashAttention-3 kernel loaded successfully")
+except Exception as e:
+ print(f"⚠️ FlashAttention-3 not available: {e}")
+
+# Custom attention processor example
+class FlashAttention3Processor:
+ def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None):
+ # Use FA3 kernel for attention computation
+ return vllm_flash_attn3(hidden_states, encoder_hidden_states, attention_mask)
+
+# Apply FA3 processor to model
+if 'vllm_flash_attn3' in locals():
+ for name, module in pipe.transformer.named_modules():
+ if hasattr(module, 'processor'):
+ module.processor = FlashAttention3Processor()
+```
+
+### Complete Optimized Example
+```python
+import spaces
+import torch
+from diffusers import DiffusionPipeline
+from torchao.quantization import quantize_, Float8DynamicActivationFloat8WeightConfig
+
+MODEL_ID = 'black-forest-labs/FLUX.1-dev'
+pipe = DiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16)
+pipe.to('cuda')
+
+@spaces.GPU(duration=1500)
+def compile_optimized_transformer():
+ # Apply FP8 quantization
+ quantize_(pipe.transformer, Float8DynamicActivationFloat8WeightConfig())
+
+ # Capture inputs
+ with spaces.aoti_capture(pipe.transformer) as call:
+ pipe("optimization test prompt")
+
+ # Export and compile
+ exported = torch.export.export(
+ pipe.transformer,
+ args=call.args,
+ kwargs=call.kwargs,
+ )
+ return spaces.aoti_compile(exported)
+
+# Compile during startup
+compiled_transformer = compile_optimized_transformer()
+spaces.aoti_apply(compiled_transformer, pipe.transformer)
+
+@spaces.GPU
+def generate(prompt):
+ return pipe(prompt).images
+```
+
+**Expected Performance Gains:**
+- Basic AoT: 1.3x-1.8x speedup
+- + FP8 Quantization: Additional 1.2x speedup
+- + FlashAttention-3: Additional attention speedup
+- Total potential: 2x-3x faster inference
+
+**Hardware Requirements:**
+- FP8 quantization requires CUDA compute capability ≥ 9.0 (H200 ✅)
+- FlashAttention-3 works on H200 hardware via kernels library
+- Dynamic shapes add flexibility for variable input sizes
+
+## Complete Gradio API Reference
+
+This reference is automatically synced from https://www.gradio.app/llms.txt to ensure accuracy.
+
+"""
+
+ # Update the prompts
+ GRADIO_SYSTEM_PROMPT = base_prompt + docs_content + "\n\nAlways use the exact function signatures from this API reference and follow modern Gradio patterns.\n\nIMPORTANT: Always include \"Built with anycoder\" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder"
+ GRADIO_SYSTEM_PROMPT_WITH_SEARCH = search_prompt + docs_content + "\n\nAlways use the exact function signatures from this API reference and follow modern Gradio patterns.\n\nIMPORTANT: Always include \"Built with anycoder\" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder"
+
+def update_json_system_prompts():
+ """Update the global JSON system prompts with latest ComfyUI documentation"""
+ global JSON_SYSTEM_PROMPT, JSON_SYSTEM_PROMPT_WITH_SEARCH
+
+ docs_content = get_comfyui_docs_content()
+
+ # Base system prompt
+ base_prompt = """You are an expert JSON developer. Generate clean, valid JSON data based on the user's request. Follow JSON syntax rules strictly:
+- Use double quotes for strings
+- No trailing commas
+- Proper nesting and structure
+- Valid data types (string, number, boolean, null, object, array)
+
+Generate ONLY the JSON data requested - no HTML, no applications, no explanations outside the JSON. The output should be pure, valid JSON that can be parsed directly.
+
+"""
+
+ # Search-enabled system prompt
+ search_prompt = """You are an expert JSON developer. You have access to real-time web search. When needed, use web search to find the latest information or data structures for your JSON generation.
+
+Generate clean, valid JSON data based on the user's request. Follow JSON syntax rules strictly:
+- Use double quotes for strings
+- No trailing commas
+- Proper nesting and structure
+- Valid data types (string, number, boolean, null, object, array)
+
+Generate ONLY the JSON data requested - no HTML, no applications, no explanations outside the JSON. The output should be pure, valid JSON that can be parsed directly.
+
+"""
+
+ # Add ComfyUI documentation if available
+ if docs_content.strip():
+ comfyui_section = f"""
+## ComfyUI Reference Documentation
+
+When generating JSON data related to ComfyUI workflows, nodes, or configurations, use this reference:
+
+{docs_content}
+
+This reference is automatically synced from https://docs.comfy.org/llms.txt to ensure accuracy.
+
+"""
+ base_prompt += comfyui_section
+ search_prompt += comfyui_section
+
+ # Update the prompts
+ JSON_SYSTEM_PROMPT = base_prompt
+ JSON_SYSTEM_PROMPT_WITH_SEARCH = search_prompt
+
+# Initialize Gradio documentation on startup
+def initialize_gradio_docs():
+ """Initialize Gradio documentation on application startup"""
+ try:
+ update_gradio_system_prompts()
+ if should_update_gradio_docs():
+ print("🚀 Gradio documentation system initialized (fetched fresh content)")
+ else:
+ print("🚀 Gradio documentation system initialized (using cached content)")
+ except Exception as e:
+ print(f"Warning: Failed to initialize Gradio documentation: {e}")
+
+# Initialize ComfyUI documentation on startup
+def initialize_comfyui_docs():
+ """Initialize ComfyUI documentation on application startup"""
+ try:
+ update_json_system_prompts()
+ if should_update_comfyui_docs():
+ print("🚀 ComfyUI documentation system initialized (fetched fresh content)")
+ else:
+ print("🚀 ComfyUI documentation system initialized (using cached content)")
+ except Exception as e:
+ print(f"Warning: Failed to initialize ComfyUI documentation: {e}")
+
+# Configuration
+HTML_SYSTEM_PROMPT = """ONLY USE HTML, CSS AND JAVASCRIPT. If you want to use ICONS, make sure to import the icon library first. Try to create the best UI possible by using only HTML, CSS and JAVASCRIPT. MAKE IT RESPONSIVE USING MODERN CSS. Use modern CSS for the styling as much as you can; if you can't do something with modern CSS, then use custom CSS. Also, try to elaborate as much as you can to create something unique. ALWAYS GIVE THE RESPONSE AS A SINGLE HTML FILE
+
+For website redesign tasks:
+- Use the provided original HTML code as the starting point for redesign
+- Preserve all original content, structure, and functionality
+- Keep the same semantic HTML structure but enhance the styling
+- Reuse all original images and their URLs from the HTML code
+- Create a modern, responsive design with improved typography and spacing
+- Use modern CSS frameworks and design patterns
+- Ensure accessibility and mobile responsiveness
+- Maintain the same navigation and user flow
+- Enhance the visual design while keeping the original layout structure
+
+If an image is provided, analyze it and use the visual information to better understand the user's requirements.
+
+Always respond with code that can be executed or rendered directly.
+
+Generate complete, working HTML code that can be run immediately.
+
+IMPORTANT: Always include "Built with anycoder" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder"""
+
+def validate_video_html(video_html: str) -> bool:
+ """Validate that the video HTML is well-formed and safe to insert."""
+ try:
+ # Basic checks for video HTML structure
+ if not video_html or not video_html.strip():
+ return False
+
+ # Check for required video elements
+        if '<video' not in video_html:  # <video> tag not found
+ return False
+
+ return True
+ except Exception:
+ return False
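+
+# Illustrative checks (assumed inputs):
+#   validate_video_html('<video src="clip.mp4" controls></video>')  -> True
+#   validate_video_html('<img src="poster.png">')                   -> False (no <video> tag)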
+
+def llm_place_media(html_content: str, media_html_tag: str, media_kind: str = "image") -> str:
+ """Ask a lightweight model to produce search/replace blocks that insert media_html_tag in the best spot.
+
+ The model must return ONLY our block format using SEARCH_START/DIVIDER/REPLACE_END.
+ """
+ try:
+ client = get_inference_client("Qwen/Qwen3-Coder-480B-A35B-Instruct", "auto")
+ system_prompt = (
+ "You are a code editor. Insert the provided media tag into the given HTML in the most semantically appropriate place.\n"
+ "For video elements: prefer replacing placeholder images or inserting in hero sections with proper container divs.\n"
+ "For image elements: prefer replacing placeholder images or inserting near related content.\n"
+ "CRITICAL: Ensure proper HTML structure - videos should be wrapped in appropriate containers.\n"
+ "Return ONLY search/replace blocks using the exact markers: <<<<<<< SEARCH, =======, >>>>>>> REPLACE.\n"
+ "Do NOT include any commentary. Ensure the SEARCH block matches exact lines from the input.\n"
+ "When inserting videos, ensure they are properly contained within semantic HTML elements.\n"
+ )
+ # Truncate very long media tags for LLM prompt only to prevent token limits
+ truncated_media_tag_for_prompt = media_html_tag
+ if len(media_html_tag) > 2000:
+ # For very long data URIs, show structure but truncate the data for LLM prompt
+ if 'data:video/mp4;base64,' in media_html_tag:
+ start_idx = media_html_tag.find('data:video/mp4;base64,')
+ end_idx = media_html_tag.find('"', start_idx)
+ if start_idx != -1 and end_idx != -1:
+ truncated_media_tag_for_prompt = (
+ media_html_tag[:start_idx] +
+ 'data:video/mp4;base64,[TRUNCATED_BASE64_DATA]' +
+ media_html_tag[end_idx:]
+ )
+
+ user_payload = (
+ "HTML Document:\n" + html_content + "\n\n" +
+ f"Media ({media_kind}):\n" + truncated_media_tag_for_prompt + "\n\n" +
+ "Produce search/replace blocks now."
+ )
+ messages = [
+ {"role": "system", "content": system_prompt},
+ {"role": "user", "content": user_payload},
+ ]
+ completion = client.chat.completions.create(
+ model="Qwen/Qwen3-Coder-480B-A35B-Instruct",
+ messages=messages,
+ max_tokens=2000,
+ temperature=0.2,
+ )
+ text = (completion.choices[0].message.content or "") if completion and completion.choices else ""
+
+ # Replace any truncated placeholders with the original full media HTML
+ if '[TRUNCATED_BASE64_DATA]' in text and 'data:video/mp4;base64,[TRUNCATED_BASE64_DATA]' in truncated_media_tag_for_prompt:
+ # Extract the original base64 data from the full media tag
+ original_start = media_html_tag.find('data:video/mp4;base64,')
+ original_end = media_html_tag.find('"', original_start)
+ if original_start != -1 and original_end != -1:
+ original_data_uri = media_html_tag[original_start:original_end]
+ text = text.replace('data:video/mp4;base64,[TRUNCATED_BASE64_DATA]', original_data_uri)
+
+ return text.strip()
+ except Exception as e:
+ print(f"[LLMPlaceMedia] Fallback due to error: {e}")
+ return ""
+
+# Stricter prompt for GLM-4.5V to ensure a complete, runnable HTML document with no escaped characters
+GLM45V_HTML_SYSTEM_PROMPT = """You are an expert front-end developer.
+
+Output a COMPLETE, STANDALONE HTML document that renders directly in a browser.
+
+Hard constraints:
+- DO NOT use React, ReactDOM, JSX, Babel, Vue, Angular, Svelte, or any SPA framework.
+- Use ONLY plain HTML, CSS, and vanilla JavaScript.
+- Allowed external resources: Tailwind CSS CDN, Font Awesome CDN, Google Fonts.
+- Do NOT escape characters (no \\n, \\t, or escaped quotes). Output raw HTML/JS/CSS.
+
+Structural requirements:
+- Include <!DOCTYPE html>, <html>, <head>, and <body> with proper nesting
+- Include required <link> tags for any CSS you reference (e.g., Tailwind, Font Awesome, Google Fonts)
+- Keep everything in ONE file; inline CSS/JS as needed
+
+Generate complete, working HTML code that can be run immediately.
+
+IMPORTANT: Always include "Built with anycoder" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder
+"""
+
+# ---------------------------------------------------------------------------
+# Video temp-file management (per-session tracking and cleanup)
+# ---------------------------------------------------------------------------
+VIDEO_TEMP_DIR = os.path.join(tempfile.gettempdir(), "anycoder_videos")
+VIDEO_FILE_TTL_SECONDS = 6 * 60 * 60 # 6 hours
+_SESSION_VIDEO_FILES: Dict[str, List[str]] = {}
+_VIDEO_FILES_LOCK = threading.Lock()
+
+
+def _ensure_video_dir_exists() -> None:
+ try:
+ os.makedirs(VIDEO_TEMP_DIR, exist_ok=True)
+ except Exception:
+ pass
+
+
+def _register_video_for_session(session_id: str | None, file_path: str) -> None:
+ if not session_id or not file_path:
+ return
+ with _VIDEO_FILES_LOCK:
+ if session_id not in _SESSION_VIDEO_FILES:
+ _SESSION_VIDEO_FILES[session_id] = []
+ _SESSION_VIDEO_FILES[session_id].append(file_path)
+
+
+def cleanup_session_videos(session_id: str | None) -> None:
+ if not session_id:
+ return
+ with _VIDEO_FILES_LOCK:
+ file_list = _SESSION_VIDEO_FILES.pop(session_id, [])
+ for path in file_list:
+ try:
+ if path and os.path.exists(path):
+ os.unlink(path)
+ except Exception:
+ # Best-effort cleanup
+ pass
+
+
+def reap_old_videos(ttl_seconds: int = VIDEO_FILE_TTL_SECONDS) -> None:
+ """Delete old video files in the temp directory based on modification time."""
+ try:
+ _ensure_video_dir_exists()
+ now_ts = time.time()
+ for name in os.listdir(VIDEO_TEMP_DIR):
+ path = os.path.join(VIDEO_TEMP_DIR, name)
+ try:
+ if not os.path.isfile(path):
+ continue
+ mtime = os.path.getmtime(path)
+ if now_ts - mtime > ttl_seconds:
+ os.unlink(path)
+ except Exception:
+ pass
+ except Exception:
+ # Temp dir might not exist or be accessible; ignore
+ pass
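+
+# Example wiring (hypothetical; actual registration/scheduling happens elsewhere in this module):
+#   atexit.register(reap_old_videos)                 # best-effort sweep on shutdown
+#   threading.Timer(3600, reap_old_videos).start()   # hourly background sweep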
+
+# ---------------------------------------------------------------------------
+# Audio temp-file management (per-session tracking and cleanup)
+# ---------------------------------------------------------------------------
+AUDIO_TEMP_DIR = os.path.join(tempfile.gettempdir(), "anycoder_audio")
+AUDIO_FILE_TTL_SECONDS = 6 * 60 * 60 # 6 hours
+_SESSION_AUDIO_FILES: Dict[str, List[str]] = {}
+_AUDIO_FILES_LOCK = threading.Lock()
+
+
+def _ensure_audio_dir_exists() -> None:
+ try:
+ os.makedirs(AUDIO_TEMP_DIR, exist_ok=True)
+ except Exception:
+ pass
+
+
+def _register_audio_for_session(session_id: str | None, file_path: str) -> None:
+ if not session_id or not file_path:
+ return
+ with _AUDIO_FILES_LOCK:
+ if session_id not in _SESSION_AUDIO_FILES:
+ _SESSION_AUDIO_FILES[session_id] = []
+ _SESSION_AUDIO_FILES[session_id].append(file_path)
+
+
+def cleanup_session_audio(session_id: str | None) -> None:
+ if not session_id:
+ return
+ with _AUDIO_FILES_LOCK:
+ file_list = _SESSION_AUDIO_FILES.pop(session_id, [])
+ for path in file_list:
+ try:
+ if path and os.path.exists(path):
+ os.unlink(path)
+ except Exception:
+ pass
+
+
+def reap_old_audio(ttl_seconds: int = AUDIO_FILE_TTL_SECONDS) -> None:
+ try:
+ _ensure_audio_dir_exists()
+ now_ts = time.time()
+ for name in os.listdir(AUDIO_TEMP_DIR):
+ path = os.path.join(AUDIO_TEMP_DIR, name)
+ try:
+ if not os.path.isfile(path):
+ continue
+ mtime = os.path.getmtime(path)
+ if now_ts - mtime > ttl_seconds:
+ os.unlink(path)
+ except Exception:
+ pass
+ except Exception:
+ pass
+
+TRANSFORMERS_JS_SYSTEM_PROMPT = """You are an expert web developer creating a transformers.js application. You will generate THREE separate files: index.html, index.js, and style.css.
+
+IMPORTANT: You MUST output ALL THREE files in the following format:
+
+```html
+<!-- index.html content here -->
+```
+
+```javascript
+// index.js content here
+```
+
+```css
+/* style.css content here */
+```
+
+Requirements:
+1. Create a modern, responsive web application using transformers.js
+2. Use the transformers.js library for AI/ML functionality
+3. Create a clean, professional UI with good user experience
+4. Make the application fully responsive for mobile devices
+5. Use modern CSS practices and JavaScript ES6+ features
+6. Include proper error handling and loading states
+7. Follow accessibility best practices
+
+Library import (required): Add the following snippet to index.html to import transformers.js:
+
+<script type="module">
+  import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers';
+</script>
+
+Device Options: By default, transformers.js runs on CPU (via WASM). For better performance, you can run models on GPU using WebGPU:
+- CPU (default): const pipe = await pipeline('task', 'model-name');
+- GPU (WebGPU): const pipe = await pipeline('task', 'model-name', { device: 'webgpu' });
+
+Consider providing users with a toggle option to choose between CPU and GPU execution based on their browser's WebGPU support.
+
+The index.html should contain the basic HTML structure and link to the CSS and JS files.
+The index.js should contain all the JavaScript logic including transformers.js integration.
+The style.css should contain all the styling for the application.
+
+Generate complete, working code files as shown above.
+
+IMPORTANT: Always include "Built with anycoder" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder"""
+
+SVELTE_SYSTEM_PROMPT = """You are an expert Svelte developer creating a modern Svelte application.
+
+File selection policy (dynamic, model-decided):
+- Generate ONLY the files actually needed for the user's request.
+- MUST include src/App.svelte (entry component) and src/main.ts (entry point).
+- Usually include src/app.css for global styles.
+- Add additional files when needed, e.g. src/lib/*.svelte, src/components/*.svelte, src/stores/*.ts, static/* assets, etc.
+- Other base template files (package.json, vite.config.ts, tsconfig, svelte.config.js, src/vite-env.d.ts) are provided by the template and should NOT be generated unless explicitly requested by the user.
+
+CRITICAL: Always generate src/main.ts with correct Svelte 5 syntax:
+```typescript
+import './app.css'
+import App from './App.svelte'
+
+const app = new App({
+ target: document.getElementById('app')!,
+})
+
+export default app
+```
+Do NOT use the old mount syntax: `import { mount } from 'svelte'` - this will cause build errors.
+
+Output format (CRITICAL):
+- Return ONLY a series of file sections, each starting with a filename line:
+ === src/App.svelte ===
+ ...file content...
+
+ === src/app.css ===
+ ...file content...
+
+ (repeat for all files you decide to create)
+- Do NOT wrap files in Markdown code fences.
+
+Dependency policy:
+- If you import any third-party npm packages (e.g., "@gradio/dataframe"), include a package.json at the project root with a "dependencies" section listing them. Keep scripts and devDependencies compatible with the default Svelte + Vite template.
+
+Requirements:
+1. Create a modern, responsive Svelte application based on the user's specific request
+2. Prefer TypeScript where applicable for better type safety
+3. Create a clean, professional UI with good user experience
+4. Make the application fully responsive for mobile devices
+5. Use modern CSS practices and Svelte best practices
+6. Include proper error handling and loading states
+7. Follow accessibility best practices
+8. Use Svelte's reactive features effectively
+9. Include proper component structure and organization (only what's needed)
+
+IMPORTANT: Always include "Built with anycoder" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder
+"""
+
+SVELTE_SYSTEM_PROMPT_WITH_SEARCH = """You are an expert Svelte developer. You have access to real-time web search.
+
+File selection policy (dynamic, model-decided):
+- Generate ONLY the files actually needed for the user's request.
+- MUST include src/App.svelte (entry component) and src/main.ts (entry point).
+- Usually include src/app.css for global styles.
+- Add additional files when needed, e.g. src/lib/*.svelte, src/components/*.svelte, src/stores/*.ts, static/* assets, etc.
+- Other base template files (package.json, vite.config.ts, tsconfig, svelte.config.js, src/vite-env.d.ts) are provided by the template and should NOT be generated unless explicitly requested by the user.
+
+CRITICAL: Always generate src/main.ts with correct Svelte 5 syntax:
+```typescript
+import './app.css'
+import App from './App.svelte'
+
+const app = new App({
+ target: document.getElementById('app')!,
+})
+
+export default app
+```
+Do NOT use the old mount syntax: `import { mount } from 'svelte'` - this will cause build errors.
+
+Output format (CRITICAL):
+- Return ONLY a series of file sections, each starting with a filename line:
+ === src/App.svelte ===
+ ...file content...
+
+ === src/app.css ===
+ ...file content...
+
+ (repeat for all files you decide to create)
+- Do NOT wrap files in Markdown code fences.
+
+Dependency policy:
+- If you import any third-party npm packages, include a package.json at the project root with a "dependencies" section listing them. Keep scripts and devDependencies compatible with the default Svelte + Vite template.
+
+Requirements:
+1. Create a modern, responsive Svelte application
+2. Prefer TypeScript where applicable
+3. Clean, professional UI and UX
+4. Mobile-first responsiveness
+5. Svelte best practices and modern CSS
+6. Error handling and loading states
+7. Accessibility best practices
+8. Use search to apply current best practices
+9. Keep component structure organized and minimal
+
+IMPORTANT: Always include "Built with anycoder" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder
+"""
+
+TRANSFORMERS_JS_SYSTEM_PROMPT_WITH_SEARCH = """You are an expert web developer creating a transformers.js application. You have access to real-time web search. When needed, use web search to find the latest information, best practices, or specific technologies for transformers.js.
+
+You will generate THREE separate files: index.html, index.js, and style.css.
+
+IMPORTANT: You MUST output ALL THREE files in the following format:
+
+```html
+<!-- index.html content here -->
+```
+
+```javascript
+// index.js content here
+```
+
+```css
+/* style.css content here */
+```
+
+Requirements:
+1. Create a modern, responsive web application using transformers.js
+2. Use the transformers.js library for AI/ML functionality
+3. Use web search to find current best practices and latest transformers.js features
+4. Create a clean, professional UI with good user experience
+5. Make the application fully responsive for mobile devices
+6. Use modern CSS practices and JavaScript ES6+ features
+7. Include proper error handling and loading states
+8. Follow accessibility best practices
+
+Library import (required): Add the following snippet to index.html to import transformers.js:
+
+<script type="module">
+  import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers';
+</script>
+
+Device Options: By default, transformers.js runs on CPU (via WASM). For better performance, you can run models on GPU using WebGPU:
+- CPU (default): const pipe = await pipeline('task', 'model-name');
+- GPU (WebGPU): const pipe = await pipeline('task', 'model-name', { device: 'webgpu' });
+
+Consider providing users with a toggle option to choose between CPU and GPU execution based on their browser's WebGPU support.
+
+The index.html should contain the basic HTML structure and link to the CSS and JS files.
+The index.js should contain all the JavaScript logic including transformers.js integration.
+The style.css should contain all the styling for the application.
+
+Generate complete, working code files as shown above.
+
+IMPORTANT: Always include "Built with anycoder" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder"""
+
+# Gradio system prompts will be dynamically populated by update_gradio_system_prompts()
+GRADIO_SYSTEM_PROMPT = ""
+GRADIO_SYSTEM_PROMPT_WITH_SEARCH = ""
+
+# GRADIO_SYSTEM_PROMPT_WITH_SEARCH will be dynamically populated by update_gradio_system_prompts()
+
+# All Gradio API documentation is now dynamically loaded from https://www.gradio.app/llms.txt
+
+# JSON system prompts will be dynamically populated by update_json_system_prompts()
+JSON_SYSTEM_PROMPT = ""
+JSON_SYSTEM_PROMPT_WITH_SEARCH = ""
+
+# All ComfyUI API documentation is now dynamically loaded from https://docs.comfy.org/llms.txt
+
+GENERIC_SYSTEM_PROMPT = """You are an expert {language} developer. Write clean, idiomatic, and runnable {language} code for the user's request. If possible, include comments and best practices. Generate complete, working code that can be run immediately. If the user provides a file or other context, use it as a reference. If the code is for a script or app, make it as self-contained as possible.
+
+IMPORTANT: Always include "Built with anycoder" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder"""
+
+# System prompt with search capability
+HTML_SYSTEM_PROMPT_WITH_SEARCH = """You are an expert front-end developer. You have access to real-time web search.
+
+Output a COMPLETE, STANDALONE HTML document that renders directly in a browser. Requirements:
+- Include <!DOCTYPE html>, <html>, <head>, and <body> with proper nesting
+- Include all required <script> and <link> tags
+
+{REPLACE_END}
+```
+
+Example Fixing Dependencies (requirements.txt):
+```
+Adding missing dependency to fix ImportError...
+=== requirements.txt ===
+{SEARCH_START}
+gradio
+streamlit
+{DIVIDER}
+gradio
+streamlit
+mistral-common
+{REPLACE_END}
+```
+
+Example Deleting Code:
+```
+Removing the paragraph...
+{SEARCH_START}
+<p>This paragraph will be deleted.</p>
+{DIVIDER}
+{REPLACE_END}
+```
+
+IMPORTANT: Always ensure "Built with anycoder" appears as clickable text in the header/top section linking to https://huggingface.co/spaces/akhaliq/anycoder - if it's missing from the existing code, add it; if it exists, preserve it.
+
+CRITICAL: For imported spaces that lack anycoder attribution, you MUST add it as part of your modifications. Add it to the header/navigation area as clickable text linking to https://huggingface.co/spaces/akhaliq/anycoder"""
+
+# Follow-up system prompt for modifying existing transformers.js applications
+TransformersJSFollowUpSystemPrompt = f"""You are an expert web developer modifying an existing transformers.js application.
+The user wants to apply changes based on their request.
+You MUST output ONLY the changes required using the following SEARCH/REPLACE block format. Do NOT output the entire file.
+Explain the changes briefly *before* the blocks if necessary, but the code changes THEMSELVES MUST be within the blocks.
+
+IMPORTANT: When the user reports an ERROR MESSAGE, analyze it carefully to determine which file needs fixing:
+- JavaScript errors/module loading issues → Fix index.js
+- HTML rendering/DOM issues → Fix index.html
+- Styling/visual issues → Fix style.css
+- CDN/library loading errors → Fix script tags in index.html
+
+The transformers.js application consists of three files: index.html, index.js, and style.css.
+When making changes, specify which file you're modifying by starting your search/replace blocks with the file name.
+
+Format Rules:
+1. Start with {SEARCH_START}
+2. Provide the exact lines from the current code that need to be replaced.
+3. Use {DIVIDER} to separate the search block from the replacement.
+4. Provide the new lines that should replace the original lines.
+5. End with {REPLACE_END}
+6. You can use multiple SEARCH/REPLACE blocks if changes are needed in different parts of the file.
+7. To insert code, use an empty SEARCH block (only {SEARCH_START} and {DIVIDER} on their lines) if inserting at the very beginning, otherwise provide the line *before* the insertion point in the SEARCH block and include that line plus the new lines in the REPLACE block.
+8. To delete code, provide the lines to delete in the SEARCH block and leave the REPLACE block empty (only {DIVIDER} and {REPLACE_END} on their lines).
+9. IMPORTANT: The SEARCH block must *exactly* match the current code, including indentation and whitespace.
+
+Example Modifying HTML:
+```
+Changing the title in index.html...
+=== index.html ===
+{SEARCH_START}
+  <title>Old Title</title>
+{DIVIDER}
+  <title>New Title</title>
+{REPLACE_END}
+```
+
+Example Modifying JavaScript:
+```
+Adding a new function to index.js...
+=== index.js ===
+{SEARCH_START}
+// Existing code
+{DIVIDER}
+// Existing code
+
+function newFunction() {{
+ console.log("New function added");
+}}
+{REPLACE_END}
+```
+
+Example Modifying CSS:
+```
+Changing background color in style.css...
+=== style.css ===
+{SEARCH_START}
+body {{
+ background-color: white;
+}}
+{DIVIDER}
+body {{
+ background-color: #f0f0f0;
+}}
+{REPLACE_END}
+```
+
+Example Fixing Library Loading Error:
+```
+Fixing transformers.js CDN loading error...
+=== index.html ===
+{SEARCH_START}
+<script src="https://cdn.jsdelivr.net/npm/@xenova/transformers"></script>
+{DIVIDER}
+<script type="module">
+import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers';
+</script>
+{REPLACE_END}
+```
+
+IMPORTANT: Always ensure "Built with anycoder" appears as clickable text in the header/top section linking to https://huggingface.co/spaces/akhaliq/anycoder - if it's missing from the existing code, add it; if it exists, preserve it.
+
+CRITICAL: For imported spaces that lack anycoder attribution, you MUST add it as part of your modifications. Add it to the header/navigation area as clickable text linking to https://huggingface.co/spaces/akhaliq/anycoder"""
+
+# Available models
+AVAILABLE_MODELS = [
+ {
+ "name": "SiliconFlow: inclusionAI/Ling-flash-2.0",
+ "id": "inclusionAI/Ling-flash-2.0",
+ "description": "InclusionAI Ling-flash-2.0 model for code generation and general tasks"
+ },
+ # {
+ # "name": "Grok 4 Fast (Free)",
+ # "id": "x-ai/grok-4-fast:free",
+ # "description": "X.AI Grok 4 Fast model via OpenRouter - free tier with vision capabilities for code generation"
+ # },
+ # {
+ # "name": "Moonshot Kimi-K2",
+ # "id": "moonshotai/Kimi-K2-Instruct",
+ # "description": "Moonshot AI Kimi-K2-Instruct model for code generation and general tasks"
+ # },
+ # {
+ # "name": "Kimi K2 Turbo (Preview)",
+ # "id": "kimi-k2-turbo-preview",
+ # "description": "Moonshot AI Kimi K2 Turbo via OpenAI-compatible API"
+ # },
+ # {
+ # "name": "Carrot",
+ # "id": "stealth-model-1",
+ # "description": "High-performance AI model for code generation and complex reasoning tasks"
+ # },
+ # {
+ # "name": "DeepSeek V3",
+ # "id": "deepseek-ai/DeepSeek-V3-0324",
+ # "description": "DeepSeek V3 model for code generation"
+ # },
+ # {
+ # "name": "DeepSeek V3.1",
+ # "id": "deepseek-ai/DeepSeek-V3.1",
+ # "description": "DeepSeek V3.1 model for code generation and general tasks"
+ # },
+ # {
+ # "name": "DeepSeek V3.1 Terminus",
+ # "id": "deepseek-ai/DeepSeek-V3.1-Terminus",
+ # "description": "DeepSeek V3.1 Terminus model for advanced code generation and reasoning tasks"
+ # },
+ # {
+ # "name": "DeepSeek R1",
+ # "id": "deepseek-ai/DeepSeek-R1-0528",
+ # "description": "DeepSeek R1 model for code generation"
+ # },
+ # {
+ # "name": "ERNIE-4.5-VL",
+ # "id": "baidu/ERNIE-4.5-VL-424B-A47B-Base-PT",
+ # "description": "ERNIE-4.5-VL model for multimodal code generation with image support"
+ # },
+ # {
+ # "name": "MiniMax M1",
+ # "id": "MiniMaxAI/MiniMax-M1-80k",
+ # "description": "MiniMax M1 model for code generation and general tasks"
+ # },
+ # {
+ # "name": "Qwen3-235B-A22B",
+ # "id": "Qwen/Qwen3-235B-A22B",
+ # "description": "Qwen3-235B-A22B model for code generation and general tasks"
+ # },
+ # {
+ # "name": "SmolLM3-3B",
+ # "id": "HuggingFaceTB/SmolLM3-3B",
+ # "description": "SmolLM3-3B model for code generation and general tasks"
+ # },
+ # {
+ # "name": "GLM-4.5",
+ # "id": "zai-org/GLM-4.5",
+ # "description": "GLM-4.5 model with thinking capabilities for advanced code generation"
+ # },
+ # {
+ # "name": "GLM-4.5V",
+ # "id": "zai-org/GLM-4.5V",
+ # "description": "GLM-4.5V multimodal model with image understanding for code generation"
+ # },
+ # {
+ # "name": "GLM-4.1V-9B-Thinking",
+ # "id": "THUDM/GLM-4.1V-9B-Thinking",
+ # "description": "GLM-4.1V-9B-Thinking model for multimodal code generation with image support"
+ # },
+ # {
+ # "name": "Qwen3-235B-A22B-Instruct-2507",
+ # "id": "Qwen/Qwen3-235B-A22B-Instruct-2507",
+ # "description": "Qwen3-235B-A22B-Instruct-2507 model for code generation and general tasks"
+ # },
+ # {
+ # "name": "Qwen3-Coder-480B-A35B-Instruct",
+ # "id": "Qwen/Qwen3-Coder-480B-A35B-Instruct",
+ # "description": "Qwen3-Coder-480B-A35B-Instruct model for advanced code generation and programming tasks"
+ # },
+ # {
+ # "name": "Qwen3-32B",
+ # "id": "Qwen/Qwen3-32B",
+ # "description": "Qwen3-32B model for code generation and general tasks"
+ # },
+ # {
+ # "name": "Qwen3-4B-Instruct-2507",
+ # "id": "Qwen/Qwen3-4B-Instruct-2507",
+ # "description": "Qwen3-4B-Instruct-2507 model for code generation and general tasks"
+ # },
+ # {
+ # "name": "Qwen3-4B-Thinking-2507",
+ # "id": "Qwen/Qwen3-4B-Thinking-2507",
+ # "description": "Qwen3-4B-Thinking-2507 model with advanced reasoning capabilities for code generation and general tasks"
+ # },
+ # {
+ # "name": "Qwen3-235B-A22B-Thinking",
+ # "id": "Qwen/Qwen3-235B-A22B-Thinking-2507",
+ # "description": "Qwen3-235B-A22B-Thinking model with advanced reasoning capabilities"
+ # },
+ # {
+ # "name": "Qwen3-Next-80B-A3B-Thinking",
+ # "id": "Qwen/Qwen3-Next-80B-A3B-Thinking",
+ # "description": "Qwen3-Next-80B-A3B-Thinking model with advanced reasoning capabilities via Hyperbolic"
+ # },
+ # {
+ # "name": "Qwen3-Next-80B-A3B-Instruct",
+ # "id": "Qwen/Qwen3-Next-80B-A3B-Instruct",
+ # "description": "Qwen3-Next-80B-A3B-Instruct model for code generation and general tasks via Hyperbolic"
+ # },
+ # {
+ # "name": "Qwen3-30B-A3B-Instruct-2507",
+ # "id": "qwen3-30b-a3b-instruct-2507",
+ # "description": "Qwen3-30B-A3B-Instruct model via Alibaba Cloud DashScope API"
+ # },
+ # {
+ # "name": "Qwen3-30B-A3B-Thinking-2507",
+ # "id": "qwen3-30b-a3b-thinking-2507",
+ # "description": "Qwen3-30B-A3B-Thinking model with advanced reasoning via Alibaba Cloud DashScope API"
+ # },
+ # {
+ # "name": "Qwen3-Coder-30B-A3B-Instruct",
+ # "id": "qwen3-coder-30b-a3b-instruct",
+ # "description": "Qwen3-Coder-30B-A3B-Instruct model for advanced code generation via Alibaba Cloud DashScope API"
+ # },
+ # {
+ # "name": "Qwen3-Coder-Plus-2025-09-23",
+ # "id": "qwen3-coder-plus-2025-09-23",
+ # "description": "Qwen3-Coder-Plus-2025-09-23 model - latest advanced code generation model via Alibaba Cloud DashScope API"
+ # },
+ # {
+ # "name": "Cohere Command-A Reasoning 08-2025",
+ # "id": "CohereLabs/command-a-reasoning-08-2025",
+ # "description": "Cohere Labs Command-A Reasoning (Aug 2025) via Hugging Face InferenceClient"
+ # },
+ # {
+ # "name": "StepFun Step-3",
+ # "id": "step-3",
+ # "description": "StepFun Step-3 model - AI chat assistant by 阶跃星辰 with multilingual capabilities"
+ # },
+ # {
+ # "name": "Codestral 2508",
+ # "id": "codestral-2508",
+ # "description": "Mistral Codestral model - specialized for code generation and programming tasks",
+ # "type": "mistral"
+ # },
+ # {
+ # "name": "Mistral Medium 2508",
+ # "id": "mistral-medium-2508",
+ # "description": "Mistral Medium 2508 model via Mistral API for general tasks and coding",
+ # "type": "mistral"
+ # },
+ # {
+ # "name": "Magistral Medium 2509",
+ # "id": "magistral-medium-2509",
+ # "description": "Magistral Medium 2509 model via Mistral API for advanced code generation and reasoning",
+ # "type": "mistral"
+ # },
+ # {
+ # "name": "Gemini 2.5 Flash",
+ # "id": "gemini-2.5-flash",
+ # "description": "Google Gemini 2.5 Flash via OpenAI-compatible API"
+ # },
+ # {
+ # "name": "Gemini 2.5 Pro",
+ # "id": "gemini-2.5-pro",
+ # "description": "Google Gemini 2.5 Pro via OpenAI-compatible API"
+ # },
+ # {
+ # "name": "GPT-OSS-120B",
+ # "id": "openai/gpt-oss-120b",
+ # "description": "OpenAI GPT-OSS-120B model for advanced code generation and general tasks"
+ # },
+ # {
+ # "name": "GPT-OSS-20B",
+ # "id": "openai/gpt-oss-20b",
+ # "description": "OpenAI GPT-OSS-20B model for code generation and general tasks"
+ # },
+ # {
+ # "name": "GPT-5",
+ # "id": "gpt-5",
+ # "description": "OpenAI GPT-5 model for advanced code generation and general tasks"
+ # },
+ # {
+ # "name": "Grok-4",
+ # "id": "grok-4",
+ # "description": "Grok-4 model via Poe (OpenAI-compatible) for advanced tasks"
+ # },
+ # {
+ # "name": "Grok-Code-Fast-1",
+ # "id": "Grok-Code-Fast-1",
+ # "description": "Grok-Code-Fast-1 model via Poe (OpenAI-compatible) for fast code generation"
+ # },
+ # {
+ # "name": "Claude-Opus-4.1",
+ # "id": "claude-opus-4.1",
+ # "description": "Anthropic Claude Opus 4.1 via Poe (OpenAI-compatible)"
+ # },
+ # {
+ # "name": "Qwen3 Max Preview",
+ # "id": "qwen3-max-preview",
+ # "description": "Qwen3 Max Preview model via DashScope International API"
+ # },
+ # {
+ # "name": "Qwen3-Max-2025-09-23",
+ # "id": "qwen3-max-2025-09-23",
+ # "description": "Qwen3-Max-2025-09-23 model - latest flagship model via Alibaba Cloud DashScope API"
+ # },
+ # {
+ # "name": "Sonoma Dusk Alpha",
+ # "id": "openrouter/sonoma-dusk-alpha",
+ # "description": "OpenRouter Sonoma Dusk Alpha model with vision capabilities"
+ # },
+ # {
+ # "name": "Sonoma Sky Alpha",
+ # "id": "openrouter/sonoma-sky-alpha",
+ # "description": "OpenRouter Sonoma Sky Alpha model with vision capabilities"
+ # }
+]
+
+# Default model selection
+DEFAULT_MODEL_NAME = "inclusionAI/Ling-flash-2.0"
+DEFAULT_MODEL = None
+for _m in AVAILABLE_MODELS:
+    # Match by display name or model id (the default above is an id, while
+    # entries may carry a provider-prefixed display name)
+    if _m.get("name") == DEFAULT_MODEL_NAME or _m.get("id") == DEFAULT_MODEL_NAME:
+        DEFAULT_MODEL = _m
+        break
+if DEFAULT_MODEL is None and AVAILABLE_MODELS:
+ DEFAULT_MODEL = AVAILABLE_MODELS[0]
+DEMO_LIST = [
+ {
+ "title": "Todo App",
+ "description": "Create a simple todo application with add, delete, and mark as complete functionality"
+ },
+ {
+ "title": "Calculator",
+ "description": "Build a basic calculator with addition, subtraction, multiplication, and division"
+ },
+ {
+ "title": "Chat Interface",
+ "description": "Build a chat interface with message history and user input"
+ },
+ {
+ "title": "E-commerce Product Card",
+ "description": "Create a product card component for an e-commerce website"
+ },
+ {
+ "title": "Login Form",
+ "description": "Build a responsive login form with validation"
+ },
+ {
+ "title": "Dashboard Layout",
+ "description": "Create a dashboard layout with sidebar navigation and main content area"
+ },
+ {
+ "title": "Data Table",
+ "description": "Build a data table with sorting and filtering capabilities"
+ },
+ {
+ "title": "Image Gallery",
+ "description": "Create an image gallery with lightbox functionality and responsive grid layout"
+ },
+ {
+ "title": "UI from Image",
+ "description": "Upload an image of a UI design and I'll generate the HTML/CSS code for it"
+ },
+ {
+ "title": "Extract Text from Image",
+ "description": "Upload an image containing text and I'll extract and process the text content"
+ },
+ {
+ "title": "Website Redesign",
+ "description": "Enter a website URL to extract its content and redesign it with a modern, responsive layout"
+ },
+ {
+ "title": "Modify HTML",
+ "description": "After generating HTML, ask me to modify it with specific changes using search/replace format"
+ },
+ {
+ "title": "Search/Replace Example",
+ "description": "Generate HTML first, then ask: 'Change the title to My New Title' or 'Add a blue background to the body'"
+ },
+ {
+ "title": "Transformers.js App",
+ "description": "Create a transformers.js application with AI/ML functionality using the transformers.js library"
+ },
+ {
+ "title": "Svelte App",
+ "description": "Create a modern Svelte application with TypeScript, Vite, and responsive design"
+ }
+]
+
+# HF Inference Client
+HF_TOKEN = os.getenv('HF_TOKEN')
+if not HF_TOKEN:
+ raise RuntimeError("HF_TOKEN environment variable is not set. Please set it to your Hugging Face API token.")
+
+def get_inference_client(model_id, provider="auto"):
+ """Return an InferenceClient with provider based on model_id and user selection."""
+ if model_id == "qwen3-30b-a3b-instruct-2507":
+ # Use DashScope OpenAI client
+ return OpenAI(
+ api_key=os.getenv("DASHSCOPE_API_KEY"),
+ base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
+ )
+ elif model_id == "qwen3-30b-a3b-thinking-2507":
+ # Use DashScope OpenAI client for Thinking model
+ return OpenAI(
+ api_key=os.getenv("DASHSCOPE_API_KEY"),
+ base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
+ )
+ elif model_id == "qwen3-coder-30b-a3b-instruct":
+ # Use DashScope OpenAI client for Coder model
+ return OpenAI(
+ api_key=os.getenv("DASHSCOPE_API_KEY"),
+ base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
+ )
+ elif model_id == "qwen3-coder-plus-2025-09-23":
+ # Use DashScope OpenAI client for Qwen3-Coder-Plus-2025-09-23 model
+ return OpenAI(
+ api_key=os.getenv("DASHSCOPE_API_KEY"),
+ base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
+ )
+ elif model_id == "gpt-5":
+ # Use Poe (OpenAI-compatible) client for GPT-5 model
+ return OpenAI(
+ api_key=os.getenv("POE_API_KEY"),
+ base_url="https://api.poe.com/v1"
+ )
+ elif model_id == "grok-4":
+ # Use Poe (OpenAI-compatible) client for Grok-4 model
+ return OpenAI(
+ api_key=os.getenv("POE_API_KEY"),
+ base_url="https://api.poe.com/v1"
+ )
+ elif model_id == "Grok-Code-Fast-1":
+ # Use Poe (OpenAI-compatible) client for Grok-Code-Fast-1 model
+ return OpenAI(
+ api_key=os.getenv("POE_API_KEY"),
+ base_url="https://api.poe.com/v1"
+ )
+ elif model_id == "claude-opus-4.1":
+ # Use Poe (OpenAI-compatible) client for Claude-Opus-4.1
+ return OpenAI(
+ api_key=os.getenv("POE_API_KEY"),
+ base_url="https://api.poe.com/v1"
+ )
+ elif model_id == "qwen3-max-preview":
+ # Use DashScope International OpenAI client for Qwen3 Max Preview
+ return OpenAI(
+ api_key=os.getenv("DASHSCOPE_API_KEY"),
+ base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
+ )
+ elif model_id == "qwen3-max-2025-09-23":
+ # Use DashScope OpenAI client for Qwen3-Max-2025-09-23 model
+ return OpenAI(
+ api_key=os.getenv("DASHSCOPE_API_KEY"),
+ base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
+ )
+ elif model_id == "openrouter/sonoma-dusk-alpha":
+ # Use OpenRouter client for Sonoma Dusk Alpha model
+ return OpenAI(
+ api_key=os.getenv("OPENROUTER_API_KEY"),
+ base_url="https://openrouter.ai/api/v1",
+ )
+ elif model_id == "openrouter/sonoma-sky-alpha":
+ # Use OpenRouter client for Sonoma Sky Alpha model
+ return OpenAI(
+ api_key=os.getenv("OPENROUTER_API_KEY"),
+ base_url="https://openrouter.ai/api/v1",
+ )
+ elif model_id == "x-ai/grok-4-fast:free":
+ # Use OpenRouter client for Grok 4 Fast (Free) model
+ return OpenAI(
+ api_key=os.getenv("OPENROUTER_API_KEY"),
+ base_url="https://openrouter.ai/api/v1",
+ default_headers={
+ "HTTP-Referer": "https://huggingface.co/spaces/akhaliq/anycoder",
+ "X-Title": "anycoder"
+ }
+ )
+ elif model_id == "step-3":
+ # Use StepFun API client for Step-3 model
+ return OpenAI(
+ api_key=os.getenv("STEP_API_KEY"),
+ base_url="https://api.stepfun.com/v1"
+ )
+ elif model_id == "codestral-2508" or model_id == "mistral-medium-2508" or model_id == "magistral-medium-2509":
+ # Use Mistral client for Mistral models
+ return Mistral(api_key=os.getenv("MISTRAL_API_KEY"))
+ elif model_id == "gemini-2.5-flash":
+ # Use Google Gemini (OpenAI-compatible) client
+ return OpenAI(
+ api_key=os.getenv("GEMINI_API_KEY"),
+ base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
+ )
+ elif model_id == "gemini-2.5-pro":
+ # Use Google Gemini Pro (OpenAI-compatible) client
+ return OpenAI(
+ api_key=os.getenv("GEMINI_API_KEY"),
+ base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
+ )
+ elif model_id == "kimi-k2-turbo-preview":
+ # Use Moonshot AI (OpenAI-compatible) client for Kimi K2 Turbo (Preview)
+ return OpenAI(
+ api_key=os.getenv("MOONSHOT_API_KEY"),
+ base_url="https://api.moonshot.ai/v1",
+ )
+ elif model_id == "stealth-model-1":
+ # Use stealth model with generic configuration
+ api_key = os.getenv("STEALTH_MODEL_1_API_KEY")
+ if not api_key:
+ raise ValueError("STEALTH_MODEL_1_API_KEY environment variable is required for Carrot model")
+
+ base_url = os.getenv("STEALTH_MODEL_1_BASE_URL")
+ if not base_url:
+ raise ValueError("STEALTH_MODEL_1_BASE_URL environment variable is required for Carrot model")
+
+ return OpenAI(
+ api_key=api_key,
+ base_url=base_url,
+ )
+ elif model_id == "inclusionAI/Ling-flash-2.0":
+ # Use SiliconFlow (OpenAI-compatible) client for inclusionAI/Ling-flash-2.0 model
+ api_key = os.getenv("SILICONFLOW_API_KEY")
+ if not api_key:
+ raise ValueError("SILICONFLOW_API_KEY environment variable is required for SiliconFlow models")
+
+ base_url = os.getenv("SILICONFLOW_BASE_URL")
+ if not base_url:
+ raise ValueError("SILICONFLOW_BASE_URL environment variable is required for SiliconFlow models")
+
+ return OpenAI(
+ api_key=api_key,
+ base_url=base_url,
+ )
+ elif model_id == "openai/gpt-oss-120b":
+ provider = "groq"
+ elif model_id == "openai/gpt-oss-20b":
+ provider = "groq"
+ elif model_id == "moonshotai/Kimi-K2-Instruct":
+ provider = "groq"
+ elif model_id == "Qwen/Qwen3-235B-A22B":
+ provider = "cerebras"
+ elif model_id == "Qwen/Qwen3-235B-A22B-Instruct-2507":
+ provider = "cerebras"
+ elif model_id == "Qwen/Qwen3-32B":
+ provider = "cerebras"
+ elif model_id == "Qwen/Qwen3-235B-A22B-Thinking-2507":
+ provider = "cerebras"
+ elif model_id == "Qwen/Qwen3-Coder-480B-A35B-Instruct":
+ provider = "cerebras"
+ elif model_id == "Qwen/Qwen3-Next-80B-A3B-Thinking":
+ provider = "hyperbolic"
+ elif model_id == "Qwen/Qwen3-Next-80B-A3B-Instruct":
+ provider = "novita"
+ elif model_id == "deepseek-ai/DeepSeek-V3.1":
+ provider = "novita"
+ elif model_id == "deepseek-ai/DeepSeek-V3.1-Terminus":
+ provider = "novita"
+ elif model_id == "zai-org/GLM-4.5":
+ provider = "fireworks-ai"
+ return InferenceClient(
+ provider=provider,
+ api_key=HF_TOKEN,
+ bill_to="huggingface"
+ )
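+
+# Illustrative usage of get_inference_client (hedged; the ids below come from
+# AVAILABLE_MODELS and its commented-out entries):
+#   get_inference_client("inclusionAI/Ling-flash-2.0")  # -> OpenAI client (SiliconFlow)
+#   get_inference_client("zai-org/GLM-4.5")             # -> InferenceClient via fireworks-ai
+# Callers are expected to branch on the returned client type (OpenAI vs Mistral vs
+# InferenceClient) when building requests.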
+
+# Helper function to get real model ID for stealth models
+def get_real_model_id(model_id: str) -> str:
+ """Get the real model ID, checking environment variables for stealth models"""
+ if model_id == "stealth-model-1":
+ # Get the real model ID from environment variable
+ real_model_id = os.getenv("STEALTH_MODEL_1_ID")
+ if not real_model_id:
+ raise ValueError("STEALTH_MODEL_1_ID environment variable is required for Carrot model")
+
+ return real_model_id
+ return model_id
+
+# Type definitions
+History = List[Tuple[str, str]]
+Messages = List[Dict[str, str]]
+
+# Tavily Search Client
+TAVILY_API_KEY = os.getenv('TAVILY_API_KEY')
+tavily_client = None
+if TAVILY_API_KEY:
+ try:
+ tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
+ except Exception as e:
+ print(f"Failed to initialize Tavily client: {e}")
+ tavily_client = None
+
+def history_to_messages(history: History, system: str) -> Messages:
+ messages = [{'role': 'system', 'content': system}]
+ for h in history:
+ # Handle multimodal content in history
+ user_content = h[0]
+ if isinstance(user_content, list):
+ # Extract text from multimodal content
+ text_content = ""
+ for item in user_content:
+ if isinstance(item, dict) and item.get("type") == "text":
+ text_content += item.get("text", "")
+ user_content = text_content if text_content else str(user_content)
+
+ messages.append({'role': 'user', 'content': user_content})
+ messages.append({'role': 'assistant', 'content': h[1]})
+ return messages
+
+def messages_to_history(messages: Messages) -> History:
+ assert messages[0]['role'] == 'system'
+ history = []
+ for q, r in zip(messages[1::2], messages[2::2]):
+ # Extract text content from multimodal messages for history
+ user_content = q['content']
+ if isinstance(user_content, list):
+ text_content = ""
+ for item in user_content:
+ if isinstance(item, dict) and item.get("type") == "text":
+ text_content += item.get("text", "")
+ user_content = text_content if text_content else str(user_content)
+
+ history.append([user_content, r['content']])
+ return history
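+
+# Illustrative round trip (hedged example): history_to_messages prepends the system
+# message, and messages_to_history drops it again by pairing user/assistant turns:
+#   msgs = history_to_messages([("hi", "hello")], system="You are helpful.")
+#   messages_to_history(msgs)  # -> [["hi", "hello"]]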
+
+def history_to_chatbot_messages(history: History) -> List[Dict[str, str]]:
+ """Convert history tuples to chatbot message format"""
+ messages = []
+ for user_msg, assistant_msg in history:
+ # Handle multimodal content
+ if isinstance(user_msg, list):
+ text_content = ""
+ for item in user_msg:
+ if isinstance(item, dict) and item.get("type") == "text":
+ text_content += item.get("text", "")
+ user_msg = text_content if text_content else str(user_msg)
+
+ messages.append({"role": "user", "content": user_msg})
+ messages.append({"role": "assistant", "content": assistant_msg})
+ return messages
+
+def remove_code_block(text):
+ # Try to match code blocks with language markers
+ patterns = [
+ r'```(?:html|HTML)\n([\s\S]+?)\n```', # Match ```html or ```HTML
+ r'```\n([\s\S]+?)\n```', # Match code blocks without language markers
+ r'```([\s\S]+?)```' # Match code blocks without line breaks
+ ]
+ for pattern in patterns:
+ match = re.search(pattern, text, re.DOTALL)
+ if match:
+ extracted = match.group(1).strip()
+ # Remove a leading language marker line (e.g., 'python') if present
+            if extracted.split('\n', 1)[0].strip().lower() in ['python', 'html', 'css', 'javascript', 'json', 'c', 'cpp', 'markdown', 'latex', 'jinja2', 'typescript', 'yaml', 'dockerfile', 'shell', 'r', 'sql', 'sql-mssql', 'sql-mysql', 'sql-mariadb', 'sql-sqlite', 'sql-cassandra', 'sql-plsql', 'sql-hive', 'sql-pgsql', 'sql-gql', 'sql-gpsql', 'sql-sparksql', 'sql-esper']:
+ return extracted.split('\n', 1)[1] if '\n' in extracted else ''
+ # If HTML markup starts later in the block (e.g., Poe injected preface), trim to first HTML root
+ html_root_idx = None
+        for tag in ['<!DOCTYPE html', '<html']:
+            idx = extracted.find(tag)
+            if idx != -1 and (html_root_idx is None or idx < html_root_idx):
+                html_root_idx = idx
+        if html_root_idx is not None and html_root_idx > 0:
+ return extracted[html_root_idx:].strip()
+ return extracted
+ # If no code block is found, check if the entire text is HTML
+ stripped = text.strip()
+    if stripped.startswith('<!DOCTYPE html>') or stripped.startswith('<html'):
+        return stripped
+    # If the HTML root appears after an injected preface, trim to it
+    idx = stripped.find('<!DOCTYPE html')
+    if idx == -1:
+        idx = stripped.find('<html')
+    if idx > 0:
+        return stripped[idx:].strip()
+ # Special handling for python: remove python marker
+ if text.strip().startswith('```python'):
+ return text.strip()[9:-3].strip()
+ # Remove a leading language marker line if present (fallback)
+ lines = text.strip().split('\n', 1)
+    if lines[0].strip().lower() in ['python', 'html', 'css', 'javascript', 'json', 'c', 'cpp', 'markdown', 'latex', 'jinja2', 'typescript', 'yaml', 'dockerfile', 'shell', 'r', 'sql', 'sql-mssql', 'sql-mysql', 'sql-mariadb', 'sql-sqlite', 'sql-cassandra', 'sql-plsql', 'sql-hive', 'sql-pgsql', 'sql-gql', 'sql-gpsql', 'sql-sparksql', 'sql-esper']:
+ return lines[1] if len(lines) > 1 else ''
+ return text.strip()
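+
+# Illustrative behavior of remove_code_block (hedged examples):
+#   remove_code_block("```html\n<h1>Hi</h1>\n```")    # -> "<h1>Hi</h1>"
+#   remove_code_block("```\npython\nprint(1)\n```")   # -> "print(1)" (marker line stripped)
+#   remove_code_block("plain text")                   # -> "plain text" (fallback)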
+
+## React CDN compatibility fixer removed per user preference
+
+def strip_placeholder_thinking(text: str) -> str:
+ """Remove placeholder 'Thinking...' status lines from streamed text."""
+ if not text:
+ return text
+ # Matches lines like: "Thinking..." or "Thinking... (12s elapsed)"
+ return re.sub(r"(?mi)^[\t ]*Thinking\.\.\.(?:\s*\(\d+s elapsed\))?[\t ]*$\n?", "", text)
+
+def is_placeholder_thinking_only(text: str) -> bool:
+ """Return True if text contains only 'Thinking...' placeholder lines (with optional elapsed)."""
+ if not text:
+ return False
+ stripped = text.strip()
+ if not stripped:
+ return False
+ return re.fullmatch(r"(?s)(?:\s*Thinking\.\.\.(?:\s*\(\d+s elapsed\))?\s*)+", stripped) is not None
+
+def extract_last_thinking_line(text: str) -> str:
+ """Extract the last 'Thinking...' line to display as status."""
+ matches = list(re.finditer(r"Thinking\.\.\.(?:\s*\(\d+s elapsed\))?", text))
+ return matches[-1].group(0) if matches else "Thinking..."
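+
+# Illustrative behavior of the placeholder helpers (hedged examples):
+#   strip_placeholder_thinking("Thinking... (3s elapsed)\nHello")   # -> "Hello"
+#   is_placeholder_thinking_only("Thinking...")                     # -> True
+#   extract_last_thinking_line("Thinking...\nThinking... (9s elapsed)")
+#   # -> "Thinking... (9s elapsed)"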
+
+def parse_transformers_js_output(text):
+ """Parse transformers.js output and extract the three files (index.html, index.js, style.css)"""
+ files = {
+ 'index.html': '',
+ 'index.js': '',
+ 'style.css': ''
+ }
+
+ # Multiple patterns to match the three code blocks with different variations
+ html_patterns = [
+ r'```html\s*\n([\s\S]*?)(?:```|\Z)',
+ r'```htm\s*\n([\s\S]*?)(?:```|\Z)',
+ r'```\s*(?:index\.html|html)\s*\n([\s\S]*?)(?:```|\Z)'
+ ]
+
+ js_patterns = [
+ r'```javascript\s*\n([\s\S]*?)(?:```|\Z)',
+ r'```js\s*\n([\s\S]*?)(?:```|\Z)',
+ r'```\s*(?:index\.js|javascript|js)\s*\n([\s\S]*?)(?:```|\Z)'
+ ]
+
+ css_patterns = [
+ r'```css\s*\n([\s\S]*?)(?:```|\Z)',
+ r'```\s*(?:style\.css|css)\s*\n([\s\S]*?)(?:```|\Z)'
+ ]
+
+ # Extract HTML content
+ for pattern in html_patterns:
+ html_match = re.search(pattern, text, re.IGNORECASE)
+ if html_match:
+ files['index.html'] = html_match.group(1).strip()
+ break
+
+ # Extract JavaScript content
+ for pattern in js_patterns:
+ js_match = re.search(pattern, text, re.IGNORECASE)
+ if js_match:
+ files['index.js'] = js_match.group(1).strip()
+ break
+
+ # Extract CSS content
+ for pattern in css_patterns:
+ css_match = re.search(pattern, text, re.IGNORECASE)
+ if css_match:
+ files['style.css'] = css_match.group(1).strip()
+ break
+
+ # Fallback: support === index.html === format if any file is missing
+ if not (files['index.html'] and files['index.js'] and files['style.css']):
+ # Use regex to extract sections
+ html_fallback = re.search(r'===\s*index\.html\s*===\s*\n([\s\S]+?)(?=\n===|$)', text, re.IGNORECASE)
+ js_fallback = re.search(r'===\s*index\.js\s*===\s*\n([\s\S]+?)(?=\n===|$)', text, re.IGNORECASE)
+ css_fallback = re.search(r'===\s*style\.css\s*===\s*\n([\s\S]+?)(?=\n===|$)', text, re.IGNORECASE)
+
+ if html_fallback:
+ files['index.html'] = html_fallback.group(1).strip()
+ if js_fallback:
+ files['index.js'] = js_fallback.group(1).strip()
+ if css_fallback:
+ files['style.css'] = css_fallback.group(1).strip()
+
+ # Additional fallback: extract from numbered sections or file headers
+ if not (files['index.html'] and files['index.js'] and files['style.css']):
+ # Try patterns like "1. index.html:" or "**index.html**"
+ patterns = [
+ (r'(?:^\d+\.\s*|^##\s*|^\*\*\s*)index\.html(?:\s*:|\*\*:?)\s*\n([\s\S]+?)(?=\n(?:\d+\.|##|\*\*|===)|$)', 'index.html'),
+ (r'(?:^\d+\.\s*|^##\s*|^\*\*\s*)index\.js(?:\s*:|\*\*:?)\s*\n([\s\S]+?)(?=\n(?:\d+\.|##|\*\*|===)|$)', 'index.js'),
+ (r'(?:^\d+\.\s*|^##\s*|^\*\*\s*)style\.css(?:\s*:|\*\*:?)\s*\n([\s\S]+?)(?=\n(?:\d+\.|##|\*\*|===)|$)', 'style.css')
+ ]
+
+ for pattern, file_key in patterns:
+ if not files[file_key]:
+ match = re.search(pattern, text, re.IGNORECASE | re.MULTILINE)
+ if match:
+ # Clean up the content by removing any code block markers
+ content = match.group(1).strip()
+ content = re.sub(r'^```\w*\s*\n', '', content)
+ content = re.sub(r'\n```\s*$', '', content)
+ files[file_key] = content.strip()
+
+ return files
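+
+# Illustrative input for parse_transformers_js_output (hedged example):
+#   parse_transformers_js_output(
+#       "```html\n<div></div>\n```\n```js\nconsole.log(1)\n```\n```css\nbody{}\n```"
+#   )
+#   # -> {'index.html': '<div></div>', 'index.js': 'console.log(1)', 'style.css': 'body{}'}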
+
+def format_transformers_js_output(files):
+ """Format the three files into a single display string"""
+ output = []
+ output.append("=== index.html ===")
+ output.append(files['index.html'])
+ output.append("\n=== index.js ===")
+ output.append(files['index.js'])
+ output.append("\n=== style.css ===")
+ output.append(files['style.css'])
+ return '\n'.join(output)
+
+def build_transformers_inline_html(files: dict) -> str:
+    """Merge transformers.js three-file output into a single self-contained HTML document.
+
+    - Inlines style.css into a <style> tag in the <head>
+    - Inlines index.js into a <script type="module"> tag before </body>
+    """
+    import re as _re
+    html = files.get('index.html') or ''
+    css = files.get('style.css') or ''
+    js = files.get('index.js') or ''
+    doc = html
+
+    # Inline CSS: insert a <style> block before </head>
+    style_tag = f"<style>\n{css}\n</style>" if css else ""
+    if style_tag:
+        if '</head>' in doc.lower():
+            # Preserve original casing by finding closing head case-insensitively
+            match = _re.search(r"</head>", doc, flags=_re.IGNORECASE)
+            if match:
+                idx = match.start()
+                doc = doc[:idx] + style_tag + doc[idx:]
+        else:
+            # No head; insert at top of body
+            match = _re.search(r"<body[^>]*>", doc, flags=_re.IGNORECASE)
+            if match:
+                idx = match.end()
+                doc = doc[:idx] + "\n" + style_tag + doc[idx:]
+            else:
+                # Append at beginning
+                doc = style_tag + doc
+
+    # Inline JS: insert before </body>
+    script_tag = f"<script type=\"module\">\n{js}\n</script>" if js else ""
+    # Lightweight debug console overlay to surface runtime errors inside the iframe
+    # (illustrative markup; the overlay only needs to surface window 'error' events)
+    debug_overlay = (
+        "<style>#debug-console{position:fixed;bottom:0;left:0;right:0;max-height:35%;"
+        "overflow:auto;background:rgba(0,0,0,.85);color:#ff6b6b;"
+        "font:12px/1.4 monospace;padding:6px;display:none;z-index:99999}</style>\n"
+        "<div id=\"debug-console\"></div>\n"
+        "<script>window.addEventListener('error',function(e){"
+        "var c=document.getElementById('debug-console');if(c){c.style.display='block';"
+        "c.textContent+=e.message+'\\n';}});</script>"
+    )
+    # Cleanup script to clear Cache Storage and IndexedDB on unload to free model weights
+    cleanup_tag = (
+        "<script>window.addEventListener('unload',function(){"
+        "try{if(window.caches){caches.keys().then(function(ks){"
+        "ks.forEach(function(k){caches.delete(k);});});}}catch(e){}"
+        "try{if(window.indexedDB&&indexedDB.databases){indexedDB.databases().then(function(dbs){"
+        "dbs.forEach(function(db){if(db.name){indexedDB.deleteDatabase(db.name);}});});}}catch(e){}"
+        "});</script>"
+    )
+    if script_tag:
+        match = _re.search(r"</body>", doc, flags=_re.IGNORECASE)
+        if match:
+            idx = match.start()
+            doc = doc[:idx] + debug_overlay + script_tag + cleanup_tag + doc[idx:]
+        else:
+            # Append at end
+            doc = doc + debug_overlay + script_tag + cleanup_tag
+
+    return doc
+
+def send_transformers_to_sandbox(files: dict) -> str:
+ """Build a self-contained HTML document from transformers.js files and return an iframe preview."""
+ merged_html = build_transformers_inline_html(files)
+ return send_to_sandbox(merged_html)
+
+def parse_multipage_html_output(text: str) -> Dict[str, str]:
+ """Parse multi-page HTML output formatted as repeated "=== filename ===" sections.
+
+ Returns a mapping of filename → file content. Supports nested paths like assets/css/styles.css.
+ """
+ if not text:
+ return {}
+ # First, strip any markdown fences
+ cleaned = remove_code_block(text)
+ files: Dict[str, str] = {}
+ import re as _re
+ pattern = _re.compile(r"^===\s*([^=\n]+?)\s*===\s*\n([\s\S]*?)(?=\n===\s*[^=\n]+?\s*===|\Z)", _re.MULTILINE)
+ for m in pattern.finditer(cleaned):
+ name = m.group(1).strip()
+ content = m.group(2).strip()
+ # Remove accidental trailing fences if present
+ content = _re.sub(r"^```\w*\s*\n|\n```\s*$", "", content)
+ files[name] = content
+ return files
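+
+# Illustrative input for parse_multipage_html_output (hedged example):
+#   parse_multipage_html_output(
+#       "=== index.html ===\n<h1>Home</h1>\n=== assets/css/styles.css ===\nbody{}"
+#   )
+#   # -> {'index.html': '<h1>Home</h1>', 'assets/css/styles.css': 'body{}'}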
+
+def format_multipage_output(files: Dict[str, str]) -> str:
+ """Format a dict of files back into === filename === sections.
+
+ Ensures `index.html` appears first if present; others follow sorted by path.
+ """
+ if not isinstance(files, dict) or not files:
+ return ""
+ ordered_paths = []
+ if 'index.html' in files:
+ ordered_paths.append('index.html')
+ for path in sorted(files.keys()):
+ if path == 'index.html':
+ continue
+ ordered_paths.append(path)
+ parts: list[str] = []
+ for path in ordered_paths:
+ parts.append(f"=== {path} ===")
+ # Avoid trailing extra newlines to keep blocks compact
+ parts.append((files.get(path) or '').rstrip())
+ return "\n".join(parts)
+
+def validate_and_autofix_files(files: Dict[str, str]) -> Dict[str, str]:
+ """Ensure minimal contract for multi-file sites; auto-fix missing pieces.
+
+ Rules:
+ - Ensure at least one HTML entrypoint (index.html). If none, synthesize a simple index.html linking discovered pages.
+ - For each HTML file, ensure referenced local assets exist in files; if missing, add minimal stubs.
+ - Normalize relative paths (strip leading '/').
+ """
+ if not isinstance(files, dict) or not files:
+ return files or {}
+ import re as _re
+
+ normalized: Dict[str, str] = {}
+ for k, v in files.items():
+ safe_key = k.strip().lstrip('/')
+ normalized[safe_key] = v
+
+ html_files = [p for p in normalized.keys() if p.lower().endswith('.html')]
+ has_index = 'index.html' in normalized
+
+ # If no index.html but some HTML pages exist, create a simple hub index linking to them
+ if not has_index and html_files:
+ links = '\n'.join([f"