"""
Generate Docker-based (FastAPI) Space code using LLM with template fallbacks.
"""
import os
import re
from huggingface_hub import InferenceClient

LLM_MODEL = "Qwen/Qwen2.5-72B-Instruct"

DOCKER_TEMPLATES = {
    "rest_api": {
        "app.py": '''from fastapi import FastAPI, HTTPException
from fastapi.responses import HTMLResponse
from pydantic import BaseModel
from huggingface_hub import InferenceClient
import os

app = FastAPI(
    title="{title}",
    description="{description}",
    version="1.0.0",
)

client = InferenceClient("{model_id}", token=os.environ.get("HF_TOKEN"))


class TextRequest(BaseModel):
    text: str
    max_tokens: int = 512
    temperature: float = 0.7


class TextResponse(BaseModel):
    result: str
    model: str


class SummarizeRequest(BaseModel):
    text: str
    max_length: int = 150
    min_length: int = 30
@app.get("/", response_class=HTMLResponse)
async def root():
return """
{title}
{title}
{description}
Endpoints
POST /generate - Generate text
POST /summarize - Summarize text
GET /health - Health check
GET /docs - API documentation
"""
@app.post("/generate", response_model=TextResponse)
async def generate_text(request: TextRequest):
try:
messages = [{{"role": "user", "content": request.text}}]
response = client.chat_completion(
messages,
max_tokens=request.max_tokens,
temperature=request.temperature,
)
return TextResponse(
result=response.choices[0].message.content,
model="{model_id}",
)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.post("/summarize")
async def summarize_text(request: SummarizeRequest):
try:
prompt = f"Summarize the following text in a concise way:\\n\\n{{request.text}}"
messages = [{{"role": "user", "content": prompt}}]
response = client.chat_completion(messages, max_tokens=request.max_length)
return {{"summary": response.choices[0].message.content, "model": "{model_id}"}}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.get("/health")
async def health_check():
return {{"status": "healthy", "model": "{model_id}"}}
''',
"Dockerfile": '''FROM python:3.11-slim
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
RUN useradd -m -u 1000 appuser
USER appuser
EXPOSE 7860
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
''',
"requirements.txt": "fastapi==0.115.6\nuvicorn[standard]==0.34.0\nhuggingface-hub==0.27.1\n",
},
"generic_docker": {
"app.py": '''from fastapi import FastAPI, HTTPException
from fastapi.responses import HTMLResponse
from pydantic import BaseModel
from huggingface_hub import InferenceClient
import os
app = FastAPI(title="{title}", version="1.0.0")
client = InferenceClient("{model_id}", token=os.environ.get("HF_TOKEN"))
class QueryRequest(BaseModel):
query: str
max_tokens: int = 512
@app.get("/", response_class=HTMLResponse)
async def root():
return """
{title}
{title}
{description}
API Documentation
Visit /docs for interactive API documentation.
Endpoints
POST /query - Process a query
GET /health - Health check
"""
@app.post("/query")
async def process_query(request: QueryRequest):
try:
messages = [{{"role": "user", "content": request.query}}]
response = client.chat_completion(messages, max_tokens=request.max_tokens)
return {{
"response": response.choices[0].message.content,
"model": "{model_id}",
}}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@app.get("/health")
async def health():
return {{"status": "healthy"}}
''',
"Dockerfile": '''FROM python:3.11-slim
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
RUN useradd -m -u 1000 appuser
USER appuser
EXPOSE 7860
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
''',
"requirements.txt": "fastapi==0.115.6\nuvicorn[standard]==0.34.0\nhuggingface-hub==0.27.1\n",
},
}
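
# A minimal sketch of how these templates are rendered (the same substitution
# DockerGenerator.generate() performs below; the literal values here are
# illustrative only):
#
#     files = {
#         name: content.format(
#             model_id="Qwen/Qwen2.5-7B-Instruct",
#             title="My API",
#             description="A demo API",
#         )
#         for name, content in DOCKER_TEMPLATES["rest_api"].items()
#     }
#
# The doubled braces ({{ and }}) inside the templates survive .format() as
# single braces in the generated app.py.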


class DockerGenerator:
    """Generate Docker-based Space files."""

    def __init__(self):
        self._client = None

    @property
    def client(self) -> InferenceClient:
        if self._client is None:
            token = os.environ.get("HF_TOKEN", None)
            self._client = InferenceClient(LLM_MODEL, token=token)
        return self._client

    def generate(self, plan: dict, prompt: str) -> dict:
        """
        Generate all files for a Docker-based Space.

        Returns dict of {filename: content}.
        """
        model_id = self._get_model_id(plan)
        title = plan.get("title", "My API")
        description = plan.get("description", "")
        template_key = plan.get("template_key", "generic_docker")

        # Try LLM generation first
        try:
            files = self._generate_with_llm(plan, prompt, model_id, title, description)
            if files and "app.py" in files and len(files["app.py"]) > 100:
                # Ensure required files exist
                if "Dockerfile" not in files:
                    files["Dockerfile"] = DOCKER_TEMPLATES["generic_docker"]["Dockerfile"]
                if "requirements.txt" not in files:
                    files["requirements.txt"] = DOCKER_TEMPLATES["generic_docker"]["requirements.txt"]
                return files
        except Exception:
            pass

        # Fall back to templates
        template = DOCKER_TEMPLATES.get(template_key, DOCKER_TEMPLATES["generic_docker"])
        files = {}
        for filename, content in template.items():
            files[filename] = content.format(
                model_id=model_id,
                title=title,
                description=description,
            )
        return files

    def _generate_with_llm(
        self, plan: dict, prompt: str, model_id: str, title: str, description: str
    ) -> dict:
        """Use LLM to generate custom Docker app files."""
        system_prompt = """You are an expert Python developer specializing in FastAPI applications for Hugging Face Docker Spaces.
Generate complete, production-ready code files.

Rules:
1. Output files in this exact format - each file delimited by markers:
=== FILENAME: app.py ===
(file content)
=== FILENAME: requirements.txt ===
(file content)
=== FILENAME: Dockerfile ===
(file content)
2. Use FastAPI for the web framework.
3. Use `huggingface_hub.InferenceClient` for model inference.
4. Include a root GET endpoint that returns an HTML page describing the API.
5. Include a /health endpoint.
6. Include proper error handling.
7. The Dockerfile should use python:3.11-slim, create a non-root user, and expose port 7860.
8. Use uvicorn to serve the app on 0.0.0.0:7860.
9. Do NOT use transformers, torch, or tensorflow."""
user_prompt = f"""Generate a complete Docker-based HF Space for this request:
USER REQUEST: {prompt}
APP PLAN:
- Type: {plan.get('app_type', 'custom')}
- Model: {model_id}
- Model Task: {plan.get('model_task', 'text-generation')}
- Title: {title}
- Description: {description}
- Extra Features: {', '.join(plan.get('extra_features', []))}
Generate the files (app.py, requirements.txt, Dockerfile):"""
response = self.client.chat_completion(
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
],
max_tokens=4096,
temperature=0.3,
)
raw = response.choices[0].message.content
return self._parse_files(raw)

    def _parse_files(self, text: str) -> dict:
        """Parse LLM output into a dict of filename -> content."""
        files = {}

        # Try marker-based parsing first
        parts = re.split(r"===\s*FILENAME:\s*(.+?)\s*===", text)
        if len(parts) >= 3:
            for i in range(1, len(parts), 2):
                filename = parts[i].strip()
                content = parts[i + 1].strip() if i + 1 < len(parts) else ""
                # Remove markdown code fences if present
                content = re.sub(r"^```\w*\n?", "", content)
                content = re.sub(r"\n?```$", "", content)
                files[filename] = content.strip() + "\n"

        # Try code block parsing as fallback
        if not files:
            blocks = re.findall(
                r"(?:#|//)\s*(\S+\.(?:py|txt|dockerfile))\s*\n```\w*\n(.*?)```",
                text, re.DOTALL | re.IGNORECASE,
            )
            for name, content in blocks:
                files[name.strip()] = content.strip() + "\n"
        return files
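
    # A hedged sketch of the input _parse_files expects. Given LLM output in
    # the marker format requested by the system prompt, e.g.:
    #
    #     === FILENAME: app.py ===
    #     ```python
    #     from fastapi import FastAPI
    #     ```
    #     === FILENAME: requirements.txt ===
    #     fastapi==0.115.6
    #
    # it returns {"app.py": "from fastapi import FastAPI\n",
    # "requirements.txt": "fastapi==0.115.6\n"}, with code fences stripped.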

    def _get_model_id(self, plan: dict) -> str:
        models = plan.get("recommended_models", [])
        if models:
            return models[0]["id"]
        return "Qwen/Qwen2.5-7B-Instruct"

    def edit(self, plan: dict, current_files: dict, edit_prompt: str) -> dict:
        """Edit existing Docker app files."""
        try:
            app_code = current_files.get("app.py", "")
            system_prompt = """You are an expert Python developer. Modify the existing FastAPI app code according to the edit request.
Output ONLY the updated Python code for app.py, no explanations or markdown fences.
Keep all existing functionality unless explicitly asked to remove something.
Use huggingface_hub.InferenceClient for model inference."""
            user_prompt = f"""EXISTING app.py:
```python
{app_code}
```
EDIT REQUEST: {edit_prompt}
Return the complete updated app.py:"""
            response = self.client.chat_completion(
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt},
                ],
                max_tokens=4096,
                temperature=0.2,
            )
            raw = response.choices[0].message.content
            code = re.sub(r"^```\w*\n?", "", raw.strip())
            code = re.sub(r"\n?```$", "", code)
            if code and len(code) > 50:
                updated = dict(current_files)
                updated["app.py"] = code.strip() + "\n"
                return updated
        except Exception:
            pass
        return current_files
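

# A minimal usage sketch, assuming HF_TOKEN is set in the environment and a
# plan dict shaped like the planner output this module expects (all field
# values below are illustrative; without a valid token the LLM call fails and
# generate() falls back to the static templates):
if __name__ == "__main__":
    generator = DockerGenerator()
    demo_plan = {
        "title": "Text Tools API",
        "description": "Generate and summarize text over REST.",
        "template_key": "rest_api",
        "app_type": "rest_api",
        "model_task": "text-generation",
        "extra_features": [],
        "recommended_models": [{"id": "Qwen/Qwen2.5-7B-Instruct"}],
    }
    files = generator.generate(demo_plan, "Build a text generation REST API")
    for name, content in files.items():
        print(f"{name}: {len(content)} chars")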