# NOTE: This file was captured from a Hugging Face Space page; the original
# capture included page chrome ("dippoo's picture", "raw / history / blame",
# "6.95 kB") from commit ed37502 "Initial deployment - Content Engine".
"""System API routes — health checks, status, and configuration."""
from __future__ import annotations
import os
from pathlib import Path
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
from content_engine.models.schemas import SystemStatus
from content_engine.config import IS_HF_SPACES
# All routes in this module are mounted under /api and grouped under the
# "system" tag in the OpenAPI docs.
router = APIRouter(prefix="/api", tags=["system"])
# Module-level dependencies, injected at startup via init_routes().
# Each stays None until init_routes() is called; handlers tolerate None.
_comfyui_client = None
_catalog = None
_template_engine = None
_character_profiles = None
def init_routes(comfyui_client, catalog, template_engine, character_profiles=None):
    """Wire up the module-level dependencies used by the route handlers.

    Args:
        comfyui_client: Client for the ComfyUI backend (may be None).
        catalog: Image catalog accessor (may be None).
        template_engine: Prompt template engine (may be None).
        character_profiles: Optional mapping of character profiles.
    """
    global _comfyui_client, _catalog, _template_engine, _character_profiles
    _character_profiles = character_profiles
    _template_engine = template_engine
    _catalog = catalog
    _comfyui_client = comfyui_client
@router.get("/health")
async def health_check():
    """Basic health check: reports whether the ComfyUI backend is reachable."""
    # Short-circuits to False when no client has been injected yet.
    comfyui_ok = bool(_comfyui_client) and await _comfyui_client.is_available()
    return {"status": "ok", "comfyui": comfyui_ok}
@router.get("/status", response_model=SystemStatus)
async def system_status():
    """Get comprehensive system status.

    Queries the ComfyUI backend (when injected and reachable) for GPU name,
    VRAM totals, and queue depth, and the catalog for the total image count.
    Backend stat failures are tolerated: the response then simply omits
    GPU/queue details rather than failing the endpoint.
    """
    comfyui_connected = False
    gpu_name = None
    vram_total_gb = None
    vram_free_gb = None
    queue_depth = 0
    if _comfyui_client:
        comfyui_connected = await _comfyui_client.is_available()
        if comfyui_connected:
            try:
                stats = await _comfyui_client.get_system_stats()
                devices = stats.get("devices", [])
                if devices:
                    gpu_name = devices[0].get("name")
                    # Backend reports VRAM in bytes; convert to GiB.
                    vram_total_gb = devices[0].get("vram_total", 0) / (1024**3)
                    vram_free_gb = devices[0].get("vram_free", 0) / (1024**3)
                queue_depth = await _comfyui_client.get_queue_depth()
            except Exception:
                # Best-effort: stats are informational only; never fail
                # the status endpoint because of them.
                pass
    total_images = 0
    if _catalog:
        total_images = await _catalog.get_total_count()
    return SystemStatus(
        comfyui_connected=comfyui_connected,
        gpu_name=gpu_name,
        # FIX: use `is not None` — the previous truthiness check reported a
        # legitimate 0.0 GB reading as null (0.0 is falsy).
        vram_total_gb=round(vram_total_gb, 2) if vram_total_gb is not None else None,
        vram_free_gb=round(vram_free_gb, 2) if vram_free_gb is not None else None,
        local_queue_depth=queue_depth,
        cloud_available=False,  # Phase 4
        total_images=total_images,
        pending_jobs=0,
    )
@router.get("/templates")
async def list_templates():
    """List all available prompt templates."""
    if _template_engine is None:
        return []
    payload = []
    for tpl in _template_engine.list_templates():
        # Flatten each variable definition into a plain serializable dict.
        variables = {}
        for var_name, var_def in tpl.variables.items():
            variables[var_name] = {
                "type": var_def.type,
                "options": var_def.options,
                "required": var_def.required,
            }
        payload.append({
            "id": tpl.id,
            "name": tpl.name,
            "category": tpl.category,
            "rating": tpl.rating,
            "variables": variables,
        })
    return payload
@router.get("/characters")
async def list_characters():
    """List all configured character profiles."""
    if _character_profiles is None:
        return []
    results = []
    for profile in _character_profiles.values():
        results.append({
            "id": profile.id,
            "name": profile.name,
            "trigger_word": profile.trigger_word,
            "lora_filename": profile.lora_filename,
            "lora_strength": profile.lora_strength,
            "description": profile.description,
        })
    return results
@router.get("/models/loras")
async def list_loras():
    """List available LoRA models from ComfyUI."""
    if _comfyui_client is None:
        return []
    try:
        models = await _comfyui_client.get_models("loras")
    except Exception:
        # Backend unreachable or errored — report an empty list instead.
        return []
    return models
@router.get("/models/checkpoints")
async def list_checkpoints():
    """List available checkpoint models from ComfyUI."""
    if _comfyui_client is None:
        return []
    try:
        checkpoints = await _comfyui_client.get_models("checkpoints")
    except Exception:
        # Backend unreachable or errored — report an empty list instead.
        return []
    return checkpoints
# --- API Settings ---
class APISettingsResponse(BaseModel):
    """Masked view of the configured cloud API credentials."""
    # True when RUNPOD_API_KEY is present in the environment.
    runpod_configured: bool
    # Masked key preview (e.g. "****abcd"); None when the key is unset.
    runpod_key_preview: str | None = None
    # True when WAVESPEED_API_KEY is present in the environment.
    wavespeed_configured: bool
    # Masked key preview; None when the key is unset.
    wavespeed_key_preview: str | None = None
    # True when running on Hugging Face Spaces (keys managed via Space secrets).
    is_cloud: bool
    # Path of the local .env file keys persist to; None on Spaces.
    env_file_path: str | None = None
class UpdateAPIKeysRequest(BaseModel):
    """Request body for updating cloud API keys.

    Fields left as None are not modified by the update endpoint.
    """
    runpod_api_key: str | None = None
    wavespeed_api_key: str | None = None
def _mask_key(key: str | None) -> str | None:
"""Mask API key showing only last 4 chars."""
if not key:
return None
if len(key) <= 8:
return "****"
return f"****{key[-4:]}"
@router.get("/settings/api", response_model=APISettingsResponse)
async def get_api_settings():
    """Get current API settings status (keys are masked)."""
    runpod_key = os.environ.get("RUNPOD_API_KEY")
    wavespeed_key = os.environ.get("WAVESPEED_API_KEY")
    # The on-disk .env location only applies to local (non-Spaces) runs.
    # NOTE(review): machine-specific hard-coded path — consider moving to config.
    env_file = None if IS_HF_SPACES else "D:/AI automation/content_engine/.env"
    return APISettingsResponse(
        runpod_configured=bool(runpod_key),
        runpod_key_preview=_mask_key(runpod_key),
        wavespeed_configured=bool(wavespeed_key),
        wavespeed_key_preview=_mask_key(wavespeed_key),
        is_cloud=IS_HF_SPACES,
        env_file_path=env_file,
    )
@router.post("/settings/api")
async def update_api_settings(request: UpdateAPIKeysRequest):
    """Update API keys. Only works in local mode (not HF Spaces).

    On HF Spaces, use the Settings > Secrets panel instead.

    Persists the supplied keys to the local .env file and exports them to
    the current process environment. Existing unrelated entries are kept;
    comment and blank lines are dropped on rewrite.

    Raises:
        HTTPException: 400 when running on Hugging Face Spaces, where keys
            must be managed through the Space's secrets panel.
    """
    if IS_HF_SPACES:
        raise HTTPException(
            400,
            "Cannot update API keys on Hugging Face Spaces. "
            "Use Settings > Variables and secrets in your Space dashboard."
        )
    # NOTE(review): machine-specific hard-coded path — consider moving to config.
    env_path = Path("D:/AI automation/content_engine/.env")
    # Read existing .env, skipping blanks, comments, and malformed lines.
    # FIX: read/write with explicit UTF-8 — the previous open() calls used the
    # locale default encoding, which on Windows (the target, per the D:/ path)
    # is typically cp1252 and can corrupt non-ASCII key values.
    existing: dict[str, str] = {}
    if env_path.exists():
        for line in env_path.read_text(encoding="utf-8").splitlines():
            line = line.strip()
            if line and not line.startswith("#") and "=" in line:
                key, val = line.split("=", 1)
                existing[key.strip()] = val.strip()
    # Apply requested updates both on disk and in the live environment.
    updated = []
    if request.runpod_api_key is not None:
        existing["RUNPOD_API_KEY"] = request.runpod_api_key
        os.environ["RUNPOD_API_KEY"] = request.runpod_api_key
        updated.append("RUNPOD_API_KEY")
    if request.wavespeed_api_key is not None:
        existing["WAVESPEED_API_KEY"] = request.wavespeed_api_key
        os.environ["WAVESPEED_API_KEY"] = request.wavespeed_api_key
        updated.append("WAVESPEED_API_KEY")
    # Write back the merged key/value set.
    env_path.write_text(
        "".join(f"{key}={val}\n" for key, val in existing.items()),
        encoding="utf-8",
    )
    return {
        "status": "updated",
        "updated_keys": updated,
        "message": "API keys updated. Restart the server to fully apply changes."
    }