diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..df8f38022d731ee1809bd100d15c65660f59c0a9
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,40 @@
+# Hugging Face Spaces Dockerfile for Content Engine
+FROM python:3.11-slim
+
+# Create user with UID 1000 (HF Spaces requirement)
+RUN useradd -m -u 1000 user
+
+# Install system dependencies as root
+RUN apt-get update && apt-get install -y \
+ git \
+ && rm -rf /var/lib/apt/lists/*
+
+# Switch to user
+USER user
+ENV PATH="/home/user/.local/bin:$PATH"
+
+WORKDIR /app
+
+# Copy requirements first for caching
+COPY --chown=user ./requirements.txt requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+# Copy application code
+COPY --chown=user ./src ./src
+COPY --chown=user ./config ./config
+
+# Create directories for data persistence
+RUN mkdir -p /app/data/output /app/data/output/videos /app/data/db /app/data/uploads /app/data/loras /app/data/models /app/data/training
+
+# Set environment variables
+ENV PYTHONUNBUFFERED=1
+ENV PYTHONPATH=/app/src
+ENV HF_SPACES=1
+ENV OUTPUT_DIR=/app/data/output
+ENV DATA_DIR=/app/data
+ENV DB_PATH=/app/data/db/content_engine.db
+ENV UPLOAD_DIR=/app/data/uploads
+
+# HF Spaces requires port 7860
+EXPOSE 7860
+CMD ["uvicorn", "content_engine.main:app", "--host", "0.0.0.0", "--port", "7860"]
diff --git a/README.md b/README.md
index 819312936f237b03f30efd62a3707aae560fe2bd..74c96840ab232803be777b0d7472c69ffa98b3d9 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,41 @@
----
-title: Content Engine
-emoji: 🏢
-colorFrom: red
-colorTo: blue
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# Content Engine - AI Image & Video Generation
+
+A web-based content generation platform using RunPod for GPU-powered image and video generation.
+
+## Features
+
+- **Image Generation**: FLUX.2 and WAN 2.2 models via RunPod GPU
+- **Video Generation**: WAN 2.2 Image-to-Video
+- **LoRA Training**: Train custom character models
+- **Gallery**: Browse, download, and manage generated content
+- **Templates**: Pre-configured prompts for consistent results
+
+## Setup
+
+This Space requires RunPod API credentials to function:
+
+1. Get your RunPod API key from https://www.runpod.io/console/user/settings
+2. Add it as a Space Secret: `RUNPOD_API_KEY`
+
+Optional:
+- `WAVESPEED_API_KEY`: For WaveSpeed cloud generation (alternative backend)
+
+## Usage
+
+1. Go to **Status** page and click **Start Pod** to boot a GPU
+2. Wait ~2-3 minutes for the pod to be ready
+3. Use **Generate** page to create images/videos
+4. **Stop Pod** when done to save costs
+
+## Cost
+
+- RunPod GPU (RTX 4090): ~$0.44/hour while running
+- No cost when pod is stopped
+- Images/videos stored on Hugging Face (free)
+
+## Tech Stack
+
+- FastAPI backend
+- RunPod for GPU compute
+- SQLite for metadata
+- Pure HTML/CSS/JS frontend
diff --git a/config/characters/example_character.yaml b/config/characters/example_character.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1fa06d70dadc034ce9686d1f3f4abbc90e128022
--- /dev/null
+++ b/config/characters/example_character.yaml
@@ -0,0 +1,29 @@
+# Example character profile — replace with your trained LoRA details
+#
+# To use this:
+# 1. Train a LoRA using Kohya_ss with ~20-50 reference images
+# 2. Place the .safetensors file in D:\ComfyUI\Models\Lora\
+# 3. Update the fields below with your character's details
+# 4. Rename this file to your character's name (e.g., alice.yaml)
+
+id: example
+name: "Example Character"
+trigger_word: "examplechar" # The trigger word used during LoRA training
+lora_filename: "example_v1.safetensors" # Filename in D:\ComfyUI\Models\Lora\
+lora_strength: 0.85 # 0.6-0.9 typically works best
+
+# Optional: override default checkpoint for this character
+# default_checkpoint: "realisticVisionV51_v51VAE.safetensors"
+
+# Optional: additional style LoRAs to stack
+style_loras: []
+ # - name: "glamour_style_v1.safetensors"
+ # strength_model: 0.5
+ # strength_clip: 0.5
+
+description: "Example character for testing the pipeline"
+
+physical_traits:
+ hair: "brown, shoulder length"
+ eyes: "blue"
+ build: "average"
diff --git a/config/models.yaml b/config/models.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6f4c64aa412db204332c10058b8e8fdab4b84163
--- /dev/null
+++ b/config/models.yaml
@@ -0,0 +1,95 @@
+# Training Model Registry
+# Defines base models available for LoRA training with their optimal parameters
+
+training_models:
+ # FLUX - Best for photorealistic images (recommended for realistic people)
+ flux2_dev:
+ name: "FLUX.2 Dev (Recommended)"
+ description: "Latest FLUX model, 32B params, best quality for realistic people. Also supports multi-reference generation without training."
+ hf_repo: "black-forest-labs/FLUX.2-dev"
+ hf_filename: "flux.2-dev.safetensors"
+ model_type: "flux"
+ resolution: 1024
+ learning_rate: 1e-3
+ text_encoder_lr: 1e-4
+ network_rank: 48
+ network_alpha: 24
+ clip_skip: 1
+ optimizer: "AdamW8bit"
+ lr_scheduler: "cosine"
+ min_snr_gamma: 5
+ max_train_steps: 1200
+ fp8_base: true
+ use_case: "images"
+ vram_required_gb: 24
+ recommended_images: "15-30 high quality photos with detailed captions"
+ training_script: "flux_train_network.py"
+
+ flux1_dev:
+ name: "FLUX.1 Dev"
+ description: "Previous-gen FLUX, still excellent for realistic character LoRAs"
+ hf_repo: "black-forest-labs/FLUX.1-dev"
+ hf_filename: "flux1-dev.safetensors"
+ model_type: "flux"
+ resolution: 768
+ learning_rate: 4e-4
+ text_encoder_lr: 4e-5
+ network_rank: 32
+ network_alpha: 16
+ clip_skip: 1
+ optimizer: "AdamW8bit"
+ lr_scheduler: "cosine"
+ min_snr_gamma: 5
+ max_train_steps: 1500
+ use_case: "images"
+ vram_required_gb: 24
+ recommended_images: "15-30 high quality photos"
+ training_script: "flux_train_network.py"
+
+ # SD 1.5 Realistic Vision - Good balance of quality and speed
+ sd15_realistic:
+ name: "Realistic Vision V5.1"
+ description: "SD 1.5 based, great for realistic humans, faster training"
+ hf_repo: "SG161222/Realistic_Vision_V5.1_noVAE"
+ hf_filename: "Realistic_Vision_V5.1_fp16-no-ema.safetensors"
+ model_type: "sd15"
+ resolution: 512
+ learning_rate: 1e-4
+ network_rank: 32
+ network_alpha: 16
+ clip_skip: 1
+ optimizer: "AdamW8bit"
+ use_case: "images"
+ vram_required_gb: 8
+ recommended_images: "15-30 photos"
+
+ # SDXL - Higher quality than SD 1.5, but more VRAM
+ sdxl_base:
+ name: "SDXL Base 1.0"
+ description: "Higher resolution and quality than SD 1.5"
+ hf_repo: "stabilityai/stable-diffusion-xl-base-1.0"
+ hf_filename: "sd_xl_base_1.0.safetensors"
+ model_type: "sdxl"
+ resolution: 1024
+ learning_rate: 1e-4
+ network_rank: 32
+ network_alpha: 16
+ clip_skip: 2
+ optimizer: "AdamW8bit"
+ use_case: "images"
+ vram_required_gb: 12
+ recommended_images: "20-40 photos"
+
+# Video generation models (for img2video, not training)
+video_models:
+ wan22_i2v:
+ name: "WAN 2.2 Image-to-Video"
+ description: "Converts images to videos; use with images generated from your trained LoRA"
+ hf_repo: "Wan-AI/Wan2.2-I2V-A14B"
+ model_type: "wan22"
+ use_case: "img2video"
+ vram_required_gb: 24
+ resolution: "480p/720p"
+
+# Default model for training
+default_training_model: "flux2_dev"
diff --git a/config/settings.yaml b/config/settings.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d8b980ec7844f533db43cde2709e9490fba1d270
--- /dev/null
+++ b/config/settings.yaml
@@ -0,0 +1,47 @@
+# Content Engine Configuration
+
+comfyui:
+ url: "http://127.0.0.1:8188"
+ # Maximum jobs to queue locally before routing to cloud
+ max_local_queue_depth: 3
+ # Minimum free VRAM (GB) required to accept a local job
+ min_vram_gb: 2.0
+
+paths:
+ output_dir: "D:/AI automation/output"
+ data_dir: "D:/AI automation/data"
+ # ComfyUI model paths (from extra_model_paths.yaml)
+ lora_dir: "D:/ComfyUI/Models/Lora"
+ checkpoint_dir: "D:/ComfyUI/Models/StableDiffusion"
+
+database:
+ # SQLite for v1, switch to postgresql:// for v2
+ url: "sqlite+aiosqlite:///D:/AI automation/data/catalog.db"
+ jobs_url: "sqlite+aiosqlite:///D:/AI automation/data/jobs.db"
+
+generation:
+ # Default generation parameters
+ default_checkpoint: "realisticVisionV51_v51VAE.safetensors"
+ default_steps: 28
+ default_cfg: 7.0
+ default_sampler: "dpmpp_2m"
+ default_scheduler: "karras"
+ default_width: 832
+ default_height: 1216
+
+scheduling:
+ # Posts per day per character
+ posts_per_day: 3
+ # Peak posting hours (UTC)
+ peak_hours: [10, 14, 20]
+ # SFW to NSFW ratio for scheduling
+ sfw_ratio: 0.4
+
+cloud_providers: []
+ # Uncomment and configure when ready (Phase 4)
+ # - name: replicate
+ # api_key: "${REPLICATE_API_KEY}"
+ # priority: 1
+ # - name: runpod
+ # api_key: "${RUNPOD_API_KEY}"
+ # priority: 2
diff --git a/config/templates/prompts/artistic_nude.yaml b/config/templates/prompts/artistic_nude.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f636c5f0739bcf6b04fcf66f4ae46e7d6da12d0a
--- /dev/null
+++ b/config/templates/prompts/artistic_nude.yaml
@@ -0,0 +1,100 @@
+id: artistic_nude
+name: "Artistic Nude"
+category: artistic
+rating: nsfw
+base_model: realistic_vision
+
+loras:
+ - name: "{{character_lora}}"
+ strength_model: 0.85
+ strength_clip: 0.85
+
+positive_prompt: >
+ {{character_trigger}}, {{pose}}, nude,
+ {{emotion}} expression, {{camera_angle}},
+ {{lighting}}, {{scene}},
+ masterpiece, best quality, photorealistic, 8k uhd,
+ detailed skin texture, fine art photography,
+ artistic composition, tasteful, elegant
+
+negative_prompt: >
+ worst quality, low quality, blurry, deformed,
+ bad anatomy, bad hands, extra fingers,
+ watermark, text, signature, cartoon, anime,
+ unrealistic proportions, ugly
+
+sampler:
+ steps: 30
+ cfg: 7.5
+ sampler_name: dpmpp_2m
+ scheduler: karras
+ width: 832
+ height: 1216
+
+variables:
+ character_trigger:
+ type: string
+ required: true
+ description: "Character trigger word from LoRA training"
+ character_lora:
+ type: string
+ required: true
+ description: "Character LoRA filename"
+ pose:
+ type: choice
+ options:
+ - reclining
+ - standing profile
+ - seated with crossed legs
+ - back view
+ - curled up
+ - stretching
+ - lying on stomach
+ - kneeling
+ emotion:
+ type: choice
+ options:
+ - serene
+ - contemplative
+ - confident
+ - vulnerable
+ - sensual
+ - mysterious
+ - peaceful
+ camera_angle:
+ type: choice
+ options:
+ - eye level
+ - above looking down
+ - low angle
+ - side profile
+ - three quarter
+ - from behind
+ lighting:
+ type: choice
+ options:
+ - chiaroscuro
+ - soft diffused
+ - rim lighting
+ - golden hour
+ - dramatic single source
+ - natural daylight
+ - moody low key
+ scene:
+ type: choice
+ options:
+ - minimalist studio
+ - natural landscape
+ - classical interior
+ - fabric drapes
+ - water reflection
+ - garden
+ - abstract background
+
+motion:
+ enabled: false
+ type: loop
+ intensity: 0.3
+ motion_keywords:
+ - "slow breathing"
+ - "gentle wind"
diff --git a/config/templates/prompts/boudoir_intimate.yaml b/config/templates/prompts/boudoir_intimate.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a3dc945092141830c5c711c4acf71ca841cd7971
--- /dev/null
+++ b/config/templates/prompts/boudoir_intimate.yaml
@@ -0,0 +1,112 @@
+id: boudoir_intimate
+name: "Boudoir Intimate"
+category: boudoir
+rating: nsfw
+base_model: realistic_vision
+
+loras:
+ - name: "{{character_lora}}"
+ strength_model: 0.85
+ strength_clip: 0.85
+
+positive_prompt: >
+ {{character_trigger}}, {{pose}}, {{outfit}},
+ {{emotion}} expression, {{camera_angle}},
+ {{lighting}}, {{scene}},
+ masterpiece, best quality, photorealistic, 8k uhd,
+ detailed skin texture, professional boudoir photography,
+ intimate atmosphere, sensual, alluring
+
+negative_prompt: >
+ worst quality, low quality, blurry, deformed,
+ bad anatomy, bad hands, extra fingers,
+ watermark, text, signature, cartoon, anime,
+ unrealistic proportions
+
+sampler:
+ steps: 30
+ cfg: 7.0
+ sampler_name: dpmpp_2m
+ scheduler: karras
+ width: 832
+ height: 1216
+
+variables:
+ character_trigger:
+ type: string
+ required: true
+ description: "Character trigger word from LoRA training"
+ character_lora:
+ type: string
+ required: true
+ description: "Character LoRA filename"
+ pose:
+ type: choice
+ options:
+ - lying on bed
+ - sitting on edge of bed
+ - standing by window
+ - kneeling
+ - reclining on couch
+ - looking over shoulder
+ - stretching
+ - leaning forward
+ outfit:
+ type: choice
+ options:
+ - lingerie
+ - silk robe
+ - lace bodysuit
+ - sheer nightgown
+ - corset
+ - bikini
+ - oversized shirt
+ - towel
+ emotion:
+ type: choice
+ options:
+ - seductive
+ - playful
+ - confident
+ - dreamy
+ - mysterious
+ - inviting
+ - coy
+ camera_angle:
+ type: choice
+ options:
+ - eye level
+ - low angle
+ - high angle looking down
+ - close-up
+ - three quarter view
+ - from behind
+ lighting:
+ type: choice
+ options:
+ - warm candlelight
+ - soft window light
+ - golden hour
+ - dim ambient light
+ - neon accent lighting
+ - dramatic shadows
+ - backlit silhouette
+ scene:
+ type: choice
+ options:
+ - luxury bedroom
+ - hotel room
+ - bathtub
+ - balcony at dusk
+ - silk sheets
+ - vanity mirror
+ - penthouse suite
+
+motion:
+ enabled: false
+ type: loop
+ intensity: 0.5
+ motion_keywords:
+ - "slow breathing"
+ - "gentle movement"
+ - "hair falling"
diff --git a/config/templates/prompts/lifestyle_casual.yaml b/config/templates/prompts/lifestyle_casual.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..29127f88c3c6c287564122222bbae6520ffd82ed
--- /dev/null
+++ b/config/templates/prompts/lifestyle_casual.yaml
@@ -0,0 +1,88 @@
+id: lifestyle_casual
+name: "Casual Lifestyle"
+category: lifestyle
+rating: sfw
+base_model: realistic_vision
+
+loras:
+ - name: "{{character_lora}}"
+ strength_model: 0.85
+ strength_clip: 0.85
+
+positive_prompt: >
+ {{character_trigger}}, casual lifestyle photo,
+ {{activity}}, {{outfit}}, {{emotion}} expression,
+ {{camera_angle}}, {{lighting}}, {{scene}},
+ masterpiece, best quality, photorealistic,
+ candid photography style, natural look
+
+negative_prompt: >
+ worst quality, low quality, blurry, deformed,
+ bad anatomy, bad hands, extra fingers,
+ watermark, text, signature, overly posed
+
+sampler:
+ steps: 25
+ cfg: 6.5
+ sampler_name: dpmpp_2m
+ scheduler: karras
+ width: 1024
+ height: 1024
+
+variables:
+ character_trigger:
+ type: string
+ required: true
+ character_lora:
+ type: string
+ required: true
+ activity:
+ type: choice
+ options:
+ - reading a book
+ - drinking coffee
+ - walking in park
+ - stretching
+ - cooking
+ - using laptop
+ - taking selfie
+ outfit:
+ type: choice
+ options:
+ - oversized sweater
+ - yoga pants and tank top
+ - summer dress
+ - jeans and t-shirt
+ - pajamas
+ - workout clothes
+ emotion:
+ type: choice
+ options:
+ - relaxed
+ - happy
+ - focused
+ - dreamy
+ - cheerful
+ camera_angle:
+ type: choice
+ options:
+ - eye level
+ - slightly above
+ - candid angle
+ - over the shoulder
+ lighting:
+ type: choice
+ options:
+ - morning light
+ - afternoon sun
+ - warm indoor lighting
+ - window light
+ scene:
+ type: choice
+ options:
+ - cozy bedroom
+ - modern kitchen
+ - sunny balcony
+ - coffee shop
+ - living room with plants
+ - yoga studio
diff --git a/config/templates/prompts/portrait_glamour.yaml b/config/templates/prompts/portrait_glamour.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..44c9668373ff635d5e7fb2c63d89b4e47baa4d8f
--- /dev/null
+++ b/config/templates/prompts/portrait_glamour.yaml
@@ -0,0 +1,108 @@
+id: portrait_glamour
+name: "Glamour Portrait"
+category: portrait
+rating: sfw
+base_model: realistic_vision
+
+loras:
+ - name: "{{character_lora}}"
+ strength_model: 0.85
+ strength_clip: 0.85
+
+positive_prompt: >
+ {{character_trigger}}, {{pose}}, {{outfit}},
+ {{emotion}} expression, {{camera_angle}},
+ {{lighting}}, {{scene}},
+ masterpiece, best quality, photorealistic, 8k uhd,
+ detailed skin texture, professional photography
+
+negative_prompt: >
+ worst quality, low quality, blurry, deformed,
+ bad anatomy, bad hands, extra fingers,
+ watermark, text, signature
+
+sampler:
+ steps: 28
+ cfg: 7.0
+ sampler_name: dpmpp_2m
+ scheduler: karras
+ width: 832
+ height: 1216
+
+variables:
+ character_trigger:
+ type: string
+ required: true
+ description: "Character trigger word from LoRA training"
+ character_lora:
+ type: string
+ required: true
+ description: "Character LoRA filename"
+ pose:
+ type: choice
+ options:
+ - standing
+ - sitting
+ - leaning against wall
+ - walking
+ - looking over shoulder
+ - hands on hips
+ - arms crossed
+ outfit:
+ type: choice
+ options:
+ - casual dress
+ - evening gown
+ - business suit
+ - athletic wear
+ - sundress
+ - leather jacket
+ - crop top and jeans
+ emotion:
+ type: choice
+ options:
+ - confident
+ - playful
+ - serious
+ - mysterious
+ - warm smile
+ - contemplative
+ - laughing
+ camera_angle:
+ type: choice
+ options:
+ - front view
+ - three quarter view
+ - side profile
+ - low angle
+ - high angle
+ - close-up portrait
+ lighting:
+ type: choice
+ options:
+ - natural light
+ - golden hour
+ - studio lighting
+ - rim lighting
+ - neon lighting
+ - dramatic shadows
+ - soft diffused light
+ scene:
+ type: choice
+ options:
+ - urban rooftop
+ - luxury interior
+ - garden
+ - beach at sunset
+ - studio backdrop
+ - cozy cafe
+ - city street at night
+
+motion:
+ enabled: false
+ type: loop
+ intensity: 0.7
+ motion_keywords:
+ - "gentle swaying"
+ - "hair flowing in wind"
+ - "slow breathing"
diff --git a/config/templates/workflows/sd15_base_nsfw.json b/config/templates/workflows/sd15_base_nsfw.json
new file mode 100644
index 0000000000000000000000000000000000000000..b66222895ea587bafcb10e7fc8c522b2ef5c8a60
--- /dev/null
+++ b/config/templates/workflows/sd15_base_nsfw.json
@@ -0,0 +1,59 @@
+{
+ "1": {
+ "class_type": "CheckpointLoaderSimple",
+ "inputs": {
+ "ckpt_name": "realisticVisionV51_v51VAE.safetensors"
+ }
+ },
+ "2": {
+ "class_type": "CLIPTextEncode",
+ "inputs": {
+ "clip": ["1", 1],
+ "text": ""
+ }
+ },
+ "3": {
+ "class_type": "CLIPTextEncode",
+ "inputs": {
+ "clip": ["1", 1],
+ "text": "worst quality, low quality, blurry, deformed, bad anatomy, bad hands, extra fingers, watermark, text, signature, censored"
+ }
+ },
+ "4": {
+ "class_type": "EmptyLatentImage",
+ "inputs": {
+ "width": 832,
+ "height": 1216,
+ "batch_size": 1
+ }
+ },
+ "5": {
+ "class_type": "KSampler",
+ "inputs": {
+ "model": ["1", 0],
+ "positive": ["2", 0],
+ "negative": ["3", 0],
+ "latent_image": ["4", 0],
+ "seed": 0,
+ "steps": 28,
+ "cfg": 7.0,
+ "sampler_name": "dpmpp_2m",
+ "scheduler": "karras",
+ "denoise": 1.0
+ }
+ },
+ "6": {
+ "class_type": "VAEDecode",
+ "inputs": {
+ "samples": ["5", 0],
+ "vae": ["1", 2]
+ }
+ },
+ "7": {
+ "class_type": "SaveImage",
+ "inputs": {
+ "images": ["6", 0],
+ "filename_prefix": "content_engine"
+ }
+ }
+}
diff --git a/config/templates/workflows/sd15_base_sfw.json b/config/templates/workflows/sd15_base_sfw.json
new file mode 100644
index 0000000000000000000000000000000000000000..b59c999edea380c7dad93e25c7767ebe69a66a40
--- /dev/null
+++ b/config/templates/workflows/sd15_base_sfw.json
@@ -0,0 +1,59 @@
+{
+ "1": {
+ "class_type": "CheckpointLoaderSimple",
+ "inputs": {
+ "ckpt_name": "realisticVisionV51_v51VAE.safetensors"
+ }
+ },
+ "2": {
+ "class_type": "CLIPTextEncode",
+ "inputs": {
+ "clip": ["1", 1],
+ "text": ""
+ }
+ },
+ "3": {
+ "class_type": "CLIPTextEncode",
+ "inputs": {
+ "clip": ["1", 1],
+ "text": "worst quality, low quality, blurry, deformed, bad anatomy, bad hands, extra fingers, watermark, text, signature"
+ }
+ },
+ "4": {
+ "class_type": "EmptyLatentImage",
+ "inputs": {
+ "width": 832,
+ "height": 1216,
+ "batch_size": 1
+ }
+ },
+ "5": {
+ "class_type": "KSampler",
+ "inputs": {
+ "model": ["1", 0],
+ "positive": ["2", 0],
+ "negative": ["3", 0],
+ "latent_image": ["4", 0],
+ "seed": 0,
+ "steps": 28,
+ "cfg": 7.0,
+ "sampler_name": "dpmpp_2m",
+ "scheduler": "karras",
+ "denoise": 1.0
+ }
+ },
+ "6": {
+ "class_type": "VAEDecode",
+ "inputs": {
+ "samples": ["5", 0],
+ "vae": ["1", 2]
+ }
+ },
+ "7": {
+ "class_type": "SaveImage",
+ "inputs": {
+ "images": ["6", 0],
+ "filename_prefix": "content_engine"
+ }
+ }
+}
diff --git a/config/templates/workflows/sd15_img2img_nsfw.json b/config/templates/workflows/sd15_img2img_nsfw.json
new file mode 100644
index 0000000000000000000000000000000000000000..e0e626fe3b0911c6d783645f57de4482d3bac1ac
--- /dev/null
+++ b/config/templates/workflows/sd15_img2img_nsfw.json
@@ -0,0 +1,64 @@
+{
+ "1": {
+ "class_type": "CheckpointLoaderSimple",
+ "inputs": {
+ "ckpt_name": "realisticVisionV51_v51VAE.safetensors"
+ }
+ },
+ "2": {
+ "class_type": "CLIPTextEncode",
+ "inputs": {
+ "clip": ["1", 1],
+ "text": ""
+ }
+ },
+ "3": {
+ "class_type": "CLIPTextEncode",
+ "inputs": {
+ "clip": ["1", 1],
+ "text": "worst quality, low quality, blurry, deformed, bad anatomy, bad hands, extra fingers, watermark, text, signature, censored"
+ }
+ },
+ "8": {
+ "class_type": "LoadImage",
+ "inputs": {
+ "image": "input_image.png"
+ }
+ },
+ "9": {
+ "class_type": "VAEEncode",
+ "inputs": {
+ "pixels": ["8", 0],
+ "vae": ["1", 2]
+ }
+ },
+ "5": {
+ "class_type": "KSampler",
+ "inputs": {
+ "model": ["1", 0],
+ "positive": ["2", 0],
+ "negative": ["3", 0],
+ "latent_image": ["9", 0],
+ "seed": 0,
+ "steps": 28,
+ "cfg": 7.0,
+ "sampler_name": "dpmpp_2m",
+ "scheduler": "karras",
+ "denoise": 0.65
+ }
+ },
+ "6": {
+ "class_type": "VAEDecode",
+ "inputs": {
+ "samples": ["5", 0],
+ "vae": ["1", 2]
+ }
+ },
+ "7": {
+ "class_type": "SaveImage",
+ "inputs": {
+ "images": ["6", 0],
+ "filename_prefix": "content_engine_img2img"
+ }
+ }
+}
diff --git a/config/templates/workflows/sd15_img2img_sfw.json b/config/templates/workflows/sd15_img2img_sfw.json
new file mode 100644
index 0000000000000000000000000000000000000000..e0e626fe3b0911c6d783645f57de4482d3bac1ac
--- /dev/null
+++ b/config/templates/workflows/sd15_img2img_sfw.json
@@ -0,0 +1,64 @@
+{
+ "1": {
+ "class_type": "CheckpointLoaderSimple",
+ "inputs": {
+ "ckpt_name": "realisticVisionV51_v51VAE.safetensors"
+ }
+ },
+ "2": {
+ "class_type": "CLIPTextEncode",
+ "inputs": {
+ "clip": ["1", 1],
+ "text": ""
+ }
+ },
+ "3": {
+ "class_type": "CLIPTextEncode",
+ "inputs": {
+ "clip": ["1", 1],
+ "text": "worst quality, low quality, blurry, deformed, bad anatomy, bad hands, extra fingers, watermark, text, signature"
+ }
+ },
+ "8": {
+ "class_type": "LoadImage",
+ "inputs": {
+ "image": "input_image.png"
+ }
+ },
+ "9": {
+ "class_type": "VAEEncode",
+ "inputs": {
+ "pixels": ["8", 0],
+ "vae": ["1", 2]
+ }
+ },
+ "5": {
+ "class_type": "KSampler",
+ "inputs": {
+ "model": ["1", 0],
+ "positive": ["2", 0],
+ "negative": ["3", 0],
+ "latent_image": ["9", 0],
+ "seed": 0,
+ "steps": 28,
+ "cfg": 7.0,
+ "sampler_name": "dpmpp_2m",
+ "scheduler": "karras",
+ "denoise": 0.65
+ }
+ },
+ "6": {
+ "class_type": "VAEDecode",
+ "inputs": {
+ "samples": ["5", 0],
+ "vae": ["1", 2]
+ }
+ },
+ "7": {
+ "class_type": "SaveImage",
+ "inputs": {
+ "images": ["6", 0],
+ "filename_prefix": "content_engine_img2img"
+ }
+ }
+}
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f7c91768dbcb1168657c848a6e78f304248d417e
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,15 @@
+fastapi>=0.109.0
+uvicorn[standard]>=0.27.0
+aiohttp>=3.9.0
+sqlalchemy>=2.0.0
+aiosqlite>=0.19.0
+pydantic>=2.5.0
+pydantic-settings>=2.1.0
+jinja2>=3.1.0
+Pillow>=10.2.0
+httpx>=0.26.0
+pyyaml>=6.0
+python-multipart>=0.0.6
+python-dotenv>=1.0.0
+runpod>=1.6.0
+paramiko>=3.4.0
diff --git a/src/content_engine.egg-info/PKG-INFO b/src/content_engine.egg-info/PKG-INFO
new file mode 100644
index 0000000000000000000000000000000000000000..63c2c8d76c8c7dda1fc4edfacca91e9d39551da0
--- /dev/null
+++ b/src/content_engine.egg-info/PKG-INFO
@@ -0,0 +1,25 @@
+Metadata-Version: 2.4
+Name: content-engine
+Version: 0.1.0
+Summary: Automated content generation system using ComfyUI
+Requires-Python: >=3.11
+Requires-Dist: fastapi>=0.109.0
+Requires-Dist: uvicorn[standard]>=0.27.0
+Requires-Dist: aiohttp>=3.9.0
+Requires-Dist: sqlalchemy>=2.0.0
+Requires-Dist: alembic>=1.13.0
+Requires-Dist: aiosqlite>=0.19.0
+Requires-Dist: pydantic>=2.5.0
+Requires-Dist: pydantic-settings>=2.1.0
+Requires-Dist: jinja2>=3.1.0
+Requires-Dist: Pillow>=10.2.0
+Requires-Dist: apscheduler>=3.10.0
+Requires-Dist: httpx>=0.26.0
+Requires-Dist: pyyaml>=6.0
+Requires-Dist: python-multipart>=0.0.6
+Provides-Extra: cloud
+Requires-Dist: replicate>=0.22.0; extra == "cloud"
+Provides-Extra: dev
+Requires-Dist: pytest>=7.4.0; extra == "dev"
+Requires-Dist: pytest-asyncio>=0.23.0; extra == "dev"
+Requires-Dist: httpx>=0.26.0; extra == "dev"
diff --git a/src/content_engine.egg-info/SOURCES.txt b/src/content_engine.egg-info/SOURCES.txt
new file mode 100644
index 0000000000000000000000000000000000000000..68ad901ee522d7a131f271a920aa6277e4b50732
--- /dev/null
+++ b/src/content_engine.egg-info/SOURCES.txt
@@ -0,0 +1,30 @@
+pyproject.toml
+src/content_engine/__init__.py
+src/content_engine/config.py
+src/content_engine/main.py
+src/content_engine.egg-info/PKG-INFO
+src/content_engine.egg-info/SOURCES.txt
+src/content_engine.egg-info/dependency_links.txt
+src/content_engine.egg-info/requires.txt
+src/content_engine.egg-info/top_level.txt
+src/content_engine/api/__init__.py
+src/content_engine/api/routes_catalog.py
+src/content_engine/api/routes_generation.py
+src/content_engine/api/routes_system.py
+src/content_engine/models/__init__.py
+src/content_engine/models/database.py
+src/content_engine/models/schemas.py
+src/content_engine/services/__init__.py
+src/content_engine/services/catalog.py
+src/content_engine/services/comfyui_client.py
+src/content_engine/services/router.py
+src/content_engine/services/template_engine.py
+src/content_engine/services/variation_engine.py
+src/content_engine/services/workflow_builder.py
+src/content_engine/services/cloud_providers/__init__.py
+src/content_engine/services/cloud_providers/base.py
+src/content_engine/services/publisher/__init__.py
+src/content_engine/services/publisher/base.py
+src/content_engine/workers/__init__.py
+src/content_engine/workers/cloud_worker.py
+src/content_engine/workers/local_worker.py
\ No newline at end of file
diff --git a/src/content_engine.egg-info/dependency_links.txt b/src/content_engine.egg-info/dependency_links.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/src/content_engine.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/src/content_engine.egg-info/requires.txt b/src/content_engine.egg-info/requires.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f4cf03b247838eb00f690fe72f2f0cfce4861b0d
--- /dev/null
+++ b/src/content_engine.egg-info/requires.txt
@@ -0,0 +1,22 @@
+fastapi>=0.109.0
+uvicorn[standard]>=0.27.0
+aiohttp>=3.9.0
+sqlalchemy>=2.0.0
+alembic>=1.13.0
+aiosqlite>=0.19.0
+pydantic>=2.5.0
+pydantic-settings>=2.1.0
+jinja2>=3.1.0
+Pillow>=10.2.0
+apscheduler>=3.10.0
+httpx>=0.26.0
+pyyaml>=6.0
+python-multipart>=0.0.6
+
+[cloud]
+replicate>=0.22.0
+
+[dev]
+pytest>=7.4.0
+pytest-asyncio>=0.23.0
+httpx>=0.26.0
diff --git a/src/content_engine.egg-info/top_level.txt b/src/content_engine.egg-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..247e4495dd69d4d68af775b2dc437c5cb81c5f7a
--- /dev/null
+++ b/src/content_engine.egg-info/top_level.txt
@@ -0,0 +1 @@
+content_engine
diff --git a/src/content_engine/__init__.py b/src/content_engine/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbb0878e3a387a55cb966432d26ed7d352f98785
--- /dev/null
+++ b/src/content_engine/__init__.py
@@ -0,0 +1,3 @@
+"""Content Engine - Automated content generation system using ComfyUI."""
+
+__version__ = "0.1.0"
diff --git a/src/content_engine/__pycache__/__init__.cpython-311.pyc b/src/content_engine/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1796a3b01d4bfe85cc59c61d300f6b409fbe73b6
Binary files /dev/null and b/src/content_engine/__pycache__/__init__.cpython-311.pyc differ
diff --git a/src/content_engine/__pycache__/config.cpython-311.pyc b/src/content_engine/__pycache__/config.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d499bda6d04c30261daabc2442bcf673891ca589
Binary files /dev/null and b/src/content_engine/__pycache__/config.cpython-311.pyc differ
diff --git a/src/content_engine/__pycache__/main.cpython-311.pyc b/src/content_engine/__pycache__/main.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..12ce2c9ec59ace85d0c354338b105f0572619047
Binary files /dev/null and b/src/content_engine/__pycache__/main.cpython-311.pyc differ
diff --git a/src/content_engine/api/__init__.py b/src/content_engine/api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..75959421a194a62ef203d5141f9d9d6cf7515f9d
--- /dev/null
+++ b/src/content_engine/api/__init__.py
@@ -0,0 +1 @@
+"""FastAPI route modules."""
diff --git a/src/content_engine/api/__pycache__/__init__.cpython-311.pyc b/src/content_engine/api/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..40b9d814e71102534c7bc05848deefd1ca457bfc
Binary files /dev/null and b/src/content_engine/api/__pycache__/__init__.cpython-311.pyc differ
diff --git a/src/content_engine/api/__pycache__/routes_catalog.cpython-311.pyc b/src/content_engine/api/__pycache__/routes_catalog.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..449604603314c28912fc5307e90c0b01e401f7d0
Binary files /dev/null and b/src/content_engine/api/__pycache__/routes_catalog.cpython-311.pyc differ
diff --git a/src/content_engine/api/__pycache__/routes_generation.cpython-311.pyc b/src/content_engine/api/__pycache__/routes_generation.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5a506bef64fab43c7c69dda27e7fe3ea2c3f37b9
Binary files /dev/null and b/src/content_engine/api/__pycache__/routes_generation.cpython-311.pyc differ
diff --git a/src/content_engine/api/__pycache__/routes_pod.cpython-311.pyc b/src/content_engine/api/__pycache__/routes_pod.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..57987f5a6bc44a0889e5b466cbfc258bf2c28b86
Binary files /dev/null and b/src/content_engine/api/__pycache__/routes_pod.cpython-311.pyc differ
diff --git a/src/content_engine/api/__pycache__/routes_system.cpython-311.pyc b/src/content_engine/api/__pycache__/routes_system.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..94e58ae07020cd0037943834d025c9e5f7ecf8bf
Binary files /dev/null and b/src/content_engine/api/__pycache__/routes_system.cpython-311.pyc differ
diff --git a/src/content_engine/api/__pycache__/routes_training.cpython-311.pyc b/src/content_engine/api/__pycache__/routes_training.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3df9debd31052d2d03df5eb4dfdff4aece382004
Binary files /dev/null and b/src/content_engine/api/__pycache__/routes_training.cpython-311.pyc differ
diff --git a/src/content_engine/api/__pycache__/routes_ui.cpython-311.pyc b/src/content_engine/api/__pycache__/routes_ui.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c59e21df8e9f3b30e659e3a579a5841437cfa88d
Binary files /dev/null and b/src/content_engine/api/__pycache__/routes_ui.cpython-311.pyc differ
diff --git a/src/content_engine/api/__pycache__/routes_video.cpython-311.pyc b/src/content_engine/api/__pycache__/routes_video.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..77a524c2099162076bf7c8bafdaa638fb95444b1
Binary files /dev/null and b/src/content_engine/api/__pycache__/routes_video.cpython-311.pyc differ
diff --git a/src/content_engine/api/routes_catalog.py b/src/content_engine/api/routes_catalog.py
new file mode 100644
index 0000000000000000000000000000000000000000..926b0af14796efe3d42b7a900c8929e3d4628b79
--- /dev/null
+++ b/src/content_engine/api/routes_catalog.py
@@ -0,0 +1,169 @@
+"""Catalog API routes — query and manage generated images."""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from fastapi import APIRouter, HTTPException
+from fastapi.responses import FileResponse
+
+from content_engine.models.schemas import ImageResponse
+
+router = APIRouter(prefix="/api", tags=["catalog"])
+
+_catalog = None
+
+
+def init_routes(catalog):
+ """Initialize route dependencies (called once from main.py at startup)."""
+ global _catalog  # module-level singleton consumed by every handler below
+ _catalog = catalog
+
+
+@router.get("/images", response_model=list[ImageResponse])
+async def list_images(
+ character_id: str | None = None,
+ content_rating: str | None = None,
+ template_id: str | None = None,
+ is_approved: bool | None = None,
+ is_published: bool | None = None,
+ pose: str | None = None,
+ outfit: str | None = None,
+ emotion: str | None = None,
+ limit: int = 50,
+ offset: int = 0,
+):
+ """Search and list generated images with optional filters."""
+ if _catalog is None:
+ raise HTTPException(503, "Catalog not initialized")
+
+ images = await _catalog.search(
+ character_id=character_id,
+ content_rating=content_rating,
+ template_id=template_id,
+ is_approved=is_approved,
+ is_published=is_published,
+ pose=pose,
+ outfit=outfit,
+ emotion=emotion,
+ limit=limit,
+ offset=offset,
+ )
+
+ return [
+ ImageResponse(
+ id=img.id,
+ character_id=img.character_id,
+ template_id=img.template_id,
+ content_rating=img.content_rating,
+ file_path=img.file_path,
+ seed=img.seed,
+ pose=img.pose,
+ outfit=img.outfit,
+ emotion=img.emotion,
+ camera_angle=img.camera_angle,
+ lighting=img.lighting,
+ scene=img.scene,
+ quality_score=img.quality_score,
+ is_approved=img.is_approved,
+ is_published=img.is_published,
+ created_at=img.created_at,
+ )
+ for img in images
+ ]
+
+
+@router.get("/images/{image_id}", response_model=ImageResponse)
+async def get_image(image_id: str):
+ """Get a single image by ID."""
+ if _catalog is None:
+ raise HTTPException(503, "Catalog not initialized")
+
+ img = await _catalog.get_image(image_id)
+ if img is None:
+ raise HTTPException(404, f"Image not found: {image_id}")
+
+ return ImageResponse(
+ id=img.id,
+ character_id=img.character_id,
+ template_id=img.template_id,
+ content_rating=img.content_rating,
+ file_path=img.file_path,
+ seed=img.seed,
+ pose=img.pose,
+ outfit=img.outfit,
+ emotion=img.emotion,
+ camera_angle=img.camera_angle,
+ lighting=img.lighting,
+ scene=img.scene,
+ quality_score=img.quality_score,
+ is_approved=img.is_approved,
+ is_published=img.is_published,
+ created_at=img.created_at,
+ )
+
+
+@router.get("/images/{image_id}/file")
+async def serve_image_file(image_id: str):
+ """Serve the actual image file for display in the UI."""
+ if _catalog is None:
+ raise HTTPException(503, "Catalog not initialized")
+
+ img = await _catalog.get_image(image_id)
+ if img is None:
+ raise HTTPException(404, f"Image not found: {image_id}")
+
+ file_path = Path(img.file_path)
+ if not file_path.exists():
+ raise HTTPException(404, f"Image file not found on disk")
+
+ return FileResponse(
+ file_path,
+ media_type="image/png",
+ headers={"Cache-Control": "public, max-age=3600"},
+ )
+
+
+@router.get("/images/{image_id}/download")
+async def download_image_file(image_id: str):
+ """Download the image file as an attachment."""
+ if _catalog is None:
+ raise HTTPException(503, "Catalog not initialized")
+
+ img = await _catalog.get_image(image_id)
+ if img is None:
+ raise HTTPException(404, f"Image not found: {image_id}")
+
+ file_path = Path(img.file_path)
+ if not file_path.exists():
+ raise HTTPException(404, "Image file not found on disk")
+
+ return FileResponse(
+ file_path,
+ media_type="image/png",
+ filename=file_path.name,
+ )
+
+
+@router.post("/images/{image_id}/approve")
+async def approve_image(image_id: str):
+ """Mark an image as approved for publishing."""
+ if _catalog is None:
+ raise HTTPException(503, "Catalog not initialized")
+
+ success = await _catalog.approve_image(image_id)
+ if not success:
+ raise HTTPException(404, f"Image not found: {image_id}")
+ return {"status": "approved", "image_id": image_id}
+
+
+@router.delete("/images/{image_id}")
+async def delete_image(image_id: str):
+ """Delete an image from the catalog and disk."""
+ if _catalog is None:
+ raise HTTPException(503, "Catalog not initialized")
+
+ success = await _catalog.delete_image(image_id)
+ if not success:
+ raise HTTPException(404, f"Image not found: {image_id}")
+ return {"status": "deleted", "image_id": image_id}
diff --git a/src/content_engine/api/routes_generation.py b/src/content_engine/api/routes_generation.py
new file mode 100644
index 0000000000000000000000000000000000000000..65f41d93c9e6c9f5adfd86beca8b1d7126f66d2d
--- /dev/null
+++ b/src/content_engine/api/routes_generation.py
@@ -0,0 +1,604 @@
+"""Generation API routes — submit single and batch image generation jobs."""
+
+from __future__ import annotations
+
+import asyncio
+import logging
+import uuid
+
+from fastapi import APIRouter, File, Form, HTTPException, UploadFile
+
+from content_engine.models.schemas import (
+ BatchRequest,
+ BatchStatusResponse,
+ GenerationRequest,
+ GenerationResponse,
+)
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(prefix="/api", tags=["generation"])
+
+# These are injected at startup from main.py
+_local_worker = None
+_template_engine = None
+_variation_engine = None
+_character_profiles = None
+_wavespeed_provider = None
+_runpod_provider = None
+_catalog = None
+_comfyui_client = None
+
+# In-memory batch tracking (v1 — move to DB for production)
+_batch_tracker: dict[str, dict] = {}
+
+
+def init_routes(local_worker, template_engine, variation_engine, character_profiles,
+ wavespeed_provider=None, catalog=None, comfyui_client=None):
+ """Initialize route dependencies. Called from main.py on startup (RunPod is injected separately via set_runpod_provider)."""
+ global _local_worker, _template_engine, _variation_engine, _character_profiles
+ global _wavespeed_provider, _catalog, _comfyui_client
+ _local_worker = local_worker
+ _template_engine = template_engine
+ _variation_engine = variation_engine
+ _character_profiles = character_profiles
+ _wavespeed_provider = wavespeed_provider  # None when WAVESPEED_API_KEY is unset
+ _catalog = catalog
+ _comfyui_client = comfyui_client
+
+
+def set_runpod_provider(provider):
+ """Set RunPod generation provider. Called from main.py after init_routes."""
+ global _runpod_provider  # None until configured; handlers 503 in that case
+ _runpod_provider = provider
+
+
+@router.post("/generate", response_model=GenerationResponse)
+async def generate_single(request: GenerationRequest):
+ """Submit a single image generation job.
+
+ The job runs asynchronously — returns immediately with a job ID.
+ """
+ if _local_worker is None:
+ raise HTTPException(503, "Worker not initialized")
+
+ job_id = str(uuid.uuid4())
+
+ # Fire and forget — run in background
+ asyncio.create_task(
+ _run_generation(
+ job_id=job_id,
+ character_id=request.character_id,
+ template_id=request.template_id,
+ content_rating=request.content_rating,
+ positive_prompt=request.positive_prompt,
+ negative_prompt=request.negative_prompt,
+ checkpoint=request.checkpoint,
+ loras=[l.model_dump() for l in request.loras] if request.loras else None,
+ seed=request.seed or -1,
+ steps=request.steps,
+ cfg=request.cfg,
+ sampler=request.sampler,
+ scheduler=request.scheduler,
+ width=request.width,
+ height=request.height,
+ variables=request.variables,
+ )
+ )
+
+ return GenerationResponse(job_id=job_id, status="queued", backend="local")
+
+
+@router.post("/batch", response_model=GenerationResponse)
+async def generate_batch(request: BatchRequest):
+ """Submit a batch of variation-based generation jobs.
+
+ Uses the variation engine to generate multiple images with
+ different poses, outfits, emotions, etc.
+ """
+ if _local_worker is None or _variation_engine is None:
+ raise HTTPException(503, "Services not initialized")
+ if _character_profiles is None:
+ raise HTTPException(503, "No character profiles loaded")
+
+ character = _character_profiles.get(request.character_id)
+ if character is None:
+ raise HTTPException(404, f"Character not found: {request.character_id}")
+
+ # Generate variation jobs
+ jobs = _variation_engine.generate_batch(
+ template_id=request.template_id,
+ character=character,
+ content_rating=request.content_rating,
+ count=request.count,
+ variation_mode=request.variation_mode,
+ pin=request.pin,
+ seed_strategy=request.seed_strategy,
+ )
+
+ batch_id = jobs[0].batch_id if jobs else str(uuid.uuid4())
+ _batch_tracker[batch_id] = {
+ "total": len(jobs),
+ "completed": 0,
+ "failed": 0,
+ "pending": len(jobs),
+ "running": 0,
+ }
+
+ # Fire all jobs in background
+ for job in jobs:
+ asyncio.create_task(
+ _run_batch_job(batch_id, job)
+ )
+
+ logger.info("Batch %s: %d jobs queued", batch_id, len(jobs))
+ return GenerationResponse(
+ job_id=batch_id, batch_id=batch_id, status="queued", backend="local"
+ )
+
+
+@router.get("/batch/{batch_id}/status", response_model=BatchStatusResponse)
+async def get_batch_status(batch_id: str):
+ """Get the status of a batch generation."""
+ if batch_id not in _batch_tracker:
+ raise HTTPException(404, f"Batch not found: {batch_id}")
+ tracker = _batch_tracker[batch_id]
+ return BatchStatusResponse(
+ batch_id=batch_id,
+ total_jobs=tracker["total"],
+ completed=tracker["completed"],
+ failed=tracker["failed"],
+ pending=tracker["pending"],
+ running=tracker["running"],
+ )
+
+
+@router.post("/generate/cloud", response_model=GenerationResponse)
+async def generate_cloud(request: GenerationRequest):
+ """Generate an image using WaveSpeed cloud API (NanoBanana, SeeDream).
+
+ Supported models via the 'checkpoint' field:
+ - nano-banana, nano-banana-pro
+ - seedream-3, seedream-3.1, seedream-4, seedream-4.5
+ """
+ if _wavespeed_provider is None:
+ raise HTTPException(503, "WaveSpeed cloud provider not configured. Set WAVESPEED_API_KEY in .env")
+
+ job_id = str(uuid.uuid4())
+
+ asyncio.create_task(
+ _run_cloud_generation(
+ job_id=job_id,
+ positive_prompt=request.positive_prompt or "",
+ negative_prompt=request.negative_prompt or "",
+ model=request.checkpoint, # Use checkpoint field for model selection
+ width=request.width or 1024,
+ height=request.height or 1024,
+ seed=request.seed or -1,
+ content_rating=request.content_rating,
+ character_id=request.character_id,
+ template_id=request.template_id,
+ variables=request.variables,
+ )
+ )
+
+ return GenerationResponse(job_id=job_id, status="queued", backend="wavespeed")
+
+
+@router.get("/cloud/models")
+async def list_cloud_models():
+ """List available cloud models (WaveSpeed and RunPod)."""
+ return {
+ "wavespeed": {
+ "available": _wavespeed_provider is not None,
+ "models": [
+ {"id": "nano-banana", "name": "NanoBanana", "provider": "Google", "type": "txt2img"},
+ {"id": "nano-banana-pro", "name": "NanoBanana Pro", "provider": "Google", "type": "txt2img"},
+ {"id": "seedream-3", "name": "SeeDream v3", "provider": "ByteDance", "type": "txt2img"},
+ {"id": "seedream-3.1", "name": "SeeDream v3.1", "provider": "ByteDance", "type": "txt2img"},
+ {"id": "seedream-4", "name": "SeeDream v4", "provider": "ByteDance", "type": "txt2img"},
+ {"id": "seedream-4.5", "name": "SeeDream v4.5", "provider": "ByteDance", "type": "txt2img"},
+ ],
+ "edit_models": [
+ {"id": "seedream-4.5-edit", "name": "SeeDream v4.5 Edit", "provider": "ByteDance", "type": "img2img", "price": "$0.04/img"},
+ {"id": "seedream-4-edit", "name": "SeeDream v4 Edit", "provider": "ByteDance", "type": "img2img", "price": "$0.04/img"},
+ {"id": "nano-banana-edit", "name": "NanoBanana Edit", "provider": "Google", "type": "img2img", "price": "$0.038/img"},
+ {"id": "nano-banana-pro-edit", "name": "NanoBanana Pro Edit", "provider": "Google", "type": "img2img", "price": "$0.14/img"},
+ ],
+ },
+ "runpod": {
+ "available": _runpod_provider is not None,
+ "description": "Pay-per-second serverless GPU. Uses your deployed endpoint.",
+ "pricing": "~$0.00025/sec (RTX 4090)",
+ },
+ }
+
+
+@router.post("/generate/runpod", response_model=GenerationResponse)
+async def generate_runpod(request: GenerationRequest):
+ """Generate an image using RunPod serverless GPU.
+
+ Uses your deployed RunPod endpoint. Pay per second of GPU time.
+ Requires RUNPOD_API_KEY and RUNPOD_ENDPOINT_ID in .env.
+ """
+ if _runpod_provider is None:
+ raise HTTPException(
+ 503,
+ "RunPod not configured. Set RUNPOD_API_KEY and RUNPOD_ENDPOINT_ID in .env"
+ )
+
+ job_id = str(uuid.uuid4())
+
+ asyncio.create_task(
+ _run_runpod_generation(
+ job_id=job_id,
+ positive_prompt=request.positive_prompt or "",
+ negative_prompt=request.negative_prompt or "",
+ checkpoint=request.checkpoint,
+ loras=request.loras,
+ seed=request.seed or -1,
+ steps=request.steps or 28,
+ cfg=request.cfg or 7.0,
+ width=request.width or 832,
+ height=request.height or 1216,
+ character_id=request.character_id,
+ template_id=request.template_id,
+ content_rating=request.content_rating,
+ )
+ )
+
+ return GenerationResponse(job_id=job_id, status="queued", backend="runpod")
+
+
+@router.post("/generate/img2img", response_model=GenerationResponse)
+async def generate_img2img(
+ image: UploadFile = File(...),
+ positive_prompt: str = Form(""),
+ negative_prompt: str = Form(""),
+ character_id: str | None = Form(None),
+ template_id: str | None = Form(None),
+ variables_json: str = Form("{}"),
+ content_rating: str = Form("sfw"),
+ checkpoint: str | None = Form(None),
+ seed: int = Form(-1),
+ steps: int = Form(28),
+ cfg: float = Form(7.0),
+ denoise: float = Form(0.65),
+ width: int | None = Form(None),
+ height: int | None = Form(None),
+ backend: str = Form("local"),
+):
+ """Generate an image using a reference image (img2img).
+
+ Supports both local (ComfyUI) and cloud (WaveSpeed edit) backends.
+ - Local: denoise-based img2img via ComfyUI
+ - Cloud: prompt-guided editing via SeeDream/NanoBanana Edit APIs
+ """
+ import json as json_module
+
+ job_id = str(uuid.uuid4())
+ image_bytes = await image.read()
+
+ # Parse template variables
+ try:
+ variables = json_module.loads(variables_json) if variables_json else {}
+ except json_module.JSONDecodeError:
+ variables = {}
+
+ if backend == "cloud":
+ # Cloud img2img via WaveSpeed Edit API
+ if _wavespeed_provider is None:
+ raise HTTPException(503, "WaveSpeed cloud provider not configured. Set WAVESPEED_API_KEY in .env")
+
+ asyncio.create_task(
+ _run_cloud_img2img(
+ job_id=job_id,
+ image_bytes=image_bytes,
+ positive_prompt=positive_prompt,
+ model=checkpoint,
+ content_rating=content_rating,
+ character_id=character_id,
+ template_id=template_id,
+ variables=variables,
+ width=width,
+ height=height,
+ )
+ )
+ return GenerationResponse(job_id=job_id, status="queued", backend="wavespeed")
+
+ # Local img2img via ComfyUI
+ if _local_worker is None or _comfyui_client is None:
+ raise HTTPException(503, "Worker not initialized")
+
+ ref_filename = f"ref_{job_id[:8]}.png"
+
+ try:
+ uploaded_name = await _comfyui_client.upload_image(image_bytes, ref_filename)
+ except Exception as e:
+ raise HTTPException(500, f"Failed to upload reference image to ComfyUI: {e}")
+
+ asyncio.create_task(
+ _run_generation(
+ job_id=job_id,
+ character_id=character_id,
+ template_id=template_id,
+ variables=variables,
+ content_rating=content_rating,
+ positive_prompt=positive_prompt,
+ negative_prompt=negative_prompt,
+ checkpoint=checkpoint,
+ seed=seed,
+ steps=steps,
+ cfg=cfg,
+ width=width,
+ height=height,
+ denoise=denoise,
+ reference_image=uploaded_name,
+ mode="img2img",
+ )
+ )
+
+ return GenerationResponse(job_id=job_id, status="queued", backend="local")
+
+
+async def _run_cloud_generation(
+ *,
+ job_id: str,
+ positive_prompt: str,
+ negative_prompt: str,
+ model: str | None,
+ width: int,
+ height: int,
+ seed: int,
+ content_rating: str,
+ character_id: str | None,
+ template_id: str | None,
+ variables: dict | None,
+):
+ """Background task to run a WaveSpeed cloud generation; errors are logged, never raised."""
+ try:
+ # Apply template rendering if a template is selected
+ final_positive = positive_prompt
+ final_negative = negative_prompt
+ if template_id and _template_engine:
+ try:
+ rendered = _template_engine.render(template_id, variables or {})
+ # Template prompt becomes the base; user prompt is appended if provided
+ final_positive = rendered.positive_prompt
+ if positive_prompt:
+ final_positive = f"{final_positive}, {positive_prompt}"
+ final_negative = rendered.negative_prompt
+ if negative_prompt:
+ final_negative = f"{final_negative}, {negative_prompt}"
+ # Use template dimensions if user didn't override
+ if rendered.template.width:
+ width = rendered.template.width
+ if rendered.template.height:
+ height = rendered.template.height
+ logger.info("Cloud gen: applied template '%s'", template_id)
+ except Exception:
+ logger.warning("Failed to render template '%s', using raw prompt", template_id, exc_info=True)
+
+ result = await _wavespeed_provider.generate(
+ positive_prompt=final_positive,
+ negative_prompt=final_negative,
+ model=model,
+ width=width,
+ height=height,
+ seed=seed,
+ )
+
+ if _catalog:
+ # Save image to disk
+ output_path = _catalog.resolve_output_path(
+ character_id=character_id or "cloud",
+ content_rating=content_rating,
+ filename=f"wavespeed_{job_id[:8]}.png",
+ )
+ output_path.write_bytes(result.image_bytes)
+
+ # Record in catalog
+ await _catalog.insert_image(
+ file_path=str(output_path),
+ image_bytes=result.image_bytes,
+ positive_prompt=final_positive,  # store the rendered prompt actually sent (matches _run_cloud_img2img)
+ negative_prompt=final_negative,
+ character_id=character_id,
+ template_id=template_id,
+ content_rating=content_rating,
+ checkpoint=model or "seedream-4.5",
+ seed=seed if seed >= 0 else None,  # -1 means provider-chosen seed; store NULL
+ width=width,
+ height=height,
+ generation_backend="wavespeed",
+ generation_time_seconds=result.generation_time_seconds,
+ variables=variables,
+ )
+ logger.info("Cloud generation saved: %s", output_path)
+
+ except Exception:
+ logger.error("Cloud generation failed for job %s", job_id, exc_info=True)
+
+
+async def _run_cloud_img2img(
+ *,
+ job_id: str,
+ image_bytes: bytes,
+ positive_prompt: str,
+ model: str | None,
+ content_rating: str,
+ character_id: str | None,
+ template_id: str | None,
+ variables: dict | None,
+ width: int | None,
+ height: int | None,
+):
+ """Background task to run a WaveSpeed cloud image edit (img2img); errors are logged, never raised."""
+ try:
+ # Apply template rendering if a template is selected
+ final_prompt = positive_prompt
+ if template_id and _template_engine:
+ try:
+ rendered = _template_engine.render(template_id, variables or {})
+ final_prompt = rendered.positive_prompt
+ if positive_prompt:
+ final_prompt = f"{final_prompt}, {positive_prompt}"
+ logger.info("Cloud img2img: applied template '%s'", template_id)
+ except Exception:
+ logger.warning("Failed to render template '%s', using raw prompt", template_id, exc_info=True)
+
+ # Clean up prompt — remove empty Jinja2 artifacts and leading/trailing commas
+ final_prompt = ", ".join(p.strip() for p in final_prompt.split(",") if p.strip())
+
+ if not final_prompt:
+ logger.error("Cloud img2img: empty prompt after template rendering, cannot proceed")
+ return  # job silently dropped; caller already returned the job id
+
+ # Build size string if dimensions provided
+ # WaveSpeed edit API requires output size >= 3686400 pixels (~1920x1920)
+ # If dimensions are too small, omit size to let API use input image dimensions
+ size = None
+ if width and height and (width * height) >= 3686400:
+ size = f"{width}x{height}"
+
+ result = await _wavespeed_provider.edit_image(
+ prompt=final_prompt,
+ image_bytes=image_bytes,
+ model=model,
+ size=size,
+ )
+
+ if _catalog:
+ output_path = _catalog.resolve_output_path(
+ character_id=character_id or "cloud",
+ content_rating=content_rating,
+ filename=f"wavespeed_edit_{job_id[:8]}.png",
+ )
+ output_path.write_bytes(result.image_bytes)
+
+ await _catalog.insert_image(
+ file_path=str(output_path),
+ image_bytes=result.image_bytes,
+ character_id=character_id,
+ template_id=template_id,
+ content_rating=content_rating,
+ positive_prompt=final_prompt,
+ negative_prompt="",
+ checkpoint=model or "seedream-4.5-edit",
+ width=width or 0,  # 0 = dimensions not supplied by caller
+ height=height or 0,
+ generation_backend="wavespeed-edit",
+ generation_time_seconds=result.generation_time_seconds,
+ variables=variables,
+ )
+ logger.info("Cloud img2img saved: %s", output_path)
+
+ except Exception:
+ logger.error("Cloud img2img failed for job %s", job_id, exc_info=True)
+
+
+async def _run_runpod_generation(
+ *,
+ job_id: str,
+ positive_prompt: str,
+ negative_prompt: str,
+ checkpoint: str | None,
+ loras: list | None,
+ seed: int,
+ steps: int,
+ cfg: float,
+ width: int,
+ height: int,
+ character_id: str | None,
+ template_id: str | None,
+ content_rating: str,
+):
+ """Background task to run a generation on RunPod serverless; errors are logged, never raised."""
+ try:
+ # Resolve character/template prompts if provided
+ final_prompt = positive_prompt
+ final_negative = negative_prompt
+
+ if character_id and _character_profiles:
+ character = _character_profiles.get(character_id)
+ if character:
+ final_prompt = f"{character.trigger_word}, {positive_prompt}"
+
+ # Submit to RunPod
+ runpod_job_id = await _runpod_provider.submit_generation(
+ positive_prompt=final_prompt,
+ negative_prompt=final_negative,
+ checkpoint=checkpoint or "realisticVisionV51_v51VAE",
+ lora_name=loras[0].name if loras else None,  # only the first LoRA is forwarded to this endpoint
+ lora_strength=loras[0].strength if loras else 0.85,
+ seed=seed,
+ steps=steps,
+ cfg=cfg,
+ width=width,
+ height=height,
+ )
+
+ # Wait for completion and get result
+ result = await _runpod_provider.wait_for_completion(runpod_job_id)
+
+ # Save to catalog
+ if _catalog:
+ # insert_image persists the bytes and returns the saved output path
+ output_path = await _catalog.insert_image(
+ image_bytes=result.image_bytes,
+ character_id=character_id or "unknown",
+ content_rating=content_rating,
+ job_id=job_id,
+ positive_prompt=final_prompt,
+ negative_prompt=final_negative,
+ checkpoint=checkpoint,
+ seed=seed,
+ steps=steps,
+ cfg=cfg,
+ width=width,
+ height=height,
+ generation_backend="runpod",
+ generation_time_seconds=result.generation_time_seconds,
+ )
+ logger.info("RunPod generation saved: %s (%.1fs)", output_path, result.generation_time_seconds)
+
+ except Exception:
+ logger.error("RunPod generation failed for job %s", job_id, exc_info=True)
+
+
+async def _run_generation(**kwargs):
+ """Background task to run a single local generation; failures are logged, never raised."""
+ try:
+ # Remove mode param — it's used by the router, not the worker
+ kwargs.pop("mode", None)
+ await _local_worker.process_job(**kwargs)
+ except Exception:
+ logger.error("Generation failed for job %s", kwargs.get("job_id"), exc_info=True)
+
+
+async def _run_batch_job(batch_id: str, job):
+ """Background task to run a single job within a batch, updating the batch's in-memory counters."""
+ tracker = _batch_tracker.get(batch_id)  # may be None if tracker entry was never created
+ if tracker:
+ tracker["pending"] -= 1
+ tracker["running"] += 1
+ try:
+ await _local_worker.process_job(
+ job_id=job.job_id,
+ batch_id=job.batch_id,
+ character_id=job.character.id,
+ template_id=job.template_id,
+ content_rating=job.content_rating,
+ loras=[l for l in job.loras],  # shallow copy of the job's LoRA list
+ seed=job.seed,
+ variables=job.variables,
+ )
+ if tracker:
+ tracker["completed"] += 1
+ except Exception:
+ logger.error("Batch job %s failed", job.job_id, exc_info=True)
+ if tracker:
+ tracker["failed"] += 1
+ finally:
+ if tracker:
+ tracker["running"] -= 1  # always decremented, success or failure
diff --git a/src/content_engine/api/routes_pod.py b/src/content_engine/api/routes_pod.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e2b6d12e4c4635a7711afb7e40b9c62064cc90e
--- /dev/null
+++ b/src/content_engine/api/routes_pod.py
@@ -0,0 +1,545 @@
+"""RunPod Pod management routes — start/stop GPU pods for generation and training."""
+
+from __future__ import annotations
+
+import asyncio
+import logging
+import os
+import time
+import uuid
+from typing import Any
+
+import runpod
+from fastapi import APIRouter, File, HTTPException, UploadFile
+from pydantic import BaseModel
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(prefix="/api/pod", tags=["pod"])
+
+# Pod state
+_pod_state = {
+ "pod_id": None,
+ "status": "stopped", # stopped, starting, running, stopping
+ "ip": None,
+ "port": None,
+ "gpu_type": "NVIDIA GeForce RTX 4090",
+ "started_at": None,
+ "cost_per_hour": 0.44,
+}
+
+# Docker image with ComfyUI + FLUX
+COMFYUI_IMAGE = "timpietruskyblibla/runpod-worker-comfy:3.4.0-flux1-dev"
+
+# GPU options
+GPU_OPTIONS = {
+ "NVIDIA GeForce RTX 4090": {"name": "RTX 4090", "vram": 24, "cost": 0.44},
+ "NVIDIA RTX A6000": {"name": "RTX A6000", "vram": 48, "cost": 0.76},
+ "NVIDIA A100 80GB PCIe": {"name": "A100 80GB", "vram": 80, "cost": 1.89},
+}
+
+
+def _get_api_key() -> str:
+ key = os.environ.get("RUNPOD_API_KEY")  # read fresh on every call so env changes take effect
+ if not key:
+ raise HTTPException(503, "RUNPOD_API_KEY not configured")
+ runpod.api_key = key  # side effect: configures the global runpod SDK client
+ return key
+
+
+class StartPodRequest(BaseModel):
+ gpu_type: str = "NVIDIA GeForce RTX 4090"  # must be a key of GPU_OPTIONS
+
+
+class PodStatus(BaseModel):
+ status: str  # stopped | starting | running | stopping (mirrors _pod_state)
+ pod_id: str | None = None
+ ip: str | None = None
+ port: int | None = None
+ gpu_type: str | None = None
+ cost_per_hour: float | None = None
+ uptime_minutes: float | None = None  # only set while running
+ comfyui_url: str | None = None  # http://ip:port once the pod is reachable
+
+
+@router.get("/status", response_model=PodStatus)
+async def get_pod_status():
+ """Get current pod status."""
+ _get_api_key()
+
+ # If we have a pod_id, check its actual status
+ if _pod_state["pod_id"]:
+ try:
+ pod = runpod.get_pod(_pod_state["pod_id"])
+ if pod:
+ desired = pod.get("desiredStatus", "")
+ if desired == "RUNNING":
+ runtime = pod.get("runtime", {})
+ ports = runtime.get("ports", [])
+ for p in ports:
+ if p.get("privatePort") == 8188:
+ _pod_state["ip"] = p.get("ip")
+ _pod_state["port"] = p.get("publicPort")
+ _pod_state["status"] = "running"
+ elif desired == "EXITED":
+ _pod_state["status"] = "stopped"
+ _pod_state["pod_id"] = None
+ else:
+ _pod_state["status"] = "stopped"
+ _pod_state["pod_id"] = None
+ except Exception as e:
+ logger.warning("Failed to check pod: %s", e)
+
+ uptime = None
+ if _pod_state["started_at"] and _pod_state["status"] == "running":
+ uptime = (time.time() - _pod_state["started_at"]) / 60
+
+ comfyui_url = None
+ if _pod_state["ip"] and _pod_state["port"]:
+ comfyui_url = f"http://{_pod_state['ip']}:{_pod_state['port']}"
+
+ return PodStatus(
+ status=_pod_state["status"],
+ pod_id=_pod_state["pod_id"],
+ ip=_pod_state["ip"],
+ port=_pod_state["port"],
+ gpu_type=_pod_state["gpu_type"],
+ cost_per_hour=_pod_state["cost_per_hour"],
+ uptime_minutes=uptime,
+ comfyui_url=comfyui_url,
+ )
+
+
+@router.get("/gpu-options")
+async def list_gpu_options():
+ """List available GPU types."""
+ return {"gpus": GPU_OPTIONS}
+
+
+@router.post("/start")
+async def start_pod(request: StartPodRequest):
+ """Start a GPU pod for generation/training."""
+ _get_api_key()
+
+ if _pod_state["status"] == "running":
+ return {"status": "already_running", "pod_id": _pod_state["pod_id"]}
+
+ if _pod_state["status"] == "starting":
+ return {"status": "starting", "message": "Pod is already starting"}
+
+ gpu_info = GPU_OPTIONS.get(request.gpu_type)
+ if not gpu_info:
+ raise HTTPException(400, f"Unknown GPU type: {request.gpu_type}")
+
+ _pod_state["status"] = "starting"
+ _pod_state["gpu_type"] = request.gpu_type
+ _pod_state["cost_per_hour"] = gpu_info["cost"]
+
+ try:
+ logger.info("Starting RunPod with %s...", request.gpu_type)
+
+ pod = runpod.create_pod(
+ name="content-engine-gpu",
+ image_name=COMFYUI_IMAGE,
+ gpu_type_id=request.gpu_type,
+ volume_in_gb=50, # For models and LoRAs
+ container_disk_in_gb=20,
+ ports="8188/http",
+ env={
+ # Pre-load FLUX model
+ "MODEL_URL": "https://huggingface.co/black-forest-labs/FLUX.1-dev/resolve/main/flux1-dev.safetensors",
+ },
+ )
+
+ _pod_state["pod_id"] = pod["id"]
+ _pod_state["started_at"] = time.time()
+
+ logger.info("Pod created: %s", pod["id"])
+
+ # Start background task to wait for pod ready
+ asyncio.create_task(_wait_for_pod_ready(pod["id"]))
+
+ return {
+ "status": "starting",
+ "pod_id": pod["id"],
+ "message": f"Starting {gpu_info['name']} pod (~2-3 min)",
+ }
+
+ except Exception as e:
+ _pod_state["status"] = "stopped"
+ logger.error("Failed to start pod: %s", e)
+ raise HTTPException(500, f"Failed to start pod: {e}")
+
+
+async def _wait_for_pod_ready(pod_id: str, timeout: int = 300):
+ """Background task: poll RunPod until the pod exposes its ComfyUI port, then mark it running."""
+ start = time.time()
+
+ while time.time() - start < timeout:
+ try:
+ pod = runpod.get_pod(pod_id)
+
+ if pod and pod.get("desiredStatus") == "RUNNING":
+ runtime = pod.get("runtime", {})
+ ports = runtime.get("ports", [])
+
+ for p in ports:
+ if p.get("privatePort") == 8188:  # ComfyUI's HTTP port
+ ip = p.get("ip")
+ port = p.get("publicPort")
+
+ if ip and port:
+ _pod_state["ip"] = ip
+ _pod_state["port"] = int(port)
+ _pod_state["status"] = "running"
+ logger.info("Pod ready at %s:%s", ip, port)
+ return
+
+ except Exception as e:
+ logger.debug("Waiting for pod: %s", e)  # transient API errors are expected during boot
+
+ await asyncio.sleep(5)  # poll interval
+
+ logger.error("Pod did not become ready within %ds", timeout)
+ _pod_state["status"] = "stopped"  # give up — NOTE(review): pod may still be billed; confirm cleanup
+
+
+@router.post("/stop")
+async def stop_pod():
+ """Stop the GPU pod."""
+ _get_api_key()
+
+ if not _pod_state["pod_id"]:
+ return {"status": "already_stopped"}
+
+ if _pod_state["status"] == "stopping":
+ return {"status": "stopping", "message": "Pod is already stopping"}
+
+ _pod_state["status"] = "stopping"
+
+ try:
+ pod_id = _pod_state["pod_id"]
+ logger.info("Stopping pod: %s", pod_id)
+
+ runpod.terminate_pod(pod_id)
+
+ _pod_state["pod_id"] = None
+ _pod_state["ip"] = None
+ _pod_state["port"] = None
+ _pod_state["status"] = "stopped"
+ _pod_state["started_at"] = None
+
+ logger.info("Pod stopped")
+ return {"status": "stopped", "message": "Pod terminated"}
+
+ except Exception as e:
+ logger.error("Failed to stop pod: %s", e)
+ _pod_state["status"] = "running" # Revert
+ raise HTTPException(500, f"Failed to stop pod: {e}")
+
+
+@router.get("/loras")
+async def list_pod_loras():
+ """List LoRAs available on the pod."""
+ if _pod_state["status"] != "running" or not _pod_state["ip"]:
+ return {"loras": [], "message": "Pod not running"}
+
+ try:
+ import httpx
+ async with httpx.AsyncClient(timeout=30) as client:
+ url = f"http://{_pod_state['ip']}:{_pod_state['port']}/object_info/LoraLoader"
+ resp = await client.get(url)
+ if resp.status_code == 200:
+ data = resp.json()
+ loras = data.get("LoraLoader", {}).get("input", {}).get("required", {}).get("lora_name", [[]])[0]
+ return {"loras": loras if isinstance(loras, list) else []}
+ except Exception as e:
+ logger.warning("Failed to list pod LoRAs: %s", e)
+
+ return {"loras": [], "comfyui_url": f"http://{_pod_state['ip']}:{_pod_state['port']}"}
+
+
+@router.post("/upload-lora")
+async def upload_lora_to_pod(
+ file: UploadFile = File(...),
+):
+ """Upload a LoRA file to the running pod."""
+ from fastapi import UploadFile, File
+ import httpx
+
+ if _pod_state["status"] != "running":
+ raise HTTPException(400, "Pod not running - start it first")
+
+ if not file.filename.endswith(".safetensors"):
+ raise HTTPException(400, "Only .safetensors files supported")
+
+ try:
+ content = await file.read()
+
+ async with httpx.AsyncClient(timeout=120) as client:
+ # Upload to ComfyUI's models/loras directory
+ url = f"http://{_pod_state['ip']}:{_pod_state['port']}/upload/image"
+ files = {"image": (file.filename, content, "application/octet-stream")}
+ data = {"subfolder": "loras", "type": "input"}
+
+ resp = await client.post(url, files=files, data=data)
+
+ if resp.status_code == 200:
+ return {"status": "uploaded", "filename": file.filename}
+ else:
+ raise HTTPException(500, f"Upload failed: {resp.text}")
+
+ except httpx.TimeoutException:
+ raise HTTPException(504, "Upload timed out")
+ except Exception as e:
+ raise HTTPException(500, f"Upload failed: {e}")
+
+
+class PodGenerateRequest(BaseModel):
+ prompt: str
+ negative_prompt: str = ""
+ width: int = 1024
+ height: int = 1024
+ steps: int = 28
+ cfg: float = 3.5
+ seed: int = -1  # -1 = pick a random seed server-side
+ lora_name: str | None = None  # filename as listed by /api/pod/loras
+ lora_strength: float = 0.85
+ character_id: str | None = None
+ template_id: str | None = None
+ content_rating: str = "sfw"
+
+
+# In-memory job tracking for pod generation
+_pod_jobs: dict[str, dict] = {}
+
+
@router.post("/generate")
async def generate_on_pod(request: PodGenerateRequest):
    """Generate an image using the running pod's ComfyUI.

    Submits a FLUX workflow to the pod, records the job in _pod_jobs, and
    spawns a background poller that downloads the result when ready.

    Returns:
        {"job_id", "status", "seed"} — poll GET /jobs/{job_id} for progress.
    Raises:
        HTTPException 400 if the pod is not running, 500 on submit failure.
    """
    import httpx
    import random

    if _pod_state["status"] != "running":
        raise HTTPException(400, "Pod not running - start it first")

    job_id = str(uuid.uuid4())[:8]
    # A negative request seed means "randomize".
    seed = request.seed if request.seed >= 0 else random.randint(0, 2**32 - 1)

    # Build ComfyUI workflow
    workflow = _build_flux_workflow(
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        width=request.width,
        height=request.height,
        steps=request.steps,
        cfg=request.cfg,
        seed=seed,
        lora_name=request.lora_name,
        lora_strength=request.lora_strength,
    )

    try:
        async with httpx.AsyncClient(timeout=30) as client:
            url = f"http://{_pod_state['ip']}:{_pod_state['port']}/prompt"
            resp = await client.post(url, json={"prompt": workflow})
            resp.raise_for_status()
            prompt_id = resp.json()["prompt_id"]

        _pod_jobs[job_id] = {
            "prompt_id": prompt_id,
            "status": "running",
            "seed": seed,
            "started_at": time.time(),
        }

        logger.info("Pod generation started: %s -> %s", job_id, prompt_id)

        # Keep a strong reference to the poller task: the event loop only holds
        # weak references to tasks, so the original's bare create_task() could
        # be garbage-collected before finishing.
        pending = getattr(generate_on_pod, "_pending_tasks", None)
        if pending is None:
            pending = set()
            generate_on_pod._pending_tasks = pending
        task = asyncio.create_task(_poll_pod_job(job_id, prompt_id, request.content_rating))
        pending.add(task)
        task.add_done_callback(pending.discard)

        return {
            "job_id": job_id,
            "status": "running",
            "seed": seed,
        }

    except Exception as e:
        logger.error("Pod generation failed: %s", e)
        raise HTTPException(500, f"Generation failed: {e}")
+
+
async def _poll_pod_job(job_id: str, prompt_id: str, content_rating: str):
    """Poll ComfyUI for job completion and save the result.

    Runs as a background task. Polls /history/{prompt_id} every 2 s for up to
    5 minutes; on success downloads the first "images" output, writes it under
    <output>/pod/<content_rating>/raw/pod_<job_id>.png, marks the job
    completed in _pod_jobs, and best-effort catalogs it. On timeout the job
    is marked failed.
    """
    import httpx
    from pathlib import Path  # NOTE(review): Path appears unused in this function

    start = time.time()
    timeout = 300  # 5 minutes

    async with httpx.AsyncClient(timeout=60) as client:
        while time.time() - start < timeout:
            try:
                url = f"http://{_pod_state['ip']}:{_pod_state['port']}/history/{prompt_id}"
                resp = await client.get(url)

                if resp.status_code == 200:
                    data = resp.json()
                    if prompt_id in data:
                        outputs = data[prompt_id].get("outputs", {})

                        # Find SaveImage output
                        for node_id, node_output in outputs.items():
                            if "images" in node_output:
                                image_info = node_output["images"][0]
                                filename = image_info["filename"]
                                subfolder = image_info.get("subfolder", "")

                                # Download the image
                                img_url = f"http://{_pod_state['ip']}:{_pod_state['port']}/view"
                                params = {"filename": filename}
                                if subfolder:
                                    params["subfolder"] = subfolder

                                img_resp = await client.get(img_url, params=params)
                                # On a non-200 download we fall through and
                                # keep polling until success or timeout.
                                if img_resp.status_code == 200:
                                    # Save to local output directory
                                    from content_engine.config import settings
                                    output_dir = settings.paths.output_dir / "pod" / content_rating / "raw"
                                    output_dir.mkdir(parents=True, exist_ok=True)

                                    local_path = output_dir / f"pod_{job_id}.png"
                                    local_path.write_bytes(img_resp.content)

                                    _pod_jobs[job_id]["status"] = "completed"
                                    _pod_jobs[job_id]["output_path"] = str(local_path)
                                    _pod_jobs[job_id]["completed_at"] = time.time()

                                    logger.info("Pod generation completed: %s -> %s", job_id, local_path)

                                    # Catalog the image (best-effort: a catalog
                                    # failure does not fail the job).
                                    try:
                                        from content_engine.services.catalog import CatalogService
                                        catalog = CatalogService()
                                        await catalog.add_image(
                                            image_path=local_path,
                                            content_rating=content_rating,
                                            seed=_pod_jobs[job_id].get("seed"),
                                            backend="runpod-pod",
                                        )
                                    except Exception as e:
                                        logger.warning("Failed to catalog pod image: %s", e)

                                    return

            except Exception as e:
                # Transient poll errors (connection refused, bad JSON) are
                # expected while the pod warms up; log at debug and retry.
                logger.debug("Polling pod job: %s", e)

            await asyncio.sleep(2)

    _pod_jobs[job_id]["status"] = "failed"
    _pod_jobs[job_id]["error"] = "Timeout waiting for generation"
    logger.error("Pod generation timed out: %s", job_id)
+
+
@router.get("/jobs/{job_id}")
async def get_pod_job(job_id: str):
    """Return the tracked state of a pod generation job; 404 if unknown."""
    if job_id not in _pod_jobs:
        raise HTTPException(404, "Job not found")
    return _pod_jobs[job_id]
+
+
+def _build_flux_workflow(
+ prompt: str,
+ negative_prompt: str,
+ width: int,
+ height: int,
+ steps: int,
+ cfg: float,
+ seed: int,
+ lora_name: str | None,
+ lora_strength: float,
+) -> dict:
+ """Build a ComfyUI workflow for FLUX generation."""
+
+ # Basic FLUX workflow - compatible with ComfyUI FLUX setup
+ workflow = {
+ "4": {
+ "class_type": "CheckpointLoaderSimple",
+ "inputs": {"ckpt_name": "flux1-dev.safetensors"},
+ },
+ "6": {
+ "class_type": "CLIPTextEncode",
+ "inputs": {
+ "text": prompt,
+ "clip": ["4", 1],
+ },
+ },
+ "7": {
+ "class_type": "CLIPTextEncode",
+ "inputs": {
+ "text": negative_prompt or "",
+ "clip": ["4", 1],
+ },
+ },
+ "5": {
+ "class_type": "EmptyLatentImage",
+ "inputs": {
+ "width": width,
+ "height": height,
+ "batch_size": 1,
+ },
+ },
+ "3": {
+ "class_type": "KSampler",
+ "inputs": {
+ "seed": seed,
+ "steps": steps,
+ "cfg": cfg,
+ "sampler_name": "euler",
+ "scheduler": "simple",
+ "denoise": 1.0,
+ "model": ["4", 0],
+ "positive": ["6", 0],
+ "negative": ["7", 0],
+ "latent_image": ["5", 0],
+ },
+ },
+ "8": {
+ "class_type": "VAEDecode",
+ "inputs": {
+ "samples": ["3", 0],
+ "vae": ["4", 2],
+ },
+ },
+ "9": {
+ "class_type": "SaveImage",
+ "inputs": {
+ "filename_prefix": "flux_pod",
+ "images": ["8", 0],
+ },
+ },
+ }
+
+ # Add LoRA if specified
+ if lora_name:
+ workflow["10"] = {
+ "class_type": "LoraLoader",
+ "inputs": {
+ "lora_name": lora_name,
+ "strength_model": lora_strength,
+ "strength_clip": lora_strength,
+ "model": ["4", 0],
+ "clip": ["4", 1],
+ },
+ }
+ # Rewire sampler to use LoRA output
+ workflow["3"]["inputs"]["model"] = ["10", 0]
+ workflow["6"]["inputs"]["clip"] = ["10", 1]
+ workflow["7"]["inputs"]["clip"] = ["10", 1]
+
+ return workflow
diff --git a/src/content_engine/api/routes_system.py b/src/content_engine/api/routes_system.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ec562f4d9a23ac01628fbe35272fd27762b57b2
--- /dev/null
+++ b/src/content_engine/api/routes_system.py
@@ -0,0 +1,235 @@
+"""System API routes — health checks, status, and configuration."""
+
+from __future__ import annotations
+
+import os
+from pathlib import Path
+
+from fastapi import APIRouter, HTTPException
+from pydantic import BaseModel
+
+from content_engine.models.schemas import SystemStatus
+from content_engine.config import IS_HF_SPACES
+
+router = APIRouter(prefix="/api", tags=["system"])
+
+_comfyui_client = None
+_catalog = None
+_template_engine = None
+_character_profiles = None
+
+
def init_routes(comfyui_client, catalog, template_engine, character_profiles=None):
    """Initialize route dependencies.

    Called once at app startup to inject shared service instances into this
    module's globals, which the endpoints below read.

    Args:
        comfyui_client: async ComfyUI client (used for is_available,
            get_system_stats, get_queue_depth, get_models).
        catalog: image catalog service (used for get_total_count).
        template_engine: prompt template engine (used for list_templates).
        character_profiles: optional mapping of character id -> profile.
    """
    global _comfyui_client, _catalog, _template_engine, _character_profiles
    _comfyui_client = comfyui_client
    _catalog = catalog
    _template_engine = template_engine
    _character_profiles = character_profiles
+
+
@router.get("/health")
async def health_check():
    """Basic health check: service is up; also reports ComfyUI reachability."""
    comfyui_ok = False
    if _comfyui_client is not None:
        comfyui_ok = await _comfyui_client.is_available()
    return {"status": "ok", "comfyui": comfyui_ok}
+
+
@router.get("/status", response_model=SystemStatus)
async def system_status():
    """Get comprehensive system status.

    Probes ComfyUI for GPU/VRAM/queue information (best-effort: any probe
    error leaves those fields at their defaults) and the catalog for the
    total image count.
    """
    comfyui_connected = False
    gpu_name = None
    vram_total_gb = None
    vram_free_gb = None
    queue_depth = 0

    if _comfyui_client:
        comfyui_connected = await _comfyui_client.is_available()
        if comfyui_connected:
            try:
                stats = await _comfyui_client.get_system_stats()
                devices = stats.get("devices", [])
                if devices:
                    gpu_name = devices[0].get("name")
                    vram_total_gb = devices[0].get("vram_total", 0) / (1024**3)
                    vram_free_gb = devices[0].get("vram_free", 0) / (1024**3)
                queue_depth = await _comfyui_client.get_queue_depth()
            except Exception:
                # Stats are optional decoration on the status; ignore failures.
                pass

    total_images = 0
    if _catalog:
        total_images = await _catalog.get_total_count()

    return SystemStatus(
        comfyui_connected=comfyui_connected,
        gpu_name=gpu_name,
        # Use `is not None`, not truthiness: the original dropped a legitimate
        # 0.0 GB reading (e.g. fully exhausted VRAM) to None.
        vram_total_gb=round(vram_total_gb, 2) if vram_total_gb is not None else None,
        vram_free_gb=round(vram_free_gb, 2) if vram_free_gb is not None else None,
        local_queue_depth=queue_depth,
        cloud_available=False,  # Phase 4
        total_images=total_images,
        pending_jobs=0,
    )
+
+
@router.get("/templates")
async def list_templates():
    """List all available prompt templates."""
    if _template_engine is None:
        return []

    def _var_entry(vdef):
        # Shape of each template variable as exposed to the UI.
        return {"type": vdef.type, "options": vdef.options, "required": vdef.required}

    result = []
    for tpl in _template_engine.list_templates():
        result.append({
            "id": tpl.id,
            "name": tpl.name,
            "category": tpl.category,
            "rating": tpl.rating,
            "variables": {key: _var_entry(vdef) for key, vdef in tpl.variables.items()},
        })
    return result
+
+
@router.get("/characters")
async def list_characters():
    """List all configured character profiles."""
    if _character_profiles is None:
        return []
    profiles = []
    for profile in _character_profiles.values():
        profiles.append({
            "id": profile.id,
            "name": profile.name,
            "trigger_word": profile.trigger_word,
            "lora_filename": profile.lora_filename,
            "lora_strength": profile.lora_strength,
            "description": profile.description,
        })
    return profiles
+
+
@router.get("/models/loras")
async def list_loras():
    """List available LoRA models from ComfyUI (empty list on any failure)."""
    if _comfyui_client is None:
        return []
    try:
        models = await _comfyui_client.get_models("loras")
    except Exception:
        return []
    return models
+
+
@router.get("/models/checkpoints")
async def list_checkpoints():
    """List available checkpoint models from ComfyUI (empty list on any failure)."""
    if _comfyui_client is None:
        return []
    try:
        models = await _comfyui_client.get_models("checkpoints")
    except Exception:
        return []
    return models
+
+
+# --- API Settings ---
+
class APISettingsResponse(BaseModel):
    """Response for GET /settings/api — key presence plus masked previews."""

    runpod_configured: bool
    # Masked key (last 4 chars at most) from _mask_key; None when unset.
    runpod_key_preview: str | None = None
    wavespeed_configured: bool
    wavespeed_key_preview: str | None = None
    # True when running on Hugging Face Spaces (keys managed via Secrets).
    is_cloud: bool
    env_file_path: str | None = None
+
+
class UpdateAPIKeysRequest(BaseModel):
    """Request for POST /settings/api; None fields are left unchanged."""

    runpod_api_key: str | None = None
    wavespeed_api_key: str | None = None
+
+
+def _mask_key(key: str | None) -> str | None:
+ """Mask API key showing only last 4 chars."""
+ if not key:
+ return None
+ if len(key) <= 8:
+ return "****"
+ return f"****{key[-4:]}"
+
+
@router.get("/settings/api", response_model=APISettingsResponse)
async def get_api_settings():
    """Get current API settings status (keys are masked)."""
    runpod_key = os.environ.get("RUNPOD_API_KEY")
    wavespeed_key = os.environ.get("WAVESPEED_API_KEY")

    # The editable .env file only exists in local mode; on HF Spaces the
    # keys come from the Space's Secrets panel.
    env_file = None if IS_HF_SPACES else "D:/AI automation/content_engine/.env"

    return APISettingsResponse(
        runpod_configured=bool(runpod_key),
        runpod_key_preview=_mask_key(runpod_key),
        wavespeed_configured=bool(wavespeed_key),
        wavespeed_key_preview=_mask_key(wavespeed_key),
        is_cloud=IS_HF_SPACES,
        env_file_path=env_file,
    )
+
+
@router.post("/settings/api")
async def update_api_settings(request: UpdateAPIKeysRequest):
    """Update API keys. Only works in local mode (not HF Spaces).

    On HF Spaces, use the Settings > Secrets panel instead.

    Keys are applied to the current process environment immediately and
    persisted to the local .env file. Unlike the original rewrite (which
    kept only KEY=value entries), comments, blank lines, and unrelated
    lines in .env are preserved.
    """
    if IS_HF_SPACES:
        raise HTTPException(
            400,
            "Cannot update API keys on Hugging Face Spaces. "
            "Use Settings > Variables and secrets in your Space dashboard."
        )

    # NOTE(review): machine-specific hardcoded path (mirrors config.py defaults).
    env_path = Path("D:/AI automation/content_engine/.env")

    # Collect requested changes; a None field means "leave unchanged".
    updates: dict[str, str] = {}
    if request.runpod_api_key is not None:
        updates["RUNPOD_API_KEY"] = request.runpod_api_key
    if request.wavespeed_api_key is not None:
        updates["WAVESPEED_API_KEY"] = request.wavespeed_api_key

    # Apply to the running process right away.
    for key, val in updates.items():
        os.environ[key] = val

    # Rewrite matching KEY=value lines in place; keep everything else verbatim.
    lines = env_path.read_text().splitlines() if env_path.exists() else []
    written: set[str] = set()
    out_lines: list[str] = []
    for line in lines:
        stripped = line.strip()
        if stripped and not stripped.startswith("#") and "=" in stripped:
            key = stripped.split("=", 1)[0].strip()
            if key in updates:
                out_lines.append(f"{key}={updates[key]}")
                written.add(key)
                continue
        out_lines.append(line)
    # Append any updated keys that were not already present.
    for key, val in updates.items():
        if key not in written:
            out_lines.append(f"{key}={val}")

    # First run: the .env directory may not exist yet — the original open()
    # would have raised FileNotFoundError here.
    env_path.parent.mkdir(parents=True, exist_ok=True)
    env_path.write_text("\n".join(out_lines) + ("\n" if out_lines else ""))

    return {
        "status": "updated",
        "updated_keys": list(updates),
        "message": "API keys updated. Restart the server to fully apply changes."
    }
diff --git a/src/content_engine/api/routes_training.py b/src/content_engine/api/routes_training.py
new file mode 100644
index 0000000000000000000000000000000000000000..55cecb6a65d491f43c461d8a626d1758764c1f10
--- /dev/null
+++ b/src/content_engine/api/routes_training.py
@@ -0,0 +1,269 @@
+"""Training API routes — LoRA model training management."""
+
+from __future__ import annotations
+
+import logging
+from pathlib import Path
+
+from fastapi import APIRouter, File, Form, HTTPException, UploadFile
+
+from content_engine.services.lora_trainer import LoRATrainer, TrainingConfig
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(prefix="/api/training", tags=["training"])
+
+_trainer: LoRATrainer | None = None
+_runpod_trainer = None # RunPodTrainer | None
+
+
def init_routes(trainer: LoRATrainer, runpod_trainer=None):
    """Inject the local LoRA trainer and optional RunPod cloud trainer
    instances used by the endpoints in this module. Called at app startup.
    """
    global _trainer, _runpod_trainer
    _trainer = trainer
    _runpod_trainer = runpod_trainer
+
+
@router.get("/status")
async def training_status():
    """Check if training infrastructure is ready."""
    if _trainer is None:
        return {"ready": False, "sd_scripts_installed": False, "runpod_available": False}
    runpod_ok = _runpod_trainer is not None and _runpod_trainer.available
    return {
        "ready": True,
        "sd_scripts_installed": _trainer.sd_scripts_installed,
        "runpod_available": runpod_ok,
    }
+
+
@router.get("/models")
async def list_training_models():
    """List available base models for LoRA training with their recommended parameters."""
    # FLUX 2 is the recommended default for realistic-person training.
    default_model = "flux2_dev"
    if _runpod_trainer is None:
        return {"models": {}, "default": default_model}
    return {
        "models": _runpod_trainer.list_training_models(),
        "default": default_model,
    }
+
+
@router.get("/gpu-options")
async def list_gpu_options():
    """List available RunPod GPU types (empty when RunPod is not configured)."""
    gpus = {} if _runpod_trainer is None else _runpod_trainer.list_gpu_options()
    return {"gpus": gpus}
+
+
@router.post("/install")
async def install_sd_scripts():
    """Install Kohya sd-scripts for LoRA training."""
    if _trainer is None:
        raise HTTPException(503, "Trainer not initialized")
    try:
        message = await _trainer.install_sd_scripts()
    except Exception as e:
        raise HTTPException(500, f"Installation failed: {e}")
    return {"status": "ok", "message": message}
+
+
@router.post("/start")
async def start_training(
    images: list[UploadFile] = File(...),
    name: str = Form(...),
    trigger_word: str = Form(""),
    captions_json: str = Form("{}"),
    base_model: str = Form("flux2_dev"),  # Model registry key (flux2_dev, sd15_realistic, sdxl_base)
    resolution: int | None = Form(None),  # None = use model default
    num_epochs: int = Form(10),
    max_train_steps: int | None = Form(None),  # If set, overrides epochs
    learning_rate: float | None = Form(None),  # None = use model default
    network_rank: int | None = Form(None),  # None = use model default
    network_alpha: int | None = Form(None),  # None = use model default
    optimizer: str | None = Form(None),  # None = use model default
    train_batch_size: int = Form(1),
    save_every_n_epochs: int = Form(2),
    backend: str = Form("runpod"),  # Default to runpod for cloud training
    gpu_type: str = Form("NVIDIA GeForce RTX 4090"),
):
    """Start a LoRA training job (local or RunPod cloud).

    Parameters like resolution, learning_rate, network_rank will use model
    registry defaults if not specified. Use base_model to select the model type.

    Saves uploaded images (with sidecar .txt captions) into a unique
    training_uploads directory, then dispatches to either the RunPod cloud
    trainer or the local Kohya trainer based on `backend`.
    """
    import json

    # Too few images produce an unusable LoRA; fail fast.
    if len(images) < 5:
        raise HTTPException(400, "Need at least 5 training images for reasonable results")

    # Parse captions (filename -> caption text); malformed JSON degrades to
    # empty rather than failing the whole request.
    try:
        captions = json.loads(captions_json) if captions_json else {}
    except json.JSONDecodeError:
        captions = {}

    # Save uploaded images to temp directory
    import uuid
    from content_engine.config import settings
    upload_dir = settings.paths.data_dir / "training_uploads" / str(uuid.uuid4())[:8]
    upload_dir.mkdir(parents=True, exist_ok=True)

    image_paths = []
    for img in images:
        file_path = upload_dir / img.filename
        content = await img.read()
        file_path.write_bytes(content)
        image_paths.append(str(file_path))

        # Write caption .txt file alongside the image (image.png + image.txt
        # dataset convention); falls back to the trigger word when no caption
        # was supplied for this filename.
        caption_text = captions.get(img.filename, trigger_word or "")
        caption_path = file_path.with_suffix(".txt")
        caption_path.write_text(caption_text, encoding="utf-8")
        logger.info("Saved caption for %s: %s", img.filename, caption_text[:80])

    # Route to RunPod cloud trainer
    if backend == "runpod":
        if _runpod_trainer is None:
            raise HTTPException(503, "RunPod not configured — set RUNPOD_API_KEY in .env")

        # Validate model exists
        model_cfg = _runpod_trainer.get_model_config(base_model)
        if not model_cfg:
            available = list(_runpod_trainer.list_training_models().keys())
            raise HTTPException(400, f"Unknown base model: {base_model}. Available: {available}")

        job_id = await _runpod_trainer.start_training(
            name=name,
            image_paths=image_paths,
            trigger_word=trigger_word,
            base_model=base_model,
            resolution=resolution,
            num_epochs=num_epochs,
            max_train_steps=max_train_steps,
            learning_rate=learning_rate,
            network_rank=network_rank,
            network_alpha=network_alpha,
            optimizer=optimizer,
            save_every_n_epochs=save_every_n_epochs,
            gpu_type=gpu_type,
        )
        job = _runpod_trainer.get_job(job_id)
        return {
            "job_id": job_id,
            "status": job.status if job else "unknown",
            "name": name,
            "backend": "runpod",
            "base_model": base_model,
            "model_type": model_cfg.get("model_type", "unknown"),
        }

    # Local training (uses local GPU with Kohya sd-scripts)
    if _trainer is None:
        raise HTTPException(503, "Trainer not initialized")

    # For local training, use model registry defaults if available
    model_cfg = {}
    if _runpod_trainer:
        model_cfg = _runpod_trainer.get_model_config(base_model) or {}

    # Resolve local model path
    local_model_path = model_cfg.get("local_path") if model_cfg else None
    if not local_model_path:
        # Fall back to default local path
        local_model_path = str(settings.paths.checkpoint_dir / "realisticVisionV51_v51VAE.safetensors")

    # NOTE(review): unlike the RunPod branch, max_train_steps is not passed to
    # TrainingConfig — confirm whether local training should honor it.
    config = TrainingConfig(
        name=name,
        trigger_word=trigger_word,
        base_model=local_model_path,
        resolution=resolution or model_cfg.get("resolution", 512),
        num_epochs=num_epochs,
        learning_rate=learning_rate or model_cfg.get("learning_rate", 1e-4),
        network_rank=network_rank or model_cfg.get("network_rank", 32),
        network_alpha=network_alpha or model_cfg.get("network_alpha", 16),
        optimizer=optimizer or model_cfg.get("optimizer", "AdamW8bit"),
        train_batch_size=train_batch_size,
        save_every_n_epochs=save_every_n_epochs,
    )

    job_id = await _trainer.start_training(config, image_paths)
    job = _trainer.get_job(job_id)

    return {
        "job_id": job_id,
        "status": job.status if job else "unknown",
        "name": name,
        "backend": "local",
        "base_model": base_model,
    }
+
+
@router.get("/jobs")
async def list_training_jobs():
    """List all training jobs (local + cloud)."""
    def _job_dict(j, backend):
        # Common summary shape shared by local and cloud jobs.
        return {
            "id": j.id, "name": j.name, "status": j.status,
            "progress": round(j.progress, 3),
            "current_epoch": j.current_epoch, "total_epochs": j.total_epochs,
            "current_step": j.current_step, "total_steps": j.total_steps,
            "loss": j.loss, "started_at": j.started_at,
            "completed_at": j.completed_at, "output_path": j.output_path,
            "error": j.error, "backend": backend,
        }

    jobs = []
    if _trainer:
        jobs.extend(_job_dict(j, "local") for j in _trainer.list_jobs())
    if _runpod_trainer:
        for j in _runpod_trainer.list_jobs():
            entry = _job_dict(j, "runpod")
            # Cloud jobs additionally expose the base model info.
            entry["base_model"] = j.base_model
            entry["model_type"] = j.model_type
            jobs.append(entry)
    return jobs
+
+
@router.get("/jobs/{job_id}")
async def get_training_job(job_id: str):
    """Get details of a specific training job including recent logs.

    Checks the local trainer first, then the RunPod trainer, so cloud jobs
    returned by GET /jobs can also be fetched individually (the original
    local-only lookup 404'd on RunPod job ids).
    """
    if _trainer is None and _runpod_trainer is None:
        raise HTTPException(503, "Trainer not initialized")

    job = _trainer.get_job(job_id) if _trainer else None
    backend = "local"
    if job is None and _runpod_trainer is not None:
        job = _runpod_trainer.get_job(job_id)
        backend = "runpod"
    if job is None:
        raise HTTPException(404, f"Training job not found: {job_id}")

    return {
        "id": job.id,
        "name": job.name,
        "status": job.status,
        "progress": round(job.progress, 3),
        "current_epoch": job.current_epoch,
        "total_epochs": job.total_epochs,
        "current_step": job.current_step,
        "total_steps": job.total_steps,
        "loss": job.loss,
        "started_at": job.started_at,
        "completed_at": job.completed_at,
        "output_path": job.output_path,
        "error": job.error,
        "backend": backend,
        # RunPod jobs may not expose log_lines; default to empty in that case.
        "log_lines": getattr(job, "log_lines", [])[-50:],
    }
+
+
@router.post("/jobs/{job_id}/cancel")
async def cancel_training_job(job_id: str):
    """Cancel a running training job (local or cloud)."""
    # Cloud jobs are tried first when the id is known to the RunPod trainer;
    # a failed cloud cancel still falls through to the local trainer.
    if _runpod_trainer is not None and _runpod_trainer.get_job(job_id):
        if await _runpod_trainer.cancel_job(job_id):
            return {"status": "cancelled", "job_id": job_id}
    if _trainer is not None:
        if await _trainer.cancel_job(job_id):
            return {"status": "cancelled", "job_id": job_id}
    raise HTTPException(404, "Job not found or not running")
diff --git a/src/content_engine/api/routes_ui.py b/src/content_engine/api/routes_ui.py
new file mode 100644
index 0000000000000000000000000000000000000000..a49bfd6923d71dfe956e685049d68647f9c0c76e
--- /dev/null
+++ b/src/content_engine/api/routes_ui.py
@@ -0,0 +1,23 @@
+"""Web UI route — serves the single-page dashboard."""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from fastapi import APIRouter
+from fastapi.responses import HTMLResponse, Response
+
+router = APIRouter(tags=["ui"])
+
+UI_HTML_PATH = Path(__file__).parent / "ui.html"
+
+
@router.get("/", response_class=HTMLResponse)
async def dashboard():
    """Serve the main dashboard UI, re-read from disk on every request."""
    html = UI_HTML_PATH.read_text(encoding="utf-8")
    # Disable client caching so UI edits show up without a hard refresh.
    no_cache = {"Cache-Control": "no-cache, no-store, must-revalidate"}
    return Response(content=html, media_type="text/html", headers=no_cache)
diff --git a/src/content_engine/api/routes_video.py b/src/content_engine/api/routes_video.py
new file mode 100644
index 0000000000000000000000000000000000000000..e49ad10e310bcbac9d3e9f82b2c89972f7697f4f
--- /dev/null
+++ b/src/content_engine/api/routes_video.py
@@ -0,0 +1,309 @@
+"""Video generation routes — WAN 2.2 img2video on RunPod pod."""
+
+from __future__ import annotations
+
+import asyncio
+import logging
+import os
+import time
+import uuid
+from pathlib import Path
+
+import runpod
+from fastapi import APIRouter, File, Form, HTTPException, UploadFile
+from pydantic import BaseModel
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter(prefix="/api/video", tags=["video"])
+
+# Video jobs tracking
+_video_jobs: dict[str, dict] = {}
+
+# Pod state is shared from routes_pod
def _get_pod_state():
    """Return the shared pod state dict owned by routes_pod.

    Imported lazily inside the function to avoid a circular import between
    the two route modules at load time.
    """
    from content_engine.api.routes_pod import _pod_state
    return _pod_state
+
+
class VideoGenerateRequest(BaseModel):
    """JSON request model for video generation.

    NOTE(review): the /generate endpoint below takes multipart Form/File
    fields instead of this model — confirm whether this class is still used.
    """

    prompt: str
    negative_prompt: str = ""
    num_frames: int = 81  # ~3 seconds at 24fps
    fps: int = 24
    # -1 = randomize the seed server-side.
    seed: int = -1
+
+
@router.post("/generate")
async def generate_video(
    image: UploadFile = File(...),
    prompt: str = Form(...),
    negative_prompt: str = Form(""),
    num_frames: int = Form(81),
    fps: int = Form(24),
    seed: int = Form(-1),
):
    """Generate a video from an image using WAN 2.2 I2V on the RunPod pod.

    Uploads the source image to the pod's ComfyUI, submits a WAN I2V
    workflow, records the job, and spawns a background poller.

    Returns:
        {"job_id", "status", "seed", "estimated_time"}.
    Raises:
        HTTPException 400 if the pod is not running, 500 on any HTTP failure.
    """
    import httpx
    import random

    pod_state = _get_pod_state()

    if pod_state["status"] != "running":
        raise HTTPException(400, "Pod not running - start it first in Status page")

    job_id = str(uuid.uuid4())[:8]
    # A negative seed means "randomize".
    seed = seed if seed >= 0 else random.randint(0, 2**32 - 1)

    image_bytes = await image.read()
    # Note: the original base64-encoded the image and built a throwaway
    # workflow before the upload; both were dead work (_build_wan_i2v_workflow
    # never reads image_b64) and have been removed.

    try:
        async with httpx.AsyncClient(timeout=30) as client:
            # First upload the image to ComfyUI's input directory.
            upload_url = f"http://{pod_state['ip']}:{pod_state['port']}/upload/image"
            files = {"image": (f"input_{job_id}.png", image_bytes, "image/png")}
            upload_resp = await client.post(upload_url, files=files)

            if upload_resp.status_code != 200:
                raise HTTPException(500, "Failed to upload image to pod")

            upload_data = upload_resp.json()
            # ComfyUI may rename the file on collision; use the server's name.
            uploaded_filename = upload_data.get("name", f"input_{job_id}.png")

            # Build the workflow once, with the server-side filename.
            workflow = _build_wan_i2v_workflow(
                uploaded_filename=uploaded_filename,
                prompt=prompt,
                negative_prompt=negative_prompt,
                num_frames=num_frames,
                fps=fps,
                seed=seed,
            )

            # Submit workflow
            url = f"http://{pod_state['ip']}:{pod_state['port']}/prompt"
            resp = await client.post(url, json={"prompt": workflow})
            resp.raise_for_status()
            prompt_id = resp.json()["prompt_id"]

        _video_jobs[job_id] = {
            "prompt_id": prompt_id,
            "status": "running",
            "seed": seed,
            "started_at": time.time(),
            "num_frames": num_frames,
            "fps": fps,
        }

        logger.info("Video generation started: %s -> %s", job_id, prompt_id)

        # Keep a strong reference to the poller task — asyncio only holds
        # weak refs, so a bare create_task() can be GC'd before completion.
        pending = getattr(generate_video, "_pending_tasks", None)
        if pending is None:
            pending = set()
            generate_video._pending_tasks = pending
        task = asyncio.create_task(_poll_video_job(job_id, prompt_id))
        pending.add(task)
        task.add_done_callback(pending.discard)

        return {
            "job_id": job_id,
            "status": "running",
            "seed": seed,
            "estimated_time": f"~{num_frames * 2} seconds",
        }

    except httpx.HTTPError as e:
        logger.error("Video generation failed: %s", e)
        raise HTTPException(500, f"Generation failed: {e}")
+
+
async def _poll_video_job(job_id: str, prompt_id: str):
    """Poll ComfyUI for video job completion.

    Runs as a background task. Polls /history/{prompt_id} every 3 s for up to
    10 minutes; when an output node exposes "gifs" (e.g. VHS_VideoCombine) or
    an output-typed "images" entry (e.g. SaveAnimatedWEBP), hands it to
    _download_video and stops. On timeout the job is marked failed.
    """
    import httpx

    pod_state = _get_pod_state()
    start = time.time()
    timeout = 600  # 10 minutes for video

    async with httpx.AsyncClient(timeout=60) as client:
        while time.time() - start < timeout:
            try:
                url = f"http://{pod_state['ip']}:{pod_state['port']}/history/{prompt_id}"
                resp = await client.get(url)

                if resp.status_code == 200:
                    data = resp.json()
                    if prompt_id in data:
                        outputs = data[prompt_id].get("outputs", {})

                        # Find video output (SaveAnimatedWEBP or VHS_VideoCombine)
                        for node_id, node_output in outputs.items():
                            # Check for gifs/videos
                            if "gifs" in node_output:
                                video_info = node_output["gifs"][0]
                                await _download_video(client, job_id, video_info, pod_state)
                                return
                            # Check for images (animated)
                            if "images" in node_output:
                                img_info = node_output["images"][0]
                                if img_info.get("type") == "output":
                                    await _download_video(client, job_id, img_info, pod_state)
                                    return

            except Exception as e:
                # Transient poll errors are expected while the job runs;
                # log at debug level and keep retrying until timeout.
                logger.debug("Polling video job: %s", e)

            await asyncio.sleep(3)

    _video_jobs[job_id]["status"] = "failed"
    _video_jobs[job_id]["error"] = "Timeout waiting for video generation"
    logger.error("Video generation timed out: %s", job_id)
+
+
async def _download_video(client, job_id: str, video_info: dict, pod_state: dict):
    """Download the generated video from ComfyUI.

    Fetches the file described by `video_info` (a ComfyUI output entry with
    filename/subfolder/type) via the pod's /view endpoint, saves it under
    <output>/videos/video_<job_id>.<ext>, and updates _video_jobs with the
    final status.

    Args:
        client: an open httpx.AsyncClient to reuse for the download.
        job_id: local job id used for the output filename and job tracking.
        video_info: output entry from the ComfyUI /history response.
        pod_state: shared pod state dict providing ip/port.
    """
    filename = video_info.get("filename")
    subfolder = video_info.get("subfolder", "")
    file_type = video_info.get("type", "output")

    # Download video
    view_url = f"http://{pod_state['ip']}:{pod_state['port']}/view"
    params = {"filename": filename, "type": file_type}
    if subfolder:
        params["subfolder"] = subfolder

    video_resp = await client.get(view_url, params=params)

    if video_resp.status_code == 200:
        # Save to local output directory
        from content_engine.config import settings
        output_dir = settings.paths.output_dir / "videos"
        output_dir.mkdir(parents=True, exist_ok=True)

        # Keep the remote extension; default to .webp (SaveAnimatedWEBP output).
        ext = Path(filename).suffix or ".webp"
        local_path = output_dir / f"video_{job_id}{ext}"
        local_path.write_bytes(video_resp.content)

        _video_jobs[job_id]["status"] = "completed"
        _video_jobs[job_id]["output_path"] = str(local_path)
        _video_jobs[job_id]["completed_at"] = time.time()
        _video_jobs[job_id]["filename"] = local_path.name

        logger.info("Video saved: %s", local_path)
    else:
        _video_jobs[job_id]["status"] = "failed"
        _video_jobs[job_id]["error"] = "Failed to download video"
+
+
@router.get("/jobs")
async def list_video_jobs():
    """Return every tracked video generation job."""
    return [job for job in _video_jobs.values()]
+
+
@router.get("/jobs/{job_id}")
async def get_video_job(job_id: str):
    """Return the state of one video generation job; 404 if unknown."""
    if job_id not in _video_jobs:
        raise HTTPException(404, "Job not found")
    return _video_jobs[job_id]
+
+
@router.get("/files/{filename}")
async def get_video_file(filename: str):
    """Serve a generated video file from the local output directory.

    NOTE(review): the original route path was garbled ("/(unknown)") and did
    not contain {filename}, which would have made `filename` a query
    parameter; "/files/{filename}" is the assumed intent — confirm against
    the UI's fetch URL.
    """
    from fastapi.responses import FileResponse
    from content_engine.config import settings

    videos_dir = (settings.paths.output_dir / "videos").resolve()
    video_path = (videos_dir / filename).resolve()

    # Reject path traversal (e.g. "../../secret"): the resolved target must
    # stay inside the videos directory — the original served any path.
    if not video_path.is_relative_to(videos_dir):
        raise HTTPException(404, "Video not found")
    if not video_path.exists():
        raise HTTPException(404, "Video not found")

    media_type = "video/webm" if filename.endswith(".webm") else "image/webp"
    return FileResponse(video_path, media_type=media_type)
+
+
+def _build_wan_i2v_workflow(
+ uploaded_filename: str = None,
+ image_b64: str = None,
+ prompt: str = "",
+ negative_prompt: str = "",
+ num_frames: int = 81,
+ fps: int = 24,
+ seed: int = -1,
+) -> dict:
+ """Build ComfyUI workflow for WAN 2.2 Image-to-Video."""
+
+ # WAN 2.2 I2V workflow
+ # This assumes the WAN 2.2 nodes are installed on the pod
+ workflow = {
+ # Load the input image
+ "1": {
+ "class_type": "LoadImage",
+ "inputs": {
+ "image": uploaded_filename or "input.png",
+ },
+ },
+ # WAN 2.2 model loader
+ "2": {
+ "class_type": "DownloadAndLoadWanModel",
+ "inputs": {
+ "model": "Wan2.2-I2V-14B-480P",
+ },
+ },
+ # Text encoder
+ "3": {
+ "class_type": "WanTextEncode",
+ "inputs": {
+ "prompt": prompt,
+ "negative_prompt": negative_prompt,
+ "wan_model": ["2", 0],
+ },
+ },
+ # Image-to-Video generation
+ "4": {
+ "class_type": "WanImageToVideo",
+ "inputs": {
+ "image": ["1", 0],
+ "wan_model": ["2", 0],
+ "conditioning": ["3", 0],
+ "num_frames": num_frames,
+ "seed": seed,
+ "steps": 30,
+ "cfg": 5.0,
+ },
+ },
+ # Decode to frames
+ "5": {
+ "class_type": "WanDecode",
+ "inputs": {
+ "samples": ["4", 0],
+ "wan_model": ["2", 0],
+ },
+ },
+ # Save as animated WEBP
+ "6": {
+ "class_type": "SaveAnimatedWEBP",
+ "inputs": {
+ "images": ["5", 0],
+ "filename_prefix": "wan_video",
+ "fps": fps,
+ "lossless": False,
+ "quality": 85,
+ },
+ },
+ }
+
+ return workflow
diff --git a/src/content_engine/api/ui.html b/src/content_engine/api/ui.html
new file mode 100644
index 0000000000000000000000000000000000000000..4dfda9d9812667fe2036c0d3a3624c1c1a132b52
--- /dev/null
+++ b/src/content_engine/api/ui.html
@@ -0,0 +1,2919 @@
+
+
+
+
+
+Content Engine
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Mode
+
+
Text to Image
+
Image to Image
+
Image to Video
+
+
+
Backend
+
+
Local GPU
+
RunPod GPU
+
Cloud API
+
+
+
+
+
+
+
+
+
+
+
+ Upload a reference photo and describe what you want. The model preserves identity, face, and pose while applying your prompt.
+
+
+
+
+
+
+ Checking pod status...
+
+
+
+
+
+
+ Select your trained LoRA to generate images of your character. Start the pod in Status page first.
+
+
+
+
+
+
Source Image
+
+
+
Drop image here or click to browse
+
This image will be animated into a video
+
+
+
+
![]()
+
+
Video Settings
+
+
+
+
+
+
+
+
+
+
+
+ Uses WAN 2.2 I2V on RunPod. Longer videos take more time (~2 sec per frame).
+
+
+
+
+
+
Reference Image
+
+
+
Drop image here or click to browse
+
PNG, JPG supported
+
+
+
+
+
+ 0.65
+
+
+
+
Character (optional)
+
+
+
+
Template (optional - preset prompt recipe)
+
+
+
Content Rating
+
+
+
+
+
+
Prompt
+
+
+
+
+
+
+
+
Settings
+
+
+
+
+ 7
+
+
+
+
+
+
+
+
+
+
+
+
+
Generated images will appear here
+
Write a prompt and click Generate
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Train LoRA Model
+
+
+
+
+
Training Jobs
+
+
+
No training jobs yet
+
Upload images and configure settings to start training
+
+
+
+
+
+
+
+
+
System Status
+
+
+
+
RunPod GPU Pod
+
+
+
+
GPU Pod Status
+
+ Checking...
+
+
+
+
+
+
+
+
+
+
+
+
+ Uptime:
+ 0 min
+
+
+ Cost:
+ $0.00
+
+
+
+
+ Start a GPU pod for image generation and LoRA training. Stop when done to save costs.
+
+
+
+
Available Models
+
+
+
Checkpoints
+
Loading...
+
+
+
LoRA Models
+
Loading...
+
+
+
+
Templates
+
+
+
+
+
+
Settings
+
+
+
+
API Keys
+
+ Loading API settings...
+
+
+
+
+
+
+
+
About
+
+
Content Engine - AI Image & Video Generation
+
+ Powered by RunPod GPU pods with FLUX.2 and WAN 2.2 models.
+
+
+
Version: 1.0.0
+
Backend: FastAPI
+
+
+
+
+
+
+
+
+
+
+
+
+
![]()
+
+
+
+
+
+
+
+
Are you sure?
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/content_engine/config.py b/src/content_engine/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0400c5339b92875dca2c84adee2ed3d0a190ca5
--- /dev/null
+++ b/src/content_engine/config.py
@@ -0,0 +1,93 @@
+"""Configuration loader using Pydantic Settings."""
+
+from __future__ import annotations
+
+import os
+from pathlib import Path
+from typing import Any
+
+import yaml
+from pydantic import BaseModel, Field
+from pydantic_settings import BaseSettings
+
# Detect if running on Hugging Face Spaces: either our explicit HF_SPACES=1
# flag (set in the Dockerfile) or the SPACE_ID variable HF injects at runtime.
IS_HF_SPACES = os.environ.get("HF_SPACES") == "1" or os.environ.get("SPACE_ID") is not None

# Base paths - use environment variables or defaults
if IS_HF_SPACES:
    # Container paths inside the HF Spaces image (match the Dockerfile ENV block).
    BASE_OUTPUT_DIR = Path(os.environ.get("OUTPUT_DIR", "/app/data/output"))
    BASE_DATA_DIR = Path(os.environ.get("DATA_DIR", "/app/data"))
    BASE_DB_PATH = os.environ.get("DB_PATH", "/app/data/db/content_engine.db")
else:
    # Hard-coded local Windows development-machine paths.
    BASE_OUTPUT_DIR = Path("D:/AI automation/output")
    BASE_DATA_DIR = Path("D:/AI automation/data")
    BASE_DB_PATH = "D:/AI automation/data/catalog.db"
+
+
class ComfyUIConfig(BaseModel):
    """Connection and capacity settings for a local ComfyUI instance."""

    # Base URL of the ComfyUI HTTP API (default: local dev server).
    url: str = "http://127.0.0.1:8188"
    # Maximum local queue depth — presumably gates local dispatch; confirm at worker.
    max_local_queue_depth: int = 3
    # Minimum free VRAM (GB) — presumably required before dispatching locally; confirm.
    min_vram_gb: float = 2.0
+
+
class PathsConfig(BaseModel):
    """Filesystem locations for outputs, data, and model assets.

    Defaults switch between HF Spaces container paths and the local Windows
    development machine based on the module-level ``IS_HF_SPACES`` flag.
    """

    output_dir: Path = BASE_OUTPUT_DIR
    data_dir: Path = BASE_DATA_DIR
    # LoRA / checkpoint model folders (ComfyUI's tree locally, /app/data on Spaces).
    lora_dir: Path = Path("D:/ComfyUI/Models/Lora") if not IS_HF_SPACES else Path("/app/data/loras")
    checkpoint_dir: Path = Path("D:/ComfyUI/Models/StableDiffusion") if not IS_HF_SPACES else Path("/app/data/models")
+
+
class DatabaseConfig(BaseModel):
    """Async SQLAlchemy connection URLs (SQLite via aiosqlite by default)."""

    # Main catalog database (images, characters, jobs, scheduled posts).
    url: str = f"sqlite+aiosqlite:///{BASE_DB_PATH}"
    # Separate job-queue database stored under the data dir.
    jobs_url: str = f"sqlite+aiosqlite:///{BASE_DATA_DIR}/jobs.db"
+
+
class GenerationConfig(BaseModel):
    """Default model and sampler parameters used when a request omits them."""

    default_checkpoint: str = "realisticVisionV51_v51VAE.safetensors"
    default_steps: int = 28
    default_cfg: float = 7.0
    default_sampler: str = "dpmpp_2m"
    default_scheduler: str = "karras"
    # Portrait-oriented default resolution (width x height).
    default_width: int = 832
    default_height: int = 1216
+
+
class SchedulingConfig(BaseModel):
    """Posting-cadence settings for the content scheduler."""

    posts_per_day: int = 3
    # Hours of day (24h clock) treated as peak posting slots.
    peak_hours: list[int] = Field(default_factory=lambda: [10, 14, 20])
    # Fraction of scheduled posts that should be SFW (0.0 - 1.0).
    sfw_ratio: float = 0.4
+
+
class CloudProviderEntry(BaseModel):
    """One cloud generation provider as configured in settings.yaml."""

    name: str
    # NOTE(review): storing API keys in YAML config is risky; keys also come
    # from env vars at startup — confirm which source wins.
    api_key: str = ""
    # Provider priority — presumably lower = tried first; confirm at call site.
    priority: int = 1
+
+
class Settings(BaseSettings):
    """Top-level application settings.

    Populated from YAML via :func:`load_settings`; as a ``BaseSettings``
    subclass it can additionally pick up overrides from environment variables.
    """

    comfyui: ComfyUIConfig = Field(default_factory=ComfyUIConfig)
    paths: PathsConfig = Field(default_factory=PathsConfig)
    database: DatabaseConfig = Field(default_factory=DatabaseConfig)
    generation: GenerationConfig = Field(default_factory=GenerationConfig)
    scheduling: SchedulingConfig = Field(default_factory=SchedulingConfig)
    cloud_providers: list[CloudProviderEntry] = Field(default_factory=list)
+
+
def load_settings(config_path: Path | None = None) -> Settings:
    """Load settings from YAML config file, with env var overrides.

    Args:
        config_path: Explicit path to settings.yaml; when None, falls back to
            the HF Spaces container path or the local dev path.

    Returns:
        A ``Settings`` instance. Missing file or missing sections fall back to
        the model defaults; a YAML file whose top level is not a mapping is
        ignored rather than crashing startup with an opaque ``TypeError``.
    """
    if config_path is None:
        if IS_HF_SPACES:
            config_path = Path("/app/config/settings.yaml")
        else:
            config_path = Path("D:/AI automation/content_engine/config/settings.yaml")

    data: dict[str, Any] = {}
    if config_path.exists():
        # Explicit UTF-8: the Windows default locale encoding (cp1252) would
        # corrupt non-ASCII prompt text stored in the config.
        with open(config_path, encoding="utf-8") as f:
            loaded = yaml.safe_load(f)
        # safe_load returns None for an empty file and may return a scalar or
        # list for malformed configs; only a top-level mapping is usable.
        if isinstance(loaded, dict):
            data = loaded

    return Settings(**data)
+
+
+# Global singleton — initialized on import
+settings = load_settings()
diff --git a/src/content_engine/main.py b/src/content_engine/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b13a08f3afb955ee6cc4db41ecd5284b6880102
--- /dev/null
+++ b/src/content_engine/main.py
@@ -0,0 +1,213 @@
+"""Content Engine — FastAPI application entry point.
+
+Run with:
+ cd "D:\AI automation\content_engine"
+ uvicorn content_engine.main:app --host 0.0.0.0 --port 8000 --reload
+
+Or:
+ python -m content_engine.main
+"""
+
+from __future__ import annotations
+
+import logging
+import os
+from contextlib import asynccontextmanager
+from pathlib import Path
+
+import yaml
+from dotenv import load_dotenv
+from fastapi import FastAPI
+
+from content_engine.config import settings
+from content_engine.models.database import init_db
+from content_engine.services.catalog import CatalogService
+from content_engine.services.comfyui_client import ComfyUIClient
+from content_engine.services.template_engine import TemplateEngine
+from content_engine.services.variation_engine import CharacterProfile, VariationEngine
+from content_engine.services.workflow_builder import WorkflowBuilder
+from content_engine.workers.local_worker import LocalWorker
+
+from content_engine.api import routes_catalog, routes_generation, routes_pod, routes_system, routes_training, routes_ui, routes_video
+
# Load .env file for API keys
import os  # NOTE(review): redundant — `os` is already imported at the top of the file; safe to drop.
# Same HF Spaces detection as content_engine.config — kept local to avoid an import cycle? TODO confirm.
IS_HF_SPACES = os.environ.get("HF_SPACES") == "1" or os.environ.get("SPACE_ID") is not None
if IS_HF_SPACES:
    load_dotenv(Path("/app/.env"))
else:
    load_dotenv(Path("D:/AI automation/content_engine/.env"))

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s",
)
logger = logging.getLogger(__name__)

# Service instances (created at startup)
# Module-level handle so routes/shutdown can reach the client created in lifespan().
comfyui_client: ComfyUIClient | None = None
+
+
def load_character_profiles() -> dict[str, CharacterProfile]:
    """Load all character YAML profiles from config/characters/.

    Returns:
        Mapping of character id -> ``CharacterProfile``. Files that fail to
        parse or validate are logged and skipped; a missing directory yields
        an empty mapping. Files are loaded in sorted order so startup logs
        and id collisions are deterministic.
    """
    if IS_HF_SPACES:
        characters_dir = Path("/app/config/characters")
    else:
        characters_dir = Path("D:/AI automation/content_engine/config/characters")
    profiles: dict[str, CharacterProfile] = {}

    if not characters_dir.exists():
        logger.warning("Characters directory not found: %s", characters_dir)
        return profiles

    for path in sorted(characters_dir.glob("*.yaml")):
        try:
            # Explicit UTF-8: character names/prompts may be non-ASCII and the
            # Windows default locale encoding (cp1252) would corrupt them.
            with open(path, encoding="utf-8") as f:
                data = yaml.safe_load(f)
            if not isinstance(data, dict):
                # Empty or malformed YAML (safe_load returns None / scalar / list).
                logger.error("Character file %s is not a mapping — skipped", path)
                continue
            profile = CharacterProfile(
                id=data["id"],
                name=data.get("name", data["id"]),
                trigger_word=data["trigger_word"],
                lora_filename=data["lora_filename"],
                lora_strength=data.get("lora_strength", 0.85),
                default_checkpoint=data.get("default_checkpoint"),
                style_loras=data.get("style_loras", []),
                description=data.get("description", ""),
                physical_traits=data.get("physical_traits", {}),
            )
            profiles[profile.id] = profile
            logger.info("Loaded character: %s (%s)", profile.name, profile.id)
        except Exception:
            # One bad profile must not prevent the rest from loading.
            logger.error("Failed to load character %s", path, exc_info=True)

    return profiles
+
+
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Startup and shutdown lifecycle.

    Startup: initializes the database, constructs the service singletons,
    probes ComfyUI availability (non-fatal), conditionally enables the
    WaveSpeed / RunPod cloud providers from environment variables, and
    injects the resulting dependencies into each route module via their
    ``init_routes`` hooks. Shutdown: closes the ComfyUI client.
    """
    global comfyui_client

    logger.info("Starting Content Engine...")

    # Initialize database
    await init_db()
    logger.info("Database initialized")

    # Create service instances (order matters: the worker needs all of these)
    comfyui_client = ComfyUIClient(settings.comfyui.url)
    workflow_builder = WorkflowBuilder()
    template_engine = TemplateEngine()
    template_engine.load_all()
    catalog = CatalogService()
    character_profiles = load_character_profiles()
    variation_engine = VariationEngine(template_engine)

    local_worker = LocalWorker(
        comfyui_client=comfyui_client,
        workflow_builder=workflow_builder,
        template_engine=template_engine,
        catalog=catalog,
    )

    # Check ComfyUI connection — non-fatal; the app still starts without it.
    if await comfyui_client.is_available():
        logger.info("ComfyUI connected at %s", settings.comfyui.url)
    else:
        logger.warning(
            "ComfyUI not available at %s — generation will fail until connected",
            settings.comfyui.url,
        )

    # Initialize WaveSpeed cloud provider if API key is set.
    # Imported lazily so the dependency is only needed when configured.
    wavespeed_provider = None
    wavespeed_key = os.environ.get("WAVESPEED_API_KEY")
    if wavespeed_key:
        from content_engine.services.cloud_providers.wavespeed_provider import WaveSpeedProvider
        wavespeed_provider = WaveSpeedProvider(api_key=wavespeed_key)
        logger.info("WaveSpeed cloud provider initialized (NanoBanana, SeeDream)")
    else:
        logger.info("WaveSpeed not configured — cloud generation disabled")

    # Initialize route dependencies
    routes_generation.init_routes(
        local_worker, template_engine, variation_engine, character_profiles,
        wavespeed_provider=wavespeed_provider, catalog=catalog,
        comfyui_client=comfyui_client,
    )
    routes_catalog.init_routes(catalog)
    routes_system.init_routes(comfyui_client, catalog, template_engine, character_profiles)

    # Initialize LoRA trainer (local)
    from content_engine.services.lora_trainer import LoRATrainer
    lora_trainer = LoRATrainer()
    logger.info("LoRA trainer initialized (sd-scripts %s)",
        "ready" if lora_trainer.sd_scripts_installed else "not installed — install via UI")

    # Initialize RunPod cloud trainer if API key is set
    runpod_trainer = None
    runpod_provider = None
    runpod_key = os.environ.get("RUNPOD_API_KEY")
    runpod_endpoint_id = os.environ.get("RUNPOD_ENDPOINT_ID")

    if runpod_key:
        from content_engine.services.runpod_trainer import RunPodTrainer
        runpod_trainer = RunPodTrainer(api_key=runpod_key)
        logger.info("RunPod cloud trainer initialized — cloud LoRA training available")

        # Generation provider additionally needs a serverless endpoint id.
        if runpod_endpoint_id:
            from content_engine.services.cloud_providers.runpod_provider import RunPodProvider
            runpod_provider = RunPodProvider(api_key=runpod_key, endpoint_id=runpod_endpoint_id)
            logger.info("RunPod generation provider initialized (endpoint: %s)", runpod_endpoint_id)
        else:
            logger.info("RunPod endpoint not configured — set RUNPOD_ENDPOINT_ID for cloud generation")
    else:
        logger.info("RunPod not configured — set RUNPOD_API_KEY for cloud training/generation")

    routes_training.init_routes(lora_trainer, runpod_trainer=runpod_trainer)

    # Update generation routes with RunPod provider (None disables it).
    routes_generation.set_runpod_provider(runpod_provider)

    logger.info(
        "Content Engine ready — %d templates, %d characters",
        len(template_engine.list_templates()),
        len(character_profiles),
    )

    yield

    # Shutdown
    if comfyui_client:
        await comfyui_client.close()
    logger.info("Content Engine stopped")
+
+
# Create the FastAPI app
app = FastAPI(
    title="Content Engine",
    description="Automated content generation system using ComfyUI",
    version="0.1.0",
    lifespan=lifespan,
)

# Register route modules.
# NOTE: registration order is significant for overlapping paths — the UI
# router must come first so it owns "/".
app.include_router(routes_ui.router)  # UI at / (must be first)
app.include_router(routes_generation.router)
app.include_router(routes_catalog.router)
app.include_router(routes_system.router)
app.include_router(routes_training.router)
app.include_router(routes_pod.router)
app.include_router(routes_video.router)
+
+
if __name__ == "__main__":
    # Dev entry point: serve on all interfaces with auto-reload enabled.
    # (Production runs via the Dockerfile's uvicorn CMD instead.)
    import uvicorn

    uvicorn.run("content_engine.main:app", host="0.0.0.0", port=8000, reload=True)
diff --git a/src/content_engine/models/__init__.py b/src/content_engine/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..57e5a15cc1c4cf90b25f42a1c0940c6ce1144d2d
--- /dev/null
+++ b/src/content_engine/models/__init__.py
@@ -0,0 +1,31 @@
+"""Database models and Pydantic schemas."""
+
+from content_engine.models.database import (
+ Base,
+ Character,
+ GenerationJob,
+ Image,
+ ScheduledPost,
+)
+from content_engine.models.schemas import (
+ BatchRequest,
+ GenerationRequest,
+ GenerationResponse,
+ ImageResponse,
+ JobStatus,
+ SystemStatus,
+)
+
+__all__ = [
+ "Base",
+ "Character",
+ "GenerationJob",
+ "Image",
+ "ScheduledPost",
+ "BatchRequest",
+ "GenerationRequest",
+ "GenerationResponse",
+ "ImageResponse",
+ "JobStatus",
+ "SystemStatus",
+]
diff --git a/src/content_engine/models/__pycache__/__init__.cpython-311.pyc b/src/content_engine/models/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..535b81a836207467285c43a72726490738eac270
Binary files /dev/null and b/src/content_engine/models/__pycache__/__init__.cpython-311.pyc differ
diff --git a/src/content_engine/models/__pycache__/database.cpython-311.pyc b/src/content_engine/models/__pycache__/database.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..faf909f0f7d4686634cae9478ed5c9402df73541
Binary files /dev/null and b/src/content_engine/models/__pycache__/database.cpython-311.pyc differ
diff --git a/src/content_engine/models/__pycache__/schemas.cpython-311.pyc b/src/content_engine/models/__pycache__/schemas.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..90298a2ed3f0d9bc8b0500c8a32e07a434ed0e50
Binary files /dev/null and b/src/content_engine/models/__pycache__/schemas.cpython-311.pyc differ
diff --git a/src/content_engine/models/database.py b/src/content_engine/models/database.py
new file mode 100644
index 0000000000000000000000000000000000000000..778e85214b6ec308c8d9362d1687d24eddc0de7d
--- /dev/null
+++ b/src/content_engine/models/database.py
@@ -0,0 +1,166 @@
+"""SQLAlchemy database models for the content catalog and job queue."""
+
+from __future__ import annotations
+
+from datetime import datetime
+
+from sqlalchemy import (
+ Boolean,
+ DateTime,
+ Float,
+ Index,
+ Integer,
+ String,
+ Text,
+ func,
+)
+from sqlalchemy.ext.asyncio import AsyncAttrs, async_sessionmaker, create_async_engine
+from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
+
+from content_engine.config import settings
+
+
class Base(AsyncAttrs, DeclarativeBase):
    """Declarative base (with async attribute access) shared by all ORM models."""
    pass
+
+
class Character(Base):
    """A trained character identity: its LoRA file and trigger word."""

    __tablename__ = "characters"

    # Human-chosen slug id (max 64 chars), not a UUID.
    id: Mapped[str] = mapped_column(String(64), primary_key=True)
    name: Mapped[str] = mapped_column(String(128), nullable=False)
    # Token that must appear in prompts to activate the character's LoRA.
    trigger_word: Mapped[str] = mapped_column(String(128), nullable=False)
    lora_filename: Mapped[str] = mapped_column(String(256), nullable=False)
    lora_strength: Mapped[float] = mapped_column(Float, default=0.85)
    default_checkpoint: Mapped[str | None] = mapped_column(String(256))
    description: Mapped[str | None] = mapped_column(Text)
    created_at: Mapped[datetime] = mapped_column(
        DateTime, server_default=func.now()
    )
+
+
class Image(Base):
    """Catalog record for one generated image plus its full provenance.

    Stores the generation parameters needed to reproduce the image, the
    variation variables broken out into dedicated columns for searching,
    and approval/publishing state for the scheduler.
    """

    __tablename__ = "images"

    # UUID4 string (36 chars incl. hyphens).
    id: Mapped[str] = mapped_column(String(36), primary_key=True)
    batch_id: Mapped[str | None] = mapped_column(String(36), index=True)
    character_id: Mapped[str | None] = mapped_column(String(64), index=True)
    template_id: Mapped[str | None] = mapped_column(String(128))
    content_rating: Mapped[str] = mapped_column(String(8), index=True)  # sfw | nsfw

    # Generation parameters
    positive_prompt: Mapped[str | None] = mapped_column(Text)
    negative_prompt: Mapped[str | None] = mapped_column(Text)
    checkpoint: Mapped[str | None] = mapped_column(String(256))
    loras_json: Mapped[str | None] = mapped_column(Text)  # JSON array
    seed: Mapped[int | None] = mapped_column(Integer)
    steps: Mapped[int | None] = mapped_column(Integer)
    cfg: Mapped[float | None] = mapped_column(Float)
    sampler: Mapped[str | None] = mapped_column(String(64))
    scheduler: Mapped[str | None] = mapped_column(String(64))
    width: Mapped[int | None] = mapped_column(Integer)
    height: Mapped[int | None] = mapped_column(Integer)

    # Searchable variation attributes (copied out of the request's variables dict)
    pose: Mapped[str | None] = mapped_column(String(128))
    outfit: Mapped[str | None] = mapped_column(String(128))
    emotion: Mapped[str | None] = mapped_column(String(128))
    camera_angle: Mapped[str | None] = mapped_column(String(128))
    lighting: Mapped[str | None] = mapped_column(String(128))
    scene: Mapped[str | None] = mapped_column(String(128))

    # File info
    file_path: Mapped[str] = mapped_column(String(512), nullable=False)
    file_hash: Mapped[str | None] = mapped_column(String(64))  # SHA-256 hex digest
    file_size: Mapped[int | None] = mapped_column(Integer)
    generation_backend: Mapped[str | None] = mapped_column(String(32))  # local | cloud
    comfyui_prompt_id: Mapped[str | None] = mapped_column(String(36))
    generation_time_seconds: Mapped[float | None] = mapped_column(Float)

    # Quality and publishing
    quality_score: Mapped[float | None] = mapped_column(Float)
    is_approved: Mapped[bool] = mapped_column(Boolean, default=False)
    is_published: Mapped[bool] = mapped_column(Boolean, default=False)
    published_platform: Mapped[str | None] = mapped_column(String(64))
    published_at: Mapped[datetime | None] = mapped_column(DateTime)
    scheduled_at: Mapped[datetime | None] = mapped_column(DateTime)

    created_at: Mapped[datetime] = mapped_column(
        DateTime, server_default=func.now()
    )

    __table_args__ = (
        # Partial index on approved images; `postgresql_where` only takes
        # effect on PostgreSQL — SQLite builds a plain index here.
        Index("idx_images_approved", "is_approved", postgresql_where=(is_approved == True)),  # noqa: E712
        # Composite index for the "approved but not yet published" scan.
        Index(
            "idx_images_unpublished",
            "is_published",
            "is_approved",
        ),
    )
+
+
class GenerationJob(Base):
    """Queue entry tracking one generation request through its lifecycle."""

    __tablename__ = "generation_jobs"

    # UUID4 string.
    id: Mapped[str] = mapped_column(String(36), primary_key=True)
    batch_id: Mapped[str | None] = mapped_column(String(36), index=True)
    character_id: Mapped[str | None] = mapped_column(String(64))
    template_id: Mapped[str | None] = mapped_column(String(128))
    content_rating: Mapped[str | None] = mapped_column(String(8))
    # Serialized request variables / rendered workflow, stored for replay & debugging.
    variables_json: Mapped[str | None] = mapped_column(Text)
    workflow_json: Mapped[str | None] = mapped_column(Text)
    backend: Mapped[str | None] = mapped_column(String(32))  # local | replicate | runpod
    status: Mapped[str] = mapped_column(
        String(16), default="pending", index=True
    )  # pending | queued | running | completed | failed
    comfyui_prompt_id: Mapped[str | None] = mapped_column(String(36))
    cloud_job_id: Mapped[str | None] = mapped_column(String(128))
    # Set once the job completes and its Image row exists.
    result_image_id: Mapped[str | None] = mapped_column(String(36))
    error_message: Mapped[str | None] = mapped_column(Text)
    created_at: Mapped[datetime] = mapped_column(
        DateTime, server_default=func.now()
    )
    started_at: Mapped[datetime | None] = mapped_column(DateTime)
    completed_at: Mapped[datetime | None] = mapped_column(DateTime)
+
+
class ScheduledPost(Base):
    """An image queued for publication on a platform at a specific time."""

    __tablename__ = "scheduled_posts"

    # UUID4 string.
    id: Mapped[str] = mapped_column(String(36), primary_key=True)
    # References Image.id — NOTE(review): no FK constraint is declared; confirm intentional.
    image_id: Mapped[str] = mapped_column(String(36), nullable=False)
    platform: Mapped[str] = mapped_column(String(64), nullable=False)
    scheduled_at: Mapped[datetime] = mapped_column(DateTime, nullable=False)
    caption: Mapped[str | None] = mapped_column(Text)
    is_teaser: Mapped[bool] = mapped_column(Boolean, default=False)
    status: Mapped[str] = mapped_column(
        String(16), default="pending"
    )  # pending | published | failed | cancelled
    published_at: Mapped[datetime | None] = mapped_column(DateTime)
    error_message: Mapped[str | None] = mapped_column(Text)
    created_at: Mapped[datetime] = mapped_column(
        DateTime, server_default=func.now()
    )

    __table_args__ = (
        # Supports the scheduler's "pending posts due before T" query.
        Index("idx_scheduled_pending", "status", "scheduled_at"),
    )
+
+
# --- Engine / Session factories ---

# ``check_same_thread=False`` is a SQLite-only connect argument; passing it to
# any other DBAPI raises. Only include it when the configured URL is SQLite,
# so switching ``database.url`` to e.g. PostgreSQL keeps working.
_connect_args: dict[str, bool] = (
    {"check_same_thread": False}
    if settings.database.url.startswith("sqlite")
    else {}
)

_catalog_engine = create_async_engine(
    settings.database.url,
    echo=False,
    connect_args=_connect_args,
)

# Sessions keep loaded objects usable after commit (expire_on_commit=False),
# which the catalog service relies on when returning ORM rows.
catalog_session_factory = async_sessionmaker(
    _catalog_engine, expire_on_commit=False
)


async def init_db() -> None:
    """Create all tables. Call once at startup (no-op for tables that exist)."""
    async with _catalog_engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)
diff --git a/src/content_engine/models/schemas.py b/src/content_engine/models/schemas.py
new file mode 100644
index 0000000000000000000000000000000000000000..089f889c38104b4d59c9d7f9ee4fab9d9c27a057
--- /dev/null
+++ b/src/content_engine/models/schemas.py
@@ -0,0 +1,118 @@
+"""Pydantic request/response schemas for the API."""
+
+from __future__ import annotations
+
+from datetime import datetime
+
+from pydantic import BaseModel, Field
+
+
+# --- Request schemas ---
+
+
class LoRASpec(BaseModel):
    """One LoRA to apply during generation, with separate UNet/CLIP strengths."""

    name: str  # LoRA filename as known to the generation backend
    strength_model: float = 0.85
    strength_clip: float = 0.85
+
+
class GenerationRequest(BaseModel):
    """Single image generation request.

    Either reference a template (``template_id`` plus ``variables``) or
    supply ``positive_prompt``/``negative_prompt`` directly. Fields left as
    None presumably fall back to the configured generation defaults — confirm
    in the worker that consumes this request.
    """

    character_id: str | None = None
    template_id: str | None = None
    content_rating: str = "sfw"  # sfw | nsfw

    # Direct prompt override (if not using template)
    positive_prompt: str | None = None
    negative_prompt: str | None = None

    # Model configuration
    checkpoint: str | None = None
    loras: list[LoRASpec] = Field(default_factory=list)

    # Sampler settings
    seed: int | None = None
    steps: int | None = None
    cfg: float | None = None
    sampler: str | None = None
    scheduler: str | None = None
    width: int | None = None
    height: int | None = None

    # Variation variables (for template rendering)
    variables: dict[str, str] = Field(default_factory=dict)
+
+
class BatchRequest(BaseModel):
    """Batch generation request: N variations of one character/template pair."""

    character_id: str
    template_id: str
    content_rating: str = "sfw"
    count: int = 10
    variation_mode: str = "random"  # curated | random | exhaustive
    # Variables to hold fixed across the batch — presumably the rest vary
    # per variation_mode; confirm in the variation engine.
    pin: dict[str, str] = Field(default_factory=dict)
    seed_strategy: str = "random"  # random | sequential | fixed
+
+
+# --- Response schemas ---
+
+
class GenerationResponse(BaseModel):
    """Immediate response to a generation request (job accepted, not finished)."""

    job_id: str
    batch_id: str | None = None
    status: str
    backend: str | None = None  # which backend accepted the job (e.g. local/cloud)
+
+
class JobStatus(BaseModel):
    """Point-in-time status snapshot for a single generation job."""

    job_id: str
    batch_id: str | None = None
    status: str  # pending | queued | running | completed | failed
    backend: str | None = None
    progress: float | None = None  # 0.0 - 1.0
    result_image_id: str | None = None  # set once status == completed
    error_message: str | None = None  # set once status == failed
    created_at: datetime | None = None
    started_at: datetime | None = None
    completed_at: datetime | None = None
+
+
class ImageResponse(BaseModel):
    """API view of a cataloged image (subset of the Image ORM row)."""

    id: str
    character_id: str | None = None
    template_id: str | None = None
    content_rating: str
    file_path: str
    seed: int | None = None
    # Variation attributes, mirrored from the catalog's searchable columns.
    pose: str | None = None
    outfit: str | None = None
    emotion: str | None = None
    camera_angle: str | None = None
    lighting: str | None = None
    scene: str | None = None
    quality_score: float | None = None
    is_approved: bool
    is_published: bool
    created_at: datetime | None = None
+
+
class SystemStatus(BaseModel):
    """Aggregate health/status payload for the system dashboard."""

    comfyui_connected: bool
    gpu_name: str | None = None
    vram_total_gb: float | None = None
    vram_free_gb: float | None = None
    local_queue_depth: int = 0
    cloud_available: bool = False
    total_images: int = 0
    pending_jobs: int = 0
+
+
class BatchStatusResponse(BaseModel):
    """Per-batch job counts, broken down by terminal and in-flight states."""

    batch_id: str
    total_jobs: int
    completed: int
    failed: int
    pending: int
    running: int
diff --git a/src/content_engine/services/__init__.py b/src/content_engine/services/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..df0b023fb86593d924166952f516f8764e6efb0d
--- /dev/null
+++ b/src/content_engine/services/__init__.py
@@ -0,0 +1 @@
+"""Services layer for the content engine."""
diff --git a/src/content_engine/services/__pycache__/__init__.cpython-311.pyc b/src/content_engine/services/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4448e5f7d1a7e8d57ebff89041d75fb5b78031b9
Binary files /dev/null and b/src/content_engine/services/__pycache__/__init__.cpython-311.pyc differ
diff --git a/src/content_engine/services/__pycache__/catalog.cpython-311.pyc b/src/content_engine/services/__pycache__/catalog.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d68a853fb246ca769ec5017dbfd8350c3f08a9da
Binary files /dev/null and b/src/content_engine/services/__pycache__/catalog.cpython-311.pyc differ
diff --git a/src/content_engine/services/__pycache__/comfyui_client.cpython-311.pyc b/src/content_engine/services/__pycache__/comfyui_client.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be4bb64a51779d3d391c8e360670092268b21219
Binary files /dev/null and b/src/content_engine/services/__pycache__/comfyui_client.cpython-311.pyc differ
diff --git a/src/content_engine/services/__pycache__/lora_trainer.cpython-311.pyc b/src/content_engine/services/__pycache__/lora_trainer.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3e511fea4e113ca3a08499a59a4b9234fd0dcbfa
Binary files /dev/null and b/src/content_engine/services/__pycache__/lora_trainer.cpython-311.pyc differ
diff --git a/src/content_engine/services/__pycache__/runpod_trainer.cpython-311.pyc b/src/content_engine/services/__pycache__/runpod_trainer.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c0f11c82322fb7a55cfb8fb7a915cdf2c2309d26
Binary files /dev/null and b/src/content_engine/services/__pycache__/runpod_trainer.cpython-311.pyc differ
diff --git a/src/content_engine/services/__pycache__/template_engine.cpython-311.pyc b/src/content_engine/services/__pycache__/template_engine.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c690b9743cf096d27b53c5c5233affc42fbb644
Binary files /dev/null and b/src/content_engine/services/__pycache__/template_engine.cpython-311.pyc differ
diff --git a/src/content_engine/services/__pycache__/variation_engine.cpython-311.pyc b/src/content_engine/services/__pycache__/variation_engine.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..09b501194dbde983c7eafa4bcc6d9a643aada9a0
Binary files /dev/null and b/src/content_engine/services/__pycache__/variation_engine.cpython-311.pyc differ
diff --git a/src/content_engine/services/__pycache__/workflow_builder.cpython-311.pyc b/src/content_engine/services/__pycache__/workflow_builder.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d136af5fbce052973e3dbe67c96cbaa339d04812
Binary files /dev/null and b/src/content_engine/services/__pycache__/workflow_builder.cpython-311.pyc differ
diff --git a/src/content_engine/services/catalog.py b/src/content_engine/services/catalog.py
new file mode 100644
index 0000000000000000000000000000000000000000..db74000dd9423de3ce9e2bbcf7b8befacf9a76f1
--- /dev/null
+++ b/src/content_engine/services/catalog.py
@@ -0,0 +1,212 @@
+"""Image catalog service — stores and queries image metadata."""
+
+from __future__ import annotations
+
+import hashlib
+import json
+import logging
+import uuid
+from datetime import datetime
+from pathlib import Path
+from typing import Any
+
+from sqlalchemy import select, func
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from content_engine.config import settings
+from content_engine.models.database import Image, catalog_session_factory
+
+logger = logging.getLogger(__name__)
+
+
class CatalogService:
    """Manages the image catalog: inserting, querying, and organizing generated images.

    Every public method opens its own short-lived async session from
    ``catalog_session_factory``; the service holds no state of its own.
    """

    async def insert_image(
        self,
        *,
        file_path: str,
        image_bytes: bytes | None = None,
        character_id: str | None = None,
        template_id: str | None = None,
        content_rating: str = "sfw",
        batch_id: str | None = None,
        positive_prompt: str | None = None,
        negative_prompt: str | None = None,
        checkpoint: str | None = None,
        loras: list[dict[str, Any]] | None = None,
        seed: int | None = None,
        steps: int | None = None,
        cfg: float | None = None,
        sampler: str | None = None,
        scheduler: str | None = None,
        width: int | None = None,
        height: int | None = None,
        generation_backend: str | None = None,
        comfyui_prompt_id: str | None = None,
        generation_time_seconds: float | None = None,
        variables: dict[str, str] | None = None,
    ) -> str:
        """Insert a new image record into the catalog. Returns the image ID.

        When ``image_bytes`` is provided, its SHA-256 digest and byte size
        are stored (the bytes themselves are not persisted). Variation
        attributes (pose, outfit, ...) are copied out of ``variables`` into
        their own searchable columns.
        """
        image_id = str(uuid.uuid4())
        variables = variables or {}

        # Compute file hash if bytes provided
        file_hash = None
        file_size = None
        if image_bytes:
            file_hash = hashlib.sha256(image_bytes).hexdigest()
            file_size = len(image_bytes)

        record = Image(
            id=image_id,
            batch_id=batch_id,
            character_id=character_id,
            template_id=template_id,
            content_rating=content_rating,
            positive_prompt=positive_prompt,
            negative_prompt=negative_prompt,
            checkpoint=checkpoint,
            loras_json=json.dumps(loras) if loras else None,
            seed=seed,
            steps=steps,
            cfg=cfg,
            sampler=sampler,
            scheduler=scheduler,
            width=width,
            height=height,
            pose=variables.get("pose"),
            outfit=variables.get("outfit"),
            emotion=variables.get("emotion"),
            camera_angle=variables.get("camera_angle"),
            lighting=variables.get("lighting"),
            scene=variables.get("scene"),
            file_path=file_path,
            file_hash=file_hash,
            file_size=file_size,
            generation_backend=generation_backend,
            comfyui_prompt_id=comfyui_prompt_id,
            generation_time_seconds=generation_time_seconds,
        )

        async with catalog_session_factory() as session:
            session.add(record)
            await session.commit()

        logger.info("Cataloged image %s at %s", image_id, file_path)
        return image_id

    async def get_image(self, image_id: str) -> Image | None:
        """Get a single image by ID (None if not found)."""
        async with catalog_session_factory() as session:
            return await session.get(Image, image_id)

    async def search(
        self,
        *,
        character_id: str | None = None,
        content_rating: str | None = None,
        template_id: str | None = None,
        is_approved: bool | None = None,
        is_published: bool | None = None,
        pose: str | None = None,
        outfit: str | None = None,
        emotion: str | None = None,
        limit: int = 50,
        offset: int = 0,
    ) -> list[Image]:
        """Search images with filters.

        Each non-None argument adds an equality filter (AND semantics);
        results are newest-first with limit/offset pagination.
        """
        stmt = select(Image)

        if character_id is not None:
            stmt = stmt.where(Image.character_id == character_id)
        if content_rating is not None:
            stmt = stmt.where(Image.content_rating == content_rating)
        if template_id is not None:
            stmt = stmt.where(Image.template_id == template_id)
        if is_approved is not None:
            stmt = stmt.where(Image.is_approved == is_approved)
        if is_published is not None:
            stmt = stmt.where(Image.is_published == is_published)
        if pose is not None:
            stmt = stmt.where(Image.pose == pose)
        if outfit is not None:
            stmt = stmt.where(Image.outfit == outfit)
        if emotion is not None:
            stmt = stmt.where(Image.emotion == emotion)

        stmt = stmt.order_by(Image.created_at.desc()).limit(limit).offset(offset)

        async with catalog_session_factory() as session:
            result = await session.execute(stmt)
            return list(result.scalars().all())

    async def approve_image(self, image_id: str) -> bool:
        """Mark an image as approved for publishing. Returns False if not found."""
        async with catalog_session_factory() as session:
            image = await session.get(Image, image_id)
            if not image:
                return False
            image.is_approved = True
            await session.commit()
            return True

    async def delete_image(self, image_id: str) -> bool:
        """Delete an image record and its file from disk. Returns False if not found.

        NOTE(review): the file is unlinked before the DB commit, so a failed
        commit leaves the row pointing at a missing file — confirm this
        ordering is intentional.
        """
        async with catalog_session_factory() as session:
            image = await session.get(Image, image_id)
            if not image:
                return False
            file_path = Path(image.file_path)
            if file_path.exists():
                file_path.unlink()
            await session.delete(image)
            await session.commit()
            return True

    async def get_approved_unpublished(
        self, character_id: str, content_rating: str | None = None, limit: int = 100
    ) -> list[Image]:
        """Get approved but unpublished images for a character, oldest first."""
        stmt = (
            select(Image)
            .where(Image.character_id == character_id)
            .where(Image.is_approved == True)  # noqa: E712
            .where(Image.is_published == False)  # noqa: E712
        )
        if content_rating:
            stmt = stmt.where(Image.content_rating == content_rating)
        stmt = stmt.order_by(Image.created_at.asc()).limit(limit)

        async with catalog_session_factory() as session:
            result = await session.execute(stmt)
            return list(result.scalars().all())

    async def get_total_count(self) -> int:
        """Get total number of images in catalog."""
        async with catalog_session_factory() as session:
            result = await session.execute(select(func.count(Image.id)))
            return result.scalar() or 0

    def resolve_output_path(
        self,
        character_id: str,
        content_rating: str,
        filename: str,
        subfolder: str = "raw",
    ) -> Path:
        """Resolve the output file path for a generated image.

        Structure: output/{character_id}/{rating}/{subfolder}/{year-month}/{filename}

        Side effect: creates the directory tree if it does not exist.
        """
        now = datetime.now()
        date_folder = now.strftime("%Y-%m")
        path = (
            settings.paths.output_dir
            / character_id
            / content_rating
            / subfolder
            / date_folder
        )
        path.mkdir(parents=True, exist_ok=True)
        return path / filename
diff --git a/src/content_engine/services/cloud_providers/__init__.py b/src/content_engine/services/cloud_providers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..15506f868792e34e9715e62a9bb9e23cdd97e409
--- /dev/null
+++ b/src/content_engine/services/cloud_providers/__init__.py
@@ -0,0 +1 @@
+"""Cloud provider integrations for fallback generation."""
diff --git a/src/content_engine/services/cloud_providers/__pycache__/__init__.cpython-311.pyc b/src/content_engine/services/cloud_providers/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..68b70ceb4d63ed2e72c431c11386b5f47d8e6d96
Binary files /dev/null and b/src/content_engine/services/cloud_providers/__pycache__/__init__.cpython-311.pyc differ
diff --git a/src/content_engine/services/cloud_providers/__pycache__/base.cpython-311.pyc b/src/content_engine/services/cloud_providers/__pycache__/base.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..caac7f9069837686cd56ecc4c44b704ade7bfd9b
Binary files /dev/null and b/src/content_engine/services/cloud_providers/__pycache__/base.cpython-311.pyc differ
diff --git a/src/content_engine/services/cloud_providers/__pycache__/wavespeed_provider.cpython-311.pyc b/src/content_engine/services/cloud_providers/__pycache__/wavespeed_provider.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1f211e8737cc205fb306715d61efdf06bd9a5041
Binary files /dev/null and b/src/content_engine/services/cloud_providers/__pycache__/wavespeed_provider.cpython-311.pyc differ
diff --git a/src/content_engine/services/cloud_providers/base.py b/src/content_engine/services/cloud_providers/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4ae240c70b9e6b402ba00ed8fc017a24a2ef0d6
--- /dev/null
+++ b/src/content_engine/services/cloud_providers/base.py
@@ -0,0 +1,56 @@
+"""Abstract base class for cloud generation providers."""
+
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+
+
+@dataclass
+class CloudGenerationResult:
+ """Result from a cloud provider generation."""
+
+ # Provider-assigned (or locally generated) identifier for the job.
+ job_id: str
+ # Raw encoded image file bytes as returned by the provider.
+ image_bytes: bytes
+ # Wall-clock duration of the generation; 0 when the provider cannot report it.
+ generation_time_seconds: float
+
+
+class CloudProvider(ABC):
+ """Abstract interface for cloud GPU providers.
+
+ Implement this for each provider (Replicate, RunPod, fal.ai, etc.).
+
+ Lifecycle: submit_generation() -> poll check_status() until it reports
+ 'completed' or 'failed' -> get_result(). is_available() lets callers
+ pick a configured provider before submitting.
+ """
+
+ @property
+ @abstractmethod
+ def name(self) -> str:
+ """Provider name (e.g., 'replicate', 'runpod')."""
+
+ @abstractmethod
+ async def submit_generation(
+ self,
+ *,
+ positive_prompt: str,
+ negative_prompt: str,
+ checkpoint: str,
+ lora_name: str | None = None,
+ lora_strength: float = 0.85,
+ seed: int = -1,
+ steps: int = 28,
+ cfg: float = 7.0,
+ width: int = 832,
+ height: int = 1216,
+ ) -> str:
+ """Submit a generation job. Returns a job ID for tracking.
+
+ A seed of -1 requests a random seed. Implementations may ignore
+ parameters their backend does not support (e.g. steps/cfg on hosted
+ model APIs).
+ """
+
+ @abstractmethod
+ async def check_status(self, job_id: str) -> str:
+ """Check job status. Returns: 'pending', 'running', 'completed', 'failed'."""
+
+ @abstractmethod
+ async def get_result(self, job_id: str) -> CloudGenerationResult:
+ """Download the completed generation result."""
+
+ @abstractmethod
+ async def is_available(self) -> bool:
+ """Check if this provider is configured and reachable."""
diff --git a/src/content_engine/services/cloud_providers/runpod_pod_provider.py b/src/content_engine/services/cloud_providers/runpod_pod_provider.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9829d1d4d65a938cf91d44a2e4383eedd74335e
--- /dev/null
+++ b/src/content_engine/services/cloud_providers/runpod_pod_provider.py
@@ -0,0 +1,413 @@
+"""RunPod Pod-based generation provider.
+
+Spins up a GPU pod with ComfyUI + FLUX.2 on demand, generates images,
+then optionally shuts down. Simpler than serverless (no custom Docker needed).
+
+The pod uses a pre-built ComfyUI image with FLUX.2 support.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import logging
+import time
+from typing import Any
+
+import httpx
+import runpod
+
+from content_engine.services.cloud_providers.base import CloudGenerationResult, CloudProvider
+
+logger = logging.getLogger(__name__)
+
+# Pre-built ComfyUI template with FLUX support
+# NOTE(review): COMFYUI_TEMPLATE is not referenced anywhere in this module —
+# pods are created from DOCKER_IMAGE below; confirm whether it can be removed.
+COMFYUI_TEMPLATE = "runpod/comfyui:flux" # RunPod's official ComfyUI + FLUX image
+DOCKER_IMAGE = "ghcr.io/ai-dock/comfyui:v2-cuda-12.1.1-base"
+
+# Default GPU for FLUX.2 (needs 24GB VRAM)
+DEFAULT_GPU = "NVIDIA GeForce RTX 4090"
+
+# ComfyUI API port (private port inside the pod; RunPod maps it to a public one)
+COMFYUI_PORT = 8188
+
+
+class RunPodPodProvider(CloudProvider):
+ """Generate images using an on-demand RunPod pod with ComfyUI."""
+
+ def __init__(self, api_key: str, auto_shutdown_minutes: int = 10):
+ self._api_key = api_key
+ # The runpod SDK is configured through module-level state.
+ runpod.api_key = api_key
+ self._auto_shutdown_minutes = auto_shutdown_minutes
+ # Identity/endpoint of the currently managed pod (None when no pod is up).
+ self._pod_id: str | None = None
+ self._pod_ip: str | None = None
+ self._pod_port: int | None = None
+ # Timestamp of the last generation activity; drives the idle auto-shutdown.
+ self._last_activity: float = 0
+ self._http = httpx.AsyncClient(timeout=120)
+ # Background idle-watchdog task created by _schedule_shutdown().
+ self._shutdown_task: asyncio.Task | None = None
+
+ @property
+ def name(self) -> str:
+ return "runpod-pod"
+
+ async def _ensure_pod_running(self) -> tuple[str, int]:
+ """Ensure a ComfyUI pod is running. Returns (ip, port).
+
+ Reuses the tracked pod when RunPod still reports it RUNNING; otherwise
+ creates a fresh pod, waits for both the pod and ComfyUI to come up,
+ and arms the idle auto-shutdown watchdog.
+ """
+ self._last_activity = time.time()
+
+ # Check if existing pod is still running
+ if self._pod_id:
+ try:
+ pod = await asyncio.to_thread(runpod.get_pod, self._pod_id)
+ if pod and pod.get("desiredStatus") == "RUNNING":
+ runtime = pod.get("runtime", {})
+ ports = runtime.get("ports", [])
+ # Refresh the cached public endpoint for the ComfyUI port.
+ for p in ports:
+ if p.get("privatePort") == COMFYUI_PORT:
+ self._pod_ip = p.get("ip")
+ self._pod_port = p.get("publicPort")
+ if self._pod_ip and self._pod_port:
+ return self._pod_ip, self._pod_port
+ except Exception as e:
+ # Treat any lookup failure as "pod gone" and provision a new one.
+ logger.warning("Failed to check pod status: %s", e)
+ self._pod_id = None
+
+ # Create new pod
+ logger.info("Starting ComfyUI pod with FLUX.2...")
+
+ pod = await asyncio.to_thread(
+ runpod.create_pod,
+ name="content-engine-comfyui",
+ image_name=DOCKER_IMAGE,
+ gpu_type_id=DEFAULT_GPU,
+ volume_in_gb=50,
+ container_disk_in_gb=20,
+ ports=f"{COMFYUI_PORT}/http",
+ env={
+ # ai-dock image bootstraps FLUX models via this provisioning script.
+ "PROVISIONING_SCRIPT": "https://raw.githubusercontent.com/ai-dock/comfyui/main/config/provisioning/flux.sh",
+ },
+ )
+
+ self._pod_id = pod["id"]
+ logger.info("Pod created: %s", self._pod_id)
+
+ # Wait for pod to be ready
+ ip, port = await self._wait_for_pod_ready()
+ self._pod_ip = ip
+ self._pod_port = port
+
+ # Wait for ComfyUI to be responsive
+ await self._wait_for_comfyui(ip, port)
+
+ # Schedule auto-shutdown
+ self._schedule_shutdown()
+
+ return ip, port
+
+ async def _wait_for_pod_ready(self, timeout: int = 300) -> tuple[str, int]:
+ """Wait for pod to be running and return ComfyUI endpoint.
+
+ Polls the RunPod API every 5s until the public port mapping for
+ COMFYUI_PORT appears. Raises TimeoutError after ``timeout`` seconds.
+ """
+ start = time.time()
+
+ while time.time() - start < timeout:
+ try:
+ pod = await asyncio.to_thread(runpod.get_pod, self._pod_id)
+
+ if pod.get("desiredStatus") == "RUNNING":
+ runtime = pod.get("runtime", {})
+ ports = runtime.get("ports", [])
+
+ for p in ports:
+ if p.get("privatePort") == COMFYUI_PORT:
+ ip = p.get("ip")
+ port = p.get("publicPort")
+ if ip and port:
+ logger.info("Pod ready at %s:%s", ip, port)
+ return ip, int(port)
+
+ except Exception as e:
+ # Transient API errors are expected while the pod is provisioning.
+ logger.debug("Waiting for pod: %s", e)
+
+ await asyncio.sleep(5)
+
+ raise TimeoutError(f"Pod did not become ready within {timeout}s")
+
+ async def _wait_for_comfyui(self, ip: str, port: int, timeout: int = 300):
+ """Wait for ComfyUI API to be responsive.
+
+ Probes /system_stats every 5s; the pod port being open does not mean
+ ComfyUI has finished loading models. Raises TimeoutError on expiry.
+ """
+ start = time.time()
+ url = f"http://{ip}:{port}/system_stats"
+
+ while time.time() - start < timeout:
+ try:
+ resp = await self._http.get(url)
+ if resp.status_code == 200:
+ logger.info("ComfyUI is ready!")
+ return
+ except Exception:
+ # Connection refused/reset while the server boots — keep polling.
+ pass
+
+ await asyncio.sleep(5)
+ logger.info("Waiting for ComfyUI to start...")
+
+ raise TimeoutError("ComfyUI did not become ready")
+
+ def _schedule_shutdown(self):
+ """Schedule auto-shutdown after idle period.
+
+ Cancels any previous watchdog so only one runs per pod; the watchdog
+ exits after stopping the pod once.
+ """
+ if self._shutdown_task:
+ self._shutdown_task.cancel()
+
+ async def shutdown_if_idle():
+ while True:
+ await asyncio.sleep(60) # Check every minute
+ idle_time = time.time() - self._last_activity
+ if idle_time > self._auto_shutdown_minutes * 60:
+ logger.info("Auto-shutting down idle pod...")
+ await self.shutdown_pod()
+ break
+
+ self._shutdown_task = asyncio.create_task(shutdown_if_idle())
+
+ async def shutdown_pod(self):
+ """Manually shut down the pod.
+
+ Clears the cached pod identity even when the stop call fails, so the
+ next generation provisions a fresh pod.
+ """
+ if self._pod_id:
+ try:
+ # NOTE(review): stop_pod stops (rather than terminates) the pod,
+ # so the attached volume may keep accruing storage cost — confirm
+ # whether terminate is intended here.
+ await asyncio.to_thread(runpod.stop_pod, self._pod_id)
+ logger.info("Pod stopped: %s", self._pod_id)
+ except Exception as e:
+ logger.warning("Failed to stop pod: %s", e)
+ self._pod_id = None
+ self._pod_ip = None
+ self._pod_port = None
+
+ async def submit_generation(
+ self,
+ *,
+ positive_prompt: str,
+ negative_prompt: str = "",
+ checkpoint: str = "flux1-dev.safetensors",
+ lora_name: str | None = None,
+ lora_strength: float = 0.85,
+ seed: int = -1,
+ steps: int = 28,
+ cfg: float = 3.5,
+ width: int = 1024,
+ height: int = 1024,
+ ) -> str:
+ """Submit generation to ComfyUI on the pod."""
+
+ ip, port = await self._ensure_pod_running()
+ self._last_activity = time.time()
+
+ # Build ComfyUI workflow for FLUX
+ workflow = self._build_flux_workflow(
+ prompt=positive_prompt,
+ negative_prompt=negative_prompt,
+ width=width,
+ height=height,
+ steps=steps,
+ cfg=cfg,
+ seed=seed,
+ lora_name=lora_name,
+ lora_strength=lora_strength,
+ )
+
+ # Submit to ComfyUI
+ url = f"http://{ip}:{port}/prompt"
+ resp = await self._http.post(url, json={"prompt": workflow})
+ resp.raise_for_status()
+
+ data = resp.json()
+ prompt_id = data["prompt_id"]
+
+ logger.info("ComfyUI job submitted: %s", prompt_id)
+ return prompt_id
+
+ def _build_flux_workflow(
+ self,
+ prompt: str,
+ negative_prompt: str,
+ width: int,
+ height: int,
+ steps: int,
+ cfg: float,
+ seed: int,
+ lora_name: str | None,
+ lora_strength: float,
+ ) -> dict:
+ """Build a ComfyUI workflow for FLUX generation."""
+ import random
+
+ if seed < 0:
+ seed = random.randint(0, 2**32 - 1)
+
+ # Basic FLUX workflow
+ workflow = {
+ "3": {
+ "class_type": "CheckpointLoaderSimple",
+ "inputs": {"ckpt_name": "flux1-dev.safetensors"},
+ },
+ "6": {
+ "class_type": "CLIPTextEncode",
+ "inputs": {
+ "text": prompt,
+ "clip": ["3", 1],
+ },
+ },
+ "7": {
+ "class_type": "CLIPTextEncode",
+ "inputs": {
+ "text": negative_prompt or "",
+ "clip": ["3", 1],
+ },
+ },
+ "5": {
+ "class_type": "EmptyLatentImage",
+ "inputs": {
+ "width": width,
+ "height": height,
+ "batch_size": 1,
+ },
+ },
+ "10": {
+ "class_type": "KSampler",
+ "inputs": {
+ "seed": seed,
+ "steps": steps,
+ "cfg": cfg,
+ "sampler_name": "euler",
+ "scheduler": "simple",
+ "denoise": 1.0,
+ "model": ["3", 0],
+ "positive": ["6", 0],
+ "negative": ["7", 0],
+ "latent_image": ["5", 0],
+ },
+ },
+ "8": {
+ "class_type": "VAEDecode",
+ "inputs": {
+ "samples": ["10", 0],
+ "vae": ["3", 2],
+ },
+ },
+ "9": {
+ "class_type": "SaveImage",
+ "inputs": {
+ "filename_prefix": "flux_gen",
+ "images": ["8", 0],
+ },
+ },
+ }
+
+ # Add LoRA if specified
+ if lora_name:
+ workflow["4"] = {
+ "class_type": "LoraLoader",
+ "inputs": {
+ "lora_name": lora_name,
+ "strength_model": lora_strength,
+ "strength_clip": lora_strength,
+ "model": ["3", 0],
+ "clip": ["3", 1],
+ },
+ }
+ # Rewire sampler to use LoRA output
+ workflow["10"]["inputs"]["model"] = ["4", 0]
+ workflow["6"]["inputs"]["clip"] = ["4", 1]
+ workflow["7"]["inputs"]["clip"] = ["4", 1]
+
+ return workflow
+
+ async def check_status(self, job_id: str) -> str:
+ """Check ComfyUI job status.
+
+ A job is 'completed' once its /history entry carries outputs (or an
+ explicit completed flag). Transient HTTP errors are reported as
+ 'running' so that polling callers keep retrying until their timeout.
+ """
+ if not self._pod_ip or not self._pod_port:
+ # No pod endpoint means we cannot ever finish this job.
+ return "failed"
+
+ try:
+ url = f"http://{self._pod_ip}:{self._pod_port}/history/{job_id}"
+ resp = await self._http.get(url)
+
+ if resp.status_code == 200:
+ data = resp.json()
+ if job_id in data:
+ outputs = data[job_id].get("outputs", {})
+ if outputs:
+ return "completed"
+ status = data[job_id].get("status", {})
+ if status.get("completed"):
+ return "completed"
+ if status.get("status_str") == "error":
+ return "failed"
+ return "running"
+
+ # Not in history yet — still queued.
+ return "pending"
+
+ except Exception as e:
+ logger.error("Status check failed: %s", e)
+ return "running"
+
+ async def get_result(self, job_id: str) -> CloudGenerationResult:
+ """Get the generated image from ComfyUI.
+
+ Reads /history/{job_id} to discover the saved filename, then downloads
+ it via /view. Raises RuntimeError when the pod is down or the history
+ entry has no image output.
+ """
+ if not self._pod_ip or not self._pod_port:
+ raise RuntimeError("Pod not running")
+
+ # Get history to find output filename
+ url = f"http://{self._pod_ip}:{self._pod_port}/history/{job_id}"
+ resp = await self._http.get(url)
+ resp.raise_for_status()
+
+ data = resp.json()
+ job_data = data.get(job_id, {})
+ outputs = job_data.get("outputs", {})
+
+ # Find the SaveImage output
+ for node_id, node_output in outputs.items():
+ if "images" in node_output:
+ # Only the first image of the first image-producing node is used.
+ image_info = node_output["images"][0]
+ filename = image_info["filename"]
+ subfolder = image_info.get("subfolder", "")
+
+ # Download the image
+ img_url = f"http://{self._pod_ip}:{self._pod_port}/view"
+ params = {"filename": filename}
+ if subfolder:
+ params["subfolder"] = subfolder
+
+ img_resp = await self._http.get(img_url, params=params)
+ img_resp.raise_for_status()
+
+ return CloudGenerationResult(
+ job_id=job_id,
+ image_bytes=img_resp.content,
+ generation_time_seconds=0, # TODO: track actual time
+ )
+
+ raise RuntimeError(f"No image output found for job {job_id}")
+
+ async def wait_for_completion(
+ self,
+ job_id: str,
+ timeout: int = 300,
+ poll_interval: float = 2.0,
+ ) -> CloudGenerationResult:
+ """Wait for job completion."""
+ start = time.time()
+
+ while time.time() - start < timeout:
+ status = await self.check_status(job_id)
+
+ if status == "completed":
+ return await self.get_result(job_id)
+ elif status == "failed":
+ raise RuntimeError(f"ComfyUI job {job_id} failed")
+
+ await asyncio.sleep(poll_interval)
+
+ raise TimeoutError(f"Job {job_id} timed out after {timeout}s")
+
+ async def is_available(self) -> bool:
+ """Check if RunPod API is accessible."""
+ # NOTE(review): only verifies that an API key is set; it never probes the
+ # RunPod API, so a revoked/invalid key still reports available — confirm.
+ return bool(self._api_key)
+
+ async def close(self):
+ """Cleanup: cancel the idle watchdog and close the HTTP client."""
+ if self._shutdown_task:
+ self._shutdown_task.cancel()
+ await self._http.aclose()
diff --git a/src/content_engine/services/cloud_providers/runpod_provider.py b/src/content_engine/services/cloud_providers/runpod_provider.py
new file mode 100644
index 0000000000000000000000000000000000000000..ded8e0ea67ba5d728e91e5a0f70f7171c30ccc81
--- /dev/null
+++ b/src/content_engine/services/cloud_providers/runpod_provider.py
@@ -0,0 +1,247 @@
+"""RunPod serverless generation provider.
+
+Uses RunPod's serverless GPU endpoints for image generation.
+Requires a pre-deployed endpoint with ComfyUI or an SD model.
+
+Setup:
+ 1. Deploy a serverless endpoint on RunPod with your model
+ 2. Set RUNPOD_API_KEY and RUNPOD_ENDPOINT_ID in .env
+"""
+
+from __future__ import annotations
+
+import asyncio
+import base64
+import logging
+import time
+from typing import Any
+
+import httpx
+import runpod
+
+from content_engine.services.cloud_providers.base import CloudGenerationResult, CloudProvider
+
+logger = logging.getLogger(__name__)
+
+# Default timeout for generation (seconds)
+GENERATION_TIMEOUT = 300
+
+
+class RunPodProvider(CloudProvider):
+ """Cloud provider using RunPod serverless endpoints for image generation."""
+
+ def __init__(self, api_key: str, endpoint_id: str):
+ self._api_key = api_key
+ self._endpoint_id = endpoint_id
+ # The runpod SDK is configured through module-level state.
+ runpod.api_key = api_key
+ self._endpoint = runpod.Endpoint(endpoint_id)
+ # In-flight jobs keyed by job id: {"request", "start_time", "status"}.
+ self._jobs: dict[str, dict[str, Any]] = {}
+ # NOTE(review): this client is only ever closed, never used for requests
+ # in the visible code — confirm it is still needed.
+ self._http = httpx.AsyncClient(timeout=60)
+
+ @property
+ def name(self) -> str:
+ return "runpod"
+
+ async def submit_generation(
+ self,
+ *,
+ positive_prompt: str,
+ negative_prompt: str,
+ checkpoint: str,
+ lora_name: str | None = None,
+ lora_strength: float = 0.85,
+ seed: int = -1,
+ steps: int = 28,
+ cfg: float = 7.0,
+ width: int = 832,
+ height: int = 1216,
+ ) -> str:
+ """Submit a generation job to RunPod serverless.
+
+ Returns a job ID for tracking.
+ """
+ # Build input payload for the serverless worker
+ # This assumes a ComfyUI or SD worker that accepts these parameters
+ payload = {
+ "input": {
+ "prompt": positive_prompt,
+ "negative_prompt": negative_prompt,
+ "checkpoint": checkpoint,
+ "width": width,
+ "height": height,
+ "steps": steps,
+ "cfg_scale": cfg,
+ "seed": seed,
+ }
+ }
+
+ # Add LoRA if specified
+ if lora_name:
+ payload["input"]["lora"] = {
+ "name": lora_name,
+ "strength": lora_strength,
+ }
+
+ start_time = time.time()
+
+ try:
+ # Submit async job
+ run_request = await asyncio.to_thread(
+ self._endpoint.run,
+ payload["input"]
+ )
+
+ job_id = run_request.job_id
+ self._jobs[job_id] = {
+ "request": run_request,
+ "start_time": start_time,
+ "status": "pending",
+ }
+
+ logger.info("RunPod job submitted: %s", job_id)
+ return job_id
+
+ except Exception as e:
+ logger.error("RunPod submit failed: %s", e)
+ raise RuntimeError(f"Failed to submit to RunPod: {e}")
+
+ async def check_status(self, job_id: str) -> str:
+ """Check job status. Returns: 'pending', 'running', 'completed', 'failed'."""
+ job_info = self._jobs.get(job_id)
+ if not job_info:
+ return "failed"
+
+ try:
+ run_request = job_info["request"]
+ status = await asyncio.to_thread(run_request.status)
+
+ # Map RunPod statuses to our standard statuses
+ status_map = {
+ "IN_QUEUE": "pending",
+ "IN_PROGRESS": "running",
+ "COMPLETED": "completed",
+ "FAILED": "failed",
+ "CANCELLED": "failed",
+ "TIMED_OUT": "failed",
+ }
+
+ normalized = status_map.get(status, "running")
+ job_info["status"] = normalized
+ return normalized
+
+ except Exception as e:
+ logger.error("Status check failed for %s: %s", job_id, e)
+ return "failed"
+
+ async def get_result(self, job_id: str) -> CloudGenerationResult:
+ """Download the completed generation result."""
+ job_info = self._jobs.get(job_id)
+ if not job_info:
+ raise RuntimeError(f"Job not found: {job_id}")
+
+ try:
+ run_request = job_info["request"]
+ start_time = job_info["start_time"]
+
+ # Get output (blocks until complete or timeout)
+ output = await asyncio.to_thread(run_request.output)
+
+ generation_time = time.time() - start_time
+
+ # Parse output - format depends on worker implementation
+ # Common formats:
+ # 1. {"image_url": "data:image/png;base64,..."}
+ # 2. {"images": ["base64..."]}
+ # 3. {"output": [{"image": "base64..."}]}
+
+ image_bytes = self._extract_image_from_output(output)
+
+ # Cleanup
+ self._jobs.pop(job_id, None)
+
+ return CloudGenerationResult(
+ job_id=job_id,
+ image_bytes=image_bytes,
+ generation_time_seconds=generation_time,
+ )
+
+ except Exception as e:
+ logger.error("Failed to get result for %s: %s", job_id, e)
+ raise RuntimeError(f"Failed to get RunPod result: {e}")
+
+ def _extract_image_from_output(self, output: Any) -> bytes:
+ """Extract image bytes from various output formats."""
+ if isinstance(output, dict):
+ # Format: {"image_url": "data:image/png;base64,..."}
+ if "image_url" in output:
+ return self._decode_data_url(output["image_url"])
+
+ # Format: {"image": "base64..."}
+ if "image" in output:
+ return base64.b64decode(output["image"])
+
+ # Format: {"images": ["base64..."]}
+ if "images" in output and output["images"]:
+ return base64.b64decode(output["images"][0])
+
+ # Format: {"output": {"image": "..."}}
+ if "output" in output:
+ return self._extract_image_from_output(output["output"])
+
+ elif isinstance(output, list) and output:
+ # Format: [{"image_url": "..."}]
+ return self._extract_image_from_output(output[0])
+
+ elif isinstance(output, str):
+ # Direct base64 string or data URL
+ if output.startswith("data:image"):
+ return self._decode_data_url(output)
+ return base64.b64decode(output)
+
+ raise ValueError(f"Could not extract image from output: {type(output)}")
+
+ def _decode_data_url(self, data_url: str) -> bytes:
+ """Decode a data:image/xxx;base64,... URL to bytes."""
+ if "," in data_url:
+ _, base64_data = data_url.split(",", 1)
+ return base64.b64decode(base64_data)
+ return base64.b64decode(data_url)
+
+ async def is_available(self) -> bool:
+ """Check if RunPod is configured and reachable."""
+ if not self._api_key or not self._endpoint_id:
+ return False
+
+ try:
+ # Try to check endpoint health
+ # RunPod SDK doesn't have a direct health check, so we verify the API key works
+ runpod.api_key = self._api_key
+ # NOTE(review): no API call is actually made here, so this returns
+ # True for any non-empty credentials — confirm whether a real probe
+ # (e.g. endpoint health query) is wanted.
+ return True
+ except Exception:
+ return False
+
+ async def wait_for_completion(
+ self,
+ job_id: str,
+ timeout: int = GENERATION_TIMEOUT,
+ poll_interval: float = 2.0,
+ ) -> CloudGenerationResult:
+ """Wait for job completion and return result."""
+ start = time.time()
+
+ while time.time() - start < timeout:
+ status = await self.check_status(job_id)
+
+ if status == "completed":
+ return await self.get_result(job_id)
+ elif status == "failed":
+ raise RuntimeError(f"RunPod job {job_id} failed")
+
+ await asyncio.sleep(poll_interval)
+
+ raise TimeoutError(f"RunPod job {job_id} timed out after {timeout}s")
+
+ async def close(self):
+ """Close HTTP client. Call once when the provider is no longer needed."""
+ await self._http.aclose()
diff --git a/src/content_engine/services/cloud_providers/wavespeed_provider.py b/src/content_engine/services/cloud_providers/wavespeed_provider.py
new file mode 100644
index 0000000000000000000000000000000000000000..9810d0f65b7dc287fafd94f4d57853b10ca539a3
--- /dev/null
+++ b/src/content_engine/services/cloud_providers/wavespeed_provider.py
@@ -0,0 +1,427 @@
+"""WaveSpeed.ai cloud provider — integrates NanoBanana, SeeDream and other models.
+
+WaveSpeed provides fast cloud inference for text-to-image and image editing models
+including Google NanoBanana and ByteDance SeeDream series.
+
+Text-to-image models:
+ - google-nano-banana-text-to-image
+ - google-nano-banana-pro-text-to-image
+ - bytedance-seedream-v3 / v3.1 / v4 / v4.5
+
+Image editing models (accept reference images):
+ - bytedance-seedream-v4.5-edit
+ - bytedance-seedream-v4-edit
+ - google-nano-banana-edit
+ - google-nano-banana-pro-edit
+
+SDK: pip install wavespeed
+Docs: https://wavespeed.ai/docs
+"""
+
+from __future__ import annotations
+
+import asyncio
+import base64
+import logging
+import time
+import uuid
+from typing import Any
+
+import httpx
+from wavespeed import Client as WaveSpeedClient
+
+from content_engine.services.cloud_providers.base import CloudGenerationResult, CloudProvider
+
+logger = logging.getLogger(__name__)
+
+# Map friendly names to WaveSpeed model IDs (text-to-image)
+MODEL_MAP = {
+ # NanoBanana
+ "nano-banana": "google-nano-banana-text-to-image",
+ "nano-banana-pro": "google-nano-banana-pro-text-to-image",
+ # SeeDream
+ "seedream-3": "bytedance-seedream-v3",
+ "seedream-3.1": "bytedance-seedream-v3.1",
+ "seedream-4": "bytedance-seedream-v4",
+ "seedream-4.5": "bytedance-seedream-v4.5",
+ # Default
+ "default": "bytedance-seedream-v4.5",
+}
+
+# Map friendly names to WaveSpeed edit model API paths
+EDIT_MODEL_MAP = {
+ "seedream-4.5-edit": "bytedance/seedream-v4.5/edit",
+ "seedream-4-edit": "bytedance/seedream-v4/edit",
+ "nano-banana-edit": "google/nano-banana/edit",
+ "nano-banana-pro-edit": "google/nano-banana-pro/edit",
+ # Default edit model
+ "default": "bytedance/seedream-v4.5/edit",
+}
+
+WAVESPEED_API_BASE = "https://api.wavespeed.ai/api/v3"
+
+
+class WaveSpeedProvider(CloudProvider):
+ """Cloud provider using WaveSpeed.ai for NanoBanana and SeeDream models."""
+
+ def __init__(self, api_key: str):
+ self._api_key = api_key
+ # SDK client for text-to-image; edit endpoints use _http_client directly.
+ self._client = WaveSpeedClient(api_key=api_key)
+ # Generous timeout: image downloads and sync-mode edits can be slow.
+ self._http_client = httpx.AsyncClient(timeout=300)
+
+ @property
+ def name(self) -> str:
+ return "wavespeed"
+
+ def _resolve_model(self, model_name: str | None) -> str:
+ """Resolve a friendly model name to a WaveSpeed model ID."""
+ if model_name and model_name in MODEL_MAP:
+ return MODEL_MAP[model_name]
+ if model_name:
+ return model_name
+ return MODEL_MAP["default"]
+
+ def _resolve_edit_model(self, model_name: str | None) -> str:
+ """Resolve a friendly name to a WaveSpeed edit model API path."""
+ if model_name and model_name in EDIT_MODEL_MAP:
+ return EDIT_MODEL_MAP[model_name]
+ if model_name:
+ return model_name
+ return EDIT_MODEL_MAP["default"]
+
+ @staticmethod
+ def _ensure_min_image_size(image_bytes: bytes, min_pixels: int = 3686400) -> bytes:
+ """Upscale image if total pixel count is below the minimum required by the API.
+
+ WaveSpeed edit APIs require images to be at least 3686400 pixels (~1920x1920).
+ Uses Lanczos resampling for quality. Returns the original bytes untouched
+ when the image is already large enough; otherwise re-encodes as PNG.
+ """
+ import io
+ from PIL import Image
+
+ img = Image.open(io.BytesIO(image_bytes))
+ w, h = img.size
+ current_pixels = w * h
+
+ if current_pixels >= min_pixels:
+ return image_bytes
+
+ # Scale up proportionally to meet minimum
+ scale = (min_pixels / current_pixels) ** 0.5
+ new_w = int(w * scale) + 1 # +1 to ensure we exceed minimum
+ new_h = int(h * scale) + 1
+ logger.info("Upscaling image from %dx%d (%d px) to %dx%d (%d px) for API minimum",
+ w, h, current_pixels, new_w, new_h, new_w * new_h)
+ # NOTE(review): Image.LANCZOS is the legacy alias; newer Pillow prefers
+ # Image.Resampling.LANCZOS — confirm the pinned Pillow version supports it.
+ img = img.resize((new_w, new_h), Image.LANCZOS)
+
+ buf = io.BytesIO()
+ img.save(buf, format="PNG")
+ return buf.getvalue()
+
+ async def _upload_temp_image(self, image_bytes: bytes) -> str:
+ """Upload image to a temporary public host and return the URL.
+
+ Tries litterbox.catbox.moe (1h expiry), then imgbb, then 0x0.st, and
+ raises RuntimeError if every host fails. WaveSpeed edit APIs require
+ publicly accessible image URLs, so the reference image necessarily
+ leaves the local machine.
+ """
+ try:
+ # Try catbox.moe litterbox (temporary file hosting, 1h expiry)
+ import aiohttp
+ async with aiohttp.ClientSession() as session:
+ data = aiohttp.FormData()
+ data.add_field("reqtype", "fileupload")
+ data.add_field("time", "1h")
+ data.add_field(
+ "fileToUpload",
+ image_bytes,
+ filename="ref_image.png",
+ content_type="image/png",
+ )
+ async with session.post(
+ "https://litterbox.catbox.moe/resources/internals/api.php",
+ data=data,
+ ) as resp:
+ if resp.status == 200:
+ # Litterbox responds with the bare URL as plain text.
+ url = (await resp.text()).strip()
+ if url.startswith("http"):
+ logger.info("Uploaded temp image: %s", url)
+ return url
+ except Exception as e:
+ logger.warning("Catbox upload failed: %s", e)
+
+ # Fallback: try imgbb (free, no key needed for anonymous uploads)
+ # NOTE(review): imgbb's API normally rejects requests without a non-empty
+ # API key, so this fallback likely always fails — confirm.
+ try:
+ b64 = base64.b64encode(image_bytes).decode()
+ resp = await self._http_client.post(
+ "https://api.imgbb.com/1/upload",
+ data={"image": b64, "expiration": 3600},
+ params={"key": ""}, # Anonymous upload
+ )
+ if resp.status_code == 200:
+ url = resp.json()["data"]["url"]
+ logger.info("Uploaded temp image to imgbb: %s", url)
+ return url
+ except Exception as e:
+ logger.warning("imgbb upload failed: %s", e)
+
+ # Last resort: use 0x0.st
+ try:
+ import aiohttp
+ async with aiohttp.ClientSession() as session:
+ data = aiohttp.FormData()
+ data.add_field(
+ "file",
+ image_bytes,
+ filename="ref_image.png",
+ content_type="image/png",
+ )
+ async with session.post("https://0x0.st", data=data) as resp:
+ if resp.status == 200:
+ url = (await resp.text()).strip()
+ if url.startswith("http"):
+ logger.info("Uploaded temp image to 0x0.st: %s", url)
+ return url
+ except Exception as e:
+ logger.warning("0x0.st upload failed: %s", e)
+
+ raise RuntimeError(
+ "Failed to upload reference image to a public host. "
+ "WaveSpeed edit APIs require publicly accessible image URLs."
+ )
+
+ async def submit_generation(
+ self,
+ *,
+ positive_prompt: str,
+ negative_prompt: str = "",
+ checkpoint: str = "",
+ lora_name: str | None = None,
+ lora_strength: float = 0.85,
+ seed: int = -1,
+ steps: int = 28,
+ cfg: float = 7.0,
+ width: int = 832,
+ height: int = 1216,
+ model: str | None = None,
+ ) -> str:
+ """Submit a generation job to WaveSpeed. Returns a job ID."""
+ wavespeed_model = self._resolve_model(model)
+
+ payload: dict[str, Any] = {
+ "prompt": positive_prompt,
+ "output_format": "png",
+ }
+
+ if negative_prompt:
+ payload["negative_prompt"] = negative_prompt
+
+ payload["width"] = width
+ payload["height"] = height
+
+ if seed >= 0:
+ payload["seed"] = seed
+
+ if lora_name:
+ payload["loras"] = [{"path": lora_name, "scale": lora_strength}]
+
+ logger.info("Submitting to WaveSpeed model=%s", wavespeed_model)
+
+ try:
+ output = self._client.run(
+ wavespeed_model,
+ payload,
+ timeout=300.0,
+ poll_interval=2.0,
+ )
+ job_id = str(uuid.uuid4())
+ self._last_result = {
+ "job_id": job_id,
+ "output": output,
+ "timestamp": time.time(),
+ }
+ return job_id
+
+ except Exception as e:
+ logger.error("WaveSpeed generation failed: %s", e)
+ raise
+
+ async def submit_edit(
+ self,
+ *,
+ prompt: str,
+ image_urls: list[str],
+ model: str | None = None,
+ size: str | None = None,
+ ) -> str:
+ """Submit an image editing job to WaveSpeed. Returns a job ID.
+
+ Uses the SeeDream Edit or NanoBanana Edit APIs which accept reference
+ images and apply prompt-guided transformations while preserving identity.
+ With enable_sync_mode the HTTP response already contains the finished
+ result, which is cached under a locally generated job id for
+ get_result().
+ """
+ edit_model_path = self._resolve_edit_model(model)
+ endpoint = f"{WAVESPEED_API_BASE}/{edit_model_path}"
+
+ payload: dict[str, Any] = {
+ "prompt": prompt,
+ # images must be publicly reachable URLs (see _upload_temp_image).
+ "images": image_urls,
+ "enable_sync_mode": True,
+ "output_format": "png",
+ }
+
+ if size:
+ payload["size"] = size
+
+ logger.info("Submitting edit to WaveSpeed model=%s images=%d", edit_model_path, len(image_urls))
+
+ try:
+ resp = await self._http_client.post(
+ endpoint,
+ json=payload,
+ headers={
+ "Authorization": f"Bearer {self._api_key}",
+ "Content-Type": "application/json",
+ },
+ )
+ resp.raise_for_status()
+ result_data = resp.json()
+
+ # Sync mode: the response body is the final result — cache it.
+ job_id = str(uuid.uuid4())
+ self._last_result = {
+ "job_id": job_id,
+ "output": result_data,
+ "timestamp": time.time(),
+ }
+ return job_id
+
+ except httpx.HTTPStatusError as e:
+ body = e.response.text
+ # Truncate the body: provider errors can embed large payloads.
+ logger.error("WaveSpeed edit failed (HTTP %d): %s", e.response.status_code, body[:500])
+ raise RuntimeError(f"WaveSpeed edit API error: {body[:200]}") from e
+ except Exception as e:
+ logger.error("WaveSpeed edit failed: %s", e)
+ raise
+
+ async def edit_image(
+ self,
+ *,
+ prompt: str,
+ image_bytes: bytes,
+ model: str | None = None,
+ size: str | None = None,
+ ) -> CloudGenerationResult:
+ """Full edit flow: upload image to temp host, call edit API, download result."""
+ start = time.time()
+
+ # WaveSpeed edit APIs require minimum image size (3686400 pixels = ~1920x1920)
+ # Auto-upscale small images to meet the requirement
+ image_bytes = self._ensure_min_image_size(image_bytes, min_pixels=3686400)
+
+ # Upload reference image to a public URL
+ image_url = await self._upload_temp_image(image_bytes)
+
+ # Submit edit job
+ job_id = await self.submit_edit(
+ prompt=prompt,
+ image_urls=[image_url],
+ model=model,
+ size=size,
+ )
+
+ # Get result (already cached by submit_edit with sync mode)
+ return await self.get_result(job_id)
+
+ async def check_status(self, job_id: str) -> str:
+ """Check job status. WaveSpeed SDK polls internally, so completed jobs are immediate."""
+ if hasattr(self, '_last_result') and self._last_result.get("job_id") == job_id:
+ return "completed"
+ return "unknown"
+
+ async def get_result(self, job_id: str) -> CloudGenerationResult:
+ """Get the generation result including image bytes.
+
+ Only the most recent job is cached (see submit_generation/submit_edit),
+ so this must be called with the job id returned by the latest submit.
+ Downloads the image from the URL embedded in the cached response.
+
+ Raises:
+ RuntimeError: unknown job id, failed generation, or missing URL.
+ """
+ if not hasattr(self, '_last_result') or self._last_result.get("job_id") != job_id:
+ raise RuntimeError(f"No cached result for job {job_id}")
+
+ output = self._last_result["output"]
+ # Elapsed time since submission, including this download.
+ elapsed = time.time() - self._last_result["timestamp"]
+
+ # Extract image URL from output — handle various response shapes
+ image_url = None
+ if isinstance(output, dict):
+ # Check for failed status (API may return 200 with status:failed inside)
+ data = output.get("data", output)
+ if data.get("status") == "failed":
+ error_msg = data.get("error", "Unknown error")
+ raise RuntimeError(f"WaveSpeed generation failed: {error_msg}")
+
+ # Direct API response: {"data": {"outputs": [url, ...]}}
+ outputs = data.get("outputs", [])
+ if outputs:
+ image_url = outputs[0]
+ elif "output" in data:
+ # SDK-style response: {"output": [url]} or {"output": url}
+ out = data["output"]
+ if isinstance(out, list) and out:
+ image_url = out[0]
+ elif isinstance(out, str):
+ image_url = out
+ elif isinstance(output, list) and output:
+ # Bare list of URLs.
+ image_url = output[0]
+ elif isinstance(output, str):
+ # Bare URL string.
+ image_url = output
+
+ if not image_url:
+ raise RuntimeError(f"No image URL in WaveSpeed output: {output}")
+
+ # Download the image
+ logger.info("Downloading from WaveSpeed: %s", image_url[:80])
+ response = await self._http_client.get(image_url)
+ response.raise_for_status()
+
+ return CloudGenerationResult(
+ job_id=job_id,
+ image_bytes=response.content,
+ generation_time_seconds=elapsed,
+ )
+
+ async def generate(
+ self,
+ *,
+ positive_prompt: str,
+ negative_prompt: str = "",
+ model: str | None = None,
+ width: int = 1024,
+ height: int = 1024,
+ seed: int = -1,
+ lora_name: str | None = None,
+ lora_strength: float = 0.85,
+ ) -> CloudGenerationResult:
+ """Convenience method: submit + get result in one call."""
+ job_id = await self.submit_generation(
+ positive_prompt=positive_prompt,
+ negative_prompt=negative_prompt,
+ model=model,
+ width=width,
+ height=height,
+ seed=seed,
+ lora_name=lora_name,
+ lora_strength=lora_strength,
+ )
+ return await self.get_result(job_id)
+
+ async def is_available(self) -> bool:
+ """Check if WaveSpeed API is reachable with valid credentials."""
+ # NOTE(review): this probe runs a real generation ("test" prompt), which
+ # is a blocking SDK call inside a coroutine and may incur API cost on
+ # every availability check — confirm this is intended.
+ try:
+ test = self._client.run(
+ "wavespeed-ai/z-image/turbo",
+ {"prompt": "test"},
+ enable_sync_mode=True,
+ timeout=10.0,
+ )
+ return True
+ except Exception:
+ # Fall back to a lightweight authenticated health-endpoint check.
+ try:
+ resp = await self._http_client.get(
+ "https://api.wavespeed.ai/api/v3/health",
+ headers={"Authorization": f"Bearer {self._api_key}"},
+ )
+ # Any non-5xx (including auth errors) counts as "reachable".
+ return resp.status_code < 500
+ except Exception:
+ return False
diff --git a/src/content_engine/services/comfyui_client.py b/src/content_engine/services/comfyui_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2c12206bb94ed42233c81235e46856081355558
--- /dev/null
+++ b/src/content_engine/services/comfyui_client.py
@@ -0,0 +1,227 @@
+"""Async ComfyUI API client using aiohttp for HTTP and WebSocket communication.
+
+Based on the pattern from ComfyUI's own websockets_api_example.py.
+Communicates with ComfyUI at http://127.0.0.1:8188.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import json
+import logging
+import uuid
+from dataclasses import dataclass, field
+from typing import Any
+
+import aiohttp
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class ComfyUIResult:
+    """Result from a completed ComfyUI generation."""
+
+    # The prompt id the result belongs to.
+    prompt_id: str
+    # Raw "outputs" mapping from ComfyUI's /history response (node id -> output dict).
+    outputs: dict[str, Any] = field(default_factory=dict)
+    # Flattened image entries extracted from `outputs` (forward ref resolved
+    # lazily thanks to `from __future__ import annotations`).
+    images: list[ImageOutput] = field(default_factory=list)
+
+
+@dataclass
+class ImageOutput:
+    """A single output image from ComfyUI, addressable via GET /view."""
+
+    filename: str
+    # Subfolder below ComfyUI's output/temp directory; may be "".
+    subfolder: str
+    type: str  # "output" or "temp"
+
+
+class ComfyUIError(Exception):
+    """Raised when ComfyUI returns an error or a wait times out."""
+
+
+class ComfyUIClient:
+ """Async client for the ComfyUI API.
+
+ Usage:
+ client = ComfyUIClient("http://127.0.0.1:8188")
+ result = await client.generate(workflow_dict)
+ image_bytes = await client.download_image(result.images[0])
+ """
+
+ def __init__(self, base_url: str = "http://127.0.0.1:8188"):
+ self.base_url = base_url.rstrip("/")
+ self.client_id = str(uuid.uuid4())
+ self._session: aiohttp.ClientSession | None = None
+
+ async def _get_session(self) -> aiohttp.ClientSession:
+ if self._session is None or self._session.closed:
+ self._session = aiohttp.ClientSession()
+ return self._session
+
+ async def close(self) -> None:
+ if self._session and not self._session.closed:
+ await self._session.close()
+
+ # --- Core generation ---
+
+ async def queue_prompt(self, workflow: dict) -> str:
+ """Submit a workflow to ComfyUI. Returns the prompt_id."""
+ prompt_id = str(uuid.uuid4())
+ payload = {
+ "prompt": workflow,
+ "client_id": self.client_id,
+ "prompt_id": prompt_id,
+ }
+ session = await self._get_session()
+ async with session.post(f"{self.base_url}/prompt", json=payload) as resp:
+ if resp.status != 200:
+ body = await resp.text()
+ raise ComfyUIError(f"Prompt rejected (HTTP {resp.status}): {body}")
+ data = await resp.json()
+ return data.get("prompt_id", prompt_id)
+
+ async def wait_for_completion(
+ self, prompt_id: str, timeout: float = 600
+ ) -> ComfyUIResult:
+ """Wait for a prompt to finish via WebSocket, then fetch results."""
+ ws_host = self.base_url.replace("http://", "").replace("https://", "")
+ ws_url = f"ws://{ws_host}/ws?clientId={self.client_id}"
+
+ session = await self._get_session()
+ try:
+ async with asyncio.timeout(timeout):
+ async with session.ws_connect(ws_url) as ws:
+ async for msg in ws:
+ if msg.type == aiohttp.WSMsgType.TEXT:
+ data = json.loads(msg.data)
+ if data.get("type") == "executing":
+ exec_data = data.get("data", {})
+ if (
+ exec_data.get("node") is None
+ and exec_data.get("prompt_id") == prompt_id
+ ):
+ break
+ # Binary messages are latent previews — skip
+ except TimeoutError:
+ raise ComfyUIError(
+ f"Timeout waiting for prompt {prompt_id} after {timeout}s"
+ )
+
+ return await self._fetch_result(prompt_id)
+
+ async def generate(self, workflow: dict, timeout: float = 600) -> ComfyUIResult:
+ """Submit workflow and wait for completion. Returns the result."""
+ prompt_id = await self.queue_prompt(workflow)
+ logger.info("Queued prompt %s", prompt_id)
+ return await self.wait_for_completion(prompt_id, timeout)
+
+ # --- Result fetching ---
+
+ async def _fetch_result(self, prompt_id: str) -> ComfyUIResult:
+ """Fetch history for a completed prompt and extract image outputs."""
+ history = await self.get_history(prompt_id)
+ prompt_history = history.get(prompt_id, {})
+ outputs = prompt_history.get("outputs", {})
+
+ images: list[ImageOutput] = []
+ for _node_id, node_output in outputs.items():
+ for img_info in node_output.get("images", []):
+ images.append(
+ ImageOutput(
+ filename=img_info["filename"],
+ subfolder=img_info.get("subfolder", ""),
+ type=img_info.get("type", "output"),
+ )
+ )
+
+ return ComfyUIResult(
+ prompt_id=prompt_id,
+ outputs=outputs,
+ images=images,
+ )
+
+ async def download_image(self, image: ImageOutput) -> bytes:
+ """Download an output image from ComfyUI."""
+ params = {
+ "filename": image.filename,
+ "subfolder": image.subfolder,
+ "type": image.type,
+ }
+ session = await self._get_session()
+ async with session.get(f"{self.base_url}/view", params=params) as resp:
+ if resp.status != 200:
+ raise ComfyUIError(f"Failed to download image: HTTP {resp.status}")
+ return await resp.read()
+
+ # --- Monitoring ---
+
+ async def get_history(self, prompt_id: str) -> dict:
+ """Get execution history for a prompt."""
+ session = await self._get_session()
+ async with session.get(f"{self.base_url}/history/{prompt_id}") as resp:
+ return await resp.json()
+
+ async def get_system_stats(self) -> dict:
+ """Get system stats including GPU VRAM info."""
+ session = await self._get_session()
+ async with session.get(f"{self.base_url}/system_stats") as resp:
+ return await resp.json()
+
+ async def get_queue_info(self) -> dict:
+ """Get current queue state (running + pending)."""
+ session = await self._get_session()
+ async with session.get(f"{self.base_url}/prompt") as resp:
+ return await resp.json()
+
+ async def get_queue_depth(self) -> int:
+ """Get number of pending items in the queue."""
+ info = await self.get_queue_info()
+ return len(info.get("queue_pending", []))
+
+ async def get_vram_free_gb(self) -> float | None:
+ """Get free VRAM in GB, or None if unavailable."""
+ try:
+ stats = await self.get_system_stats()
+ devices = stats.get("devices", [])
+ if devices:
+ return devices[0].get("vram_free", 0) / (1024**3)
+ except Exception:
+ logger.warning("Failed to get VRAM stats", exc_info=True)
+ return None
+
+ async def is_available(self) -> bool:
+ """Check if ComfyUI is reachable."""
+ try:
+ session = await self._get_session()
+ async with session.get(
+ f"{self.base_url}/system_stats", timeout=aiohttp.ClientTimeout(total=5)
+ ) as resp:
+ return resp.status == 200
+ except Exception:
+ return False
+
+ async def upload_image(
+ self, image_bytes: bytes, filename: str, overwrite: bool = True
+ ) -> str:
+ """Upload an image to ComfyUI's input directory. Returns the stored filename."""
+ session = await self._get_session()
+ data = aiohttp.FormData()
+ data.add_field(
+ "image", image_bytes, filename=filename, content_type="image/png"
+ )
+ data.add_field("overwrite", str(overwrite).lower())
+ async with session.post(f"{self.base_url}/upload/image", data=data) as resp:
+ if resp.status != 200:
+ body = await resp.text()
+ raise ComfyUIError(f"Image upload failed (HTTP {resp.status}): {body}")
+ result = await resp.json()
+ return result.get("name", filename)
+
+ async def get_models(self, folder: str = "loras") -> list[str]:
+ """List available models in a folder (loras, checkpoints, etc.)."""
+ session = await self._get_session()
+ async with session.get(f"{self.base_url}/models/{folder}") as resp:
+ if resp.status == 200:
+ return await resp.json()
+ return []
diff --git a/src/content_engine/services/lora_trainer.py b/src/content_engine/services/lora_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..22c404dab6568533f4ffbcc574a99c37be23a004
--- /dev/null
+++ b/src/content_engine/services/lora_trainer.py
@@ -0,0 +1,353 @@
+"""LoRA training service — train custom LoRA models from reference images.
+
+Wraps Kohya's sd-scripts for LoRA training with sensible defaults for
+character LoRAs on SD 1.5 / RealisticVision. Manages the full pipeline:
+dataset preparation, config generation, training launch, and output handling.
+
+Requirements (installed automatically on first use):
+ - kohya sd-scripts (cloned from GitHub)
+ - accelerate, lion-pytorch, prodigy-optimizer
+"""
+
+from __future__ import annotations
+
+import asyncio
+import json
+import logging
+import os
+import shutil
+import subprocess
+import sys
+import time
+import uuid
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any
+
+logger = logging.getLogger(__name__)
+
+# True when running inside a Hugging Face Space (either env var is set there).
+IS_HF_SPACES = os.environ.get("HF_SPACES") == "1" or os.environ.get("SPACE_ID") is not None
+
+if IS_HF_SPACES:
+    # Container paths — match the Dockerfile's /app/data layout.
+    TRAINING_BASE_DIR = Path("/app/data/training")
+    LORA_OUTPUT_DIR = Path("/app/data/loras")
+else:
+    # Local Windows development machine paths.
+    TRAINING_BASE_DIR = Path("D:/AI automation/content_engine/training")
+    LORA_OUTPUT_DIR = Path("D:/ComfyUI/Models/Lora")
+
+# Where Kohya sd-scripts gets cloned by install_sd_scripts().
+SD_SCRIPTS_DIR = TRAINING_BASE_DIR / "sd-scripts"
+
+
+def _default_base_model() -> str:
+ """Get default base model path based on environment."""
+ if IS_HF_SPACES:
+ return "/app/models/realisticVisionV51_v51VAE.safetensors"
+ return "D:/ComfyUI/Models/StableDiffusion/realisticVisionV51_v51VAE.safetensors"
+
+
+@dataclass
+class TrainingConfig:
+    """Configuration for a LoRA training job.
+
+    Field values map 1:1 onto Kohya sd-scripts train_network.py flags
+    (see LoRATrainer._build_training_command).
+    """
+
+    name: str
+    base_model: str = ""  # Set in __post_init__
+    resolution: int = 512
+    train_batch_size: int = 1
+    num_epochs: int = 10
+    learning_rate: float = 1e-4
+    network_rank: int = 32  # LoRA rank (dim)
+    network_alpha: int = 16
+    optimizer: str = "AdamW8bit"  # AdamW8bit, Lion, Prodigy
+    lr_scheduler: str = "cosine_with_restarts"
+    max_train_steps: int | None = None  # If set, overrides epochs
+    save_every_n_epochs: int = 2
+    clip_skip: int = 1
+    mixed_precision: str = "fp16"
+    seed: int = 42
+    caption_extension: str = ".txt"
+    # Token written into each image's caption file during dataset prep.
+    trigger_word: str = ""
+    # Currently unused by _build_training_command — reserved for extensions.
+    extra_args: dict[str, Any] = field(default_factory=dict)
+
+    def __post_init__(self):
+        # Resolve the environment-dependent default lazily, so the dataclass
+        # default stays a plain (picklable) string.
+        if not self.base_model:
+            self.base_model = _default_base_model()
+
+
+@dataclass
+class TrainingJob:
+    """Tracks state of a running or completed training job.
+
+    Mutated in place by LoRATrainer._run_training as the subprocess emits
+    progress lines; held in memory only (lost on restart).
+    """
+
+    id: str
+    name: str
+    config: TrainingConfig
+    status: str = "pending"  # pending, preparing, training, completed, failed
+    progress: float = 0.0  # 0.0 .. 1.0, derived from epoch or step counters
+    current_epoch: int = 0
+    total_epochs: int = 0
+    current_step: int = 0
+    total_steps: int = 0
+    loss: float | None = None  # last loss value parsed from trainer output
+    started_at: float | None = None  # time.time() timestamps
+    completed_at: float | None = None
+    output_path: str | None = None  # final .safetensors location, once copied
+    error: str | None = None
+    log_lines: list[str] = field(default_factory=list)  # capped at 200 lines
+
+
+class LoRATrainer:
+ """Manages LoRA training jobs using Kohya sd-scripts."""
+
+ def __init__(self):
+ self._jobs: dict[str, TrainingJob] = {}
+ self._processes: dict[str, asyncio.subprocess.Process] = {}
+ TRAINING_BASE_DIR.mkdir(parents=True, exist_ok=True)
+
+ @property
+ def sd_scripts_installed(self) -> bool:
+ return (SD_SCRIPTS_DIR / "train_network.py").exists()
+
+ async def install_sd_scripts(self) -> str:
+ """Clone and set up Kohya sd-scripts. Returns status message."""
+ if self.sd_scripts_installed:
+ return "sd-scripts already installed"
+
+ SD_SCRIPTS_DIR.parent.mkdir(parents=True, exist_ok=True)
+
+ logger.info("Cloning kohya sd-scripts...")
+ proc = await asyncio.create_subprocess_exec(
+ "git", "clone", "--depth", "1",
+ "https://github.com/kohya-ss/sd-scripts.git",
+ str(SD_SCRIPTS_DIR),
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.PIPE,
+ )
+ stdout, stderr = await proc.communicate()
+ if proc.returncode != 0:
+ raise RuntimeError(f"Failed to clone sd-scripts: {stderr.decode()}")
+
+ # Install requirements
+ logger.info("Installing sd-scripts requirements...")
+ proc = await asyncio.create_subprocess_exec(
+ sys.executable, "-m", "pip", "install",
+ "accelerate", "lion-pytorch", "prodigy-optimizer",
+ "safetensors", "diffusers", "transformers",
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.PIPE,
+ )
+ await proc.communicate()
+
+ logger.info("sd-scripts installation complete")
+ return "sd-scripts installed successfully"
+
+ def prepare_dataset(self, job_id: str, image_paths: list[str], trigger_word: str = "") -> Path:
+ """Prepare a training dataset directory with proper structure.
+
+ Creates: training/{job_id}/dataset/{num_repeats}_{trigger_word}/
+ Each image gets a caption file with the trigger word.
+ """
+ dataset_dir = TRAINING_BASE_DIR / job_id / "dataset"
+ # Convention: {repeats}_{concept_name}
+ repeats = 10
+ concept_dir = dataset_dir / f"{repeats}_{trigger_word or 'character'}"
+ concept_dir.mkdir(parents=True, exist_ok=True)
+
+ for img_path in image_paths:
+ src = Path(img_path)
+ if not src.exists():
+ logger.warning("Image not found: %s", img_path)
+ continue
+ dst = concept_dir / src.name
+ shutil.copy2(src, dst)
+
+ # Create caption file
+ caption_file = dst.with_suffix(".txt")
+ caption_file.write_text(trigger_word or "")
+
+ return dataset_dir
+
+ async def start_training(self, config: TrainingConfig, image_paths: list[str]) -> str:
+ """Start a LoRA training job. Returns the job ID."""
+ job_id = str(uuid.uuid4())[:8]
+
+ if not self.sd_scripts_installed:
+ await self.install_sd_scripts()
+
+ job = TrainingJob(
+ id=job_id,
+ name=config.name,
+ config=config,
+ status="preparing",
+ total_epochs=config.num_epochs,
+ )
+ self._jobs[job_id] = job
+
+ # Prepare dataset
+ try:
+ dataset_dir = self.prepare_dataset(job_id, image_paths, config.trigger_word)
+ except Exception as e:
+ job.status = "failed"
+ job.error = f"Dataset preparation failed: {e}"
+ return job_id
+
+ # Create output directory
+ output_dir = TRAINING_BASE_DIR / job_id / "output"
+ output_dir.mkdir(parents=True, exist_ok=True)
+
+ # Build training command
+ cmd = self._build_training_command(config, dataset_dir, output_dir)
+ job.log_lines.append(f"Command: {' '.join(cmd)}")
+
+ # Launch training process
+ job.status = "training"
+ job.started_at = time.time()
+
+ asyncio.create_task(self._run_training(job_id, cmd, output_dir, config))
+
+ return job_id
+
+ def _build_training_command(
+ self, config: TrainingConfig, dataset_dir: Path, output_dir: Path
+ ) -> list[str]:
+ """Build the training command for Kohya sd-scripts."""
+ cmd = [
+ sys.executable,
+ str(SD_SCRIPTS_DIR / "train_network.py"),
+ f"--pretrained_model_name_or_path={config.base_model}",
+ f"--train_data_dir={dataset_dir}",
+ f"--output_dir={output_dir}",
+ f"--output_name={config.name}",
+ f"--resolution={config.resolution}",
+ f"--train_batch_size={config.train_batch_size}",
+ f"--max_train_epochs={config.num_epochs}",
+ f"--learning_rate={config.learning_rate}",
+ f"--network_module=networks.lora",
+ f"--network_dim={config.network_rank}",
+ f"--network_alpha={config.network_alpha}",
+ f"--optimizer_type={config.optimizer}",
+ f"--lr_scheduler={config.lr_scheduler}",
+ f"--save_every_n_epochs={config.save_every_n_epochs}",
+ f"--clip_skip={config.clip_skip}",
+ f"--mixed_precision={config.mixed_precision}",
+ f"--seed={config.seed}",
+ f"--caption_extension={config.caption_extension}",
+ "--cache_latents",
+ "--enable_bucket",
+ "--xformers",
+ "--save_model_as=safetensors",
+ ]
+
+ if config.max_train_steps:
+ cmd.append(f"--max_train_steps={config.max_train_steps}")
+
+ return cmd
+
+ async def _run_training(
+ self, job_id: str, cmd: list[str], output_dir: Path, config: TrainingConfig
+ ):
+ """Run the training process and monitor progress."""
+ job = self._jobs[job_id]
+ try:
+ proc = await asyncio.create_subprocess_exec(
+ *cmd,
+ stdout=asyncio.subprocess.PIPE,
+ stderr=asyncio.subprocess.STDOUT,
+ cwd=str(SD_SCRIPTS_DIR),
+ )
+ self._processes[job_id] = proc
+
+ # Read output lines and parse progress
+ async for line_bytes in proc.stdout:
+ line = line_bytes.decode("utf-8", errors="replace").strip()
+ if not line:
+ continue
+ job.log_lines.append(line)
+ # Keep last 200 lines
+ if len(job.log_lines) > 200:
+ job.log_lines = job.log_lines[-200:]
+
+ # Parse progress from Kohya output
+ if "epoch" in line.lower() and "/" in line:
+ try:
+ # Look for patterns like "epoch 3/10"
+ parts = line.lower().split("epoch")
+ if len(parts) > 1:
+ ep_part = parts[1].strip().split()[0]
+ if "/" in ep_part:
+ current, total = ep_part.split("/")
+ job.current_epoch = int(current)
+ job.total_epochs = int(total)
+ job.progress = job.current_epoch / max(job.total_epochs, 1)
+ except (ValueError, IndexError):
+ pass
+
+ if "loss=" in line or "loss:" in line:
+ try:
+ loss_str = line.split("loss")[1].strip("=: ").split()[0].strip(",")
+ job.loss = float(loss_str)
+ except (ValueError, IndexError):
+ pass
+
+ if "steps:" in line.lower() or "step " in line.lower():
+ try:
+ import re
+ step_match = re.search(r"(\d+)/(\d+)", line)
+ if step_match:
+ job.current_step = int(step_match.group(1))
+ job.total_steps = int(step_match.group(2))
+ if job.total_steps > 0:
+ job.progress = job.current_step / job.total_steps
+ except (ValueError, IndexError):
+ pass
+
+ await proc.wait()
+
+ if proc.returncode == 0:
+ job.status = "completed"
+ job.progress = 1.0
+ job.completed_at = time.time()
+
+ # Find the output LoRA file and copy to ComfyUI
+ lora_file = output_dir / f"{config.name}.safetensors"
+ if lora_file.exists():
+ dest = LORA_OUTPUT_DIR / lora_file.name
+ LORA_OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
+ shutil.copy2(lora_file, dest)
+ job.output_path = str(dest)
+ logger.info("Training complete! LoRA saved to %s", dest)
+ else:
+ # Check for epoch-saved versions
+ for f in sorted(output_dir.glob("*.safetensors")):
+ dest = LORA_OUTPUT_DIR / f.name
+ shutil.copy2(f, dest)
+ job.output_path = str(dest)
+ logger.info("Training complete! Output in %s", output_dir)
+ else:
+ job.status = "failed"
+ job.error = f"Training process exited with code {proc.returncode}"
+ logger.error("Training failed: %s", job.error)
+
+ except Exception as e:
+ job.status = "failed"
+ job.error = str(e)
+ logger.error("Training error: %s", e, exc_info=True)
+ finally:
+ self._processes.pop(job_id, None)
+
+ def get_job(self, job_id: str) -> TrainingJob | None:
+ return self._jobs.get(job_id)
+
+ def list_jobs(self) -> list[TrainingJob]:
+ return list(self._jobs.values())
+
+ async def cancel_job(self, job_id: str) -> bool:
+ """Cancel a running training job."""
+ proc = self._processes.get(job_id)
+ if proc:
+ proc.terminate()
+ self._processes.pop(job_id, None)
+ job = self._jobs.get(job_id)
+ if job:
+ job.status = "failed"
+ job.error = "Cancelled by user"
+ return True
+ return False
diff --git a/src/content_engine/services/publisher/__init__.py b/src/content_engine/services/publisher/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b56840554da759b076cda7d83a16466cede9f1f
--- /dev/null
+++ b/src/content_engine/services/publisher/__init__.py
@@ -0,0 +1 @@
+"""Publishing integrations for content platforms."""
diff --git a/src/content_engine/services/publisher/base.py b/src/content_engine/services/publisher/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..263905014f48864be41204e7825b7f56ff940c96
--- /dev/null
+++ b/src/content_engine/services/publisher/base.py
@@ -0,0 +1,45 @@
+"""Abstract base class for content publishers."""
+
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from pathlib import Path
+
+
+@dataclass
+class PublishResult:
+    """Result of a publish operation."""
+
+    success: bool
+    # Platform identifier, matching Publisher.platform_name.
+    platform: str
+    # URL of the created post, when the platform reports one.
+    post_url: str | None = None
+    # Populated only when success is False.
+    error_message: str | None = None
+
+
+class Publisher(ABC):
+    """Abstract interface for publishing to content platforms.
+
+    Implementations can use direct API calls or browser automation (Playwright).
+    Implementations are expected to return a PublishResult rather than raise
+    for platform-side failures — TODO confirm against concrete subclasses.
+    """
+
+    @property
+    @abstractmethod
+    def platform_name(self) -> str:
+        """Platform identifier (e.g., 'fanvue')."""
+
+    @abstractmethod
+    async def publish(
+        self,
+        *,
+        image_path: Path,
+        caption: str,
+        content_rating: str = "sfw",
+        is_teaser: bool = False,
+        tags: list[str] | None = None,
+    ) -> PublishResult:
+        """Publish a single image to the platform.
+
+        Args:
+            image_path: Local path of the image file to upload.
+            caption: Post text.
+            content_rating: Rating tag, defaults to "sfw".
+            is_teaser: Whether to mark the post as a teaser.
+            tags: Optional platform tags; None means no tags.
+        """
+
+    @abstractmethod
+    async def is_authenticated(self) -> bool:
+        """Check if we have valid credentials for the platform."""
diff --git a/src/content_engine/services/router.py b/src/content_engine/services/router.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a1d3821dc5a5a33e78899c7f2be137a90e0f5da
--- /dev/null
+++ b/src/content_engine/services/router.py
@@ -0,0 +1,66 @@
+"""Job routing service — decides whether to run generation locally or in the cloud.
+
+For v1 (single GPU, no cloud accounts), everything routes locally.
+The router checks ComfyUI's queue depth and VRAM before dispatching.
+"""
+
+from __future__ import annotations
+
+import logging
+from enum import Enum
+
+from content_engine.config import settings
+from content_engine.services.comfyui_client import ComfyUIClient
+
+logger = logging.getLogger(__name__)
+
+
+class Backend(str, Enum):
+    """Where a generation job should run. str mixin keeps values
+    JSON/log friendly."""
+
+    LOCAL = "local"
+    CLOUD = "cloud"
+
+
+class RouterService:
+ """Routes generation jobs to local GPU or cloud APIs."""
+
+ def __init__(self, comfyui_client: ComfyUIClient):
+ self.comfyui = comfyui_client
+ self.max_queue_depth = settings.comfyui.max_local_queue_depth
+ self.min_vram_gb = settings.comfyui.min_vram_gb
+
+ async def route(self, estimated_vram_gb: float = 4.0) -> Backend:
+ """Decide where to run a generation job.
+
+ Returns Backend.LOCAL if the local GPU is available,
+ Backend.CLOUD if it should be offloaded.
+ """
+ # v1: No cloud providers configured — always local
+ if not settings.cloud_providers:
+ return Backend.LOCAL
+
+ # Check if ComfyUI is reachable
+ if not await self.comfyui.is_available():
+ logger.warning("ComfyUI unavailable, routing to cloud")
+ return Backend.CLOUD
+
+ # Check queue depth
+ queue_depth = await self.comfyui.get_queue_depth()
+ if queue_depth >= self.max_queue_depth:
+ logger.info(
+ "Local queue full (%d/%d), routing to cloud",
+ queue_depth,
+ self.max_queue_depth,
+ )
+ return Backend.CLOUD
+
+ # Check VRAM
+ vram_free = await self.comfyui.get_vram_free_gb()
+ if vram_free is not None and vram_free < self.min_vram_gb:
+ logger.info(
+ "Insufficient VRAM (%.1f GB free, need %.1f GB), routing to cloud",
+ vram_free,
+ self.min_vram_gb,
+ )
+ return Backend.CLOUD
+
+ return Backend.LOCAL
diff --git a/src/content_engine/services/runpod_trainer.py b/src/content_engine/services/runpod_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..16f88d071aa82953799616f62e2925eeb571f39c
--- /dev/null
+++ b/src/content_engine/services/runpod_trainer.py
@@ -0,0 +1,625 @@
+"""RunPod cloud LoRA training — offload training to RunPod GPU pods.
+
+Creates a temporary GPU pod, uploads training images, runs Kohya sd-scripts,
+downloads the finished LoRA, then terminates the pod. No local GPU needed.
+
+Supports multiple base models (FLUX, SD 1.5, SDXL) via model registry.
+
+Usage:
+ Set RUNPOD_API_KEY in .env
+ Select "Cloud (RunPod)" in the training UI
+"""
+
+from __future__ import annotations
+
+import asyncio
+import logging
+import time
+import uuid
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any
+
+import runpod
+import yaml
+
+logger = logging.getLogger(__name__)
+
+import os
+from content_engine.config import settings, IS_HF_SPACES
+
+# Where finished LoRAs are copied; shared with the rest of the app via settings.
+LORA_OUTPUT_DIR = settings.paths.lora_dir
+if IS_HF_SPACES:
+    # Container path baked into the Dockerfile image.
+    CONFIG_DIR = Path("/app/config")
+else:
+    # Local Windows development machine path.
+    CONFIG_DIR = Path("D:/AI automation/content_engine/config")
+
+# RunPod GPU options (id -> display name, approx cost/hr)
+GPU_OPTIONS = {
+    "NVIDIA GeForce RTX 3090": "RTX 3090 (~$0.22/hr)",
+    "NVIDIA GeForce RTX 4090": "RTX 4090 (~$0.44/hr)",
+    "NVIDIA RTX A4000": "RTX A4000 (~$0.20/hr)",
+    "NVIDIA RTX A5000": "RTX A5000 (~$0.28/hr)",
+    "NVIDIA RTX A6000": "RTX A6000 (~$0.76/hr)",
+    "NVIDIA A100 80GB PCIe": "A100 80GB (~$1.89/hr)",
+}
+
+DEFAULT_GPU = "NVIDIA GeForce RTX 4090"
+
+# Docker image with PyTorch + CUDA pre-installed
+DOCKER_IMAGE = "runpod/pytorch:2.4.0-py3.11-cuda12.4.1-devel-ubuntu22.04"
+
+
+def load_model_registry() -> dict[str, dict]:
+ """Load training model configurations from config/models.yaml."""
+ models_file = CONFIG_DIR / "models.yaml"
+ if not models_file.exists():
+ logger.warning("Model registry not found: %s", models_file)
+ return {}
+ with open(models_file) as f:
+ config = yaml.safe_load(f)
+ return config.get("training_models", {})
+
+
+@dataclass
+class CloudTrainingJob:
+    """Tracks state of a RunPod cloud training job.
+
+    Mutated in place by the background pipeline task; in-memory only.
+    """
+
+    id: str
+    name: str
+    status: str = "pending"  # pending, creating_pod, uploading, installing, training, downloading, completed, failed
+    progress: float = 0.0  # 0.0 .. 1.0 across the whole pipeline
+    current_epoch: int = 0
+    total_epochs: int = 0
+    current_step: int = 0
+    total_steps: int = 0
+    loss: float | None = None
+    started_at: float | None = None  # time.time() timestamps
+    completed_at: float | None = None
+    output_path: str | None = None  # local path of the downloaded LoRA
+    error: str | None = None
+    log_lines: list[str] = field(default_factory=list)  # capped at 200 by _log
+    pod_id: str | None = None  # RunPod pod id, once created
+    gpu_type: str = DEFAULT_GPU
+    cost_estimate: str | None = None
+    base_model: str = "sd15_realistic"  # key into the model registry
+    model_type: str = "sd15"  # "sd15", "sdxl", "flux", ...
+
+    def _log(self, msg: str):
+        # Append to the rolling per-job log and mirror to the module logger.
+        self.log_lines.append(msg)
+        if len(self.log_lines) > 200:
+            self.log_lines = self.log_lines[-200:]
+        logger.info("[%s] %s", self.id, msg)
+
+
+class RunPodTrainer:
+ """Manages LoRA training on RunPod cloud GPUs."""
+
+    def __init__(self, api_key: str):
+        # NOTE: the runpod SDK authenticates via a module-level global, so
+        # setting it here affects every runpod call in this process.
+        self._api_key = api_key
+        runpod.api_key = api_key
+        self._jobs: dict[str, CloudTrainingJob] = {}
+        # Registry loaded once at construction; config edits need a restart.
+        self._model_registry = load_model_registry()
+
+    @property
+    def available(self) -> bool:
+        """Check if RunPod is configured (an API key was provided)."""
+        # Re-set module-level key in case uvicorn reload cleared it
+        # (side effect in a property getter — intentional, per the comment).
+        if self._api_key:
+            runpod.api_key = self._api_key
+        return bool(self._api_key)
+
+ def list_gpu_options(self) -> dict[str, str]:
+ return GPU_OPTIONS
+
+    def list_training_models(self) -> dict[str, dict]:
+        """List available base models for training with their parameters.
+
+        Projects each registry entry onto a fixed key set with defaults, so
+        the UI always sees a complete, uniform record per model.
+        """
+        return {
+            key: {
+                "name": cfg.get("name", key),
+                "description": cfg.get("description", ""),
+                "model_type": cfg.get("model_type", "sd15"),
+                "resolution": cfg.get("resolution", 512),
+                "learning_rate": cfg.get("learning_rate", 1e-4),
+                "network_rank": cfg.get("network_rank", 32),
+                "network_alpha": cfg.get("network_alpha", 16),
+                "vram_required_gb": cfg.get("vram_required_gb", 8),
+                "recommended_images": cfg.get("recommended_images", "15-30 photos"),
+            }
+            for key, cfg in self._model_registry.items()
+        }
+
+    def get_model_config(self, model_key: str) -> dict | None:
+        """Get raw registry configuration for a training model; None if unknown."""
+        return self._model_registry.get(model_key)
+
+ async def start_training(
+ self,
+ *,
+ name: str,
+ image_paths: list[str],
+ trigger_word: str = "",
+ base_model: str = "sd15_realistic",
+ resolution: int | None = None,
+ num_epochs: int = 10,
+ max_train_steps: int | None = None,
+ learning_rate: float | None = None,
+ network_rank: int | None = None,
+ network_alpha: int | None = None,
+ optimizer: str | None = None,
+ save_every_n_epochs: int = 2,
+ gpu_type: str = DEFAULT_GPU,
+ ) -> str:
+ """Start a cloud training job. Returns job ID.
+
+ Parameters use model registry defaults if not specified.
+ """
+ job_id = str(uuid.uuid4())[:8]
+
+ # Get model config (fall back to sd15_realistic if not found)
+ model_cfg = self._model_registry.get(base_model, self._model_registry.get("sd15_realistic", {}))
+ model_type = model_cfg.get("model_type", "sd15")
+
+ # Use provided values or model defaults
+ final_resolution = resolution or model_cfg.get("resolution", 512)
+ final_lr = learning_rate or model_cfg.get("learning_rate", 1e-4)
+ final_rank = network_rank or model_cfg.get("network_rank", 32)
+ final_alpha = network_alpha or model_cfg.get("network_alpha", 16)
+ final_optimizer = optimizer or model_cfg.get("optimizer", "AdamW8bit")
+ final_steps = max_train_steps or model_cfg.get("max_train_steps")
+
+ job = CloudTrainingJob(
+ id=job_id,
+ name=name,
+ status="pending",
+ total_epochs=num_epochs,
+ gpu_type=gpu_type,
+ started_at=time.time(),
+ base_model=base_model,
+ model_type=model_type,
+ )
+ self._jobs[job_id] = job
+
+ # Launch the full pipeline as a background task
+ asyncio.create_task(self._run_cloud_training(
+ job=job,
+ image_paths=image_paths,
+ trigger_word=trigger_word,
+ model_cfg=model_cfg,
+ resolution=final_resolution,
+ num_epochs=num_epochs,
+ max_train_steps=final_steps,
+ learning_rate=final_lr,
+ network_rank=final_rank,
+ network_alpha=final_alpha,
+ optimizer=final_optimizer,
+ save_every_n_epochs=save_every_n_epochs,
+ ))
+
+ return job_id
+
+ async def _run_cloud_training(
+ self,
+ job: CloudTrainingJob,
+ image_paths: list[str],
+ trigger_word: str,
+ model_cfg: dict,
+ resolution: int,
+ num_epochs: int,
+ max_train_steps: int | None,
+ learning_rate: float,
+ network_rank: int,
+ network_alpha: int,
+ optimizer: str,
+ save_every_n_epochs: int,
+ ):
+ """Full cloud training pipeline: create pod -> upload -> train -> download -> cleanup."""
+ ssh = None
+ sftp = None
+ model_type = model_cfg.get("model_type", "sd15")
+ name = job.name
+
+ try:
+ # Step 1: Create pod
+ job.status = "creating_pod"
+ job._log(f"Creating RunPod with {job.gpu_type}...")
+
+ pod = await asyncio.to_thread(
+ runpod.create_pod,
+ f"lora-train-{job.id}",
+ DOCKER_IMAGE,
+ job.gpu_type,
+ volume_in_gb=75, # Increased for FLUX models
+ container_disk_in_gb=30,
+ ports="22/tcp",
+ docker_args="bash -c 'apt-get update && apt-get install -y openssh-server && mkdir -p /run/sshd && echo \"root:runpod\" | chpasswd && echo \"PermitRootLogin yes\" >> /etc/ssh/sshd_config && /usr/sbin/sshd && sleep infinity'",
+ )
+
+ job.pod_id = pod["id"]
+ job._log(f"Pod created: {job.pod_id}")
+
+ # Wait for pod to be ready and get SSH info
+ job._log("Waiting for pod to start...")
+ ssh_host, ssh_port = await self._wait_for_pod_ready(job)
+ job._log(f"Pod ready at {ssh_host}:{ssh_port}")
+
+ # Step 2: Connect via SSH
+ import paramiko
+ ssh = paramiko.SSHClient()
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+ for attempt in range(30):
+ try:
+ await asyncio.to_thread(
+ ssh.connect,
+ ssh_host, port=ssh_port,
+ username="root", password="runpod",
+ timeout=10,
+ )
+ break
+ except Exception:
+ if attempt == 29:
+ raise RuntimeError("Could not SSH into pod after 30 attempts")
+ await asyncio.sleep(5)
+
+ job._log("SSH connected")
+ sftp = ssh.open_sftp()
+
+ # Step 3: Upload training images
+ job.status = "uploading"
+ job._log(f"Uploading {len(image_paths)} training images...")
+
+ folder_name = f"10_{trigger_word or 'character'}"
+ self._ssh_exec(ssh, f"mkdir -p /workspace/dataset/{folder_name}")
+ for img_path in image_paths:
+ p = Path(img_path)
+ if p.exists():
+ remote_path = f"/workspace/dataset/{folder_name}/{p.name}"
+ sftp.put(str(p), remote_path)
+ # Upload matching caption .txt file if it exists locally
+ local_caption = p.with_suffix(".txt")
+ if local_caption.exists():
+ remote_caption = f"/workspace/dataset/{folder_name}/{local_caption.name}"
+ sftp.put(str(local_caption), remote_caption)
+ else:
+ # Fallback: create caption from trigger word
+ remote_caption = remote_path.rsplit(".", 1)[0] + ".txt"
+ with sftp.open(remote_caption, "w") as f:
+ f.write(trigger_word or "")
+
+ job._log("Images uploaded")
+
+ # Step 4: Install Kohya sd-scripts on the pod
+ job.status = "installing"
+ job._log("Installing Kohya sd-scripts (this takes a few minutes)...")
+ job.progress = 0.05
+
+ install_cmds = [
+ "cd /workspace && git clone --depth 1 https://github.com/kohya-ss/sd-scripts.git",
+ "cd /workspace/sd-scripts && pip install -r requirements.txt 2>&1 | tail -1",
+ "pip install accelerate lion-pytorch prodigy-optimizer safetensors bitsandbytes xformers 2>&1 | tail -1",
+ ]
+ for cmd in install_cmds:
+ out = self._ssh_exec(ssh, cmd, timeout=600)
+ job._log(out[:200] if out else "done")
+
+ # Download base model from HuggingFace
+ hf_repo = model_cfg.get("hf_repo", "SG161222/Realistic_Vision_V5.1_noVAE")
+ hf_filename = model_cfg.get("hf_filename", "Realistic_Vision_V5.1_fp16-no-ema.safetensors")
+ model_name = model_cfg.get("name", job.base_model)
+
+ job._log(f"Downloading base model: {model_name}...")
+ job.progress = 0.1
+
+ self._ssh_exec(ssh, """pip install huggingface_hub 2>&1 | tail -1""", timeout=120)
+
+ # Download main model
+ self._ssh_exec(ssh, f"""
+ python -c "
+from huggingface_hub import hf_hub_download
+hf_hub_download('{hf_repo}', '{hf_filename}', local_dir='/workspace/models')
+" 2>&1 | tail -5
+ """, timeout=1200) # Longer timeout for large models
+
+ # For FLUX, download additional required models (CLIP, T5, VAE)
+ if model_type == "flux":
+ job._log("Downloading FLUX auxiliary models (CLIP, T5, VAE)...")
+ job.progress = 0.12
+
+ self._ssh_exec(ssh, """
+ python -c "
+from huggingface_hub import hf_hub_download
+# CLIP text encoder
+hf_hub_download('comfyanonymous/flux_text_encoders', 'clip_l.safetensors', local_dir='/workspace/models')
+# T5 text encoder (fp16)
+hf_hub_download('comfyanonymous/flux_text_encoders', 't5xxl_fp16.safetensors', local_dir='/workspace/models')
+# VAE/AutoEncoder
+hf_hub_download('black-forest-labs/FLUX.1-dev', 'ae.safetensors', local_dir='/workspace/models')
+" 2>&1 | tail -5
+ """, timeout=1200)
+
+ job._log("Base model downloaded")
+ job.progress = 0.15
+
+ # Step 5: Run training
+ job.status = "training"
+ job._log(f"Starting {model_type.upper()} LoRA training...")
+
+ model_path = f"/workspace/models/{hf_filename}"
+
+ # Build training command based on model type
+ train_cmd = self._build_training_command(
+ model_type=model_type,
+ model_path=model_path,
+ name=name,
+ resolution=resolution,
+ num_epochs=num_epochs,
+ max_train_steps=max_train_steps,
+ learning_rate=learning_rate,
+ network_rank=network_rank,
+ network_alpha=network_alpha,
+ optimizer=optimizer,
+ save_every_n_epochs=save_every_n_epochs,
+ model_cfg=model_cfg,
+ )
+
+ # Execute training and stream output
+ job._log("Training command submitted...")
+ transport = ssh.get_transport()
+ channel = transport.open_session()
+ channel.exec_command(train_cmd)
+
+ # Read output progressively
+ buffer = ""
+ while not channel.exit_status_ready() or channel.recv_ready():
+ if channel.recv_ready():
+ chunk = channel.recv(4096).decode("utf-8", errors="replace")
+ buffer += chunk
+ # Process complete lines
+ while "\n" in buffer:
+ line, buffer = buffer.split("\n", 1)
+ line = line.strip()
+ if not line:
+ continue
+ job._log(line)
+ self._parse_progress(job, line)
+ else:
+ await asyncio.sleep(2)
+
+ exit_code = channel.recv_exit_status()
+ if exit_code != 0:
+ raise RuntimeError(f"Training failed with exit code {exit_code}")
+
+ job._log("Training completed on RunPod!")
+ job.progress = 0.9
+
+ # Step 6: Download the LoRA file
+ job.status = "downloading"
+ job._log("Downloading trained LoRA...")
+
+ LORA_OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
+ local_path = LORA_OUTPUT_DIR / f"{name}.safetensors"
+
+ # Try the main output file first, then look for any .safetensors
+ try:
+ sftp.get(f"/workspace/output/{name}.safetensors", str(local_path))
+ except FileNotFoundError:
+ # Find any safetensors file
+ remote_files = sftp.listdir("/workspace/output")
+ safetensors = [f for f in remote_files if f.endswith(".safetensors")]
+ if safetensors:
+ sftp.get(f"/workspace/output/{safetensors[-1]}", str(local_path))
+ else:
+ raise RuntimeError("No .safetensors output found")
+
+ job.output_path = str(local_path)
+ job._log(f"LoRA saved to {local_path}")
+
+ # Done!
+ job.status = "completed"
+ job.progress = 1.0
+ job.completed_at = time.time()
+ elapsed = (job.completed_at - job.started_at) / 60
+ job._log(f"Cloud training complete in {elapsed:.1f} minutes")
+
+ except Exception as e:
+ job.status = "failed"
+ job.error = str(e)
+ job._log(f"ERROR: {e}")
+ logger.error("Cloud training failed: %s", e, exc_info=True)
+
+ finally:
+ # Cleanup: close SSH and terminate pod
+ if sftp:
+ try:
+ sftp.close()
+ except Exception:
+ pass
+ if ssh:
+ try:
+ ssh.close()
+ except Exception:
+ pass
+
+ if job.pod_id:
+ try:
+ job._log("Terminating RunPod...")
+ await asyncio.to_thread(runpod.terminate_pod, job.pod_id)
+ job._log("Pod terminated")
+ except Exception as e:
+ job._log(f"Warning: Failed to terminate pod {job.pod_id}: {e}")
+
    def _build_training_command(
        self,
        *,
        model_type: str,
        model_path: str,
        name: str,
        resolution: int,
        num_epochs: int,
        max_train_steps: int | None,
        learning_rate: float,
        network_rank: int,
        network_alpha: int,
        optimizer: str,
        save_every_n_epochs: int,
        model_cfg: dict,
    ) -> str:
        """Build the training command based on model type.

        Assembles a single shell command that launches Kohya sd-scripts via
        ``accelerate`` on the remote pod. The training script and extra flags
        depend on ``model_type``: "flux", "sdxl", or anything else (treated
        as SD 1.5).

        Args:
            model_type: Model family selector ("flux" | "sdxl" | other).
            model_path: Remote checkpoint path on the pod.
            name: Output LoRA name (--output_name).
            resolution: Training resolution (--resolution).
            num_epochs: Epochs to train when ``max_train_steps`` is falsy.
            max_train_steps: If truthy, train by step count instead of epochs.
            learning_rate: Base learning rate.
            network_rank: LoRA dimension (--network_dim).
            network_alpha: LoRA alpha (--network_alpha).
            optimizer: Kohya optimizer type (--optimizer_type).
            save_every_n_epochs: Checkpoint save interval.
            model_cfg: Per-model options (lr_scheduler, clip_skip,
                text_encoder_lr, min_snr_gamma, ...).

        Returns:
            The full shell command string; stderr is merged into stdout
            (``2>&1``) so the caller can stream a single output channel.
        """

        # Common parameters shared by all model families.
        # NOTE: a bare "\" at the end of a line INSIDE this f-string is a
        # Python backslash-newline escape (the newline is removed), so each
        # section renders as one long argument run, not a multi-line script.
        base_args = f"""
  --train_data_dir="/workspace/dataset" \
  --output_dir="/workspace/output" \
  --output_name="{name}" \
  --resolution={resolution} \
  --train_batch_size=1 \
  --learning_rate={learning_rate} \
  --network_module=networks.lora \
  --network_dim={network_rank} \
  --network_alpha={network_alpha} \
  --optimizer_type={optimizer} \
  --save_every_n_epochs={save_every_n_epochs} \
  --mixed_precision=fp16 \
  --seed=42 \
  --caption_extension=.txt \
  --cache_latents \
  --enable_bucket \
  --save_model_as=safetensors"""

        # Steps vs epochs: an explicit step budget wins over epoch count.
        if max_train_steps:
            base_args += f" \\\n  --max_train_steps={max_train_steps}"
        else:
            base_args += f" \\\n  --max_train_epochs={num_epochs}"

        # LR scheduler (per-model override, sensible default otherwise).
        lr_scheduler = model_cfg.get("lr_scheduler", "cosine_with_restarts")
        base_args += f" \\\n  --lr_scheduler={lr_scheduler}"

        if model_type == "flux":
            # FLUX-specific training: needs its text encoders and autoencoder
            # passed explicitly, plus fp8/split-mode flags for VRAM headroom.
            script = "flux_train_network.py"
            flux_args = f"""
  --pretrained_model_name_or_path="{model_path}" \
  --clip_l="/workspace/models/clip_l.safetensors" \
  --t5xxl="/workspace/models/t5xxl_fp16.safetensors" \
  --ae="/workspace/models/ae.safetensors" \
  --cache_text_encoder_outputs \
  --cache_text_encoder_outputs_to_disk \
  --fp8_base \
  --split_mode"""

            # Text encoder learning rate (separate from the base LR).
            text_encoder_lr = model_cfg.get("text_encoder_lr", 4e-5)
            flux_args += f" \\\n  --text_encoder_lr={text_encoder_lr}"

            # Min SNR gamma if specified in the model config.
            min_snr = model_cfg.get("min_snr_gamma")
            if min_snr:
                flux_args += f" \\\n  --min_snr_gamma={min_snr}"

            return f"cd /workspace/sd-scripts && accelerate launch --num_cpu_threads_per_process 1 {script} {flux_args} {base_args} 2>&1"

        elif model_type == "sdxl":
            # SDXL-specific training script with clip-skip + xformers attention.
            script = "sdxl_train_network.py"
            clip_skip = model_cfg.get("clip_skip", 2)
            return f"""cd /workspace/sd-scripts && accelerate launch --num_cpu_threads_per_process 1 {script} \
  --pretrained_model_name_or_path="{model_path}" \
  --clip_skip={clip_skip} \
  --xformers {base_args} 2>&1"""

        else:
            # SD 1.5 / default training script.
            script = "train_network.py"
            clip_skip = model_cfg.get("clip_skip", 1)
            return f"""cd /workspace/sd-scripts && accelerate launch --num_cpu_threads_per_process 1 {script} \
  --pretrained_model_name_or_path="{model_path}" \
  --clip_skip={clip_skip} \
  --xformers {base_args} 2>&1"""
+
    async def _wait_for_pod_ready(self, job: CloudTrainingJob, timeout: int = 300) -> tuple[str, int]:
        """Wait for pod to be running and return (ssh_host, ssh_port).

        Polls the RunPod API every 5 seconds until the pod reports
        ``desiredStatus == "RUNNING"`` and publishes a public mapping for
        private port 22 (SSH).

        Args:
            job: Job whose ``pod_id`` identifies the pod to poll.
            timeout: Maximum seconds to wait before giving up.

        Returns:
            ``(ip, public_port)`` for the pod's SSH endpoint.

        Raises:
            RuntimeError: If the pod is not ready within ``timeout`` seconds.
        """
        start = time.time()
        while time.time() - start < timeout:
            # get_pod is a blocking SDK call; run it off the event loop.
            pod = await asyncio.to_thread(runpod.get_pod, job.pod_id)

            status = pod.get("desiredStatus", "")
            runtime = pod.get("runtime")

            # "runtime" only becomes non-empty once the container is up.
            if status == "RUNNING" and runtime:
                ports = runtime.get("ports", [])
                for port_info in ports:
                    # Look for the public mapping of the SSH port (22).
                    if port_info.get("privatePort") == 22:
                        ip = port_info.get("ip")
                        public_port = port_info.get("publicPort")
                        if ip and public_port:
                            return ip, int(public_port)

            await asyncio.sleep(5)

        raise RuntimeError(f"Pod did not become ready within {timeout}s")
+
+ def _ssh_exec(self, ssh, cmd: str, timeout: int = 120) -> str:
+ """Execute a command over SSH and return stdout."""
+ _, stdout, stderr = ssh.exec_command(cmd, timeout=timeout)
+ out = stdout.read().decode("utf-8", errors="replace")
+ err = stderr.read().decode("utf-8", errors="replace")
+ exit_code = stdout.channel.recv_exit_status()
+ if exit_code != 0 and "warning" not in err.lower():
+ logger.warning("SSH cmd failed (code %d): %s\nstderr: %s", exit_code, cmd[:100], err[:500])
+ return out.strip()
+
+ def _parse_progress(self, job: CloudTrainingJob, line: str):
+ """Parse Kohya training output for progress info."""
+ lower = line.lower()
+ if "epoch" in lower and "/" in line:
+ try:
+ parts = lower.split("epoch")
+ if len(parts) > 1:
+ ep_part = parts[1].strip().split()[0]
+ if "/" in ep_part:
+ current, total = ep_part.split("/")
+ job.current_epoch = int(current)
+ job.total_epochs = int(total)
+ job.progress = 0.15 + 0.75 * (job.current_epoch / max(job.total_epochs, 1))
+ except (ValueError, IndexError):
+ pass
+
+ if "loss=" in line or "loss:" in line:
+ try:
+ loss_str = line.split("loss")[1].strip("=: ").split()[0].strip(",")
+ job.loss = float(loss_str)
+ except (ValueError, IndexError):
+ pass
+
+ if "steps:" in lower or "step " in lower:
+ try:
+ import re
+ step_match = re.search(r"(\d+)/(\d+)", line)
+ if step_match:
+ job.current_step = int(step_match.group(1))
+ job.total_steps = int(step_match.group(2))
+ except (ValueError, IndexError):
+ pass
+
    def get_job(self, job_id: str) -> CloudTrainingJob | None:
        """Return the tracked job with the given id, or None if unknown."""
        return self._jobs.get(job_id)
+
    def list_jobs(self) -> list[CloudTrainingJob]:
        """Return a snapshot list of all tracked cloud training jobs."""
        return list(self._jobs.values())
+
+ async def cancel_job(self, job_id: str) -> bool:
+ """Cancel a cloud training job and terminate its pod."""
+ job = self._jobs.get(job_id)
+ if not job:
+ return False
+ if job.pod_id:
+ try:
+ await asyncio.to_thread(runpod.terminate_pod, job.pod_id)
+ except Exception:
+ pass
+ job.status = "failed"
+ job.error = "Cancelled by user"
+ return True
diff --git a/src/content_engine/services/template_engine.py b/src/content_engine/services/template_engine.py
new file mode 100644
index 0000000000000000000000000000000000000000..95f0285ae35cc8aa40bb82c81bf001ebb38df117
--- /dev/null
+++ b/src/content_engine/services/template_engine.py
@@ -0,0 +1,206 @@
+"""Prompt template system using YAML definitions and Jinja2 rendering.
+
+Templates define structured prompts with variable slots for character traits,
+poses, outfits, emotions, camera angles, lighting, and scenes. The engine
+renders these templates with provided variables to produce final prompts
+for ComfyUI workflows.
+"""
+
+from __future__ import annotations
+
+import logging
+import os
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any
+
+import yaml
+from jinja2 import Environment, BaseLoader
+
+logger = logging.getLogger(__name__)
+
# True when running inside a Hugging Face Space (either env marker is set).
IS_HF_SPACES = os.environ.get("HF_SPACES") == "1" or os.environ.get("SPACE_ID") is not None

# Prompt-template directory. Overridable via the PROMPTS_DIR env var so the
# code is not hard-wired to one developer's absolute path; falls back to the
# HF Spaces location or the historical local default.
_DEFAULT_PROMPTS_DIR = (
    Path("/app/config/templates/prompts")
    if IS_HF_SPACES
    else Path("D:/AI automation/content_engine/config/templates/prompts")
)
PROMPTS_DIR = Path(os.environ.get("PROMPTS_DIR", str(_DEFAULT_PROMPTS_DIR)))
+
+
@dataclass
class VariableDefinition:
    """Definition of a template variable with its allowed values."""

    # Variable name as referenced by the template's Jinja2 placeholders.
    name: str
    # Value kind; "choice" means pick from `options`.
    type: str = "choice"  # choice | string | number
    # Allowed values when type == "choice".
    options: list[str] = field(default_factory=list)
    # Default used when the caller supplies no value.
    default: str = ""
    # When True, rendering fails if no value and no default is available
    # (character_trigger/character_lora are special-cased by the engine).
    required: bool = False
    # Human-readable explanation (e.g. for UI display).
    description: str = ""
+
+
@dataclass
class PromptTemplate:
    """A parsed prompt template.

    One instance corresponds to one YAML file in the templates directory;
    fields left at their defaults simply were not present in the YAML.
    """

    # Unique template id (YAML "id", falling back to the file stem).
    id: str
    # Human-readable display name.
    name: str
    # Free-form grouping label.
    category: str = ""
    # Content rating of prompts this template produces.
    rating: str = "sfw"  # sfw | nsfw
    # Key of the base checkpoint this template is tuned for.
    base_model: str = "realistic_vision"

    # LoRA specs (with Jinja2 variable references in their "name" fields)
    loras: list[dict[str, Any]] = field(default_factory=list)

    # Prompt text (Jinja2 templates)
    positive_prompt: str = ""
    negative_prompt: str = ""

    # Sampler defaults; None means "not specified by this template".
    steps: int | None = None
    cfg: float | None = None
    sampler_name: str | None = None
    scheduler: str | None = None
    width: int | None = None
    height: int | None = None

    # Variable definitions, keyed by variable name.
    variables: dict[str, VariableDefinition] = field(default_factory=dict)

    # Motion (for future video support)
    motion: dict[str, Any] = field(default_factory=dict)
+
+
class TemplateEngine:
    """Loads, manages, and renders prompt templates.

    Templates are YAML files in ``templates_dir``; each is parsed into a
    PromptTemplate keyed by its ``id``. Rendering substitutes variables
    into the Jinja2 prompt strings and LoRA name templates.
    """

    def __init__(self, templates_dir: Path | None = None):
        """Create an engine reading templates from ``templates_dir``.

        Args:
            templates_dir: Directory of *.yaml templates; defaults to
                PROMPTS_DIR.
        """
        self.templates_dir = templates_dir or PROMPTS_DIR
        self._templates: dict[str, PromptTemplate] = {}
        # BaseLoader: template strings come from YAML, never the filesystem.
        self._jinja_env = Environment(loader=BaseLoader())

    def load_all(self) -> None:
        """Load all YAML templates from the templates directory.

        A file that fails to parse is logged and skipped so one bad
        template cannot take down the whole engine.
        """
        if not self.templates_dir.exists():
            logger.warning("Templates directory does not exist: %s", self.templates_dir)
            return

        # sorted() makes load order (and duplicate-id resolution) deterministic.
        for path in sorted(self.templates_dir.glob("*.yaml")):
            try:
                template = self._parse_template(path)
                self._templates[template.id] = template
                logger.info("Loaded template: %s", template.id)
            except Exception:
                logger.error("Failed to load template %s", path, exc_info=True)

    def _parse_template(self, path: Path) -> PromptTemplate:
        """Parse a YAML file into a PromptTemplate.

        Tolerates empty files and empty sections: yaml.safe_load returns
        None for an empty document, which previously crashed with
        AttributeError on the first ``.get`` call.
        """
        with open(path, encoding="utf-8") as f:
            data = yaml.safe_load(f) or {}

        variables = {}
        for var_name, var_def in (data.get("variables") or {}).items():
            # A bare "name:" entry in YAML parses to None; treat it as {}.
            var_def = var_def or {}
            variables[var_name] = VariableDefinition(
                name=var_name,
                type=var_def.get("type", "string"),
                options=var_def.get("options", []),
                default=var_def.get("default", ""),
                required=var_def.get("required", False),
                description=var_def.get("description", ""),
            )

        sampler = data.get("sampler") or {}

        return PromptTemplate(
            id=data.get("id", path.stem),
            name=data.get("name", path.stem),
            category=data.get("category", ""),
            rating=data.get("rating", "sfw"),
            base_model=data.get("base_model", "realistic_vision"),
            loras=data.get("loras", []),
            positive_prompt=data.get("positive_prompt", ""),
            negative_prompt=data.get("negative_prompt", ""),
            steps=sampler.get("steps"),
            cfg=sampler.get("cfg"),
            sampler_name=sampler.get("sampler_name"),
            scheduler=sampler.get("scheduler"),
            width=sampler.get("width"),
            height=sampler.get("height"),
            variables=variables,
            motion=data.get("motion") or {},
        )

    def get(self, template_id: str) -> PromptTemplate:
        """Get a loaded template by ID.

        Raises:
            KeyError: If no template with that id has been loaded.
        """
        if template_id not in self._templates:
            raise KeyError(f"Template not found: {template_id}")
        return self._templates[template_id]

    def list_templates(self) -> list[PromptTemplate]:
        """List all loaded templates."""
        return list(self._templates.values())

    def render(
        self,
        template_id: str,
        variables: dict[str, str],
    ) -> RenderedPrompt:
        """Render a template with the given variables.

        Returns the rendered positive/negative prompts and resolved LoRA
        specs.

        Raises:
            KeyError: If the template id is unknown.
            ValueError: If a required variable (other than the special
                character_* ones) has no value and no default.
        """
        template = self.get(template_id)

        # Fill in defaults for missing variables.
        resolved_vars = {}
        for var_name, var_def in template.variables.items():
            if var_name in variables:
                resolved_vars[var_name] = variables[var_name]
            elif var_def.default:
                resolved_vars[var_name] = var_def.default
            elif var_def.required:
                # Character-specific vars default to empty when no character
                # is selected; everything else required must be provided.
                if var_name in ("character_trigger", "character_lora"):
                    resolved_vars[var_name] = ""
                else:
                    raise ValueError(f"Required variable '{var_name}' not provided")

        # Also pass through any extra variables not in the definition.
        for k, v in variables.items():
            if k not in resolved_vars:
                resolved_vars[k] = v

        # Render prompts.
        positive = self._render_string(template.positive_prompt, resolved_vars)
        negative = self._render_string(template.negative_prompt, resolved_vars)

        # Render LoRA names (they may contain {{character_lora}} etc.).
        rendered_loras = []
        for lora_spec in template.loras:
            rendered_loras.append({
                "name": self._render_string(lora_spec.get("name", ""), resolved_vars),
                "strength_model": lora_spec.get("strength_model", 0.85),
                "strength_clip": lora_spec.get("strength_clip", 0.85),
            })

        return RenderedPrompt(
            positive_prompt=positive,
            negative_prompt=negative,
            loras=rendered_loras,
            variables=resolved_vars,
            template=template,
        )

    def _render_string(self, template_str: str, variables: dict[str, str]) -> str:
        """Render a Jinja2 template string with variables; "" passes through."""
        if not template_str:
            return ""
        tmpl = self._jinja_env.from_string(template_str)
        return tmpl.render(**variables)
+
+
@dataclass
class RenderedPrompt:
    """Result of rendering a template with variables."""

    # Final positive prompt with all placeholders substituted.
    positive_prompt: str
    # Final negative prompt.
    negative_prompt: str
    # LoRA specs with names rendered and strengths resolved.
    loras: list[dict[str, Any]]
    # The fully resolved variable map actually used for rendering.
    variables: dict[str, str]
    # The source template (gives access to sampler defaults, rating, etc.).
    template: PromptTemplate
diff --git a/src/content_engine/services/variation_engine.py b/src/content_engine/services/variation_engine.py
new file mode 100644
index 0000000000000000000000000000000000000000..860e3e941eb5df42f3c5ec3e8d87838218cc119c
--- /dev/null
+++ b/src/content_engine/services/variation_engine.py
@@ -0,0 +1,189 @@
+"""Content variation engine for generating combinatorial batches.
+
+Given a prompt template and a character, the variation engine produces
+multiple generation jobs with different combinations of poses, outfits,
+emotions, camera angles, and other variable attributes.
+"""
+
+from __future__ import annotations
+
+import itertools
+import random
+import uuid
+from dataclasses import dataclass, field
+from typing import Any
+
+from content_engine.services.template_engine import TemplateEngine
+
+
@dataclass
class CharacterProfile:
    """Character configuration loaded from YAML."""

    # Stable identifier used to look the character up.
    id: str
    # Display name.
    name: str
    # Token that activates the character's LoRA in prompts.
    trigger_word: str
    # Filename of the character's LoRA weights.
    lora_filename: str
    # Strength applied to both model and clip for the character LoRA.
    lora_strength: float = 0.85
    # Preferred checkpoint; None means "use the template's choice".
    default_checkpoint: str | None = None
    # Additional style LoRA specs applied after the character LoRA.
    style_loras: list[dict[str, Any]] = field(default_factory=list)
    # Free-form description.
    description: str = ""
    # Trait name -> value (presumably hair, eyes, etc. — schema defined by the YAML).
    physical_traits: dict[str, str] = field(default_factory=dict)
+
+
@dataclass
class VariationJob:
    """A single generation job produced by the variation engine."""

    # Unique id for this individual job.
    job_id: str
    # Shared id linking all jobs of one generated batch.
    batch_id: str
    # Character whose LoRA/trigger word this job uses.
    character: CharacterProfile
    # Prompt template the job renders.
    template_id: str
    # "sfw" or "nsfw".
    content_rating: str
    # Fully resolved variable combination for this job.
    variables: dict[str, str]
    # Sampler seed.
    seed: int
    # LoRA specs (character LoRA first, then style LoRAs).
    loras: list[dict[str, Any]]
+
+
class VariationEngine:
    """Generates batches of variation jobs from templates."""

    def __init__(self, template_engine: TemplateEngine):
        """Store the template engine used to resolve template ids."""
        self.template_engine = template_engine

    def generate_batch(
        self,
        template_id: str,
        character: CharacterProfile,
        *,
        content_rating: str = "sfw",
        count: int = 10,
        variation_mode: str = "random",  # curated | random | exhaustive
        pin: dict[str, str] | None = None,
        seed_strategy: str = "random",  # random | sequential | fixed
        base_seed: int | None = None,
    ) -> list[VariationJob]:
        """Generate a batch of variation jobs.

        Args:
            template_id: Which prompt template to use.
            character: Character profile for LoRA and trigger word.
            content_rating: "sfw" or "nsfw".
            count: Number of variations to generate.
            variation_mode: How to select variable combinations
                ("curated" | "random" | "exhaustive").
            pin: Variables to keep fixed across all variations.
            seed_strategy: How to assign seeds ("random" | "sequential" | "fixed").
            base_seed: Starting seed for the sequential/fixed strategies.

        Returns:
            One VariationJob per combination, all sharing one batch_id.

        Raises:
            KeyError: If template_id is unknown to the template engine.
        """
        # Resolve the template once (the previous version fetched it twice:
        # here and again inside _select_combinations).
        template = self.template_engine.get(template_id)
        pin = pin or {}
        batch_id = str(uuid.uuid4())

        # Build variable combinations for this batch.
        combos = self._select_combinations(template.variables, count, variation_mode, pin)

        # Inject character-specific variables into every combination.
        for combo in combos:
            combo["character_trigger"] = character.trigger_word
            combo["character_lora"] = character.lora_filename

        # Character LoRA first, then any style LoRAs.
        base_loras: list[dict[str, Any]] = [
            {
                "name": character.lora_filename,
                "strength_model": character.lora_strength,
                "strength_clip": character.lora_strength,
            }
        ]
        base_loras.extend(character.style_loras)

        # Create jobs.
        jobs = []
        for i, combo in enumerate(combos):
            seed = self._get_seed(seed_strategy, base_seed, i)
            jobs.append(
                VariationJob(
                    job_id=str(uuid.uuid4()),
                    batch_id=batch_id,
                    character=character,
                    template_id=template_id,
                    content_rating=content_rating,
                    variables=combo,
                    seed=seed,
                    # Copy each spec dict so a mutation on one job's LoRA
                    # list cannot leak into every other job in the batch.
                    loras=[dict(spec) for spec in base_loras],
                )
            )

        return jobs

    def _select_combinations(
        self,
        variables: dict,
        count: int,
        mode: str,
        pin: dict[str, str],
    ) -> list[dict[str, str]]:
        """Select variable combinations based on mode.

        Takes the already-resolved variable definitions (avoids a second
        template lookup per batch).
        """
        if mode == "exhaustive":
            return self._exhaustive_combos(variables, count, pin)
        # "random" — and "curated", which falls back to random for now.
        return self._random_combos(variables, count, pin)

    def _random_combos(
        self,
        variables: dict,
        count: int,
        pin: dict[str, str],
    ) -> list[dict[str, str]]:
        """Generate ``count`` independently sampled combinations."""
        combos = []
        for _ in range(count):
            combo: dict[str, str] = {}
            for var_name, var_def in variables.items():
                if var_name in pin:
                    # Pinned values override random selection.
                    combo[var_name] = pin[var_name]
                elif var_def.type == "choice" and var_def.options:
                    combo[var_name] = random.choice(var_def.options)
                elif var_def.default:
                    combo[var_name] = var_def.default
            combos.append(combo)
        return combos

    def _exhaustive_combos(
        self,
        variables: dict,
        count: int,
        pin: dict[str, str],
    ) -> list[dict[str, str]]:
        """Generate exhaustive (cartesian product) combinations, capped at count."""
        axes: list[list[tuple[str, str]]] = []
        for var_name, var_def in variables.items():
            if var_name in pin:
                # A pinned variable contributes a single-value axis.
                axes.append([(var_name, pin[var_name])])
            elif var_def.type == "choice" and var_def.options:
                axes.append([(var_name, opt) for opt in var_def.options])

        if not axes:
            # One *independent* empty dict per slot. The previous
            # ``[{}] * count`` aliased a single shared dict, so every job's
            # variables pointed at the same object.
            return [{} for _ in range(count)]

        all_combos = [dict(combo) for combo in itertools.product(*axes)]
        if len(all_combos) > count:
            # Cap by sampling without replacement to keep variety.
            all_combos = random.sample(all_combos, count)
        return all_combos

    def _get_seed(
        self, strategy: str, base_seed: int | None, index: int
    ) -> int:
        """Generate a seed based on strategy (random when base_seed is absent)."""
        if strategy == "fixed" and base_seed is not None:
            return base_seed
        if strategy == "sequential" and base_seed is not None:
            return base_seed + index
        return random.randint(0, 2**32 - 1)
diff --git a/src/content_engine/services/workflow_builder.py b/src/content_engine/services/workflow_builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a2b0dfe7e230193b5b8d31ce6cc677cd27ae319
--- /dev/null
+++ b/src/content_engine/services/workflow_builder.py
@@ -0,0 +1,164 @@
+"""Builds ComfyUI API-format workflow JSON from templates and parameters.
+
+The workflow builder loads base workflow templates (JSON files representing
+ComfyUI node graphs) and injects generation-specific values: checkpoint,
+LoRAs, prompts, seeds, dimensions, and output filenames.
+"""
+
+from __future__ import annotations
+
+import copy
+import json
+import logging
+import os
+from pathlib import Path
+from typing import Any
+
+logger = logging.getLogger(__name__)
+
# True when running inside a Hugging Face Space (either env marker is set).
IS_HF_SPACES = os.environ.get("HF_SPACES") == "1" or os.environ.get("SPACE_ID") is not None

# Workflow-template directory. Overridable via the WORKFLOWS_DIR env var so
# deployments are not tied to one developer's absolute path; falls back to
# the HF Spaces location or the historical local default.
_DEFAULT_WORKFLOWS_DIR = (
    Path("/app/config/templates/workflows")
    if IS_HF_SPACES
    else Path("D:/AI automation/content_engine/config/templates/workflows")
)
WORKFLOWS_DIR = Path(os.environ.get("WORKFLOWS_DIR", str(_DEFAULT_WORKFLOWS_DIR)))
+
+
class WorkflowBuilder:
    """Constructs ComfyUI workflows from base templates + per-job parameters.

    Base templates are API-format ComfyUI node graphs (JSON) identified by
    conventional node ids (see ``build``). Templates are cached after first
    load; ``build`` always deep-copies the cached graph before mutating it.
    """

    def __init__(self, workflows_dir: Path | None = None):
        """Args:
            workflows_dir: Directory of *.json workflow templates;
                defaults to WORKFLOWS_DIR.
        """
        self.workflows_dir = workflows_dir or WORKFLOWS_DIR
        # name -> parsed template; treated as read-only after load.
        self._cache: dict[str, dict] = {}

    def _load_template(self, name: str) -> dict:
        """Load and cache a base workflow JSON template.

        Returns the cached object itself — callers must deep-copy before
        mutating (as ``build`` does).

        Raises:
            FileNotFoundError: If ``<name>.json`` does not exist.
        """
        if name not in self._cache:
            path = self.workflows_dir / f"{name}.json"
            if not path.exists():
                raise FileNotFoundError(f"Workflow template not found: {path}")
            # Explicit encoding: JSON is UTF-8; default locale encoding is
            # platform-dependent (previously unspecified).
            with open(path, encoding="utf-8") as f:
                self._cache[name] = json.load(f)
        return self._cache[name]

    def build(
        self,
        template_name: str = "sd15_base_sfw",
        *,
        checkpoint: str = "realisticVisionV51_v51VAE.safetensors",
        positive_prompt: str = "",
        negative_prompt: str = "",
        loras: list[dict[str, Any]] | None = None,
        seed: int = -1,
        steps: int = 28,
        cfg: float = 7.0,
        sampler_name: str = "dpmpp_2m",
        scheduler: str = "karras",
        width: int = 832,
        height: int = 1216,
        batch_size: int = 1,
        filename_prefix: str = "content_engine",
        denoise: float | None = None,
        reference_image: str | None = None,
    ) -> dict:
        """Build a complete workflow dict ready for ComfyUI /prompt endpoint.

        A negative ``seed`` means "randomize". Missing nodes in the template
        are skipped silently, so one builder serves both txt2img and img2img
        graphs.

        The base template must have these node IDs (by convention):
            - "1": CheckpointLoaderSimple
            - "2": CLIPTextEncode (positive)
            - "3": CLIPTextEncode (negative)
            - "4": EmptyLatentImage (txt2img) or absent for img2img
            - "5": KSampler
            - "6": VAEDecode
            - "7": SaveImage
            - "8": LoadImage (img2img only)
            - "9": VAEEncode (img2img only)
            - "10", "11", ...: LoraLoader chain (optional, added dynamically)
        """
        base = copy.deepcopy(self._load_template(template_name))

        # Checkpoint
        if "1" in base:
            base["1"]["inputs"]["ckpt_name"] = checkpoint

        # Prompts
        if "2" in base:
            base["2"]["inputs"]["text"] = positive_prompt
        if "3" in base:
            base["3"]["inputs"]["text"] = negative_prompt

        # Latent image dimensions (txt2img only)
        if "4" in base:
            base["4"]["inputs"]["width"] = width
            base["4"]["inputs"]["height"] = height
            base["4"]["inputs"]["batch_size"] = batch_size

        # KSampler
        if "5" in base:
            base["5"]["inputs"]["seed"] = seed if seed >= 0 else _random_seed()
            base["5"]["inputs"]["steps"] = steps
            base["5"]["inputs"]["cfg"] = cfg
            base["5"]["inputs"]["sampler_name"] = sampler_name
            base["5"]["inputs"]["scheduler"] = scheduler
            if denoise is not None:
                base["5"]["inputs"]["denoise"] = denoise

        # Reference image for img2img (LoadImage node)
        if "8" in base and reference_image:
            base["8"]["inputs"]["image"] = reference_image

        # SaveImage filename prefix
        if "7" in base:
            base["7"]["inputs"]["filename_prefix"] = filename_prefix

        # Inject LoRA chain
        if loras:
            base = self._inject_loras(base, loras)

        return base

    def _inject_loras(
        self, workflow: dict, loras: list[dict[str, Any]]
    ) -> dict:
        """Dynamically insert LoraLoader nodes into the workflow graph.

        Each LoRA gets a node ID starting at "10". The chain connects:
        checkpoint -> lora_10 -> lora_11 -> ... -> KSampler/CLIP nodes.
        Each spec requires a "name" key; strengths default to 0.85.
        """
        if not loras:
            return workflow

        # Determine where model and clip currently flow from.
        # By default, KSampler (node 5) takes model from checkpoint (node 1,
        # slot 0) and CLIP encoders (nodes 2,3) take clip from node 1 slot 1.
        prev_model_ref = ["1", 0]  # checkpoint model output
        prev_clip_ref = ["1", 1]  # checkpoint clip output

        for i, lora_spec in enumerate(loras):
            node_id = str(10 + i)
            workflow[node_id] = {
                "class_type": "LoraLoader",
                "inputs": {
                    "lora_name": lora_spec["name"],
                    "strength_model": lora_spec.get("strength_model", 0.85),
                    "strength_clip": lora_spec.get("strength_clip", 0.85),
                    "model": prev_model_ref,
                    "clip": prev_clip_ref,
                },
            }
            prev_model_ref = [node_id, 0]
            prev_clip_ref = [node_id, 1]

        # Rewire KSampler to take model from the last LoRA in the chain.
        if "5" in workflow:
            workflow["5"]["inputs"]["model"] = prev_model_ref

        # Rewire CLIP text encoders to take clip from the last LoRA.
        if "2" in workflow:
            workflow["2"]["inputs"]["clip"] = prev_clip_ref
        if "3" in workflow:
            workflow["3"]["inputs"]["clip"] = prev_clip_ref

        return workflow
+
+
+def _random_seed() -> int:
+ """Generate a random seed in ComfyUI's expected range."""
+ import random
+ return random.randint(0, 2**32 - 1)
diff --git a/src/content_engine/workers/__init__.py b/src/content_engine/workers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8b0408c1189d5d5e53f9a2a555f1f7b790e742d
--- /dev/null
+++ b/src/content_engine/workers/__init__.py
@@ -0,0 +1 @@
+"""Worker processes for handling generation jobs."""
diff --git a/src/content_engine/workers/__pycache__/__init__.cpython-311.pyc b/src/content_engine/workers/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..75c97ea2e6c9e2ee29104592355b9f9e3b93886a
Binary files /dev/null and b/src/content_engine/workers/__pycache__/__init__.cpython-311.pyc differ
diff --git a/src/content_engine/workers/__pycache__/local_worker.cpython-311.pyc b/src/content_engine/workers/__pycache__/local_worker.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..14da557b7b370aa32cc1f4a3f70644f690b3900e
Binary files /dev/null and b/src/content_engine/workers/__pycache__/local_worker.cpython-311.pyc differ
diff --git a/src/content_engine/workers/cloud_worker.py b/src/content_engine/workers/cloud_worker.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ad65f36adb631a7bc7cd9e1836e36c458dd68b9
--- /dev/null
+++ b/src/content_engine/workers/cloud_worker.py
@@ -0,0 +1,22 @@
+"""Cloud worker stub — processes generation jobs via cloud APIs.
+
+This is a placeholder for Phase 4 implementation. When cloud providers
+are configured, this worker handles generation through external APIs.
+"""
+
+from __future__ import annotations
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
class CloudWorker:
    """Processes generation jobs via cloud GPU APIs (Phase 4)."""

    async def process_job(self, **kwargs) -> str:
        """Placeholder — cloud generation not yet implemented.

        Accepts arbitrary keyword arguments (presumably mirroring
        LocalWorker.process_job so callers can swap workers — verify once
        Phase 4 lands) and always raises.

        Raises:
            NotImplementedError: Always, until Phase 4 is implemented.
        """
        raise NotImplementedError(
            "Cloud generation is not yet implemented. "
            "Configure cloud providers in settings.yaml (Phase 4)."
        )
diff --git a/src/content_engine/workers/local_worker.py b/src/content_engine/workers/local_worker.py
new file mode 100644
index 0000000000000000000000000000000000000000..13fcf51051d28f043e469f7b9c105307cd486d74
--- /dev/null
+++ b/src/content_engine/workers/local_worker.py
@@ -0,0 +1,172 @@
+"""Local worker that processes generation jobs through ComfyUI.
+
+The local worker takes a generation job, builds a ComfyUI workflow,
+submits it to the local ComfyUI instance, waits for completion,
+downloads the result, saves it to the output directory, and records
+metadata in the catalog.
+"""
+
+from __future__ import annotations
+
+import logging
+import time
+import uuid
+from typing import Any
+
+from content_engine.config import settings
+from content_engine.services.catalog import CatalogService
+from content_engine.services.comfyui_client import ComfyUIClient
+from content_engine.services.template_engine import TemplateEngine
+from content_engine.services.workflow_builder import WorkflowBuilder
+
+logger = logging.getLogger(__name__)
+
+
+class LocalWorker:
+ """Processes generation jobs on the local GPU via ComfyUI."""
+
+ def __init__(
+ self,
+ comfyui_client: ComfyUIClient,
+ workflow_builder: WorkflowBuilder,
+ template_engine: TemplateEngine,
+ catalog: CatalogService,
+ ):
+ self.comfyui = comfyui_client
+ self.workflow_builder = workflow_builder
+ self.template_engine = template_engine
+ self.catalog = catalog
+
+ async def process_job(
+ self,
+ *,
+ job_id: str | None = None,
+ batch_id: str | None = None,
+ character_id: str | None = None,
+ template_id: str | None = None,
+ content_rating: str = "sfw",
+ positive_prompt: str | None = None,
+ negative_prompt: str | None = None,
+ checkpoint: str | None = None,
+ loras: list[dict[str, Any]] | None = None,
+ seed: int = -1,
+ steps: int | None = None,
+ cfg: float | None = None,
+ sampler: str | None = None,
+ scheduler: str | None = None,
+ width: int | None = None,
+ height: int | None = None,
+ variables: dict[str, str] | None = None,
+ denoise: float | None = None,
+ reference_image: str | None = None,
+ ) -> str:
+ """Process a single generation job. Returns the catalog image ID."""
+ job_id = job_id or str(uuid.uuid4())
+ gen = settings.generation
+
+ # Resolve prompt from template if template_id provided
+ rendered_loras = loras or []
+ if template_id and self.template_engine:
+ rendered = self.template_engine.render(template_id, variables or {})
+ if not positive_prompt:
+ positive_prompt = rendered.positive_prompt
+ if not negative_prompt:
+ negative_prompt = rendered.negative_prompt
+ if not rendered_loras:
+ rendered_loras = rendered.loras
+
+ # Apply defaults
+ checkpoint = checkpoint or gen.default_checkpoint
+ steps = steps or gen.default_steps
+ cfg = cfg or gen.default_cfg
+ sampler = sampler or gen.default_sampler
+ scheduler = scheduler or gen.default_scheduler
+ width = width or gen.default_width
+ height = height or gen.default_height
+
+ # Build filename
+ short_id = job_id[:8]
+ seed_val = seed if seed >= 0 else 0
+ char_prefix = character_id or "gen"
+ tmpl_prefix = template_id or "direct"
+ filename_prefix = f"{char_prefix}_{tmpl_prefix}_{short_id}_{seed_val}"
+
+ # Select workflow template based on content rating and mode
+ if reference_image:
+ workflow_template = f"sd15_img2img_{content_rating}"
+ else:
+ workflow_template = f"sd15_base_{content_rating}"
+
+ # Build ComfyUI workflow
+ workflow = self.workflow_builder.build(
+ template_name=workflow_template,
+ checkpoint=checkpoint,
+ positive_prompt=positive_prompt or "",
+ negative_prompt=negative_prompt or "",
+ loras=rendered_loras,
+ seed=seed,
+ steps=steps,
+ cfg=cfg,
+ sampler_name=sampler,
+ scheduler=scheduler,
+ width=width,
+ height=height,
+ filename_prefix=filename_prefix,
+ denoise=denoise,
+ reference_image=reference_image,
+ )
+
+ # Submit to ComfyUI and wait
+ logger.info("Submitting job %s to ComfyUI", job_id)
+ start_time = time.time()
+ result = await self.comfyui.generate(workflow)
+ generation_time = time.time() - start_time
+ logger.info(
+ "Job %s completed in %.1fs, %d images",
+ job_id,
+ generation_time,
+ len(result.images),
+ )
+
+ # Download and save each output image
+ image_id = None
+ for img_output in result.images:
+ image_bytes = await self.comfyui.download_image(img_output)
+
+ # Resolve output path
+ output_path = self.catalog.resolve_output_path(
+ character_id=character_id or "uncategorized",
+ content_rating=content_rating,
+ filename=img_output.filename,
+ )
+
+ # Save to disk
+ output_path.write_bytes(image_bytes)
+ logger.info("Saved image to %s", output_path)
+
+ # Record in catalog
+ image_id = await self.catalog.insert_image(
+ file_path=str(output_path),
+ image_bytes=image_bytes,
+ character_id=character_id,
+ template_id=template_id,
+ content_rating=content_rating,
+ batch_id=batch_id,
+ positive_prompt=positive_prompt,
+ negative_prompt=negative_prompt,
+ checkpoint=checkpoint,
+ loras=rendered_loras,
+ seed=seed,
+ steps=steps,
+ cfg=cfg,
+ sampler=sampler,
+ scheduler=scheduler,
+ width=width,
+ height=height,
+ generation_backend="local",
+ comfyui_prompt_id=result.prompt_id,
+ generation_time_seconds=generation_time,
+ variables=variables,
+ )
+
+ return image_id or ""