"""Pydantic request/response schemas for the API."""
from __future__ import annotations
from datetime import datetime
from pydantic import BaseModel, Field
# --- Request schemas ---
class LoRASpec(BaseModel):
    """One LoRA to apply during generation, with per-component weights."""

    # Identifier of the LoRA as known to the generation backend.
    name: str
    # Weight applied to the model component of the LoRA.
    strength_model: float = Field(default=0.85)
    # Weight applied to the CLIP component of the LoRA.
    strength_clip: float = Field(default=0.85)
class GenerationRequest(BaseModel):
    """Single image generation request.

    Every optional field defaults to ``None``; presumably the server
    substitutes its own defaults downstream — TODO confirm against the
    handler that consumes this schema.
    """

    # References to stored entities — NOTE(review): look like DB ids; verify against caller.
    character_id: str | None = None
    template_id: str | None = None
    content_rating: str = "sfw"  # sfw | nsfw
    # Direct prompt override (if not using template)
    positive_prompt: str | None = None
    negative_prompt: str | None = None
    # Model configuration
    checkpoint: str | None = None
    loras: list[LoRASpec] = Field(default_factory=list)
    # Sampler settings (None = use backend/server default)
    seed: int | None = None
    steps: int | None = None
    cfg: float | None = None
    sampler: str | None = None
    scheduler: str | None = None
    width: int | None = None
    height: int | None = None
    # Variation variables (for template rendering)
    variables: dict[str, str] = Field(default_factory=dict)
class BatchRequest(BaseModel):
    """Batch generation request.

    Unlike :class:`GenerationRequest`, character and template are
    required here — a batch is always rendered from a template.
    """

    character_id: str
    template_id: str
    content_rating: str = "sfw"  # sfw | nsfw
    # Number of images to generate in this batch.
    count: int = 10
    # How variation variables are chosen per image.
    variation_mode: str = "random"  # curated | random | exhaustive
    # Variables forced to a fixed value across the whole batch.
    pin: dict[str, str] = Field(default_factory=dict)
    # How seeds are assigned across the batch.
    seed_strategy: str = "random"  # random | sequential | fixed
# --- Response schemas ---
class GenerationResponse(BaseModel):
    """Acknowledgement returned when a generation job is accepted."""

    # Identifier of the queued job.
    job_id: str
    # Set only when the job belongs to a batch.
    batch_id: str | None = Field(default=None)
    # Current job state at the time of the response.
    status: str
    # Which backend was selected for the job, when known.
    backend: str | None = Field(default=None)
class JobStatus(BaseModel):
    """Point-in-time status of a single generation job."""

    job_id: str
    batch_id: str | None = None
    status: str  # pending | queued | running | completed | failed
    backend: str | None = None
    progress: float | None = None  # 0.0 - 1.0
    # Image produced on success; None until the job completes.
    result_image_id: str | None = None
    # Populated when status == "failed".
    error_message: str | None = None
    # Lifecycle timestamps; each is None until the stage is reached.
    created_at: datetime | None = None
    started_at: datetime | None = None
    completed_at: datetime | None = None
class ImageResponse(BaseModel):
    """A generated image record returned by the API."""

    id: str
    character_id: str | None = None
    template_id: str | None = None
    content_rating: str  # sfw | nsfw — TODO confirm same vocabulary as requests
    # Storage location — NOTE(review): presumably server-local; verify before exposing to clients.
    file_path: str
    seed: int | None = None
    # Variation axes the image was rendered with (template variables).
    pose: str | None = None
    outfit: str | None = None
    emotion: str | None = None
    camera_angle: str | None = None
    lighting: str | None = None
    scene: str | None = None
    # Automated quality score, when computed.
    quality_score: float | None = None
    # Curation flags.
    is_approved: bool
    is_published: bool
    created_at: datetime | None = None
class SystemStatus(BaseModel):
    """Snapshot of backend availability and resource usage."""

    # Whether the local ComfyUI instance is reachable.
    comfyui_connected: bool
    # GPU details; None when no GPU information is available.
    gpu_name: str | None = Field(default=None)
    vram_total_gb: float | None = Field(default=None)
    vram_free_gb: float | None = Field(default=None)
    # Jobs waiting on the local backend.
    local_queue_depth: int = Field(default=0)
    # Whether a cloud fallback backend can be used.
    cloud_available: bool = Field(default=False)
    # Aggregate counters.
    total_images: int = Field(default=0)
    pending_jobs: int = Field(default=0)
class BatchStatusResponse(BaseModel):
    """Per-state job counts for one batch.

    NOTE(review): presumably completed + failed + pending + running
    == total_jobs — confirm whether a "queued" state is folded into
    pending by the producer of this response.
    """

    batch_id: str
    total_jobs: int
    completed: int
    failed: int
    pending: int
    running: int