import json
import uuid
import urllib.request
import urllib.parse
import urllib.error
import os
import random
import time
import shutil
import asyncio
import requests
import httpx
from typing import List, Dict, Any, Optional
from threading import Lock
from fastapi import FastAPI, HTTPException, WebSocket, WebSocketDisconnect, UploadFile, File
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse, Response
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

# Allow cross-origin requests from any frontend host.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)


# --- WebSocket state manager ---
class ConnectionManager:
    """Tracks live WebSocket connections and pushes stats / new-image events."""

    def __init__(self):
        # All open sockets (for broadcasts).
        self.active_connections: List[WebSocket] = []
        # client_id -> socket, for targeted messages.
        self.user_connections: Dict[str, WebSocket] = {}

    async def connect(self, websocket: WebSocket, client_id: str = None):
        """Accept a socket, register it, and broadcast the new online count."""
        await websocket.accept()
        self.active_connections.append(websocket)
        if client_id:
            self.user_connections[client_id] = websocket
        print(f"WS Connected. Total: {len(self.active_connections)}")
        await self.broadcast_count()

    async def disconnect(self, websocket: WebSocket, client_id: str = None):
        """Unregister a socket and broadcast the updated online count."""
        if websocket in self.active_connections:
            self.active_connections.remove(websocket)
        if client_id and client_id in self.user_connections:
            del self.user_connections[client_id]
        print(f"WS Disconnected. Total: {len(self.active_connections)}")
        await self.broadcast_count()

    async def send_personal_message(self, message: dict, client_id: str):
        """Send a JSON message to one client; drop the connection on failure."""
        if client_id in self.user_connections:
            try:
                await self.user_connections[client_id].send_text(json.dumps(message))
            except Exception as e:
                print(f"WS Send Error ({client_id}): {e}")
                # BUG FIX: disconnect() is a coroutine; it was previously called
                # without await, so the cleanup never actually executed.
                await self.disconnect(self.user_connections[client_id], client_id)

    async def broadcast_count(self):
        """Broadcast the current online count to every connected client."""
        count = len(self.active_connections)
        data = json.dumps({"type": "stats", "online_count": count})
        print(f"Broadcasting online count: {count}")
        # Iterate over a copy so we can safely remove dead sockets mid-loop.
        for connection in self.active_connections[:]:
            try:
                await connection.send_text(data)
            except Exception as e:
                print(f"Broadcast error for client {id(connection)}: {e}")
                self.active_connections.remove(connection)

    async def broadcast_new_image(self, image_data: dict):
        """Broadcast a freshly generated image record to all clients."""
        data = json.dumps({"type": "new_image", "data": image_data})
        print(f"Broadcasting new image to {len(self.active_connections)} clients")
        for connection in self.active_connections[:]:
            try:
                await connection.send_text(data)
            except Exception as e:
                print(f"Broadcast image error for client {id(connection)}: {e}")
                self.active_connections.remove(connection)


manager = ConnectionManager()

# Global event-loop reference, set on startup.  Needed so the sync /api/generate
# endpoint (running in FastAPI's threadpool) can schedule async broadcasts.
GLOBAL_LOOP = None


@app.on_event("startup")
async def startup_event():
    global GLOBAL_LOOP
    GLOBAL_LOOP = asyncio.get_running_loop()


@app.websocket("/ws/stats")
async def websocket_endpoint(websocket: WebSocket, client_id: str = None):
    """Stats channel: answers client heartbeats and receives broadcasts."""
    await manager.connect(websocket, client_id)
    try:
        while True:
            # Receive client heartbeat packets.
            data = await websocket.receive_text()
            if data == "ping":
                await websocket.send_text(json.dumps({"type": "pong"}))
    except WebSocketDisconnect:
        print(f"WebSocket disconnected normally: {id(websocket)}")
        await manager.disconnect(websocket, client_id)
    except Exception as e:
        print(f"WS Error for {id(websocket)}: {e}")
        await manager.disconnect(websocket, client_id)


# --- Configuration ---
# Multi-GPU load balancing: configure several ComfyUI addresses.
COMFYUI_INSTANCES = [
    "127.0.0.1:8188",  # local default port
    "127.0.0.1:4090",  # GPU offload port
]
# Kept for backward compatibility; defaults to the first instance.
COMFYUI_ADDRESS = COMFYUI_INSTANCES[0]
CLIENT_ID = str(uuid.uuid4())

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
WORKFLOW_DIR = os.path.join(BASE_DIR, "workflows")
WORKFLOW_PATH = os.path.join(WORKFLOW_DIR, "Z-Image.json")
STATIC_DIR = os.path.join(BASE_DIR, "static")
OUTPUT_DIR = os.path.join(BASE_DIR, "output")
HISTORY_FILE = os.path.join(BASE_DIR, "history.json")

QUEUE = []
QUEUE_LOCK = Lock()
HISTORY_LOCK = Lock()
# The global execution lock was removed so requests can run concurrently and be
# dispatched to different GPUs.
# EXECUTION_LOCK = Lock()
NEXT_TASK_ID = 1

# Load balancing: local in-flight counters per backend.  These compensate for
# ComfyUI's delayed queue updates, which otherwise cause a dispatch race.
BACKEND_LOCAL_LOAD = {addr: 0 for addr in COMFYUI_INSTANCES}
LOAD_LOCK = Lock()

# Make sure the required directories exist.
os.makedirs(OUTPUT_DIR, exist_ok=True)
os.makedirs(STATIC_DIR, exist_ok=True)
os.makedirs(WORKFLOW_DIR, exist_ok=True)

GLOBAL_CONFIG_FILE = os.path.join(BASE_DIR, "global_config.json")
GLOBAL_CONFIG_LOCK = Lock()

# Mount static file directories.
app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
app.mount("/output", StaticFiles(directory=OUTPUT_DIR), name="output")


class GenerateRequest(BaseModel):
    """Payload for local ComfyUI generation (/api/generate)."""
    prompt: str = ""
    width: int = 1024
    height: int = 1024
    workflow_json: str = "Z-Image.json"
    params: Dict[str, Any] = {}
    type: str = "zimage"
    client_id: str = ""


class CloudGenRequest(BaseModel):
    """Payload for ModelScope cloud generation (/generate, /api/angle/*)."""
    prompt: str
    api_key: str
    resolution: str = "1024x1024"
    client_id: Optional[str] = None
    type: str = "default"
    image_urls: List[str] = []
    model: str = ""


class DeleteHistoryRequest(BaseModel):
    """Identifies a history record by its timestamp."""
    timestamp: float


# --- Load-balancing helpers ---
def get_best_backend():
    """Return the ComfyUI backend with the lowest effective queue pressure.

    Unreachable backends are skipped; if none responds, the first instance is
    returned as a fallback.
    """
    best_backend = COMFYUI_INSTANCES[0]
    min_queue_size = float('inf')
    for addr in COMFYUI_INSTANCES:
        try:
            # Query the ComfyUI queue state.
            with urllib.request.urlopen(f"http://{addr}/queue", timeout=1) as response:
                data = json.loads(response.read())
            # Total task count: running + pending.
            remote_load = len(data.get('queue_running', [])) + len(data.get('queue_pending', []))
            # Locally tracked load (guards against the race where a task was
            # just submitted but ComfyUI's queue does not list it yet).
            with LOAD_LOCK:
                local_load = BACKEND_LOCAL_LOAD.get(addr, 0)
            # Use the max of both as the effective load: remote sees externally
            # submitted tasks, local sees our not-yet-visible ones.
            effective_load = max(remote_load, local_load)
            print(f"Backend {addr} load: {effective_load} (Remote: {remote_load}, Local: {local_load})")
            if effective_load < min_queue_size:
                min_queue_size = effective_load
                best_backend = addr
        except Exception as e:
            print(f"Backend {addr} unreachable: {e}")
            continue
    print(f"Selected backend: {best_backend}")
    return best_backend


# --- Helpers ---
def download_image(comfy_address, comfy_url_path, prefix="studio_"):
    """Copy a remote ComfyUI image into OUTPUT_DIR and return a relative path.

    On download failure, returns a URL proxied through this service instead of
    the raw ComfyUI address (127.0.0.1 is unreachable from outside).
    """
    filename = f"{prefix}{uuid.uuid4().hex[:10]}.png"
    local_path = os.path.join(OUTPUT_DIR, filename)
    full_url = f"http://{comfy_address}{comfy_url_path}"
    try:
        with urllib.request.urlopen(full_url) as response, open(local_path, 'wb') as out_file:
            shutil.copyfileobj(response, out_file)
        # BUG FIX: previously returned a literal placeholder string; return the
        # actual saved file's frontend-visible path.
        return f"/output/{filename}"
    except Exception as e:
        print(f"下载图片失败: {e} (URL: {full_url})")
        # Rewrite /view?... to /api/view?... so the request goes through us.
        if comfy_url_path.startswith("/view"):
            return comfy_url_path.replace("/view", "/api/view", 1)
        return full_url


def save_to_history(record):
    """Prepend a record to the JSON history file (capped at 5000 entries)."""
    with HISTORY_LOCK:
        history = []
        if os.path.exists(HISTORY_FILE):
            try:
                with open(HISTORY_FILE, 'r', encoding='utf-8') as f:
                    history = json.load(f)
            except Exception:
                # Corrupt/missing history is treated as empty (best effort).
                pass
        # Ensure a float timestamp so records sort correctly.
        if "timestamp" not in record:
            record["timestamp"] = time.time()
        history.insert(0, record)
        # Cap total record count to keep the file from growing unbounded.
        with open(HISTORY_FILE, 'w', encoding='utf-8') as f:
            json.dump(history[:5000], f, ensure_ascii=False, indent=4)


def get_comfy_history(comfy_address, prompt_id):
    """Fetch ComfyUI's history entry for a prompt; {} if unavailable."""
    try:
        with urllib.request.urlopen(f"http://{comfy_address}/history/{prompt_id}") as response:
            return json.loads(response.read())
    except Exception:
        return {}


# --- Routes ---
@app.get("/api/view")
def view_image(filename: str, type: str = "input", subfolder: str = ""):
    """Proxy an image from the first ComfyUI instance.

    Generated outputs are already copied to local storage by /api/generate, so
    this route mainly serves uploaded input images.
    """
    try:
        url = f"http://{COMFYUI_INSTANCES[0]}/view"
        params = {"filename": filename, "type": type, "subfolder": subfolder}
        r = requests.get(url, params=params)
        return Response(content=r.content, media_type=r.headers.get('Content-Type'))
    except Exception:
        raise HTTPException(status_code=404, detail="Image not found")


@app.post("/api/upload")
async def upload_image(files: List[UploadFile] = File(...)):
    """Upload each file to every ComfyUI backend so either GPU can use it."""
    uploaded_files = []
    # Read each file's content exactly once.
    files_content = []
    for file in files:
        content = await file.read()
        files_content.append((file, content))
    # Fan the upload out to all backend instances.
    for file, content in files_content:
        success_count = 0
        last_result = None
        for addr in COMFYUI_INSTANCES:
            try:
                # Prepare multipart upload for ComfyUI.
                files_data = {'image': (file.filename, content, file.content_type)}
                response = requests.post(f"http://{addr}/upload/image", files=files_data, timeout=5)
                if response.status_code == 200:
                    last_result = response.json()
                    success_count += 1
                else:
                    print(f"Upload to {addr} failed: {response.text}")
            except Exception as e:
                print(f"Upload error for {addr}: {e}")
        if success_count > 0 and last_result:
            uploaded_files.append({"comfy_name": last_result.get("name", file.filename)})
        else:
            raise HTTPException(status_code=500, detail=f"Failed to upload to any backend")
    return {"files": uploaded_files}


@app.get("/")
async def index():
    return FileResponse(os.path.join(STATIC_DIR, "index.html"))


@app.get("/api/history")
async def get_history_api(type: str = None):
    """Return history records, optionally filtered by type, newest first."""
    if os.path.exists(HISTORY_FILE):
        try:
            with open(HISTORY_FILE, 'r', encoding='utf-8') as f:
                data = json.load(f)
            # Filter by type; "zimage" also includes "cloud" records.
            if type:
                target_types = [type]
                if type == "zimage":
                    target_types.append("cloud")
                data = [item for item in data if item.get("type", "zimage") in target_types]
            # Drop invalid records (no images).
            data = [item for item in data if item.get("images") and len(item["images"]) > 0]

            # Sort server-side.  Legacy records may carry string timestamps;
            # those sort last.
            def sort_key(item):
                ts = item.get("timestamp", 0)
                if isinstance(ts, (int, float)):
                    return float(ts)
                return 0

            data.sort(key=sort_key, reverse=True)
            # Backfill the is_cloud flag from filename hints when the record
            # predates the flag.
            for item in data:
                if "is_cloud" not in item and item.get("images"):
                    if any("cloud_angle" in img or "cloud_" in img for img in item["images"]):
                        item["is_cloud"] = True
            return data
        except Exception as e:
            print(f"读取历史文件失败: {e}")
            return []
    return []


@app.get("/api/queue_status")
async def get_queue_status(client_id: str):
    """Report total queue length and this client's 1-based position (0 = absent)."""
    with QUEUE_LOCK:
        total = len(QUEUE)
        positions = [i + 1 for i, t in enumerate(QUEUE) if t["client_id"] == client_id]
        position = positions[0] if positions else 0
    return {"total": total, "position": position}


@app.post("/api/history/delete")
async def delete_history(req: DeleteHistoryRequest):
    """Delete a history record (matched by timestamp) and its local image files."""
    if not os.path.exists(HISTORY_FILE):
        return {"success": False, "message": "History file not found"}
    try:
        with HISTORY_LOCK:
            with open(HISTORY_FILE, 'r', encoding='utf-8') as f:
                history = json.load(f)
            # Find and remove the matching record.
            target_record = None
            new_history = []
            for item in history:
                is_match = False
                item_ts = item.get("timestamp", 0)
                # Try numeric match first (tolerate float rounding).
                if isinstance(req.timestamp, (int, float)) and isinstance(item_ts, (int, float)):
                    if abs(float(item_ts) - float(req.timestamp)) < 0.001:
                        is_match = True
                # Fall back to string comparison for legacy records.
                elif str(item_ts) == str(req.timestamp):
                    is_match = True
                if is_match:
                    target_record = item
                else:
                    new_history.append(item)
            if target_record:
                # Persist the trimmed history first (atomic-like).
                with open(HISTORY_FILE, 'w', encoding='utf-8') as f:
                    json.dump(new_history, f, ensure_ascii=False, indent=4)
        # Delete image files outside the lock (slow IO).
        if target_record:
            for img_url in target_record.get("images", []):
                # img_url looks like "/output/filename.png".
                if img_url.startswith("/output/"):
                    filename = img_url.split("/")[-1]
                    file_path = os.path.join(OUTPUT_DIR, filename)
                    if os.path.exists(file_path):
                        try:
                            os.remove(file_path)
                        except Exception as e:
                            print(f"Failed to delete file {file_path}: {e}")
            return {"success": True}
        else:
            return {"success": False, "message": "Record not found"}
    except Exception as e:
        print(f"Delete history error: {e}")
        return {"success": False, "message": str(e)}


class TokenRequest(BaseModel):
    token: str


@app.get("/api/config/token")
async def get_global_token():
    """Return the stored ModelScope token ("" when absent or unreadable)."""
    if os.path.exists(GLOBAL_CONFIG_FILE):
        try:
            with open(GLOBAL_CONFIG_FILE, 'r', encoding='utf-8') as f:
                config = json.load(f)
            return {"token": config.get("modelscope_token", "")}
        except Exception:
            return {"token": ""}
    return {"token": ""}


@app.post("/api/config/token")
async def set_global_token(req: TokenRequest):
    """Persist the ModelScope token into the global config file."""
    with GLOBAL_CONFIG_LOCK:
        config = {}
        if os.path.exists(GLOBAL_CONFIG_FILE):
            try:
                with open(GLOBAL_CONFIG_FILE, 'r', encoding='utf-8') as f:
                    config = json.load(f)
            except Exception:
                pass
        config["modelscope_token"] = req.token.strip()
        with open(GLOBAL_CONFIG_FILE, 'w', encoding='utf-8') as f:
            json.dump(config, f, indent=4)
    return {"success": True}


@app.delete("/api/config/token")
async def delete_global_token():
    """Remove the ModelScope token from the global config (best effort)."""
    with GLOBAL_CONFIG_LOCK:
        if os.path.exists(GLOBAL_CONFIG_FILE):
            try:
                config = {}
                with open(GLOBAL_CONFIG_FILE, 'r', encoding='utf-8') as f:
                    config = json.load(f)
                if "modelscope_token" in config:
                    del config["modelscope_token"]
                    with open(GLOBAL_CONFIG_FILE, 'w', encoding='utf-8') as f:
                        json.dump(config, f, indent=4)
            except Exception:
                pass
    return {"success": True}


class CloudPollRequest(BaseModel):
    task_id: str
    api_key: str
    client_id: Optional[str] = None


@app.post("/api/angle/poll_status")
async def poll_angle_cloud(req: CloudPollRequest):
    """Resume polling for an existing Angle task."""
    base_url = 'https://api-inference.modelscope.cn/'
    clean_token = req.api_key.strip()
    headers = {
        "Authorization": f"Bearer {clean_token}",
        "Content-Type": "application/json",
        "X-ModelScope-Async-Mode": "true"
    }
    task_id = req.task_id
    print(f"Resuming polling for Angle Task: {task_id}")
    try:
        async with httpx.AsyncClient(timeout=30) as client:
            # Poll status (another 300 retries, 2 s apart).
            for i in range(300):
                await asyncio.sleep(2)
                try:
                    result = await client.get(
                        f"{base_url}v1/tasks/{task_id}",
                        headers={**headers, "X-ModelScope-Task-Type": "image_generation"},
                    )
                    data = result.json()
                    status = data.get("task_status")
                    if status == "SUCCEED":
                        img_url = data["output_images"][0]
                        print(f"Angle Task SUCCEED: {img_url}")
                        if req.client_id:
                            await manager.send_personal_message({
                                "type": "cloud_status", "status": "SUCCEED", "task_id": task_id
                            }, req.client_id)
                        # Download the result image locally.
                        local_path = ""
                        try:
                            async with httpx.AsyncClient() as dl_client:
                                img_res = await dl_client.get(img_url)
                            if img_res.status_code == 200:
                                filename = f"cloud_angle_{int(time.time())}.png"
                                file_path = os.path.join(OUTPUT_DIR, filename)
                                with open(file_path, "wb") as f:
                                    f.write(img_res.content)
                                # BUG FIX: was a literal placeholder path.
                                local_path = f"/output/{filename}"
                            else:
                                local_path = img_url
                        except Exception:
                            local_path = img_url
                        record = {
                            "timestamp": time.time(),
                            "prompt": f"Resumed {task_id}",
                            "images": [local_path],
                            "type": "angle"
                        }
                        save_to_history(record)
                        return {"url": local_path}
                    elif status == "FAILED":
                        if req.client_id:
                            await manager.send_personal_message({
                                "type": "cloud_status", "status": "FAILED", "task_id": task_id
                            }, req.client_id)
                        # BUG FIX: raise HTTPException so the retry handler
                        # below does not swallow the failure and keep polling.
                        raise HTTPException(status_code=400, detail=f"ModelScope task failed: {data}")
                    if i % 5 == 0:
                        print(f"Angle Task {task_id} status: {status} ({i}/150)")
                        if req.client_id:
                            await manager.send_personal_message({
                                "type": "cloud_status",
                                "status": f"{status} ({i}/150)",
                                "task_id": task_id,
                                "progress": i,
                                "total": 150
                            }, req.client_id)
                except HTTPException:
                    raise
                except Exception as loop_e:
                    # Transient polling errors: log and retry.
                    print(f"Angle polling error: {loop_e}")
                    continue
        print(f"Angle Task Timeout Again: {task_id}")
        if req.client_id:
            await manager.send_personal_message({
                "type": "cloud_status", "status": "TIMEOUT", "task_id": task_id
            }, req.client_id)
        return {"status": "timeout", "task_id": task_id, "message": "Task still pending"}
    except HTTPException:
        raise
    except Exception as e:
        print(f"Angle polling error: {e}")
        raise HTTPException(status_code=400, detail=str(e))


@app.post("/api/angle/generate")
async def generate_angle_cloud(req: CloudGenRequest):
    """
    Dedicated endpoint for Angle/Qwen-Image-Edit tasks.
    Logic mirrors test/main.py but uses async httpx.
    """
    base_url = 'https://api-inference.modelscope.cn/'
    clean_token = req.api_key.strip()
    headers = {
        "Authorization": f"Bearer {clean_token}",
        "Content-Type": "application/json",
        "X-ModelScope-Async-Mode": "true"
    }
    # Payload matches test/main.py: "image_url" is a list of strings
    # (req.image_urls already has that shape).
    payload = {
        "model": "Qwen/Qwen-Image-Edit-2511",
        "prompt": req.prompt.strip(),
        "image_url": req.image_urls
    }
    print(f"Angle Cloud Request: {payload['model']}, Prompt: {payload['prompt'][:20]}...")
    try:
        async with httpx.AsyncClient(timeout=30) as client:
            # 1. Submit the task.
            submit_res = await client.post(
                f"{base_url}v1/images/generations",
                headers=headers,
                json=payload  # httpx handles JSON serialization
            )
            if submit_res.status_code != 200:
                try:
                    detail = submit_res.json()
                except Exception:
                    detail = submit_res.text
                print(f"Angle Submit Error: {detail}")
                raise HTTPException(status_code=submit_res.status_code, detail=detail)
            task_id = submit_res.json().get("task_id")
            print(f"Angle Task Submitted, ID: {task_id}")
            # Notify the frontend via WebSocket.
            if req.client_id:
                await manager.send_personal_message({
                    "type": "cloud_status", "status": "SUBMITTED", "task_id": task_id,
                    "progress": 0, "total": 150
                }, req.client_id)
            # 2. Poll status (300 retries * 2 s = 600 s / 10 min).
            for i in range(300):
                await asyncio.sleep(2)
                try:
                    result = await client.get(
                        f"{base_url}v1/tasks/{task_id}",
                        headers={**headers, "X-ModelScope-Task-Type": "image_generation"},
                    )
                    data = result.json()
                    status = data.get("task_status")
                    if status == "SUCCEED":
                        img_url = data["output_images"][0]
                        print(f"Angle Task SUCCEED: {img_url}")
                        # Notify WS success.
                        if req.client_id:
                            await manager.send_personal_message({
                                "type": "cloud_status", "status": "SUCCEED", "task_id": task_id
                            }, req.client_id)
                        # Download-and-save logic (same as the generic endpoint).
                        local_path = ""
                        try:
                            async with httpx.AsyncClient() as dl_client:
                                img_res = await dl_client.get(img_url)
                            if img_res.status_code == 200:
                                filename = f"cloud_angle_{int(time.time())}.png"
                                file_path = os.path.join(OUTPUT_DIR, filename)
                                with open(file_path, "wb") as f:
                                    f.write(img_res.content)
                                # BUG FIX: was a literal placeholder path.
                                local_path = f"/output/{filename}"
                                print(f"Angle Image saved: {local_path}")
                            else:
                                local_path = img_url
                        except Exception as dl_e:
                            print(f"Download error: {dl_e}")
                            local_path = img_url
                        # Save to history.
                        record = {
                            "timestamp": time.time(),
                            "prompt": req.prompt,
                            "images": [local_path],
                            "type": "angle",  # distinct type
                            "is_cloud": True
                        }
                        save_to_history(record)
                        return {"url": local_path}
                    elif status == "FAILED":
                        if req.client_id:
                            await manager.send_personal_message({
                                "type": "cloud_status", "status": "FAILED", "task_id": task_id
                            }, req.client_id)
                        # BUG FIX: raise HTTPException so the retry handler
                        # below does not swallow the failure and keep polling.
                        raise HTTPException(status_code=400, detail=f"ModelScope task failed: {data}")
                    # Log polling status every 5 iterations (10 seconds).
                    if i % 5 == 0:
                        print(f"Angle Task {task_id} status: {status} ({i}/150)")
                        if req.client_id:
                            await manager.send_personal_message({
                                "type": "cloud_status",
                                "status": f"{status} ({i}/150)",
                                "task_id": task_id,
                                "progress": i,
                                "total": 150
                            }, req.client_id)
                except HTTPException:
                    raise
                except Exception as loop_e:
                    # Transient polling errors: log and retry.
                    print(f"Angle polling error (retrying): {loop_e}")
                    continue
        # Timeout handling: report rather than raise, so the client can resume
        # via /api/angle/poll_status.
        print(f"Angle Task Timeout: {task_id}")
        if req.client_id:
            await manager.send_personal_message({
                "type": "cloud_status", "status": "TIMEOUT", "task_id": task_id
            }, req.client_id)
        return {"status": "timeout", "task_id": task_id, "message": "Task still pending"}
    except HTTPException:
        raise
    except Exception as e:
        print(f"Angle generation error: {e}")
        raise HTTPException(status_code=400, detail=str(e))


@app.post("/generate")
async def generate_cloud(req: CloudGenRequest):
    """Generate an image via ModelScope's Z-Image Turbo cloud API."""
    base_url = 'https://api-inference.modelscope.cn/'
    clean_token = req.api_key.strip()
    headers = {
        "Authorization": f"Bearer {clean_token}",
        "Content-Type": "application/json",
    }
    # Official Z-Image standard parameters.
    # (Angle logic moved to /api/angle/generate.)
    payload = {
        "model": "Tongyi-MAI/Z-Image-Turbo",
        "prompt": req.prompt.strip(),
        "size": req.resolution,
        "n": 1
    }
    try:
        async with httpx.AsyncClient(timeout=30) as client:
            # A. Submit the async task.
            print(f"Submitting ModelScope task for prompt: {req.prompt[:20]}...")
            # Use the json parameter for standard serialization (matches the
            # behavior of requests, including Content-Type handling).
            submit_res = await client.post(
                f"{base_url}v1/images/generations",
                headers={**headers, "X-ModelScope-Async-Mode": "true"},
                json=payload
            )
            if submit_res.status_code != 200:
                # Try to surface the error detail.
                try:
                    detail = submit_res.json()
                except Exception:
                    detail = submit_res.text
                print(f"ModelScope Submit Error: {detail}")
                raise HTTPException(status_code=submit_res.status_code, detail=detail)
            task_id = submit_res.json().get("task_id")
            print(f"Task submitted, ID: {task_id}")
            # B. Poll the task: 200 retries * 3 s = 600 s (10 min) timeout.
            for i in range(200):
                await asyncio.sleep(3)
                try:
                    result = await client.get(
                        f"{base_url}v1/tasks/{task_id}",
                        headers={**headers, "X-ModelScope-Task-Type": "image_generation"},
                    )
                    data = result.json()
                    status = data.get("task_status")
                    if i % 5 == 0:
                        print(f"Task {task_id} status check {i}: {status}")
                    if status == "SUCCEED":
                        img_url = data["output_images"][0]
                        print(f"Task {task_id} SUCCEED: {img_url}")
                        # Download into the local output directory.
                        local_path = ""
                        try:
                            async with httpx.AsyncClient() as dl_client:
                                img_res = await dl_client.get(img_url)
                            if img_res.status_code == 200:
                                filename = f"cloud_{int(time.time())}.png"
                                file_path = os.path.join(OUTPUT_DIR, filename)
                                with open(file_path, "wb") as f:
                                    f.write(img_res.content)
                                # BUG FIX: was a literal placeholder path.
                                local_path = f"/output/{filename}"
                                print(f"Image saved locally: {local_path}")
                            else:
                                print(f"Failed to download image: {img_res.status_code}")
                                local_path = img_url  # fall back to remote URL
                        except Exception as dl_e:
                            print(f"Download error: {dl_e}")
                            local_path = img_url  # fall back
                        # Save to local history using the local path so the
                        # frontend loads from this service.
                        record = {
                            "timestamp": time.time(),
                            "prompt": req.prompt,
                            "images": [local_path],
                            "type": "cloud"
                        }
                        save_to_history(record)
                        # Broadcast the new image.
                        try:
                            await manager.broadcast_new_image(record)
                        except Exception as e:
                            print(f"Broadcast error: {e}")
                        return {"url": local_path}
                    elif status == "FAILED":
                        # BUG FIX: raise HTTPException so the retry handler
                        # below does not swallow the failure and keep polling.
                        raise HTTPException(status_code=400, detail=f"ModelScope task failed: {data}")
                except HTTPException:
                    raise
                except Exception as loop_e:
                    print(f"Polling error (retrying): {loop_e}")
                    continue
        # BUG FIX: message said 180s but the loop above waits up to 600s.
        raise Exception("Cloud generation timeout (600s)")
    except HTTPException:
        raise
    except Exception as e:
        print(f"Cloud generation error: {e}")
        raise HTTPException(status_code=400, detail=str(e))


@app.post("/api/generate")
def generate(req: GenerateRequest):
    """Run a local ComfyUI workflow with load balancing across instances.

    Synchronous on purpose: FastAPI runs it in a threadpool, and broadcasts are
    scheduled onto GLOBAL_LOOP via run_coroutine_threadsafe.
    """
    global NEXT_TASK_ID
    # 1. Enqueue for position tracking.
    current_task = None
    target_backend = None
    with QUEUE_LOCK:
        task_id = NEXT_TASK_ID
        NEXT_TASK_ID += 1
        current_task = {"task_id": task_id, "client_id": req.client_id}
        QUEUE.append(current_task)
    try:
        # 2. Load balancing: pick the best backend (the global EXECUTION_LOCK
        # was removed so requests run concurrently).
        target_backend = get_best_backend()
        # Bump the local load counter for that backend.
        with LOAD_LOCK:
            BACKEND_LOCAL_LOAD[target_backend] += 1

        # 3. Load the workflow.
        workflow_path = os.path.join(WORKFLOW_DIR, req.workflow_json)
        # Compatibility: fall back to WORKFLOW_PATH for the default workflow.
        if not os.path.exists(workflow_path) and req.workflow_json == "Z-Image.json":
            workflow_path = WORKFLOW_PATH
        if not os.path.exists(workflow_path):
            raise Exception(f"Workflow file not found: {req.workflow_json}")
        with open(workflow_path, 'r', encoding='utf-8') as f:
            workflow = json.load(f)

        seed = random.randint(1, 10**15)

        # Parameter injection.
        # Basic parameters (for Z-Image.json).
        if "23" in workflow and req.prompt:
            workflow["23"]["inputs"]["text"] = req.prompt
        if "144" in workflow:
            workflow["144"]["inputs"]["width"] = req.width
            workflow["144"]["inputs"]["height"] = req.height
        if "22" in workflow:
            workflow["22"]["inputs"]["seed"] = seed
        # Compatibility with the Flux2-Klein workflow.
        if "158" in workflow:
            workflow["158"]["inputs"]["noise_seed"] = seed
        for node_id in ["146", "181"]:
            if node_id in workflow and "inputs" in workflow[node_id] and "seed" in workflow[node_id]["inputs"]:
                workflow[node_id]["inputs"]["seed"] = seed
        if "184" in workflow and "inputs" in workflow["184"] and "seed" in workflow["184"]["inputs"]:
            workflow["184"]["inputs"]["seed"] = seed
        if "172" in workflow and "inputs" in workflow["172"] and "seed" in workflow["172"]["inputs"]:
            # SeedVR2VideoUpscaler caps the seed at 2^32 - 1.
            workflow["172"]["inputs"]["seed"] = seed % 4294967295
        if "14" in workflow and "inputs" in workflow["14"] and "seed" in workflow["14"]["inputs"]:
            workflow["14"]["inputs"]["seed"] = seed

        # Dynamic parameter injection (supports any workflow).
        for node_id, node_inputs in req.params.items():
            if node_id in workflow:
                if "inputs" not in workflow[node_id]:
                    workflow[node_id]["inputs"] = {}
                for input_name, value in node_inputs.items():
                    workflow[node_id]["inputs"][input_name] = value

        # Submit the prompt to ComfyUI.
        p = {"prompt": workflow, "client_id": CLIENT_ID}
        data = json.dumps(p).encode('utf-8')
        try:
            post_req = urllib.request.Request(f"http://{target_backend}/prompt", data=data)
            prompt_id = json.loads(urllib.request.urlopen(post_req, timeout=10).read())['prompt_id']
        except urllib.error.HTTPError as e:
            error_body = e.read().decode('utf-8')
            print(f"ComfyUI API Error ({e.code}): {error_body}")
            raise Exception(f"HTTP Error {e.code}: {error_body}")

        # Poll for the result (up to 300 s = 5 min).
        history_data = None
        for i in range(300):
            try:
                res = get_comfy_history(target_backend, prompt_id)
                if prompt_id in res:
                    history_data = res[prompt_id]
                    break
            except Exception:
                pass
            time.sleep(1)
        if not history_data:
            raise Exception("ComfyUI 渲染超时")

        # Copy output images to local storage.
        local_urls = []
        current_timestamp = time.time()
        if 'outputs' in history_data:
            for node_id in history_data['outputs']:
                node_output = history_data['outputs'][node_id]
                if 'images' in node_output:
                    for img in node_output['images']:
                        comfy_url_path = f"/view?filename={img['filename']}&subfolder={img['subfolder']}&type={img['type']}"
                        # Move from ComfyUI to our output dir; prefix marks the
                        # request type and time for easy identification.
                        prefix = f"{req.type}_{int(current_timestamp)}_"
                        local_path = download_image(target_backend, comfy_url_path, prefix=prefix)
                        local_urls.append(local_path)

        # Persist and return.
        result = {
            "prompt": req.prompt if req.prompt else "Detail Enhance",  # default title
            "images": local_urls,
            "seed": seed,
            "timestamp": current_timestamp,
            "type": req.type,       # stored type
            "params": req.params    # stored params for "remix" support
        }
        save_to_history(result)
        # Broadcast the new image onto the main event loop.
        if GLOBAL_LOOP:
            asyncio.run_coroutine_threadsafe(manager.broadcast_new_image(result), GLOBAL_LOOP)
        return result
    except Exception as e:
        return {"images": [], "error": str(e)}
    finally:
        # Decrement the local load counter.
        if target_backend:
            with LOAD_LOCK:
                if BACKEND_LOCAL_LOAD.get(target_backend, 0) > 0:
                    BACKEND_LOCAL_LOAD[target_backend] -= 1
        # Remove from the queue whether the task succeeded or failed.
        if current_task:
            with QUEUE_LOCK:
                if current_task in QUEUE:
                    QUEUE.remove(current_task)


if __name__ == "__main__":
    import uvicorn
    # Force single-process mode so the WebSocket online count stays accurate.
    uvicorn.run(app, host="0.0.0.0", port=7860)