claude-mat committed on
Commit
65840d6
·
verified ·
1 Parent(s): 328282b

Upload 12 files

Browse files
.dockerignore ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Ignore Python cache files
2
+ __pycache__/
3
+ *.pyc
4
+
5
+ # Ignore user-generated data
6
+ data/outputs/
7
+ data/user_config.json
8
+ data/presets.json
9
+ data/workflows/*.json
10
+ !data/workflows/workflow_template.json
Dockerfile ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use an official Python runtime as a parent image
2
+ FROM python:3.11-slim
3
+
4
+ # Set the working directory in the container
5
+ WORKDIR /app
6
+
7
+ # Copy the requirements file into the container at /app
8
+ COPY requirements.txt .
9
+
10
+ # Install any needed packages specified in requirements.txt
11
+ RUN pip install --no-cache-dir -r requirements.txt
12
+
13
+ # Copy the rest of the application's code into the container at /app
14
+ COPY . .
15
+
16
+ # Make port 7860 available to the world outside this container
17
+ EXPOSE 7860
18
+
19
+ # Define environment variable for data directory (optional, can be set at runtime)
20
+ ENV WEBUI_DATA_DIR=/data
21
+
22
+ # Create the data directory
23
+ RUN mkdir -p /data
24
+
25
+ # Run webui.py when the container launches
26
+ # Use the --host 0.0.0.0 flag to make it accessible outside the container
27
+ CMD ["python", "webui.py", "--host", "0.0.0.0"]
README.md CHANGED
@@ -1,11 +1,36 @@
1
- ---
2
- title: Webui
3
- emoji: 📈
4
- colorFrom: purple
5
- colorTo: green
6
- sdk: docker
7
- pinned: false
8
- license: mit
9
- ---
10
-
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: ComfyUI Web Interface
3
+ emoji: 🚀
4
+ colorFrom: blue
5
+ colorTo: green
6
+ sdk: gradio
7
+ sdk_version: 4.31.0
8
+ app_file: webui.py
9
+ pinned: false
10
+ ---
11
+
12
+ # ComfyUI Web Interface
13
+
14
+ This is a user-friendly web interface for ComfyUI, designed to be easily deployed as a Hugging Face Space or run locally.
15
+
16
+ ## Features
17
+
18
+ - **Intuitive UI**: A Gradio-based interface that simplifies the ComfyUI workflow.
19
+ - **Style Presets**: Save and load positive/negative prompt prefixes.
20
+ - **Workflow Management**: Edit, save, and switch between different workflow templates.
21
+ - **OpenAI-compatible API**: Exposes an API endpoint compatible with OpenAI's Chat Completions format for image generation.
22
+ - **Scheduler**: Keep remote servers alive with scheduled generation tasks.
23
+ - **History**: View and manage previously generated images.
24
+
25
+ ## How to Use
26
+
27
+ 1. **Generator Tab**: Configure your generation settings, including model, sampler, dimensions, and prompts.
28
+ 2. **Settings Tab**: Manage workflow templates and user preferences.
29
+ 3. **OpenAI API Tab**: Start a local server that mimics the OpenAI API for programmatic image generation.
30
+ 4. **History Tab**: Browse, preview, and delete your generated images.
31
+
32
+ ## Configuration
33
+
34
+ - The application saves all user data (configs, workflows, outputs) to the `data/` directory.
35
+ - You can configure the data directory by setting the `WEBUI_DATA_DIR` environment variable.
36
+ - You can configure the autosave interval in the Settings tab.
__pycache__/comfyui_client.cpython-311.pyc ADDED
Binary file (7.06 kB). View file
 
__pycache__/webui.cpython-311.pyc ADDED
Binary file (59.3 kB). View file
 
data/outputs/zhanwei ADDED
File without changes
data/presets.json ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "None": {
3
+ "positive": "",
4
+ "negative": ""
5
+ },
6
+ "\u2728 \u63a8\u8350\u98ce\u683c": {
7
+ "positive": "best quality, very aesthetic, highres, absurdres, sensitive",
8
+ "negative": "lowres, (bad), bad feet, text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, artistic error, username, scan, [abstract], english text, shiny_skin"
9
+ },
10
+ "\ud83c\udfa8 \u52a8\u6f2b\u98ce\u683c": {
11
+ "positive": "masterpiece, best quality, anime, 1girl, beautiful detailed eyes, detailed face",
12
+ "negative": "photorealistic, 3d, extra limbs, bad anatomy, ugly, deformed"
13
+ },
14
+ "\ud83d\udcf8 \u5199\u5b9e\u98ce\u683c": {
15
+ "positive": "photorealistic, high quality, detailed, professional photography",
16
+ "negative": "anime, cartoon, drawing, painting, sketch"
17
+ }
18
+ }
data/user_config.json ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "server_address": "https://15ibfgkubo-8188.cnb.run",
3
+ "model": "il/WAI_NSFW-illustrious-SDXL_v15.safetensors",
4
+ "sampler": "euler",
5
+ "scheduler": "simple",
6
+ "steps": 30,
7
+ "cfg": 6.0,
8
+ "width": 768,
9
+ "height": 1280,
10
+ "batch_size": 1,
11
+ "batch_count": 1,
12
+ "seed": 757831338432565,
13
+ "after_generate": "randomize",
14
+ "positive_prefix": "",
15
+ "negative_prefix": "",
16
+ "positive_prompt": "best quality,very aesthetic,highres,absurdres,sensitive,A girl dressed in a maid costume with a personality, kneeling in front of her master\uff0c",
17
+ "negative_prompt": "lowres,(bad),bad feet,text,error,fewer,extra,missing,worst quality,jpeg artifacts,low quality,watermark,unfinished,displeasing,oldest,early,chromatic aberration,signature,artistic error,username,scan,[abstract],english text,shiny_skin,",
18
+ "preset_name": "None",
19
+ "current_workflow": "workflow_template",
20
+ "language": "zh",
21
+ "config_save_interval": 20,
22
+ "api_return": "url",
23
+ "api_n": 1,
24
+ "api_server_address": "",
25
+ "api_model": "",
26
+ "api_sampler": "",
27
+ "api_scheduler": "",
28
+ "api_steps": 30,
29
+ "api_cfg": 6.0,
30
+ "api_width": 768,
31
+ "api_height": 1280,
32
+ "api_seed": 757831338432565,
33
+ "api_after_generate": "randomize",
34
+ "api_positive_prefix": "",
35
+ "api_negative_prefix": "",
36
+ "api_workflow": "workflow_template"
37
+ }
data/workflows/workflow_template.json ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "3": {
3
+ "inputs": {
4
+ "seed": "%seed%",
5
+ "steps": "%steps%",
6
+ "cfg": "%cfg%",
7
+ "sampler_name": "%sampler%",
8
+ "scheduler": "%scheduler%",
9
+ "denoise": 1,
10
+ "model": [
11
+ "4",
12
+ 0
13
+ ],
14
+ "positive": [
15
+ "6",
16
+ 0
17
+ ],
18
+ "negative": [
19
+ "7",
20
+ 0
21
+ ],
22
+ "latent_image": [
23
+ "5",
24
+ 0
25
+ ]
26
+ },
27
+ "class_type": "KSampler",
28
+ "_meta": {
29
+ "title": "KSampler"
30
+ }
31
+ },
32
+ "4": {
33
+ "inputs": {
34
+ "ckpt_name": "%model%"
35
+ },
36
+ "class_type": "CheckpointLoaderSimple",
37
+ "_meta": {
38
+ "title": "Load Checkpoint"
39
+ }
40
+ },
41
+ "5": {
42
+ "inputs": {
43
+ "width": "%width%",
44
+ "height": "%height%",
45
+ "batch_size": "%batch_size%"
46
+ },
47
+ "class_type": "EmptyLatentImage",
48
+ "_meta": {
49
+ "title": "Empty Latent Image"
50
+ }
51
+ },
52
+ "6": {
53
+ "inputs": {
54
+ "text": "%prompt%",
55
+ "speak_and_recognation": {
56
+ "__value__": [
57
+ false,
58
+ true
59
+ ]
60
+ },
61
+ "clip": [
62
+ "4",
63
+ 1
64
+ ]
65
+ },
66
+ "class_type": "CLIPTextEncode",
67
+ "_meta": {
68
+ "title": "CLIP Text Encode (Prompt)"
69
+ }
70
+ },
71
+ "7": {
72
+ "inputs": {
73
+ "text": "%negative_prompt%",
74
+ "speak_and_recognation": {
75
+ "__value__": [
76
+ false,
77
+ true
78
+ ]
79
+ },
80
+ "clip": [
81
+ "4",
82
+ 1
83
+ ]
84
+ },
85
+ "class_type": "CLIPTextEncode",
86
+ "_meta": {
87
+ "title": "CLIP Text Encode (Prompt)"
88
+ }
89
+ },
90
+ "8": {
91
+ "inputs": {
92
+ "samples": [
93
+ "3",
94
+ 0
95
+ ],
96
+ "vae": [
97
+ "4",
98
+ 2
99
+ ]
100
+ },
101
+ "class_type": "VAEDecode",
102
+ "_meta": {
103
+ "title": "VAE Decode"
104
+ }
105
+ },
106
+ "10": {
107
+ "inputs": {
108
+ "filename_prefix": "ComfyUI",
109
+ "images": [
110
+ "8",
111
+ 0
112
+ ]
113
+ },
114
+ "class_type": "SaveImage",
115
+ "_meta": {
116
+ "title": "Save Image"
117
+ }
118
+ }
119
+ }
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ gradio
2
+ websocket-client
3
+ Pillow
4
+ fastapi
5
+ uvicorn
user_config.json ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "server_address": "https://15ibfgkubo-8188.cnb.run",
3
+ "model": "il/WAI_NSFW-illustrious-SDXL_v15.safetensors",
4
+ "sampler": "euler",
5
+ "scheduler": "simple",
6
+ "steps": 30,
7
+ "cfg": 6.0,
8
+ "width": 768,
9
+ "height": 1280,
10
+ "batch_size": 1,
11
+ "batch_count": 1,
12
+ "seed": 757831338432565,
13
+ "after_generate": "randomize",
14
+ "positive_prefix": "",
15
+ "negative_prefix": "",
16
+ "positive_prompt": "best quality,very aesthetic,highres,absurdres,sensitive,A girl dressed in a maid costume with a personality, kneeling in front of her master\uff0c",
17
+ "negative_prompt": "lowres,(bad),bad feet,text,error,fewer,extra,missing,worst quality,jpeg artifacts,low quality,watermark,unfinished,displeasing,oldest,early,chromatic aberration,signature,artistic error,username,scan,[abstract],english text,shiny_skin,",
18
+ "preset_name": "None"
19
+ }
webui.py ADDED
@@ -0,0 +1,1227 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import websocket
3
+ import uuid
4
+ import json
5
+ import urllib.request
6
+ import urllib.parse
7
+ from PIL import Image
8
+ import io
9
+ import os
10
+ import random
11
+ import time
12
+ import threading
13
+ import base64
14
+
15
+ try:
16
+ from fastapi import FastAPI, Request
17
+ from fastapi.responses import JSONResponse, FileResponse
18
+ import uvicorn
19
+ except Exception:
20
+ FastAPI = None
21
+ uvicorn = None
22
+
23
+ # --- Constants and Setup ---
24
+ BASE_DIR = os.path.dirname(__file__)
25
+ # Allow overriding data directory via environment variable WEBUI_DATA_DIR
26
+ DATA_DIR = os.path.abspath(os.getenv('WEBUI_DATA_DIR', os.path.join(BASE_DIR, 'data')))
27
+ WORKFLOWS_DIR = os.path.join(DATA_DIR, 'workflows')
28
+ OUTPUT_DIR = os.path.join(DATA_DIR, 'outputs')
29
+ PRESETS_FILE = os.path.join(DATA_DIR, 'presets.json')
30
+ USER_CONFIG_FILE = os.path.join(DATA_DIR, 'user_config.json')
31
+ os.makedirs(OUTPUT_DIR, exist_ok=True)
32
+ os.makedirs(WORKFLOWS_DIR, exist_ok=True)
33
+ SCHEDULER_STOP = False
34
+
35
+ # --- Auto-save Config Manager (20s interval, debounced) ---
36
+ CONFIG_SAVE_INTERVAL = int(os.getenv('WEBUI_CONFIG_INTERVAL', '20')) # seconds
37
+ _pending_config = {}
38
+ _config_lock = threading.Lock()
39
+ _config_saver_thread = None
40
+ _config_changed = False
41
+
42
+ def start_config_saver():
43
+ """Start the background config saver thread."""
44
+ global _config_saver_thread
45
+ if _config_saver_thread is None or not _config_saver_thread.is_alive():
46
+ _config_saver_thread = threading.Thread(target=_config_saver_loop, daemon=True)
47
+ _config_saver_thread.start()
48
+
49
+ def _flush_pending_config():
50
+ """Flush any pending config changes immediately."""
51
+ global _config_changed
52
+ with _config_lock:
53
+ if _config_changed and _pending_config:
54
+ try:
55
+ config = load_user_config()
56
+ config.update(_pending_config)
57
+ save_user_config(config)
58
+ print(f"[Auto-save] Configuration saved at {time.strftime('%H:%M:%S')}")
59
+ except Exception as e:
60
+ print(f"[Auto-save] Error saving config: {e}")
61
+ finally:
62
+ _pending_config.clear()
63
+ _config_changed = False
64
+
65
+ def _config_saver_loop():
66
+ """Background thread that saves config every CONFIG_SAVE_INTERVAL if changed."""
67
+ while True:
68
+ time.sleep(CONFIG_SAVE_INTERVAL)
69
+ _flush_pending_config()
70
+
71
+ def set_config_save_interval(seconds: int):
72
+ """Update autosave interval at runtime (min 5s)."""
73
+ global CONFIG_SAVE_INTERVAL
74
+ try:
75
+ seconds = int(seconds)
76
+ if seconds < 5:
77
+ seconds = 5
78
+ except Exception:
79
+ seconds = 20
80
+ CONFIG_SAVE_INTERVAL = seconds
81
+ queue_config_update(config_save_interval=seconds)
82
+ return seconds
83
+
84
+ def queue_config_update(**kwargs):
85
+ """Queue config updates to be saved in the next interval."""
86
+ global _config_changed
87
+ with _config_lock:
88
+ _pending_config.update(kwargs)
89
+ _config_changed = True
90
+
91
+ # --- Preset Management Functions ---
92
+ def load_presets():
93
+ """Loads presets from the JSON file. If not found, creates it with defaults."""
94
+ default_presets = {
95
+ "None": {"positive": "", "negative": ""},
96
+ "✨ 推荐风格": {
97
+ "positive": "best quality, very aesthetic, highres, absurdres, sensitive",
98
+ "negative": "lowres, (bad), bad feet, text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, artistic error, username, scan, [abstract], english text, shiny_skin"
99
+ },
100
+ "🎨 动漫风格": {
101
+ "positive": "masterpiece, best quality, anime, 1girl, beautiful detailed eyes, detailed face",
102
+ "negative": "photorealistic, 3d, extra limbs, bad anatomy, ugly, deformed"
103
+ },
104
+ "📸 写实风格": {
105
+ "positive": "photorealistic, high quality, detailed, professional photography",
106
+ "negative": "anime, cartoon, drawing, painting, sketch"
107
+ }
108
+ }
109
+
110
+ if not os.path.exists(PRESETS_FILE):
111
+ with open(PRESETS_FILE, 'w', encoding='utf-8') as f:
112
+ json.dump(default_presets, f, indent=4)
113
+ return default_presets
114
+ else:
115
+ try:
116
+ with open(PRESETS_FILE, 'r', encoding='utf-8') as f:
117
+ presets = json.load(f)
118
+ if "None" not in presets:
119
+ presets["None"] = {"positive": "", "negative": ""}
120
+ return presets
121
+ except (json.JSONDecodeError, IOError):
122
+ with open(PRESETS_FILE, 'w', encoding='utf-8') as f:
123
+ json.dump(default_presets, f, indent=4)
124
+ return default_presets
125
+
126
+ def save_presets(presets_dict):
127
+ """Saves the given dictionary to the presets JSON file."""
128
+ with open(PRESETS_FILE, 'w', encoding='utf-8') as f:
129
+ json.dump(presets_dict, f, indent=4)
130
+
131
+ def combine_prompts(prefix, main_prompt):
132
+ """Combines prefix and main prompt intelligently."""
133
+ if prefix and main_prompt:
134
+ return f"{prefix.strip()}, {main_prompt.strip()}"
135
+ elif prefix:
136
+ return prefix.strip()
137
+ elif main_prompt:
138
+ return main_prompt.strip()
139
+ return ""
140
+
141
+ def select_preset(preset_name):
142
+ """Selects a preset and returns its values."""
143
+ preset_data = GLOBAL_PRESETS.get(preset_name, {"positive": "", "negative": ""})
144
+ return preset_name, preset_data["positive"], preset_data["negative"]
145
+
146
+ def save_or_update_preset(preset_name, positive_prefix, negative_prefix):
147
+ """Saves or updates a preset."""
148
+ if not preset_name or not preset_name.strip():
149
+ return gr.update(), "Preset name cannot be empty."
150
+
151
+ preset_name = preset_name.strip()
152
+ GLOBAL_PRESETS[preset_name] = {"positive": positive_prefix, "negative": negative_prefix}
153
+ save_presets(GLOBAL_PRESETS)
154
+ return gr.update(choices=list(GLOBAL_PRESETS.keys()), value=preset_name), f"Preset '{preset_name}' saved."
155
+
156
+ def delete_preset(preset_name):
157
+ """Deletes a preset."""
158
+ if not preset_name or preset_name.strip() in ["", "None"]:
159
+ return gr.update(), gr.update(), gr.update(), gr.update(), "Cannot delete this preset."
160
+
161
+ preset_name = preset_name.strip()
162
+ if preset_name in GLOBAL_PRESETS:
163
+ del GLOBAL_PRESETS[preset_name]
164
+ save_presets(GLOBAL_PRESETS)
165
+ return (gr.update(choices=list(GLOBAL_PRESETS.keys()), value="None"),
166
+ "None", "", "", f"Preset '{preset_name}' deleted.")
167
+ return gr.update(), gr.update(), gr.update(), gr.update(), f"Preset '{preset_name}' not found."
168
+
169
+ # Load presets globally
170
+ GLOBAL_PRESETS = load_presets()
171
+
172
+ # --- User Config Management Functions ---
173
+ def load_user_config():
174
+ """Loads user configuration from JSON file."""
175
+ default_config = {
176
+ "server_address": "127.0.0.1:8188",
177
+ "model": "",
178
+ "sampler": "euler",
179
+ "scheduler": "normal",
180
+ "steps": 30,
181
+ "cfg": 6.0,
182
+ "width": 768,
183
+ "height": 1280,
184
+ "batch_size": 1,
185
+ "batch_count": 1,
186
+ "seed": 757831338432565,
187
+ "after_generate": "randomize",
188
+ "positive_prefix": "",
189
+ "negative_prefix": "",
190
+ "positive_prompt": "best quality,very aesthetic,highres,absurdres,sensitive,A girl dressed in a maid costume with a personality, kneeling in front of her master,",
191
+ "negative_prompt": "lowres,(bad),bad feet,text,error,fewer,extra,missing,worst quality,jpeg artifacts,low quality,watermark,unfinished,displeasing,oldest,early,chromatic aberration,signature,artistic error,username,scan,[abstract],english text,shiny_skin,",
192
+ "preset_name": "None",
193
+ "current_workflow": "workflow_template",
194
+ "language": "en",
195
+ "config_save_interval": 20,
196
+ # OpenAI API defaults (can override generator settings)
197
+ "api_server_address": "",
198
+ "api_model": "",
199
+ "api_sampler": "",
200
+ "api_scheduler": "",
201
+ "api_steps": 30,
202
+ "api_cfg": 6.0,
203
+ "api_width": 768,
204
+ "api_height": 1280,
205
+ "api_seed": 757831338432565,
206
+ "api_after_generate": "randomize",
207
+ "api_positive_prefix": "",
208
+ "api_negative_prefix": "",
209
+ "api_workflow": "workflow_template",
210
+ "api_return": "url",
211
+ "api_n": 1
212
+ }
213
+
214
+ if not os.path.exists(USER_CONFIG_FILE):
215
+ save_user_config(default_config)
216
+ return default_config
217
+
218
+ try:
219
+ with open(USER_CONFIG_FILE, 'r', encoding='utf-8') as f:
220
+ config = json.load(f)
221
+ # 合并默认配置,确保新字段存在
222
+ for key, value in default_config.items():
223
+ if key not in config:
224
+ config[key] = value
225
+ return config
226
+ except (json.JSONDecodeError, IOError):
227
+ save_user_config(default_config)
228
+ return default_config
229
+
230
+ def save_user_config(config_dict):
231
+ """Saves user configuration to JSON file."""
232
+ with open(USER_CONFIG_FILE, 'w', encoding='utf-8') as f:
233
+ json.dump(config_dict, f, indent=4)
234
+
235
+ def update_user_config(**kwargs):
236
+ """Updates specific configuration values."""
237
+ config = load_user_config()
238
+ for key, value in kwargs.items():
239
+ config[key] = value
240
+ save_user_config(config)
241
+
242
+ # Load user config globally
243
+ USER_CONFIG = load_user_config()
244
+
245
+ # --- Workflow Management Functions ---
246
+ def load_workflows():
247
+ """Loads all workflow files from the workflows directory."""
248
+ workflows = {}
249
+ if not os.path.exists(WORKFLOWS_DIR):
250
+ return workflows
251
+
252
+ for filename in os.listdir(WORKFLOWS_DIR):
253
+ if filename.endswith('.json'):
254
+ workflow_name = filename[:-5] # Remove .json extension
255
+ workflow_path = os.path.join(WORKFLOWS_DIR, filename)
256
+ try:
257
+ with open(workflow_path, 'r', encoding='utf-8') as f:
258
+ workflows[workflow_name] = json.load(f)
259
+ except (json.JSONDecodeError, IOError) as e:
260
+ print(f"Error loading workflow {workflow_name}: {e}")
261
+
262
+ return workflows
263
+
264
+ def save_workflow(workflow_name, workflow_content):
265
+ """Saves a workflow to the workflows directory."""
266
+ if not workflow_name or not workflow_name.strip():
267
+ return False, "Workflow name cannot be empty."
268
+
269
+ workflow_name = workflow_name.strip()
270
+ workflow_path = os.path.join(WORKFLOWS_DIR, f"{workflow_name}.json")
271
+
272
+ try:
273
+ # Validate JSON content
274
+ json.loads(workflow_content)
275
+ with open(workflow_path, 'w', encoding='utf-8') as f:
276
+ f.write(workflow_content)
277
+ return True, f"Workflow '{workflow_name}' saved successfully."
278
+ except json.JSONDecodeError as e:
279
+ return False, f"Invalid JSON format: {e}"
280
+ except IOError as e:
281
+ return False, f"Error saving workflow: {e}"
282
+
283
+ def delete_workflow(workflow_name):
284
+ """Deletes a workflow file."""
285
+ if not workflow_name or workflow_name.strip() in ["", "workflow_template"]:
286
+ return False, "Cannot delete this workflow."
287
+
288
+ workflow_name = workflow_name.strip()
289
+ workflow_path = os.path.join(WORKFLOWS_DIR, f"{workflow_name}.json")
290
+
291
+ if os.path.exists(workflow_path):
292
+ try:
293
+ os.remove(workflow_path)
294
+ return True, f"Workflow '{workflow_name}' deleted successfully."
295
+ except IOError as e:
296
+ return False, f"Error deleting workflow: {e}"
297
+ else:
298
+ return False, f"Workflow '{workflow_name}' not found."
299
+
300
+ def load_workflow_content(workflow_name):
301
+ """Loads the content of a specific workflow."""
302
+ if not workflow_name or workflow_name == "workflow_template":
303
+ # Load default template
304
+ workflow_path = os.path.join(WORKFLOWS_DIR, "workflow_template.json")
305
+ else:
306
+ workflow_path = os.path.join(WORKFLOWS_DIR, f"{workflow_name}.json")
307
+
308
+ if os.path.exists(workflow_path):
309
+ try:
310
+ with open(workflow_path, 'r', encoding='utf-8') as f:
311
+ return json.load(f)
312
+ except (json.JSONDecodeError, IOError) as e:
313
+ print(f"Error loading workflow {workflow_name}: {e}")
314
+ return None
315
+ return None
316
+
317
+ # Load workflows globally
318
+ GLOBAL_WORKFLOWS = load_workflows()
319
+
320
+ # --- OpenAI-compatible API Server (FastAPI) ---
321
+ OPENAI_SERVER_THREAD = None
322
+ OPENAI_UVICORN_SERVER = None
323
+ OPENAI_API_APP = None
324
+
325
+ def _ensure_fastapi_available():
326
+ if FastAPI is None or uvicorn is None:
327
+ raise RuntimeError("FastAPI/uvicorn not installed. Please install with: pip install fastapi uvicorn")
328
+
329
+ def _encode_image_b64(path: str) -> str:
330
+ with open(path, 'rb') as f:
331
+ return base64.b64encode(f.read()).decode('utf-8')
332
+
333
+ def generate_image_sync(server_address, positive_prefix, negative_prefix, positive_prompt, negative_prompt, model, sampler, scheduler, steps, cfg, width, height, seed, after_generate, batch_size, batch_count, current_workflow):
334
+ """Synchronous image generation that returns list of saved file paths."""
335
+ # Normalize server address
336
+ if not server_address.startswith("http://") and not server_address.startswith("https://"):
337
+ server_address = "http://" + server_address
338
+ server_address = server_address.rstrip('/')
339
+
340
+ ws_address = "ws://" + server_address[len("http://"):]
341
+ if server_address.startswith("https://"):
342
+ ws_address = "wss://" + server_address[len("https://"):]
343
+
344
+ client_id = str(uuid.uuid4())
345
+ all_generated_images = []
346
+ initial_seed = seed
347
+
348
+ for i in range(batch_count):
349
+ if after_generate == "randomize":
350
+ current_seed = random.randint(0, 2**32 - 1)
351
+ elif after_generate == "increment":
352
+ current_seed = initial_seed + i
353
+ elif after_generate == "decrement":
354
+ current_seed = initial_seed - i
355
+ else: # "fixed"
356
+ current_seed = initial_seed
357
+
358
+ ws = websocket.WebSocket()
359
+ try:
360
+ ws.connect(f"{ws_address}/ws?clientId={client_id}")
361
+
362
+ workflow_content = load_workflow_content(current_workflow)
363
+ if workflow_content is None:
364
+ break
365
+ workflow_content = json.dumps(workflow_content)
366
+
367
+ final_positive_prompt = combine_prompts(positive_prefix, positive_prompt)
368
+ final_negative_prompt = combine_prompts(negative_prefix, negative_prompt)
369
+
370
+ workflow_content = workflow_content.replace('%prompt%', final_positive_prompt)
371
+ workflow_content = workflow_content.replace('%negative_prompt%', final_negative_prompt)
372
+ workflow_content = workflow_content.replace('%model%', model)
373
+ workflow_content = workflow_content.replace('%width%', str(width))
374
+ workflow_content = workflow_content.replace('%height%', str(height))
375
+ workflow_content = workflow_content.replace('%batch_size%', str(batch_size))
376
+ workflow_content = workflow_content.replace('%seed%', str(current_seed))
377
+ workflow_content = workflow_content.replace('%steps%', str(steps))
378
+ workflow_content = workflow_content.replace('%cfg%', str(cfg))
379
+ workflow_content = workflow_content.replace('%sampler%', sampler)
380
+ workflow_content = workflow_content.replace('%scheduler%', scheduler)
381
+
382
+ prompt_workflow = json.loads(workflow_content)
383
+ prompt_data = queue_prompt(prompt_workflow, client_id, server_address)
384
+ prompt_id = prompt_data['prompt_id']
385
+
386
+ while True:
387
+ out = ws.recv()
388
+ if not isinstance(out, str):
389
+ continue
390
+ message = json.loads(out)
391
+ if message['type'] == 'executing':
392
+ data = message['data']
393
+ if data['node'] is None and data['prompt_id'] == prompt_id:
394
+ break
395
+
396
+ history = get_history(prompt_id, server_address)[prompt_id]
397
+ images_output = []
398
+ for node_id in history['outputs']:
399
+ if 'images' in history['outputs'][node_id]:
400
+ for image in history['outputs'][node_id]['images']:
401
+ image_data = get_image(image['filename'], image['subfolder'], image['type'], server_address)
402
+ images_output.append(image_data)
403
+
404
+ if not images_output:
405
+ continue
406
+
407
+ pil_images = [Image.open(io.BytesIO(data)) for data in images_output]
408
+ for img_idx, img in enumerate(pil_images):
409
+ filename = f"{int(time.time())}_{current_seed}_{img_idx}.png"
410
+ filepath = os.path.join(OUTPUT_DIR, filename)
411
+ img.save(filepath)
412
+ all_generated_images.append(filepath)
413
+
414
+ finally:
415
+ if ws.connected:
416
+ ws.close()
417
+
418
+ return all_generated_images
419
+
420
def _create_openai_app(get_config):
    """Create a FastAPI app with OpenAI-compatible routes.

    get_config is a zero-argument callable returning the current user config
    dict; it is invoked per request so UI changes apply without a restart.

    Routes:
      GET  /v1/files/{filename}   — serve a generated image from OUTPUT_DIR.
      POST /v1/chat/completions   — generate image(s) from the latest user
                                    message and return them as URLs or base64
                                    in a chat.completion-shaped response.
    """
    app = FastAPI()

    # Fixed: route path must declare the {filename} path parameter that the
    # handler receives (it was corrupted in the previous revision).
    @app.get("/v1/files/{filename}")
    def get_file(filename: str):
        # filename is untrusted client input: collapse it to a basename so
        # traversal sequences cannot escape OUTPUT_DIR.
        safe_name = os.path.basename(filename)
        path = os.path.join(OUTPUT_DIR, safe_name)
        if os.path.exists(path):
            return FileResponse(path)
        return JSONResponse(status_code=404, content={"error": {"message": "File not found"}})

    @app.post("/v1/chat/completions")
    async def chat_completions(req: Request):
        body = await req.json()
        model = body.get("model", "gpt-image-proxy")
        messages = body.get("messages", [])
        n = int(body.get("n", get_config()["api_n"]))
        return_type = body.get("response_format", {}).get("type", get_config()["api_return"])  # "b64_json" or "url"

        # The prompt is the most recent user message (text parts only when
        # the content is a multi-part list).
        user_text = ""
        for m in reversed(messages):
            if m.get("role") == "user":
                content = m.get("content", "")
                if isinstance(content, list):
                    user_text = " ".join([item.get("text", "") for item in content if item.get("type") == "text"]).strip()
                else:
                    user_text = str(content)
                break

        cfg = get_config()
        # Use a random seed for each request and randomize across images in n.
        req_seed = random.randint(0, 2**32 - 1)
        # NOTE(review): this pulls the *generator* settings (cfg["model"], ...)
        # rather than the api_* overrides stored in the same config — confirm
        # whether the API tab overrides are meant to apply here.
        filepaths = generate_image_sync(
            cfg["server_address"], cfg["positive_prefix"], cfg["negative_prefix"], user_text, cfg["negative_prompt"],
            cfg["model"], cfg["sampler"], cfg["scheduler"], cfg["steps"], cfg["cfg"], cfg["width"], cfg["height"],
            int(req_seed), "randomize", 1, n, cfg["current_workflow"]
        )

        choices = []
        for idx, fp in enumerate(filepaths):
            if return_type == "url":
                # Relative URL served by the GET route above.
                content = f"/v1/files/{os.path.basename(fp)}"
            else:
                content = _encode_image_b64(fp)
            choices.append({
                "index": idx,
                "finish_reason": "stop",
                "message": {"role": "assistant", "content": content}
            })

        resp = {
            "id": f"chatcmpl-{uuid.uuid4().hex[:12]}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": model,
            "choices": choices,
            "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
            "x_comfy": {"seed": int(req_seed), "after_generate": "randomize"}
        }
        return JSONResponse(content=resp)

    return app
483
+
484
+ def start_openai_server(host: str, port: int, get_config):
485
+ global OPENAI_SERVER_THREAD, OPENAI_UVICORN_SERVER, OPENAI_API_APP
486
+ _ensure_fastapi_available()
487
+ if OPENAI_SERVER_THREAD and OPENAI_SERVER_THREAD.is_alive():
488
+ return True, f"Already running on {host}:{port}"
489
+ OPENAI_API_APP = _create_openai_app(get_config)
490
+ config = uvicorn.Config(OPENAI_API_APP, host=host, port=port, log_level="info")
491
+ server = uvicorn.Server(config)
492
+ OPENAI_UVICORN_SERVER = server
493
+ def _run():
494
+ server.run()
495
+ t = threading.Thread(target=_run, daemon=True)
496
+ OPENAI_SERVER_THREAD = t
497
+ t.start()
498
+ return True, f"OpenAI API running on http://{host}:{port}"
499
+
500
+ def stop_openai_server():
501
+ global OPENAI_UVICORN_SERVER, OPENAI_SERVER_THREAD
502
+ if OPENAI_UVICORN_SERVER:
503
+ OPENAI_UVICORN_SERVER.should_exit = True
504
+ return True, "OpenAI API stopping..."
505
+
506
+ # --- Debounced Save Functions (queued, saved every 20s) ---
507
def _queue_field(field, value):
    """Queue one config field for the debounced saver and echo the value back."""
    queue_config_update(**{field: value})
    return value


def save_server_address(server_address):
    """Queue the ComfyUI server address for debounced persistence."""
    return _queue_field("server_address", server_address)


def save_model(model):
    """Queue the selected model name for debounced persistence."""
    return _queue_field("model", model)


def save_sampler(sampler):
    """Queue the sampler choice for debounced persistence."""
    return _queue_field("sampler", sampler)


def save_scheduler(scheduler):
    """Queue the scheduler choice for debounced persistence."""
    return _queue_field("scheduler", scheduler)


def save_steps(steps):
    """Queue the step count for debounced persistence."""
    return _queue_field("steps", steps)


def save_cfg(cfg):
    """Queue the CFG scale for debounced persistence."""
    return _queue_field("cfg", cfg)


def save_width(width):
    """Queue the image width for debounced persistence."""
    return _queue_field("width", width)


def save_height(height):
    """Queue the image height for debounced persistence."""
    return _queue_field("height", height)


def save_batch_size(batch_size):
    """Queue the batch size for debounced persistence."""
    return _queue_field("batch_size", batch_size)


def save_batch_count(batch_count):
    """Queue the batch count for debounced persistence."""
    return _queue_field("batch_count", batch_count)


def save_seed(seed):
    """Queue the seed value for debounced persistence."""
    return _queue_field("seed", seed)


def save_after_generate(after_generate):
    """Queue the after-generate seed policy for debounced persistence."""
    return _queue_field("after_generate", after_generate)


def save_positive_prefix(positive_prefix):
    """Queue the positive prompt prefix for debounced persistence."""
    return _queue_field("positive_prefix", positive_prefix)


def save_negative_prefix(negative_prefix):
    """Queue the negative prompt prefix for debounced persistence."""
    return _queue_field("negative_prefix", negative_prefix)


def save_positive_prompt(positive_prompt):
    """Queue the positive prompt body for debounced persistence."""
    return _queue_field("positive_prompt", positive_prompt)


def save_negative_prompt(negative_prompt):
    """Queue the negative prompt body for debounced persistence."""
    return _queue_field("negative_prompt", negative_prompt)


def save_preset_name(preset_name):
    """Queue the selected preset name for debounced persistence."""
    return _queue_field("preset_name", preset_name)


def save_current_workflow(current_workflow):
    """Queue the selected workflow name for debounced persistence."""
    return _queue_field("current_workflow", current_workflow)
578
+
579
def load_ui_config():
    """Load persisted user configuration and return values for UI initialization.

    Returns a tuple matching the ``app.load`` outputs wired in ``create_ui``:
    every saved field value followed by ``gr.update`` objects that refresh the
    model, sampler, scheduler, and workflow dropdown choices from the server.
    """
    config = load_user_config()

    # Normalize the server address the same way update_choices and
    # generate_images do: add a scheme and strip any trailing slash.
    # (Previously the trailing slash was kept here, producing URLs like
    # "http://host:8188//object_info" — inconsistent with the other callers.)
    server_address = config.get("server_address", "127.0.0.1:8188")
    if not server_address.startswith("http://") and not server_address.startswith("https://"):
        server_address = "http://" + server_address
    server_address = server_address.rstrip('/')

    object_info = get_object_info(server_address)
    available_models = get_models(object_info)
    available_samplers = get_samplers(object_info)
    available_schedulers = get_schedulers(object_info)

    return (
        config.get("server_address", "127.0.0.1:8188"),
        config.get("model", ""),
        config.get("sampler", "euler"),
        config.get("scheduler", "normal"),
        config.get("steps", 30),
        config.get("cfg", 6.0),
        config.get("width", 768),
        config.get("height", 1280),
        config.get("batch_size", 1),
        config.get("batch_count", 1),
        config.get("seed", 757831338432565),
        config.get("after_generate", "randomize"),
        config.get("positive_prefix", ""),
        config.get("negative_prefix", ""),
        config.get("positive_prompt", "best quality,very aesthetic,highres,absurdres,sensitive,A girl dressed in a maid costume with a personality, kneeling in front of her master,"),
        config.get("negative_prompt", "lowres,(bad),bad feet,text,error,fewer,extra,missing,worst quality,jpeg artifacts,low quality,watermark,unfinished,displeasing,oldest,early,chromatic aberration,signature,artistic error,username,scan,[abstract],english text,shiny_skin,"),
        config.get("preset_name", "None"),
        config.get("current_workflow", "workflow_template"),
        gr.update(choices=available_models),
        gr.update(choices=available_samplers),
        gr.update(choices=available_schedulers),
        gr.update(choices=list(GLOBAL_WORKFLOWS.keys()))
    )
617
+
618
+ # --- ComfyUI API Functions ---
619
def get_image(filename, subfolder, folder_type, server_address):
    """Download one rendered image from the ComfyUI ``/view`` endpoint.

    Args:
        filename: Image file name as reported by the server.
        subfolder: Server-side subfolder containing the image.
        folder_type: ComfyUI folder type (e.g. "output" or "temp").
        server_address: Base URL of the ComfyUI server.

    Returns:
        Raw image bytes.
    """
    params = urllib.parse.urlencode(
        {"filename": filename, "subfolder": subfolder, "type": folder_type}
    )
    with urllib.request.urlopen(f"{server_address}/view?{params}") as resp:
        return resp.read()
625
+
626
def queue_prompt(prompt, client_id, server_address):
    """Queue a workflow prompt for execution on the ComfyUI server.

    Args:
        prompt: Workflow dict in ComfyUI API format.
        client_id: UUID identifying this client for websocket progress events.
        server_address: Base URL of the ComfyUI server.

    Returns:
        Parsed JSON response dict (contains ``prompt_id``).
    """
    payload = json.dumps({"prompt": prompt, "client_id": client_id}).encode('utf-8')
    # Declare the JSON content type explicitly on the POST.
    req = urllib.request.Request(
        f"{server_address}/prompt",
        data=payload,
        headers={"Content-Type": "application/json"},
    )
    # Close the HTTP response deterministically; the previous version never
    # closed it and relied on garbage collection (a resource leak).
    with urllib.request.urlopen(req) as response:
        return json.loads(response.read())
633
+
634
def get_history(prompt_id, server_address):
    """Fetch the execution-history record for *prompt_id* from the server.

    Returns:
        Parsed JSON dict keyed by prompt id.
    """
    url = f"{server_address}/history/{prompt_id}"
    with urllib.request.urlopen(url) as resp:
        return json.loads(resp.read())
638
+
639
def get_object_info(server_address):
    """Query ``/object_info`` and return the parsed dict, or None on failure.

    The broad exception handler is deliberate: the UI treats a missing or
    unreachable server as "no info" and falls back to built-in defaults.
    """
    url = f"{server_address}/object_info"
    try:
        with urllib.request.urlopen(url) as resp:
            return json.loads(resp.read())
    except Exception as exc:
        print(f"Failed to fetch object info: {exc}")
        return None
647
+
648
def get_models(object_info):
    """Extract a de-duplicated list of model names from ``/object_info``.

    Scans the checkpoint and UNET loader nodes. Returns a placeholder list
    when the server info is missing or exposes no models.

    Args:
        object_info: Parsed ``/object_info`` response dict, or None.

    Returns:
        List of model file names (order of first appearance preserved).
    """
    fallback = ["model.safetensors"]
    if not object_info:
        return fallback

    def _choices(node, field):
        # Robust lookup: a partially-populated node (missing "input" or
        # "required") no longer raises KeyError, as the old chained
        # membership checks did.
        try:
            return list(object_info[node]["input"]["required"][field][0])
        except (KeyError, IndexError, TypeError):
            return []

    models = []
    models.extend(_choices("CheckpointLoaderSimple", "ckpt_name"))
    models.extend(_choices("UNETLoader", "unet_name"))
    models.extend(_choices("UnetLoaderGGUF", "unet_name"))

    if not models:
        return fallback

    # dict.fromkeys de-duplicates while keeping first-seen order.
    return list(dict.fromkeys(models))
665
+
666
def get_samplers(object_info):
    """Return the sampler names advertised by the server, or a safe default."""
    if not object_info or "KSampler" not in object_info:
        return ["euler"]
    return object_info["KSampler"]["input"]["required"]["sampler_name"][0]
670
+
671
def get_schedulers(object_info):
    """Return the scheduler names advertised by the server, or a safe default."""
    if not object_info or "KSampler" not in object_info:
        return ["normal"]
    return object_info["KSampler"]["input"]["required"]["scheduler"][0]
675
+
676
+ # --- UI Callback Functions ---
677
def update_choices(server_address):
    """Gradio callback: refresh model/sampler/scheduler dropdown choices.

    Args:
        server_address: ComfyUI server address, with or without a scheme.

    Returns:
        Three ``gr.update`` objects (models, samplers, schedulers); each
        preselects the first available option.
    """
    if not server_address:
        return (gr.update(choices=[]), gr.update(choices=[]), gr.update(choices=[]))

    addr = server_address
    if not addr.startswith("http://") and not addr.startswith("https://"):
        addr = "http://" + addr
    addr = addr.rstrip('/')

    info = get_object_info(addr)
    models = get_models(info)
    samplers = get_samplers(info)
    schedulers = get_schedulers(info)

    def _dropdown(options):
        # Default the selection to the first entry when any exist.
        return gr.update(choices=options, value=options[0] if options else None)

    return (_dropdown(models), _dropdown(samplers), _dropdown(schedulers))
698
+
699
+ # --- Scheduler Functions ---
700
def stop_scheduler():
    """Signal the background scheduler loop to terminate.

    Returns:
        Status text shown in the scheduler status label.
    """
    global SCHEDULER_STOP
    print("Scheduler stop requested.")
    # run_scheduled_generation polls this flag at every yield/sleep step.
    SCHEDULER_STOP = True
    return "Scheduler stopping..."
706
+
707
def run_scheduled_generation(interval, server_address, *gen_args):
    """Run the generation task on a repeating schedule (keep-alive mode).

    Generator used by the Scheduler tab: repeatedly invokes
    ``generate_images`` with the Generator-tab settings (forcing a batch
    count of 1), then waits *interval* minutes, yielding
    ``(status_text, gallery)`` UI updates throughout. Stops as soon as
    ``stop_scheduler()`` sets the global ``SCHEDULER_STOP`` flag.

    Args:
        interval: Minutes to wait between runs.
        server_address: ComfyUI server address, re-prepended to *gen_args*.
        *gen_args: Remaining ``generate_images`` arguments in positional
            order, ending with ``batch_count`` and ``current_workflow``.

    Yields:
        Tuples of (status message, gallery image list or None).
    """
    global SCHEDULER_STOP
    SCHEDULER_STOP = False
    print("Scheduler started.")

    # Rebuild the full generate_images argument list:
    # (server_address, ..., batch_size, batch_count, current_workflow)
    full_gen_args = [server_address] + list(gen_args)

    while not SCHEDULER_STOP:
        yield "Running scheduled generation...", None

        # Run exactly one execution per scheduled tick.
        gen_with_single_batch = list(full_gen_args)
        # BUGFIX: batch_count is the SECOND-TO-LAST generate_images argument
        # (the last one is current_workflow). The old code assigned to [-1],
        # clobbering the workflow name with the integer 1.
        gen_with_single_batch[-2] = 1

        gen = generate_images(*gen_with_single_batch)
        final_gallery = None
        for _, gallery in gen:
            if SCHEDULER_STOP:
                break
            final_gallery = gallery

        if SCHEDULER_STOP:
            break

        yield "Generation complete. Waiting for next run...", final_gallery

        # Sleep in one-second slices so a stop request is honored promptly;
        # emit a countdown message once per whole minute remaining.
        wait_seconds = int(interval * 60)
        for i in range(wait_seconds):
            if SCHEDULER_STOP:
                break
            if (wait_seconds - i) % 60 == 0:
                remaining_minutes = (wait_seconds - i) // 60
                yield f"Next run in {remaining_minutes} minute(s)...", final_gallery
            time.sleep(1)

        if SCHEDULER_STOP:
            break

    print("Scheduler stopped.")
    yield "Scheduler stopped.", None
746
+
747
+
748
+ # --- History Management Functions ---
749
def get_history_images():
    """Return all saved output-image paths, newest first.

    Returns:
        List of absolute file paths under OUTPUT_DIR, sorted by
        modification time (descending); empty if the directory is missing.
    """
    if not os.path.exists(OUTPUT_DIR):
        return []
    image_exts = ('.png', '.jpg', '.jpeg', '.gif', '.webp')
    paths = [
        os.path.join(OUTPUT_DIR, name)
        for name in os.listdir(OUTPUT_DIR)
        if name.lower().endswith(image_exts)
    ]
    return sorted(paths, key=os.path.getmtime, reverse=True)
756
+
757
def delete_image(filepaths):
    """Delete the selected gallery images and return the refreshed history.

    Args:
        filepaths: Gallery selection — a list whose items are either plain
            path strings or (path, caption) tuples.

    Returns:
        The updated image-history list from get_history_images().
    """
    if isinstance(filepaths, list):
        for entry in filepaths:
            # Gallery items may arrive as (path, metadata) tuples.
            path = entry[0] if isinstance(entry, tuple) else entry
            if not path or not os.path.exists(path):
                continue
            try:
                os.remove(path)
            except Exception as exc:
                # Best-effort: keep deleting the remaining selections.
                print(f"Error deleting file {path}: {exc}")
    return get_history_images()
773
+
774
+ # --- Core Generation Logic ---
775
def generate_images(server_address, positive_prefix, negative_prefix, positive_prompt, negative_prompt, model, sampler, scheduler, steps, cfg, width, height, seed, after_generate, batch_size, batch_count, current_workflow):
    """Main function to generate images based on UI inputs.

    Generator: runs `batch_count` sequential executions against the ComfyUI
    server, streaming progress over a websocket, and yields
    (status_text, gallery_paths) tuples as the UI updates. Generated images
    are saved under OUTPUT_DIR and prepended to the returned gallery list.

    Args:
        server_address: ComfyUI server address, with or without scheme.
        positive_prefix / negative_prefix: Style-preset prompt prefixes,
            merged with the main prompts via combine_prompts().
        positive_prompt / negative_prompt: User prompt bodies.
        model, sampler, scheduler, steps, cfg, width, height, batch_size:
            Values substituted into the workflow template placeholders.
        seed: Initial seed; per-batch seed depends on `after_generate`.
        after_generate: One of "randomize", "increment", "decrement", "fixed".
        batch_count: Number of sequential workflow executions.
        current_workflow: Name of the workflow template to load.

    Yields:
        (status message, list of saved image file paths) tuples.
    """
    # Normalize server address: ensure a scheme, drop any trailing slash.
    if not server_address.startswith("http://") and not server_address.startswith("https://"):
        server_address = "http://" + server_address
    server_address = server_address.rstrip('/')

    # Derive the websocket URL. The first assignment assumes an http://
    # prefix; for https it is immediately overwritten with the wss:// form.
    ws_address = "ws://" + server_address[len("http://"):]
    if server_address.startswith("https://"):
        ws_address = "wss://" + server_address[len("https://"):]

    client_id = str(uuid.uuid4())
    all_generated_images = []
    initial_seed = seed

    for i in range(batch_count):
        yield f"Running batch {i+1}/{batch_count}...", all_generated_images

        # Per-batch seed policy. NOTE(review): "randomize" draws from
        # [0, 2**32-1], a smaller range than the UI's stored default seed.
        if after_generate == "randomize":
            current_seed = random.randint(0, 2**32 - 1)
        elif after_generate == "increment":
            current_seed = initial_seed + i
        elif after_generate == "decrement":
            current_seed = initial_seed - i
        else: # "fixed"
            current_seed = initial_seed

        # A fresh websocket per batch; closed in the finally block below.
        ws = websocket.WebSocket()
        try:
            yield f"Batch {i+1}: Connecting...", all_generated_images
            ws.connect(f"{ws_address}/ws?clientId={client_id}")

            # Load workflow content
            workflow_content = load_workflow_content(current_workflow)
            if workflow_content is None:
                yield f"Error: Could not load workflow '{current_workflow}'", all_generated_images
                break
            workflow_content = json.dumps(workflow_content)

            # Combine prefix and main prompts
            final_positive_prompt = combine_prompts(positive_prefix, positive_prompt)
            final_negative_prompt = combine_prompts(negative_prefix, negative_prompt)

            # Replace placeholders with actual values.
            # NOTE(review): plain string substitution into serialized JSON —
            # prompts containing quotes/backslashes could break the JSON
            # parse below; presumably combine_prompts output is plain text.
            workflow_content = workflow_content.replace('%prompt%', final_positive_prompt)
            workflow_content = workflow_content.replace('%negative_prompt%', final_negative_prompt)
            workflow_content = workflow_content.replace('%model%', model)
            workflow_content = workflow_content.replace('%width%', str(width))
            workflow_content = workflow_content.replace('%height%', str(height))
            workflow_content = workflow_content.replace('%batch_size%', str(batch_size))
            workflow_content = workflow_content.replace('%seed%', str(current_seed))
            workflow_content = workflow_content.replace('%steps%', str(steps))
            workflow_content = workflow_content.replace('%cfg%', str(cfg))
            workflow_content = workflow_content.replace('%sampler%', sampler)
            workflow_content = workflow_content.replace('%scheduler%', scheduler)

            # Parse the modified workflow
            prompt_workflow = json.loads(workflow_content)

            prompt_data = queue_prompt(prompt_workflow, client_id, server_address)
            prompt_id = prompt_data['prompt_id']

            # Drain websocket progress events until the server reports the
            # end of execution (node is None for our prompt_id).
            while True:
                out = ws.recv()
                if not isinstance(out, str): continue  # skip binary preview frames
                message = json.loads(out)
                if message['type'] == 'executing':
                    data = message['data']
                    if data['node'] is None and data['prompt_id'] == prompt_id:
                        break
                    else:
                        node_id = data['node']
                        # Prefer the node's _meta title for the status line.
                        node_title = prompt_workflow.get(node_id, {}).get('_meta', {}).get('title', f"Node {node_id}")
                        yield f"Batch {i+1}: Executing {node_title}...", all_generated_images

            # Collect every image produced by every output node of this run.
            history = get_history(prompt_id, server_address)[prompt_id]
            images_output = []
            for node_id in history['outputs']:
                if 'images' in history['outputs'][node_id]:
                    for image in history['outputs'][node_id]['images']:
                        image_data = get_image(image['filename'], image['subfolder'], image['type'], server_address)
                        images_output.append(image_data)

            if not images_output:
                continue

            # Persist images locally; filename encodes timestamp + seed + index.
            pil_images = [Image.open(io.BytesIO(data)) for data in images_output]
            for img_idx, img in enumerate(pil_images):
                filename = f"{int(time.time())}_{current_seed}_{img_idx}.png"
                filepath = os.path.join(OUTPUT_DIR, filename)
                img.save(filepath)
                all_generated_images.insert(0, filepath) # Insert at beginning to show newest first

        except Exception as e:
            yield f"Error in batch {i+1}: {e}", all_generated_images
            break # Stop on error
        finally:
            if ws.connected:
                ws.close()

    yield "Done!", all_generated_images
876
+
877
+ # --- Gradio UI ---
878
+ def create_ui():
879
+ # Load initial configuration
880
+ config = load_user_config()
881
+ # Start auto-save background thread (debounced every 20s)
882
+ start_config_saver()
883
+ # Set initial default values (will be overridden by load_ui_config on page load)
884
+ # Don't fetch from server during initialization to avoid validation errors
885
+ available_models = []
886
+ available_samplers = []
887
+ available_schedulers = []
888
+
889
+ # Set initial default values (will be overridden by load_ui_config on page load)
890
+ default_server_address = "127.0.0.1:8188"
891
+ default_model = ""
892
+ default_sampler = "euler"
893
+ default_scheduler = "normal"
894
+ default_steps = 30
895
+ default_cfg = 6.0
896
+ default_width = 768
897
+ default_height = 1280
898
+ default_batch_size = 1
899
+ default_batch_count = 1
900
+ default_seed = 757831338432565
901
+ default_after_generate = "randomize"
902
+ default_positive_prefix = ""
903
+ default_negative_prefix = ""
904
+ default_positive = "best quality,very aesthetic,highres,absurdres,sensitive,A girl dressed in a maid costume with a personality, kneeling in front of her master,"
905
+ default_negative = "lowres,(bad),bad feet,text,error,fewer,extra,missing,worst quality,jpeg artifacts,low quality,watermark,unfinished,displeasing,oldest,early,chromatic aberration,signature,artistic error,username,scan,[abstract],english text,shiny_skin,"
906
+ default_preset_name = "None"
907
+ default_workflow = "workflow_template"
908
+
909
+ css = """
910
+ :root { font-family: sans-serif; }
911
+ #output_gallery img, #history_gallery img { border: 2px solid #e0e0e0; border-radius: 8px; }
912
+ """
913
+
914
+ with gr.Blocks(css=css, theme=gr.themes.Soft()) as app:
915
+ gr.Markdown("<h1>ComfyUI Web Interface</h1>")
916
+
917
+ with gr.Tabs():
918
+ with gr.TabItem("Generator"):
919
+ with gr.Row():
920
+ with gr.Column(scale=1):
921
+ gr.Markdown("<h3>⚙️ Settings</h3>")
922
+ with gr.Row():
923
+ server_addr = gr.Textbox(label="Server Address", value=default_server_address, scale=3)
924
+ refresh_btn = gr.Button("🔄 Refresh", scale=1)
925
+ model = gr.Dropdown(label="Model (Checkpoint Name)", choices=[], value="")
926
+
927
+ with gr.Accordion("Workflow", open=True):
928
+ workflow_selector = gr.Dropdown(label="Workflow Template", choices=list(GLOBAL_WORKFLOWS.keys()), value=default_workflow)
929
+
930
+ with gr.Accordion("Sampling Parameters", open=True):
931
+ sampler = gr.Dropdown(label="Sampler", choices=[], value="euler")
932
+ scheduler = gr.Dropdown(label="Scheduler", choices=[], value="normal")
933
+ steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=default_steps)
934
+ cfg = gr.Slider(label="CFG Scale", minimum=0.0, maximum=20.0, step=0.1, value=default_cfg)
935
+
936
+ with gr.Accordion("Image Dimensions", open=True):
937
+ width = gr.Slider(label="Width", minimum=64, maximum=2048, step=64, value=default_width)
938
+ height = gr.Slider(label="Height", minimum=64, maximum=2048, step=64, value=default_height)
939
+ batch_size = gr.Slider(label="Batch Size (Images per generation)", minimum=1, maximum=16, step=1, value=default_batch_size)
940
+ batch_count = gr.Slider(label="Batch Count (Executions)", minimum=1, maximum=20, step=1, value=default_batch_count)
941
+
942
+ # Place seed and after_generate within the left settings column to keep two-column layout
943
+ with gr.Row():
944
+ seed = gr.Number(label="Seed", value=default_seed, precision=0)
945
+ after_generate = gr.Dropdown(
946
+ label="After Generate",
947
+ choices=["randomize", "increment", "decrement", "fixed"],
948
+ value=default_after_generate
949
+ )
950
+
951
+
952
+ with gr.Column(scale=2):
953
+ gr.Markdown("<h3>🎨 Prompts & Generation</h3>")
954
+
955
+ with gr.Accordion("Style Presets", open=True):
956
+ preset_selector = gr.Dropdown(label="Select Style", choices=list(GLOBAL_PRESETS.keys()), value=default_preset_name)
957
+ preset_name_input = gr.Textbox(label="Style Name (for saving)", lines=1)
958
+ positive_prefix_input = gr.Textbox(label="Positive Prefix", lines=3, interactive=True, value=default_positive_prefix)
959
+ negative_prefix_input = gr.Textbox(label="Negative Prefix", lines=3, interactive=True, value=default_negative_prefix)
960
+ with gr.Row():
961
+ save_preset_btn = gr.Button("💾 Save / Update Style")
962
+ delete_preset_btn = gr.Button("🗑️ Delete Style", variant="stop")
963
+ preset_status_label = gr.Label(value="Select a style to apply, or edit the fields and save a new one.")
964
+
965
+ positive_prompt = gr.Textbox(label="Positive Prompt (Your content)", lines=6, value=default_positive)
966
+ negative_prompt = gr.Textbox(label="Negative Prompt (Your content)", lines=3, value=default_negative)
967
+ generate_btn = gr.Button("Generate Image", variant="primary")
968
+ status_label = gr.Label(value="Idle", label="Status")
969
+ output_gallery = gr.Gallery(label="Generated Images", elem_id="output_gallery", columns=4)
970
+
971
+ with gr.TabItem("Scheduler / Keep-Alive"):
972
+ gr.Markdown("## Scheduled Generation")
973
+ gr.Markdown("This feature will periodically run a generation task with the settings from the 'Generator' tab to keep a remote server active. It will always run with a 'Batch Count' of 1.")
974
+ scheduler_interval = gr.Number(label="Interval (minutes)", value=10, minimum=1, step=1)
975
+ with gr.Row():
976
+ start_scheduler_btn = gr.Button("Start Scheduler")
977
+ stop_scheduler_btn = gr.Button("Stop Scheduler")
978
+ scheduler_status = gr.Label("Scheduler is stopped.")
979
+ scheduler_output = gr.Gallery(label="Last Scheduled Image", columns=1, height="auto")
980
+
981
+ with gr.TabItem("History"):
982
+ with gr.Row():
983
+ refresh_history_btn = gr.Button("🔄 Refresh History")
984
+ delete_btn = gr.Button("🗑️ Delete Selected Images")
985
+ history_gallery = gr.Gallery(label="Image History", elem_id="history_gallery", columns=8, allow_preview=True, preview=True)
986
+
987
+ with gr.TabItem("OpenAI API"):
988
+ gr.Markdown("## OpenAI-compatible API")
989
+ with gr.Row():
990
+ api_host = gr.Textbox(label="Host", value="127.0.0.1")
991
+ api_port = gr.Number(label="Port", value=9000, precision=0)
992
+ gr.Markdown("### Request Mapping and Generation Parameters")
993
+ with gr.Row():
994
+ api_return = gr.Dropdown(label="Response Type", choices=["url", "b64_json"], value=USER_CONFIG.get("api_return", "url"))
995
+ api_n = gr.Slider(label="Images per request (n)", minimum=1, maximum=8, step=1, value=USER_CONFIG.get("api_n", 1))
996
+ with gr.Accordion("Override Generation Parameters (optional)", open=False):
997
+ with gr.Row():
998
+ api_server_addr = gr.Textbox(label="Server Address (override)", value=USER_CONFIG.get("api_server_address", ""))
999
+ api_model = gr.Textbox(label="Model (ckpt)", value=USER_CONFIG.get("api_model", ""))
1000
+ with gr.Row():
1001
+ api_sampler = gr.Textbox(label="Sampler", value=USER_CONFIG.get("api_sampler", ""))
1002
+ api_scheduler = gr.Textbox(label="Scheduler", value=USER_CONFIG.get("api_scheduler", ""))
1003
+ with gr.Row():
1004
+ api_steps = gr.Number(label="Steps", value=USER_CONFIG.get("api_steps", 30), precision=0)
1005
+ api_cfg = gr.Number(label="CFG", value=USER_CONFIG.get("api_cfg", 6.0))
1006
+ with gr.Row():
1007
+ api_width = gr.Number(label="Width", value=USER_CONFIG.get("api_width", 768), precision=0)
1008
+ api_height = gr.Number(label="Height", value=USER_CONFIG.get("api_height", 1280), precision=0)
1009
+ with gr.Row():
1010
+ api_seed = gr.Number(label="Seed", value=USER_CONFIG.get("api_seed", 757831338432565), precision=0)
1011
+ api_after = gr.Dropdown(label="After Generate", choices=["randomize", "increment", "decrement", "fixed"], value=USER_CONFIG.get("api_after_generate", "randomize"))
1012
+ with gr.Row():
1013
+ api_pos_prefix = gr.Textbox(label="Positive Prefix", lines=2, value=USER_CONFIG.get("api_positive_prefix", ""))
1014
+ api_neg_prefix = gr.Textbox(label="Negative Prefix", lines=2, value=USER_CONFIG.get("api_negative_prefix", ""))
1015
+ with gr.Row():
1016
+ api_workflow = gr.Dropdown(label="Workflow Template", choices=list(GLOBAL_WORKFLOWS.keys()), value=USER_CONFIG.get("api_workflow", "workflow_template"))
1017
+ api_status = gr.Label("Server is stopped.")
1018
+ with gr.Row():
1019
+ api_save_cfg_btn = gr.Button("Save API Config")
1020
+ api_start_btn = gr.Button("Start API Server")
1021
+ api_stop_btn = gr.Button("Stop API Server")
1022
+
1023
+ with gr.TabItem("Settings"):
1024
+ gr.Markdown("## Workflow Management")
1025
+ with gr.Row():
1026
+ with gr.Column(scale=1):
1027
+ gr.Markdown("### Workflow List")
1028
+ workflow_list = gr.Dropdown(label="Select Workflow", choices=list(GLOBAL_WORKFLOWS.keys()), value=default_workflow)
1029
+ with gr.Row():
1030
+ load_workflow_btn = gr.Button("📂 Load Workflow")
1031
+ delete_workflow_btn = gr.Button("🗑️ Delete Workflow", variant="stop")
1032
+ workflow_status = gr.Label(value="Select a workflow to edit or create a new one.")
1033
+
1034
+ with gr.Column(scale=2):
1035
+ gr.Markdown("### Workflow Editor")
1036
+ workflow_name_input = gr.Textbox(label="Workflow Name", lines=1, value="workflow_template")
1037
+ workflow_content_input = gr.Textbox(label="Workflow JSON Content", lines=20, value="", max_lines=30)
1038
+ with gr.Row():
1039
+ save_workflow_btn = gr.Button("💾 Save Workflow", variant="primary")
1040
+ new_workflow_btn = gr.Button("➕ New Workflow")
1041
+ workflow_editor_status = gr.Label(value="Edit the workflow JSON content above.")
1042
+
1043
+ gr.Markdown("## Preferences")
1044
+ with gr.Row():
1045
+ language_dropdown = gr.Dropdown(label="Language", choices=["en", "zh"], value=config.get("language", "en"))
1046
+ autosave_interval = gr.Number(label="Autosave Interval (seconds)", value=config.get("config_save_interval", 20), minimum=5, step=1)
1047
+ with gr.Row():
1048
+ save_prefs_btn = gr.Button("Save Preferences")
1049
+ prefs_status = gr.Label("")
1050
+
1051
+ # Define Inputs/Outputs for main generation
1052
+ gen_inputs = [server_addr, positive_prefix_input, negative_prefix_input, positive_prompt, negative_prompt, model, sampler, scheduler, steps, cfg, width, height, seed, after_generate, batch_size, batch_count, workflow_selector]
1053
+ gen_outputs = [status_label, output_gallery]
1054
+
1055
+ # Wire up events
1056
+ refresh_btn.click(fn=update_choices, inputs=server_addr, outputs=[model, sampler, scheduler])
1057
+
1058
+ # Real-time save events
1059
+ server_addr.change(fn=save_server_address, inputs=server_addr, outputs=server_addr)
1060
+ model.change(fn=save_model, inputs=model, outputs=model)
1061
+ sampler.change(fn=save_sampler, inputs=sampler, outputs=sampler)
1062
+ scheduler.change(fn=save_scheduler, inputs=scheduler, outputs=scheduler)
1063
+ steps.change(fn=save_steps, inputs=steps, outputs=steps)
1064
+ cfg.change(fn=save_cfg, inputs=cfg, outputs=cfg)
1065
+ width.change(fn=save_width, inputs=width, outputs=width)
1066
+ height.change(fn=save_height, inputs=height, outputs=height)
1067
+ batch_size.change(fn=save_batch_size, inputs=batch_size, outputs=batch_size)
1068
+ batch_count.change(fn=save_batch_count, inputs=batch_count, outputs=batch_count)
1069
+ seed.change(fn=save_seed, inputs=seed, outputs=seed)
1070
+ after_generate.change(fn=save_after_generate, inputs=after_generate, outputs=after_generate)
1071
+ # Save text fields on blur instead of every keystroke
1072
+ positive_prefix_input.blur(fn=save_positive_prefix, inputs=positive_prefix_input, outputs=positive_prefix_input)
1073
+ negative_prefix_input.blur(fn=save_negative_prefix, inputs=negative_prefix_input, outputs=negative_prefix_input)
1074
+ positive_prompt.blur(fn=save_positive_prompt, inputs=positive_prompt, outputs=positive_prompt)
1075
+ negative_prompt.blur(fn=save_negative_prompt, inputs=negative_prompt, outputs=negative_prompt)
1076
+ preset_selector.change(fn=save_preset_name, inputs=preset_selector, outputs=preset_selector)
1077
+ workflow_selector.change(fn=save_current_workflow, inputs=workflow_selector, outputs=workflow_selector)
1078
+
1079
+ # Preset events
1080
+ preset_selector.change(fn=select_preset, inputs=preset_selector, outputs=[preset_name_input, positive_prefix_input, negative_prefix_input])
1081
+ save_preset_btn.click(fn=save_or_update_preset, inputs=[preset_name_input, positive_prefix_input, negative_prefix_input], outputs=[preset_selector, preset_status_label])
1082
+ delete_preset_btn.click(fn=delete_preset, inputs=[preset_name_input], outputs=[preset_selector, preset_name_input, positive_prefix_input, negative_prefix_input, preset_status_label])
1083
+
1084
+ gen_event = generate_btn.click(fn=generate_images, inputs=gen_inputs, outputs=gen_outputs)
1085
+ gen_event.then(fn=get_history_images, outputs=history_gallery)
1086
+
1087
+ # Scheduler Tab Events
1088
+ scheduler_inputs = [scheduler_interval, server_addr, positive_prefix_input, negative_prefix_input, positive_prompt, negative_prompt, model, sampler, scheduler, steps, cfg, width, height, seed, after_generate, batch_size, batch_count, workflow_selector]
1089
+ scheduler_outputs = [scheduler_status, scheduler_output]
1090
+
1091
+ start_event = start_scheduler_btn.click(fn=run_scheduled_generation, inputs=scheduler_inputs, outputs=scheduler_outputs)
1092
+ stop_scheduler_btn.click(fn=stop_scheduler, inputs=None, outputs=scheduler_status, cancels=[start_event])
1093
+
1094
+ # History Tab Events
1095
+ app.load(fn=get_history_images, outputs=history_gallery)
1096
+ refresh_history_btn.click(fn=get_history_images, outputs=history_gallery)
1097
+ delete_btn.click(fn=delete_image, inputs=history_gallery, outputs=history_gallery)
1098
+
1099
+ # OpenAI API tab events
1100
+ def _get_api_config():
1101
+ cfg = load_user_config()
1102
+ return {
1103
+ "server_address": cfg.get("api_server_address") or cfg.get("server_address", "127.0.0.1:8188"),
1104
+ "model": cfg.get("api_model") or cfg.get("model", ""),
1105
+ "sampler": cfg.get("api_sampler") or cfg.get("sampler", "euler"),
1106
+ "scheduler": cfg.get("api_scheduler") or cfg.get("scheduler", "normal"),
1107
+ "steps": cfg.get("api_steps") or cfg.get("steps", 30),
1108
+ "cfg": cfg.get("api_cfg") or cfg.get("cfg", 6.0),
1109
+ "width": cfg.get("api_width") or cfg.get("width", 768),
1110
+ "height": cfg.get("api_height") or cfg.get("height", 1280),
1111
+ "seed": cfg.get("api_seed") or cfg.get("seed", 757831338432565),
1112
+ "after_generate": cfg.get("api_after_generate") or cfg.get("after_generate", "randomize"),
1113
+ "positive_prefix": cfg.get("api_positive_prefix") or cfg.get("positive_prefix", ""),
1114
+ "negative_prefix": cfg.get("api_negative_prefix") or cfg.get("negative_prefix", ""),
1115
+ "negative_prompt": cfg.get("negative_prompt", ""),
1116
+ "current_workflow": cfg.get("api_workflow") or cfg.get("current_workflow", "workflow_template"),
1117
+ "api_return": cfg.get("api_return", "url"),
1118
+ "api_n": cfg.get("api_n", 1)
1119
+ }
1120
+
1121
+ def start_api(host, port, return_type, n):
1122
+ queue_config_update(api_return=return_type, api_n=int(n))
1123
+ try:
1124
+ ok, msg = start_openai_server(str(host), int(port), _get_api_config)
1125
+ return msg
1126
+ except Exception as e:
1127
+ return f"Failed to start: {e}"
1128
+
1129
+ def stop_api():
1130
+ ok, msg = stop_openai_server()
1131
+ return msg
1132
+
1133
+ def save_api_config(*vals):
1134
+ queue_config_update(
1135
+ api_server_address=vals[0], api_model=vals[1], api_sampler=vals[2], api_scheduler=vals[3],
1136
+ api_steps=int(vals[4]), api_cfg=float(vals[5]), api_width=int(vals[6]), api_height=int(vals[7]),
1137
+ api_seed=int(vals[8]), api_after_generate=vals[9], api_positive_prefix=vals[10], api_negative_prefix=vals[11],
1138
+ api_workflow=vals[12]
1139
+ )
1140
+ return "API config saved (debounced)."
1141
+
1142
+ api_save_cfg_btn.click(fn=save_api_config, inputs=[api_server_addr, api_model, api_sampler, api_scheduler, api_steps, api_cfg, api_width, api_height, api_seed, api_after, api_pos_prefix, api_neg_prefix, api_workflow], outputs=api_status)
1143
+ api_start_btn.click(fn=start_api, inputs=[api_host, api_port, api_return, api_n], outputs=api_status)
1144
+ api_stop_btn.click(fn=stop_api, outputs=api_status)
1145
+
1146
+ # Workflow management events
1147
+ def load_workflow_to_editor(workflow_name):
1148
+ """Loads a workflow into the editor."""
1149
+ if not workflow_name or workflow_name == "workflow_template":
1150
+ workflow_path = os.path.join(WORKFLOWS_DIR, "workflow_template.json")
1151
+ else:
1152
+ workflow_path = os.path.join(WORKFLOWS_DIR, f"{workflow_name}.json")
1153
+
1154
+ if os.path.exists(workflow_path):
1155
+ try:
1156
+ with open(workflow_path, 'r', encoding='utf-8') as f:
1157
+ content = f.read()
1158
+ return workflow_name, content, f"Loaded workflow '{workflow_name}'"
1159
+ except Exception as e:
1160
+ return workflow_name, "", f"Error loading workflow: {e}"
1161
+ else:
1162
+ return workflow_name, "", f"Workflow '{workflow_name}' not found"
1163
+
1164
def save_workflow_from_editor(workflow_name, workflow_content):
    """Persist the editor's JSON under *workflow_name* and refresh the list.

    Returns (dropdown update, status message).
    """
    global GLOBAL_WORKFLOWS
    success, message = save_workflow(workflow_name, workflow_content)
    if not success:
        # Leave the dropdown untouched; just surface the error message.
        return gr.update(), message
    # Re-read the workflows directory so the dropdown reflects the new file.
    GLOBAL_WORKFLOWS = load_workflows()
    return gr.update(choices=list(GLOBAL_WORKFLOWS.keys()), value=workflow_name), message
1174
+
1175
def delete_workflow_from_editor(workflow_name):
    """Delete *workflow_name* from disk and reset the editor to the template.

    Returns updates for (workflow dropdown, name field, content field,
    status field) in that order.
    """
    global GLOBAL_WORKFLOWS
    success, message = delete_workflow(workflow_name)
    if not success:
        # Fix: the original failure path returned "" for the content
        # field, wiping the user's unsaved edits even though nothing was
        # deleted. Use no-op updates so the editor is left untouched.
        return gr.update(), gr.update(), gr.update(), message
    # Refresh the dropdown and fall back to the always-present template.
    GLOBAL_WORKFLOWS = load_workflows()
    return (gr.update(choices=list(GLOBAL_WORKFLOWS.keys()), value="workflow_template"),
            "workflow_template", "", message)
1186
+
1187
def create_new_workflow():
    """Reset the editor fields so the user can start a brand-new workflow.

    Returns (default name, empty content, instructional status message).
    """
    status = "New workflow created. Enter a name and JSON content."
    return ("new_workflow", "", status)
1190
+
1191
+ # Wire up workflow management events
1192
# Hook the workflow-editor buttons up to their handlers.
load_workflow_btn.click(
    fn=load_workflow_to_editor,
    inputs=workflow_list,
    outputs=[workflow_name_input, workflow_content_input, workflow_status],
)
save_workflow_btn.click(
    fn=save_workflow_from_editor,
    inputs=[workflow_name_input, workflow_content_input],
    outputs=[workflow_list, workflow_editor_status],
)
delete_workflow_btn.click(
    fn=delete_workflow_from_editor,
    inputs=workflow_name_input,
    outputs=[workflow_list, workflow_name_input, workflow_content_input, workflow_editor_status],
)
new_workflow_btn.click(
    fn=create_new_workflow,
    outputs=[workflow_name_input, workflow_content_input, workflow_editor_status],
)
1196
+
1197
+ # Preferences events
1198
def save_preferences(lang, interval):
    """Queue the language choice and apply the autosave interval.

    Returns a confirmation string echoing the effective values.
    """
    queue_config_update(language=lang)
    # set_config_save_interval returns the interval it actually applied
    # (it may clamp/normalize the requested value).
    seconds = set_config_save_interval(interval)
    return f"Saved. Language: {lang}, autosave: {seconds}s"
1202
+
1203
save_prefs_btn.click(
    fn=save_preferences,
    inputs=[language_dropdown, autosave_interval],
    outputs=prefs_status,
)

# Populate every widget from the persisted user config when the page opens.
# NOTE(review): model/sampler/scheduler/workflow_selector appear twice —
# presumably load_ui_config emits a second round of updates (e.g. dropdown
# choices) for them; confirm against load_ui_config's return values.
config_load_outputs = [
    server_addr, model, sampler, scheduler, steps, cfg, width, height,
    batch_size, batch_count, seed, after_generate, positive_prefix_input,
    negative_prefix_input, positive_prompt, negative_prompt, preset_selector, workflow_selector,
    model, sampler, scheduler, workflow_selector,
]
app.load(fn=load_ui_config, outputs=config_load_outputs)
1212
+
1213
+ return app
1214
+
1215
+ if __name__ == "__main__":
1216
+ import argparse
1217
+ parser = argparse.ArgumentParser()
1218
+ parser.add_argument("--host", type=str, default=None, help="Host to run the server on. Defaults to 127.0.0.1.")
1219
+ args = parser.parse_args()
1220
+
1221
+ webui = create_ui()
1222
+ try:
1223
+ # Pass server_name to launch()
1224
+ webui.launch(server_name=args.host)
1225
+ finally:
1226
+ # Flush any pending config updates on exit
1227
+ _flush_pending_config()