claude-mat commited on
Commit
60ca3db
·
verified ·
1 Parent(s): 9babe95

Delete webui.py

Browse files
Files changed (1) hide show
  1. webui.py +0 -1323
webui.py DELETED
@@ -1,1323 +0,0 @@
1
- import gradio as gr
2
- import websocket
3
- import uuid
4
- import json
5
- import urllib.request
6
- import urllib.parse
7
- from PIL import Image
8
- import io
9
- import os
10
- import random
11
- import time
12
- import threading
13
- import base64
14
-
15
- try:
16
- from fastapi import FastAPI, Request
17
- from fastapi.responses import JSONResponse, FileResponse
18
- import uvicorn
19
- except Exception:
20
- FastAPI = None
21
- uvicorn = None
22
-
23
# --- Constants and Setup ---
BASE_DIR = os.path.dirname(__file__)
# Allow overriding data directory via environment variable WEBUI_DATA_DIR
DATA_DIR = os.path.abspath(os.getenv('WEBUI_DATA_DIR', os.path.join(BASE_DIR, 'data')))
WORKFLOWS_DIR = os.path.join(DATA_DIR, 'workflows')  # saved ComfyUI workflow JSON files
OUTPUT_DIR = os.path.join(DATA_DIR, 'outputs')  # generated images are saved here
PRESETS_FILE = os.path.join(DATA_DIR, 'presets.json')  # prompt-prefix presets
USER_CONFIG_FILE = os.path.join(DATA_DIR, 'user_config.json')  # persisted UI settings
os.makedirs(OUTPUT_DIR, exist_ok=True)
os.makedirs(WORKFLOWS_DIR, exist_ok=True)
# --- Scheduler State ---
# Shared state for the background generation scheduler. All reads/writes of
# SCHEDULER_STATUS are done under SCHEDULER_LOCK by the functions below.
SCHEDULER_THREAD = None
SCHEDULER_STOP_EVENT = threading.Event()
SCHEDULER_STATUS = {
    "running": False,
    "interval": 10,
    "last_run_time": "N/A",
    "last_run_status": "Stopped",
    "last_image": None
}
SCHEDULER_LOCK = threading.Lock()
44
-
45
# --- Auto-save Config Manager (20s interval, debounced) ---
CONFIG_SAVE_INTERVAL = int(os.getenv('WEBUI_CONFIG_INTERVAL', '20')) # seconds
_pending_config = {}  # key/value updates queued for the next flush
_config_lock = threading.Lock()  # guards _pending_config and _config_changed
_config_saver_thread = None  # lazily started by start_config_saver()
_config_changed = False  # True when _pending_config holds unsaved updates
51
-
52
def start_config_saver():
    """Launch the background config-saver thread if it is not already running."""
    global _config_saver_thread
    alive = _config_saver_thread is not None and _config_saver_thread.is_alive()
    if not alive:
        # Daemon thread so it never blocks interpreter shutdown.
        _config_saver_thread = threading.Thread(target=_config_saver_loop, daemon=True)
        _config_saver_thread.start()
58
-
59
def _flush_pending_config():
    """Flush any pending config changes immediately."""
    # Merges the queued key/value pairs into the on-disk user config.
    # The finally-clause empties the queue even when saving fails, so a bad
    # write is dropped rather than retried forever on every interval.
    global _config_changed
    with _config_lock:
        if _config_changed and _pending_config:
            try:
                config = load_user_config()
                config.update(_pending_config)
                save_user_config(config)
                print(f"[Auto-save] Configuration saved at {time.strftime('%H:%M:%S')}")
            except Exception as e:
                print(f"[Auto-save] Error saving config: {e}")
            finally:
                _pending_config.clear()
                _config_changed = False
74
-
75
def _config_saver_loop():
    """Background thread that saves config every CONFIG_SAVE_INTERVAL if changed."""
    # Runs forever (daemon thread). A new CONFIG_SAVE_INTERVAL set via
    # set_config_save_interval() takes effect after the current sleep ends.
    while True:
        time.sleep(CONFIG_SAVE_INTERVAL)
        _flush_pending_config()
80
-
81
def set_config_save_interval(seconds: int):
    """Update autosave interval at runtime (min 5s).

    Non-numeric input falls back to 20 seconds; values below 5 are clamped
    to 5. The accepted value is also queued for persistence and returned.
    """
    global CONFIG_SAVE_INTERVAL
    try:
        value = max(5, int(seconds))
    except Exception:
        value = 20  # unparseable input: restore the default
    CONFIG_SAVE_INTERVAL = value
    queue_config_update(config_save_interval=value)
    return value
93
-
94
def queue_config_update(**kwargs):
    """Queue config key/value updates for the background saver's next pass."""
    global _config_changed
    with _config_lock:
        for key, value in kwargs.items():
            _pending_config[key] = value
        _config_changed = True
100
-
101
- # --- Preset Management Functions ---
102
def load_presets():
    """Loads presets from the JSON file. If not found, creates it with defaults.

    Returns a dict mapping preset name -> {"positive": str, "negative": str}.
    The "None" entry is guaranteed to exist so the UI always has a blank
    selection. A missing, corrupt, or unreadable presets file is rewritten
    with the defaults.
    """
    default_presets = {
        "None": {"positive": "", "negative": ""},
        "✨ 推荐风格": {
            "positive": "best quality, very aesthetic, highres, absurdres, sensitive",
            "negative": "lowres, (bad), bad feet, text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, artistic error, username, scan, [abstract], english text, shiny_skin"
        },
        "🎨 动漫风格": {
            "positive": "masterpiece, best quality, anime, 1girl, beautiful detailed eyes, detailed face",
            "negative": "photorealistic, 3d, extra limbs, bad anatomy, ugly, deformed"
        },
        "📸 写实风格": {
            "positive": "photorealistic, high quality, detailed, professional photography",
            "negative": "anime, cartoon, drawing, painting, sketch"
        }
    }

    if not os.path.exists(PRESETS_FILE):
        # First run: seed the presets file with the defaults.
        with open(PRESETS_FILE, 'w', encoding='utf-8') as f:
            json.dump(default_presets, f, indent=4)
        return default_presets
    else:
        try:
            with open(PRESETS_FILE, 'r', encoding='utf-8') as f:
                presets = json.load(f)
            if "None" not in presets:
                presets["None"] = {"positive": "", "negative": ""}
            return presets
        except (json.JSONDecodeError, IOError):
            # Corrupt or unreadable file: reset it rather than crashing at startup.
            with open(PRESETS_FILE, 'w', encoding='utf-8') as f:
                json.dump(default_presets, f, indent=4)
            return default_presets
135
-
136
def save_presets(presets_dict):
    """Persist *presets_dict* to PRESETS_FILE as indented JSON."""
    serialized = json.dumps(presets_dict, indent=4)
    with open(PRESETS_FILE, 'w', encoding='utf-8') as f:
        f.write(serialized)
140
-
141
def combine_prompts(prefix, main_prompt):
    """Join a preset prefix and the main prompt with ", ".

    Either argument may be None, empty, or whitespace-only; blank parts are
    dropped, so the result never carries a dangling separator. (The original
    produced ", prompt" when the prefix was whitespace-only, because it
    tested truthiness before stripping.)

    Returns the combined prompt string, or "" when both parts are blank.
    """
    parts = [p.strip() for p in (prefix, main_prompt) if p and p.strip()]
    return ", ".join(parts)
150
-
151
def select_preset(preset_name):
    """Return (name, positive_prefix, negative_prefix) for *preset_name*.

    Unknown names resolve to empty prefixes rather than raising.
    """
    fallback = {"positive": "", "negative": ""}
    data = GLOBAL_PRESETS.get(preset_name, fallback)
    return preset_name, data["positive"], data["negative"]
155
-
156
def save_or_update_preset(preset_name, positive_prefix, negative_prefix):
    """Create or overwrite a preset and persist it.

    Returns (dropdown update, status message) for the Gradio UI.
    """
    name = (preset_name or "").strip()
    if not name:
        return gr.update(), "Preset name cannot be empty."
    GLOBAL_PRESETS[name] = {"positive": positive_prefix, "negative": negative_prefix}
    save_presets(GLOBAL_PRESETS)
    dropdown = gr.update(choices=list(GLOBAL_PRESETS.keys()), value=name)
    return dropdown, f"Preset '{name}' saved."
165
-
166
def delete_preset(preset_name):
    """Remove a preset (the protected "None" entry cannot be deleted).

    Returns (dropdown update, selected name, positive, negative, status)
    for the Gradio UI.
    """
    name = preset_name.strip() if preset_name else ""
    if name in ("", "None"):
        return gr.update(), gr.update(), gr.update(), gr.update(), "Cannot delete this preset."
    if name not in GLOBAL_PRESETS:
        return gr.update(), gr.update(), gr.update(), gr.update(), f"Preset '{name}' not found."
    GLOBAL_PRESETS.pop(name)
    save_presets(GLOBAL_PRESETS)
    dropdown = gr.update(choices=list(GLOBAL_PRESETS.keys()), value="None")
    return dropdown, "None", "", "", f"Preset '{name}' deleted."
178
-
179
# Load presets globally
# Module-level preset cache; mutated in place by the preset callbacks above.
GLOBAL_PRESETS = load_presets()
181
-
182
- # --- User Config Management Functions ---
183
def load_user_config():
    """Loads user configuration from JSON file.

    Returns a dict of generator-UI settings plus api_* overrides for the
    OpenAI-compatible proxy. Missing keys are back-filled from the defaults
    so older config files keep working after upgrades; a missing or corrupt
    file is rewritten with the defaults.
    """
    default_config = {
        "server_address": "127.0.0.1:8188",
        "model": "",
        "sampler": "euler",
        "scheduler": "normal",
        "steps": 30,
        "cfg": 6.0,
        "width": 768,
        "height": 1280,
        "batch_size": 1,
        "batch_count": 1,
        "seed": 757831338432565,
        "after_generate": "randomize",
        "positive_prefix": "",
        "negative_prefix": "",
        "positive_prompt": "best quality,very aesthetic,highres,absurdres,sensitive,A girl dressed in a maid costume with a personality, kneeling in front of her master,",
        "negative_prompt": "lowres,(bad),bad feet,text,error,fewer,extra,missing,worst quality,jpeg artifacts,low quality,watermark,unfinished,displeasing,oldest,early,chromatic aberration,signature,artistic error,username,scan,[abstract],english text,shiny_skin,",
        "preset_name": "None",
        "current_workflow": "workflow_template",
        "language": "en",
        "config_save_interval": 20,
        # OpenAI API defaults (can override generator settings)
        "api_server_address": "",
        "api_model": "",
        "api_sampler": "",
        "api_scheduler": "",
        "api_steps": 30,
        "api_cfg": 6.0,
        "api_width": 768,
        "api_height": 1280,
        "api_seed": 757831338432565,
        "api_after_generate": "randomize",
        "api_positive_prefix": "",
        "api_negative_prefix": "",
        "api_workflow": "workflow_template",
        "api_return": "url",
        "api_n": 1
    }

    if not os.path.exists(USER_CONFIG_FILE):
        save_user_config(default_config)
        return default_config

    try:
        with open(USER_CONFIG_FILE, 'r', encoding='utf-8') as f:
            config = json.load(f)
        # Merge in defaults so newly added fields always exist.
        for key, value in default_config.items():
            if key not in config:
                config[key] = value
        return config
    except (json.JSONDecodeError, IOError):
        save_user_config(default_config)
        return default_config
239
-
240
def save_user_config(config_dict):
    """Write *config_dict* to USER_CONFIG_FILE as indented JSON."""
    payload = json.dumps(config_dict, indent=4)
    with open(USER_CONFIG_FILE, 'w', encoding='utf-8') as f:
        f.write(payload)
244
-
245
def update_user_config(**kwargs):
    """Load the stored config, apply *kwargs* on top, and save it back immediately."""
    config = load_user_config()
    config.update(kwargs)
    save_user_config(config)
251
-
252
# Load user config globally
# Snapshot taken at import time; callbacks re-read via load_user_config().
USER_CONFIG = load_user_config()
254
-
255
- # --- Workflow Management Functions ---
256
def load_workflows():
    """Return {workflow_name: parsed_json} for every *.json in WORKFLOWS_DIR.

    Files that fail to parse are reported on stdout and skipped.
    """
    workflows = {}
    if not os.path.exists(WORKFLOWS_DIR):
        return workflows
    for filename in os.listdir(WORKFLOWS_DIR):
        if not filename.endswith('.json'):
            continue
        name = filename[:-5]  # strip the ".json" suffix
        path = os.path.join(WORKFLOWS_DIR, filename)
        try:
            with open(path, 'r', encoding='utf-8') as f:
                workflows[name] = json.load(f)
        except (json.JSONDecodeError, IOError) as e:
            print(f"Error loading workflow {name}: {e}")
    return workflows
273
-
274
def save_workflow(workflow_name, workflow_content):
    """Validate *workflow_content* as JSON and write it into WORKFLOWS_DIR.

    Returns (ok, message) for the UI.
    """
    name = (workflow_name or "").strip()
    if not name:
        return False, "Workflow name cannot be empty."
    path = os.path.join(WORKFLOWS_DIR, f"{name}.json")
    try:
        # Reject malformed JSON before touching the file.
        json.loads(workflow_content)
    except json.JSONDecodeError as e:
        return False, f"Invalid JSON format: {e}"
    try:
        with open(path, 'w', encoding='utf-8') as f:
            f.write(workflow_content)
    except IOError as e:
        return False, f"Error saving workflow: {e}"
    return True, f"Workflow '{name}' saved successfully."
292
-
293
def delete_workflow(workflow_name):
    """Delete a workflow file; the default template is protected.

    Returns (ok, message) for the UI.
    """
    name = workflow_name.strip() if workflow_name else ""
    if name in ("", "workflow_template"):
        return False, "Cannot delete this workflow."
    path = os.path.join(WORKFLOWS_DIR, f"{name}.json")
    if not os.path.exists(path):
        return False, f"Workflow '{name}' not found."
    try:
        os.remove(path)
    except IOError as e:
        return False, f"Error deleting workflow: {e}"
    return True, f"Workflow '{name}' deleted successfully."
309
-
310
def load_workflow_content(workflow_name):
    """Return the parsed JSON of a workflow, or None when missing/unreadable."""
    if not workflow_name or workflow_name == "workflow_template":
        filename = "workflow_template.json"  # fall back to the default template
    else:
        filename = f"{workflow_name}.json"
    path = os.path.join(WORKFLOWS_DIR, filename)
    if not os.path.exists(path):
        return None
    try:
        with open(path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except (json.JSONDecodeError, IOError) as e:
        print(f"Error loading workflow {workflow_name}: {e}")
        return None
326
-
327
# Load workflows globally
# Snapshot of the workflow directory taken at import time.
GLOBAL_WORKFLOWS = load_workflows()
329
-
330
- # --- OpenAI-compatible API Server (FastAPI) ---
331
-
332
-
333
def _ensure_fastapi_available():
    """Raise RuntimeError when the optional FastAPI/uvicorn imports failed at module load."""
    missing = FastAPI is None or uvicorn is None
    if missing:
        raise RuntimeError("FastAPI/uvicorn not installed. Please install with: pip install fastapi uvicorn")
336
-
337
- def _encode_image_b64(path: str) -> str:
338
- with open(path, 'rb') as f:
339
- return base64.b64encode(f.read()).decode('utf-8')
340
-
341
def get_api_config():
    """
    Loads the user config and creates a consolidated config dictionary for the API,
    applying API-specific overrides over the main generator settings.

    NOTE: string-valued api_* fields use "or" fallbacks, so an empty-string
    override falls back to the generator setting; the numeric api_* fields
    (steps, cfg, width, height, seed) are used as-is.
    """
    cfg = load_user_config()
    return {
        "server_address": cfg.get("api_server_address") or cfg.get("server_address", "127.0.0.1:8188"),
        "model": cfg.get("api_model") or cfg.get("model", ""),
        "sampler": cfg.get("api_sampler") or cfg.get("sampler", "euler"),
        "scheduler": cfg.get("api_scheduler") or cfg.get("scheduler", "normal"),
        "steps": cfg.get("api_steps", 30),
        "cfg": cfg.get("api_cfg", 6.0),
        "width": cfg.get("api_width", 768),
        "height": cfg.get("api_height", 1280),
        "seed": cfg.get("api_seed", 757831338432565),
        "after_generate": cfg.get("api_after_generate") or cfg.get("after_generate", "randomize"),
        "positive_prefix": cfg.get("api_positive_prefix") or cfg.get("positive_prefix", ""),
        "negative_prefix": cfg.get("api_negative_prefix") or cfg.get("negative_prefix", ""),
        "negative_prompt": cfg.get("negative_prompt", ""),
        "current_workflow": cfg.get("api_workflow") or cfg.get("current_workflow", "workflow_template"),
        "api_return": cfg.get("api_return", "url"),
        "api_n": cfg.get("api_n", 1)
    }
365
-
366
def generate_image_sync(server_address, positive_prefix, negative_prefix, positive_prompt, negative_prompt, model, sampler, scheduler, steps, cfg, width, height, seed, after_generate, batch_size, batch_count, current_workflow):
    """Synchronous image generation that returns list of saved file paths.

    Runs *batch_count* sequential generations against a ComfyUI server: fills
    the selected workflow template's %placeholder% tokens, queues it over
    HTTP, waits on the websocket until execution finishes, downloads every
    output image and saves it as PNG under OUTPUT_DIR.

    Blocks until all batches complete; network/parse errors propagate to the
    caller (the websocket is closed via the finally-clause either way).
    """
    # Normalize server address
    if not server_address.startswith("http://") and not server_address.startswith("https://"):
        server_address = "http://" + server_address
    server_address = server_address.rstrip('/')

    # Derive the websocket URL from the HTTP(S) URL (the https branch below
    # overwrites the ws:// guess when needed).
    ws_address = "ws://" + server_address[len("http://"):]
    if server_address.startswith("https://"):
        ws_address = "wss://" + server_address[len("https://"):]

    client_id = str(uuid.uuid4())
    all_generated_images = []
    initial_seed = seed

    for i in range(batch_count):
        # Pick this batch's seed according to the after-generate policy.
        if after_generate == "randomize":
            current_seed = random.randint(0, 2**32 - 1)
        elif after_generate == "increment":
            current_seed = initial_seed + i
        elif after_generate == "decrement":
            current_seed = initial_seed - i
        else:  # "fixed"
            current_seed = initial_seed

        ws = websocket.WebSocket()
        try:
            ws.connect(f"{ws_address}/ws?clientId={client_id}")

            workflow_content = load_workflow_content(current_workflow)
            if workflow_content is None:
                break  # selected workflow missing/corrupt: stop generating
            workflow_content = json.dumps(workflow_content)

            final_positive_prompt = combine_prompts(positive_prefix, positive_prompt)
            final_negative_prompt = combine_prompts(negative_prefix, negative_prompt)

            # Plain-text substitution of the %placeholder% tokens in the template.
            workflow_content = workflow_content.replace('%prompt%', final_positive_prompt)
            workflow_content = workflow_content.replace('%negative_prompt%', final_negative_prompt)
            workflow_content = workflow_content.replace('%model%', model)
            workflow_content = workflow_content.replace('%width%', str(width))
            workflow_content = workflow_content.replace('%height%', str(height))
            workflow_content = workflow_content.replace('%batch_size%', str(batch_size))
            workflow_content = workflow_content.replace('%seed%', str(current_seed))
            workflow_content = workflow_content.replace('%steps%', str(steps))
            workflow_content = workflow_content.replace('%cfg%', str(cfg))
            workflow_content = workflow_content.replace('%sampler%', sampler)
            workflow_content = workflow_content.replace('%scheduler%', scheduler)

            prompt_workflow = json.loads(workflow_content)
            prompt_data = queue_prompt(prompt_workflow, client_id, server_address)
            prompt_id = prompt_data['prompt_id']

            # Wait for completion: an 'executing' message with node=None for
            # our prompt_id signals the graph has finished.
            while True:
                out = ws.recv()
                if not isinstance(out, str):
                    continue  # ignore non-text frames
                message = json.loads(out)
                if message['type'] == 'executing':
                    data = message['data']
                    if data['node'] is None and data['prompt_id'] == prompt_id:
                        break

            # Collect every image recorded in the prompt's history outputs.
            history = get_history(prompt_id, server_address)[prompt_id]
            images_output = []
            for node_id in history['outputs']:
                if 'images' in history['outputs'][node_id]:
                    for image in history['outputs'][node_id]['images']:
                        image_data = get_image(image['filename'], image['subfolder'], image['type'], server_address)
                        images_output.append(image_data)

            if not images_output:
                continue

            # Re-save locally as <timestamp>_<seed>_<index>.png.
            pil_images = [Image.open(io.BytesIO(data)) for data in images_output]
            for img_idx, img in enumerate(pil_images):
                filename = f"{int(time.time())}_{current_seed}_{img_idx}.png"
                filepath = os.path.join(OUTPUT_DIR, filename)
                img.save(filepath)
                all_generated_images.append(filepath)

        finally:
            if ws.connected:
                ws.close()

    return all_generated_images
452
-
453
def _create_openai_app():
    """Create FastAPI app with OpenAI-compatible routes.

    Routes:
      * GET  /v1/files/{filename}   - serve a previously generated image.
      * POST /v1/chat/completions   - generate image(s) from the latest user
        message and return them as URLs or base64 in the assistant message.
    """
    app = FastAPI()

    # FIX: the path template must be "{filename}" so FastAPI binds the URL
    # segment to the handler's `filename` parameter (the committed route
    # string did not contain a path parameter at all).
    @app.get("/v1/files/{filename}")
    def get_file(filename: str):
        # basename() guards against path traversal in the untrusted segment.
        path = os.path.join(OUTPUT_DIR, os.path.basename(filename))
        if os.path.exists(path):
            return FileResponse(path)
        return JSONResponse(status_code=404, content={"error": {"message": "File not found"}})

    @app.post("/v1/chat/completions")
    async def chat_completions(req: Request):
        body = await req.json()
        cfg = get_api_config()
        model = body.get("model", "gpt-image-proxy")
        messages = body.get("messages", [])
        n = int(body.get("n", cfg["api_n"]))
        # "b64_json" or "url"; the request may override the configured default.
        return_type = body.get("response_format", {}).get("type", cfg["api_return"])

        # Use the text of the latest user message as the positive prompt.
        user_text = ""
        for m in reversed(messages):
            if m.get("role") == "user":
                content = m.get("content", "")
                if isinstance(content, list):
                    # Multimodal content: concatenate only the text parts.
                    user_text = " ".join([item.get("text", "") for item in content if item.get("type") == "text"]).strip()
                else:
                    user_text = str(content)
                break

        # Use a random seed for each request and randomize across images in n
        req_seed = random.randint(0, 2**32 - 1)
        filepaths = generate_image_sync(
            cfg["server_address"], cfg["positive_prefix"], cfg["negative_prefix"], user_text, cfg["negative_prompt"],
            cfg["model"], cfg["sampler"], cfg["scheduler"], cfg["steps"], cfg["cfg"], cfg["width"], cfg["height"],
            int(req_seed), "randomize", 1, n, cfg["current_workflow"]
        )

        choices = []
        for idx, fp in enumerate(filepaths):
            if return_type == "url":
                content = f"/v1/files/{os.path.basename(fp)}"
            else:
                content = _encode_image_b64(fp)
            choices.append({
                "index": idx,
                "finish_reason": "stop",
                "message": {"role": "assistant", "content": content}
            })

        # OpenAI chat-completion response shape; x_comfy carries extra info.
        resp = {
            "id": f"chatcmpl-{uuid.uuid4().hex[:12]}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": model,
            "choices": choices,
            "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
            "x_comfy": {"seed": int(req_seed), "after_generate": "randomize"}
        }
        return JSONResponse(content=resp)

    return app
516
-
517
-
518
-
519
-
520
-
521
# --- Debounced Save Functions (queued, saved every 20s) ---
# Each function below is wired to a single UI component's change event: it
# queues the new value for the background config saver (debounced flush) and
# echoes the value back unchanged so the component keeps its state.
def save_server_address(server_address):
    queue_config_update(server_address=server_address)
    return server_address

def save_model(model):
    queue_config_update(model=model)
    return model

def save_sampler(sampler):
    queue_config_update(sampler=sampler)
    return sampler

def save_scheduler(scheduler):
    queue_config_update(scheduler=scheduler)
    return scheduler

def save_steps(steps):
    queue_config_update(steps=steps)
    return steps

def save_cfg(cfg):
    queue_config_update(cfg=cfg)
    return cfg

def save_width(width):
    queue_config_update(width=width)
    return width

def save_height(height):
    queue_config_update(height=height)
    return height

def save_batch_size(batch_size):
    queue_config_update(batch_size=batch_size)
    return batch_size

def save_batch_count(batch_count):
    queue_config_update(batch_count=batch_count)
    return batch_count

def save_seed(seed):
    queue_config_update(seed=seed)
    return seed

def save_after_generate(after_generate):
    queue_config_update(after_generate=after_generate)
    return after_generate

def save_positive_prefix(positive_prefix):
    queue_config_update(positive_prefix=positive_prefix)
    return positive_prefix

def save_negative_prefix(negative_prefix):
    queue_config_update(negative_prefix=negative_prefix)
    return negative_prefix

def save_positive_prompt(positive_prompt):
    queue_config_update(positive_prompt=positive_prompt)
    return positive_prompt

def save_negative_prompt(negative_prompt):
    queue_config_update(negative_prompt=negative_prompt)
    return negative_prompt

def save_preset_name(preset_name):
    queue_config_update(preset_name=preset_name)
    return preset_name

def save_current_workflow(current_workflow):
    queue_config_update(current_workflow=current_workflow)
    return current_workflow
593
-
594
def load_ui_config():
    """Loads user configuration and returns it for UI initialization.

    Also queries the ComfyUI server for the currently available models,
    samplers and schedulers so the dropdowns get live choices. Returns a
    tuple in the exact order the Gradio components are wired on startup:
    generator settings first, then the API-override settings.
    """
    config = load_user_config()

    # Get server address and fetch available options
    server_address = config.get("server_address", "127.0.0.1:8188")
    if not server_address.startswith("http://") and not server_address.startswith("https://"):
        server_address = "http://" + server_address

    object_info = get_object_info(server_address)
    available_models = get_models(object_info)
    available_samplers = get_samplers(object_info)
    available_schedulers = get_schedulers(object_info)

    return (
        config.get("server_address", "127.0.0.1:8188"),
        gr.update(value=config.get("model", ""), choices=available_models),
        gr.update(value=config.get("sampler", "euler"), choices=available_samplers),
        gr.update(value=config.get("scheduler", "normal"), choices=available_schedulers),
        config.get("steps", 30),
        config.get("cfg", 6.0),
        config.get("width", 768),
        config.get("height", 1280),
        config.get("batch_size", 1),
        config.get("batch_count", 1),
        config.get("seed", 757831338432565),
        config.get("after_generate", "randomize"),
        config.get("positive_prefix", ""),
        config.get("negative_prefix", ""),
        config.get("positive_prompt", "best quality,very aesthetic,highres,absurdres,sensitive,A girl dressed in a maid costume with a personality, kneeling in front of her master,"),
        config.get("negative_prompt", "lowres,(bad),bad feet,text,error,fewer,extra,missing,worst quality,jpeg artifacts,low quality,watermark,unfinished,displeasing,oldest,early,chromatic aberration,signature,artistic error,username,scan,[abstract],english text,shiny_skin,"),
        config.get("preset_name", "None"),
        gr.update(value=config.get("current_workflow", "workflow_template"), choices=list(GLOBAL_WORKFLOWS.keys())),
        # API settings
        config.get("api_return", "url"),
        config.get("api_n", 1),
        config.get("api_server_address", ""),
        config.get("api_model", ""),
        config.get("api_sampler", ""),
        config.get("api_scheduler", ""),
        config.get("api_steps", 30),
        config.get("api_cfg", 6.0),
        config.get("api_width", 768),
        config.get("api_height", 1280),
        config.get("api_seed", 757831338432565),
        config.get("api_after_generate", "randomize"),
        config.get("api_positive_prefix", ""),
        config.get("api_negative_prefix", ""),
        config.get("api_workflow", "workflow_template")
    )
644
-
645
- # --- ComfyUI API Functions ---
646
def get_image(filename, subfolder, folder_type, server_address):
    """Download one rendered image from the ComfyUI /view endpoint; returns raw bytes."""
    params = urllib.parse.urlencode({
        "filename": filename,
        "subfolder": subfolder,
        "type": folder_type,
    })
    with urllib.request.urlopen(f"{server_address}/view?{params}") as response:
        return response.read()
652
-
653
def queue_prompt(prompt, client_id, server_address):
    """POST a workflow to the ComfyUI /prompt endpoint.

    Parameters
    ----------
    prompt : dict
        The workflow graph in ComfyUI API (prompt) format.
    client_id : str
        Client UUID so websocket progress messages can be correlated.
    server_address : str
        Base URL of the ComfyUI server, e.g. "http://127.0.0.1:8188".

    Returns the decoded JSON response (contains 'prompt_id').
    """
    data = json.dumps({"prompt": prompt, "client_id": client_id}).encode('utf-8')
    req = urllib.request.Request(
        f"{server_address}/prompt",
        data=data,
        headers={"Content-Type": "application/json"},  # declare the JSON body
    )
    # Context manager ensures the HTTP response is closed; the original
    # called urlopen() without closing and leaked the connection each call.
    with urllib.request.urlopen(req) as response:
        return json.loads(response.read())
660
-
661
def get_history(prompt_id, server_address):
    """Fetch the execution history record for *prompt_id* from the ComfyUI server."""
    url = f"{server_address}/history/{prompt_id}"
    with urllib.request.urlopen(url) as response:
        payload = response.read()
    return json.loads(payload)
665
-
666
def get_object_info(server_address):
    """Fetch ComfyUI's /object_info node metadata; returns None on any failure."""
    url = f"{server_address}/object_info"
    try:
        with urllib.request.urlopen(url) as response:
            return json.loads(response.read())
    except Exception as e:
        # Best effort: the callers all degrade to built-in fallback lists.
        print(f"Failed to fetch object info: {e}")
        return None
674
-
675
def get_models(object_info):
    """Extract available model filenames from ComfyUI /object_info metadata.

    Collects checkpoint and UNET model lists from the known loader nodes,
    de-duplicating while preserving order. Returns a placeholder list when
    the info is missing or yields nothing.
    """
    if not object_info:
        return ["model.safetensors"]

    # (node class, input name) pairs whose "required" entry lists model files.
    sources = (
        ("CheckpointLoaderSimple", "ckpt_name"),
        ("UNETLoader", "unet_name"),
        ("UnetLoaderGGUF", "unet_name"),
    )
    models = []
    for node, input_name in sources:
        # .get() chains guard against partially populated node metadata; the
        # original indexed ["input"]["required"] directly and could KeyError.
        required = object_info.get(node, {}).get("input", {}).get("required", {})
        entry = required.get(input_name)
        if entry:
            models.extend(entry[0])  # first element is the list of choices

    if not models:
        return ["model.safetensors"]
    return list(dict.fromkeys(models))  # order-preserving de-duplication
692
-
693
def get_samplers(object_info):
    """Return KSampler's sampler_name choices from /object_info, or ["euler"].

    Uses .get() chains so partially populated KSampler metadata cannot raise
    KeyError (the original indexed ["input"]["required"] directly).
    """
    if object_info:
        entry = (object_info.get("KSampler", {})
                 .get("input", {}).get("required", {}).get("sampler_name"))
        if entry:
            return entry[0]
    return ["euler"]
697
-
698
def get_schedulers(object_info):
    """Return KSampler's scheduler choices from /object_info, or ["normal"].

    Uses .get() chains so partially populated KSampler metadata cannot raise
    KeyError (the original indexed ["input"]["required"] directly).
    """
    if object_info:
        entry = (object_info.get("KSampler", {})
                 .get("input", {}).get("required", {}).get("scheduler"))
        if entry:
            return entry[0]
    return ["normal"]
702
-
703
- # --- UI Callback Functions ---
704
def update_choices(server_address):
    """Refresh the model/sampler/scheduler dropdowns from a ComfyUI server.

    Returns three gr.update objects (models, samplers, schedulers).
    """
    if not server_address:
        return (gr.update(choices=[]), gr.update(choices=[]), gr.update(choices=[]))

    addr = server_address
    if not addr.startswith("http://") and not addr.startswith("https://"):
        addr = "http://" + addr
    addr = addr.rstrip('/')

    object_info = get_object_info(addr)
    models = get_models(object_info)
    samplers = get_samplers(object_info)
    schedulers = get_schedulers(object_info)

    def pick(options):
        # Select the first option when any exist, otherwise clear the value.
        return gr.update(choices=options, value=options[0] if options else None)

    return pick(models), pick(samplers), pick(schedulers)
725
-
726
- # --- Scheduler Functions ---
728
def _scheduler_loop(interval_minutes, gen_args_dict):
    """The actual background loop for the scheduler.

    Runs one generation immediately, then repeats every *interval_minutes*
    until SCHEDULER_STOP_EVENT is set. Each run's outcome is recorded in
    SCHEDULER_STATUS (under SCHEDULER_LOCK) for the UI status poller.
    """
    print(f"[Scheduler] Thread started. Interval: {interval_minutes} min.")
    # Perform the first run immediately without waiting
    first_run = True

    while not SCHEDULER_STOP_EVENT.is_set():
        wait_seconds = int(interval_minutes * 60)

        if not first_run:
            print(f"[Scheduler] Waiting for {interval_minutes} minute(s)...")
            # wait() returns True if the event was set, False if it timed out.
            if SCHEDULER_STOP_EVENT.wait(timeout=wait_seconds):
                break  # Stop was requested during sleep.

        first_run = False
        if SCHEDULER_STOP_EVENT.is_set(): break  # Check again in case stop was called during generation

        try:
            with SCHEDULER_LOCK:
                SCHEDULER_STATUS["last_run_status"] = "Running generation..."
            print("[Scheduler] Running scheduled generation...")

            # Use generate_image_sync as it's simpler and doesn't yield UI updates
            filepaths = generate_image_sync(**gen_args_dict)

            with SCHEDULER_LOCK:
                SCHEDULER_STATUS["last_run_time"] = time.strftime('%Y-%m-%d %H:%M:%S')
                if filepaths:
                    SCHEDULER_STATUS["last_run_status"] = "Success"
                    SCHEDULER_STATUS["last_image"] = filepaths[0]
                    print(f"[Scheduler] Successfully generated {len(filepaths)} image(s). Last image: {filepaths[0]}")
                else:
                    SCHEDULER_STATUS["last_run_status"] = "Success (no images)"
                    print("[Scheduler] Generation ran but produced no images.")

        except Exception as e:
            # Keep the loop alive on failure; surface only the error type to the UI.
            error_message = f"Error: {type(e).__name__}"
            with SCHEDULER_LOCK:
                SCHEDULER_STATUS["last_run_time"] = time.strftime('%Y-%m-%d %H:%M:%S')
                SCHEDULER_STATUS["last_run_status"] = error_message
            print(f"[Scheduler] An error occurred during generation: {e}")

    # Loop exit (stop requested): publish the final stopped state.
    print("[Scheduler] Loop finished.")
    with SCHEDULER_LOCK:
        SCHEDULER_STATUS["running"] = False
        SCHEDULER_STATUS["last_run_status"] = "Stopped"
775
-
776
-
777
def start_scheduler(interval, server_address, *gen_args):
    """Start the background scheduler thread.

    *gen_args* are the generator UI values in the fixed order of arg_names
    below; batch_count and batch_size are forced to 1 per scheduled run.
    Returns (status message, None) for the UI.
    """
    global SCHEDULER_THREAD
    with SCHEDULER_LOCK:
        if SCHEDULER_STATUS["running"]:
            return "Scheduler is already running.", None

        # Positional order must match the UI wiring of the start button.
        arg_names = ["positive_prefix", "negative_prefix", "positive_prompt", "negative_prompt", "model", "sampler", "scheduler", "steps", "cfg", "width", "height", "seed", "after_generate", "batch_size", "batch_count", "current_workflow"]

        gen_args_dict = dict(zip(arg_names, gen_args))
        gen_args_dict["server_address"] = server_address
        gen_args_dict["batch_count"] = 1 # Always 1 for scheduler
        gen_args_dict["batch_size"] = 1 # Also force batch size to 1 to be safe

        SCHEDULER_STOP_EVENT.clear()
        SCHEDULER_THREAD = threading.Thread(target=_scheduler_loop, args=(interval, gen_args_dict), daemon=True)
        SCHEDULER_THREAD.start()

        SCHEDULER_STATUS["running"] = True
        SCHEDULER_STATUS["interval"] = interval
        SCHEDULER_STATUS["last_run_status"] = "Started, running first job..."

    print(f"[Scheduler] Started with interval {interval} minutes.")
    # We can't return status here as it's not a generator anymore
    return "Scheduler started. Status will update automatically.", None
801
-
802
def stop_scheduler_global():
    """Signal the scheduler worker to stop and wait briefly for it to exit.

    Fix: the original called ``join`` while still holding ``SCHEDULER_LOCK``.
    The worker's shutdown path acquires that same lock to flip
    ``SCHEDULER_STATUS["running"]`` to False, so the join always stalled for
    its full 10-second timeout before the lock was released. The join now
    happens outside the lock so the worker can finish promptly.
    """
    global SCHEDULER_THREAD
    with SCHEDULER_LOCK:
        if not SCHEDULER_STATUS["running"]:
            return "Scheduler is not running."

        print("[Scheduler] Stop requested.")
        SCHEDULER_STOP_EVENT.set()
        worker = SCHEDULER_THREAD
        SCHEDULER_THREAD = None

    # Join OUTSIDE the lock: the worker needs SCHEDULER_LOCK for its own
    # cleanup (setting status to "Stopped"), so holding it here deadlocks
    # until the timeout expires.
    if worker:
        worker.join(timeout=10)

    # The loop itself will update the status to "Stopped"
    print("[Scheduler] Stopped.")
    return "Scheduler stopping... Status will update."
-
819
def get_scheduler_status_for_ui():
    """Build the (label text, gallery list) pair shown on the Scheduler tab."""
    with SCHEDULER_LOCK:
        last_status = SCHEDULER_STATUS["last_run_status"]
        last_image = SCHEDULER_STATUS["last_image"]
        last_time = SCHEDULER_STATUS["last_run_time"]

        if SCHEDULER_STATUS["running"]:
            text = (
                f"Running (Interval: {SCHEDULER_STATUS['interval']} min). "
                f"Last run: {last_time}. Status: {last_status}"
            )
        else:
            text = f"Stopped. Last run: {last_time}. Status: {last_status}"

        # Gradio galleries want a list (or None); only show the image if it
        # still exists on disk.
        gallery = None
        if last_image and os.path.exists(last_image):
            gallery = [last_image]
        return text, gallery
-
831
-
832
- # --- History Management Functions ---
833
def get_history_images():
    """Return all image files in OUTPUT_DIR, sorted newest-first.

    Robustness fix: a file can disappear between ``os.listdir`` and the
    ``getmtime`` lookup (e.g. removed by ``delete_image`` or another
    session). The original passed ``os.path.getmtime`` as the sort key,
    so a vanished file raised FileNotFoundError and crashed the whole
    history refresh. Mtimes are now captured once, and missing files are
    skipped.
    """
    if not os.path.isdir(OUTPUT_DIR):
        return []
    image_exts = ('.png', '.jpg', '.jpeg', '.gif', '.webp')
    stamped = []
    for name in os.listdir(OUTPUT_DIR):
        if not name.lower().endswith(image_exts):
            continue
        path = os.path.join(OUTPUT_DIR, name)
        try:
            stamped.append((os.path.getmtime(path), path))
        except OSError:
            # File vanished between listdir and stat; ignore it.
            continue
    stamped.sort(key=lambda item: item[0], reverse=True)
    return [path for _, path in stamped]
-
841
def delete_image(filepaths):
    """Remove the given gallery items from disk, then return the refreshed
    history list for the gallery component."""
    if isinstance(filepaths, list) and filepaths:
        for entry in filepaths:
            # Gallery items may arrive either as plain path strings or as
            # (path, caption) tuples depending on the Gradio version.
            path = entry[0] if isinstance(entry, tuple) else entry

            if path and os.path.exists(path):
                try:
                    os.remove(path)
                except Exception as e:
                    print(f"Error deleting file {path}: {e}")
    return get_history_images()
-
858
- # --- Core Generation Logic ---
859
def generate_images(server_address, positive_prefix, negative_prefix, positive_prompt, negative_prompt, model, sampler, scheduler, steps, cfg, width, height, seed, after_generate, batch_size, batch_count, current_workflow):
    """Generator driving one ComfyUI run per requested batch.

    Yields (status_text, image_paths) tuples so the Gradio UI can stream
    progress. For each batch it resolves the seed policy, opens a websocket
    to ComfyUI, substitutes %placeholders% in the selected workflow
    template, queues the prompt, waits for execution to finish, then
    downloads the resulting images and saves them under OUTPUT_DIR.
    """
    # Normalize server address
    if not server_address.startswith("http://") and not server_address.startswith("https://"):
        server_address = "http://" + server_address
    server_address = server_address.rstrip('/')

    # Derive the websocket endpoint from the HTTP(S) address.
    ws_address = "ws://" + server_address[len("http://"):]
    if server_address.startswith("https://"):
        ws_address = "wss://" + server_address[len("https://"):]

    client_id = str(uuid.uuid4())
    all_generated_images = []
    initial_seed = seed

    for i in range(batch_count):
        yield f"Running batch {i+1}/{batch_count}...", all_generated_images

        # Seed policy: fresh random seed, offset from the starting seed,
        # or the same seed for every batch.
        if after_generate == "randomize":
            current_seed = random.randint(0, 2**32 - 1)
        elif after_generate == "increment":
            current_seed = initial_seed + i
        elif after_generate == "decrement":
            current_seed = initial_seed - i
        else: # "fixed"
            current_seed = initial_seed

        ws = websocket.WebSocket()
        try:
            yield f"Batch {i+1}: Connecting...", all_generated_images
            ws.connect(f"{ws_address}/ws?clientId={client_id}")

            # Load workflow content
            workflow_content = load_workflow_content(current_workflow)
            if workflow_content is None:
                yield f"Error: Could not load workflow '{current_workflow}'", all_generated_images
                break
            workflow_content = json.dumps(workflow_content)

            # Combine prefix and main prompts
            final_positive_prompt = combine_prompts(positive_prefix, positive_prompt)
            final_negative_prompt = combine_prompts(negative_prefix, negative_prompt)

            # Replace placeholders with actual values
            workflow_content = workflow_content.replace('%prompt%', final_positive_prompt)
            workflow_content = workflow_content.replace('%negative_prompt%', final_negative_prompt)
            workflow_content = workflow_content.replace('%model%', model)
            workflow_content = workflow_content.replace('%width%', str(width))
            workflow_content = workflow_content.replace('%height%', str(height))
            workflow_content = workflow_content.replace('%batch_size%', str(batch_size))
            workflow_content = workflow_content.replace('%seed%', str(current_seed))
            workflow_content = workflow_content.replace('%steps%', str(steps))
            workflow_content = workflow_content.replace('%cfg%', str(cfg))
            workflow_content = workflow_content.replace('%sampler%', sampler)
            workflow_content = workflow_content.replace('%scheduler%', scheduler)

            # Parse the modified workflow
            prompt_workflow = json.loads(workflow_content)

            prompt_data = queue_prompt(prompt_workflow, client_id, server_address)
            prompt_id = prompt_data['prompt_id']

            # Stream execution progress until ComfyUI signals our prompt is
            # done: an 'executing' message with node=None for this prompt_id.
            while True:
                out = ws.recv()
                if not isinstance(out, str): continue
                message = json.loads(out)
                if message['type'] == 'executing':
                    data = message['data']
                    if data['node'] is None and data['prompt_id'] == prompt_id:
                        break
                    else:
                        node_id = data['node']
                        node_title = prompt_workflow.get(node_id, {}).get('_meta', {}).get('title', f"Node {node_id}")
                        yield f"Batch {i+1}: Executing {node_title}...", all_generated_images

            # Fetch the finished run's outputs from the server-side history.
            history = get_history(prompt_id, server_address)[prompt_id]
            images_output = []
            for node_id in history['outputs']:
                if 'images' in history['outputs'][node_id]:
                    for image in history['outputs'][node_id]['images']:
                        image_data = get_image(image['filename'], image['subfolder'], image['type'], server_address)
                        images_output.append(image_data)

            if not images_output:
                continue

            # Persist each image locally as <timestamp>_<seed>_<index>.png.
            pil_images = [Image.open(io.BytesIO(data)) for data in images_output]
            for img_idx, img in enumerate(pil_images):
                filename = f"{int(time.time())}_{current_seed}_{img_idx}.png"
                filepath = os.path.join(OUTPUT_DIR, filename)
                img.save(filepath)
                all_generated_images.insert(0, filepath) # Insert at beginning to show newest first

        except Exception as e:
            yield f"Error in batch {i+1}: {e}", all_generated_images
            break # Stop on error
        finally:
            if ws.connected:
                ws.close()

    yield "Done!", all_generated_images
-
961
- # --- Gradio UI ---
962
def create_ui():
    """Build and return the full Gradio Blocks application.

    Layout: five tabs (Generator, Scheduler/Keep-Alive, History,
    API Settings, Settings), followed by the event wiring that connects
    components to the generation, scheduler, history, API-config and
    workflow-management functions. Also starts the debounced config
    auto-saver as a side effect.
    """
    # Load initial configuration
    config = load_user_config()
    # Start auto-save background thread (debounced every 20s)
    start_config_saver()
    # Set initial default values (will be overridden by load_ui_config on page load)
    # Don't fetch from server during initialization to avoid validation errors
    available_models = []
    available_samplers = []
    available_schedulers = []

    # Set initial default values (will be overridden by load_ui_config on page load)
    default_server_address = "127.0.0.1:8188"
    default_model = ""
    default_sampler = "euler"
    default_scheduler = "normal"
    default_steps = 30
    default_cfg = 6.0
    default_width = 768
    default_height = 1280
    default_batch_size = 1
    default_batch_count = 1
    default_seed = 757831338432565
    default_after_generate = "randomize"
    default_positive_prefix = ""
    default_negative_prefix = ""
    default_positive = "best quality,very aesthetic,highres,absurdres,sensitive,A girl dressed in a maid costume with a personality, kneeling in front of her master,"
    default_negative = "lowres,(bad),bad feet,text,error,fewer,extra,missing,worst quality,jpeg artifacts,low quality,watermark,unfinished,displeasing,oldest,early,chromatic aberration,signature,artistic error,username,scan,[abstract],english text,shiny_skin,"
    default_preset_name = "None"
    default_workflow = "workflow_template"

    css = """
    :root { font-family: sans-serif; }
    #output_gallery img, #history_gallery img { border: 2px solid #e0e0e0; border-radius: 8px; }
    """

    with gr.Blocks(css=css, theme=gr.themes.Soft()) as app:
        gr.Markdown("<h1>ComfyUI Web Interface</h1>")

        with gr.Tabs():
            with gr.TabItem("Generator"):
                with gr.Row():
                    with gr.Column(scale=1):
                        gr.Markdown("<h3>⚙️ Settings</h3>")
                        with gr.Row():
                            server_addr = gr.Textbox(label="Server Address", value=default_server_address, scale=3)
                            refresh_btn = gr.Button("🔄 Refresh", scale=1)
                        model = gr.Dropdown(label="Model (Checkpoint Name)", choices=[""], value="")

                        with gr.Accordion("Workflow", open=True):
                            workflow_selector = gr.Dropdown(label="Workflow Template", choices=list(GLOBAL_WORKFLOWS.keys()), value=default_workflow)

                        with gr.Accordion("Sampling Parameters", open=True):
                            sampler = gr.Dropdown(label="Sampler", choices=["euler"], value="euler")
                            scheduler = gr.Dropdown(label="Scheduler", choices=["normal"], value="normal")
                            steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=default_steps)
                            cfg = gr.Slider(label="CFG Scale", minimum=0.0, maximum=20.0, step=0.1, value=default_cfg)

                        with gr.Accordion("Image Dimensions", open=True):
                            width = gr.Slider(label="Width", minimum=64, maximum=2048, step=64, value=default_width)
                            height = gr.Slider(label="Height", minimum=64, maximum=2048, step=64, value=default_height)
                            batch_size = gr.Slider(label="Batch Size (Images per generation)", minimum=1, maximum=16, step=1, value=default_batch_size)
                            batch_count = gr.Slider(label="Batch Count (Executions)", minimum=1, maximum=20, step=1, value=default_batch_count)

                        # Place seed and after_generate within the left settings column to keep two-column layout
                        with gr.Row():
                            seed = gr.Number(label="Seed", value=default_seed, precision=0)
                            after_generate = gr.Dropdown(
                                label="After Generate",
                                choices=["randomize", "increment", "decrement", "fixed"],
                                value=default_after_generate
                            )

                    with gr.Column(scale=2):
                        gr.Markdown("<h3>🎨 Prompts & Generation</h3>")

                        with gr.Accordion("Style Presets", open=True):
                            preset_selector = gr.Dropdown(label="Select Style", choices=list(GLOBAL_PRESETS.keys()), value=default_preset_name)
                            preset_name_input = gr.Textbox(label="Style Name (for saving)", lines=1)
                            positive_prefix_input = gr.Textbox(label="Positive Prefix", lines=3, interactive=True, value=default_positive_prefix)
                            negative_prefix_input = gr.Textbox(label="Negative Prefix", lines=3, interactive=True, value=default_negative_prefix)
                            with gr.Row():
                                save_preset_btn = gr.Button("💾 Save / Update Style")
                                delete_preset_btn = gr.Button("🗑️ Delete Style", variant="stop")
                            preset_status_label = gr.Label(value="Select a style to apply, or edit the fields and save a new one.")

                        positive_prompt = gr.Textbox(label="Positive Prompt (Your content)", lines=6, value=default_positive)
                        negative_prompt = gr.Textbox(label="Negative Prompt (Your content)", lines=3, value=default_negative)
                        generate_btn = gr.Button("Generate Image", variant="primary")
                        status_label = gr.Label(value="Idle", label="Status")
                        output_gallery = gr.Gallery(label="Generated Images", elem_id="output_gallery", columns=4)

            with gr.TabItem("Scheduler / Keep-Alive"):
                gr.Markdown("## Scheduled Generation")
                gr.Markdown("This feature will periodically run a generation task with the settings from the 'Generator' tab to keep a remote server active. It will always run with a 'Batch Count' of 1.")
                scheduler_interval = gr.Number(label="Interval (minutes)", value=10, minimum=1, step=1)
                with gr.Row():
                    start_scheduler_btn = gr.Button("Start Scheduler")
                    stop_scheduler_btn = gr.Button("Stop Scheduler")
                scheduler_status = gr.Label("Scheduler is stopped.")
                scheduler_output = gr.Gallery(label="Last Scheduled Image", columns=1, height="auto")

            with gr.TabItem("History"):
                with gr.Row():
                    refresh_history_btn = gr.Button("🔄 Refresh History")
                    delete_btn = gr.Button("🗑️ Delete Selected Images")
                history_gallery = gr.Gallery(label="Image History", elem_id="history_gallery", columns=8, allow_preview=True, preview=True)

            with gr.TabItem("API Settings"):
                gr.Markdown("## OpenAI-compatible API Settings")
                gr.Markdown("Here you can override the main generator settings for requests made to the OpenAI-compatible API. If a field is left blank, it will use the value from the main 'Generator' tab.")
                with gr.Row():
                    api_return = gr.Dropdown(label="Response Type", choices=["url", "b64_json"], value=USER_CONFIG.get("api_return", "url"))
                    api_n = gr.Slider(label="Images per request (n)", minimum=1, maximum=8, step=1, value=USER_CONFIG.get("api_n", 1))
                with gr.Accordion("Override Generation Parameters", open=False):
                    with gr.Row():
                        api_server_addr = gr.Textbox(label="Server Address (override)", value=USER_CONFIG.get("api_server_address", ""))
                        api_model = gr.Textbox(label="Model (ckpt)", value=USER_CONFIG.get("api_model", ""))
                    with gr.Row():
                        api_sampler = gr.Textbox(label="Sampler", value=USER_CONFIG.get("api_sampler", ""))
                        api_scheduler = gr.Textbox(label="Scheduler", value=USER_CONFIG.get("api_scheduler", ""))
                    with gr.Row():
                        api_steps = gr.Number(label="Steps", value=USER_CONFIG.get("api_steps", 30), precision=0)
                        api_cfg = gr.Number(label="CFG", value=USER_CONFIG.get("api_cfg", 6.0))
                    with gr.Row():
                        api_width = gr.Number(label="Width", value=USER_CONFIG.get("api_width", 768), precision=0)
                        api_height = gr.Number(label="Height", value=USER_CONFIG.get("api_height", 1280), precision=0)
                    with gr.Row():
                        api_seed = gr.Number(label="Seed", value=USER_CONFIG.get("api_seed", 757831338432565), precision=0)
                        api_after = gr.Dropdown(label="After Generate", choices=["randomize", "increment", "decrement", "fixed"], value=USER_CONFIG.get("api_after_generate", "randomize"))
                    with gr.Row():
                        api_pos_prefix = gr.Textbox(label="Positive Prefix", lines=2, value=USER_CONFIG.get("api_positive_prefix", ""))
                        api_neg_prefix = gr.Textbox(label="Negative Prefix", lines=2, value=USER_CONFIG.get("api_negative_prefix", ""))
                    with gr.Row():
                        api_workflow = gr.Dropdown(label="Workflow Template", choices=list(GLOBAL_WORKFLOWS.keys()), value=USER_CONFIG.get("api_workflow", "workflow_template"))
                api_status = gr.Label("")
                with gr.Row():
                    api_save_cfg_btn = gr.Button("Save API Settings")

            with gr.TabItem("Settings"):
                gr.Markdown("## Workflow Management")
                with gr.Row():
                    with gr.Column(scale=1):
                        gr.Markdown("### Workflow List")
                        workflow_list = gr.Dropdown(label="Select Workflow", choices=list(GLOBAL_WORKFLOWS.keys()), value=default_workflow)
                        with gr.Row():
                            load_workflow_btn = gr.Button("📂 Load Workflow")
                            delete_workflow_btn = gr.Button("🗑️ Delete Workflow", variant="stop")
                        workflow_status = gr.Label(value="Select a workflow to edit or create a new one.")

                    with gr.Column(scale=2):
                        gr.Markdown("### Workflow Editor")
                        workflow_name_input = gr.Textbox(label="Workflow Name", lines=1, value="workflow_template")
                        workflow_content_input = gr.Textbox(label="Workflow JSON Content", lines=20, value="", max_lines=30)
                        with gr.Row():
                            save_workflow_btn = gr.Button("💾 Save Workflow", variant="primary")
                            new_workflow_btn = gr.Button("➕ New Workflow")
                        workflow_editor_status = gr.Label(value="Edit the workflow JSON content above.")

                gr.Markdown("## Preferences")
                with gr.Row():
                    language_dropdown = gr.Dropdown(label="Language", choices=["en", "zh"], value=config.get("language", "en"))
                    autosave_interval = gr.Number(label="Autosave Interval (seconds)", value=config.get("config_save_interval", 20), minimum=5, step=1)
                with gr.Row():
                    save_prefs_btn = gr.Button("Save Preferences")
                prefs_status = gr.Label("")

        # Define Inputs/Outputs for main generation
        gen_inputs = [server_addr, positive_prefix_input, negative_prefix_input, positive_prompt, negative_prompt, model, sampler, scheduler, steps, cfg, width, height, seed, after_generate, batch_size, batch_count, workflow_selector]
        gen_outputs = [status_label, output_gallery]

        # Wire up events
        refresh_btn.click(fn=update_choices, inputs=server_addr, outputs=[model, sampler, scheduler])

        # Real-time save events
        server_addr.change(fn=save_server_address, inputs=server_addr, outputs=server_addr)
        model.change(fn=save_model, inputs=model, outputs=model)
        sampler.change(fn=save_sampler, inputs=sampler, outputs=sampler)
        scheduler.change(fn=save_scheduler, inputs=scheduler, outputs=scheduler)
        steps.change(fn=save_steps, inputs=steps, outputs=steps)
        cfg.change(fn=save_cfg, inputs=cfg, outputs=cfg)
        width.change(fn=save_width, inputs=width, outputs=width)
        height.change(fn=save_height, inputs=height, outputs=height)
        batch_size.change(fn=save_batch_size, inputs=batch_size, outputs=batch_size)
        batch_count.change(fn=save_batch_count, inputs=batch_count, outputs=batch_count)
        seed.change(fn=save_seed, inputs=seed, outputs=seed)
        after_generate.change(fn=save_after_generate, inputs=after_generate, outputs=after_generate)
        # Save text fields on blur instead of every keystroke
        positive_prefix_input.blur(fn=save_positive_prefix, inputs=positive_prefix_input, outputs=positive_prefix_input)
        negative_prefix_input.blur(fn=save_negative_prefix, inputs=negative_prefix_input, outputs=negative_prefix_input)
        positive_prompt.blur(fn=save_positive_prompt, inputs=positive_prompt, outputs=positive_prompt)
        negative_prompt.blur(fn=save_negative_prompt, inputs=negative_prompt, outputs=negative_prompt)
        preset_selector.change(fn=save_preset_name, inputs=preset_selector, outputs=preset_selector)
        workflow_selector.change(fn=save_current_workflow, inputs=workflow_selector, outputs=workflow_selector)

        # Preset events
        preset_selector.change(fn=select_preset, inputs=preset_selector, outputs=[preset_name_input, positive_prefix_input, negative_prefix_input])
        save_preset_btn.click(fn=save_or_update_preset, inputs=[preset_name_input, positive_prefix_input, negative_prefix_input], outputs=[preset_selector, preset_status_label])
        delete_preset_btn.click(fn=delete_preset, inputs=[preset_name_input], outputs=[preset_selector, preset_name_input, positive_prefix_input, negative_prefix_input, preset_status_label])

        # After a generation finishes, refresh the history gallery too.
        gen_event = generate_btn.click(fn=generate_images, inputs=gen_inputs, outputs=gen_outputs)
        gen_event.then(fn=get_history_images, outputs=history_gallery)

        # Scheduler Tab Events
        scheduler_inputs = [scheduler_interval, server_addr, positive_prefix_input, negative_prefix_input, positive_prompt, negative_prompt, model, sampler, scheduler, steps, cfg, width, height, seed, after_generate, batch_size, batch_count, workflow_selector]

        start_scheduler_btn.click(fn=start_scheduler, inputs=scheduler_inputs, outputs=[scheduler_status, scheduler_output])
        stop_scheduler_btn.click(fn=stop_scheduler_global, inputs=None, outputs=scheduler_status)

        # History Tab Events
        refresh_history_btn.click(fn=get_history_images, outputs=history_gallery)
        delete_btn.click(fn=delete_image, inputs=history_gallery, outputs=history_gallery)

        # API Settings Tab Events
        def save_api_settings(*api_args):
            """Coerce the API-override widget values to their proper types and
            queue them for the debounced config auto-saver."""
            keys = [
                "api_return", "api_n", "api_server_address", "api_model", "api_sampler",
                "api_scheduler", "api_steps", "api_cfg", "api_width", "api_height",
                "api_seed", "api_after_generate", "api_positive_prefix", "api_negative_prefix", "api_workflow"
            ]
            # Convert to correct types
            typed_args = list(api_args)
            typed_args[1] = int(typed_args[1]) # api_n
            typed_args[6] = int(typed_args[6]) # api_steps
            typed_args[7] = float(typed_args[7]) # api_cfg
            typed_args[8] = int(typed_args[8]) # api_width
            typed_args[9] = int(typed_args[9]) # api_height
            typed_args[10] = int(typed_args[10]) # api_seed

            api_config_dict = dict(zip(keys, typed_args))
            queue_config_update(**api_config_dict)
            return "API settings saved (will be applied on next auto-save)."

        api_inputs = [
            api_return, api_n, api_server_addr, api_model, api_sampler, api_scheduler,
            api_steps, api_cfg, api_width, api_height, api_seed, api_after,
            api_pos_prefix, api_neg_prefix, api_workflow
        ]
        api_save_cfg_btn.click(fn=save_api_settings, inputs=api_inputs, outputs=api_status)

        # Workflow management events
        def load_workflow_to_editor(workflow_name):
            """Loads a workflow into the editor."""
            if not workflow_name or workflow_name == "workflow_template":
                workflow_path = os.path.join(WORKFLOWS_DIR, "workflow_template.json")
            else:
                workflow_path = os.path.join(WORKFLOWS_DIR, f"{workflow_name}.json")

            if os.path.exists(workflow_path):
                try:
                    with open(workflow_path, 'r', encoding='utf-8') as f:
                        content = f.read()
                    return workflow_name, content, f"Loaded workflow '{workflow_name}'"
                except Exception as e:
                    return workflow_name, "", f"Error loading workflow: {e}"
            else:
                return workflow_name, "", f"Workflow '{workflow_name}' not found"

        def save_workflow_from_editor(workflow_name, workflow_content):
            """Saves workflow from editor."""
            success, message = save_workflow(workflow_name, workflow_content)
            if success:
                # Reload workflows
                global GLOBAL_WORKFLOWS
                GLOBAL_WORKFLOWS = load_workflows()
                return gr.update(choices=list(GLOBAL_WORKFLOWS.keys()), value=workflow_name), message
            else:
                return gr.update(), message

        def delete_workflow_from_editor(workflow_name):
            """Deletes a workflow."""
            success, message = delete_workflow(workflow_name)
            if success:
                # Reload workflows
                global GLOBAL_WORKFLOWS
                GLOBAL_WORKFLOWS = load_workflows()
                return (gr.update(choices=list(GLOBAL_WORKFLOWS.keys()), value="workflow_template"),
                        "workflow_template", "", message)
            else:
                return gr.update(), workflow_name, "", message

        def create_new_workflow():
            """Creates a new empty workflow."""
            return "new_workflow", "", "New workflow created. Enter a name and JSON content."

        # Wire up workflow management events
        load_workflow_btn.click(fn=load_workflow_to_editor, inputs=workflow_list, outputs=[workflow_name_input, workflow_content_input, workflow_status])
        save_workflow_btn.click(fn=save_workflow_from_editor, inputs=[workflow_name_input, workflow_content_input], outputs=[workflow_list, workflow_editor_status])
        delete_workflow_btn.click(fn=delete_workflow_from_editor, inputs=workflow_name_input, outputs=[workflow_list, workflow_name_input, workflow_content_input, workflow_editor_status])
        new_workflow_btn.click(fn=create_new_workflow, outputs=[workflow_name_input, workflow_content_input, workflow_editor_status])

        # Preferences events
        def save_preferences(lang, interval):
            """Persist language and autosave-interval preferences."""
            queue_config_update(language=lang)
            seconds = set_config_save_interval(interval)
            return f"Saved. Language: {lang}, autosave: {seconds}s"

        save_prefs_btn.click(fn=save_preferences, inputs=[language_dropdown, autosave_interval], outputs=prefs_status)

        # --- App Load and Polling Events ---

        # This function will be polled to update dynamic UI elements
        def poll_updates():
            history = get_history_images()
            status_text, image_list = get_scheduler_status_for_ui()
            return history, status_text, image_list

        # Load user config on page load (runs once)
        app.load(fn=load_ui_config, outputs=[
            server_addr, model, sampler, scheduler, steps, cfg, width, height,
            batch_size, batch_count, seed, after_generate, positive_prefix_input,
            negative_prefix_input, positive_prompt, negative_prompt, preset_selector, workflow_selector,
            # API settings
            api_return, api_n, api_server_addr, api_model, api_sampler, api_scheduler,
            api_steps, api_cfg, api_width, api_height, api_seed, api_after,
            api_pos_prefix, api_neg_prefix, api_workflow
        ])

        # Poll for history and scheduler status updates every 5 seconds
        # Use a backward-compatible method for creating a timer
        if hasattr(gr, 'Timer'):
            # New way for Gradio 4.x and later
            timer = gr.Timer(5)
            timer.tick(fn=poll_updates, outputs=[history_gallery, scheduler_status, scheduler_output])
        else:
            # Old way for Gradio 3.x
            app.load(fn=poll_updates, outputs=[history_gallery, scheduler_status, scheduler_output], every=5)
    return app
if __name__ == "__main__":
    import argparse

    # FastAPI/uvicorn are imported optionally at module top; bail out early
    # with a clear message if they are missing.
    if FastAPI is None or uvicorn is None:
        raise RuntimeError("FastAPI/uvicorn not installed. Please install with: pip install fastapi uvicorn")

    # 1. Create the FastAPI app that will host the API.
    api_app = _create_openai_app()

    # 2. Create the Gradio UI app. This also starts the config saver.
    webui_app = create_ui()

    # 3. Mount the Gradio app at the root of the FastAPI app.
    # The FastAPI app becomes the main entry point.
    final_app = gr.mount_gradio_app(api_app, webui_app, path="/")

    parser = argparse.ArgumentParser()
    parser.add_argument("--host", type=str, default="0.0.0.0", help="Host to run the server on.")
    # Honor the PORT env var (e.g. on hosting platforms) before falling back to 7860.
    default_port = int(os.getenv('PORT', 7860))
    parser.add_argument("--port", type=int, default=default_port, help="Port to run the server on.")
    args = parser.parse_args()

    print("---")
    print(f"Starting server on {args.host}:{args.port}")
    print("The Gradio UI will be at the root path.")
    print(f"OpenAI-compatible API will be available under http://{args.host}:{args.port}/v1")
    print("---")

    try:
        uvicorn.run(final_app, host=args.host, port=args.port)
    finally:
        # Flush any pending config updates on exit
        _flush_pending_config()