Rishabh12j committed on
Commit
2121d66
·
verified ·
1 Parent(s): a772c12

Upload 2 files

Browse files
Files changed (2) hide show
  1. app_copy.py +113 -0
  2. workflow_api_text2img.json +135 -0
app_copy.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import random
4
+ import time
5
+ from pathlib import Path
6
+
7
+ import gradio as gr
8
+ import requests
9
+
10
+ from PIL import Image
11
+
12
+
13
# --- Configuration -----------------------------------------------------------
# Root of the local ComfyUI installation.  Defaults to the original hard-coded
# Windows path, but can be overridden via the COMFYUI_DIR environment variable
# so the app is portable across machines.
BASE_FOLDER = Path(
    os.environ.get(
        "COMFYUI_DIR",
        "C:/Users/risha/OneDrive/Documents/CourseWork/ComfyUI/ComfyUI_windows_portable/ComfyUI",
    )
)
MODELS_FOLDER = BASE_FOLDER / "models/checkpoints"            # Stable Diffusion checkpoints
UPSCALE_MODELS_FOLDER = BASE_FOLDER / "models/upscale_models"  # upscaler weights
INPUT_DIR = BASE_FOLDER / "input"    # source images for img2img workflows
OUTPUT_DIR = BASE_FOLDER / "output"  # ComfyUI writes generated PNGs here

# HTTP endpoint of the local ComfyUI server used to queue a workflow.
URL = "http://127.0.0.1:8188/prompt"

# Exported API-format workflow JSON files (node graphs keyed by node id).
TXT2IMG = "workflow_api_text2img.json"
IMG2IMG = "workflow_api_img2img.json"
HIRES = "workflow_api_HiRes.json"

# Sampler identifiers accepted by ComfyUI's KSamplerSelect node.
KSAMPLER_NAMES = [
    "euler",
    "euler_ancestral",
    "dpmpp_sde",
    "dpmpp_2m",
    "dpmpp_2m_sde",
    "dpmpp_3m_sde",
    "ddpm",
    "lcm",
]

# Scheduler identifiers accepted by ComfyUI.
SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform"]


# Gradio CSS: colored borders used to highlight prompt textboxes.
css = """
.green {
border: 3px solid green !important;
}
.red {
border: 3px solid red !important;
}
"""
45
+
46
+
47
def checkpoint_list():
    """Return the sorted file names of all available SD checkpoints.

    Only regular files are listed: ``glob("*")`` alone would also return
    subdirectories inside the checkpoints folder, which are not loadable
    checkpoints, so they are filtered out.  Sorting keeps the dropdown stable
    across runs.
    """
    return sorted(model.name for model in MODELS_FOLDER.glob("*") if model.is_file())
49
+
50
+
51
+
52
def get_latest_image():
    """Return the most recently modified PNG in OUTPUT_DIR, or None if empty.

    A single O(n) ``max`` scan by mtime replaces the original sort-then-index
    approach; ``default=None`` preserves the original behavior when the
    output folder contains no PNGs yet.
    """
    return max(OUTPUT_DIR.glob("*.png"), key=os.path.getmtime, default=None)
57
+
58
+
59
def start_queue(prompt_workflow):
    """POST a workflow graph to the local ComfyUI ``/prompt`` endpoint.

    Fire-and-forget: the HTTP response is intentionally ignored; completion is
    detected separately by polling the output directory.  A request timeout is
    set so the app cannot hang indefinitely if the server is unreachable.
    """
    payload = json.dumps({"prompt": prompt_workflow}).encode("utf-8")
    requests.post(URL, data=payload, timeout=30)
63
+
64
+
65
def txt2img_workflow(checkpoint_name, positive_prompt):
    """Queue a text-to-image run on ComfyUI and return the resulting image path.

    Parameters
    ----------
    checkpoint_name : str
        File name of the checkpoint to load (patched into node "1").
    positive_prompt : str
        Positive conditioning text (patched into node "3").

    Returns
    -------
    pathlib.Path
        Path of the newly produced PNG in OUTPUT_DIR.

    Blocks until a new image appears in the output folder, polling once per
    second.  NOTE(review): this loops forever if generation fails server-side;
    consider adding an overall timeout.
    """
    # Explicit encoding avoids platform-dependent default-codepage decoding.
    with open(TXT2IMG, "r", encoding="utf-8") as file_json:
        prompt = json.load(file_json)

    prompt["1"]["inputs"]["ckpt_name"] = checkpoint_name
    # Fresh random seed each run so identical prompts still produce new images.
    prompt["2"]["inputs"]["noise_seed"] = random.randint(1, 999999999999)
    prompt["3"]["inputs"]["text"] = positive_prompt

    print(prompt["2"]["inputs"]["noise_seed"])

    # Snapshot the newest output BEFORE queueing so the new file is detectable.
    previous_image = get_latest_image()

    start_queue(prompt)

    while True:
        latest_image = get_latest_image()
        if latest_image != previous_image:
            return latest_image

        time.sleep(1)
84
+
85
+
86
+
87
def main():
    """Build and launch the Gradio UI for the txt2img ComfyUI workflow.

    Layout: a checkpoint dropdown on top, then a "txt2img" tab with a
    positive-prompt textbox (green border, see module-level ``css``), a
    Generate button, and the output image.  The button wires the inputs
    straight into :func:`txt2img_workflow`.
    """
    with gr.Blocks(css=css) as demo:
        models = checkpoint_list()

        with gr.Row():
            with gr.Column():
                base_checkpoint = gr.Dropdown(choices=models, label="Stable Diffusion Checkpoint")

        with gr.Tab("txt2img"):
            with gr.Row():
                with gr.Column(scale=3):
                    positive = gr.Textbox(lines=4, placeholder="Positive prompt", container=False, elem_classes="green")

                with gr.Column(scale=1):
                    generate_btn_txt2img = gr.Button("Generate")

            with gr.Row():
                output_img = gr.Image(label="Output", interactive=False)

            generate_btn_txt2img.click(fn=txt2img_workflow, inputs=[base_checkpoint, positive], outputs=output_img)

    demo.launch()


if __name__ == "__main__":
    main()
workflow_api_text2img.json ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "1": {
3
+ "inputs": {
4
+ "ckpt_name": "sd_xl_turbo_1.0_fp16.safetensors"
5
+ },
6
+ "class_type": "CheckpointLoaderSimple",
7
+ "_meta": {
8
+ "title": "Load Checkpoint"
9
+ }
10
+ },
11
+ "2": {
12
+ "inputs": {
13
+ "add_noise": true,
14
+ "noise_seed": 240322318706585,
15
+ "cfg": 1.1,
16
+ "model": [
17
+ "1",
18
+ 0
19
+ ],
20
+ "positive": [
21
+ "3",
22
+ 0
23
+ ],
24
+ "negative": [
25
+ "4",
26
+ 0
27
+ ],
28
+ "sampler": [
29
+ "6",
30
+ 0
31
+ ],
32
+ "sigmas": [
33
+ "7",
34
+ 0
35
+ ],
36
+ "latent_image": [
37
+ "8",
38
+ 0
39
+ ]
40
+ },
41
+ "class_type": "SamplerCustom",
42
+ "_meta": {
43
+ "title": "SamplerCustom"
44
+ }
45
+ },
46
+ "3": {
47
+ "inputs": {
48
+ "text": "a colorful 3d blender anime portrait of a girl with black long hair and glasses",
49
+ "clip": [
50
+ "1",
51
+ 1
52
+ ]
53
+ },
54
+ "class_type": "CLIPTextEncode",
55
+ "_meta": {
56
+ "title": "CLIP Text Encode (Prompt)"
57
+ }
58
+ },
59
+ "4": {
60
+ "inputs": {
61
+ "text": "",
62
+ "clip": [
63
+ "1",
64
+ 1
65
+ ]
66
+ },
67
+ "class_type": "CLIPTextEncode",
68
+ "_meta": {
69
+ "title": "CLIP Text Encode (Prompt)"
70
+ }
71
+ },
72
+ "5": {
73
+ "inputs": {
74
+ "samples": [
75
+ "2",
76
+ 0
77
+ ],
78
+ "vae": [
79
+ "1",
80
+ 2
81
+ ]
82
+ },
83
+ "class_type": "VAEDecode",
84
+ "_meta": {
85
+ "title": "VAE Decode"
86
+ }
87
+ },
88
+ "6": {
89
+ "inputs": {
90
+ "sampler_name": "euler_ancestral"
91
+ },
92
+ "class_type": "KSamplerSelect",
93
+ "_meta": {
94
+ "title": "KSamplerSelect"
95
+ }
96
+ },
97
+ "7": {
98
+ "inputs": {
99
+ "steps": 10,
100
+ "denoise": 1,
101
+ "model": [
102
+ "1",
103
+ 0
104
+ ]
105
+ },
106
+ "class_type": "SDTurboScheduler",
107
+ "_meta": {
108
+ "title": "SDTurboScheduler"
109
+ }
110
+ },
111
+ "8": {
112
+ "inputs": {
113
+ "width": 512,
114
+ "height": 512,
115
+ "batch_size": 1
116
+ },
117
+ "class_type": "EmptyLatentImage",
118
+ "_meta": {
119
+ "title": "Empty Latent Image"
120
+ }
121
+ },
122
+ "30": {
123
+ "inputs": {
124
+ "filename_prefix": "ComfyUI",
125
+ "images": [
126
+ "5",
127
+ 0
128
+ ]
129
+ },
130
+ "class_type": "SaveImage",
131
+ "_meta": {
132
+ "title": "Save Image"
133
+ }
134
+ }
135
+ }