kikinamatata committed on
Commit
8b958d1
·
1 Parent(s): 3887ba2

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +138 -0
  2. workflow_api.json +238 -0
app.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import websocket
2
+ import uuid
3
+ import io
4
+ import gradio as gr
5
+ import numpy as np
6
+ from PIL import Image
7
+ import random
8
+ import json
9
+ import requests
10
+ import urllib.parse
11
+
12
# Random per-process client id; the ComfyUI server uses it to route
# websocket progress events back to this session.
client_id = f"{uuid.uuid4()}"
13
+
14
def queue_prompt(prompt):
    """Submit a workflow graph to the ComfyUI /prompt queue and return
    the decoded JSON response (contains ``prompt_id``)."""
    # NOTE(review): `server_address` is read as a module-level global but is
    # never defined at module scope in this file — calling this top-level
    # helper raises NameError.  image_mod() defines shadowing local copies
    # that close over its own server_address argument; confirm whether these
    # module-level duplicates are dead code.
    payload = json.dumps({"prompt": prompt, "client_id": client_id})
    response = requests.post(f"http://{server_address}/prompt",
                             data=payload.encode("utf-8"))
    return response.json()
19
+
20
def get_image(filename, subfolder, folder_type):
    """Download one generated image (raw bytes) from the server's
    /view endpoint."""
    # NOTE(review): relies on a module-level `server_address` that is never
    # defined in this file — see the duplicates inside image_mod().
    query = urllib.parse.urlencode(
        {"filename": filename, "subfolder": subfolder, "type": folder_type}
    )
    with requests.get(f"http://{server_address}/view?{query}") as resp:
        return resp.content
25
+
26
def get_history(prompt_id):
    """Fetch the execution history record for *prompt_id* as a dict."""
    # NOTE(review): relies on a module-level `server_address` that is never
    # defined in this file — see the duplicates inside image_mod().
    with requests.get(f"http://{server_address}/history/{prompt_id}") as resp:
        return resp.json()
29
+
30
def get_images(prompt_id):
    """Collect every output image produced by a finished prompt.

    Looks up the server-side history for *prompt_id* and downloads each
    image of each output node via get_image().

    Returns:
        dict mapping node id -> list of raw image bytes.
    """
    # Bug fix: the original wrapped this in a redundant outer loop
    # (`for o in history['outputs']:`) whose loop variable was unused,
    # re-downloading every image once per output node (O(n^2) network
    # requests) while producing identical results.
    history = get_history(prompt_id)[prompt_id]
    output_images = {}
    for node_id, node_output in history['outputs'].items():
        if 'images' in node_output:
            output_images[node_id] = [
                get_image(img['filename'], img['subfolder'], img['type'])
                for img in node_output['images']
            ]
    return output_images
44
+
45
+
46
+ """
47
+ prompt = json.load(open('workflow_api.json'))
48
+ prompt["3"]["inputs"]["seed"] = random.randint(1, 1125899906842600)
49
+
50
+ ws = websocket.WebSocket()
51
+ ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
52
+ images = get_images(ws, prompt)
53
+ for node_id in images:
54
+ for image_data in images[node_id]:
55
+ im = Image.open(io.BytesIO(image_data))
56
+ im.show()"""
57
+
58
def image_mod(server_address, image_path, pr=gr.Progress()):
    """Upload an image to a ComfyUI server, run the workflow on it, and
    return the resulting images.

    Args:
        server_address: ``host:port`` of a running ComfyUI server.
        image_path: local filesystem path of the input image.
        pr: Gradio progress tracker.  The instance default is the
            conventional Gradio injection idiom, not a mutable-default bug.

    Returns:
        list of PIL.Image objects produced by the workflow; on upload
        failure, a one-element list holding the unmodified input image.
    """
    # Local REST helpers bound to this call's server_address (the
    # module-level copies read a global that is never defined).
    def queue_prompt(prompt):
        # Include client_id so the server routes ws events to this session.
        payload = json.dumps({"prompt": prompt, "client_id": client_id})
        return requests.post("http://{}/prompt".format(server_address),
                             data=payload.encode('utf-8')).json()

    def get_image(filename, subfolder, folder_type):
        # Download one generated image (raw bytes) from /view.
        query = urllib.parse.urlencode(
            {"filename": filename, "subfolder": subfolder, "type": folder_type})
        with requests.get("http://{}/view?{}".format(server_address, query)) as resp:
            return resp.content

    def get_history(prompt_id):
        # Fetch the execution record for a finished prompt.
        with requests.get("http://{}/history/{}".format(server_address, prompt_id)) as resp:
            return resp.json()

    def get_images(prompt_id):
        # Map node id -> list of raw image bytes.
        # Bug fix: the original had a redundant outer loop over
        # history['outputs'] that re-downloaded every image once per
        # output node.
        history = get_history(prompt_id)[prompt_id]
        output_images = {}
        for node_id, node_output in history['outputs'].items():
            if 'images' in node_output:
                output_images[node_id] = [
                    get_image(img['filename'], img['subfolder'], img['type'])
                    for img in node_output['images']
                ]
        return output_images

    # Upload the source image.  Bug fix: the original opened the file
    # without ever closing it; `with` guarantees the handle is released.
    with open(image_path, 'rb') as fh:
        response = requests.post(
            "http://{}/upload/image".format(server_address),
            files={"image": fh},
            data={"overwrite": None, "subfolder": "", "type": None},
        )
    if response.status_code != 200:
        print("Image upload failed:", response.text)
        # Bug fix: return a list so the gr.Gallery output component
        # receives the same shape on failure as on success (the original
        # returned a bare Image here but a list below).
        return [Image.open(image_path)]
    print("Image uploaded successfully!")

    # Patch the workflow graph: fresh random seed for the KSampler node,
    # and point node "12" (LoadImage) at the server-assigned upload name.
    with open('workflow_api.json') as f:
        prompt = json.load(f)
    prompt["3"]["inputs"]["seed"] = random.randint(1, 1125899906842600)
    prompt["12"]["inputs"]["image"] = response.json()["name"]

    # Queue the prompt and watch websocket events until it finishes.
    ws = websocket.WebSocket()
    ws.connect("ws://{}/ws?clientId={}".format(server_address, client_id))
    try:
        prompt_id = queue_prompt(prompt)['prompt_id']
        while True:
            out = ws.recv()
            if not isinstance(out, str):
                # Non-text frames (e.g. binary previews) are ignored.
                continue
            message = json.loads(out)
            if message['type'] == 'executing':
                data = message['data']
                # node == None for our prompt_id signals execution finished.
                if data['node'] is None and data['prompt_id'] == prompt_id:
                    break
            if message['type'] == 'progress':
                data = message['data']
                pr((data['value'], data['max']))
    finally:
        # Bug fix: the original never closed the websocket connection.
        ws.close()

    # Decode every downloaded image into a PIL Image for the gallery.
    result = []
    for node_id, blobs in get_images(prompt_id).items():
        for image_data in blobs:
            result.append(Image.open(io.BytesIO(image_data)))
    return result
130
+
131
# Gradio UI: a textbox for the ComfyUI server address plus an image
# picker; image_mod's results are rendered in a gallery.
server_box = gr.Textbox(label='Server Address')
input_image = gr.Image(type='filepath')

iface = gr.Interface(
    fn=image_mod,
    inputs=[server_box, input_image],
    outputs=gr.Gallery(),
    title="Image Processor",
)

# queue() is required so the gr.Progress updates stream to the browser.
iface.queue().launch(share=True)
workflow_api.json ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "3": {
3
+ "inputs": {
4
+ "seed": 2674941754,
5
+ "steps": 20,
6
+ "cfg": 7,
7
+ "sampler_name": "dpmpp_2m",
8
+ "scheduler": "karras",
9
+ "denoise": 1,
10
+ "model": [
11
+ "23",
12
+ 0
13
+ ],
14
+ "positive": [
15
+ "11",
16
+ 0
17
+ ],
18
+ "negative": [
19
+ "11",
20
+ 1
21
+ ],
22
+ "latent_image": [
23
+ "5",
24
+ 0
25
+ ]
26
+ },
27
+ "class_type": "KSampler",
28
+ "_meta": {
29
+ "title": "KSampler"
30
+ }
31
+ },
32
+ "4": {
33
+ "inputs": {
34
+ "ckpt_name": "sd_xl_base_1.0.safetensors"
35
+ },
36
+ "class_type": "CheckpointLoaderSimple",
37
+ "_meta": {
38
+ "title": "Load Checkpoint"
39
+ }
40
+ },
41
+ "5": {
42
+ "inputs": {
43
+ "width": 1024,
44
+ "height": 1024,
45
+ "batch_size": 1
46
+ },
47
+ "class_type": "EmptyLatentImage",
48
+ "_meta": {
49
+ "title": "Empty Latent Image"
50
+ }
51
+ },
52
+ "6": {
53
+ "inputs": {
54
+ "text": "digital painting style",
55
+ "clip": [
56
+ "28",
57
+ 0
58
+ ]
59
+ },
60
+ "class_type": "CLIPTextEncode",
61
+ "_meta": {
62
+ "title": "CLIP Text Encode (Prompt)"
63
+ }
64
+ },
65
+ "7": {
66
+ "inputs": {
67
+ "text": "",
68
+ "clip": [
69
+ "28",
70
+ 0
71
+ ]
72
+ },
73
+ "class_type": "CLIPTextEncode",
74
+ "_meta": {
75
+ "title": "CLIP Text Encode (Prompt)"
76
+ }
77
+ },
78
+ "8": {
79
+ "inputs": {
80
+ "samples": [
81
+ "3",
82
+ 0
83
+ ],
84
+ "vae": [
85
+ "4",
86
+ 2
87
+ ]
88
+ },
89
+ "class_type": "VAEDecode",
90
+ "_meta": {
91
+ "title": "VAE Decode"
92
+ }
93
+ },
94
+ "10": {
95
+ "inputs": {
96
+ "control_net_name": "controlnet-sd-xl-1.0-softedge-dexined.safetensors"
97
+ },
98
+ "class_type": "ControlNetLoader",
99
+ "_meta": {
100
+ "title": "Load ControlNet Model"
101
+ }
102
+ },
103
+ "11": {
104
+ "inputs": {
105
+ "strength": 1,
106
+ "start_percent": 0,
107
+ "end_percent": 1,
108
+ "positive": [
109
+ "6",
110
+ 0
111
+ ],
112
+ "negative": [
113
+ "7",
114
+ 0
115
+ ],
116
+ "control_net": [
117
+ "10",
118
+ 0
119
+ ],
120
+ "image": [
121
+ "13",
122
+ 0
123
+ ]
124
+ },
125
+ "class_type": "ControlNetApplyAdvanced",
126
+ "_meta": {
127
+ "title": "Apply ControlNet (Advanced)"
128
+ }
129
+ },
130
+ "12": {
131
+ "inputs": {
132
+ "image": "244265560_10227611884548979_6104897418530269593_n.jpg",
133
+ "upload": "image"
134
+ },
135
+ "class_type": "LoadImage",
136
+ "_meta": {
137
+ "title": "Load Image"
138
+ }
139
+ },
140
+ "13": {
141
+ "inputs": {
142
+ "safe": "disable",
143
+ "resolution": 1024,
144
+ "image": [
145
+ "12",
146
+ 0
147
+ ]
148
+ },
149
+ "class_type": "PiDiNetPreprocessor",
150
+ "_meta": {
151
+ "title": "PiDiNet Lines"
152
+ }
153
+ },
154
+ "23": {
155
+ "inputs": {
156
+ "lora_name": "digital_painting-000007.safetensors",
157
+ "strength_model": 1,
158
+ "strength_clip": 1,
159
+ "model": [
160
+ "4",
161
+ 0
162
+ ],
163
+ "clip": [
164
+ "4",
165
+ 1
166
+ ]
167
+ },
168
+ "class_type": "LoraLoader",
169
+ "_meta": {
170
+ "title": "Load LoRA"
171
+ }
172
+ },
173
+ "26": {
174
+ "inputs": {
175
+ "images": [
176
+ "13",
177
+ 0
178
+ ]
179
+ },
180
+ "class_type": "PreviewImage",
181
+ "_meta": {
182
+ "title": "Preview Image"
183
+ }
184
+ },
185
+ "27": {
186
+ "inputs": {
187
+ "enabled": true,
188
+ "swap_model": "inswapper_128.onnx",
189
+ "facedetection": "retinaface_resnet50",
190
+ "face_restore_model": "codeformer-v0.1.0.pth",
191
+ "face_restore_visibility": 1,
192
+ "codeformer_weight": 0.5,
193
+ "detect_gender_source": "no",
194
+ "detect_gender_input": "no",
195
+ "source_faces_index": "0",
196
+ "input_faces_index": "0",
197
+ "console_log_level": 1,
198
+ "input_image": [
199
+ "8",
200
+ 0
201
+ ],
202
+ "source_image": [
203
+ "12",
204
+ 0
205
+ ]
206
+ },
207
+ "class_type": "ReActorFaceSwap",
208
+ "_meta": {
209
+ "title": "ReActor - Fast Face Swap"
210
+ }
211
+ },
212
+ "28": {
213
+ "inputs": {
214
+ "stop_at_clip_layer": -2,
215
+ "clip": [
216
+ "23",
217
+ 1
218
+ ]
219
+ },
220
+ "class_type": "CLIPSetLastLayer",
221
+ "_meta": {
222
+ "title": "CLIP Set Last Layer"
223
+ }
224
+ },
225
+ "29": {
226
+ "inputs": {
227
+ "filename_prefix": "digi_paint",
228
+ "images": [
229
+ "27",
230
+ 0
231
+ ]
232
+ },
233
+ "class_type": "SaveImage",
234
+ "_meta": {
235
+ "title": "Save Image"
236
+ }
237
+ }
238
+ }