Werli committed on
Commit
b3eb66d
·
verified ·
1 Parent(s): 2fb3475

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +373 -373
app.py CHANGED
@@ -1,373 +1,373 @@
1
- import gradio as gr
2
- import json
3
- from PIL import Image
4
- import os
5
- from collections import defaultdict
6
-
7
- css = """
8
- #custom-gallery{--row-height:180px;display:grid;grid-auto-rows:min-content;gap:10px}#custom-gallery .thumbnail-item{height:var(--row-height);width:100%;position:relative;overflow:hidden;border-radius:8px;box-shadow:0 2px 5px rgb(0 0 0 / .1);transition:transform 0.2s ease,box-shadow 0.2s ease}#custom-gallery .thumbnail-item:hover{transform:translateY(-3px);box-shadow:0 4px 12px rgb(0 0 0 / .15)}#custom-gallery .thumbnail-item img{width:auto;height:100%;max-width:100%;max-height:var(--row-height);object-fit:contain;margin:0 auto;display:block}#custom-gallery .thumbnail-item img.portrait{max-width:100%}#custom-gallery .thumbnail-item img.landscape{max-height:100%}.gallery-container{max-height:500px;overflow-y:auto;padding-right:0;--size-80:500px}.thumbnails{display:flex;position:absolute;bottom:0;width:120px;overflow-x:scroll;padding-top:320px;padding-bottom:280px;padding-left:4px;flex-wrap:wrap}
9
- """
10
-
11
- EMPTY_RESULT = ("Not Available2",) * 15
12
-
13
- # ---------- EXTRACTION FUNCTIONS ----------
14
- def read_metadata(file_path):
15
- try:
16
- with Image.open(file_path) as img:
17
- return img.info
18
- except Exception as e:
19
- return {"error": f"Error reading file: {str(e)}"}
20
-
21
- def extract_workflow_data(file_path):
22
- metadata = read_metadata(file_path)
23
- if "error" in metadata:
24
- return {"error": metadata["error"]}
25
-
26
- if 'prompt' in metadata:
27
- try:
28
- return json.loads(metadata['prompt'])
29
- except json.JSONDecodeError:
30
- pass
31
-
32
- for key, value in metadata.items():
33
- if isinstance(value, str) and value.strip().startswith('{'):
34
- try:
35
- return json.loads(value)
36
- except json.JSONDecodeError:
37
- continue
38
- return {"error": "No workflow data found"}
39
-
40
- def extract_ksampler_params(workflow_data):
41
- seed = steps = cfg = sampler = scheduler = denoise = "Not found"
42
- if not isinstance(workflow_data, dict):
43
- return seed, steps, cfg, sampler, scheduler, denoise
44
- for node in workflow_data.values():
45
- if isinstance(node, dict) and node.get("class_type", "") in ["KSampler", "KSampler (Efficient)"]:
46
- inputs = node.get("inputs", {})
47
- seed = inputs.get("seed", "Not found")
48
- steps = inputs.get("steps", "Not found")
49
- cfg = inputs.get("cfg", "Not found")
50
- sampler = inputs.get("sampler_name", "Not found")
51
- scheduler = inputs.get("scheduler", "Not found")
52
- denoise = inputs.get("denoise", "Not found")
53
- break
54
- return str(seed), str(steps), str(cfg), str(sampler), str(scheduler), str(denoise)
55
-
56
- def extract_prompts(workflow_data):
57
- positive = negative = "Not found"
58
- if not isinstance(workflow_data, dict):
59
- return positive, negative
60
- for node in workflow_data.values():
61
- if isinstance(node, dict):
62
- class_type = node.get("class_type", "")
63
- inputs = node.get("inputs", {})
64
- title = node.get("_meta", {}).get("title", "") if node.get("_meta") else ""
65
-
66
- if "Text to Conditioning" in class_type:
67
- if "POSITIVE" in title:
68
- positive = inputs.get("text", "Not found")
69
- elif "NEGATIVE" in title:
70
- negative = inputs.get("text", "Not found")
71
- if "ShowText|pysssss" in class_type:
72
- if "text_1" in inputs:
73
- positive = inputs["text_1"]
74
- if "text_2" in inputs:
75
- negative = inputs["text_2"]
76
- if "DPRandomGenerator" in class_type:
77
- if "POSITIVE" in title:
78
- positive = inputs.get("text", "Not found")
79
- elif "NEGATIVE" in title:
80
- negative = inputs.get("text", "Not found")
81
- return str(positive), str(negative)
82
-
83
- def extract_loras(workflow_data):
84
- loras = []
85
- if not isinstance(workflow_data, dict):
86
- return "None found"
87
- for node in workflow_data.values():
88
- if isinstance(node, dict):
89
- inputs = node.get("inputs", {})
90
- if "LoraLoader" in node.get("class_type", ""):
91
- name = inputs.get("lora_name", "Unknown")
92
- strength = inputs.get("strength_model", "Unknown")
93
- loras.append(f"{name} (Strength: {strength})")
94
- for val in inputs.values():
95
- if isinstance(val, str) and "lora:" in val.lower():
96
- loras.append(val)
97
- return "\n".join(loras) if loras else "None found"
98
-
99
- def extract_model_info(workflow_data):
100
- models = []
101
- if not isinstance(workflow_data, dict):
102
- return "Not found"
103
- for node in workflow_data.values():
104
- if isinstance(node, dict):
105
- inputs = node.get("inputs", {})
106
- class_type = node.get("class_type", "")
107
- if "CheckpointLoader" in class_type:
108
- models.append(inputs.get("ckpt_name", "Unknown"))
109
- if "Model Mecha Recipe" in class_type:
110
- models.append(inputs.get("model_path", "Unknown"))
111
- return "\n".join(models) if models else "Not found"
112
-
113
- def extract_image_info(workflow_data):
114
- width = height = batch_size = "Not found"
115
- if not isinstance(workflow_data, dict):
116
- return width, height, batch_size
117
- for node in workflow_data.values():
118
- if isinstance(node, dict) and node.get("class_type", "") == "EmptyLatentImage":
119
- inputs = node.get("inputs", {})
120
- width = inputs.get("width", "Not found")
121
- height = inputs.get("height", "Not found")
122
- batch_size = inputs.get("batch_size", "Not found")
123
- break
124
- return str(width), str(height), str(batch_size)
125
-
126
- def extract_nodes_info(workflow_data):
127
- if not isinstance(workflow_data, dict):
128
- return "Not found"
129
- total_nodes = len(workflow_data)
130
- node_types = defaultdict(int)
131
- for node in workflow_data.values():
132
- if isinstance(node, dict):
133
- node_types[node.get("class_type", "Unknown")] += 1
134
- summary = f"Total Nodes: {total_nodes}\n"
135
- for t, c in sorted(node_types.items()):
136
- summary += f"{t}: {c}\n"
137
- return summary.strip()
138
-
139
- def extract_workflow_as_json(workflow_data):
140
- if isinstance(workflow_data, dict):
141
- return json.dumps(workflow_data, ensure_ascii=False, indent=2)
142
- return "{}"
143
- # ---------- EXTRACTION FUNCTIONS ----------
144
- #
145
- # ---------- IMAGE PROCESSING ----------
146
- def process_single_image(image_path):
147
- """Extract all workflow info from a single image path."""
148
- if not image_path:
149
- return EMPTY_RESULT
150
-
151
- workflow_data = extract_workflow_data(image_path)
152
-
153
- if isinstance(workflow_data, dict) and "error" not in workflow_data:
154
- seed, steps, cfg, sampler, scheduler, denoise = extract_ksampler_params(workflow_data)
155
- positive, negative = extract_prompts(workflow_data)
156
- loras = extract_loras(workflow_data)
157
- models = extract_model_info(workflow_data)
158
- width, height, batch = extract_image_info(workflow_data)
159
- nodes = extract_nodes_info(workflow_data)
160
- full_json = extract_workflow_as_json(workflow_data)
161
- else:
162
- error = str(workflow_data.get("error", "Unknown error"))
163
- seed = steps = cfg = sampler = scheduler = denoise = positive = negative = loras = models = width = height = batch = nodes = full_json = error
164
-
165
- return seed, steps, cfg, sampler, scheduler, denoise, \
166
- positive, negative, loras, models, width, height, batch, nodes, full_json
167
-
168
- def append_gallery(gallery: list, image: str):
169
- """Add a single image to the gallery"""
170
- if gallery is None:
171
- gallery = []
172
- if not image:
173
- return gallery, None
174
- gallery.append(image)
175
- return gallery, None
176
-
177
- def extend_gallery(gallery, images):
178
- """Extend gallery preserving uniqueness"""
179
-
180
- if gallery is None:
181
- gallery = []
182
-
183
- if not images:
184
- return gallery
185
-
186
- # Normalize input - Gradio might pass various formats
187
- incoming_paths = []
188
- if isinstance(images, str): # Single image path
189
- incoming_paths.append(images)
190
- elif isinstance(images, list):
191
- for img in images:
192
- # Handle cases where elements could be tuples from Gallery
193
- if isinstance(img, (tuple, list)):
194
- incoming_paths.append(str(img[0]))
195
- else:
196
- incoming_paths.append(str(img))
197
-
198
- unique_incoming = list(set(incoming_paths)) # Avoid duplicates
199
-
200
- seen_paths = {item[0] if isinstance(item, (list, tuple)) else item for item in gallery}
201
- new_entries = [path for path in unique_incoming if path not in seen_paths]
202
-
203
- # Create entries matching expected gallery style
204
- formatted_new = [(path, '') for path in new_entries]
205
-
206
- updated_gallery = gallery + formatted_new
207
-
208
- return updated_gallery
209
-
210
- def process_gallery(gallery, results_state):
211
- """Process all images and populate metadata in session."""
212
- if not gallery or len(gallery) == 0:
213
- # Clear results if nothing left
214
- results_state.clear()
215
- return EMPTY_RESULT + (results_state,)
216
-
217
- updated_state = {}
218
- first_image_result = EMPTY_RESULT
219
- try:
220
- for item in gallery:
221
- path = item if isinstance(item, str) else item[0]
222
-
223
- if path not in results_state:
224
- res = process_single_image(path)
225
- results_state[path] = res
226
- updated_state[path] = res
227
-
228
- if first_image_result == EMPTY_RESULT:
229
- first_image_result = res
230
- else:
231
- # Already cached
232
- res = results_state[path]
233
- updated_state[path] = res
234
-
235
- if first_image_result == EMPTY_RESULT:
236
- first_image_result = res
237
-
238
- results_state.update(updated_state)
239
- return first_image_result + (results_state,)
240
- except Exception as e:
241
- print("[ERROR]", str(e))
242
- return EMPTY_RESULT + (results_state,)
243
-
244
- def get_selection_from_gallery(gallery, results_state, evt: gr.SelectData):
245
- """Fetch result for selected image in gallery."""
246
- if evt is None or evt.value is None:
247
- # No selection: use first image
248
- if gallery and len(gallery) > 0:
249
- img_path = str(gallery[0][0] if isinstance(gallery[0], (list, tuple)) else gallery[0])
250
- if img_path in results_state:
251
- return list(results_state[img_path])
252
- else:
253
- # Handle selection event
254
- try:
255
- selected_value = evt.value
256
- img_path = None
257
-
258
- if isinstance(selected_value, dict) and 'image' in selected_value:
259
- img_path = selected_value['image']['path']
260
- elif isinstance(selected_value, (list, tuple)):
261
- img_path = selected_value[0]
262
- else:
263
- img_path = str(selected_value)
264
-
265
- if img_path in results_state:
266
- return list(results_state[img_path])
267
- except Exception as e:
268
- print(f"Selection error: {e}")
269
-
270
- # Return empty if no image found
271
- return list(EMPTY_RESULT)
272
- # ---------- IMAGE PROCESSING ----------
273
- #
274
- with gr.Blocks(title="ComfyUI Workflow Extractor", css=css, theme="Werli/Purple-Crimson-Gradio-Theme", fill_width=True) as demo:
275
- gr.Markdown("# 🛠️ ComfyUI Workflow Information Extractor")
276
- gr.Markdown("Upload Multiple ComfyUI-generated images. Extract prompts, parameters, models, and full workflows.")
277
- with gr.Row():
278
- with gr.Column(scale=2):
279
- upload_button = gr.UploadButton(
280
- "📁 Upload Multiple Images",
281
- file_types=["image"],
282
- file_count="multiple",
283
- size='lg'
284
- )
285
- gallery = gr.Gallery(
286
- columns=3,
287
- show_share_button=False,
288
- interactive=True,
289
- height='auto',
290
- label='Grid of images',
291
- preview=False,
292
- elem_id='custom-gallery'
293
- )
294
-
295
- with gr.Column(scale=3):
296
- with gr.Tabs():
297
- with gr.Tab("Sampling Parameters"):
298
- with gr.Row():
299
- with gr.Column():
300
- seed_out = gr.Textbox(label="Seed", interactive=False)
301
- steps_out = gr.Textbox(label="Steps", interactive=False)
302
- cfg_out = gr.Textbox(label="CFG Scale", interactive=False)
303
- with gr.Column():
304
- sampler_out = gr.Textbox(label="Sampler", interactive=False)
305
- scheduler_out = gr.Textbox(label="Scheduler", interactive=False)
306
- denoise_out = gr.Textbox(label="Denoise", interactive=False)
307
-
308
- with gr.Tab("Prompts"):
309
- pos_prompt = gr.Textbox(label="Positive Prompt", lines=4, interactive=False, show_copy_button=True)
310
- neg_prompt = gr.Textbox(label="Negative Prompt", lines=4, interactive=False, show_copy_button=True)
311
-
312
- with gr.Tab("Models & LoRAs"):
313
- with gr.Row():
314
- lora_out = gr.Textbox(label="LoRAs", lines=5, interactive=False, show_copy_button=True)
315
- model_out = gr.Textbox(label="Base Models", lines=5, interactive=False, show_copy_button=True)
316
-
317
- with gr.Tab("Image Info"):
318
- with gr.Row():
319
- with gr.Column():
320
- width_out = gr.Textbox(label="Width", interactive=False)
321
- height_out = gr.Textbox(label="Height", interactive=False)
322
- batch_out = gr.Textbox(label="Batch Size", interactive=False)
323
- with gr.Column():
324
- nodes_out = gr.Textbox(label="Node Counts", lines=6, interactive=False)
325
-
326
- with gr.Tab("Full Workflow"):
327
- json_out = gr.Textbox(label="Workflow JSON", lines=20, interactive=True, show_copy_button=True)
328
-
329
- # State to store results per image
330
- results_state = gr.State({})
331
-
332
- # Event Connections
333
- upload_event = upload_button.upload(
334
- fn=extend_gallery,
335
- inputs=[gallery, upload_button],
336
- outputs=gallery,
337
- queue=False
338
- )
339
-
340
- upload_event.then(
341
- fn=process_gallery,
342
- inputs=[gallery, results_state],
343
- outputs=[
344
- seed_out, steps_out, cfg_out, sampler_out, scheduler_out, denoise_out,
345
- pos_prompt, neg_prompt, lora_out, model_out, width_out, height_out,
346
- batch_out, nodes_out, json_out, results_state
347
- ]
348
- )
349
- gallery.change(
350
- fn=process_gallery,
351
- inputs=[gallery, results_state],
352
- outputs=[
353
- seed_out, steps_out, cfg_out, sampler_out, scheduler_out, denoise_out,
354
- pos_prompt, neg_prompt, lora_out, model_out, width_out, height_out,
355
- batch_out, nodes_out, json_out, results_state
356
- ],
357
- queue=True
358
- )
359
-
360
- gallery.select(
361
- get_selection_from_gallery,
362
- inputs=[gallery, results_state],
363
- outputs=[
364
- seed_out, steps_out, cfg_out, sampler_out, scheduler_out, denoise_out,
365
- pos_prompt, neg_prompt, lora_out, model_out, width_out, height_out,
366
- batch_out, nodes_out, json_out
367
- ]
368
- )
369
-
370
- gr.Markdown("---\n💡 **Note:** It's under development.")
371
-
372
- if __name__ == "__main__":
373
- demo.queue(max_size=10).launch(show_api=False, show_error=True)
 
1
+ import gradio as gr
2
+ import json
3
+ from PIL import Image
4
+ import os
5
+ from collections import defaultdict
6
+
7
+ css = """
8
+ #custom-gallery{--row-height:180px;display:grid;grid-auto-rows:min-content;gap:10px}#custom-gallery .thumbnail-item{height:var(--row-height);width:100%;position:relative;overflow:hidden;border-radius:8px;box-shadow:0 2px 5px rgb(0 0 0 / .1);transition:transform 0.2s ease,box-shadow 0.2s ease}#custom-gallery .thumbnail-item:hover{transform:translateY(-3px);box-shadow:0 4px 12px rgb(0 0 0 / .15)}#custom-gallery .thumbnail-item img{width:auto;height:100%;max-width:100%;max-height:var(--row-height);object-fit:contain;margin:0 auto;display:block}#custom-gallery .thumbnail-item img.portrait{max-width:100%}#custom-gallery .thumbnail-item img.landscape{max-height:100%}.gallery-container{max-height:500px;overflow-y:auto;padding-right:0;--size-80:500px}.thumbnails{display:flex;position:absolute;bottom:0;width:120px;overflow-x:scroll;padding-top:320px;padding-bottom:280px;padding-left:4px;flex-wrap:wrap}
9
+ """
10
+
11
+ EMPTY_RESULT = ("Not Available",) * 15
12
+
13
+ # ---------- EXTRACTION FUNCTIONS ----------
14
def read_metadata(file_path):
    """Return the raw PIL metadata dict (``Image.info``) for *file_path*.

    Never raises: any failure (missing file, unsupported format, corrupt
    data) is reported as an ``{"error": ...}`` dict instead.
    """
    try:
        with Image.open(file_path) as image:
            return image.info
    except Exception as exc:
        return {"error": f"Error reading file: {str(exc)}"}
20
+
21
def extract_workflow_data(file_path):
    """Load the ComfyUI workflow JSON embedded in an image's metadata.

    Tries the conventional ``'prompt'`` metadata key first, then falls back
    to scanning every metadata value for something JSON-object-shaped.

    Returns the parsed workflow dict, or an ``{"error": ...}`` dict when
    the file is unreadable or contains no usable workflow.
    """
    metadata = read_metadata(file_path)
    if "error" in metadata:
        return {"error": metadata["error"]}

    # ComfyUI normally stores the workflow under the 'prompt' key.
    if 'prompt' in metadata:
        try:
            return json.loads(metadata['prompt'])
        except json.JSONDecodeError:
            pass

    # Fallback: any metadata value that looks like a JSON object.
    # (Keys are irrelevant here, so iterate values directly.)
    for value in metadata.values():
        if isinstance(value, str) and value.strip().startswith('{'):
            try:
                return json.loads(value)
            except json.JSONDecodeError:
                continue
    return {"error": "No workflow data found"}
39
+
40
def extract_ksampler_params(workflow_data):
    """Return ``(seed, steps, cfg, sampler, scheduler, denoise)`` as strings.

    Reads the first ``KSampler`` / ``KSampler (Efficient)`` node found in the
    workflow; any missing value (or a non-dict workflow) yields "Not found".
    """
    defaults = ("Not found",) * 6
    if not isinstance(workflow_data, dict):
        return defaults
    for candidate in workflow_data.values():
        if not isinstance(candidate, dict):
            continue
        if candidate.get("class_type", "") not in ("KSampler", "KSampler (Efficient)"):
            continue
        params = candidate.get("inputs", {})
        fields = ("seed", "steps", "cfg", "sampler_name", "scheduler", "denoise")
        return tuple(str(params.get(field, "Not found")) for field in fields)
    return defaults
55
+
56
def extract_prompts(workflow_data):
    """Return ``(positive, negative)`` prompt strings ("Not found" if absent).

    Recognizes three node families: "Text to Conditioning" and
    "DPRandomGenerator" nodes tagged POSITIVE/NEGATIVE via their _meta title,
    and "ShowText|pysssss" nodes carrying text_1/text_2 inputs.  Later nodes
    in iteration order overwrite earlier matches.
    """
    positive, negative = "Not found", "Not found"
    if not isinstance(workflow_data, dict):
        return positive, negative
    for entry in workflow_data.values():
        if not isinstance(entry, dict):
            continue
        kind = entry.get("class_type", "")
        params = entry.get("inputs", {})
        meta = entry.get("_meta")
        label = meta.get("title", "") if meta else ""

        if "Text to Conditioning" in kind:
            if "POSITIVE" in label:
                positive = params.get("text", "Not found")
            elif "NEGATIVE" in label:
                negative = params.get("text", "Not found")
        if "ShowText|pysssss" in kind:
            positive = params.get("text_1", positive)
            negative = params.get("text_2", negative)
        if "DPRandomGenerator" in kind:
            if "POSITIVE" in label:
                positive = params.get("text", "Not found")
            elif "NEGATIVE" in label:
                negative = params.get("text", "Not found")
    return str(positive), str(negative)
82
+
83
def extract_loras(workflow_data):
    """Collect LoRA usage as one newline-joined string ("None found" if empty).

    Picks up both dedicated LoraLoader nodes (name + model strength) and
    inline "lora:" tags embedded in any string input of any node.
    """
    if not isinstance(workflow_data, dict):
        return "None found"
    found = []
    for entry in workflow_data.values():
        if not isinstance(entry, dict):
            continue
        params = entry.get("inputs", {})
        if "LoraLoader" in entry.get("class_type", ""):
            lora_name = params.get("lora_name", "Unknown")
            lora_strength = params.get("strength_model", "Unknown")
            found.append(f"{lora_name} (Strength: {lora_strength})")
        found.extend(
            value for value in params.values()
            if isinstance(value, str) and "lora:" in value.lower()
        )
    return "\n".join(found) if found else "None found"
98
+
99
def extract_model_info(workflow_data):
    """Return a newline-joined list of base model names ("Not found" if none).

    Collects checkpoint names from any CheckpointLoader-family node and
    model paths from "Model Mecha Recipe" nodes.
    """
    if not isinstance(workflow_data, dict):
        return "Not found"
    names = []
    for entry in workflow_data.values():
        if not isinstance(entry, dict):
            continue
        params = entry.get("inputs", {})
        kind = entry.get("class_type", "")
        if "CheckpointLoader" in kind:
            names.append(params.get("ckpt_name", "Unknown"))
        if "Model Mecha Recipe" in kind:
            names.append(params.get("model_path", "Unknown"))
    return "\n".join(names) if names else "Not found"
112
+
113
def extract_image_info(workflow_data):
    """Return ``(width, height, batch_size)`` as strings.

    Values come from the first EmptyLatentImage node; "Not found" is used
    for a non-dict workflow, a missing node, or missing inputs.
    """
    missing = ("Not found",) * 3
    if not isinstance(workflow_data, dict):
        return missing
    for entry in workflow_data.values():
        if isinstance(entry, dict) and entry.get("class_type", "") == "EmptyLatentImage":
            params = entry.get("inputs", {})
            return (
                str(params.get("width", "Not found")),
                str(params.get("height", "Not found")),
                str(params.get("batch_size", "Not found")),
            )
    return missing
125
+
126
def extract_nodes_info(workflow_data):
    """Summarize the workflow: total node count plus per-class_type counts.

    Returns one line per class_type (sorted alphabetically) after a
    "Total Nodes: N" header, or "Not found" for a non-dict workflow.
    """
    if not isinstance(workflow_data, dict):
        return "Not found"
    node_types = defaultdict(int)
    for node in workflow_data.values():
        if isinstance(node, dict):
            node_types[node.get("class_type", "Unknown")] += 1
    # Build with join instead of repeated string += (linear, not quadratic);
    # join also leaves no trailing newline, matching the old .strip() result.
    lines = [f"Total Nodes: {len(workflow_data)}"]
    lines.extend(f"{t}: {c}" for t, c in sorted(node_types.items()))
    return "\n".join(lines)
138
+
139
def extract_workflow_as_json(workflow_data):
    """Pretty-print the workflow as a JSON string; "{}" for non-dict input."""
    if not isinstance(workflow_data, dict):
        return "{}"
    return json.dumps(workflow_data, ensure_ascii=False, indent=2)
143
+ # ---------- EXTRACTION FUNCTIONS ----------
144
+ #
145
+ # ---------- IMAGE PROCESSING ----------
146
def process_single_image(image_path):
    """Run every extractor on one image and return the 15-field result tuple.

    Field order matches the output widgets: seed, steps, cfg, sampler,
    scheduler, denoise, positive, negative, loras, models, width, height,
    batch, node summary, full workflow JSON.  On extraction failure the
    error message fills every field.
    """
    if not image_path:
        return EMPTY_RESULT

    workflow_data = extract_workflow_data(image_path)

    if not isinstance(workflow_data, dict) or "error" in workflow_data:
        # Propagate the error message into every output field.
        message = str(workflow_data.get("error", "Unknown error"))
        return (message,) * 15

    seed, steps, cfg, sampler, scheduler, denoise = extract_ksampler_params(workflow_data)
    positive, negative = extract_prompts(workflow_data)
    return (
        seed, steps, cfg, sampler, scheduler, denoise,
        positive, negative,
        extract_loras(workflow_data),
        extract_model_info(workflow_data),
        *extract_image_info(workflow_data),
        extract_nodes_info(workflow_data),
        extract_workflow_as_json(workflow_data),
    )
167
+
168
def append_gallery(gallery: list, image: str):
    """Append one image path to the gallery list.

    Returns ``(gallery, None)`` — the None clears the single-image input
    widget.  A falsy *image* leaves the gallery untouched.
    """
    items = gallery if gallery is not None else []
    if image:
        items.append(image)
    return items, None
176
+
177
def extend_gallery(gallery, images):
    """Add newly uploaded image paths to the gallery, skipping duplicates.

    *images* may be a single path string, or a list whose elements are
    paths or (path, caption) pairs.  New entries are appended as
    ``(path, '')`` tuples in upload order; paths already present in
    *gallery* are ignored.  Returns the updated gallery list.
    """
    if gallery is None:
        gallery = []

    if not images:
        return gallery

    # Normalize input - Gradio might pass various formats
    incoming_paths = []
    if isinstance(images, str):  # Single image path
        incoming_paths.append(images)
    elif isinstance(images, list):
        for img in images:
            # Handle cases where elements could be tuples from Gallery
            if isinstance(img, (tuple, list)):
                incoming_paths.append(str(img[0]))
            else:
                incoming_paths.append(str(img))

    # De-duplicate while PRESERVING upload order.  list(set(...)) here was
    # a bug: string hash randomization made the resulting order
    # nondeterministic between runs.
    unique_incoming = list(dict.fromkeys(incoming_paths))

    seen_paths = {item[0] if isinstance(item, (list, tuple)) else item
                  for item in gallery}

    # Create entries matching expected gallery style: (path, caption) pairs.
    formatted_new = [(path, '') for path in unique_incoming
                     if path not in seen_paths]

    return gallery + formatted_new
209
+
210
def process_gallery(gallery, results_state):
    """Extract metadata for every gallery image, caching results per path.

    *results_state* maps image path -> 15-field result tuple and is mutated
    in place (already-processed paths are served from the cache).  Returns
    the first image's result tuple followed by *results_state*, matching
    the 16 output components wired to this handler.  An empty gallery
    clears the cache and returns EMPTY_RESULT.
    """
    if not gallery:
        # Clear results if nothing left
        results_state.clear()
        return EMPTY_RESULT + (results_state,)

    first_image_result = EMPTY_RESULT
    try:
        for item in gallery:
            # Gallery entries are bare paths or (path, caption) pairs.
            path = item if isinstance(item, str) else item[0]

            # Only paths not yet cached are (re)processed; the old shadow
            # dict that mirrored results_state was pure redundancy.
            if path not in results_state:
                results_state[path] = process_single_image(path)

            # The first image's data is what gets displayed.
            if first_image_result == EMPTY_RESULT:
                first_image_result = results_state[path]

        return first_image_result + (results_state,)
    except Exception as e:
        # Never let one bad image break the UI update cycle.
        print("[ERROR]", str(e))
        return EMPTY_RESULT + (results_state,)
243
+
244
def get_selection_from_gallery(gallery, results_state, evt: gr.SelectData):
    """Return the cached 15-field result (as a list) for the clicked image.

    Falls back to the first gallery entry when no selection event is
    available, and to EMPTY_RESULT when nothing usable is cached.
    """
    if evt is None or evt.value is None:
        # No selection event: fall back to the first gallery entry.
        if gallery:
            first = gallery[0]
            img_path = str(first[0] if isinstance(first, (list, tuple)) else first)
            if img_path in results_state:
                return list(results_state[img_path])
    else:
        try:
            selected = evt.value
            # evt.value's shape varies by gradio version: a dict with an
            # 'image' payload, a (path, caption) pair, or a bare path.
            if isinstance(selected, dict) and 'image' in selected:
                img_path = selected['image']['path']
            elif isinstance(selected, (list, tuple)):
                img_path = selected[0]
            else:
                img_path = str(selected)

            if img_path in results_state:
                return list(results_state[img_path])
        except Exception as e:
            print(f"Selection error: {e}")

    # Nothing cached for the selection.
    return list(EMPTY_RESULT)
272
+ # ---------- IMAGE PROCESSING ----------
273
+ #
274
+ with gr.Blocks(title="ComfyUI Workflow Extractor", css=css, theme="Werli/Purple-Crimson-Gradio-Theme", fill_width=True) as demo:
275
+ gr.Markdown("# 🛠️ ComfyUI Workflow Information Extractor")
276
+ gr.Markdown("Upload Multiple ComfyUI-generated images. Extract prompts, parameters, models, and full workflows.")
277
+ with gr.Row():
278
+ with gr.Column(scale=2):
279
+ upload_button = gr.UploadButton(
280
+ "📁 Upload Multiple Images",
281
+ file_types=["image"],
282
+ file_count="multiple",
283
+ size='lg'
284
+ )
285
+ gallery = gr.Gallery(
286
+ columns=3,
287
+ show_share_button=False,
288
+ interactive=True,
289
+ height='auto',
290
+ label='Grid of images',
291
+ preview=False,
292
+ elem_id='custom-gallery'
293
+ )
294
+
295
+ with gr.Column(scale=3):
296
+ with gr.Tabs():
297
+ with gr.Tab("Sampling Parameters"):
298
+ with gr.Row():
299
+ with gr.Column():
300
+ seed_out = gr.Textbox(label="Seed", interactive=False)
301
+ steps_out = gr.Textbox(label="Steps", interactive=False)
302
+ cfg_out = gr.Textbox(label="CFG Scale", interactive=False)
303
+ with gr.Column():
304
+ sampler_out = gr.Textbox(label="Sampler", interactive=False)
305
+ scheduler_out = gr.Textbox(label="Scheduler", interactive=False)
306
+ denoise_out = gr.Textbox(label="Denoise", interactive=False)
307
+
308
+ with gr.Tab("Prompts"):
309
+ pos_prompt = gr.Textbox(label="Positive Prompt", lines=4, interactive=False, show_copy_button=True)
310
+ neg_prompt = gr.Textbox(label="Negative Prompt", lines=4, interactive=False, show_copy_button=True)
311
+
312
+ with gr.Tab("Models & LoRAs"):
313
+ with gr.Row():
314
+ lora_out = gr.Textbox(label="LoRAs", lines=5, interactive=False, show_copy_button=True)
315
+ model_out = gr.Textbox(label="Base Models", lines=5, interactive=False, show_copy_button=True)
316
+
317
+ with gr.Tab("Image Info"):
318
+ with gr.Row():
319
+ with gr.Column():
320
+ width_out = gr.Textbox(label="Width", interactive=False)
321
+ height_out = gr.Textbox(label="Height", interactive=False)
322
+ batch_out = gr.Textbox(label="Batch Size", interactive=False)
323
+ with gr.Column():
324
+ nodes_out = gr.Textbox(label="Node Counts", lines=6, interactive=False)
325
+
326
+ with gr.Tab("Full Workflow"):
327
+ json_out = gr.Textbox(label="Workflow JSON", lines=20, interactive=True, show_copy_button=True)
328
+
329
+ # State to store results per image
330
+ results_state = gr.State({})
331
+
332
+ # Event Connections
333
+ upload_event = upload_button.upload(
334
+ fn=extend_gallery,
335
+ inputs=[gallery, upload_button],
336
+ outputs=gallery,
337
+ queue=False
338
+ )
339
+
340
+ upload_event.then(
341
+ fn=process_gallery,
342
+ inputs=[gallery, results_state],
343
+ outputs=[
344
+ seed_out, steps_out, cfg_out, sampler_out, scheduler_out, denoise_out,
345
+ pos_prompt, neg_prompt, lora_out, model_out, width_out, height_out,
346
+ batch_out, nodes_out, json_out, results_state
347
+ ]
348
+ )
349
+ gallery.change(
350
+ fn=process_gallery,
351
+ inputs=[gallery, results_state],
352
+ outputs=[
353
+ seed_out, steps_out, cfg_out, sampler_out, scheduler_out, denoise_out,
354
+ pos_prompt, neg_prompt, lora_out, model_out, width_out, height_out,
355
+ batch_out, nodes_out, json_out, results_state
356
+ ],
357
+ queue=True
358
+ )
359
+
360
+ gallery.select(
361
+ get_selection_from_gallery,
362
+ inputs=[gallery, results_state],
363
+ outputs=[
364
+ seed_out, steps_out, cfg_out, sampler_out, scheduler_out, denoise_out,
365
+ pos_prompt, neg_prompt, lora_out, model_out, width_out, height_out,
366
+ batch_out, nodes_out, json_out
367
+ ]
368
+ )
369
+
370
+ gr.Markdown("---\n💡 **Note:** It's under development.")
371
+
372
+ if __name__ == "__main__":
373
+ demo.queue(max_size=10).launch(show_api=False, show_error=True)