lukeafullard committed on
Commit edf2856 · verified · 1 Parent(s): 68179ca

Update src/streamlit_app.py

Files changed (1): src/streamlit_app.py +445 -38
src/streamlit_app.py CHANGED
@@ -1,40 +1,447 @@
- import altair as alt
- import numpy as np
- import pandas as pd
  import streamlit as st
-
- """
- # Welcome to Streamlit!
-
- Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
- If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
- forums](https://discuss.streamlit.io).
-
- In the meantime, below is an example of what you can do with just a few lines of code:
- """
-
- num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
- num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
- indices = np.linspace(0, 1, num_points)
- theta = 2 * np.pi * num_turns * indices
- radius = indices
-
- x = radius * np.cos(theta)
- y = radius * np.sin(theta)
-
- df = pd.DataFrame({
-     "x": x,
-     "y": y,
-     "idx": indices,
-     "rand": np.random.randn(num_points),
- })
-
- st.altair_chart(alt.Chart(df, height=700, width=700)
-     .mark_point(filled=True)
-     .encode(
-         x=alt.X("x", axis=None),
-         y=alt.Y("y", axis=None),
-         color=alt.Color("idx", legend=None, scale=alt.Scale()),
-         size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-     ))
+ from PIL import Image, ImageColor, ImageDraw, ImageFont, PngImagePlugin
+ import torch
+ import torch.nn.functional as F
+ from torchvision import transforms
+ from transformers import AutoModelForImageSegmentation, AutoImageProcessor, Swin2SRForImageSuperResolution, VitMatteForImageMatting
+ import io
+ import numpy as np
+ import gc
+
+ # Page Configuration
+ st.set_page_config(layout="wide", page_title="AI Image Lab Pro")
+
+ # --- 1. MODEL LOADING (Cached - UNCHANGED) ---
+
+ @st.cache_resource
+ def load_rmbg_model():
+     model = AutoModelForImageSegmentation.from_pretrained("briaai/RMBG-1.4", trust_remote_code=True)
+     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+     model.to(device)
+     return model, device
+
+ @st.cache_resource
+ def load_birefnet_model():
+     model = AutoModelForImageSegmentation.from_pretrained("ZhengPeng7/BiRefNet", trust_remote_code=True)
+     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+     model.to(device)
+     return model, device
+
+ @st.cache_resource
+ def load_vitmatte_model():
+     processor = AutoImageProcessor.from_pretrained("hustvl/vitmatte-small-composition-1k")
+     model = VitMatteForImageMatting.from_pretrained("hustvl/vitmatte-small-composition-1k")
+     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+     model.to(device)
+     return processor, model, device
+
+ @st.cache_resource
+ def load_upscaler(scale=2):
+     if scale == 4:
+         model_id = "caidas/swin2SR-realworld-sr-x4-64-bsrgan-psnr"
+     else:
+         model_id = "caidas/swin2SR-classical-sr-x2-64"
+     processor = AutoImageProcessor.from_pretrained(model_id)
+     model = Swin2SRForImageSuperResolution.from_pretrained(model_id)
+     return processor, model
+
+ # --- 2. HELPER FUNCTIONS (AI & Processing - UNCHANGED) ---
+
+ def cleanup_memory():
+     gc.collect()
+     if torch.cuda.is_available():
+         torch.cuda.empty_cache()
+
+ def find_mask_tensor(output):
+     if isinstance(output, torch.Tensor):
+         if output.dim() == 4 and output.shape[1] == 1: return output
+         elif output.dim() == 3 and output.shape[0] == 1: return output
+         return None
+     if hasattr(output, "logits"): return find_mask_tensor(output.logits)
+     elif isinstance(output, (list, tuple)):
+         for item in output:
+             found = find_mask_tensor(item)
+             if found is not None: return found
+     return None
+
+ def generate_trimap(mask_tensor, erode_kernel_size=10, dilate_kernel_size=10):
+     if mask_tensor.dim() == 3: mask_tensor = mask_tensor.unsqueeze(0)
+     erode_k = erode_kernel_size
+     dilate_k = dilate_kernel_size
+     dilated = F.max_pool2d(mask_tensor, kernel_size=dilate_k, stride=1, padding=dilate_k//2)
+     eroded = -F.max_pool2d(-mask_tensor, kernel_size=erode_k, stride=1, padding=erode_k//2)
+     trimap = torch.full_like(mask_tensor, 0.5)
+     trimap[eroded > 0.5] = 1.0
+     trimap[dilated < 0.5] = 0.0
+     return trimap
+
+ # --- 3. INFERENCE LOGIC (UNCHANGED) ---
+
+ def inference_segmentation(model, image, device, resolution=1024):
+     w, h = image.size
+     transform = transforms.Compose([
+         transforms.Resize((resolution, resolution)),
+         transforms.ToTensor(),
+         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+     ])
+     input_tensor = transform(image).unsqueeze(0).to(device)
+
+     with torch.no_grad():
+         outputs = model(input_tensor)
+
+     result_tensor = find_mask_tensor(outputs)
+     if result_tensor is None: result_tensor = outputs[0] if isinstance(outputs, (list, tuple)) else outputs
+     if not isinstance(result_tensor, torch.Tensor):
+         if isinstance(result_tensor, (list, tuple)): result_tensor = result_tensor[0]
+
+     pred = result_tensor.squeeze().cpu()
+     if pred.max() > 1 or pred.min() < 0: pred = pred.sigmoid()
+
+     pred_pil = transforms.ToPILImage()(pred)
+     mask = pred_pil.resize((w, h), resample=Image.LANCZOS)
+     return mask
+
+ def inference_vitmatte(image, device):
+     cleanup_memory()
+     original_size = image.size
+     max_dim = 1536
+     if max(image.size) > max_dim:
+         scale_ratio = max_dim / max(image.size)
+         new_w = int(image.size[0] * scale_ratio)
+         new_h = int(image.size[1] * scale_ratio)
+         processing_image = image.resize((new_w, new_h), Image.LANCZOS)
+     else:
+         processing_image = image
+
+     rmbg_model, _ = load_rmbg_model()
+     rough_mask_pil = inference_segmentation(rmbg_model, processing_image, device, resolution=1024)
+
+     mask_tensor = transforms.ToTensor()(rough_mask_pil).to(device)
+     trimap_tensor = generate_trimap(mask_tensor, erode_kernel_size=25, dilate_kernel_size=25)
+     trimap_pil = transforms.ToPILImage()(trimap_tensor.squeeze().cpu())
+
+     processor, model, _ = load_vitmatte_model()
+     inputs = processor(images=processing_image, trimaps=trimap_pil, return_tensors="pt").to(device)
+
+     with torch.no_grad():
+         outputs = model(**inputs)
+
+     alphas = outputs.alphas
+     alpha_np = alphas.squeeze().cpu().numpy()
+     alpha_pil = Image.fromarray((alpha_np * 255).astype("uint8"), mode="L")
+
+     if original_size != processing_image.size:
+         alpha_pil = alpha_pil.resize(original_size, resample=Image.LANCZOS)
+
+     cleanup_memory()
+     return alpha_pil
+
+ @st.cache_data(show_spinner=False)
+ def process_background_removal(image_bytes, method="RMBG-1.4"):
+     cleanup_memory()
+     image = Image.open(io.BytesIO(image_bytes)).convert("RGBA")
+     image_rgb = image.convert("RGB")
+
+     if method == "RMBG-1.4":
+         model, device = load_rmbg_model()
+         mask = inference_segmentation(model, image_rgb, device)
+
+     elif method == "BiRefNet (Heavy)":
+         model, device = load_birefnet_model()
+         mask = inference_segmentation(model, image_rgb, device, resolution=1024)
+
+     elif method == "VitMatte (Refiner)":
+         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         mask = inference_vitmatte(image_rgb, device)
+
+     else:
+         return image
+
+     final_image = image_rgb.copy()
+     final_image.putalpha(mask)
+     return final_image
+
+ # --- Upscaling Logic ---
+ def run_swin_inference(image, processor, model):
+     inputs = processor(image, return_tensors="pt")
+     with torch.no_grad():
+         outputs = model(**inputs)
+     output = outputs.reconstruction.data.squeeze().float().cpu().clamp_(0, 1).numpy()
+     output = np.moveaxis(output, 0, -1)
+     output = (output * 255.0).round().astype(np.uint8)
+     return Image.fromarray(output)
+
+ def upscale_chunk_logic(image, processor, model):
+     if image.mode == 'RGBA':
+         r, g, b, a = image.split()
+         rgb_image = Image.merge('RGB', (r, g, b))
+         upscaled_rgb = run_swin_inference(rgb_image, processor, model)
+         upscaled_a = a.resize(upscaled_rgb.size, Image.Resampling.LANCZOS)
+         return Image.merge('RGBA', (*upscaled_rgb.split(), upscaled_a))
+     else:
+         return run_swin_inference(image, processor, model)
+
+ def process_tiled_upscale(image, scale_factor, grid_n, progress_bar):
+     cleanup_memory()
+     processor, model = load_upscaler(scale_factor)
+     w, h = image.size
+     rows = cols = grid_n
+     tile_w = w // cols
+     tile_h = h // rows
+     overlap = 32
+     full_image = Image.new(image.mode, (w * scale_factor, h * scale_factor))
+     total_tiles = rows * cols
+     count = 0
+     for y in range(rows):
+         for x in range(cols):
+             target_left = x * tile_w
+             target_upper = y * tile_h
+             target_right = w if x == cols - 1 else (x + 1) * tile_w
+             target_lower = h if y == rows - 1 else (y + 1) * tile_h
+             source_left = max(0, target_left - overlap)
+             source_upper = max(0, target_upper - overlap)
+             source_right = min(w, target_right + overlap)
+             source_lower = min(h, target_lower + overlap)
+             tile = image.crop((source_left, source_upper, source_right, source_lower))
+             upscaled_tile = upscale_chunk_logic(tile, processor, model)
+             target_w = target_right - target_left
+             target_h = target_lower - target_upper
+             extra_left = target_left - source_left
+             extra_upper = target_upper - source_upper
+             crop_x = extra_left * scale_factor
+             crop_y = extra_upper * scale_factor
+             crop_w = target_w * scale_factor
+             crop_h = target_h * scale_factor
+             clean_tile = upscaled_tile.crop((crop_x, crop_y, crop_x + crop_w, crop_y + crop_h))
+             paste_x = target_left * scale_factor
+             paste_y = target_upper * scale_factor
+             full_image.paste(clean_tile, (paste_x, paste_y))
+             del tile, upscaled_tile, clean_tile
+             cleanup_memory()
+             count += 1
+             progress_bar.progress(count / total_tiles, text=f"Upscaling Tile {count}/{total_tiles}...")
+     return full_image
+
+ # --- 4. NEW HELPER FUNCTIONS (Watermark & Metadata) ---
+
+ def apply_watermark(image, text, opacity, size_scale, position):
+     if not text: return image
+     watermark_image = image.convert("RGBA")
+     text_layer = Image.new("RGBA", watermark_image.size, (255, 255, 255, 0))
+     draw = ImageDraw.Draw(text_layer)
+     w, h = watermark_image.size
+     base_font_size = int(h * 0.05)
+     font_size = int(base_font_size * size_scale)
+     try:
+         # Use a scalable TrueType font so the computed font_size (and the Size Scale slider) takes effect
+         font = ImageFont.truetype("DejaVuSans.ttf", font_size)
+     except OSError:
+         # Fall back to Pillow's built-in bitmap font if no TrueType font file is available
+         font = ImageFont.load_default()
+     bbox = draw.textbbox((0, 0), text, font=font)
+     text_width = bbox[2] - bbox[0]
+     text_height = bbox[3] - bbox[1]
+     padding = 20
+     x, y = 0, 0
+     if position == "Bottom Right":
+         x, y = w - text_width - padding, h - text_height - padding
+     elif position == "Bottom Left":
+         x, y = padding, h - text_height - padding
+     elif position == "Top Right":
+         x, y = w - text_width - padding, padding
+     elif position == "Top Left":
+         x, y = padding, padding
+     elif position == "Center":
+         x, y = (w - text_width) // 2, (h - text_height) // 2
+     alpha_val = int(opacity * 255)
+     text_color = (255, 255, 255, alpha_val)
+     draw.text((x, y), text, font=font, fill=text_color)
+     output = Image.alpha_composite(watermark_image, text_layer)
+     if image.mode == 'RGB': return output.convert('RGB')
+     return output
+
+ def convert_image_to_bytes_with_metadata(img, author=None, copyright_text=None):
+     buf = io.BytesIO()
+     pnginfo = PngImagePlugin.PngInfo()
+     if author:
+         pnginfo.add_text("Author", author)
+     pnginfo.add_text("Software", "AI Image Lab Pro")
+     if copyright_text:
+         pnginfo.add_text("Copyright", copyright_text)
+     img.save(buf, format="PNG", pnginfo=pnginfo)
+     return buf.getvalue()
+
+ # --- 5. MAIN APP ---
+
+ def main():
+     st.title("✨ AI Image Lab: Professional")
+
+     # --- Sidebar Section 1: Input & Metadata ---
+     st.sidebar.header("1. Input & Metadata")
+     uploaded_file = st.file_uploader("Upload Image", type=["png", "jpg", "jpeg", "webp"])
+
+     clean_metadata_on_load = st.sidebar.checkbox("Strip Original Metadata on Load", value=False)
+
+     if uploaded_file is not None:
+         file_bytes = uploaded_file.getvalue()
+         initial_img_inspect = Image.open(io.BytesIO(file_bytes))
+         with st.sidebar.expander("🔍 View Original Metadata"):
+             if initial_img_inspect.info:
+                 safe_info = {k: v for k, v in initial_img_inspect.info.items() if isinstance(v, (str, int, float))}
+                 if safe_info: st.json(safe_info)
+                 else: st.write("Binary metadata hidden.")
+             else: st.write("No metadata found.")
+
+         if clean_metadata_on_load:
+             clean_img = Image.new(initial_img_inspect.mode, initial_img_inspect.size)
+             clean_img.putdata(list(initial_img_inspect.getdata()))
+             buf = io.BytesIO()
+             clean_img.save(buf, format="PNG")
+             processing_bytes = buf.getvalue()
+             st.sidebar.success("Metadata stripped.")
+         else:
+             processing_bytes = file_bytes
+
+     # --- Sidebar Section 2: AI Processing ---
+     st.sidebar.header("2. AI Processing")
+     remove_bg = st.sidebar.checkbox("Remove Background", value=True)
+
+     if remove_bg:
+         bg_model = st.sidebar.selectbox("AI Model", ["BiRefNet (Heavy)", "RMBG-1.4", "VitMatte (Refiner)"], index=0)
+     else:
+         bg_model = "None"
+
+     upscale_mode = st.sidebar.radio("Magnification", ["None", "2x", "4x"])
+     if upscale_mode != "None":
+         grid_n = st.sidebar.slider("Grid Split", 2, 8, 4)
+     else:
+         grid_n = 2
+
+     # --- Sidebar Section 3: Studio Tools ---
+     st.sidebar.markdown("---")
+     st.sidebar.header("3. Studio Tools")
+
+     bg_color_mode = st.sidebar.selectbox("Background Color", ["Transparent", "White", "Black", "Custom"])
+     custom_bg_color = "#FFFFFF"
+     if bg_color_mode == "Custom":
+         custom_bg_color = st.sidebar.color_picker("Pick color", "#FF0000")
+
+     enable_smart_crop = st.sidebar.checkbox("Smart Auto-Crop (to Subject)", value=False)
+     crop_padding = 0
+     if enable_smart_crop:
+         crop_padding = st.sidebar.slider("Auto-Crop Padding", 0, 500, 50)
+
+     st.sidebar.caption("Manual Crop (px)")
+     col_c1, col_c2 = st.sidebar.columns(2)
+     with col_c1:
+         crop_top = st.number_input("Top", min_value=0, value=0, step=10)
+         crop_left = st.number_input("Left", min_value=0, value=0, step=10)
+     with col_c2:
+         crop_bottom = st.number_input("Bottom", min_value=0, value=0, step=10)
+         crop_right = st.number_input("Right", min_value=0, value=0, step=10)
+
+     rotate_angle = st.sidebar.slider("Rotate", -180, 180, 0, 1)
+
+     st.sidebar.subheader("Watermark")
+     wm_text = st.sidebar.text_input("Watermark Text")
+     wm_opacity = st.sidebar.slider("Opacity", 0.1, 1.0, 0.5)
+     wm_size = st.sidebar.slider("Size Scale", 0.5, 3.0, 1.0)
+     wm_position = st.sidebar.selectbox("Position", ["Bottom Right", "Bottom Left", "Top Right", "Top Left", "Center"])
+
+     # --- Sidebar Section 4: Output Settings ---
+     st.sidebar.markdown("---")
+     st.sidebar.header("4. Output Settings")
+     meta_author = st.sidebar.text_input("Author Name")
+     meta_copyright = st.sidebar.text_input("Copyright Notice")
+
+     # --- Main Application Logic ---
+     if uploaded_file is not None:
+         if remove_bg:
+             with st.spinner(f"Removing background using {bg_model}..."):
+                 processed_image = process_background_removal(processing_bytes, bg_model)
+         else:
+             processed_image = Image.open(io.BytesIO(processing_bytes)).convert("RGBA")
+
+         if upscale_mode != "None":
+             scale = 4 if "4x" in upscale_mode else 2
+             cache_key = f"{uploaded_file.name}_clean{clean_metadata_on_load}_{bg_model}_{scale}_{grid_n}_v11"
+             if "upscale_cache" not in st.session_state: st.session_state.upscale_cache = {}
+             if cache_key in st.session_state.upscale_cache:
+                 processed_image = st.session_state.upscale_cache[cache_key]
+                 st.info("✅ Loaded upscaled image from cache")
+             else:
+                 progress_bar = st.progress(0, text="Initializing AI models...")
+                 processed_image = process_tiled_upscale(processed_image, scale, grid_n, progress_bar)
+                 progress_bar.empty()
+                 st.session_state.upscale_cache[cache_key] = processed_image
+
+         final_image = processed_image.copy()
+
+         # A. Rotation
+         if rotate_angle != 0:
+             final_image = final_image.rotate(rotate_angle, expand=True)
+
+         # B. Smart Auto-Crop
+         if enable_smart_crop and final_image.mode == 'RGBA':
+             alpha = final_image.getchannel('A')
+             bbox = alpha.getbbox()
+             if bbox:
+                 left, upper, right, lower = bbox
+                 w, h = final_image.size
+                 left = max(0, left - crop_padding)
+                 upper = max(0, upper - crop_padding)
+                 right = min(w, right + crop_padding)
+                 lower = min(h, lower + crop_padding)
+                 final_image = final_image.crop((left, upper, right, lower))
+
+         # C. Manual Crop
+         # Applied after Smart Crop so you can refine it
+         w, h = final_image.size
+         # Ensure we don't crop beyond image dimensions
+         valid_left = min(crop_left, w - 1)
+         valid_top = min(crop_top, h - 1)
+         valid_right = min(crop_right, w - valid_left - 1)
+         valid_bottom = min(crop_bottom, h - valid_top - 1)
+
+         if valid_left > 0 or valid_top > 0 or valid_right > 0 or valid_bottom > 0:
+             final_image = final_image.crop((
+                 valid_left,
+                 valid_top,
+                 w - valid_right,
+                 h - valid_bottom
+             ))
+
+         # D. Background Compositing
+         if bg_color_mode != "Transparent" and final_image.mode == 'RGBA':
+             if bg_color_mode == "White": bg = Image.new("RGBA", final_image.size, "WHITE")
+             elif bg_color_mode == "Black": bg = Image.new("RGBA", final_image.size, "BLACK")
+             else: bg = Image.new("RGBA", final_image.size, custom_bg_color)
+             bg.alpha_composite(final_image)
+             final_image = bg.convert("RGB")
+
+         # E. Watermark
+         if wm_text:
+             final_image = apply_watermark(final_image, wm_text, wm_opacity, wm_size, wm_position)
+
+         # --- Display ---
+         col1, col2 = st.columns(2)
+         with col1:
+             st.subheader("Original")
+             st.image(Image.open(io.BytesIO(file_bytes)), use_container_width=True)
+
+         with col2:
+             st.subheader("Result")
+             st.markdown("""<style>[data-testid="stImage"] {background-image: url('https://i.imgur.com/s1B49hR.png'); background-size: 20px 20px;}</style>""", unsafe_allow_html=True)
+             st.image(final_image, use_container_width=True)
+
+         st.markdown("---")
+         download_data = convert_image_to_bytes_with_metadata(final_image, author=meta_author, copyright_text=meta_copyright)
+         st.download_button(
+             label="💾 Download Result (PNG with Metadata)",
+             data=download_data,
+             file_name="processed_image.png",
+             mime="image/png"
+         )
+
+ if __name__ == "__main__":
+     main()