Your Name committed on
Commit
851b7d3
·
1 Parent(s): a24e5f9

Implement feature detection and editing capabilities using OpenCV, update UI for enhanced user experience, and add opencv-python dependency to requirements.txt.

Browse files
Files changed (2) hide show
  1. app.py +376 -26
  2. requirements.txt +2 -1
app.py CHANGED
@@ -1,10 +1,12 @@
1
  import gradio as gr
2
  import numpy as np
3
- from PIL import Image
 
4
  import os
5
  import io
6
  import base64
7
  import time
 
8
 
9
  # Global variables
10
  FEATURE_TYPES = ["Eyes", "Nose", "Lips", "Face Shape", "Hair", "Body"]
@@ -17,16 +19,371 @@ MODIFICATION_PRESETS = {
17
  "Body": ["Slim", "Athletic", "Curvy", "Muscular"]
18
  }
19
 
20
- # Simplified processing function that doesn't require heavy models
21
- def process_image_simple(image, feature_type, modification_type, intensity, custom_prompt="", use_custom_prompt=False):
 
22
  if image is None:
23
  return None, "Please upload an image first."
24
 
25
- # Create a copy of the image to simulate processing
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
  if isinstance(image, np.ndarray):
27
- processed_image = Image.fromarray(image.copy())
28
  else:
29
- processed_image = image.copy()
30
 
31
  # Get the instruction based on feature and modification
32
  if use_custom_prompt and custom_prompt:
@@ -34,29 +391,18 @@ def process_image_simple(image, feature_type, modification_type, intensity, cust
34
  else:
35
  instruction = f"Applied {feature_type} modification: {modification_type} with intensity {intensity:.1f}"
36
 
37
- # Simulate processing time
38
- time.sleep(1)
39
-
40
- # Add a simple visual indicator to show something happened
41
- # Draw a small colored rectangle in the corner to indicate processing
42
- from PIL import ImageDraw
43
- draw = ImageDraw.Draw(processed_image)
44
- color = (int(255 * intensity), 100, 200)
45
- draw.rectangle((10, 10, 30, 30), fill=color)
46
-
47
- return processed_image, f"Simulated edit: {instruction}\n\nNote: This is a placeholder. In the full version, this would apply AI-powered edits using PyTorch models. For actual editing, please use the Pinokio local version which supports GPU acceleration."
48
 
49
  # UI Components
50
  def create_ui():
51
  with gr.Blocks(title="PortraitPerfectAI - Facial & Body Feature Editor") as app:
52
  gr.Markdown("# PortraitPerfectAI - Facial & Body Feature Editor")
53
  gr.Markdown("Upload an image and use the controls to edit specific facial and body features.")
54
- gr.Markdown("⚠️ **Note:** This is a simplified web demo. For full AI-powered editing, download the Pinokio package below.")
55
 
56
  with gr.Row():
57
  with gr.Column(scale=1):
58
  # Input controls
59
- input_image = gr.Image(label="Upload Image", type="pil")
60
 
61
  with gr.Group():
62
  gr.Markdown("### Feature Selection")
@@ -98,14 +444,18 @@ def create_ui():
98
 
99
  with gr.Column(scale=1):
100
  # Output display
101
- output_image = gr.Image(label="Edited Image", type="pil")
 
 
 
 
102
 
103
  # Download Pinokio package section
104
  with gr.Accordion("Download Full Version for Local Use", open=True):
105
  gr.Markdown("""
106
  ### Get the Full AI-Powered Version
107
 
108
- This web demo has limited functionality. For the complete experience with GPU acceleration:
109
 
110
  1. Download the Pinokio package below
111
  2. Install [Pinokio](https://pinokio.computer/) on your computer
@@ -127,7 +477,7 @@ def create_ui():
127
  - Intuitive sliders and controls
128
  - Non-destructive editing workflow
129
 
130
- **Note:** The web version has limited functionality. For full AI-powered editing with GPU acceleration, download the Pinokio package.
131
  """)
132
 
133
  # Event handlers
@@ -141,7 +491,7 @@ def create_ui():
141
  )
142
 
143
  edit_button.click(
144
- fn=process_image_simple,
145
  inputs=[
146
  input_image,
147
  feature_type,
@@ -150,16 +500,16 @@ def create_ui():
150
  custom_prompt,
151
  use_custom_prompt
152
  ],
153
- outputs=[output_image, status_text]
154
  )
155
 
156
  def reset_image():
157
- return None, "Image reset."
158
 
159
  reset_button.click(
160
  fn=reset_image,
161
  inputs=[],
162
- outputs=[output_image, status_text]
163
  )
164
 
165
  # Add ethical usage notice
 
1
  import gradio as gr
2
  import numpy as np
3
+ from PIL import Image, ImageDraw, ImageFont
4
+ import cv2
5
  import os
6
  import io
7
  import base64
8
  import time
9
+ import random
10
 
11
  # Global variables
12
  FEATURE_TYPES = ["Eyes", "Nose", "Lips", "Face Shape", "Hair", "Body"]
 
19
  "Body": ["Slim", "Athletic", "Curvy", "Muscular"]
20
  }
21
 
22
# Feature detection function
def detect_features(image):
    """Detect facial features in the image using OpenCV Haar cascades.

    Args:
        image: PIL Image or numpy array. RGB, RGBA and 2-D grayscale
            inputs are accepted; non-RGB inputs are converted.

    Returns:
        A ``(visualization, detected_features)`` pair: a PIL Image with
        the detected regions outlined and labelled, and a dict mapping
        "faces", "eyes", "nose" and "lips" to lists of (x, y, w, h)
        boxes in image coordinates. Nose and lips boxes are heuristic
        approximations derived from the face box, not detector output.
        If ``image`` is None, returns ``(None, <error string>)`` — kept
        as-is for backward compatibility with existing callers.
    """
    if image is None:
        return None, "Please upload an image first."

    # Convert to numpy array if it's a PIL Image
    if isinstance(image, Image.Image):
        img_array = np.array(image)
    else:
        img_array = image.copy()

    # Normalize to 3-channel RGB: Gradio can deliver 2-D grayscale or
    # 4-channel RGBA uploads, and cv2.COLOR_RGB2GRAY raises on both.
    if img_array.ndim == 2:
        img_array = cv2.cvtColor(img_array, cv2.COLOR_GRAY2RGB)
    elif img_array.shape[2] == 4:
        img_array = cv2.cvtColor(img_array, cv2.COLOR_RGBA2RGB)

    # Convert to grayscale for face detection
    gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)

    # Load the pre-trained Haar cascades bundled with OpenCV
    face_cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
    eye_cascade_path = cv2.data.haarcascades + 'haarcascade_eye.xml'

    face_cascade = cv2.CascadeClassifier(face_cascade_path)
    eye_cascade = cv2.CascadeClassifier(eye_cascade_path)

    # Detect faces (scaleFactor=1.3, minNeighbors=5)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    # Create a copy for visualization so the input is left untouched
    visualization = img_array.copy()

    # Dictionary to store detected feature bounding boxes
    detected_features = {
        "faces": [],
        "eyes": [],
        "nose": [],
        "lips": []
    }

    # Annotate each detected face and derive sub-feature boxes
    for (x, y, w, h) in faces:
        detected_features["faces"].append((x, y, w, h))

        # Draw face rectangle (green)
        cv2.rectangle(visualization, (x, y), (x+w, y+h), (0, 255, 0), 2)

        # Region of interest for the face
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = visualization[y:y+h, x:x+w]

        # Detect eyes inside the face ROI
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            # Store eye coordinates translated back to image coordinates
            detected_features["eyes"].append((x+ex, y+ey, ew, eh))

            # Draw eye rectangle (red); roi_color is a view into visualization
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (255, 0, 0), 2)

        # Approximate nose position: a fixed 30x30 box at the face center
        nose_x = x + w//2 - 15
        nose_y = y + h//2 - 10
        nose_w = 30
        nose_h = 30
        detected_features["nose"].append((nose_x, nose_y, nose_w, nose_h))

        # Draw nose rectangle (blue in RGB ordering)
        cv2.rectangle(visualization, (nose_x, nose_y), (nose_x+nose_w, nose_y+nose_h), (0, 0, 255), 2)

        # Approximate lips position: lower third of the face box
        lips_x = x + w//4
        lips_y = y + int(h * 0.7)
        lips_w = w//2
        lips_h = h//6
        detected_features["lips"].append((lips_x, lips_y, lips_w, lips_h))

        # Draw lips rectangle (magenta)
        cv2.rectangle(visualization, (lips_x, lips_y), (lips_x+lips_w, lips_y+lips_h), (255, 0, 255), 2)

    # Label the first instance of each detected feature type
    font = cv2.FONT_HERSHEY_SIMPLEX
    if len(detected_features["faces"]) > 0:
        cv2.putText(visualization, 'Face', (faces[0][0], faces[0][1]-10), font, 0.8, (0, 255, 0), 2)

    if len(detected_features["eyes"]) > 0:
        cv2.putText(visualization, 'Eye', (detected_features["eyes"][0][0], detected_features["eyes"][0][1]-5), font, 0.5, (255, 0, 0), 2)

    if len(detected_features["nose"]) > 0:
        cv2.putText(visualization, 'Nose', (detected_features["nose"][0][0], detected_features["nose"][0][1]-5), font, 0.5, (0, 0, 255), 2)

    if len(detected_features["lips"]) > 0:
        cv2.putText(visualization, 'Lips', (detected_features["lips"][0][0], detected_features["lips"][0][1]-5), font, 0.5, (255, 0, 255), 2)

    return Image.fromarray(visualization), detected_features
114
+
115
# Basic image editing function
def edit_image(image, feature_type, modification_type, intensity, detected_features):
    """Apply basic image editing based on the selected feature and modification.

    Args:
        image: PIL Image or RGB numpy array to edit.
        feature_type: one of FEATURE_TYPES ("Eyes", "Nose", "Lips",
            "Face Shape", "Hair", "Body").
        modification_type: preset name for the chosen feature.
        intensity: float in [0, 1] controlling the strength of the effect.
        detected_features: dict from detect_features() with "faces",
            "eyes", "nose" and "lips" box lists; None disables editing.

    Returns:
        PIL.Image with the edit applied, or the input unchanged when
        image/detected_features is None.
    """
    if image is None or detected_features is None:
        return image

    # Convert to numpy array if it's a PIL Image
    if isinstance(image, Image.Image):
        img_array = np.array(image)
    else:
        img_array = image.copy()

    # Create a copy for editing
    edited_img = img_array.copy()

    # Apply different edits based on feature type
    if feature_type == "Eyes" and len(detected_features["eyes"]) > 0:
        for (x, y, w, h) in detected_features["eyes"]:
            # Get the eye region (a view into edited_img)
            eye_region = edited_img[y:y+h, x:x+w]

            if modification_type == "Larger":
                # Scale the eye region up to 1.5x based on intensity
                scale_factor = 1.0 + (intensity * 0.5)
                new_h, new_w = int(h * scale_factor), int(w * scale_factor)

                # Resize the eye region
                resized_eye = cv2.resize(eye_region, (new_w, new_h))

                # Calculate offsets to center the resized eye
                offset_y = (new_h - h) // 2
                offset_x = (new_w - w) // 2

                # Enlarged target region, clamped to the image bounds
                y1 = max(0, y - offset_y)
                y2 = min(edited_img.shape[0], y + h + offset_y)
                x1 = max(0, x - offset_x)
                x2 = min(edited_img.shape[1], x + w + offset_x)

                # Blend the resized eye with the original image
                alpha = 0.7  # Blend factor
                try:
                    # Crop the resized eye to fit the (possibly clamped) target
                    crop_y1 = max(0, offset_y - (y - y1))
                    crop_y2 = crop_y1 + (y2 - y1)
                    crop_x1 = max(0, offset_x - (x - x1))
                    crop_x2 = crop_x1 + (x2 - x1)

                    cropped_eye = resized_eye[crop_y1:crop_y2, crop_x1:crop_x2]

                    # Ensure dimensions match before blending
                    if cropped_eye.shape[0] == (y2 - y1) and cropped_eye.shape[1] == (x2 - x1):
                        edited_img[y1:y2, x1:x2] = cv2.addWeighted(
                            edited_img[y1:y2, x1:x2], 1-alpha, cropped_eye, alpha, 0
                        )
                except Exception as e:
                    # Best-effort: skip this eye if the geometry doesn't line up
                    print(f"Error resizing eye: {e}")

            elif modification_type == "Smaller":
                # Scale the eye region down to 0.7x based on intensity
                scale_factor = 1.0 - (intensity * 0.3)
                new_h, new_w = int(h * scale_factor), int(w * scale_factor)

                # Resize the eye region
                resized_eye = cv2.resize(eye_region, (new_w, new_h))

                # Calculate offsets to center the resized eye
                offset_y = (h - new_h) // 2
                offset_x = (w - new_w) // 2

                # Create a background (use the surrounding area)
                background = edited_img[y:y+h, x:x+w].copy()

                # Paste the resized eye onto the background
                background[offset_y:offset_y+new_h, offset_x:offset_x+new_w] = resized_eye

                # Write the result back into the image
                edited_img[y:y+h, x:x+w] = background

            elif modification_type == "Change Color":
                # Apply a random color tint to the eye region.
                # NOTE(review): the color is random per call, so repeated
                # edits give different results by design.
                c0 = random.randint(0, 255)
                c1 = random.randint(0, 255)
                c2 = random.randint(0, 255)

                # Create a solid color overlay matching the eye region
                overlay = np.ones(eye_region.shape, dtype=np.uint8) * np.array([c0, c1, c2], dtype=np.uint8)

                # Blend the overlay with the eye region
                alpha = intensity * 0.7  # Adjust alpha based on intensity
                edited_img[y:y+h, x:x+w] = cv2.addWeighted(eye_region, 1-alpha, overlay, alpha, 0)

    elif feature_type == "Nose" and len(detected_features["nose"]) > 0:
        for (x, y, w, h) in detected_features["nose"]:
            # Get the nose region
            nose_region = edited_img[y:y+h, x:x+w]

            if modification_type == "Refine":
                # Apply a subtle blur to refine the nose
                blurred_nose = cv2.GaussianBlur(nose_region, (5, 5), 0)

                # Blend the blurred nose with the original
                alpha = intensity * 0.8
                edited_img[y:y+h, x:x+w] = cv2.addWeighted(nose_region, 1-alpha, blurred_nose, alpha, 0)

            elif modification_type == "Reshape" or modification_type == "Resize":
                # Apply a subtle scale transformation (0.8x–1.2x)
                scale_x = 1.0 + (intensity * 0.4 - 0.2)

                # Rotation matrix with angle 0 acts as a centered scale
                center = (w // 2, h // 2)
                M = cv2.getRotationMatrix2D(center, 0, scale_x)

                # Apply transformation
                transformed_nose = cv2.warpAffine(nose_region, M, (w, h))

                # Blend the transformed nose with the original
                alpha = 0.7
                edited_img[y:y+h, x:x+w] = cv2.addWeighted(nose_region, 1-alpha, transformed_nose, alpha, 0)

    elif feature_type == "Lips" and len(detected_features["lips"]) > 0:
        for (x, y, w, h) in detected_features["lips"]:
            # Get the lips region
            lips_region = edited_img[y:y+h, x:x+w]

            if modification_type == "Fuller":
                # Scale the lips region up to 1.3x based on intensity
                scale_factor = 1.0 + (intensity * 0.3)
                new_h, new_w = int(h * scale_factor), int(w * scale_factor)

                # Resize the lips region
                resized_lips = cv2.resize(lips_region, (new_w, new_h))

                # Calculate offsets to center the resized lips
                offset_y = (new_h - h) // 2
                offset_x = (new_w - w) // 2

                # Enlarged target region, clamped to the image bounds
                y1 = max(0, y - offset_y)
                y2 = min(edited_img.shape[0], y + h + offset_y)
                x1 = max(0, x - offset_x)
                x2 = min(edited_img.shape[1], x + w + offset_x)

                # Blend the resized lips with the original image
                alpha = 0.7  # Blend factor
                try:
                    # Crop the resized lips to fit the (possibly clamped) target
                    crop_y1 = max(0, offset_y - (y - y1))
                    crop_y2 = crop_y1 + (y2 - y1)
                    crop_x1 = max(0, offset_x - (x - x1))
                    crop_x2 = crop_x1 + (x2 - x1)

                    cropped_lips = resized_lips[crop_y1:crop_y2, crop_x1:crop_x2]

                    # Ensure dimensions match before blending
                    if cropped_lips.shape[0] == (y2 - y1) and cropped_lips.shape[1] == (x2 - x1):
                        edited_img[y1:y2, x1:x2] = cv2.addWeighted(
                            edited_img[y1:y2, x1:x2], 1-alpha, cropped_lips, alpha, 0
                        )
                except Exception as e:
                    # Best-effort: skip these lips if the geometry doesn't line up
                    print(f"Error resizing lips: {e}")

            elif modification_type == "Thinner":
                # Reduce only the height, down to 0.7x based on intensity
                scale_factor = 1.0 - (intensity * 0.3)
                new_h, new_w = int(h * scale_factor), int(w)

                # Resize the lips region
                resized_lips = cv2.resize(lips_region, (new_w, new_h))

                # Calculate offsets to center the resized lips vertically
                offset_y = (h - new_h) // 2
                offset_x = 0

                # Create a background (use the surrounding area)
                background = edited_img[y:y+h, x:x+w].copy()

                # Paste the resized lips onto the background
                background[offset_y:offset_y+new_h, offset_x:offset_x+new_w] = resized_lips

                # Write the result back into the image
                edited_img[y:y+h, x:x+w] = background

            elif modification_type == "Change Color":
                # Apply a reddish tint to the lips. The arrays here are
                # RGB-ordered (they come from PIL/Gradio), so red is the
                # FIRST channel — (50, 50, 200) would render blue.
                red_tint = np.ones(lips_region.shape, dtype=np.uint8) * np.array([200, 50, 50], dtype=np.uint8)

                # Blend the tint with the lips region
                alpha = intensity * 0.6  # Adjust alpha based on intensity
                edited_img[y:y+h, x:x+w] = cv2.addWeighted(lips_region, 1-alpha, red_tint, alpha, 0)

    elif feature_type == "Face Shape" and len(detected_features["faces"]) > 0:
        for (x, y, w, h) in detected_features["faces"]:
            # Get the face region
            face_region = edited_img[y:y+h, x:x+w]

            if modification_type == "Slim":
                # Slimming effect: squeeze horizontally (0.8x–1.0x)
                scale_x = 1.0 - (intensity * 0.2)

                # Identity rotation matrix, then override the horizontal scale
                center = (w // 2, h // 2)
                M = cv2.getRotationMatrix2D(center, 0, 1.0)
                M[0, 0] = scale_x

                # Apply transformation
                transformed_face = cv2.warpAffine(face_region, M, (w, h))

                # Blend the transformed face with the original
                alpha = 0.7
                edited_img[y:y+h, x:x+w] = cv2.addWeighted(face_region, 1-alpha, transformed_face, alpha, 0)

            elif modification_type == "Round":
                # Rounding effect: soften everything OUTSIDE a circular mask.
                mask = np.zeros((h, w), dtype=np.uint8)
                center = (w // 2, h // 2)
                radius = min(w, h) // 2
                cv2.circle(mask, center, radius, 255, -1)

                # Blur the face; pixels outside the circle get the blend
                blurred_face = cv2.GaussianBlur(face_region, (21, 21), 0)

                # Vectorized blend + mask selection. Equivalent to the
                # per-pixel addWeighted loop but runs in native code
                # instead of O(h*w) Python iterations.
                alpha = intensity * 0.5
                blended = cv2.addWeighted(face_region, 1-alpha, blurred_face, alpha, 0)
                outside = mask == 0
                target = edited_img[y:y+h, x:x+w]
                target[outside] = blended[outside]

    # For other features (Hair, Body, or no detections), apply a simpler effect
    else:
        # Add a visual indicator to show something happened:
        # a small colored rectangle in the corner
        color = (int(255 * intensity), 100, 200)
        cv2.rectangle(edited_img, (10, 10), (30, 30), color, -1)

        # Add text to indicate the modification
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(
            edited_img,
            f"{feature_type}: {modification_type}",
            (40, 25),
            font,
            0.7,
            (255, 255, 255),
            2
        )

    return Image.fromarray(edited_img)
373
+
374
+ # Main processing function
375
+ def process_image(image, feature_type, modification_type, intensity, custom_prompt="", use_custom_prompt=False):
376
+ if image is None:
377
+ return None, None, "Please upload an image first."
378
+
379
+ # Step 1: Detect features and create visualization
380
+ visualization, detected_features = detect_features(image)
381
+
382
+ # Step 2: Apply edits based on detected features
383
  if isinstance(image, np.ndarray):
384
+ processed_image = edit_image(image, feature_type, modification_type, intensity, detected_features)
385
  else:
386
+ processed_image = edit_image(np.array(image), feature_type, modification_type, intensity, detected_features)
387
 
388
  # Get the instruction based on feature and modification
389
  if use_custom_prompt and custom_prompt:
 
391
  else:
392
  instruction = f"Applied {feature_type} modification: {modification_type} with intensity {intensity:.1f}"
393
 
394
+ return processed_image, visualization, f"Edit applied: {instruction}\n\nNote: This is using CPU-based processing. For more advanced AI-powered edits, download the Pinokio local version which supports GPU acceleration."
 
 
 
 
 
 
 
 
 
 
395
 
396
  # UI Components
397
  def create_ui():
398
  with gr.Blocks(title="PortraitPerfectAI - Facial & Body Feature Editor") as app:
399
  gr.Markdown("# PortraitPerfectAI - Facial & Body Feature Editor")
400
  gr.Markdown("Upload an image and use the controls to edit specific facial and body features.")
 
401
 
402
  with gr.Row():
403
  with gr.Column(scale=1):
404
  # Input controls
405
+ input_image = gr.Image(label="Upload Image", type="numpy")
406
 
407
  with gr.Group():
408
  gr.Markdown("### Feature Selection")
 
444
 
445
  with gr.Column(scale=1):
446
  # Output display
447
+ with gr.Tab("Edited Image"):
448
+ output_image = gr.Image(label="Edited Image", type="pil")
449
+
450
+ with gr.Tab("Feature Detection"):
451
+ feature_visualization = gr.Image(label="Detected Features", type="pil")
452
 
453
  # Download Pinokio package section
454
  with gr.Accordion("Download Full Version for Local Use", open=True):
455
  gr.Markdown("""
456
  ### Get the Full AI-Powered Version
457
 
458
+ For more advanced AI-powered editing with GPU acceleration:
459
 
460
  1. Download the Pinokio package below
461
  2. Install [Pinokio](https://pinokio.computer/) on your computer
 
477
  - Intuitive sliders and controls
478
  - Non-destructive editing workflow
479
 
480
+ **Note:** The web version uses CPU-based processing. For more advanced AI-powered editing with GPU acceleration, download the Pinokio package.
481
  """)
482
 
483
  # Event handlers
 
491
  )
492
 
493
  edit_button.click(
494
+ fn=process_image,
495
  inputs=[
496
  input_image,
497
  feature_type,
 
500
  custom_prompt,
501
  use_custom_prompt
502
  ],
503
+ outputs=[output_image, feature_visualization, status_text]
504
  )
505
 
506
  def reset_image():
507
+ return None, None, "Image reset."
508
 
509
  reset_button.click(
510
  fn=reset_image,
511
  inputs=[],
512
+ outputs=[output_image, feature_visualization, status_text]
513
  )
514
 
515
  # Add ethical usage notice
requirements.txt CHANGED
@@ -8,4 +8,5 @@ pillow
8
  numpy
9
  huggingface_hub
10
  safetensors
11
- accelerate
 
 
8
  numpy
9
  huggingface_hub
10
  safetensors
11
+ accelerate
12
+ opencv-python