Translsis committed on
Commit 2a140a6 · verified · 1 Parent(s): 85adcbc

Upload app.py

Files changed (1)
  1. app.py +173 -390
app.py CHANGED
@@ -16,13 +16,7 @@ from transformers import (
     Sam3VideoModel, Sam3VideoProcessor,
     Sam3TrackerModel, Sam3TrackerProcessor
 )
-import json
-from datetime import datetime
-import threading
-import queue
-import uuid

-# ============ THEME SETUP ============
 colors.steel_blue = colors.Color(
     name="steel_blue",
     c50="#EBF3F8",
@@ -85,75 +79,41 @@ class CustomBlueTheme(Soft):

 app_theme = CustomBlueTheme()

-# ============ GLOBAL SETUP ============
 device = "cuda" if torch.cuda.is_available() else "cpu"
 print(f"🖥️ Using compute device: {device}")

-# History storage
-HISTORY_DIR = "processing_history"
-os.makedirs(HISTORY_DIR, exist_ok=True)
-HISTORY_FILE = os.path.join(HISTORY_DIR, "history.json")
-
-# Background processing queue
-processing_queue = queue.Queue()
-processing_results = {}
-
-# Load models
 print("⏳ Loading SAM3 Models permanently into memory...")
+
 try:
+    # 1. Load Image Segmentation Model (Text)
     print(" ... Loading Image Text Model")
     IMG_MODEL = Sam3Model.from_pretrained("DiffusionWave/sam3").to(device)
     IMG_PROCESSOR = Sam3Processor.from_pretrained("DiffusionWave/sam3")

+    # 2. Load Image Tracker Model (Click)
     print(" ... Loading Image Tracker Model")
     TRK_MODEL = Sam3TrackerModel.from_pretrained("DiffusionWave/sam3").to(device)
     TRK_PROCESSOR = Sam3TrackerProcessor.from_pretrained("DiffusionWave/sam3")

+    # 3. Load Video Segmentation Model
     print(" ... Loading Video Model")
+    # Using bfloat16 for video to optimize VRAM
     VID_MODEL = Sam3VideoModel.from_pretrained("DiffusionWave/sam3").to(device, dtype=torch.bfloat16)
     VID_PROCESSOR = Sam3VideoProcessor.from_pretrained("DiffusionWave/sam3")

     print("✅ All Models loaded successfully!")
+
 except Exception as e:
     print(f"❌ CRITICAL ERROR LOADING MODELS: {e}")
-    IMG_MODEL = IMG_PROCESSOR = TRK_MODEL = TRK_PROCESSOR = VID_MODEL = VID_PROCESSOR = None
-
-# ============ HISTORY MANAGEMENT ============
-def load_history():
-    """Load processing history from JSON file"""
-    if os.path.exists(HISTORY_FILE):
-        try:
-            with open(HISTORY_FILE, 'r') as f:
-                return json.load(f)
-        except:
-            return []
-    return []
-
-def save_history(history_item):
-    """Save a new history item"""
-    history = load_history()
-    history.insert(0, history_item)  # Add to beginning
-    history = history[:100]  # Keep last 100 items
-    with open(HISTORY_FILE, 'w') as f:
-        json.dump(history, f, indent=2)
-
-def get_history_display():
-    """Format history for display"""
-    history = load_history()
-    if not history:
-        return "No processing history yet"
-
-    display_text = ""
-    for i, item in enumerate(history[:50], 1):
-        status_emoji = "✅" if item['status'] == 'completed' else "❌"
-        display_text += f"{status_emoji} **{item['type'].upper()}** - {item['timestamp']}\n"
-        display_text += f" Prompt: {item['prompt']}\n"
-        if item.get('output_path'):
-            display_text += f" File: `{os.path.basename(item['output_path'])}`\n"
-        display_text += "\n"
-    return display_text
-
-# ============ UTILITY FUNCTIONS ============
+    IMG_MODEL = None
+    IMG_PROCESSOR = None
+    TRK_MODEL = None
+    TRK_PROCESSOR = None
+    VID_MODEL = None
+    VID_PROCESSOR = None
+
+
+# --- UTILS ---
 def apply_mask_overlay(base_image, mask_data, opacity=0.5):
     """Draws segmentation masks on top of an image."""
     if isinstance(base_image, np.ndarray):
@@ -167,6 +127,7 @@ def apply_mask_overlay(base_image, mask_data, opacity=0.5):
         mask_data = mask_data.cpu().numpy()
     mask_data = mask_data.astype(np.uint8)

+    # Handle dimensions
     if mask_data.ndim == 4: mask_data = mask_data[0]
     if mask_data.ndim == 3 and mask_data.shape[0] == 1: mask_data = mask_data[0]

@@ -207,297 +168,154 @@ def draw_points_on_image(image, points):

     for pt in points:
         x, y = pt
-        r = 8
+        r = 8 # Radius of point
         draw.ellipse((x-r, y-r, x+r, y+r), fill="red", outline="white", width=4)

     return draw_img

-# ============ BACKGROUND PROCESSING WORKER ============
-def background_worker():
-    """Background thread that processes jobs from queue"""
-    while True:
-        try:
-            job = processing_queue.get()
-            if job is None:
-                break
-
-            job_id = job['id']
-            job_type = job['type']
-
-            processing_results[job_id] = {'status': 'processing', 'progress': 0}
-
-            try:
-                if job_type == 'image':
-                    result = process_image_job(job)
-                elif job_type == 'video':
-                    result = process_video_job(job)
-                elif job_type == 'click':
-                    result = process_click_job(job)
-
-                processing_results[job_id] = {
-                    'status': 'completed',
-                    'result': result,
-                    'progress': 100
-                }
-
-                # Save to history
-                save_history({
-                    'id': job_id,
-                    'type': job_type,
-                    'prompt': job.get('prompt', 'N/A'),
-                    'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
-                    'status': 'completed',
-                    'output_path': result.get('output_path')
-                })
-
-            except Exception as e:
-                processing_results[job_id] = {
-                    'status': 'error',
-                    'error': str(e),
-                    'progress': 0
-                }
-                save_history({
-                    'id': job_id,
-                    'type': job_type,
-                    'prompt': job.get('prompt', 'N/A'),
-                    'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
-                    'status': 'error',
-                    'error': str(e)
-                })
-        except Exception as e:
-            print(f"Worker error: {e}")
-
-# Start background worker
-worker_thread = threading.Thread(target=background_worker, daemon=True)
-worker_thread.start()
-
-# ============ JOB PROCESSORS ============
 @spaces.GPU
-def process_image_job(job):
-    """Process image segmentation job"""
-    source_img = job['image']
-    text_query = job['prompt']
-    conf_thresh = job.get('conf_thresh', 0.5)
-
-    if isinstance(source_img, str):
-        source_img = Image.open(source_img)
-
-    pil_image = source_img.convert("RGB")
-    model_inputs = IMG_PROCESSOR(images=pil_image, text=text_query, return_tensors="pt").to(device)
-
-    with torch.no_grad():
-        inference_output = IMG_MODEL(**model_inputs)
-
-    processed_results = IMG_PROCESSOR.post_process_instance_segmentation(
-        inference_output,
-        threshold=conf_thresh,
-        mask_threshold=0.5,
-        target_sizes=model_inputs.get("original_sizes").tolist()
-    )[0]
-
-    annotation_list = []
-    raw_masks = processed_results['masks'].cpu().numpy()
-    raw_scores = processed_results['scores'].cpu().numpy()
-
-    for idx, mask_array in enumerate(raw_masks):
-        label_str = f"{text_query} ({raw_scores[idx]:.2f})"
-        annotation_list.append((mask_array, label_str))
-
-    # Save output
-    output_path = os.path.join(HISTORY_DIR, f"{job['id']}_result.jpg")
-    result_img = apply_mask_overlay(pil_image, raw_masks)
-    result_img.save(output_path)
-
-    return {
-        'image': (pil_image, annotation_list),
-        'output_path': output_path
-    }
+def run_image_segmentation(source_img, text_query, conf_thresh=0.5):
+    if IMG_MODEL is None or IMG_PROCESSOR is None:
+        raise gr.Error("Models failed to load on startup.")
+
+    if source_img is None or not text_query:
+        raise gr.Error("Please provide an image and a text prompt.")
+
+    try:
+        pil_image = source_img.convert("RGB")
+        model_inputs = IMG_PROCESSOR(images=pil_image, text=text_query, return_tensors="pt").to(device)
+
+        with torch.no_grad():
+            inference_output = IMG_MODEL(**model_inputs)
+
+        processed_results = IMG_PROCESSOR.post_process_instance_segmentation(
+            inference_output,
+            threshold=conf_thresh,
+            mask_threshold=0.5,
+            target_sizes=model_inputs.get("original_sizes").tolist()
+        )[0]
+
+        annotation_list = []
+        raw_masks = processed_results['masks'].cpu().numpy()
+        raw_scores = processed_results['scores'].cpu().numpy()
+
+        for idx, mask_array in enumerate(raw_masks):
+            label_str = f"{text_query} ({raw_scores[idx]:.2f})"
+            annotation_list.append((mask_array, label_str))
+
+        return (pil_image, annotation_list)
+
+    except Exception as e:
+        raise gr.Error(f"Error during image processing: {e}")

 @spaces.GPU
-def process_video_job(job):
-    """Process video segmentation job"""
-    source_vid = job['video']
-    text_query = job['prompt']
-    frame_limit = job.get('frame_limit', 60)
-
-    video_cap = cv2.VideoCapture(source_vid)
-    vid_fps = video_cap.get(cv2.CAP_PROP_FPS)
-    vid_w = int(video_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-    vid_h = int(video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
-    video_frames = []
-    counter = 0
-    while video_cap.isOpened():
-        ret, frame = video_cap.read()
-        if not ret or (frame_limit > 0 and counter >= frame_limit): break
-        video_frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
-        counter += 1
-    video_cap.release()
-
-    session = VID_PROCESSOR.init_video_session(video=video_frames, inference_device=device, dtype=torch.bfloat16)
-    session = VID_PROCESSOR.add_text_prompt(inference_session=session, text=text_query)
-
-    output_path = os.path.join(HISTORY_DIR, f"{job['id']}_result.mp4")
-    video_writer = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), vid_fps, (vid_w, vid_h))
-
-    total_frames = len(video_frames)
-    for frame_idx, model_out in enumerate(VID_MODEL.propagate_in_video_iterator(inference_session=session, max_frame_num_to_track=total_frames)):
-        post_processed = VID_PROCESSOR.postprocess_outputs(session, model_out)
-        f_idx = model_out.frame_idx
-        original_pil = Image.fromarray(video_frames[f_idx])
-
-        if 'masks' in post_processed:
-            detected_masks = post_processed['masks']
-            if detected_masks.ndim == 4: detected_masks = detected_masks.squeeze(1)
-            final_frame = apply_mask_overlay(original_pil, detected_masks)
-        else:
-            final_frame = original_pil
-
-        video_writer.write(cv2.cvtColor(np.array(final_frame), cv2.COLOR_RGB2BGR))
-
-        # Update progress
-        progress = int((frame_idx + 1) / total_frames * 100)
-        processing_results[job['id']]['progress'] = progress
-
-    video_writer.release()
-    return {'output_path': output_path}
-
-@spaces.GPU
-def process_click_job(job):
-    """Process click segmentation job"""
-    input_image = job['image']
-    points_state = job['points']
-    labels_state = job['labels']
-
-    if isinstance(input_image, str):
-        input_image = Image.open(input_image)
-
-    input_points = [[points_state]]
-    input_labels = [[labels_state]]
-
-    inputs = TRK_PROCESSOR(images=input_image, input_points=input_points, input_labels=input_labels, return_tensors="pt").to(device)
-
-    with torch.no_grad():
-        outputs = TRK_MODEL(**inputs, multimask_output=False)
-
-    masks = TRK_PROCESSOR.post_process_masks(outputs.pred_masks.cpu(), inputs["original_sizes"], binarize=True)[0]
-    final_img = apply_mask_overlay(input_image, masks[0])
-    final_img = draw_points_on_image(final_img, points_state)
-
-    output_path = os.path.join(HISTORY_DIR, f"{job['id']}_result.jpg")
-    final_img.save(output_path)
-
-    return {
-        'image': final_img,
-        'output_path': output_path
-    }
-
-# ============ UI HANDLERS ============
-def submit_image_job(source_img, text_query, conf_thresh):
-    """Submit image segmentation job to background queue"""
-    if source_img is None or not text_query:
-        return None, "❌ Please provide an image and a prompt", ""
-
-    job_id = str(uuid.uuid4())
-    job = {
-        'id': job_id,
-        'type': 'image',
-        'image': source_img,
-        'prompt': text_query,
-        'conf_thresh': conf_thresh
-    }
-
-    processing_queue.put(job)
-    return None, f"✅ Added to the queue (ID: {job_id[:8]}). Processing...", job_id
-
-def check_image_status(job_id):
-    """Check status of image processing job"""
-    if not job_id or job_id not in processing_results:
-        return None, "Job not found"
-
-    result = processing_results[job_id]
-
-    if result['status'] == 'processing':
-        return None, f"⏳ Processing... {result['progress']}%"
-    elif result['status'] == 'completed':
-        return result['result']['image'], "✅ Done!"
-    else:
-        return None, f"❌ Error: {result.get('error', 'Unknown')}"
-
-def submit_video_job(source_vid, text_query, frame_limit, time_limit):
-    """Submit video segmentation job to background queue"""
-    if not source_vid or not text_query:
-        return None, "❌ Please provide a video and a prompt", ""
-
-    job_id = str(uuid.uuid4())
-    job = {
-        'id': job_id,
-        'type': 'video',
-        'video': source_vid,
-        'prompt': text_query,
-        'frame_limit': frame_limit,
-        'time_limit': time_limit
-    }
-
-    processing_queue.put(job)
-    return None, f"✅ Added to the queue (ID: {job_id[:8]}). Processing...", job_id
-
-def check_video_status(job_id):
-    """Check status of video processing job"""
-    if not job_id or job_id not in processing_results:
-        return None, "Job not found"
-
-    result = processing_results[job_id]
-
-    if result['status'] == 'processing':
-        return None, f"⏳ Processing... {result['progress']}%"
-    elif result['status'] == 'completed':
-        return result['result']['output_path'], "✅ Done!"
-    else:
-        return None, f"❌ Error: {result.get('error', 'Unknown')}"
+def run_image_click_gpu(input_image, x, y, points_state, labels_state):
+    if TRK_MODEL is None or TRK_PROCESSOR is None:
+        raise gr.Error("Tracker Model failed to load.")
+
+    if input_image is None: return input_image, [], []
+    if points_state is None: points_state = []; labels_state = []
+
+    # Append new point
+    points_state.append([x, y])
+    labels_state.append(1) # 1 indicates a positive click (foreground)
+
+    try:
+        # Prepare inputs format: [Batch, Point_Group, Point_Idx, Coord]
+        input_points = [[points_state]]
+        input_labels = [[labels_state]]
+
+        inputs = TRK_PROCESSOR(images=input_image, input_points=input_points, input_labels=input_labels, return_tensors="pt").to(device)
+
+        with torch.no_grad():
+            # multimask_output=True usually helps with ambiguity, but let's default to best mask for simplicity here
+            outputs = TRK_MODEL(**inputs, multimask_output=False)
+
+        # Post process
+        masks = TRK_PROCESSOR.post_process_masks(outputs.pred_masks.cpu(), inputs["original_sizes"], binarize=True)[0]
+
+        # Overlay mask
+        # masks shape is [1, 1, H, W] for single object tracking
+        final_img = apply_mask_overlay(input_image, masks[0])
+
+        # Draw the visual points on top
+        final_img = draw_points_on_image(final_img, points_state)
+
+        return final_img, points_state, labels_state
+
+    except Exception as e:
+        print(f"Tracker Error: {e}")
+        return input_image, points_state, labels_state

 def image_click_handler(image, evt: gr.SelectData, points_state, labels_state):
-    """Handle click events for interactive segmentation"""
+    # Wrapper to handle the Gradio select event
     x, y = evt.index
-
-    if points_state is None: points_state = []
-    if labels_state is None: labels_state = []
-
-    points_state.append([x, y])
-    labels_state.append(1)
-
-    # Process immediately (can be changed to background if needed)
-    job_id = str(uuid.uuid4())
-    job = {
-        'id': job_id,
-        'type': 'click',
-        'image': image,
-        'points': points_state,
-        'labels': labels_state
-    }
-
+    return run_image_click_gpu(image, x, y, points_state, labels_state)
+
+def calc_timeout_duration(vid_file, *args):
+    return args[-1] if args else 60
+
+@spaces.GPU(duration=calc_timeout_duration)
+def run_video_segmentation(source_vid, text_query, frame_limit, time_limit):
+    if VID_MODEL is None or VID_PROCESSOR is None:
+        raise gr.Error("Video Models failed to load on startup.")
+
+    if not source_vid or not text_query:
+        raise gr.Error("Missing video or prompt.")
+
     try:
-        result = process_click_job(job)
-        return result['image'], points_state, labels_state
+        video_cap = cv2.VideoCapture(source_vid)
+        vid_fps = video_cap.get(cv2.CAP_PROP_FPS)
+        vid_w = int(video_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+        vid_h = int(video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+        video_frames = []
+        counter = 0
+        while video_cap.isOpened():
+            ret, frame = video_cap.read()
+            if not ret or (frame_limit > 0 and counter >= frame_limit): break
+            video_frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+            counter += 1
+        video_cap.release()
+
+        session = VID_PROCESSOR.init_video_session(video=video_frames, inference_device=device, dtype=torch.bfloat16)
+        session = VID_PROCESSOR.add_text_prompt(inference_session=session, text=text_query)
+
+        temp_out_path = tempfile.mktemp(suffix=".mp4")
+        video_writer = cv2.VideoWriter(temp_out_path, cv2.VideoWriter_fourcc(*'mp4v'), vid_fps, (vid_w, vid_h))
+
+        for model_out in VID_MODEL.propagate_in_video_iterator(inference_session=session, max_frame_num_to_track=len(video_frames)):
+            post_processed = VID_PROCESSOR.postprocess_outputs(session, model_out)
+            f_idx = model_out.frame_idx
+            original_pil = Image.fromarray(video_frames[f_idx])
+
+            if 'masks' in post_processed:
+                detected_masks = post_processed['masks']
+                if detected_masks.ndim == 4: detected_masks = detected_masks.squeeze(1)
+                final_frame = apply_mask_overlay(original_pil, detected_masks)
+            else:
+                final_frame = original_pil
+
+            video_writer.write(cv2.cvtColor(np.array(final_frame), cv2.COLOR_RGB2BGR))
+
+        video_writer.release()
+        return temp_out_path, "Video processing completed successfully.✅"
+
     except Exception as e:
-        print(f"Click error: {e}")
-        return image, points_state, labels_state
+        return None, f"Error during video processing: {str(e)}"

-# ============ GRADIO INTERFACE ============
 custom_css="""
-#col-container { margin: 0 auto; max-width: 1200px; }
+#col-container { margin: 0 auto; max-width: 1100px; }
 #main-title h1 { font-size: 2.1em !important; }
-.history-box { max-height: 600px; overflow-y: auto; }
 """

 with gr.Blocks(css=custom_css, theme=app_theme) as demo:
     with gr.Column(elem_id="col-container"):
-        gr.Markdown("# **SAM3: Segment Anything Model 3** 🚀", elem_id="main-title")
-        gr.Markdown("Process images/videos with **background processing** - no waiting!")
+        gr.Markdown("# **SAM3: Segment Anything Model 3**", elem_id="main-title")
+        gr.Markdown("Segment objects in image or video using **SAM3** with Text Prompts or Interactive Clicks.")

         with gr.Tabs():
-            # ===== IMAGE SEGMENTATION TAB =====
-            with gr.Tab("📷 Image Segmentation"):
+            with gr.Tab("Image Segmentation"):
                 with gr.Row():
                     with gr.Column(scale=1):
                         image_input = gr.Image(label="Upload Image", type="pil", height=350)
@@ -505,28 +323,29 @@ with gr.Blocks(css=custom_css, theme=app_theme) as demo:
                         with gr.Accordion("Advanced Settings", open=False):
                             conf_slider = gr.Slider(0.0, 1.0, value=0.45, step=0.05, label="Confidence Threshold")

-                        btn_submit_img = gr.Button("🚀 Submit Job (Background)", variant="primary")
-                        btn_check_img = gr.Button("🔍 Check Status", variant="secondary")
-                        job_id_img = gr.Textbox(label="Job ID", visible=False)
+                        btn_process_img = gr.Button("Segment Image", variant="primary")

                     with gr.Column(scale=1.5):
                         image_result = gr.AnnotatedImage(label="Segmented Result", height=410)
-                        status_img = gr.Textbox(label="Status", interactive=False)

-                btn_submit_img.click(
-                    fn=submit_image_job,
-                    inputs=[image_input, txt_prompt_img, conf_slider],
-                    outputs=[image_result, status_img, job_id_img]
-                )
-
-                btn_check_img.click(
-                    fn=check_image_status,
-                    inputs=[job_id_img],
-                    outputs=[image_result, status_img]
-                )
+                gr.Examples(
+                    examples=[
+                        ["examples/player.jpg", "player in white", 0.5],
+                    ],
+                    inputs=[image_input, txt_prompt_img, conf_slider],
+                    outputs=[image_result],
+                    fn=run_image_segmentation,
+                    cache_examples=False,
+                    label="Image Examples"
+                )
+
+                btn_process_img.click(
+                    fn=run_image_segmentation,
+                    inputs=[image_input, txt_prompt_img, conf_slider],
+                    outputs=[image_result]
+                )

-            # ===== VIDEO SEGMENTATION TAB =====
-            with gr.Tab("🎥 Video Segmentation"):
+            with gr.Tab("Video Segmentation"):
                 with gr.Row():
                     with gr.Column():
                         video_input = gr.Video(label="Upload Video", format="mp4", height=320)
@@ -536,35 +355,36 @@ with gr.Blocks(css=custom_css, theme=app_theme) as demo:
                         frame_limiter = gr.Slider(10, 500, value=60, step=10, label="Max Frames")
                         time_limiter = gr.Radio([60, 120, 180], value=60, label="Timeout (seconds)")

-                        btn_submit_vid = gr.Button("🚀 Submit Job (Background)", variant="primary")
-                        btn_check_vid = gr.Button("🔍 Check Status", variant="secondary")
-                        job_id_vid = gr.Textbox(label="Job ID", visible=False)
+                        btn_process_vid = gr.Button("Segment Video", variant="primary")

                     with gr.Column():
                         video_result = gr.Video(label="Processed Video")
-                        status_vid = gr.Textbox(label="Status", interactive=False)
-
-                btn_submit_vid.click(
-                    fn=submit_video_job,
-                    inputs=[video_input, txt_prompt_vid, frame_limiter, time_limiter],
-                    outputs=[video_result, status_vid, job_id_vid]
-                )
+                        process_status = gr.Textbox(label="System Status", interactive=False)

-                btn_check_vid.click(
-                    fn=check_video_status,
-                    inputs=[job_id_vid],
-                    outputs=[video_result, status_vid]
+                gr.Examples(
+                    examples=[
+                        ["examples/sample_video.mp4", "players", 120, 120],
+                    ],
+                    inputs=[video_input, txt_prompt_vid, frame_limiter, time_limiter],
+                    outputs=[video_result, process_status],
+                    fn=run_video_segmentation,
+                    cache_examples=False,
+                    label="Video Examples"
+                )
+
+                btn_process_vid.click(
+                    run_video_segmentation,
+                    inputs=[video_input, txt_prompt_vid, frame_limiter, time_limiter],
+                    outputs=[video_result, process_status]
                 )

-            # ===== CLICK SEGMENTATION TAB =====
-            with gr.Tab("👆 Click Segmentation"):
+            with gr.Tab("Image Click Segmentation"):
                 with gr.Row():
                     with gr.Column(scale=1):
                         img_click_input = gr.Image(type="pil", label="Upload Image", interactive=True, height=450)
-                        gr.Markdown("**Instructions:** Click on the object you want to segment")

                         with gr.Row():
-                            img_click_clear = gr.Button("🔄 Clear Points & Reset", variant="primary")
+                            img_click_clear = gr.Button("Clear Points & Reset", variant="primary")

                 st_click_points = gr.State([])
                 st_click_labels = gr.State([])
@@ -582,43 +402,6 @@ with gr.Blocks(css=custom_css, theme=app_theme) as demo:
                     lambda: (None, [], []),
                     outputs=[img_click_output, st_click_points, st_click_labels]
                 )
-
-            # ===== HISTORY TAB =====
-            with gr.Tab("📜 Processing History"):
-                with gr.Row():
-                    with gr.Column():
-                        btn_refresh_history = gr.Button("🔄 Refresh History", variant="primary")
-                        history_display = gr.Markdown(value=get_history_display(), elem_classes="history-box")
-
-                        with gr.Accordion("Instructions", open=False):
-                            gr.Markdown("""
-                            ### Saved history:
-                            - ✅ **Completed**: the file was processed successfully
-                            - ❌ **Error**: processing failed
-                            - All output files are stored in the `processing_history/` directory
-                            - The system keeps the 100 most recent entries
-                            """)
-
-                btn_refresh_history.click(
-                    fn=get_history_display,
-                    outputs=[history_display]
-                )
-
-            # ===== BATCH PROCESSING TAB =====
-            with gr.Tab("⚙️ Batch Processing"):
-                gr.Markdown("### Batch Processing (Coming Soon)")
-                gr.Markdown("""
-                This feature will let you:
-                - Upload multiple images/videos at once
-                - Process them sequentially and automatically
-                - Download all results as a ZIP
-                """)

 if __name__ == "__main__":
-    demo.launch(
-        css=custom_css,
-        theme=app_theme,
-        ssr_mode=False,
-        mcp_server=True,
-        show_error=True
-    )
+    demo.launch(ssr_mode=False, mcp_server=True, show_error=True)
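
A note on the point-prompt nesting that `run_image_click_gpu` builds: following the `[Batch, Point_Group, Point_Idx, Coord]` comment in the diff, each click list is wrapped twice before reaching the processor. A minimal sketch with made-up click coordinates (the values below are illustrative only):

```python
# Two positive clicks on one object in a single image.
points = [[120, 80], [200, 150]]   # (x, y) pairs collected from the UI
labels = [1, 1]                    # 1 = foreground click for each point

input_points = [[points]]          # [batch=1][point_group=1][n_points][coord]
input_labels = [[labels]]          # [batch=1][point_group=1][n_points]
```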
 
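The `@spaces.GPU(duration=calc_timeout_duration)` decorator sizes the GPU reservation from the user-selected timeout. A minimal sketch of that pattern, assuming the ZeroGPU `spaces` package accepts a callable for `duration` that receives the same arguments as the decorated function; the function and argument names here are illustrative, not part of the commit:

```python
import spaces

def pick_duration(video_path, prompt, frame_limit, time_limit):
    # Mirrors calc_timeout_duration: reserve as many GPU seconds as the
    # timeout chosen in the app's radio button (60/120/180).
    return time_limit

@spaces.GPU(duration=pick_duration)
def segment_video(video_path, prompt, frame_limit, time_limit):
    ...  # GPU inference runs inside the reserved window
```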