milliyin committed on
Commit
5204364
·
verified ·
1 Parent(s): 3cfe471

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +442 -299
app.py CHANGED
@@ -3,7 +3,12 @@ import io
3
  import base64
4
  import time
5
  import logging
 
 
 
6
  from pathlib import Path
 
 
7
 
8
  import gradio as gr
9
  from gradio_client import Client
@@ -13,18 +18,124 @@ from PIL import Image
13
  logging.basicConfig(level=logging.INFO)
14
  logger = logging.getLogger(__name__)
15
 
16
- # ───────── Constants ─────────
17
- PREDICT_TIMEOUT = 600
18
- GPU_WARM_WINDOW = 900
19
- MAX_RETRIES = 1
20
- MAX_QUEUE_SIZE = 50 # Maximum number of requests in queue
21
- DEFAULT_CONCURRENCY_LIMIT = 3 # Number of concurrent backend calls
22
 
23
- # ───────── Backend connection ─────────
24
  HF_TOKEN = os.getenv("HF_TOKEN")
25
  if not HF_TOKEN:
26
  raise ValueError("HF_TOKEN environment variable is required")
27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  backend_status = {
29
  "client": None,
30
  "connected": False,
@@ -56,10 +167,81 @@ def check_backend_connection():
56
  return False, "🟑 Model is starting up. Please wait 3‑4 min."
57
  return False, f"πŸ”΄ Backend error: {e}"
58
 
59
- # initial probe
60
  check_backend_connection()
61
 
62
- # ───────── Helpers ─────────
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
  def image_to_base64(image: Image.Image) -> str:
64
  if image is None:
65
  return ""
@@ -69,7 +251,7 @@ def image_to_base64(image: Image.Image) -> str:
69
  image.save(buf, format="PNG")
70
  return base64.b64encode(buf.getvalue()).decode()
71
 
72
- def base64_to_image(b64: str) -> Image.Image | None:
73
  if not b64:
74
  return None
75
  try:
@@ -78,174 +260,146 @@ def base64_to_image(b64: str) -> Image.Image | None:
78
  logger.error(f"Failed to decode base64 β†’ image: {e}")
79
  return None
80
 
81
- # ───────── Queue Status ─────────
82
- def get_queue_status():
83
- """Get current queue status for display"""
84
- try:
85
- # This would need to be implemented based on your queue system
86
- # For now, return a placeholder
87
- return "πŸ“Š Queue: Ready for requests"
88
- except Exception as e:
89
- return f"πŸ“Š Queue status unavailable: {e}"
90
-
91
- # ───────── UI ↔ Backend bridge ─────────
92
- def call_backend_with_retry(input_image: Image.Image, category: str, gender: str, *, max_retries: int = MAX_RETRIES):
93
- """Single‑shot call (no more than `max_retries` times) with queue handling."""
94
 
 
 
95
  if input_image is None:
96
- return None, None, "❌ Please upload an image.", gr.update(interactive=True)
97
-
98
- if not backend_status["connected"]:
99
- ok, msg = check_backend_connection()
100
- if not ok:
101
- return None, None, msg, gr.update(interactive=True)
102
-
103
- client: Client = backend_status["client"]
104
- img_b64 = image_to_base64(input_image)
105
-
106
- # Add queue position info
107
- start_time = time.time()
108
- logger.info(f"Request queued at {time.strftime('%H:%M:%S')}")
109
-
110
- for attempt in range(max_retries):
111
- try:
112
- logger.info(f"Backend call #{attempt+1}")
113
-
114
- # Add timeout to prevent hanging
115
- result = client.predict(
116
- img_b64,
117
- category,
118
- gender,
119
- api_name="/predict",
120
- )
121
- dt = time.time() - start_time
122
-
123
- if not result or len(result) < 4:
124
- raise ValueError("Invalid response structure from backend")
125
-
126
- _, overlay_b64, bg_b64, status = result
127
- overlay_img = base64_to_image(overlay_b64)
128
- bg_img = base64_to_image(bg_b64)
129
-
130
- if overlay_img is None or bg_img is None:
131
- raise ValueError("Failed to decode backend images")
132
-
133
- if not status.startswith("βœ…"):
134
- status = "βœ… " + status
135
- status += f" (⏱ Total: {dt:.1f}s)"
136
-
137
- logger.info(f"Request completed successfully in {dt:.1f}s")
138
- return overlay_img, bg_img, status, gr.update(interactive=True)
139
-
140
- except Exception as e:
141
- logger.error(f"Attempt {attempt+1} failed: {e}")
142
- if attempt == max_retries - 1:
143
- return None, None, f"❌ {e}", gr.update(interactive=True)
144
- time.sleep(1)
145
 
146
- return None, None, "❌ Unknown error", gr.update(interactive=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
147
 
148
- def disable_button():
149
- return gr.update(interactive=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
150
 
151
- def show_queue_position():
152
- """Show current position in queue"""
153
- return "⏳ Request added to queue..."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
154
 
155
- # ───────── CSS ─────────
156
  custom_css = """
157
  .gradio-container {
158
  background: linear-gradient(135deg, #3b4371 0%, #2d1b69 25%, #673ab7 50%, #8e24aa 75%, #6a1b9a 100%);
159
  font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
160
  min-height: 100vh;
161
  }
162
- .contain {
163
- background: rgba(255, 255, 255, 0.95);
164
- border-radius: 15px;
165
- padding: 25px;
166
- margin: 15px;
167
- box-shadow: 0 10px 30px rgba(0, 0, 0, 0.2);
168
- backdrop-filter: blur(10px);
169
- }
170
- .queue-status {
171
  background: linear-gradient(135deg, #4CAF50, #45a049);
172
  color: white;
173
- padding: 10px;
174
- border-radius: 8px;
175
  text-align: center;
176
  margin: 10px 0;
177
- font-weight: 500;
 
178
  }
 
179
  .queue-warning {
180
  background: linear-gradient(135deg, #ff9800, #f57c00);
181
  color: white;
182
- padding: 10px;
183
- border-radius: 8px;
184
  text-align: center;
185
  margin: 10px 0;
186
- font-weight: 500;
187
- }
188
- .title-container {
189
- text-align: center;
190
- margin-bottom: 25px;
191
- padding: 20px;
192
- background: linear-gradient(135deg, #673ab7, #8e24aa);
193
- border-radius: 12px;
194
- box-shadow: 0 5px 20px rgba(103, 58, 183, 0.4);
195
  }
196
- .title-container h1 {
 
 
197
  color: white;
198
- font-size: 2.2em;
199
- font-weight: bold;
200
- margin: 0;
201
- text-shadow: 1px 1px 3px rgba(0, 0, 0, 0.3);
202
- }
203
- .info-bar {
204
- background: linear-gradient(135deg, #7c4dff, #6a1b9a);
205
  padding: 12px;
206
- border-radius: 8px;
207
- margin-bottom: 20px;
208
- color: white;
209
  text-align: center;
210
- font-weight: 500;
211
- box-shadow: 0 3px 12px rgba(124, 77, 255, 0.3);
212
- }
213
- .section-header {
214
- background: linear-gradient(135deg, #e1bee7, #d1c4e9);
215
- padding: 12px;
216
- border-radius: 8px;
217
- margin-bottom: 15px;
218
- border-left: 4px solid #673ab7;
219
- }
220
- .section-header h3 {
221
- margin: 0;
222
- color: #333;
223
  font-weight: 600;
 
224
  }
225
- .input-group {
226
- background: rgba(255, 255, 255, 0.85);
227
- padding: 18px;
228
- border-radius: 12px;
229
- margin-bottom: 15px;
230
- border: 1px solid rgba(103, 58, 183, 0.2);
231
- box-shadow: 0 3px 12px rgba(103, 58, 183, 0.1);
232
- }
233
- .result-section {
234
- background: rgba(255, 255, 255, 0.9);
235
- padding: 18px;
236
  border-radius: 12px;
237
- border: 1px solid rgba(103, 58, 183, 0.2);
238
- box-shadow: 0 3px 12px rgba(103, 58, 183, 0.1);
239
- }
240
- .tip-box {
241
- background: linear-gradient(135deg, #f3e5f5, #e8eaf6);
242
- padding: 10px;
243
- border-radius: 6px;
244
- margin: 8px 0;
245
- border-left: 3px solid #673ab7;
246
- color: #4a148c;
247
- font-weight: 500;
248
  }
 
249
  button.primary {
250
  background: linear-gradient(135deg, #673ab7, #8e24aa) !important;
251
  border: none !important;
@@ -256,185 +410,174 @@ button.primary {
256
  font-size: 15px !important;
257
  box-shadow: 0 5px 15px rgba(103, 58, 183, 0.4) !important;
258
  }
259
- button.primary:hover {
260
- box-shadow: 0 8px 25px rgba(103, 58, 183, 0.6) !important;
261
- opacity: 0.9 !important;
262
- transform: translateY(-2px) !important;
263
- }
264
- label {
265
- color: #4a148c !important;
266
- font-weight: 600 !important;
267
- }
268
- input, textarea, select {
269
- border: 1px solid rgba(103, 58, 183, 0.3) !important;
270
- border-radius: 6px !important;
271
- }
272
- input:focus, textarea:focus, select:focus {
273
- border-color: #673ab7 !important;
274
- box-shadow: 0 0 0 2px rgba(103, 58, 183, 0.2) !important;
275
- }
276
- .gr-slider input[type="range"] {
277
- accent-color: #673ab7 !important;
278
- }
279
- input[type="checkbox"] {
280
- accent-color: #673ab7 !important;
281
- }
282
- .preserve-aspect-ratio img {
283
- object-fit: contain !important;
284
- width: auto !important;
285
- max-height: 512px !important;
286
- }
287
- .social-links {
288
- text-align: center;
289
- margin: 20px 0;
290
- }
291
- .social-links a {
292
- margin: 0 10px;
293
- padding: 8px 16px;
294
- background: #667eea;
295
- color: white;
296
- text-decoration: none;
297
- border-radius: 8px;
298
- transition: all 0.3s ease;
299
- }
300
- .social-links a:hover {
301
- background: #764ba2;
302
- transform: translateY(-2px);
303
- }
304
- .feature-box {
305
- background: #f8fafc;
306
- border: 1px solid #e2e8f0;
307
  padding: 20px;
308
- border-radius: 12px;
309
  margin: 10px 0;
310
  }
311
  """
312
 
313
- # ───────── Gradio Blocks ─────────
314
- with gr.Blocks(css=custom_css, title="Jewellery Photography Preview") as demo:
315
- # Hero
316
  gr.HTML("""
317
  <div style="text-align: center; margin-bottom: 20px;">
318
- <h1 style="font-size: 2.5em;">🎨 Raresence: AI-Powered Jewellery Photo Preview</h1>
319
- <p style="color: #666;">Upload a jewellery image, select model, and get professional photos instantly</p>
 
 
 
 
320
  </div>
321
  """)
322
 
323
- # Status banner
324
- status_html = gr.HTML()
325
-
326
- # Queue status display
327
- queue_status = gr.HTML()
328
-
329
- def _update_status():
330
- ok, msg = check_backend_connection()
331
- cls = "status-ready" if ok else ("status-starting" if "🟑" in msg else "status-error")
332
- return f'<div class="status-banner {cls}">{msg}</div>'
333
-
334
- def _update_queue_status():
335
- return f'<div class="queue-status">{get_queue_status()}</div>'
336
-
337
- status_html.value = _update_status()
338
- queue_status.value = _update_queue_status()
339
-
340
  with gr.Row():
341
- gr.Button("πŸ”„ Check Status").click(fn=_update_status, outputs=status_html)
342
- gr.Button("πŸ“Š Queue Status").click(fn=_update_queue_status, outputs=queue_status)
343
 
344
- with gr.Column():
345
- with gr.Row():
346
-
347
- with gr.Column(scale=0.4):
348
- gr.HTML("""
349
- <div class="feature-box"">
350
- <h3>πŸ–ΌοΈ Upload Jewellery Image</h3>
351
- <p style="color: #666; font-size: 14px;">Select a clear jewellery image for best results</p>
352
- </div>
353
- """)
354
- gr.Markdown("β€Ž")
355
- gr.Markdown("β€Ž")
356
- input_img = gr.Image(label="Upload image", type="pil", height=400)
357
-
358
- # with gr.Column():
359
-
360
- with gr.Column():
361
- gr.HTML("""
362
- <div class="feature-box">
363
- <h3>🎨 AI Generated Results</h3>
364
- <p style="color: #666; font-size: 14px;">Preview overlay detection and final professional background</p>
365
- </div>
366
- """)
367
-
368
- with gr.Tabs():
369
- with gr.TabItem("Final result"):
370
- info2 = gr.Markdown(value="### Final result")
371
- out_bg = gr.Image(height=400)
372
- with gr.TabItem("Detection overlay"):
373
- info1 = gr.Markdown(value="### Detection overlay")
374
- out_overlay = gr.Image(height=400)
375
- run_btn = gr.Button("🎯 Generate", elem_id="button", variant="primary")
376
-
377
- with gr.Row():
378
- with gr.Column(scale=0.4):
379
- gr.Markdown(value="Setting")
380
- category = gr.Dropdown(label="Jewellery category", choices=["Rings", "Bracelets", "Watches", "Earrings"], value="Bracelets")
381
- gender = gr.Dropdown(label="Model gender", choices=["male", "female"], value="female")
382
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
383
 
384
- out_status = gr.Text(label="Status", interactive=False)
385
 
386
- # Queue information
387
  gr.HTML("""
388
- <div class="tip-box">
389
- <strong>⏳ Queue Information:</strong> Requests are processed in order.
390
- During high traffic, you may experience wait times. The button will be disabled while processing.
391
  </div>
392
  """)
393
 
394
- # ──────── Footer ────────
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
395
  gr.HTML("""
396
- <div style="text-align:center;padding:40px 20px;background:#f8fafc;border:1px solid #e2e8f0;border-radius:16px;margin:30px 0;">
397
- <h3 style="color:#333;">πŸš€ Powered by Snapwear AI</h3>
398
- <p style="color:#666;">
399
- Experience the future of virtual fashion and garment visualization.
400
  </p>
401
- <div class="social-links">
402
- <a href="https://snapwear.io" target="_blank">🌐 Website</a>
403
- <a href="https://www.instagram.com/snapwearai/" target="_blank">πŸ“Έ Instagram</a>
404
- <a href="https://huggingface.co/spaces/SnapwearAI/Snapwear-Texture-Transfer" target="_blank">🎨 Pattern Transfer</a>
405
- </div>
406
- <p style="font-size:12px;color:#999;margin-top:20px;">
407
- Β© 2024 Snapwear AI. Professional AI tools for fashion and design.
408
  </p>
409
  </div>
410
  """)
411
 
412
- # Wire button β†’ backend with queue status updates
413
- run_btn.click(
414
- fn=disable_button,
415
- inputs=None,
416
- outputs=run_btn
417
- ).then(
418
- fn=show_queue_position,
419
- inputs=None,
420
- outputs=out_status
421
- ).then(
422
- fn=call_backend_with_retry,
423
- inputs=[input_img, category, gender],
424
- outputs=[out_overlay, out_bg, out_status, run_btn],
425
- concurrency_limit=DEFAULT_CONCURRENCY_LIMIT, # Limit concurrent backend calls
426
- show_progress=True,
427
- )
428
-
429
-
430
- # ───────── Launch with Queue ─────────
431
  if __name__ == "__main__":
 
 
432
  demo.queue(
433
- max_size=MAX_QUEUE_SIZE, # Maximum requests in queue
434
- default_concurrency_limit=DEFAULT_CONCURRENCY_LIMIT, # Concurrent processing limit
435
  ).launch(
436
  share=False,
437
  server_name="0.0.0.0",
438
  server_port=7860,
439
- show_error=True
440
- )
 
 
 
 
3
  import base64
4
  import time
5
  import logging
6
+ import threading
7
+ import uuid
8
+ from datetime import datetime
9
  from pathlib import Path
10
+ from collections import deque
11
+ from typing import Dict, Optional, Tuple
12
 
13
  import gradio as gr
14
  from gradio_client import Client
 
18
  logging.basicConfig(level=logging.INFO)
19
  logger = logging.getLogger(__name__)
20
 
21
+ # ───────── Queue System Configuration ─────────
22
+ MAX_QUEUE_SIZE = 50
23
+ MAX_CONCURRENT_REQUESTS = 1 # GPU can only handle 1 request at a time
24
+ AVERAGE_PROCESSING_TIME = 15 # seconds
25
+ QUEUE_UPDATE_INTERVAL = 2 # seconds
 
26
 
27
+ # ───────── Backend Configuration ─────────
28
  HF_TOKEN = os.getenv("HF_TOKEN")
29
  if not HF_TOKEN:
30
  raise ValueError("HF_TOKEN environment variable is required")
31
 
32
+ # ───────── Global Queue System ─────────
33
+ class QueueManager:
34
+ def __init__(self):
35
+ self.queue = deque() # (request_id, user_data, timestamp)
36
+ self.processing = {} # request_id -> processing_start_time
37
+ self.completed = {} # request_id -> result
38
+ self.failed = {} # request_id -> error_message
39
+ self.lock = threading.Lock()
40
+ self.stats = {
41
+ 'total_processed': 0,
42
+ 'total_failed': 0,
43
+ 'avg_processing_time': AVERAGE_PROCESSING_TIME
44
+ }
45
+
46
+ def add_request(self, request_id: str, user_data: dict) -> Tuple[int, float]:
47
+ """Add request to queue. Returns (position, estimated_wait)"""
48
+ with self.lock:
49
+ if len(self.queue) >= MAX_QUEUE_SIZE:
50
+ raise Exception("Queue is full. Please try again later.")
51
+
52
+ self.queue.append((request_id, user_data, time.time()))
53
+ position = len(self.queue)
54
+
55
+ # Calculate estimated wait time
56
+ processing_count = len(self.processing)
57
+ queue_ahead = position - 1
58
+
59
+ if processing_count == 0:
60
+ # GPU is free, can start immediately
61
+ estimated_wait = 0
62
+ else:
63
+ # GPU is busy, need to wait for current + queue ahead
64
+ estimated_wait = (queue_ahead + 1) * self.stats['avg_processing_time']
65
+
66
+ logger.info(f"Request {request_id} added to queue. Position: {position}, Est. wait: {estimated_wait:.0f}s")
67
+ return position, estimated_wait
68
+
69
+ def get_next_requests(self, count: int = 1):
70
+ """Get next request to process (only 1 at a time for GPU)"""
71
+ with self.lock:
72
+ if len(self.processing) >= MAX_CONCURRENT_REQUESTS or len(self.queue) == 0:
73
+ return []
74
+
75
+ # Get only the next single request
76
+ request_id, user_data, timestamp = self.queue.popleft()
77
+ self.processing[request_id] = time.time()
78
+
79
+ return [(request_id, user_data)]
80
+
81
+ def complete_request(self, request_id: str, result):
82
+ """Mark request as completed"""
83
+ with self.lock:
84
+ if request_id in self.processing:
85
+ processing_time = time.time() - self.processing[request_id]
86
+ del self.processing[request_id]
87
+ self.completed[request_id] = result
88
+ self.stats['total_processed'] += 1
89
+
90
+ # Update average processing time
91
+ current_avg = self.stats['avg_processing_time']
92
+ self.stats['avg_processing_time'] = (current_avg * 0.8) + (processing_time * 0.2)
93
+
94
+ logger.info(f"Request {request_id} completed in {processing_time:.1f}s")
95
+
96
+ def fail_request(self, request_id: str, error_msg: str):
97
+ """Mark request as failed"""
98
+ with self.lock:
99
+ if request_id in self.processing:
100
+ del self.processing[request_id]
101
+ self.failed[request_id] = error_msg
102
+ self.stats['total_failed'] += 1
103
+ logger.error(f"Request {request_id} failed: {error_msg}")
104
+
105
+ def get_queue_status(self) -> dict:
106
+ """Get current queue status"""
107
+ with self.lock:
108
+ return {
109
+ 'queue_length': len(self.queue),
110
+ 'processing_count': len(self.processing),
111
+ 'total_processed': self.stats['total_processed'],
112
+ 'total_failed': self.stats['total_failed'],
113
+ 'avg_processing_time': self.stats['avg_processing_time'],
114
+ 'max_queue_size': MAX_QUEUE_SIZE,
115
+ 'max_concurrent': MAX_CONCURRENT_REQUESTS
116
+ }
117
+
118
+ def get_request_status(self, request_id: str) -> dict:
119
+ """Get status of specific request"""
120
+ with self.lock:
121
+ if request_id in self.completed:
122
+ return {'status': 'completed', 'result': self.completed[request_id]}
123
+ elif request_id in self.failed:
124
+ return {'status': 'failed', 'error': self.failed[request_id]}
125
+ elif request_id in self.processing:
126
+ processing_time = time.time() - self.processing[request_id]
127
+ return {'status': 'processing', 'time': processing_time}
128
+ else:
129
+ # Check position in queue
130
+ for i, (rid, _, _) in enumerate(self.queue):
131
+ if rid == request_id:
132
+ return {'status': 'queued', 'position': i + 1}
133
+ return {'status': 'not_found'}
134
+
135
+ # Global queue manager
136
+ queue_manager = QueueManager()
137
+
138
+ # ───────── Backend Connection ─────────
139
  backend_status = {
140
  "client": None,
141
  "connected": False,
 
167
  return False, "🟑 Model is starting up. Please wait 3‑4 min."
168
  return False, f"πŸ”΄ Backend error: {e}"
169
 
170
+ # Initial connection check
171
  check_backend_connection()
172
 
173
+ # ───────── Queue Processing Worker ─────────
174
+ def queue_worker():
175
+ """Background worker to process queue - one request at a time"""
176
+ while True:
177
+ try:
178
+ requests = queue_manager.get_next_requests()
179
+
180
+ if not requests:
181
+ time.sleep(1)
182
+ continue
183
+
184
+ # Process single request (GPU limitation)
185
+ request_id, user_data = requests[0]
186
+ logger.info(f"Starting processing request {request_id}")
187
+
188
+ # Process synchronously since GPU can only handle one
189
+ process_single_request(request_id, user_data)
190
+
191
+ # Small delay before checking for next request
192
+ time.sleep(0.5)
193
+
194
+ except Exception as e:
195
+ logger.error(f"Queue worker error: {e}")
196
+ time.sleep(5)
197
+
198
+ def process_single_request(request_id: str, user_data: dict):
199
+ """Process a single request"""
200
+ try:
201
+ # Extract user data
202
+ img_b64 = user_data['image_b64']
203
+ category = user_data['category']
204
+ gender = user_data['gender']
205
+
206
+ if not backend_status["connected"]:
207
+ check_backend_connection()
208
+ if not backend_status["connected"]:
209
+ raise Exception("Backend not available")
210
+
211
+ client = backend_status["client"]
212
+ start_time = time.time()
213
+
214
+ result = client.predict(
215
+ img_b64,
216
+ category,
217
+ gender,
218
+ api_name="/predict",
219
+ )
220
+
221
+ processing_time = time.time() - start_time
222
+
223
+ if not result or len(result) < 4:
224
+ raise ValueError("Invalid response structure from backend")
225
+
226
+ _, overlay_b64, bg_b64, status = result
227
+
228
+ final_result = {
229
+ 'overlay_b64': overlay_b64,
230
+ 'bg_b64': bg_b64,
231
+ 'status': status,
232
+ 'processing_time': processing_time
233
+ }
234
+
235
+ queue_manager.complete_request(request_id, final_result)
236
+
237
+ except Exception as e:
238
+ queue_manager.fail_request(request_id, str(e))
239
+
240
+ # Start queue worker
241
+ worker_thread = threading.Thread(target=queue_worker, daemon=True)
242
+ worker_thread.start()
243
+
244
+ # ───────── Helper Functions ─────────
245
  def image_to_base64(image: Image.Image) -> str:
246
  if image is None:
247
  return ""
 
251
  image.save(buf, format="PNG")
252
  return base64.b64encode(buf.getvalue()).decode()
253
 
254
+ def base64_to_image(b64: str) -> Optional[Image.Image]:
255
  if not b64:
256
  return None
257
  try:
 
260
  logger.error(f"Failed to decode base64 β†’ image: {e}")
261
  return None
262
 
263
+ # ───────── Request Management ─────────
264
+ active_requests = {} # session_id -> request_id
 
 
 
 
 
 
 
 
 
 
 
265
 
266
+ def submit_request(input_image: Image.Image, category: str, gender: str, session_id: str = None):
267
+ """Submit a new request to the queue"""
268
  if input_image is None:
269
+ return None, None, "❌ Please upload an image.", gr.update(interactive=True), ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
270
 
271
+ try:
272
+ # Generate unique request ID
273
+ request_id = str(uuid.uuid4())
274
+ if session_id:
275
+ active_requests[session_id] = request_id
276
+
277
+ # Prepare user data
278
+ img_b64 = image_to_base64(input_image)
279
+ user_data = {
280
+ 'image_b64': img_b64,
281
+ 'category': category,
282
+ 'gender': gender,
283
+ 'timestamp': time.time()
284
+ }
285
+
286
+ # Add to queue
287
+ position, estimated_wait = queue_manager.add_request(request_id, user_data)
288
+
289
+ status_msg = f"πŸš€ Request submitted! Position in queue: #{position}"
290
+ if position == 1 and queue_manager.get_queue_status()['processing_count'] == 0:
291
+ status_msg += " | Starting processing now..."
292
+ elif estimated_wait > 0:
293
+ status_msg += f" | Estimated wait: {estimated_wait:.0f}s ({int(estimated_wait/60)}m {int(estimated_wait%60)}s)"
294
+
295
+ return None, None, status_msg, gr.update(interactive=False), request_id
296
+
297
+ except Exception as e:
298
+ return None, None, f"❌ {str(e)}", gr.update(interactive=True), ""
299
 
300
+ def check_request_status(request_id: str):
301
+ """Check the status of a request"""
302
+ if not request_id:
303
+ return None, None, "No active request", gr.update(interactive=True)
304
+
305
+ status_info = queue_manager.get_request_status(request_id)
306
+
307
+ if status_info['status'] == 'completed':
308
+ result = status_info['result']
309
+ overlay_img = base64_to_image(result['overlay_b64'])
310
+ bg_img = base64_to_image(result['bg_b64'])
311
+ status_msg = f"βœ… {result['status']} (⏱ {result['processing_time']:.1f}s)"
312
+ return overlay_img, bg_img, status_msg, gr.update(interactive=True)
313
+
314
+ elif status_info['status'] == 'failed':
315
+ return None, None, f"❌ {status_info['error']}", gr.update(interactive=True)
316
+
317
+ elif status_info['status'] == 'processing':
318
+ processing_time = status_info['time']
319
+ return None, None, f"⚑ Processing... ({processing_time:.1f}s)", gr.update(interactive=False)
320
+
321
+ elif status_info['status'] == 'queued':
322
+ position = status_info['position']
323
+ # Calculate estimated wait for this position
324
+ avg_time = queue_manager.stats['avg_processing_time']
325
+ estimated_wait = position * avg_time
326
+ wait_msg = f" | Est. wait: {int(estimated_wait/60)}m {int(estimated_wait%60)}s" if estimated_wait > 30 else ""
327
+ return None, None, f"⏳ In queue, position #{position}{wait_msg}", gr.update(interactive=False)
328
+
329
+ else:
330
+ return None, None, "❓ Request not found", gr.update(interactive=True)
331
 
332
+ def get_queue_info():
333
+ """Get formatted queue information"""
334
+ status = queue_manager.get_queue_status()
335
+
336
+ info = f"""
337
+ <div style="background: linear-gradient(135deg, #e3f2fd, #bbdefb); padding: 15px; border-radius: 10px; margin: 10px 0;">
338
+ <h4 style="margin: 0 0 10px 0; color: #1565c0;">πŸ“Š Queue Status (Single GPU Processing)</h4>
339
+ <div style="display: grid; grid-template-columns: 1fr 1fr; gap: 10px; font-size: 14px;">
340
+ <div><strong>Queue Length:</strong> {status['queue_length']}/{status['max_queue_size']}</div>
341
+ <div><strong>GPU Status:</strong> {'πŸ”΄ Processing' if status['processing_count'] > 0 else '🟒 Available'}</div>
342
+ <div><strong>Completed:</strong> {status['total_processed']}</div>
343
+ <div><strong>Failed:</strong> {status['total_failed']}</div>
344
+ <div><strong>Avg. Processing:</strong> {status['avg_processing_time']:.1f}s</div>
345
+ <div><strong>Last Updated:</strong> {datetime.now().strftime('%H:%M:%S')}</div>
346
+ </div>
347
+ <div style="margin-top: 10px; padding: 8px; background: rgba(0,0,0,0.1); border-radius: 5px; font-size: 12px;">
348
+ <strong>Note:</strong> GPU processes one request at a time. Queue position = exact wait order.
349
+ </div>
350
+ </div>
351
+ """
352
+ return info
353
 
354
+ # ───────── CSS Styles ─────────
355
  custom_css = """
356
  .gradio-container {
357
  background: linear-gradient(135deg, #3b4371 0%, #2d1b69 25%, #673ab7 50%, #8e24aa 75%, #6a1b9a 100%);
358
  font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
359
  min-height: 100vh;
360
  }
361
+
362
+ .queue-indicator {
 
 
 
 
 
 
 
363
  background: linear-gradient(135deg, #4CAF50, #45a049);
364
  color: white;
365
+ padding: 12px;
366
+ border-radius: 10px;
367
  text-align: center;
368
  margin: 10px 0;
369
+ font-weight: 600;
370
+ box-shadow: 0 4px 15px rgba(76, 175, 80, 0.3);
371
  }
372
+
373
  .queue-warning {
374
  background: linear-gradient(135deg, #ff9800, #f57c00);
375
  color: white;
376
+ padding: 12px;
377
+ border-radius: 10px;
378
  text-align: center;
379
  margin: 10px 0;
380
+ font-weight: 600;
381
+ box-shadow: 0 4px 15px rgba(255, 152, 0, 0.3);
 
 
 
 
 
 
 
382
  }
383
+
384
+ .queue-full {
385
+ background: linear-gradient(135deg, #f44336, #d32f2f);
386
  color: white;
 
 
 
 
 
 
 
387
  padding: 12px;
388
+ border-radius: 10px;
 
 
389
  text-align: center;
390
+ margin: 10px 0;
 
 
 
 
 
 
 
 
 
 
 
 
391
  font-weight: 600;
392
+ box-shadow: 0 4px 15px rgba(244, 67, 54, 0.3);
393
  }
394
+
395
+ .feature-box {
396
+ background: #f8fafc;
397
+ border: 1px solid #e2e8f0;
398
+ padding: 20px;
 
 
 
 
 
 
399
  border-radius: 12px;
400
+ margin: 10px 0;
 
 
 
 
 
 
 
 
 
 
401
  }
402
+
403
  button.primary {
404
  background: linear-gradient(135deg, #673ab7, #8e24aa) !important;
405
  border: none !important;
 
410
  font-size: 15px !important;
411
  box-shadow: 0 5px 15px rgba(103, 58, 183, 0.4) !important;
412
  }
413
+
414
+ .status-panel {
415
+ background: rgba(255, 255, 255, 0.95);
416
+ border: 2px solid #673ab7;
417
+ border-radius: 15px;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
418
  padding: 20px;
 
419
  margin: 10px 0;
420
  }
421
  """
422
 
423
+ # ───────── Gradio Interface ─────────
424
+ with gr.Blocks(css=custom_css, title="Jewellery Photography Preview - Queue System") as demo:
425
+ # Hero Section
426
  gr.HTML("""
427
  <div style="text-align: center; margin-bottom: 20px;">
428
+ <h1 style="font-size: 2.5em; color: white; text-shadow: 2px 2px 4px rgba(0,0,0,0.5);">
429
+ 🎨 Raresence: AI-Powered Jewellery Photo Preview
430
+ </h1>
431
+ <p style="color: #e1bee7; font-size: 1.2em;">
432
+ Single GPU Processing | Queue System | One Request at a Time
433
+ </p>
434
  </div>
435
  """)
436
 
437
+ # Status and Queue Information
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
438
  with gr.Row():
439
+ backend_status_display = gr.HTML()
440
+ queue_info_display = gr.HTML()
441
 
442
+ # Main Interface
443
+ with gr.Row():
444
+ with gr.Column(scale=1):
445
+ gr.HTML("""
446
+ <div class="feature-box">
447
+ <h3>πŸ–ΌοΈ Upload Jewellery Image</h3>
448
+ <p style="color: #666;">Select a clear jewellery image for best results</p>
449
+ </div>
450
+ """)
451
+ input_img = gr.Image(label="Upload image", type="pil", height=400)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
452
 
453
+ gr.HTML("""
454
+ <div class="feature-box">
455
+ <h3>βš™οΈ Settings</h3>
456
+ </div>
457
+ """)
458
+ category = gr.Dropdown(
459
+ label="Jewellery category",
460
+ choices=["Rings", "Bracelets", "Watches", "Earrings"],
461
+ value="Bracelets"
462
+ )
463
+ gender = gr.Dropdown(
464
+ label="Model gender",
465
+ choices=["male", "female"],
466
+ value="female"
467
+ )
468
 
469
+ submit_btn = gr.Button("πŸš€ Submit to Queue", variant="primary", size="lg")
470
 
471
+ with gr.Column(scale=1):
472
  gr.HTML("""
473
+ <div class="feature-box">
474
+ <h3>🎨 AI Generated Results</h3>
475
+ <p style="color: #666;">Results will appear here after processing</p>
476
  </div>
477
  """)
478
 
479
+ with gr.Tabs():
480
+ with gr.TabItem("Final Result"):
481
+ result_bg = gr.Image(label="Professional Background", height=400)
482
+ with gr.TabItem("Detection Overlay"):
483
+ result_overlay = gr.Image(label="Detection Overlay", height=400)
484
+
485
+ # Status and Control Panel
486
+ with gr.Group():
487
+ gr.HTML('<div class="status-panel">')
488
+ status_output = gr.Textbox(
489
+ label="Request Status",
490
+ interactive=False,
491
+ value="Ready to submit request"
492
+ )
493
+
494
+ with gr.Row():
495
+ check_status_btn = gr.Button("πŸ”„ Check Status", size="sm")
496
+ cancel_btn = gr.Button("❌ Cancel Request", size="sm", variant="secondary")
497
+
498
+ gr.HTML('</div>')
499
+
500
+ # Hidden state for request tracking
501
+ current_request_id = gr.State("")
502
+ session_id = gr.State(lambda: str(uuid.uuid4()))
503
+
504
+ # Auto-refresh components
505
+ def update_displays():
506
+ # Update backend status
507
+ ok, msg = check_backend_connection()
508
+ status_class = "queue-indicator" if ok else "queue-warning"
509
+ backend_html = f'<div class="{status_class}">{msg}</div>'
510
+
511
+ # Update queue info
512
+ queue_html = get_queue_info()
513
+
514
+ return backend_html, queue_html
515
+
516
+ # Event handlers
517
+ submit_btn.click(
518
+ fn=submit_request,
519
+ inputs=[input_img, category, gender, session_id],
520
+ outputs=[result_overlay, result_bg, status_output, submit_btn, current_request_id],
521
+ show_progress=True
522
+ )
523
+
524
+ check_status_btn.click(
525
+ fn=check_request_status,
526
+ inputs=[current_request_id],
527
+ outputs=[result_overlay, result_bg, status_output, submit_btn],
528
+ show_progress=False
529
+ )
530
+
531
+ def cancel_request(request_id, session_id_val):
532
+ if session_id_val in active_requests:
533
+ del active_requests[session_id_val]
534
+ return None, None, "Request cancelled", gr.update(interactive=True), ""
535
+
536
+ cancel_btn.click(
537
+ fn=cancel_request,
538
+ inputs=[current_request_id, session_id],
539
+ outputs=[result_overlay, result_bg, status_output, submit_btn, current_request_id]
540
+ )
541
+
542
+ # Auto-refresh status displays every 3 seconds
543
+ def refresh_displays():
544
+ return update_displays()
545
+
546
+ # Set up periodic refresh
547
+ demo.load(refresh_displays, outputs=[backend_status_display, queue_info_display])
548
+
549
+ # Periodic status check for active requests
550
+ def auto_check_status(request_id):
551
+ if request_id:
552
+ return check_request_status(request_id)
553
+ return None, None, "No active request", gr.update(interactive=True)
554
+
555
+ # Footer
556
  gr.HTML("""
557
+ <div style="text-align:center;padding:30px;background:rgba(255,255,255,0.1);border-radius:15px;margin:20px 0;">
558
+ <h3 style="color:white;">πŸš€ Advanced Queue System</h3>
559
+ <p style="color:#e1bee7;">
560
+ Intelligent queuing β€’ Real-time status β€’ Fair processing order
561
  </p>
562
+ <p style="font-size:12px;color:#d1c4e9;">
563
+ Β© 2024 Snapwear AI | Professional AI tools with enterprise-grade queuing
 
 
 
 
 
564
  </p>
565
  </div>
566
  """)
567
 
568
+ # ───────── Launch Configuration ─────────
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
569
  if __name__ == "__main__":
570
+ logger.info("Starting Jewellery Photography Preview with Advanced Queue System...")
571
+
572
  demo.queue(
573
+ max_size=MAX_QUEUE_SIZE + 10, # Allow some buffer in Gradio's queue
574
+ default_concurrency_limit=1, # Match our single GPU processing
575
  ).launch(
576
  share=False,
577
  server_name="0.0.0.0",
578
  server_port=7860,
579
+ show_error=True,
580
+ show_api=False
581
+ )
582
+
583
+ logger.info(f"Single GPU Queue system ready! Max queue: {MAX_QUEUE_SIZE}, Processing: 1 at a time")