lulavc committed on
Commit
eec0e30
·
verified ·
1 Parent(s): aeead0b

Optimize for NVIDIA T4 GPU

Browse files
Files changed (1) hide show
  1. app.py +19 -18
app.py CHANGED
@@ -1,11 +1,10 @@
1
  """
2
  BubbleScribe - AI Manga & Comic Translator
3
  Translate manga/comics using Qwen2-VL for OCR + Translation and LaMa for inpainting.
4
- ZeroGPU optimized
5
  """
6
 
7
  import gradio as gr
8
- import spaces
9
  import torch
10
  import os
11
  import json
@@ -19,33 +18,36 @@ from concurrent.futures import ThreadPoolExecutor
19
  import threading
20
 
21
  # ============================================================
22
- # HARDWARE OPTIMIZATION: ZeroGPU
23
  # ============================================================
24
 
25
  import cv2
26
 
 
 
 
 
 
27
  # Thread pool for parallel operations
28
  executor = ThreadPoolExecutor(max_workers=4)
29
 
30
  # ============================================================
31
- # MODEL PRELOADING
32
  # ============================================================
33
 
34
  print("🚀 BubbleScribe starting up...")
35
- print(f" Hardware: ZeroGPU")
36
- print(f" OCR: Qwen2-VL")
37
- print(f" Inpainting: LaMa")
38
 
39
- # Load LaMa model for inpainting
40
- lama_model = None
 
 
 
41
 
42
  def load_lama():
43
- """Load LaMa inpainting model."""
44
- global lama_model
45
- if lama_model is None:
46
- from simple_lama_inpainting import SimpleLama
47
- lama_model = SimpleLama()
48
- print("✅ LaMa model loaded")
49
  return lama_model
50
 
51
  # ============================================================
@@ -516,9 +518,8 @@ def draw_detections(image: Image.Image, detections: list) -> Image.Image:
516
  # MAIN PIPELINE
517
  # ============================================================
518
 
519
- @spaces.GPU
520
  def translate_manga(image, source_lang, target_lang, show_boxes, apply_inpaint, progress=gr.Progress()):
521
- """Main translation pipeline."""
522
  if image is None:
523
  return None, None, "Please upload an image"
524
 
@@ -657,7 +658,7 @@ with gr.Blocks(title="BubbleScribe", css=css, theme=gr.themes.Soft()) as demo:
657
 
658
  gr.HTML("""
659
  <div class="stats">
660
- ⚡ <strong>Hardware:</strong> ZeroGPU (H200) • <strong>Models:</strong> Qwen2-VL (OCR & Translation) + LaMa (Inpainting)
661
  </div>
662
  """)
663
 
 
1
  """
2
  BubbleScribe - AI Manga & Comic Translator
3
  Translate manga/comics using Qwen2-VL for OCR + Translation and LaMa for inpainting.
4
+ Optimized for NVIDIA T4 GPU
5
  """
6
 
7
  import gradio as gr
 
8
  import torch
9
  import os
10
  import json
 
18
  import threading
19
 
20
  # ============================================================
21
+ # HARDWARE OPTIMIZATION: NVIDIA T4 (16GB VRAM)
22
  # ============================================================
23
 
24
  import cv2
25
 
26
+ # Enable CUDA optimizations
27
+ torch.backends.cudnn.benchmark = True
28
+ torch.backends.cuda.matmul.allow_tf32 = True
29
+ torch.backends.cudnn.allow_tf32 = True
30
+
31
  # Thread pool for parallel operations
32
  executor = ThreadPoolExecutor(max_workers=4)
33
 
34
  # ============================================================
35
+ # MODEL PRELOADING (Load at startup for faster inference)
36
  # ============================================================
37
 
38
  print("🚀 BubbleScribe starting up...")
39
+ print(f" Hardware: NVIDIA T4 (16GB VRAM)")
40
+ print(f" OCR: Qwen2-VL (API)")
41
+ print(f" Inpainting: LaMa (GPU)")
42
 
43
+ # Load LaMa model at startup
44
+ print("📦 Loading LaMa model...")
45
+ from simple_lama_inpainting import SimpleLama
46
+ lama_model = SimpleLama()
47
+ print("✅ LaMa model loaded and ready!")
48
 
49
  def load_lama():
50
+ """Get LaMa model (already loaded at startup)."""
 
 
 
 
 
51
  return lama_model
52
 
53
  # ============================================================
 
518
  # MAIN PIPELINE
519
  # ============================================================
520
 
 
521
  def translate_manga(image, source_lang, target_lang, show_boxes, apply_inpaint, progress=gr.Progress()):
522
+ """Main translation pipeline (GPU-accelerated on T4)."""
523
  if image is None:
524
  return None, None, "Please upload an image"
525
 
 
658
 
659
  gr.HTML("""
660
  <div class="stats">
661
+ ⚡ <strong>Hardware:</strong> NVIDIA T4 (16GB) • <strong>Models:</strong> Qwen2-VL (OCR & Translation) + LaMa (Inpainting)
662
  </div>
663
  """)
664