imperiusrex committed on
Commit
1bbbe26
·
verified ·
1 Parent(s): 9c5dae2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -8
app.py CHANGED
@@ -1,8 +1,8 @@
1
- import spaces # import before anything else to request GPU
2
- spaces.GPU.require("H200") # request H200 GPU
 
3
 
4
  import gradio as gr
5
- import os
6
  import cv2
7
  import numpy as np
8
  import torch
@@ -11,9 +11,7 @@ from transformers import CLIPProcessor, CLIPModel
11
  from paddleocr import PaddleOCR
12
  import tempfile
13
 
14
- # (Keep all your utility functions here, unchanged)
15
-
16
- # Load your models outside your GPU function to avoid reloading
17
 
18
  clip_model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
19
  clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
@@ -32,9 +30,9 @@ candidates = [
32
  "This is Korean text"
33
  ]
34
 
35
- @spaces.GPU # This decorator tells Spaces to run this function on GPU
36
  def process_image(image):
37
- # your processing code here, exactly as before
38
  image_pil = Image.fromarray(image).convert("RGB")
39
  img_path = "uploaded.jpg"
40
  image_pil.save(img_path)
 
1
+ import spaces
2
+
3
+ # No spaces.GPU.require() here, remove it
4
 
5
  import gradio as gr
 
6
  import cv2
7
  import numpy as np
8
  import torch
 
11
  from paddleocr import PaddleOCR
12
  import tempfile
13
 
14
+ # Your utility functions here (run_text_detection, crop_and_warp_regions, etc.)
 
 
15
 
16
  clip_model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
17
  clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
 
30
  "This is Korean text"
31
  ]
32
 
33
+ @spaces.GPU # Decorate the function you want to run on GPU
34
  def process_image(image):
35
+ # Your processing logic here
36
  image_pil = Image.fromarray(image).convert("RGB")
37
  img_path = "uploaded.jpg"
38
  image_pil.save(img_path)