Spaces:
Sleeping
Sleeping
jovian
committed on
Commit
·
ab7eec3
1
Parent(s):
20ec170
supported file
Browse files
app.py
CHANGED
|
@@ -6,7 +6,7 @@ from sahi import AutoDetectionModel
|
|
| 6 |
from PIL import Image
|
| 7 |
import plotly.graph_objects as go
|
| 8 |
import torch
|
| 9 |
-
|
| 10 |
|
| 11 |
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
| 12 |
|
|
@@ -209,6 +209,7 @@ def upload_image(image):
|
|
| 209 |
"""Process the uploaded image (if needed) and display it."""
|
| 210 |
return image
|
| 211 |
|
|
|
|
| 212 |
def apply_detection(image):
|
| 213 |
"""Run object detection on the uploaded image and return the annotated image."""
|
| 214 |
# Convert image from PIL to NumPy array
|
|
@@ -399,7 +400,7 @@ with gr.Blocks() as demo:
|
|
| 399 |
with gr.Row(visible=False) as input_row:
|
| 400 |
# Image Upload and Display in two columns
|
| 401 |
with gr.Column():
|
| 402 |
-
gr.Markdown("###
|
| 403 |
upload_image_component = gr.Image(type="pil", label="Select Image")
|
| 404 |
|
| 405 |
with gr.Column():
|
|
|
|
| 6 |
from PIL import Image
|
| 7 |
import plotly.graph_objects as go
|
| 8 |
import torch
|
| 9 |
+
import spaces
|
| 10 |
|
| 11 |
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
| 12 |
|
|
|
|
| 209 |
"""Process the uploaded image (if needed) and display it."""
|
| 210 |
return image
|
| 211 |
|
| 212 |
+
@spaces.GPU
|
| 213 |
def apply_detection(image):
|
| 214 |
"""Run object detection on the uploaded image and return the annotated image."""
|
| 215 |
# Convert image from PIL to NumPy array
|
|
|
|
| 400 |
with gr.Row(visible=False) as input_row:
|
| 401 |
# Image Upload and Display in two columns
|
| 402 |
with gr.Column():
|
| 403 |
+
gr.Markdown("### Input (Supported Image: bmp,jpg,png,jpeg,gif)")
|
| 404 |
upload_image_component = gr.Image(type="pil", label="Select Image")
|
| 405 |
|
| 406 |
with gr.Column():
|