Update app.py
app.py CHANGED
@@ -205,37 +205,58 @@ import torch
 from PIL import Image
 import torchvision.transforms as T
 from ultralytics import YOLO
+import onnxruntime as ort
+import cv2
+import numpy as np
 
 # Load your model
-
+
+# model = YOLO("Model_IV.pt")
 # model = torch.load("Model_IV.pt")
 # model.eval()
 # checkpoint = torch.load("Model_IV.pt")
 # model.load_state_dict(checkpoint)  # Load the saved weights
 # model.eval()  # Set the model to evaluation mode
 
+# Load the onnx model
+model = ort.InferenceSession("Model_IV.onnx")
+
 # Define preprocessing
-transform = T.Compose([
-    T.Resize((224, 224)),  # Adjust to your model's input size
-    T.ToTensor(),
-])
+# transform = T.Compose([
+#     T.Resize((224, 224)),  # Adjust to your model's input size
+#     T.ToTensor(),
+# ])
 
 def predict(image):
-    # Preprocess the image
-    img_tensor = transform(image).unsqueeze(0)  # Add batch dimension
+    # # Preprocess the image
+    # img_tensor = transform(image).unsqueeze(0)  # Add batch dimension
 
-    # # Make prediction
-    # with torch.no_grad():
-    #     output = model(img_tensor)
+    # # # Make prediction
+    # # with torch.no_grad():
+    # #     output = model(img_tensor)
 
-    # Process output (adjust based on your model's format)
-    #
-
-    #
-
-
-
+    # # Process output (adjust based on your model's format)
+    # results = model(image)
+    # annotated_img = results[0].plot()
+    # return annotated_img
+
+    # Preprocess the image (resize, normalize, etc)
+    input_name = model.get_inputs()[0].name
+    input_shape = model.get_inputs()[0].shape
+
+    # Resize the image to the model's input shape
+    image = cv2.resize(image, (input_shape[2], input_shape[3]))
 
+    # Convert the image to a numpy array and add a batch dimension
+    image = np.expand_dims(image, axis=0)
+
+    # Perform inference
+    output = model.run(None, {input_name: image})
+    print(type(output))
+    print(output)
+
+    return image
+
 # Gradio interface
 demo = gr.Interface(
     fn=predict,
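The predict added above still returns the resized input array rather than the model's detections, and it feeds a uint8, channel-last batch to the session, whereas YOLO-style ONNX exports typically expect a float32, channel-first (NCHW) tensor scaled to [0, 1]. Below is a minimal sketch of that fuller preprocessing, assuming Model_IV.onnx has a fixed NCHW input shape and the Gradio input is an RGB numpy array; the variable names are illustrative, not part of the commit.

import cv2
import numpy as np
import onnxruntime as ort

model = ort.InferenceSession("Model_IV.onnx")

def predict(image):
    # Query input metadata from the session
    input_name = model.get_inputs()[0].name
    input_shape = model.get_inputs()[0].shape  # assumed fixed [N, C, H, W]

    # Resize to the model's spatial size; note cv2.resize takes (width, height)
    resized = cv2.resize(image, (input_shape[3], input_shape[2]))

    # uint8 HWC -> float32 CHW in [0, 1], then add the batch dimension
    tensor = resized.astype(np.float32) / 255.0
    tensor = np.transpose(tensor, (2, 0, 1))[np.newaxis, ...]

    # Run inference and return the raw outputs for downstream post-processing
    outputs = model.run(None, {input_name: tensor})
    return outputs

If the model was exported with dynamic axes, get_inputs()[0].shape may contain strings instead of integers, in which case the target size has to be hard-coded.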