aje6 committed on
Commit
fb13326
·
verified ·
1 Parent(s): db34d43

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -30
app.py CHANGED
@@ -201,34 +201,37 @@
201
  # iface.launch()
202
 
203
  import gradio as gr
204
- import cv2
205
- from ultralytics import YOLO
206
-
207
- model = YOLO('Model_IV.pt')
208
- print(model.names)
209
- # webcamera = cv2.VideoCapture(0)
210
- # webcamera.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
211
- # webcamera.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
212
-
213
- # while True:
214
- # success, frame = webcamera.read()
 
 
 
 
 
 
215
 
216
- # results = model.track(frame, conf=0.2, imgsz=480)
217
- # cv2.putText(frame, f"Total: {len(results[0].boxes)}", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
218
- # cv2.imshow("Live Camera", results[0].plot())
219
-
220
- # if cv2.waitKey(1) == ord('q'):
221
- # break
222
-
223
- def run_model(img):
224
- results = model(img)
225
- annotated_img = results.render()[0]
226
- return annotated_img
227
-
228
- # webcamera.release()
229
- # cv2.destroyAllWindows()
230
-
231
-
232
-
233
- iface = gr.Interface(fn=run_model, inputs="webcam", outputs=annotated_img)
234
- iface.launch()
 
201
  # iface.launch()
202
 
203
  import gradio as gr
204
+ import torch
205
+ from PIL import Image
206
+ import torchvision.transforms as T
207
+
208
# Load the trained model. torch.save(model, ...) was evidently used upstream,
# so this expects a fully pickled nn.Module, not a bare state_dict.
# NOTE(review): torch.load unpickles arbitrary code — only ship trusted checkpoints.
# map_location="cpu" is required so a checkpoint saved on a GPU machine still
# loads on CPU-only hosts (e.g. a free Gradio Space); without it the load raises.
model = torch.load("model.pt", map_location="cpu")
model.eval()  # inference mode: freeze dropout / batch-norm statistics
211
+
212
# Preprocessing pipeline: resize to the network's input resolution, then
# convert the PIL image to a float tensor scaled to [0, 1].
_preproc_steps = [
    T.Resize((224, 224)),  # TODO confirm this matches the model's training size
    T.ToTensor(),
]
transform = T.Compose(_preproc_steps)
217
+
218
def predict(image):
    """Run the loaded model on a PIL image and return its raw output.

    Args:
        image: PIL.Image supplied by the Gradio image widget.

    Returns:
        Nested list of floats — the model's raw output tensor converted via
        ``tolist()`` so it is JSON-serializable for the "json" output widget.
    """
    # Normalize the color mode first: webcam/screenshot inputs are often RGBA
    # (or grayscale), which would break a model expecting 3 input channels.
    img_tensor = transform(image.convert("RGB")).unsqueeze(0)  # add batch dim

    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
        output = model(img_tensor)

    # NOTE(review): raw scores are returned as-is; apply softmax / label
    # mapping here if the UI should show probabilities or class names.
    return output.tolist()
228
+
229
# Expose the model through a minimal web UI: the widget hands predict() a
# PIL image, and the raw output list is rendered as JSON.
_image_input = gr.Image(type="pil")
demo = gr.Interface(fn=predict, inputs=_image_input, outputs="json")

if __name__ == "__main__":
    demo.launch()