Spaces:
Sleeping
Sleeping
Commit ·
d1f37f2
0
Parent(s):
Init repo
Browse files- .gitignore +6 -0
- main.py +15 -0
- validation.py +25 -0
- video.py +49 -0
- we_used.txt +2 -0
.gitignore
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.DS_Store
|
| 2 |
+
__pycache__/
|
| 3 |
+
datasets/
|
| 4 |
+
runs/
|
| 5 |
+
*.pt
|
| 6 |
+
*.mp4
|
main.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ultralytics import YOLO


def main() -> None:
    """Fine-tune YOLOv8 on the soccer-ball dataset, validate, save, export.

    Side effects: downloads/loads ``yolov8n.pt``, writes ``my_model.pt``
    and an ONNX export to disk, prints validation metrics and the export path.
    """
    # Load a pretrained model (recommended starting point for fine-tuning).
    model = YOLO("yolov8n.pt")

    # Train on the dataset described by the YAML file.
    # NOTE(review): "socker" looks like a typo for "soccer", but the string
    # must match the on-disk dataset directory — confirm before renaming.
    model.train(data="./datasets/socker/socker.yaml", epochs=10, imgsz=1920)

    # Evaluate model performance on the validation set.
    metrics = model.val()
    print(metrics)

    # Persist the fine-tuned weights; validation.py and video.py load this file.
    model.save("my_model.pt")

    # Export the model to ONNX and report where the file was written.
    path = model.export(format="onnx")
    print(path)


# Guard the entry point so importing this module does not start a training run.
if __name__ == "__main__":
    main()
|
validation.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator

# Validation image to run inference on (path baked in by the original script).
IMAGE_PATH = "datasets/socker/valid/images/img_307_jpeg.rf.8ee20e9d3ea38fa3a62c2495e7e824e3.jpg"


def main() -> None:
    """Run the trained model on one validation image and show the detections.

    Opens an OpenCV window with red bounding boxes drawn over the image;
    blocks until any key is pressed, then closes the window.
    """
    model = YOLO("my_model.pt")

    # Run inference at the same resolution the model was trained with.
    results = model(IMAGE_PATH, imgsz=1920)
    print(results)

    # Draw bounding boxes on the original image.
    result = results[0]
    annotator = Annotator(result.orig_img, line_width=3)
    for box in result.boxes:
        xyxy = box.xyxy[0]      # corner coordinates of this detection
        cls_id = int(box.cls)   # predicted class index -> human-readable name
        annotator.box_label(xyxy, model.names[cls_id], color=(0, 0, 255))

    cv2.imshow('YOLO V8 Detection', annotator.result())
    cv2.waitKey(0)
    # Fix: the original left the window open after the key press;
    # release the GUI resources explicitly.
    cv2.destroyAllWindows()


# Guard the entry point so importing this module does not run inference.
if __name__ == "__main__":
    main()
|
video.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math  # kept: present in the original file, though unused below

import cv2
import numpy as np
from ultralytics import YOLO


def main() -> None:
    """Run ball detection on a soccer video and display annotated frames.

    Press 'q' in the output window to stop early; otherwise the loop ends
    when the video runs out of frames. Exits the process if the video file
    cannot be opened.
    """
    cap = cv2.VideoCapture('CityUtdR video.mp4')

    if not cap.isOpened():
        print("Error: Could not open video file.")
        exit()

    # Custom single-class model produced by main.py.
    model = YOLO('my_model.pt')

    # Object classes the model predicts.
    class_names = ["ball"]

    while True:
        success, img = cap.read()
        # Bug fix: the original never checked `success`, so once the video
        # ended `cap.read()` returned (False, None) and inference was run on
        # a None frame, crashing. Stop the loop cleanly instead.
        if not success:
            break

        results = model(img, stream=True)

        # Walk every detection in every result for this frame.
        for r in results:
            for box in r.boxes:
                # Bounding-box corners, converted to int pixel coordinates.
                x1, y1, x2, y2 = (int(v) for v in box.xyxy[0])

                # Draw the box on the frame.
                cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 255), 3)

                # Label the detection at the top-left corner of the box.
                org = [x1, y1]
                font = cv2.FONT_HERSHEY_SIMPLEX
                fontScale = 1
                color = (255, 0, 0)
                thickness = 2
                cv2.putText(img, class_names[np.argmax(box.cls)], org,
                            font, fontScale, color, thickness)

        cv2.imshow('Output', img)
        if cv2.waitKey(1) == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


# Guard the entry point so importing this module does not start playback.
if __name__ == "__main__":
    main()
|
we_used.txt
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
https://github.com/ultralytics/ultralytics/
|
| 2 |
+
https://www.kaggle.com/datasets/michaelmortenson/football-soccer-ball-detection-dfl
|