Spaces:
Build error
RyanPham19092002 committed
Commit 9a80b36 · Parent(s): b822a6f
Add application file
Files changed:
- app.py +120 -0
- yolov5s.pt +3 -0
app.py
ADDED
@@ -0,0 +1,120 @@
import gradio as gr
import torch
import os
import requests
import json
import cv2
from PIL import Image
from timeit import default_timer as timer
import pathlib
import platform
import numpy as np

# Pretrained YOLOv5s model from the ultralytics hub
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
cnt = 0  # counter used to number the JSON file written for each processed image


def LCR(bbox, x_img, y_img):
    """Classify a box as Left / Center / Right from its normalised x-coordinates."""
    x1 = bbox[0] / x_img
    x2 = bbox[2] / x_img

    if x1 < 0.2 and x2 < 0.2:
        location = "Left"
    elif x1 > 0.8 and x2 > 0.8:
        location = "Right"
    elif x1 < 0.2 and (0.2 <= x2 <= 0.8):
        # Box straddles the left boundary: decide by where its midpoint falls
        if (x1 + x2) < 0.4:
            location = "Left"
        else:
            location = "Center"
    elif x2 > 0.8 and (0.2 <= x1 <= 0.8):
        # Box straddles the right boundary
        if (x1 + x2) > 1.6:
            location = "Right"
        else:
            location = "Center"
    else:
        location = "Center"
    print(f"x1 {x1} x2 {x2} bbox0 {bbox[0]} bbox2 {bbox[2]} x_img {x_img} LocationLCR {location}")
    return location


def ACB(bbox, x_img, y_img, location):
    """Refine a 'Center' box into Above / Center / Below from its normalised y-coordinates."""
    y1 = bbox[1] / y_img
    y2 = bbox[3] / y_img
    if location == "Center":
        if y1 < 0.33333 and y2 < 0.33333:
            location = "Above"
        elif y1 > 0.66667 and y2 > 0.66667:
            location = "Below"
        elif y1 < 0.33333 and (0.33333 <= y2 <= 0.66667):
            if (y1 + y2) < 0.66667:
                location = "Above"
            else:
                location = "Center"
        elif y2 > 0.66667 and (0.33333 <= y1 <= 0.66667):
            if (y1 + y2) > 1.33333:
                location = "Below"
            else:
                location = "Center"
        else:
            location = "Center"
    else:
        # Left/Right boxes keep their horizontal label unchanged
        pass
    print(f"y1 {y1} y2 {y2} bbox1 {bbox[1]} bbox3 {bbox[3]} y_img {y_img} Location {location}")

    return location
# print(bbox[0])


def turn_img_into_fileJSON(frame):
    """Run YOLOv5 on a PIL image and return (JSON string of detections, prediction time)."""
    start_time = timer()
    x_img, y_img = frame.size
    print(x_img, y_img)
    global cnt
    objects = []

    prediction = model(frame)
    for det in prediction.xyxy[0]:
        class_id = int(det[5])
        class_name = model.names[class_id]
        confidence = float(det[4])
        bbox = det[:4].tolist()
        if confidence >= 0.5:
            location = LCR(bbox, x_img, y_img)
            location = ACB(bbox, x_img, y_img, location)
            # Save the results to the list
            objects.append({
                'Class': class_name,
                # 'BoundingBox': bbox,
                'Location': location,
                'Confidence': confidence
            })
    # Persist this image's detections to a numbered JSON file
    with open('{:05d}.json'.format(cnt), 'w') as f:
        json.dump(objects, f)
    cnt += 1
    pred_time = round(timer() - start_time, 5)
    json_str = json.dumps(objects)
    return json_str, pred_time


# path = [["D:/cuoc_thi/object-detection/download.jpg"], ["C:/Users/ACER/Pictures/mydestiny/273536337_788402492117531_8798195010554693138_n.jpg"]]
title = "Object-detection"
description = "A YOLOv5s object detection model that returns the class, relative location and confidence of each detected object as JSON."
article = "Created by Ryan"

# json_str1, pred_time1 = turn_img_into_fileJSON("C:/Users/ACER/Pictures/mydestiny/273536337_788402492117531_8798195010554693138_n.jpg")
# print(json_str1, pred_time1)

# json_str, pred_time = turn_img_into_fileJSON("D:/cuoc_thi/object-detection/download.jpg")
# print(json_str, pred_time)


# Create the Gradio demo
demo = gr.Interface(fn=turn_img_into_fileJSON,    # mapping function from input to output
                    inputs=gr.Image(type="pil"),  # PIL input: the function relies on frame.size
                    outputs=[gr.JSON(label="JSON Output"),
                             # gr.Label(num_top_classes=80, label="Predictions"),
                             gr.Number(label="Prediction time (s)")],
                    # gr.outputs.Label(num_top_classes=80),
                    # examples=path,
                    title=title,
                    description=description,
                    article=article,
                    live=True)
demo.launch()
# demo.launch(share=True)
yolov5s.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8b3b748c1e592ddd8868022e8732fde20025197328490623cc16c6f24d0782ee
size 14808437
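Note: app.py downloads the yolov5s weights through torch.hub at runtime, even though this commit also ships yolov5s.pt via Git LFS. A minimal sketch, assuming the standard ultralytics/yolov5 'custom' hub entry point, of how the bundled checkpoint could be loaded instead:

    import torch

    # 'custom' loads an arbitrary local .pt checkpoint through the yolov5 hubconf
    model = torch.hub.load('ultralytics/yolov5', 'custom', path='yolov5s.pt')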