remotewith committed on
Commit
f894347
·
1 Parent(s): 7dff8d2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +173 -2
app.py CHANGED
@@ -1,12 +1,50 @@
1
  ### 1. Imports and class names setup ###
2
  import gradio as gr
3
  import os
 
4
  import torch
 
 
 
 
 
 
 
5
 
6
  from model import create_effnetb2_model
7
  from timeit import default_timer as timer
8
  from typing import Tuple, Dict
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  # Setup class names
11
  class_names = ["hat","nohat"]
12
 
@@ -25,6 +63,54 @@ effnetb2.load_state_dict(
25
  )
26
  )
27
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
  ### 3. Predict function ###
29
 
30
  # Create predict function
@@ -52,6 +138,57 @@ def predict(img) -> Tuple[Dict, float]:
52
  # Return the prediction dictionary and prediction time
53
  return pred_labels_and_probs, pred_time
54
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  ### 4. Gradio app ###
56
 
57
  # Create title, description and article strings
@@ -61,15 +198,49 @@ article = "(https://www.learnpytorch.io/)."
61
 
62
  # Create examples list from "examples/" directory
63
  #example_list = [["examples/" + example] for example in os.listdir("examples")]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
 
65
  # Create the Gradio demo
66
- demo = gr.Interface(fn=predict, # mapping function from input to output
67
  inputs=gr.Image(type="pil"), # what are the inputs?
68
  outputs=[gr.Label(num_top_classes=2, label="Predictions"), # what are the outputs?
69
- gr.Number(label="Prediction time (s)")], # our fn has two outputs, therefore we have two outputs
 
 
 
70
  title=title,
71
  description=description,
72
  article=article)
73
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
  # Launch the demo!
75
  demo.launch()
 
1
  ### 1. Imports and class names setup ###
2
  import gradio as gr
3
  import os
4
+ import requests
5
  import torch
6
+ import numpy as np
7
+ from roboflow import Roboflow
8
+ import cv2
9
+
10
# Connect to the hosted Roboflow detection model (hard-hat dataset, version 2).
# NOTE(security): the original committed the API key in source. Prefer the
# ROBOFLOW_API_KEY environment variable; the literal key is kept only as a
# backward-compatible fallback and should be rotated.
rf = Roboflow(api_key=os.environ.get("ROBOFLOW_API_KEY", "gjZE3lykkitagkxHplyJ"))
project = rf.workspace().project("hard-hat-sample-gqvqs")
model = project.version(2).model
13
 
14
  from model import create_effnetb2_model
15
  from timeit import default_timer as timer
16
  from typing import Tuple, Dict
17
 
18
+
19
+
20
# Remote example assets fetched once at startup.
file_urls = [
    'https://www.dropbox.com/s/7sjfwncffg8xej2/video_7.mp4?dl=1'
]


def download_file(url, save_name):
    """Download *url* to *save_name* unless the file already exists.

    Args:
        url: HTTP(S) address of the file to fetch.
        save_name: local path to write the payload to.
    """
    # Skip the download when a previous run already fetched the file.
    if not os.path.exists(save_name):
        response = requests.get(url)
        # Context manager guarantees the handle is closed
        # (the original `open(...).write(...)` leaked it).
        with open(save_name, 'wb') as f:
            f.write(response.content)


# Fetch every example; videos share one fixed name, images are numbered.
# (The original enumerated but then re-indexed `file_urls[i]`.)
for i, url in enumerate(file_urls):
    if 'mp4' in url:
        download_file(url, "video.mp4")
    else:
        download_file(url, f"image_{i}.jpg")


# Gradio examples list for the video tab (list of per-example input lists).
video_path = [['video.mp4']]
44
+
45
+
46
+
47
+
48
# Setup class names: labels the EffNetB2 classifier outputs
# (order matters — index 0 -> "hat", index 1 -> "nohat").
class_names = ["hat","nohat"]
50
 
 
63
  )
64
  )
65
 
66
+
67
+
68
+
69
def detect(imagepath):
    """Run hard-hat detection on an image and draw the predicted boxes.

    Args:
        imagepath: path to an image file on disk.

    Returns:
        The annotated image as an RGB numpy array (one red rectangle per
        detection).
    """
    # Hosted Roboflow inference; confidence/overlap thresholds are percentages.
    result = model.predict(imagepath, confidence=40, overlap=30).json()
    img = cv2.imread(imagepath)

    # Roboflow reports each box as a *center* (x, y) plus width/height, so
    # convert every prediction to integer corner coordinates before drawing.
    # (BUG FIX: the original treated x/y as the top-left corner and also
    # added the box *height* to the x coordinate — `y2[p] = y2[p] + x1[p]`.)
    for pred in result.get("predictions", []):
        half_w = pred["width"] / 2
        half_h = pred["height"] / 2
        top_left = (int(pred["x"] - half_w), int(pred["y"] - half_h))
        bottom_right = (int(pred["x"] + half_w), int(pred["y"] + half_h))
        cv2.rectangle(
            img,
            top_left,
            bottom_right,
            color=(0, 0, 255),
            thickness=2,
            lineType=cv2.LINE_AA,
        )

    # OpenCV loads BGR; Gradio expects RGB.
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
108
+
109
+
110
+
111
+
112
+
113
+
114
  ### 3. Predict function ###
115
 
116
  # Create predict function
 
138
  # Return the prediction dictionary and prediction time
139
  return pred_labels_and_probs, pred_time
140
 
141
+
142
+
143
+ def show_preds_video(video_path):
144
+ cap = cv2.VideoCapture(video_path)
145
+ while(cap.isOpened()):
146
+ ret, frame = cap.read()
147
+ if ret:
148
+ frame_copy = frame.copy()
149
+ pix=model.predict(frame, confidence=40, overlap=30)
150
+ pix=pix.json()
151
+ x1,x2,y1,y2=[],[],[],[]
152
+ for i in pix.keys():
153
+ if i=="predictions":
154
+ for j in pix["predictions"]:
155
+ for a,b in j.items():
156
+ if a=="x":
157
+ x1.append(b)
158
+ if a=="y":
159
+ y1.append(b)
160
+ if a=="width":
161
+ x2.append(b)
162
+ if a=="height":
163
+ y2.append(b)
164
+
165
+
166
+
167
+ for p in range(0,len(x1)):
168
+ x2[p]=x2[p]+x1[p]
169
+
170
+ for p in range(0,len(x1)):
171
+ y2[p]=y2[p]+x1[p]
172
+
173
+ for (x11,y11,x12,y12) in zip(x1,y1,x2,y2):
174
+ cv2.rectangle(
175
+ img,
176
+ (x11,y11),
177
+ (x12,y12),
178
+ color=(0, 0, 255),
179
+ thickness=2,
180
+ lineType=cv2.LINE_AA
181
+ )
182
+
183
+
184
+ yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
185
+
186
+
187
+
188
+
189
+
190
+
191
+
192
  ### 4. Gradio app ###
193
 
194
  # Create title, description and article strings
 
198
 
199
  # Create examples list from "examples/" directory
200
  #example_list = [["examples/" + example] for example in os.listdir("examples")]
201
# Gradio component lists shared by the detection tabs.

# Image tab: takes a file path in, returns a numpy image out.
inputs_image = [gr.components.Image(type="filepath", label="Input Image")]
outputs_image = [gr.components.Image(type="numpy", label="Output Image")]

# Video tab: takes a video file path in, streams numpy frames out.
inputs_video = [gr.components.Video(type="filepath", label="Input Video")]
outputs_video = [gr.components.Image(type="numpy", label="Output Image")]
217
+
218
 
219
# Create the Gradio demo

# Classification tab: wraps `predict` (label probabilities + inference time).
app1 = gr.Interface(
    fn=predict,  # mapping function from input to output
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Label(num_top_classes=2, label="Predictions"),
        gr.Number(label="Prediction time (s)"),
    ],  # `predict` has two outputs, therefore two output components
    # BUG FIX: the original passed `examples=example_list`, but that variable
    # is commented out above, so the app crashed with a NameError at startup.
    # Examples are omitted until the list is defined again.
    title=title,
    description=description,
    article=article,
)

# Image-detection tab: wraps `detect` (draws hard-hat boxes on an image).
app2 = gr.Interface(
    fn=detect,
    inputs=inputs_image,
    outputs=outputs_image,
    title=title,
)

# Video-detection tab: wraps the `show_preds_video` frame generator.
app3 = gr.Interface(
    fn=show_preds_video,
    inputs=inputs_video,
    outputs=outputs_video,
    examples=video_path,
    cache_examples=False,  # example video inference is too slow to pre-cache
)

# Combine the three interfaces into one tabbed app.
demo = gr.TabbedInterface(
    [app1, app2, app3],
    ["Classify", "Detect", "Video Interface"],
)

# Launch the demo!
demo.launch()