sino72 committed on
Commit
795ed6e
·
1 Parent(s): bbde013

replace sort with deepsort

Browse files
Files changed (1) hide show
  1. app.py +17 -32
app.py CHANGED
@@ -9,6 +9,7 @@ import tempfile #创建输出临时文件夹
9
  import os
10
  from detectMotion import * #单独的运动检测
11
  import time
 
12
 
13
 
14
  #导入YoloV8模型,模型名称存储在model_list.txt中
@@ -33,8 +34,6 @@ classNames=['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train'
33
  'scissors',
34
  'teddy bear', 'hair drier', 'toothbrush']
35
 
36
- #运动检测算法参数
37
- tracker=Sort(max_age=20,min_hits=3,iou_threshold=0.3)
38
 
39
  #实时显示人流量统计图表
40
  def plot_number():
@@ -44,11 +43,6 @@ def plot_number():
44
  plt.xlabel('Second')
45
  plt.ylabel('Traffic')
46
  plt.grid(linestyle='--', alpha=0.3, linewidth=2)
47
- #若函数在YOLO模型生成bounding box前运行,则返回空图表
48
- if number_of_people_in_one_frame_list == []:
49
- time.sleep(3) #延迟3秒
50
- plt.plot([])
51
- return fig
52
  plt.plot(number_of_people_in_one_frame_list)
53
  return fig
54
 
@@ -100,6 +94,7 @@ def processImg(img,sigma,median_filter,ISR):
100
  #视频处理
101
  def processVideo(inputPath, codec, model):
102
  global number_of_people_in_one_frame_list
 
103
  if inputPath == None:
104
  raise gr.Error("请先上传视频")
105
  model=YOLO(model)
@@ -138,7 +133,8 @@ def processVideo(inputPath, codec, model):
138
  while True:
139
  ret, img = cap.read()#将每一帧图片读取到img当中
140
  results=model(img,stream=True)#使用YoloV8模型进行推理
141
- detections=np.empty((0, 5))#初始化运动检测
 
142
  if not(ret):#当视频全部读完,ret返回false,终止循环,视频帧读取和写入结束
143
  break
144
  img, __, __= hisEqulColor(img)#视频增强
@@ -147,52 +143,40 @@ def processVideo(inputPath, codec, model):
147
  boxes=r.boxes
148
  for box in boxes:
149
  #读取每一帧识别出的边界信息,并显示
150
- x1,y1,x2,y2=box.xyxy[0]
151
  x1,y1,x2,y2=int(x1),int(y1),int(x2),int(y2)#将tensor类型转变为整型
152
  conf=math.ceil(box.conf[0]*100)/100#对conf取2位小数
153
  cls=int(box.cls[0])#获取物体类别标签
154
- #当标签为人,且可信度大于0.3的时候,将人标识出来
155
- if cls==0 and conf > 0.3:
156
  number_of_people_in_one_frame += 1
157
- cv2.rectangle(img,(x1,y1),(x2,y2),(255,0,255),3)
158
- #print(conf)
159
- #cvzone.putTextRect(img,f'{classNames[cls]}{conf}',(max(0,x1),max(30,y1)),scale=0.7,thickness=1)
160
- currentArray=np.array([x1,y1,x2,y2,conf])
161
  detections=np.vstack((detections,currentArray))#按行堆叠数据
162
  sum_of_frame += 1
163
  if sum_of_frame % fps == 0:
164
  number_of_people_in_one_frame_list.append(number_of_people_in_one_frame)
165
  number_of_people_in_one_frame = 0
166
  #运动检测
167
- resultsTracker=tracker.update(detections)
168
  for result in resultsTracker:
169
  x1,y1,x2,y2,Id=result
170
  number_of_people=max(str(int(Id)),str(number_of_people))
171
  x1,y1,x2,y2=int(x1),int(y1),int(x2),int(y2)#将浮点数转变为整型
172
- cvzone.putTextRect(img,f'{int(Id)}',(max(0,x1),max(30,y1)),scale=0.7,thickness=1)
 
173
  #image_np = np.squeeze(img.render())#用np.squeeze将输出结果降维
174
  output_viedo.write(img)#将处理后的图像写入视频
175
  output_viedo.release()#释放
176
  cap.release()#释放
177
- return video_save_path,video_save_path
178
 
179
 
180
-
181
- #WebUi图形界面(interface)
182
- #demo = gr.Interface(
183
- # fn=processVideo,
184
- # inputs=["text","text"],
185
- # outputs=["text"],
186
- # examples=[["D:\\[WPF]JJDown\\Download\\walker.mp4","C:\\Users\\sino\\Downloads\\output.mkv"],["C:\\Users\\sino\\Videos\\test.mp4","C:\\Users\\sino\\Downloads\\output.mkv"]],
187
- # title="运动检测与行人跟踪",
188
- # description="请输入绝对路径"
189
- #)
190
-
191
  #WebUi图形界面(block)
192
  with gr.Blocks() as demo:
193
  gr.Markdown("""
194
  # 运动检测与行人跟踪
195
- 基于opencv + yoloV8 + sort
196
  """)
197
  with gr.Tab("视频识别"):
198
  with gr.Row():
@@ -205,6 +189,7 @@ with gr.Blocks() as demo:
205
  with gr.Column():
206
  text_output = gr.Video()
207
  text_output_path = gr.Text(label="输出路径")
 
208
  with gr.Row():
209
  with gr.Column():
210
  figure_number_output = gr.Plot(label="人流量")
@@ -237,13 +222,13 @@ with gr.Blocks() as demo:
237
 
238
 
239
  videoProcess_button.click(processVideo, inputs=[text_inputPath, codec, model],
240
- outputs=[text_output,text_output_path])
241
  image_button.click(processImg, inputs=[image_input,image_sigma,median_filter,Add_ISR],
242
  outputs=[image_output,figure_pre_output,figure_post_output])
243
  motionProcess_button.click(motionDetection, inputs=[motion_inputPath],
244
  outputs=[motion_output_frame,motion_output_fmask,
245
  frame_output_path,fmask_output_path])
246
- videoProcess_button.click(plot_number,outputs=figure_number_output,every=1)
247
 
248
  demo.queue()#当有多个请求时,排队
249
  demo.launch()#生成内网链接,如需要公网链接,括号内输入share=True
 
9
  import os
10
  from detectMotion import * #单独的运动检测
11
  import time
12
+ import deep_sort.deep_sort.deep_sort as ds
13
 
14
 
15
  #导入YoloV8模型,模型名称存储在model_list.txt中
 
34
  'scissors',
35
  'teddy bear', 'hair drier', 'toothbrush']
36
 
 
 
37
 
38
  #实时显示人流量统计图表
39
  def plot_number():
 
43
  plt.xlabel('Second')
44
  plt.ylabel('Traffic')
45
  plt.grid(linestyle='--', alpha=0.3, linewidth=2)
 
 
 
 
 
46
  plt.plot(number_of_people_in_one_frame_list)
47
  return fig
48
 
 
94
  #视频处理
95
  def processVideo(inputPath, codec, model):
96
  global number_of_people_in_one_frame_list
97
+ tracker = ds.DeepSort('deep_sort/deep_sort/deep/checkpoint/ckpt.t7')
98
  if inputPath == None:
99
  raise gr.Error("请先上传视频")
100
  model=YOLO(model)
 
133
  while True:
134
  ret, img = cap.read()#将每一帧图片读取到img当中
135
  results=model(img,stream=True)#使用YoloV8模型进行推理
136
+ detections=np.empty((0, 4))#初始化运动检测
137
+ confarray = []
138
  if not(ret):#当视频全部读完,ret返回false,终止循环,视频帧读取和写入结束
139
  break
140
  img, __, __= hisEqulColor(img)#视频增强
 
143
  boxes=r.boxes
144
  for box in boxes:
145
  #读取每一帧识别出的边界信息,并显示
146
+ x1,y1,x2,y2=box.xywh[0]
147
  x1,y1,x2,y2=int(x1),int(y1),int(x2),int(y2)#将tensor类型转变为整型
148
  conf=math.ceil(box.conf[0]*100)/100#对conf取2位小数
149
  cls=int(box.cls[0])#获取物体类别标签
150
+ #只检测人
151
+ if cls==0:
152
  number_of_people_in_one_frame += 1
153
+ currentArray=np.array([x1,y1,x2,y2])
154
+ confarray.append(conf)
 
 
155
  detections=np.vstack((detections,currentArray))#按行堆叠数据
156
  sum_of_frame += 1
157
  if sum_of_frame % fps == 0:
158
  number_of_people_in_one_frame_list.append(number_of_people_in_one_frame)
159
  number_of_people_in_one_frame = 0
160
  #运动检测
161
+ resultsTracker=tracker.update(detections, confarray, img)
162
  for result in resultsTracker:
163
  x1,y1,x2,y2,Id=result
164
  number_of_people=max(str(int(Id)),str(number_of_people))
165
  x1,y1,x2,y2=int(x1),int(y1),int(x2),int(y2)#将浮点数转变为整型
166
+ cv2.rectangle(img,(x1,y1),(x2,y2),(255,0,255),3)
167
+ cvzone.putTextRect(img,f'{int(Id)}',(max(-10,x1),max(40,y1)),scale=1.3,thickness=2)
168
  #image_np = np.squeeze(img.render())#用np.squeeze将输出结果降维
169
  output_viedo.write(img)#将处理后的图像写入视频
170
  output_viedo.release()#释放
171
  cap.release()#释放
172
+ return video_save_path,video_save_path,number_of_people
173
 
174
 
 
 
 
 
 
 
 
 
 
 
 
175
  #WebUi图形界面(block)
176
  with gr.Blocks() as demo:
177
  gr.Markdown("""
178
  # 运动检测与行人跟踪
179
+ 基于opencv + yoloV8 + deepsort
180
  """)
181
  with gr.Tab("视频识别"):
182
  with gr.Row():
 
189
  with gr.Column():
190
  text_output = gr.Video()
191
  text_output_path = gr.Text(label="输出路径")
192
+ total_ID = gr.Text(label="总人数")
193
  with gr.Row():
194
  with gr.Column():
195
  figure_number_output = gr.Plot(label="人流量")
 
222
 
223
 
224
  videoProcess_button.click(processVideo, inputs=[text_inputPath, codec, model],
225
+ outputs=[text_output,text_output_path, total_ID])
226
  image_button.click(processImg, inputs=[image_input,image_sigma,median_filter,Add_ISR],
227
  outputs=[image_output,figure_pre_output,figure_post_output])
228
  motionProcess_button.click(motionDetection, inputs=[motion_inputPath],
229
  outputs=[motion_output_frame,motion_output_fmask,
230
  frame_output_path,fmask_output_path])
231
+ videoProcess_button.click(plot_number,outputs=figure_number_output,every=2)
232
 
233
  demo.queue()#当有多个请求时,排队
234
  demo.launch()#生成内网链接,如需要公网链接,括号内输入share=True