Commit 8f7b62d · Parent(s): 70ea6df

changed objs_found structure
Files changed:
- app/api/routes.py +2 -4
- app/demo/routes.py +2 -4
- app/user/routes.py +4 -7
- face_detection/decode_yolo_v2.py +70 -29
- face_detection/helper.py +2 -2
- face_detection/inference.py +13 -9
- face_recognition/helper.py +7 -7
- test copy.py +6 -0
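
Note on the change: get_objects previously emitted one dict per detection and now emits one flat row per detection, stacked into a NumPy array. A minimal sketch of the two layouts (the field names come from the diff below; the sample values are illustrative only):

    import numpy as np

    # before: one dict per detection
    old_obj = {'p': 0.93, 'xywh': [0.41, 0.27, 0.18, 0.24], 'class_idx': 0, 'class': 'face'}

    # after: one row per detection, [P, C_IDX, X, Y, W, H], all rows in one array
    objs_found = np.array([[0.93, 0, 0.41, 0.27, 0.18, 0.24]])
    probs, class_idxs, boxes = objs_found[:, 0], objs_found[:, 1], objs_found[:, 2:]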
app/api/routes.py CHANGED

@@ -176,9 +176,8 @@ def get_crops(username):
     image=np.array(image)
     print(image.shape)
 
-
+    objs_found=face_detector.predict(image)
     # print(objs_found)
-    objs_found=face_detector.square_preprocessing.rescale(objs_found) #rescale coordinates to original image's resolution
 
     all_aligned_crops=fd_get_crops(image,objs_found,aligner_obj,resize=(face_recognizer.model_config.input_size,face_recognizer.model_config.input_size))
     all_aligned_crops_base64=[]

@@ -225,8 +224,7 @@ def face_recognition(username):
     print(faces[i],":",db_faces_features[i].shape)
 
 
-
-    objs_found=face_detector.square_preprocessing.rescale(objs_found) #rescale coordinates to original image's resolution
+    objs_found=face_detector.predict(image)
     h,w=image.shape[:2]
 
     tree=fr_helper.objs_found_to_xml("test.jpg",w,h,objs_found)
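
Note: the same deletion repeats in all three route files; the explicit square_preprocessing.rescale step disappears because predict() now rescales internally (see face_detection/inference.py below). A hedged sketch of the calling pattern, where TinyDetector is a made-up stand-in, not the repo's face_detection class, and the "before" steps are inferred since the removed lines were not fully captured:

    import numpy as np

    class TinyDetector:
        # stand-in for illustration only, not the repo's implementation
        def predict(self, image):
            # rows are [p, class, x, y, w, h]; the class index is already mapped
            # to a name and coordinates are already rescaled, as in the new predict()
            return np.array([[0.97, 'face', 0.41, 0.27, 0.18, 0.24]], dtype=object)

    face_detector = TinyDetector()
    image = np.zeros((480, 640, 3), dtype=np.uint8)

    # before this commit (two steps, sketched):
    #   objs_found = face_detector.predict(image)
    #   objs_found = face_detector.square_preprocessing.rescale(objs_found)
    # after this commit (one step):
    objs_found = face_detector.predict(image)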
app/demo/routes.py CHANGED

@@ -87,8 +87,7 @@ def set_crops():
     face_detector.image_size=get_image_size(session["demo"]['settings']['db_mode'])
     print(face_detector.image_size)
 
-
-    objs_found=face_detector.square_preprocessing.rescale(objs_found) #rescale coordinates to original image's resolution
+    objs_found=face_detector.predict(image)
     print(image.shape)
     all_aligned_crops=fd_get_crops(image,objs_found,aligner_obj,resize=(face_recognizer.model_config.input_size,face_recognizer.model_config.input_size))
     all_aligned_crops_base64=[]

@@ -156,8 +155,7 @@ def face_recognition():
 
     face_detector.image_size=get_image_size(session["demo"]['settings']['fr_mode'])
 
-
-    objs_found=face_detector.square_preprocessing.rescale(objs_found) #rescale coordinates to original image's resolution
+    objs_found=face_detector.predict(image)
     h,w=image.shape[:2]
 
     tree=fr_helper.objs_found_to_xml("test.jpg",w,h,objs_found)
app/user/routes.py CHANGED

@@ -326,9 +326,7 @@ def get_crops():
     print(image.shape)
     # do your deep learning work
     face_detector.image_size=get_image_size(settings['db_mode'])
-
-
-    objs_found=face_detector.square_preprocessing.rescale(objs_found) #rescale coordinates to original image's resolution
+    objs_found=face_detector.predict(image)
     print(image.shape)
 
     all_aligned_crops=fd_get_crops(image,objs_found,aligner_obj,resize=(face_recognizer.model_config.input_size,face_recognizer.model_config.input_size))

@@ -417,15 +415,14 @@ def face_recognition():
     # face_recognizer.set_face_db_and_mode(faces=faces,db_faces_features=db_faces_features,distance_mode="avg",recognition_mode="repeat")
     face_recognizer.set_face_db_and_mode(faces=faces,db_faces_features=db_faces_features,distance_mode="best",recognition_mode="repeat")
 
-
-
-    objs_found=face_detector.square_preprocessing.rescale(objs_found) #rescale coordinates to original image's resolution
+    objs_found=face_detector.predict(image)
     h,w=image.shape[:2]
 
     tree=fr_helper.objs_found_to_xml("test.jpg",w,h,objs_found)
+
     tree=face_recognizer.predict(image,tree)
+
     pred_img=fr_helper.show_pred_image(tree,image)
-
     pred_img=image_to_base64(pred_img)
 
     objs_found=fr_helper.xml_to_objs_found(tree)
face_detection/decode_yolo_v2.py CHANGED

@@ -8,7 +8,7 @@ from face_detection.config import cell_size,idx_to_class,class_to_idx,class_colors
 
 
 
-def get_objects(y_pred,p=0.5,decode_preds=True,idx=None):
+def get_objects(y_pred,p=0.5,decode_preds=True):
     global tf_anchors
     output_size=y_pred.shape[1]
     image_size=cell_size*output_size

@@ -42,46 +42,87 @@ def get_objects(y_pred,p=0.5,decode_preds=True,idx=None):
             obj_name=idx_to_class[obj[4]]
 
 
-            obj_details={'p':prob,'xywh':list(obj[:-1]/output_size),'class_idx':int(obj[4]),'class':obj_name} # xywh are scaled 0 to 1
-
+            # obj_details={'p':prob,'xywh':list(obj[:-1]/output_size),'class_idx':int(obj[4]),'class':obj_name} # xywh are scaled 0 to 1
+            obj_details=[prob,obj[4],*list(obj[:-1]/output_size)] # xywh are scaled 0 to 1 [P,C_IDX,X,Y,W,H]
             objs_found.append(obj_details)
-    return objs_found
+    return np.array(objs_found)
 
 def list_get_iou(bboxes1, bboxes2):
-
-
+    # bboxes has xywh => xmin,ymin,width,height
+    bboxes1 = [bboxes1[0],bboxes1[1],bboxes1[0]+bboxes1[2],bboxes1[1]+bboxes1[3]]
+    bboxes2 = [bboxes2[0],bboxes2[1],bboxes2[0]+bboxes2[2],bboxes2[1]+bboxes2[3]]
 
-
-    yA = max(bboxes1[1], bboxes2[1])
-    xB = min(bboxes1[2], bboxes2[2])
-    yB = min(bboxes1[3], bboxes2[3])
+    xA = max(bboxes1[0], bboxes2[0])
+    yA = max(bboxes1[1], bboxes2[1])
+    xB = min(bboxes1[2], bboxes2[2])
+    yB = min(bboxes1[3], bboxes2[3])
 
-
+    intersection_area = max(0, xB - xA ) * max(0, yB - yA )
 
-
-    box2_area = (bboxes2[2] - bboxes2[0] ) * (bboxes2[3] - bboxes2[1] )
+    box1_area = (bboxes1[2] - bboxes1[0] ) * (bboxes1[3] - bboxes1[1] )
+    box2_area = (bboxes2[2] - bboxes2[0] ) * (bboxes2[3] - bboxes2[1] )
 
-
+    iou = intersection_area / float(box1_area + box2_area - intersection_area+1e-6)
 
-
+    return iou
 
+def np_iou(bboxes1,bboxes2):
+    # bboxes has xywh => xmin,ymin,width,height
+
+    boxes1_x1 = bboxes1[:,0]
+    boxes1_y1 = bboxes1[:,1]
+    boxes1_x2 = boxes1_x1 + bboxes1[:,2]
+    boxes1_y2 = boxes1_y1 + bboxes1[:,3]
+
+    boxes2_x1 = bboxes2[:,0]
+    boxes2_y1 = bboxes2[:,1]
+    boxes2_x2 = boxes2_x1 + bboxes2[:,2]
+    boxes2_y2 = boxes2_y1 + bboxes2[:,3]
+
+
+    xmins = np.maximum(boxes1_x1,boxes2_x1)
+    ymins = np.maximum(boxes1_y1,boxes2_y1)
+
+    xmaxs = np.minimum(boxes1_x2,boxes2_x2)
+    ymaxs = np.minimum(boxes1_y2,boxes2_y2)
+
+
+
+    intersection = np.clip((xmaxs-xmins),0,None)*np.clip((ymaxs-ymins),0,None)
+
+    union = (boxes1_x2-boxes1_x1)*(boxes1_y2-boxes1_y1) + (boxes2_x2-boxes2_x1)*(boxes2_y2-boxes2_y1)
+    ious=intersection/((union-intersection)+1e-6)
+
+    return ious
 
 def nms(objs_found,iou_threshold=0.2):
-    objs_found
+    '''objs_found list of list:[
+        [p,c_idx,x,y,w,h],
+        [p,c_idx,x,y,w,h]
+    ]
+    '''
+    if objs_found.size<2 or iou_threshold==1: return objs_found
+
+    objs_found=objs_found[np.argsort(objs_found[:,0])[::-1] ]# This was very important
+
     best_boxes=[]
     while len(objs_found)>0:
         obj=objs_found[0]
-
+        best_boxes.append(list(obj))
+        objs_found=objs_found[1:].reshape(-1,6)
+
+        if len(objs_found)>0:
 
-
-
+            same_class_idxs=np.where(objs_found[:,1]==obj[1])[0] # same class_idx
+            same_class_objs=objs_found[same_class_idxs].reshape(-1,6)
+
+            ious=np_iou(obj[None,2:],same_class_objs[:,2:])
+
+            delete_idxs=same_class_idxs[np.where(ious>= iou_threshold)[0]]
 
-
-
-
-            delete_idx.append(b_idx)
-    objs_found=np.delete(objs_found,delete_idx)
-    best_boxes.append(obj)
+            objs_found=np.delete(objs_found,delete_idxs,axis=0)
+
+
     return best_boxes
 
 def show_objects(img,objs_found,return_img=False):

@@ -105,10 +146,10 @@ def pred_image(img,objs_found,font_scale=2,thickness=4):
         return obj_found
 
     for i in range(len(objs_found)):
-        p
-
-
-
+        # p,c_idx,x,y,w,h
+        p=objs_found[i][0]
+        obj_name=objs_found[i][1]
+        obj=rescale(objs_found[i][2:],img.shape[1],img.shape[0])
 
         img=cv2.rectangle(img,(int(obj[0]),int(obj[1])),(int(obj[0]+obj[2]),int(obj[1]+obj[3])),(class_colors[obj_name]*255),thickness)
         img=cv2.putText(img,obj_name,(int(obj[0]),int(obj[1])),cv2.FONT_HERSHEY_SIMPLEX,font_scale, (0,0,0), thickness, lineType=cv2.LINE_AA)
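
Note: the rewritten nms sorts rows by confidence (column 0) before suppressing, and only compares boxes that share a class index; overlap comes from the new vectorized np_iou. A quick sanity check of that IoU on toy boxes (np_iou here is condensed from the hunk above; boxes are [xmin, ymin, w, h]):

    import numpy as np

    def np_iou(bboxes1, bboxes2):
        # condensed from the diff: corners from xywh, clipped intersection, union = areas - intersection
        x1a, y1a = bboxes1[:, 0], bboxes1[:, 1]
        x2a, y2a = x1a + bboxes1[:, 2], y1a + bboxes1[:, 3]
        x1b, y1b = bboxes2[:, 0], bboxes2[:, 1]
        x2b, y2b = x1b + bboxes2[:, 2], y1b + bboxes2[:, 3]
        inter = np.clip(np.minimum(x2a, x2b) - np.maximum(x1a, x1b), 0, None) \
              * np.clip(np.minimum(y2a, y2b) - np.maximum(y1a, y1b), 0, None)
        areas = (x2a - x1a) * (y2a - y1a) + (x2b - x1b) * (y2b - y1b)
        return inter / (areas - inter + 1e-6)

    a = np.array([[0.0, 0.0, 2.0, 2.0]])   # 2x2 box at the origin
    b = np.array([[1.0, 1.0, 2.0, 2.0],    # overlaps a in a 1x1 patch -> IoU 1/7
                  [4.0, 4.0, 1.0, 1.0]])   # disjoint from a -> IoU 0
    print(np_iou(a, b))                    # ~[0.1429, 0.0]

Sorting by p first means the kept box is always the most confident of its overlapping same-class group, which is why the argsort line carries the comment "This was very important".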
face_detection/helper.py CHANGED

@@ -9,8 +9,8 @@ def get_crops(img,objs_found,aligner=None,resize:tuple=None):
     img_h,img_w,_=img.shape
     all_crops=[]
     for obj_found in objs_found:
-        xmin,ymin=obj_found[
-        xmax,ymax=xmin+obj_found[
+        xmin,ymin=obj_found[2],obj_found[3]
+        xmax,ymax=xmin+obj_found[4],ymin+obj_found[5]
         # rescale them
         xmin,ymin=int(xmin*img_w),int(ymin*img_h)
         xmax,ymax=int(xmax*img_w),int(ymax*img_h)
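
Note: under the new layout the crop corners come from columns 2..5 and are still normalized to 0..1, so get_crops scales them by the image size. A worked example of that arithmetic (sizes and box values are illustrative):

    img_h, img_w = 480, 640
    obj_found = [0.97, 'face', 0.25, 0.125, 0.5, 0.5]   # [p, class, x, y, w, h]

    xmin, ymin = obj_found[2], obj_found[3]
    xmax, ymax = xmin + obj_found[4], ymin + obj_found[5]
    # rescale them, as in get_crops
    xmin, ymin = int(xmin * img_w), int(ymin * img_h)
    xmax, ymax = int(xmax * img_w), int(ymax * img_h)
    print(xmin, ymin, xmax, ymax)   # 160 60 480 300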
face_detection/inference.py CHANGED

@@ -75,10 +75,10 @@ class square_pad:
     def rescale(self,objs_found):
 
         for i in range(len(objs_found)):
-            objs_found[i][
-            objs_found[i][
-            objs_found[i][
-            objs_found[i][
+            objs_found[i][2]=(objs_found[i][2]-self.w_added)/(1-2*self.w_added)
+            objs_found[i][3]=(objs_found[i][3]-self.h_added)/(1-2*self.h_added)
+            objs_found[i][4]=(objs_found[i][4])/(1-2*self.w_added)
+            objs_found[i][5]=(objs_found[i][5])/(1-2*self.h_added)
         return objs_found
 
 

@@ -208,7 +208,7 @@ class face_detection:
         resized_img=cv2.resize(img,[self.image_size,self.image_size])
         objs_found=self.invoke_model(resized_img[None,:,:,:],self.p_thres,self.nms_thres,batch_size=1)[0]
 
-        return
+        return objs_found
 
     def predict(self,img):
 

@@ -235,14 +235,18 @@ class face_detection:
         all_image_size=copy.deepcopy(self.image_size)
         for image_size in all_image_size:
             self.image_size=image_size
-
+            objs_found=self.predict_once(img)
             all_objs_found.extend(objs_found)
         self.image_size=all_image_size
 
-        all_objs_found=
+        all_objs_found=np.array(all_objs_found)
+
         all_objs_found=nms(all_objs_found,self.nms_thres)
-
-
+        all_objs_found=self.square_preprocessing.rescale(all_objs_found) #rescale coordinates to original image's resolution
+        for obj_found in all_objs_found: obj_found[1]=idx_to_class[obj_found[1]]
+        # print(all_objs_found)
+
+        return all_objs_found
 
 
 
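
Note: square_pad.rescale maps box coordinates from the padded-square frame back to the original image; w_added and h_added are the fractions padded onto each side, so positions shift by the pad and everything stretches by 1/(1-2*pad). A numeric check under an assumed scenario (a 640x480 image padded to 640x640, so h_added = 80/640 = 0.125 and w_added = 0):

    w_added, h_added = 0.0, 0.125           # per-side padding fractions (assumed)
    obj = [0.9, 0, 0.30, 0.50, 0.20, 0.25]  # [p, c_idx, x, y, w, h] in padded coords

    obj[2] = (obj[2] - w_added) / (1 - 2 * w_added)   # x: unchanged, no side padding
    obj[3] = (obj[3] - h_added) / (1 - 2 * h_added)   # y: (0.5 - 0.125) / 0.75 = 0.5
    obj[4] = obj[4] / (1 - 2 * w_added)               # w: unchanged
    obj[5] = obj[5] / (1 - 2 * h_added)               # h: 0.25 / 0.75 ~ 0.333
    print(obj[2:])                                    # [0.3, 0.5, 0.2, 0.333...]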
face_recognition/helper.py CHANGED

@@ -16,7 +16,7 @@ def show_pred_image(tree,img):
 
     xmin,ymin , xmax,ymax=int(bndbox.find('xmin').text),int(bndbox.find('ymin').text),int(bndbox.find('xmax').text),int(bndbox.find('ymax').text)
 
-    if
+    if obj.find("distance") is None:
         color=default_color
     else:
         color=random_color

@@ -89,22 +89,22 @@ def objs_found_to_xml(test_img,w,h,objs_found):
 
     # add all objects
     for obj_found in objs_found:
-        obj_found[
+        obj_found[2:]=rescale(obj_found[2:],w,h)
 
         obj_tag=ET.Element("object")
         name_tag=ET.Element("name")
-        name_tag.text=obj_found[
+        name_tag.text=obj_found[1]
         obj_tag.append(name_tag)
 
         bndbox_tag=ET.Element("bndbox")
         xmin_tag=ET.Element("xmin")
-        xmin_tag.text=str(int(obj_found[
+        xmin_tag.text=str(int(obj_found[2]))
         ymin_tag=ET.Element("ymin")
-        ymin_tag.text=str(int(obj_found[
+        ymin_tag.text=str(int(obj_found[3]))
         xmax_tag=ET.Element("xmax")
-        xmax_tag.text=str(int(obj_found[
+        xmax_tag.text=str(int(obj_found[2]+obj_found[4]))
         ymax_tag=ET.Element("ymax")
-        ymax_tag.text=str(int(obj_found[
+        ymax_tag.text=str(int(obj_found[3]+obj_found[5]))
 
         bndbox_tag.append(xmin_tag)
         bndbox_tag.append(ymin_tag)
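
Note: with the flat rows, objs_found_to_xml reads the class name from index 1 and derives the box corners as x, y, x+w, y+h after rescaling to pixels. A hedged sketch of the per-object XML it builds (the surrounding annotation and size tags are outside this hunk, so they are not shown):

    import xml.etree.ElementTree as ET

    obj_found = [0.97, 'face', 160, 60, 320, 240]   # row already rescaled to pixels

    obj_tag = ET.Element("object")
    ET.SubElement(obj_tag, "name").text = obj_found[1]
    bndbox = ET.SubElement(obj_tag, "bndbox")
    ET.SubElement(bndbox, "xmin").text = str(int(obj_found[2]))
    ET.SubElement(bndbox, "ymin").text = str(int(obj_found[3]))
    ET.SubElement(bndbox, "xmax").text = str(int(obj_found[2] + obj_found[4]))
    ET.SubElement(bndbox, "ymax").text = str(int(obj_found[3] + obj_found[5]))
    print(ET.tostring(obj_tag).decode())
    # <object><name>face</name><bndbox><xmin>160</xmin><ymin>60</ymin><xmax>480</xmax><ymax>300</ymax></bndbox></object>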
test copy.py ADDED

@@ -0,0 +1,6 @@
+import numpy as np
+import cv2
+import face_detection as fd
+
+fd
+