Commit 72de35d
Parent(s): 1bc858f

square_preprocessing.rescale applied everywhere

Files changed:
- app/api/routes.py +3 -2
- app/demo/routes.py +10 -7
- app/helper.py +1 -1
- app/static/demo/index/script.js +3 -3
- app/static/user/dashboard.js +2 -2
- app/user/routes.py +14 -6
app/api/routes.py
CHANGED
@@ -176,8 +176,9 @@ def get_crops(username):
  image=np.array(image)
  print(image.shape)

-
- print(objs_found)
+ _,objs_found=face_detector.predict(image)
+ # print(objs_found)
+ objs_found=face_detector.square_preprocessing.rescale(objs_found) #rescale coordinates to original image's resolution

  all_aligned_crops=fd_get_crops(image,objs_found,aligner_obj,resize=(face_recognizer.model_config.input_size,face_recognizer.model_config.input_size))
  all_aligned_crops_base64=[]
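The pattern introduced here (detect on the square-resized copy, then map the boxes back with square_preprocessing.rescale) is the point of the whole commit. The rescale internals are not part of this diff; below is a minimal sketch of the idea, assuming bottom-right zero padding to a square and (x1, y1, x2, y2, ...) box rows:

import cv2
import numpy as np

# Minimal sketch of the square-preprocessing idea; the real implementation
# behind face_detector.square_preprocessing is not in this diff, and the
# padding strategy and box layout (x1, y1, x2, y2, ...) are assumptions.
class SquarePreprocessingSketch:
    def __init__(self, target_size):
        self.target_size = target_size
        self.scale = 1.0  # remembered by resize(), consumed by rescale()

    def resize(self, image):
        # Zero-pad bottom/right to a square, then shrink to target_size.
        h, w = image.shape[:2]
        side = max(h, w)
        padded = np.zeros((side, side, image.shape[2]), dtype=image.dtype)
        padded[:h, :w] = image
        self.scale = side / self.target_size
        return cv2.resize(padded, (self.target_size, self.target_size))

    def rescale(self, objs_found):
        # Map boxes from the detector's square input back to the original
        # resolution; bottom/right padding means no offset, only a scale.
        return [[x1 * self.scale, y1 * self.scale,
                 x2 * self.scale, y2 * self.scale, *rest]
                for x1, y1, x2, y2, *rest in objs_found]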
app/demo/routes.py
CHANGED
@@ -87,9 +87,9 @@ def set_crops():
  face_detector.image_size=get_image_size(session["demo"]['settings']['db_mode'])
  print(face_detector.image_size)

-
-
-
+ _,objs_found=face_detector.predict(image)
+ objs_found=face_detector.square_preprocessing.rescale(objs_found) #rescale coordinates to original image's resolution
+ print(image.shape)
  all_aligned_crops=fd_get_crops(image,objs_found,aligner_obj,resize=(face_recognizer.model_config.input_size,face_recognizer.model_config.input_size))
  all_aligned_crops_base64=[]
  all_aligned_crops_names=[]
@@ -156,11 +156,14 @@ def face_recognition():

  face_detector.image_size=get_image_size(session["demo"]['settings']['fr_mode'])

-
-
+ _,objs_found=face_detector.predict(image)
+
+ objs_found=face_detector.square_preprocessing.rescale(objs_found) #rescale coordinates to original image's resolution
+ h,w=image.shape[:2]
+
  tree=fr_helper.objs_found_to_xml("test.jpg",w,h,objs_found)
- tree=face_recognizer.predict(
- pred_img=fr_helper.show_pred_image(tree,
+ tree=face_recognizer.predict(image,tree)
+ pred_img=fr_helper.show_pred_image(tree,image)

  pred_img=helper.image_to_base64(pred_img)

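With these additions, this face_recognition handler and the one in app/user/routes.py below run the same sequence. Restated linearly for readability; the names come from the diff, the comments are interpretation:

# The flow the fixed handlers follow; names from the diff, comments interpretive.
_, objs_found = face_detector.predict(image)                         # boxes on the resized input
objs_found = face_detector.square_preprocessing.rescale(objs_found)  # back to original coordinates
h, w = image.shape[:2]                                               # original resolution for the annotation header
tree = fr_helper.objs_found_to_xml("test.jpg", w, h, objs_found)     # detections as an annotation tree
tree = face_recognizer.predict(image, tree)                          # attach an identity to each box
pred_img = fr_helper.show_pred_image(tree, image)                    # render boxes and labels
pred_b64 = helper.image_to_base64(pred_img)                          # JPEG -> base64 for the JSON response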
app/helper.py
CHANGED
@@ -13,7 +13,7 @@ def image_to_base64(img):
  rawBytes=io.BytesIO()
  img.save(rawBytes,"JPEG")
  rawBytes.seek(0)
- img_base64=
+ img_base64=base64.b64encode(rawBytes.read()).decode()
  return img_base64

 def base64_to_image(img_base64):
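The fix completes the base64 encoding of the in-memory JPEG and decodes it to a str so it can be dropped straight into a JSON response. A self-contained round trip for reference: image_to_base64 matches the fixed helper, while the body of base64_to_image is an assumption, since only its signature appears in this diff:

import base64
import io

from PIL import Image

def image_to_base64(img):
    # As in the fixed helper: serialize to JPEG in memory, then base64-encode
    # to a str that is JSON-serializable.
    rawBytes = io.BytesIO()
    img.save(rawBytes, "JPEG")
    rawBytes.seek(0)
    return base64.b64encode(rawBytes.read()).decode()

def base64_to_image(img_base64):
    # Assumed inverse; only the signature appears in this diff.
    return Image.open(io.BytesIO(base64.b64decode(img_base64)))

# Round trip: a 64x64 red square survives encode/decode.
img = Image.new("RGB", (64, 64), (255, 0, 0))
assert base64_to_image(image_to_base64(img)).size == (64, 64)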
app/static/demo/index/script.js
CHANGED
@@ -119,7 +119,7 @@ database_input.addEventListener("change", function(e){
  var img_container_tag=document.createElement("div");
  var img_remove_tag=document.createElement("p");
  var img_tag=document.createElement("img");
- img_tag.src="data:image/jpeg;base64,"+response['image']
+ img_tag.src="data:image/jpeg;base64,"+response['image'];
  img_tag.setAttribute("class","db_image");
  img_remove_tag.setAttribute("class","close_text");
  img_remove_tag.innerHTML="&#10006;";
@@ -165,7 +165,7 @@ database_input.addEventListener("change", function(e){
  {
  var crop_container=document.querySelector("#unassigned_faces");
  var crop_img=document.createElement("img");
- crop_img.src="data:image/jpeg;base64,"+response["crops"][i]
+ crop_img.src="data:image/jpeg;base64,"+response["crops"][i];
  crop_img.dataset.image_name=response['image_name'];

  crop_img.setAttribute("class","crop_img");
@@ -406,7 +406,7 @@ function face_recognition(elem)
  }).then(function(response){
  hide_loading_bar();
  console.log(response);
- document.querySelector("#face_rec_image").src="data:image/jpeg;base64,"+response['image']
+ document.querySelector("#face_rec_image").src="data:image/jpeg;base64,"+response['image'];
  })

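All three JavaScript changes terminate the same kind of statement: assigning an img.src from a data: URI built out of a base64 JPEG carried in the JSON response. The server side that makes this work is the .decode() fix in app/helper.py above. A hypothetical Flask-side sketch of the payload shape the front-end reads; the route path and placeholder values are illustrative, not the app's actual code:

from flask import Flask, jsonify

app = Flask(__name__)

# Hypothetical endpoint illustrating the JSON fields the front-end consumes
# (response['image'], response['crops'][i], response['image_name']); the real
# handlers live in app/demo/routes.py and app/user/routes.py.
@app.route("/demo/set_crops", methods=["POST"])
def set_crops():
    image_b64 = "..."   # helper.image_to_base64(...) in the real code
    crops_b64 = []      # one base64 JPEG string per detected face
    return jsonify({
        "image": image_b64,   # JS: img.src = "data:image/jpeg;base64," + response['image']
        "crops": crops_b64,
        "image_name": "example.jpg",
    })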
app/static/user/dashboard.js
CHANGED
@@ -216,7 +216,7 @@ function filterFunction() {
  {

  var crop_img=document.createElement("img");
- crop_img.src="data:image/jpeg;base64,"+response["crops"][i]
+ crop_img.src="data:image/jpeg;base64,"+response["crops"][i];
  crop_img.setAttribute("class","crop_img");
  // crop_img.setAttribute("id",crop_img.dataset.image_name+'\\'+crop_img.dataset.crop_name);
  crop_img.setAttribute("draggable","true");
@@ -326,7 +326,7 @@ function face_recoginization(elem){
  }).then(function(response){
  hide_loading_bar();
  console.log(response);
- document.querySelector("#face_recognition_image").src="data:image/jpeg;base64,"+response["pred_image"]
+ document.querySelector("#face_recognition_image").src="data:image/jpeg;base64,"+response["pred_image"];
  document.querySelector("#face_recognition_image").style.width="unset";
  })
 }
app/user/routes.py
CHANGED
@@ -326,8 +326,10 @@ def get_crops():
  print(image.shape)
  # do your deep learning work
  face_detector.image_size=get_image_size(settings['db_mode'])
-
-
+ _,objs_found=face_detector.predict(image)
+
+ objs_found=face_detector.square_preprocessing.rescale(objs_found) #rescale coordinates to original image's resolution
+ print(image.shape)

  all_aligned_crops=fd_get_crops(image,objs_found,aligner_obj,resize=(face_recognizer.model_config.input_size,face_recognizer.model_config.input_size))
  all_aligned_crops_base64=[]
@@ -414,12 +416,18 @@ def face_recognition():

  # face_recognizer.set_face_db_and_mode(faces=faces,db_faces_features=db_faces_features,distance_mode="avg",recognition_mode="repeat")
  face_recognizer.set_face_db_and_mode(faces=faces,db_faces_features=db_faces_features,distance_mode="best",recognition_mode="repeat")
-
-
+
+ _,objs_found=face_detector.predict(image)
+
+ objs_found=face_detector.square_preprocessing.rescale(objs_found) #rescale coordinates to original image's resolution
+ h,w=image.shape[:2]
+
  tree=fr_helper.objs_found_to_xml("test.jpg",w,h,objs_found)
- tree=face_recognizer.predict(
- pred_img=fr_helper.show_pred_image(tree,
+ tree=face_recognizer.predict(image,tree)
+ pred_img=fr_helper.show_pred_image(tree,image)
+
  pred_img=image_to_base64(pred_img)
+
  objs_found=fr_helper.xml_to_objs_found(tree)

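fd_get_crops is the consumer of the rescaled boxes: it cuts each detected face out of the full-resolution image and resizes it to the recognizer's input size. Its implementation is not part of this commit, so the sketch below is an assumption (alignment via aligner_obj omitted, boxes assumed to start with x1, y1, x2, y2):

import cv2

# Sketch of what the fd_get_crops call site implies; the real function also
# aligns faces with aligner_obj, which is omitted here.
def get_crops_sketch(image, objs_found, resize):
    h, w = image.shape[:2]
    crops = []
    for x1, y1, x2, y2, *_ in objs_found:
        x1, y1 = max(int(x1), 0), max(int(y1), 0)
        x2, y2 = min(int(x2), w), min(int(y2), h)
        crops.append(cv2.resize(image[y1:y2, x1:x2], resize))
    return crops

# Without the new square_preprocessing.rescale step, these slices would use
# coordinates from the detector's resized input and crop the wrong regions
# of the original image.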