# NOTE: page-scrape header from the hosting Hugging Face Space (its build status
# read "Build error"); this text is not part of the application code below.
| import gradio as gr | |
| from deepface import DeepFace | |
| import numpy as np | |
| import PIL | |
| from PIL import Image, ImageDraw, ImageFont | |
| import time | |
| import pandas as pd | |
| from operator import itemgetter | |
| import os | |
def get_named_people(db_path='db'):
    """Return the names of known people in the face database.

    The database layout is one subdirectory per person (e.g. ``db/First_Last``).

    Args:
        db_path: root folder of the face database. Defaults to ``'db'`` so
            existing callers keep their behavior.

    Returns:
        list[str]: the immediate subdirectory names of *db_path* (unordered).
    """
    # os.walk yields (dirpath, dirnames, filenames); the first tuple's
    # dirnames are exactly the top-level per-person folders.
    return next(os.walk(db_path))[1]
# Face-detector backends offered in the UI: [display label, DeepFace backend id].
# Entries are commented out where the backend failed at runtime (see inline notes).
dbackends = [
    ['Haar Cascade (OpenCV)','opencv'],
    #['π Single Shot MultiBox Detector (OpenCV)','ssd'], # for whatever reason fails
    #['Histogram of Oriented Gradients (Dlib)','dlib'], # dlib seems broken on modern ubuntu
    ['RetinaFace','retinaface'],
    ['You Only Look Once v8','yolov8'],
    ['π YuNet','yunet'],
    #['Multi-task Cascade Convolutional Neural Network (TensorFlow) ','mtcnn'],
    ['Fast Multi-task Cascade Convolutional Neural Network (PyTorch)','fastmtcnn']
]
# Face-embedding (recognition) models accepted as DeepFace.find's model_name.
embedding_backends = [
    "VGG-Face",
    "Facenet",
    "Facenet512",
    "ArcFace",
]
# Help text shown under the detector radio. NOTE(review): 'π' here and in the
# labels above looks like a mis-encoded emoji/marker from the original source —
# confirm the intended glyph before changing the user-visible string.
dbackendinfo = 'Detectors with π require a color image.'
# Top-level Gradio app: four tabs sharing the DeepFace face database in ./db.
with gr.Blocks() as demo:
    # --- Tab 1: detect faces in an image so one can be added as a named person ---
    with gr.Tab("Add Named Person"):
        # Demo input image shipped alongside the app; the user can replace it.
        input_image = gr.Image(value="8428_26_SM.jpg")
        # Shows the input with one clickable bounding box per detected face.
        annotated_image = gr.AnnotatedImage()
        selected_face_info = gr.Textbox(label="Selected Face Info", value="Click on a face above")
        # height=148 keeps the selected-face crop thumbnail-sized.
        selected_face_pic = gr.Image(label="Selected Face", value="Click on a face above", height=148)
| def findFaces(imgfile,dbackend): | |
| start_time = time.time() | |
| print(start_time) | |
| face_objs = DeepFace.extract_faces(img_path = imgfile, enforce_detection = False, detector_backend = dbackend) | |
| numberoffaces = len(face_objs) | |
| jsontext = '' | |
| global faceannotations | |
| faceannotations = [] | |
| for i, face_obj in enumerate(face_objs,1): | |
| face_coordinates = (face_obj["facial_area"]["x"],face_obj["facial_area"]["y"], (face_obj["facial_area"]["x"] + face_obj["facial_area"]["w"]),(face_obj["facial_area"]["y"] + face_obj["facial_area"]["h"])) | |
| face_confidence = "Face " + str(i) + ": "+ "{:.0%}".format(face_obj["confidence"]) | |
| face_result=[face_coordinates,face_confidence] | |
| faceannotations.append(face_result) | |
| #jsontext=faceannotations | |
| #jsontext=face_objs | |
| run_time = str(round((time.time() - start_time),2)) | |
| results = gr.AnnotatedImage( | |
| label= "Detected " + str(numberoffaces) + " faces via " + dbackend + ' in ' + run_time + ' seconds.', | |
| value=(imgfile, faceannotations) | |
| ) | |
| print(run_time) | |
| return(results,numberoffaces,run_time) | |
        # Detector choice; value must be one of the backend ids in dbackends.
        dbackendchoice = gr.Radio(choices=dbackends,label='Detector Backend:',info=dbackendinfo,container=True,value='retinaface')
        # Wire findFaces to the components above. NOTE(review): findFaces returns
        # (AnnotatedImage, face count, run-time string) but the third output slot
        # is an Image component — confirm the intended output mapping.
        gr.Interface(
            allow_flagging = "never",
            fn=findFaces,
            inputs=[input_image, dbackendchoice],
            outputs=[annotated_image,selected_face_info,selected_face_pic],
        )
        def select_section(evt: gr.SelectData):
            # Click handler for annotated_image: evt.index is the index of the
            # clicked annotation within the global faceannotations list
            # (populated by findFaces).
            # NOTE(review): assumes input_image.value is a dict with a 'path'
            # key (Gradio file payload) — confirm against the installed Gradio
            # version; early versions stored a plain filepath/array instead.
            cropped_image = np.array(Image.open(input_image.value['path']))
            # Crop rows (y1:y2) then columns (x1:x2); the annotation box is
            # stored as (x1, y1, x2, y2).
            cropped_image = cropped_image[faceannotations[evt.index][0][1]:faceannotations[evt.index][0][3], faceannotations[evt.index][0][0]:faceannotations[evt.index][0][2]]
            return faceannotations[evt.index], cropped_image
        annotated_image.select(select_section, None, [selected_face_info,selected_face_pic])
| with gr.Tab("Find Named Person in All Images"): | |
| with gr.Row(): | |
| named_people_dropdown = [] | |
| for named_person in get_named_people(): | |
| named_people_dropdown.append(named_person.replace("_"," ")) | |
| find_list = gr.Dropdown(named_people_dropdown, label="Person", info="Select a Named Person."), | |
| find_button = gr.Button(value="Find this person") | |
    # --- Tab 3: recognize all known people in a single image ---
    with gr.Tab("Identify People in One Image"):
        # Embedding model used by DeepFace.find when matching against ./db.
        embedding_backendchoice = gr.Radio(choices=embedding_backends,label='Embedding Backend:',container=True,value='ArcFace')
| def identify_in_one_image(imgfile, embedding_backendchoice): | |
| oneimageannotations = [] | |
| oneimageresults = DeepFace.find(img_path=imgfile, db_path="db", model_name=embedding_backendchoice) | |
| oneimageresults = pd.concat(oneimageresults) | |
| for i, found_face in oneimageresults.iterrows(): | |
| face_coordinates = (found_face["source_x"],found_face["source_y"], (found_face["source_x"] + found_face["source_w"]),(found_face["source_y"] + found_face["source_h"])) | |
| person = found_face["identity"].split("/")[1].replace("_"," ") | |
| face_confidence = "Matched " + person + " {:.0%}".format(found_face["distance"]) | |
| face_thumbnail = found_face["identity"] | |
| face_result=[face_coordinates,face_confidence] | |
| oneimageannotations.append(face_result) | |
| results = gr.AnnotatedImage( | |
| value=(imgfile, oneimageannotations) | |
| ) | |
| return results, oneimageannotations | |
        # Demo input image for the identification tab.
        oneimage_input_image = gr.Image(value="TEST_spindler.jpg", label='Input image')
        found_faces=gr.AnnotatedImage(label='Identified people')
        debug_output = gr.Textbox(label="Debug output")
        #face_thumbnail = gr.Textbox(label="Identified person")
        # Wire identify_in_one_image to the components above.
        gr.Interface(
            allow_flagging = "never",
            fn=identify_in_one_image,
            inputs=[oneimage_input_image, embedding_backendchoice],
            outputs=[found_faces,debug_output]
        )
    # --- Tab 4: review the gallery of named people (modify UI) ---
    with gr.Tab("Modify Named Person") as ModifyNamedPersonTab:
| def get_named_people_dropdown(): | |
| named_people_gallery_imgs = [] | |
| named_people_gallery_captions = [] | |
| for named_person in get_named_people(): | |
| #named_person = named_person.replace("_"," ") | |
| named_people_gallery_imgs.append("db/" + named_person + "/" + named_person.replace("_","") + ".jpg") | |
| named_people_gallery_captions.append(named_person.replace("_"," ")) | |
| named_people_gallery_all = list(zip(named_people_gallery_imgs, named_people_gallery_captions)) | |
| images = named_people_gallery_all | |
| images.sort(key=itemgetter(1)) | |
| return images | |
        # Gallery is populated lazily: selecting the tab runs
        # get_named_people_dropdown and streams its result into the gallery.
        named_person_gallery = gr.Gallery(
            label="Named People", elem_id="gallery", object_fit="none", columns=9)
        ModifyNamedPersonTab.select(get_named_people_dropdown, None, named_person_gallery)
    #jsontext = gr.Text(label= "deepface extract_faces results")
# show_error=True surfaces Python exceptions in the browser UI instead of
# failing silently.
demo.launch(show_error=True)