|
|
|
|
|
from fastapi import FastAPI |
|
|
from pydantic import BaseModel |
|
|
import uvicorn |
|
|
import requests |
|
|
from PIL import Image |
|
|
from io import BytesIO |
|
|
import numpy as np |
|
|
import cv2 as cv |
|
|
from keras_facenet import FaceNet |
|
|
from mtcnn import MTCNN |
|
|
import pickle |
|
|
from sklearn.preprocessing import LabelEncoder |
|
|
|
|
|
# --- Model setup (runs once at import time) ---

# FaceNet embedder: maps a 160x160 RGB face crop to an embedding vector.
embedder = FaceNet()

# MTCNN detector used to locate face bounding boxes in incoming images.
detector = MTCNN()

# Fit the label encoder on the stored training labels so classifier outputs
# (integer class ids) can be mapped back to the original label strings.
# NOTE(review): absolute path — assumes this exact machine layout; confirm
# before deploying elsewhere.
data = np.load('/home/shanin/Desktop/SHANIN/MAIN/ALL_CODE/Face_Recognition/Face_Embedding.npz')

Y = data['labels']

encoder = LabelEncoder()

encoder.fit(Y)

# Load the trained face classifier.
# SECURITY: pickle.load executes arbitrary code from the file — only ever
# load model files from a trusted source.
with open('/home/shanin/Desktop/SHANIN/MAIN/ALL_CODE/Face_Recognition/Face_Model.pkl', 'rb') as file:
    model = pickle.load(file)

app = FastAPI()
|
|
|
|
|
|
|
|
def get_embedding(face_img):
    """Compute the FaceNet embedding for a single face crop.

    Args:
        face_img: face image array (a 160x160x3 crop is what callers pass
            here — TODO confirm FaceNet's exact input contract).

    Returns:
        The embedding vector produced by the module-level ``embedder``.
    """
    # FaceNet consumes batches, so promote the single crop to a 1-image batch.
    batch = face_img.astype('float32')[np.newaxis, ...]
    return embedder.embeddings(batch)[0]
|
|
|
|
|
|
|
|
def resize_image(image, max_size=1024):
    """Downscale *image* so its longest side is at most ``max_size`` pixels.

    Prevents excessive memory usage for very large images. Aspect ratio is
    preserved; images already within the limit are returned unchanged.

    Args:
        image: image array of shape (h, w[, channels]).
        max_size: maximum allowed size of the longest side, in pixels.

    Returns:
        The original array if small enough, otherwise a resized copy.
    """
    height, width = image.shape[:2]
    longest_side = max(height, width)
    if longest_side <= max_size:
        return image
    ratio = max_size / longest_side
    target = (int(width * ratio), int(height * ratio))
    return cv.resize(image, target)
|
|
|
|
|
|
|
|
def get_result(img_url, code):
    """Download an image, detect the first face, and classify its identity.

    Args:
        img_url: URL of the image to fetch.
        code: client-supplied identity code; images from 'CM_370009' arrive
            rotated and are corrected before detection.

    Returns:
        The predicted label for the detected face (as produced by the
        module-level ``encoder``).

    Raises:
        ValueError: if no usable face is detected in the image.
        requests.RequestException: if the download fails or times out.
    """
    # Timeout prevents a hung download from blocking the worker forever;
    # raise_for_status fails fast on 4xx/5xx instead of feeding an HTML
    # error page into PIL.
    response = requests.get(img_url, timeout=30)
    response.raise_for_status()

    image = Image.open(BytesIO(response.content))
    if image.mode != "RGB":
        image = image.convert("RGB")

    # Images tagged with this code arrive rotated; correct the orientation
    # before face detection.
    if code == 'CM_370009':
        image = image.rotate(-90, expand=True)

    # Cap image size to bound memory use before running detection.
    image_np = resize_image(np.array(image))

    detection_results = detector.detect_faces(image_np)
    if not detection_results:
        raise ValueError("No face detected in the image.")

    # MTCNN can return slightly negative box coordinates near image edges;
    # clamp them so the crop stays inside the array.
    x, y, w, h = detection_results[0]['box']
    x, y = max(0, x), max(0, y)
    face_image = image_np[y:y + h, x:x + w]
    if face_image.size == 0:
        # Degenerate box — treat the same as "no face" rather than letting
        # cv.resize fail with an opaque error.
        raise ValueError("No face detected in the image.")

    # FaceNet expects 160x160 inputs.
    face_image_resized = cv.resize(face_image, (160, 160))

    embedding = get_embedding(face_image_resized)
    ypreds = model.predict([embedding])
    return encoder.inverse_transform(ypreds)[0]
|
|
|
|
|
|
|
|
class ImageRequest(BaseModel):
    """Request body for the /face endpoint."""

    # Expected identity code to verify the recognized face against.
    code: str

    # URL of the image to download and run face recognition on.
    img: str
|
|
|
|
|
@app.post("/face")
async def face_recognition(request_data: ImageRequest):
    """Verify that the face at ``request_data.img`` matches ``request_data.code``.

    Returns:
        True when the predicted label equals the supplied code, else False.
    """
    code = request_data.code
    img_url = request_data.img

    result = get_result(img_url, code)
    print(result)

    # Direct comparison replaces the if/else chain. bool() normalizes a
    # possible numpy.bool_ (encoder labels may be numpy strings) so FastAPI
    # serializes a plain JSON boolean.
    return bool(result == code)
|
|
|
|
|
if __name__ == "__main__":
    # Launch the API with uvicorn when the module is executed directly.
    # NOTE(review): binds to localhost only, port 4444 — confirm intended
    # for the deployment environment.
    uvicorn.run(app, host="127.0.0.1", port=4444)
|
|
|