# FaceGNN / CODE / ML_api.py — FaceGNN v1.1 (author: aiyubali, commit 19ea92a)
from fastapi import FastAPI
from pydantic import BaseModel
import uvicorn
import requests
from PIL import Image
from io import BytesIO
import numpy as np
import cv2 as cv
from keras_facenet import FaceNet
from mtcnn import MTCNN
import pickle
from sklearn.preprocessing import LabelEncoder
# Heavy models are instantiated once at import time: FaceNet produces face
# embeddings, MTCNN detects face bounding boxes.
embedder = FaceNet()
detector = MTCNN()
# Load the face recognition model and labels
# NOTE(review): hard-coded absolute paths tie this service to one machine —
# consider moving them to configuration/environment variables.
data = np.load('/home/shanin/Desktop/SHANIN/MAIN/ALL_CODE/Face_Recognition/Face_Embedding.npz')
Y = data['labels']
# Fit the encoder on the stored labels so numeric predictions can be mapped
# back to the original label strings via inverse_transform.
encoder = LabelEncoder()
encoder.fit(Y)
# SECURITY: pickle.load executes arbitrary code if the file is tampered with;
# only load this model file from a trusted location.
with open('/home/shanin/Desktop/SHANIN/MAIN/ALL_CODE/Face_Recognition/Face_Model.pkl', 'rb') as file:
    model = pickle.load(file)
app = FastAPI()
# Function to get the face embedding
def get_embedding(face_img):
    """Return the FaceNet embedding vector for a single face image.

    The image is cast to float32 and wrapped in a batch dimension before
    being handed to the embedder; the lone resulting vector is returned.
    """
    batch = np.expand_dims(face_img.astype('float32'), axis=0)
    return embedder.embeddings(batch)[0]
# Function to resize large images
def resize_image(image, max_size=1024):
    """Downscale `image` so that its longest side is at most `max_size` px.

    Aspect ratio is preserved. Images already within the limit are returned
    unchanged. This bounds memory usage for very large uploads.
    """
    height, width = image.shape[:2]
    longest = max(height, width)
    if longest <= max_size:
        return image
    ratio = max_size / longest
    return cv.resize(image, (int(width * ratio), int(height * ratio)))
# Function to process the image URL and make predictions
def get_result(img_url, code):
    """Download an image, detect the first face, and return its predicted label.

    Parameters
    ----------
    img_url : str
        URL of the image to fetch.
    code : str
        Client code; images from 'CM_370009' arrive rotated and are
        corrected before detection.

    Returns
    -------
    The label the classifier predicts for the first detected face.

    Raises
    ------
    ValueError
        If no face is detected in the image.
    requests.HTTPError
        If the image download returns an error status.
    """
    # Timeout so a stalled server cannot hang the request forever, and
    # raise_for_status so an HTML error page is never fed to PIL.
    response = requests.get(img_url, timeout=10)
    response.raise_for_status()
    image = Image.open(BytesIO(response.content))
    if image.mode != "RGB":
        image = image.convert("RGB")
    if code == 'CM_370009':
        # Rotate the image 90 degrees (one time) clockwise
        image = image.rotate(-90, expand=True)
    # Convert to a NumPy array and cap its size to bound memory usage.
    image_np = resize_image(np.array(image))
    detection_results = detector.detect_faces(image_np)
    if not detection_results:
        raise ValueError("No face detected in the image.")
    # Extract face from the detected bounding box.
    x, y, w, h = detection_results[0]['box']
    # MTCNN may return slightly negative box origins; clamp to the image
    # bounds so the slice below is never empty or wrapped by negative indexing.
    x, y = max(0, x), max(0, y)
    face_image = image_np[y:y + h, x:x + w]
    # FaceNet expects 160x160 inputs.
    face_image_resized = cv.resize(face_image, (160, 160))
    # Get embedding for face and make prediction.
    embedding = get_embedding(face_image_resized)
    ypreds = model.predict([embedding])
    return encoder.inverse_transform(ypreds)[0]
# FastAPI Request Model
class ImageRequest(BaseModel):
    """Request body for POST /face: an identity code and an image URL."""

    # Expected identity code to verify against the recognized face.
    code: str
    # URL of the image to download and analyse.
    img: str
@app.post("/face")
async def face_recognition(request_data: ImageRequest):
    """Verify that the face in the submitted image matches the given code.

    Returns True when the predicted label equals the supplied code, and
    False otherwise — including when no face can be detected, which
    previously escaped as a ValueError and surfaced to the client as a 500.
    """
    code = request_data.code
    img_url = request_data.img
    try:
        result = get_result(img_url, code)
    except ValueError:
        # No detectable face cannot be a match; report failure, not an error.
        return False
    print(result)
    return result == code
# Run the API with uvicorn (loopback only) when executed as a script.
if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=4444)