File size: 3,032 Bytes
19ea92a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 |
from fastapi import FastAPI
from pydantic import BaseModel
import uvicorn
import requests
from PIL import Image
from io import BytesIO
import numpy as np
import cv2 as cv
from keras_facenet import FaceNet
from mtcnn import MTCNN
import pickle
from sklearn.preprocessing import LabelEncoder
# Initialize the FaceNet embedder and MTCNN face detector once at import time
# (both are expensive to construct; shared by all requests).
embedder = FaceNet()
detector = MTCNN()
# Load the face recognition model and labels
# NOTE(review): hard-coded absolute path — breaks on any other machine; consider an env var
data = np.load('/home/shanin/Desktop/SHANIN/MAIN/ALL_CODE/Face_Recognition/Face_Embedding.npz')
Y = data['labels']  # label array used only to fit the encoder below
encoder = LabelEncoder()
encoder.fit(Y)
# Pickled classifier trained on FaceNet embeddings — presumably a scikit-learn
# estimator (it is used via .predict below); verify against training code.
with open('/home/shanin/Desktop/SHANIN/MAIN/ALL_CODE/Face_Recognition/Face_Model.pkl', 'rb') as file:
    model = pickle.load(file)
app = FastAPI()
# Function to get the face embedding
# Function to get the face embedding
def get_embedding(face_img):
    """Return the FaceNet embedding vector for a single face image.

    The image is cast to float32 and wrapped in a batch of one, since the
    embedder operates on batches; the single resulting vector is returned.
    """
    batch = np.expand_dims(face_img.astype('float32'), axis=0)
    vectors = embedder.embeddings(batch)
    return vectors[0]
# Function to resize large images
# Function to resize large images
def resize_image(image, max_size=1024):
    """
    Downscale ``image`` so its longest side is at most ``max_size`` pixels,
    preserving aspect ratio. Images already within the limit are returned
    unchanged. Prevents excessive memory usage for large images.
    """
    height, width = image.shape[:2]
    longest_side = max(height, width)
    if longest_side <= max_size:
        return image
    scale = max_size / longest_side
    target_dim = (int(width * scale), int(height * scale))
    return cv.resize(image, target_dim)
# Function to process the image URL and make predictions
# Function to process the image URL and make predictions
def get_result(img_url, code):
    """
    Download the image at ``img_url``, detect the first face, embed it with
    FaceNet, and return the label predicted by the trained classifier.

    Parameters
    ----------
    img_url : str
        URL of the image to download.
    code : str
        Client/camera code; images from 'CM_370009' arrive rotated and are
        corrected before detection.

    Returns
    -------
    The decoded string label predicted for the first detected face.

    Raises
    ------
    ValueError
        If no face is detected in the image.
    requests.HTTPError
        If the image download returns an HTTP error status.
    requests.Timeout
        If the download exceeds the timeout.
    """
    # Bug fix: the original call had no timeout, so a stalled server would
    # hang this request handler forever.
    response = requests.get(img_url, timeout=30)
    # Bug fix: without this, a 404/500 error body fell through to PIL and
    # failed later with a cryptic "cannot identify image file" error.
    response.raise_for_status()
    image = Image.open(BytesIO(response.content))
    if image.mode != "RGB":
        image = image.convert("RGB")
    if code == 'CM_370009':
        # This source produces images rotated 90 degrees off; rotate one
        # time clockwise to correct the orientation.
        image = image.rotate(-90, expand=True)
    # Convert PIL image to a NumPy array
    image_np = np.array(image)
    # Resize large images to avoid excessive memory usage during detection
    image_np = resize_image(image_np)
    # Use the resized image for face detection
    detection_results = detector.detect_faces(image_np)
    if not detection_results:
        raise ValueError("No face detected in the image.")
    # Extract face from the first detected bounding box.
    # Bug fix: MTCNN can report slightly negative x/y for faces touching the
    # image edge; negative slice indices wrap around and produce a garbage
    # crop, so clamp the origin to 0.
    x, y, w, h = detection_results[0]['box']
    x, y = max(0, x), max(0, y)
    face_image = image_np[y:y + h, x:x + w]
    # Resize face image to the 160x160 input size the embedder expects
    face_image_resized = cv.resize(face_image, (160, 160))
    # Get embedding for face and make prediction
    test_im = get_embedding(face_image_resized)
    ypreds = model.predict([test_im])
    # Map the numeric class back to the original string label
    return encoder.inverse_transform(ypreds)[0]
# FastAPI Request Model
# FastAPI Request Model
class ImageRequest(BaseModel):
    """Request body for the /face endpoint."""
    # Expected identity code to verify the recognized face against
    code: str
    # URL of the image to download and run recognition on
    img: str
@app.post("/face")
async def face_recognition(request_data: ImageRequest):
    """
    Verify that the face in the submitted image matches the supplied code.

    Downloads and classifies the image via ``get_result`` and returns True
    when the predicted label equals ``request_data.code``, False otherwise.
    Note: a ValueError from get_result (no face detected) currently
    propagates as a 500 — NOTE(review): consider mapping it to a 4xx.
    """
    code = request_data.code
    img_url = request_data.img
    result = get_result(img_url, code)
    print(result)  # kept for parity with existing console diagnostics
    # Idiom fix: return the comparison directly instead of
    # `if result == code: return True else: return False`.
    return result == code
if __name__ == "__main__":
    # Serve the API on loopback only, port 4444 (not reachable externally)
    uvicorn.run(app, host="127.0.0.1", port=4444)
|