File size: 3,275 Bytes
ddda863
 
 
f59e7c5
65ae6d0
363d6df
 
ddda863
 
 
363d6df
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ddda863
65ae6d0
 
 
 
 
f59e7c5
ddda863
f59e7c5
 
 
 
 
 
 
ddda863
 
 
 
 
 
65ae6d0
ddda863
 
b8731d7
363d6df
 
 
 
65ae6d0
ddda863
23102fd
 
b8731d7
4f86615
 
23102fd
 
ae5fc80
23102fd
b8731d7
23102fd
 
 
363d6df
 
 
ddda863
 
 
 
 
23102fd
 
 
 
 
 
 
 
 
363d6df
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
from fastapi import FastAPI, File, UploadFile
import deepface.DeepFace as DeepFace
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import numpy as np
from mivolo.predictor import Predictor
import base64
from PIL import Image
import numpy as np
from AttributesHolder import Namespace
import cv2

# MiVoLo predictor configuration. Kept as a plain dict (single source of
# truth) and mirrored onto an argparse-like namespace because Predictor
# expects attribute-style access.
config = {
    "checkpoint": 'models/mivolo_imbd.pth.tar',
    "detector_weights": 'models/yolov8x_person_face.pt',
    "device": 'cpu',
    "draw": False,
    "with_persons": True,
    "disable_faces": False,
    "output": 'output'
}

# Build the namespace from the dict instead of duplicating every value in a
# hand-written setattr() call (the original repeated all seven settings).
namespace = Namespace()
for key, value in config.items():
    setattr(namespace, key, value)

# Loaded once at module import so requests reuse the same model instance.
predictor = Predictor(config=namespace)

# FastAPI application instance; routes and middleware are registered on it below.
app = FastAPI()

class Base64Data(BaseModel):
    """Request body for POST /predict-actor/.

    `base64_data` carries the image; base64_to_cv2 splits on "," so it is
    expected in data-URL form ("data:image/...;base64,<payload>").
    """

    # base64-encoded image, typically with a "data:...;base64," prefix
    base64_data: str

# Allow every origin so any front-end can call this API during development.
# NOTE(review): wildcard origins combined with allow_credentials=True is
# rejected by browsers under the CORS spec — confirm whether credentialed
# requests are actually needed here.
origins = ["*"]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get("/")
def index():
    """Health-check endpoint confirming the service is running."""
    greeting = {"message": "Hello, World!!!"}
    return greeting

# post image to server
@app.post("/predict-actor/")
async def create_upload_file(contents: Base64Data):
    """Predict age/gender for the posted image and find the closest celebrity.

    Expects a JSON body {"base64_data": "<data-url base64 image>"}.
    Returns 200 with the matched celebrity image (base64), its identity path,
    and the estimated age/gender; 422 when no face is detected; 404 when no
    match is found; 500 with the error text on any other failure.
    """
    try:
        print("-----Starting Prediction----------")
        base64_data = contents.base64_data
        loaded_image = base64_to_cv2(base64_data)

        # MiVoLo age/gender estimation on the decoded frame.
        detected_objects, out_im = predictor.recognize(loaded_image)
        # Guard the empty case: the original indexed ages[0] unconditionally
        # and crashed with IndexError (reported as a generic 500) when the
        # detector found nothing.
        if not detected_objects.ages:
            return JSONResponse(
                content={"message": "No face detected in the image."},
                status_code=422,
            )
        age = detected_objects.ages[0]
        gender = detected_objects.genders[0]

        # DeepFace accepts a base64 data-URL string directly as img_path.
        df = DeepFace.find(
            img_path=base64_data,
            db_path="dataset/",
            model_name='GhostFaceNet',
            threshold=0.9,
        )
        if not df or df[0].empty:
            return JSONResponse(
                content={"message": "No matching celebrity found."},
                status_code=404,
            )

        # Best match path; positional .iloc[0] instead of the original
        # .head()['identity'][0] label lookup (equivalent here, more robust).
        identity = df[0]['identity'].iloc[0]
        # Normalise Windows path separators for use in a relative path.
        filename = identity.replace("\\", "/")

        # Fixed: the original f-string had no placeholder and printed a
        # literal instead of the resolved filename.
        print(f"filename: {filename}")

        # Convert the matched image to base64 for the response.
        # NOTE(review): identity may already contain the "dataset/" prefix
        # (DeepFace returns paths including db_path) — verify this prefixing
        # does not double up.
        base64_output = image_to_base64("dataset/" + filename)

        print("------Prediction Done-----------")
        return JSONResponse(content={
            "celeb_image": base64_output,
            "celeb": identity,
            "res": {
                "age": age,
                "gender": gender,
            },
        }, status_code=200)
    except Exception as e:
        return JSONResponse(content={"message": "Error processing the file.", "error": str(e)}, status_code=500)



def image_to_base64(image_path):
    """Read the file at *image_path* and return its base64 encoding as ASCII text."""
    with open(image_path, "rb") as img_file:
        raw_bytes = img_file.read()
    # b64encode yields bytes; decode to a JSON-serialisable string.
    return base64.b64encode(raw_bytes).decode("ascii")


def base64_to_cv2(base64_string):
    """Decode a base64 image string into an OpenCV BGR image array.

    Accepts either a data-URL ("data:image/...;base64,<payload>") or a bare
    base64 payload.  Returns None when OpenCV cannot decode the bytes
    (cv2.imdecode's documented failure mode).
    """
    # Fix: the original split(",")[1] raised IndexError when the string had
    # no data-URL prefix; [-1] handles both prefixed and bare payloads.
    payload = base64_string.split(",")[-1]

    # Decode the base64 text into raw image bytes.
    decoded_bytes = base64.b64decode(payload)

    # View the bytes as a uint8 buffer and let OpenCV decode the image.
    np_array = np.frombuffer(decoded_bytes, np.uint8)
    return cv2.imdecode(np_array, cv2.IMREAD_COLOR)