akhfzl commited on
Commit
c242a30
·
1 Parent(s): 5db9301

'try-to-test'

Browse files
app.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
"""Entry point: launches the face-verification Gradio demo defined in the package."""
from faceVerificationUtilization import demo

# Start the Gradio server only when run as a script (not when imported,
# e.g. by a Spaces runner that imports `demo` itself).
if __name__ == "__main__":
    demo.launch()
faceVerificationModel/efficientnetv2_s_features.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:431d1c7064464949b5895965e09db9c1c265bb0c14524804ac626439546a87d9
3
+ size 81622458
faceVerificationModel/pca_xgb_pipeline.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e5c1181564c00e26d980cf8d30770fd8a678e3cb396327e6fa68c83cfef75b3b
3
+ size 357574
faceVerificationUtilization/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .utils import demo
faceVerificationUtilization/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (247 Bytes). View file
 
faceVerificationUtilization/__pycache__/setConfig.cpython-313.pyc ADDED
Binary file (1.48 kB). View file
 
faceVerificationUtilization/__pycache__/utils.cpython-313.pyc ADDED
Binary file (3.13 kB). View file
 
faceVerificationUtilization/setConfig.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Model setup executed at import time.

Exposes module-level objects used by `utils.py`:
  - face_detector:      YOLOv8 face-detection model (weights pulled from the Hub)
  - efficientnet_model: EfficientNetV2-S feature extractor (classifier removed)
  - pca_xgb:            PCA + XGBoost pipeline classifying the extracted features
  - transform:          torchvision preprocessing pipeline for face crops
"""
from ultralytics import YOLO
import torch, joblib
from huggingface_hub import hf_hub_download
from torchvision import transforms
import torchvision.models as models
import torch.nn as nn

# Download (or reuse cached) YOLOv8 face-detection weights from the Hub.
model_path = hf_hub_download(repo_id="arnabdhar/YOLOv8-Face-Detection", filename="model.pt")
face_detector = YOLO(model_path)

# Backbone without pretrained weights; the classifier head is replaced with
# Identity so the forward pass yields the pooled feature vector instead of logits.
efficientnet_model = models.efficientnet_v2_s(weights=None)
efficientnet_model.classifier = nn.Identity()

# Load locally fine-tuned feature-extractor weights onto CPU and freeze for inference.
state_dict = torch.load("faceVerificationModel/efficientnetv2_s_features.pth", map_location="cpu")
efficientnet_model.load_state_dict(state_dict)
efficientnet_model.eval()

# PCA + XGBoost pipeline that classifies the EfficientNet features.
pca_xgb = joblib.load("faceVerificationModel/pca_xgb_pipeline.pkl")

# Standard ImageNet preprocessing: resize to the backbone's 224x224 input
# and normalize with ImageNet channel statistics.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406],
                         [0.229, 0.224, 0.225])
])
faceVerificationUtilization/utils.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2 as cv
2
+ import numpy as np
3
+ from PIL import Image
4
+ import torch
5
+ from .setConfig import efficientnet_model, face_detector, transform, pca_xgb
6
+ import gradio as gr
7
+
8
def ImgPreprocessing(img):
    """Contrast-enhance an image with CLAHE applied to its luma channel.

    Args:
        img: numpy image array; grayscale (2-D or single-channel) input is
            promoted to 3-channel BGR before enhancement.

    Returns:
        A 3-channel BGR image with CLAHE-equalized luminance.
    """
    # Promote single-channel input so the YUV conversion below is valid.
    is_gray = len(img.shape) == 2 or img.shape[2] == 1
    if is_gray:
        img = cv.cvtColor(img, cv.COLOR_GRAY2BGR)

    # Equalize only the Y (luma) plane to boost contrast without shifting colors.
    yuv = cv.cvtColor(img, cv.COLOR_BGR2YUV)
    enhancer = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    yuv[..., 0] = enhancer.apply(yuv[..., 0])
    return cv.cvtColor(yuv, cv.COLOR_YUV2BGR)
17
+
18
def predict(frame: np.ndarray):
    """Detect a face in a webcam frame and classify it with the PCA+XGBoost pipeline.

    Args:
        frame: image array from Gradio's webcam source, or a dict with an
            'image' key (older Gradio versions wrap the frame this way).

    Returns:
        str: human-readable prediction, or an error message when no frame
        or no face is available.
    """
    if frame is None:
        return "No frame captured from webcam"

    # Some Gradio versions deliver the frame wrapped in a dict.
    if isinstance(frame, dict):
        frame = frame['image']

    img = ImgPreprocessing(frame)

    results = face_detector.predict(img)
    if len(results[0].boxes) == 0:
        return "No face detected"

    # Take the first detected box and crop the face region.
    x1, y1, x2, y2 = map(int, results[0].boxes[0].xyxy[0].cpu().numpy())
    face_crop = img[y1:y2, x1:x2]
    if face_crop.size == 0:
        # A degenerate/out-of-frame box yields an empty crop that would
        # crash Image.fromarray / the transform pipeline.
        return "No face detected"

    # BUGFIX: previously the conversion was assigned to a typo'd name
    # (`fface_crop`) and discarded, so the BGR array reached the model.
    face_crop = cv.cvtColor(face_crop, cv.COLOR_BGR2RGB)  # convert BGR to RGB

    face_pil = Image.fromarray(face_crop)
    face_tensor = transform(face_pil).unsqueeze(0)

    # Feature extraction only; gradients are not needed at inference time.
    with torch.no_grad():
        features = efficientnet_model(face_tensor).cpu().numpy()

    pred = pca_xgb.predict(features)[0]
    # Class 0 -> "Wajah Valid" ("valid face"); anything else is invalid.
    pred = 'Wajah Valid' if pred == 0 else 'Wajah Tidak Valid'

    return f"Predicted class: {pred}"
46
+
47
# Live webcam interface: streams frames into `predict` and shows the text result.
# Imported by app.py, which calls demo.launch().
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(sources="webcam", streaming=True),
    outputs="text",
    live=True
)