Thiago Hersan committed on
Commit
c0aacde
·
0 Parent(s):

init commit

Browse files
.gitattributes ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ models/lbfmodel.yaml filter=lfs diff=lfs merge=lfs -text
2
+ imgs/03.webp filter=lfs diff=lfs merge=lfs -text
3
+ imgs/11.jpg filter=lfs diff=lfs merge=lfs -text
4
+ imgs/people.jpg filter=lfs diff=lfs merge=lfs -text
.github/workflows/deploy-hf.yml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Mirror this repository to a Hugging Face Space on every push to main.
name: Deploy to Hugging Face spaces

on:
  push:
    branches:
      - main

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      # Full-history, LFS-enabled checkout: fetch-depth 0 is required so the
      # force-push below carries the complete branch history, and lfs: true
      # pulls the large model/image files tracked in .gitattributes.
      # NOTE(review): actions/checkout@v3 is an older major version — consider
      # confirming whether v4 is expected by the org's runner images.
      - name: Checkout Dev Repo
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
          lfs: true

      # Push the checked-out tree to the Space, authenticating with a
      # repo secret (HFTOKEN) embedded in the remote URL.
      - name: Push to HF
        env:
          HFTOKEN: ${{ secrets.HFTOKEN }}

        run: |
          git remote add hf https://thiagohersan:$HFTOKEN@huggingface.co/spaces/visualizedata/PSAM5020-FaceAlign-Gradio
          git push -f hf main
.gitignore ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ .DS_S*
2
+ __pycache__/
3
+ gradio_cached_examples/
4
+ .gradio/
5
+ .ipynb_checkpoints/
README.md ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: PSAM5020 Face Align
3
+ emoji: 📐
4
+ colorFrom: blue
5
+ colorTo: gray
6
+ sdk: gradio
7
+ python_version: 3.10.12
8
+ sdk_version: 5.0.2
9
+ app_file: app.py
10
+ pinned: false
11
+ ---
app.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import cv2
import gradio as gr
import numpy as np

from huggingface_hub import hf_hub_download
from math import atan2
from PIL import Image as PImage
from ultralytics import YOLO

# Output crop geometry (pixels): every aligned face is emitted as a
# fixed-size OUT_W x OUT_H grayscale image.
OUT_W = 130
OUT_H = 170

# Normalization targets inside the output crop: the detected eye centers are
# scaled to be OUT_EYE_SPACE px apart (or the temple-to-temple width to
# OUT_FACE_WIDTH px, whichever gives the smaller scale), and the eye midpoint
# is placed OUT_NOSE_TOP px from the top of the crop.
OUT_EYE_SPACE = 64
OUT_FACE_WIDTH = 89
OUT_NOSE_TOP = 72

# Indices into the 68-point LBF landmark set: outer corners of each eye.
EYE_0_IDX = 36
EYE_1_IDX = 45

# Indices of the leftmost/rightmost jaw-outline points, used as "temples"
# to estimate face width.
TEMPLE_0_IDX = 0
TEMPLE_1_IDX = 16

# Face detector: YOLO model downloaded from the Hub at import time
# (cached by huggingface_hub on subsequent runs).
yolo_model_path = hf_hub_download(repo_id="AdamCodd/YOLOv11n-face-detection", filename="model.pt")
face_detector = YOLO(yolo_model_path)

# Landmark detector: OpenCV's LBF facemark model, loaded from the
# LFS-tracked file in ./models (requires opencv-contrib).
LBFmodel = "./models/lbfmodel.yaml"
landmark_detector = cv2.face.createFacemarkLBF()
landmark_detector.loadModel(LBFmodel)

# Fixed pool of output image widgets; face() toggles visibility on up to
# NUM_OUTS of them, one per detected face.
NUM_OUTS = 16
all_outputs = [gr.Image(format="jpeg", visible=False) for _ in range(NUM_OUTS)]
33
def face(img_in):
    """Detect, align, and crop faces from an input image.

    For each detected face: rotates the image so the eyes are level, scales
    it so eye spacing / face width match the OUT_* constants, and crops a
    fixed OUT_W x OUT_H grayscale patch centered horizontally on the eye
    midpoint.

    Args:
        img_in: PIL image from the Gradio input (None when cleared).

    Returns:
        A list of exactly NUM_OUTS gr.Image updates: one visible image per
        detected face (largest first), padded with invisible placeholders.
    """
    # Default payload: hide all output slots.
    out_pad = NUM_OUTS * [gr.Image(visible=False)]
    if img_in is None:
        return out_pad

    # Work on a bounded-size copy so detection cost stays predictable
    # (thumbnail mutates in place, hence the copy).
    img = img_in.copy()
    img.thumbnail((1000,1000))
    img_np = np.array(img).copy()

    iw,ih = img.size

    output = face_detector.predict(img, verbose=False)

    # No predictions, or no boxes in the first (only) result.
    if len(output) < 1 or len(output[0]) < 1:
        return out_pad

    # Convert YOLO xyxy boxes to the (x, y, w, h) format Facemark expects.
    # NOTE(review): assumes the result tensor is on CPU — .numpy() would
    # fail on a CUDA tensor; true for this CPU Space, confirm if ported.
    faces_xyxy = output[0].boxes.xyxy.numpy()
    faces = np.array([[x0, y0, (x1 - x0), (y1 - y0)] for x0,y0,x1,y1 in faces_xyxy])

    # All faces, sorted widest-first (argsort of negated widths).
    biggest_faces = faces[np.argsort(-faces[:,2])]
    _, landmarks = landmark_detector.fit(img_np, biggest_faces)

    if len(landmarks) < 1:
        return out_pad

    out_images = []
    for landmark in landmarks:
        # landmark[0] is the (68, 2) point array for this face.
        eye0 = np.array(landmark[0][EYE_0_IDX])
        eye1 = np.array(landmark[0][EYE_1_IDX])
        temple0 = np.array(landmark[0][TEMPLE_0_IDX])
        temple1 = np.array(landmark[0][TEMPLE_1_IDX])

        # Midpoint between the eyes: the alignment anchor for rotation/crop.
        mid = np.mean([eye0, eye1], axis=0)

        # Head tilt: angle of the eye-to-eye line, in degrees for PIL.rotate.
        eye_line = eye1 - eye0
        tilt = atan2(eye_line[1], eye_line[0])
        tilt_deg = 180 * tilt / np.pi


        # Pick the smaller of the two normalization scales so neither the
        # eye spacing nor the face width overflows the output crop.
        scale = min(OUT_EYE_SPACE / np.linalg.norm(eye1 - eye0),
                    OUT_FACE_WIDTH / np.linalg.norm(temple1 - temple0))

        img_s = img.resize((int(iw * scale), int(ih * scale)))

        # Rotate around the (scaled) eye midpoint, then crop a fixed-size
        # box placing that midpoint OUT_NOSE_TOP px from the top edge.
        new_mid = [int(c * scale) for c in mid]
        crop_box = (new_mid[0] - (OUT_W // 2),
                    new_mid[1] - OUT_NOSE_TOP,
                    new_mid[0] + (OUT_W // 2),
                    new_mid[1] + (OUT_H - OUT_NOSE_TOP))

        img_out = img_s.rotate(tilt_deg, center=new_mid, resample=PImage.Resampling.BICUBIC).crop(crop_box).convert("L")
        out_images.append(gr.Image(img_out, visible=True))

    # Pad with hidden slots and clamp to the fixed widget count.
    out_images += out_pad
    return out_images[:NUM_OUTS]
89
+
90
+
91
# Top-level UI: a Blocks page with a heading plus an Interface that wires
# face() to one PIL image input and the fixed pool of output slots.
with gr.Blocks() as demo:
    gr.Markdown("""
    # PSAM 5020 Face Alignment Tool.
    ## Interface for face detection, alignment, cropping\
    to help create dataset for [WK12](https://github.com/PSAM-5020-2025F-A/WK11) / [HW12](https://github.com/PSAM-5020-2025F-A/Homework11).
    """)

    # cache_examples=True precomputes the example outputs at startup so the
    # Space responds instantly to example clicks.
    # NOTE(review): allow_flagging was deprecated/renamed (flagging_mode) in
    # Gradio 4/5 — confirm it is still accepted by sdk_version 5.0.2.
    gr.Interface(
        face,
        inputs=gr.Image(type="pil"),
        outputs=all_outputs,
        cache_examples=True,
        examples=[["./imgs/03.webp"], ["./imgs/11.jpg"], ["./imgs/people.jpg"]],
        allow_flagging="never",
    )

if __name__ == "__main__":
    demo.launch()
imgs/03.webp ADDED

Git LFS Details

  • SHA256: d0617b2f0e999df6a59d5094f380acf5bae98b2d5c9e3b9c0f4aea1d7747531f
  • Pointer size: 131 Bytes
  • Size of remote file: 145 kB
imgs/11.jpg ADDED

Git LFS Details

  • SHA256: 96f7ff78c03f6e6e2aa0c45a63c707c0ecfcd8632fa7e0c72ce3d516227ced28
  • Pointer size: 131 Bytes
  • Size of remote file: 323 kB
imgs/people.jpg ADDED

Git LFS Details

  • SHA256: 75a71a44bbf69d30b0d64c53feb39de92a108712bb2767e5c2b5af9f9659efce
  • Pointer size: 131 Bytes
  • Size of remote file: 359 kB
models/lbfmodel.yaml ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:70dd8b1657c42d1595d6bd13d97d932877b3bed54a95d3c4733a0f740d1fd66b
3
+ size 56375857
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ pydantic==2.8.2
2
+ huggingface-hub==0.34.3
3
+ opencv-python==4.10.0.84
4
+ opencv-python-headless==4.10.0.84
5
+ opencv-contrib-python==4.10.0.84
6
+ opencv-contrib-python-headless==4.10.0.84
7
+ ultralytics==8.3.102