Spaces:
Sleeping
Sleeping
M Bilal Naeem committed on
Commit Β·
669a5fa
1
Parent(s): edb77cb
your commit message
Browse files
app/__pycache__/main.cpython-39.pyc
CHANGED
|
Binary files a/app/__pycache__/main.cpython-39.pyc and b/app/__pycache__/main.cpython-39.pyc differ
|
|
|
app/services/__pycache__/acne_service.cpython-39.pyc
CHANGED
|
Binary files a/app/services/__pycache__/acne_service.cpython-39.pyc and b/app/services/__pycache__/acne_service.cpython-39.pyc differ
|
|
|
app/services/__pycache__/puffy_eyes_service.cpython-39.pyc
CHANGED
|
Binary files a/app/services/__pycache__/puffy_eyes_service.cpython-39.pyc and b/app/services/__pycache__/puffy_eyes_service.cpython-39.pyc differ
|
|
|
app/services/puffy_eyes_service.py
CHANGED
|
@@ -1,5 +1,4 @@
|
|
| 1 |
from ultralytics import YOLO
|
| 2 |
-
import logging
|
| 3 |
from PIL import Image, ImageDraw
|
| 4 |
import io
|
| 5 |
import base64
|
|
@@ -8,40 +7,39 @@ import base64
|
|
| 8 |
MODEL_PATH = "app/models/puffy_eyes_model.pt"
|
| 9 |
model = YOLO(MODEL_PATH)
|
| 10 |
|
| 11 |
-
|
| 12 |
-
CONFIDENCE_THRESHOLD = 0.25
|
| 13 |
-
MAX_SHOW = 2 # Only allow 2 detections
|
| 14 |
|
| 15 |
def predict_puffy_eyes(image: Image):
|
| 16 |
-
"""
|
| 17 |
-
|
| 18 |
-
# Run YOLO detection
|
| 19 |
results = model(image, conf=CONFIDENCE_THRESHOLD)
|
| 20 |
-
|
| 21 |
-
|
| 22 |
for result in results:
|
| 23 |
for box in result.boxes:
|
| 24 |
conf = float(box.conf)
|
| 25 |
if conf >= CONFIDENCE_THRESHOLD:
|
| 26 |
x1, y1, x2, y2 = [float(c) for c in box.xyxy[0]]
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
}
|
| 38 |
|
| 39 |
-
#
|
| 40 |
-
all_dets.sort(key=lambda d: d["confidence"], reverse=True)
|
| 41 |
-
|
| 42 |
-
# Draw the two boxes
|
| 43 |
draw = ImageDraw.Draw(image)
|
| 44 |
-
for det in
|
| 45 |
x1, y1, x2, y2 = det["bbox"]
|
| 46 |
draw.rectangle([x1, y1, x2, y2], outline="red", width=3)
|
| 47 |
|
|
@@ -51,6 +49,6 @@ def predict_puffy_eyes(image: Image):
|
|
| 51 |
encoded_image = base64.b64encode(buf.getvalue()).decode("utf-8")
|
| 52 |
|
| 53 |
return {
|
| 54 |
-
"detections":
|
| 55 |
"labeled_image": encoded_image
|
| 56 |
}
|
|
|
|
| 1 |
import base64
import io

from PIL import Image, ImageDraw
from ultralytics import YOLO

# Trained YOLO weights for puffy-eye detection.
MODEL_PATH = "app/models/puffy_eyes_model.pt"

# Loaded once at import time so every request reuses the same model instance.
model = YOLO(MODEL_PATH)

# Minimum confidence a detection must reach to be kept.
CONFIDENCE_THRESHOLD = 0.3
|
|
|
|
|
|
|
| 11 |
|
| 12 |
def predict_puffy_eyes(image: Image.Image):
    """Detect puffy eyes and return the two highest-confidence detections.

    Args:
        image: Input photo as a PIL image. NOTE: when two detections are
            found, the boxes are drawn onto this image in place.

    Returns:
        dict with keys:
            "detections": list of {"confidence": float, "bbox": [x1, y1, x2, y2]}
                — exactly two entries, highest confidence first — or [] when
                fewer than two boxes were found;
            "labeled_image": base64-encoded annotated image, or None;
            "message": present only when fewer than two eyes were detected.
    """
    # Run YOLO inference; conf= already filters boxes at the model level.
    results = model(image, conf=CONFIDENCE_THRESHOLD)

    detections = []
    for result in results:
        for box in result.boxes:
            conf = float(box.conf)
            # Defensive re-check; the model-level conf= filter should already
            # guarantee this holds.
            if conf >= CONFIDENCE_THRESHOLD:
                x1, y1, x2, y2 = [float(c) for c in box.xyxy[0]]
                detections.append({
                    "confidence": conf,
                    "bbox": [x1, y1, x2, y2]
                })

    # Sort by confidence and keep only the top 2 (one per eye).
    detections.sort(key=lambda d: d["confidence"], reverse=True)
    top_two = detections[:2]

    # A valid result needs both eyes; otherwise report nothing.
    if len(top_two) < 2:
        return {
            "detections": [],
            "labeled_image": None,
            "message": "Fewer than 2 puffy eyes detected"
        }

    # Draw the two boxes on the (mutated) input image.
    draw = ImageDraw.Draw(image)
    for det in top_two:
        x1, y1, x2, y2 = det["bbox"]
        draw.rectangle([x1, y1, x2, y2], outline="red", width=3)

    # Serialize the annotated image as base64 for the API response.
    # NOTE(review): the save format was hidden in the unchanged diff context —
    # PNG is assumed here; confirm against the original file.
    buf = io.BytesIO()
    image.save(buf, format="PNG")
    encoded_image = base64.b64encode(buf.getvalue()).decode("utf-8")

    return {
        "detections": top_two,
        "labeled_image": encoded_image
    }
|
app/services/verify_and_crop_face_service.py
CHANGED
|
@@ -31,12 +31,21 @@ async def verify_and_crop_face_service(file: UploadFile) -> str:
|
|
| 31 |
if len(faces) == 0:
|
| 32 |
print("β [Error] No face detected.")
|
| 33 |
raise HTTPException(status_code=400, detail="No face detected. Upload a clear front-facing photo.")
|
| 34 |
-
if len(faces) > 1:
|
| 35 |
-
print("β [Error] Multiple faces detected.")
|
| 36 |
-
raise HTTPException(status_code=400, detail="Multiple faces detected. Upload only one face.")
|
| 37 |
|
| 38 |
-
|
| 39 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 40 |
|
| 41 |
# Expand the crop with margin
|
| 42 |
margin = 0.5 # 50% margin
|
|
|
|
| 31 |
if len(faces) == 0:
|
| 32 |
print("β [Error] No face detected.")
|
| 33 |
raise HTTPException(status_code=400, detail="No face detected. Upload a clear front-facing photo.")
|
|
|
|
|
|
|
|
|
|
| 34 |
|
| 35 |
+
# Select the most centered face
|
| 36 |
+
image_center = (img.shape[1] / 2, img.shape[0] / 2)
|
| 37 |
+
print(f"π― [Image Center] x={image_center[0]}, y={image_center[1]}")
|
| 38 |
+
|
| 39 |
+
def center_distance(face):
    """Squared distance from this face's center to the image center.

    Squared distance preserves ordering, so no sqrt is needed for ranking.
    """
    x, y, w, h = face
    offset_x = (x + w / 2) - image_center[0]
    offset_y = (y + h / 2) - image_center[1]
    return offset_x ** 2 + offset_y ** 2
|
| 45 |
+
|
| 46 |
+
best_face = sorted(faces, key=center_distance)[0]
|
| 47 |
+
(x, y, w, h) = best_face
|
| 48 |
+
print(f"π [Selected Face] x={x}, y={y}, w={w}, h={h}")
|
| 49 |
|
| 50 |
# Expand the crop with margin
|
| 51 |
margin = 0.5 # 50% margin
|