File size: 4,047 Bytes
9299862
b4090e5
 
d359701
b4090e5
 
 
 
 
 
28e74d8
0b20be1
 
a132be7
d57ec0b
 
6a43dcd
b77606a
 
 
 
6a43dcd
 
 
 
 
 
 
 
a132be7
b4090e5
 
 
 
 
 
 
6693978
28e74d8
172be10
0fd38aa
172be10
 
28e74d8
 
 
172be10
fe7ab5c
172be10
 
28e74d8
 
 
 
b4090e5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9299862
b4090e5
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
import streamlit as st
import numpy as np
from PIL import Image
import cv2
import tensorflow as tf
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.applications.vgg19 import preprocess_input
from mtcnn import MTCNN
import joblib
import tensorflow.keras.backend as K
from huggingface_hub import hf_hub_download
import os

# βœ… Safe directory for Spaces
# NOTE(review): duplicate of the top-level `import os`; harmless but redundant.
import os

# Ensure these directories exist before writing
# os.makedirs("/app/.streamlit", exist_ok=True)
# os.makedirs("/app/.config/matplotlib", exist_ok=True)
# os.makedirs("/app/.cache", exist_ok=True)
# os.makedirs("/app/.hf_home/hub_cache", exist_ok=True)

# Set environment variables again in case app is restarted.
# Point all config/cache homes at writable /app paths (HF Spaces containers
# have a read-only HOME). NOTE(review): streamlit/tensorflow are already
# imported above, so libraries that read these vars at import time may have
# cached the old values — confirm these are still honored on this platform.
os.environ["STREAMLIT_HOME"] = "/app/.streamlit"
os.environ["XDG_CONFIG_HOME"] = "/app/.config"
os.environ["XDG_CACHE_HOME"] = "/app/.cache"
os.environ["MPLCONFIGDIR"] = "/app/.config/matplotlib"
os.environ["HF_HOME"] = "/app/.hf_home"
os.environ["HF_HUB_CACHE"] = "/app/.hf_home/hub_cache"

# === Custom Metric ===
def pearson_corr(y_true, y_pred):
    """Pearson correlation coefficient between y_true and y_pred.

    Computed with Keras backend ops so it can serve as a Keras metric;
    K.epsilon() in the denominator guards against division by zero when
    either input is constant.
    """
    centered_true = y_true - K.mean(y_true)
    centered_pred = y_pred - K.mean(y_pred)
    covariance = K.sum(centered_true * centered_pred)
    norm_true = K.sqrt(K.sum(K.square(centered_true)))
    norm_pred = K.sqrt(K.sum(K.square(centered_pred)))
    return covariance / (norm_true * norm_pred + K.epsilon())

# === Load model and scaler ===
# Download the trained BMI model and the label scaler from the Hugging Face
# Space repo (cached locally by hf_hub_download on subsequent runs).

model_path = hf_hub_download(
    repo_id="jiinkwan/bmi_application",   # βœ… Your Space ID
    filename="bmi_model_gender.keras",  # βœ… Relative path inside repo
    repo_type="space",                      # βœ… This is crucial — the files live in a Space, not a model repo
    local_dir="/tmp",                       # Optional: keeps things clean
)

scaler_path = hf_hub_download(
    repo_id="jiinkwan/bmi_application",  # πŸ” This should match your new model repo
    filename="label_scaler.pkl",
    repo_type="space",
    local_dir="/tmp"
)

# The model was trained with the custom pearson_corr metric, so it must be
# registered via custom_objects or deserialization fails.
model = tf.keras.models.load_model(model_path, custom_objects={'pearson_corr': pearson_corr})
# scaler inverts the label scaling applied during training (see prediction below).
scaler = joblib.load(scaler_path)

# === Sidebar ===
st.sidebar.title("Settings")
# Gender is a second model input, encoded as 1.0 (male) / 0.0 (female).
gender_option = st.sidebar.radio("Select Gender", ['Male', 'Female'])
gender_value = 1.0 if gender_option.lower() == 'male' else 0.0

# === Main UI ===
st.title("Face-based BMI Prediction App")
st.markdown("Upload a facial image to estimate BMI using a deep learning model.")

# === Face Detector ===
# Instantiated once at module level; reused for every uploaded image.
detector = MTCNN()

def detect_and_align_face(image):
    """Detect the first face in a PIL image, level the eyes, and crop it.

    Args:
        image: PIL.Image — any mode; converted to RGB internally.

    Returns:
        (face_resized, None) on success, where face_resized is a 224x224
        RGB numpy array; or (None, error_message) when no face is found
        or the detected box yields an unusable crop.
    """
    image_rgb = np.array(image.convert('RGB'))
    results = detector.detect_faces(image_rgb)
    if not results:
        return None, "❌ No face detected."

    # Use only the first (highest-confidence) detection.
    box = results[0]['box']
    keypoints = results[0]['keypoints']
    x, y, w, h = box
    # MTCNN can return slightly negative box origins; clamp to the frame.
    x, y = max(0, x), max(0, y)

    # Angle of the inter-eye line; rotating by it levels the eyes.
    left_eye = keypoints['left_eye']
    right_eye = keypoints['right_eye']
    dx, dy = right_eye[0] - left_eye[0], right_eye[1] - left_eye[1]
    angle = np.degrees(np.arctan2(dy, dx))
    center = np.mean([left_eye, right_eye], axis=0)
    eyes_center = (float(center[0]), float(center[1]))

    # Rotate the whole frame about the eye midpoint, then crop the
    # (pre-rotation) detection box from the aligned frame.
    M = cv2.getRotationMatrix2D(eyes_center, angle, 1.0)
    aligned = cv2.warpAffine(image_rgb, M, (image_rgb.shape[1], image_rgb.shape[0]), flags=cv2.INTER_CUBIC)

    face = aligned[y:y+h, x:x+w]
    # Guard against a degenerate box (zero/negative w or h, or an origin
    # outside the frame): numpy slicing silently yields an empty array,
    # and cv2.resize raises on zero-sized input — return the function's
    # error channel instead of crashing the app.
    if face.size == 0:
        return None, "❌ No face detected."
    face_resized = cv2.resize(face, (224, 224))
    return face_resized, None

# === Upload ===
uploaded_file = st.file_uploader("Upload an Image", type=["jpg", "jpeg", "png", "bmp"], label_visibility="collapsed")

# === Result & Image Display ===
if uploaded_file:
    image = Image.open(uploaded_file)
    face_img, error = detect_and_align_face(image)

    if error:
        st.error(error)
    else:
        # VGG19-style preprocessing to match the model's training input.
        img_array = img_to_array(face_img).astype(np.float32)
        img_array = preprocess_input(img_array)
        img_batch = np.expand_dims(img_array, axis=0)
        # Second model input: gender as a (1, 1) float batch.
        gender_batch = np.array([[gender_value]], dtype=np.float32)

        # Model outputs a scaled BMI; invert the training-time label scaling.
        bmi_scaled = model.predict([img_batch, gender_batch])[0][0]
        bmi = scaler.inverse_transform([[bmi_scaled]])[0][0]

        st.success(f"🎯 **Predicted BMI: {bmi:.2f}**")
        st.image(image, caption="Uploaded Image", use_container_width=True)
else:
    st.markdown("πŸ“€ *Upload an image above to receive a prediction.*")