# bmi_application / src / streamlit_app.py
# Source: Hugging Face Space "jiinkwan/bmi_application" (uploaded by jiinkwan, commit b77606a).
import streamlit as st
import numpy as np
from PIL import Image
import cv2
import tensorflow as tf
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.applications.vgg19 import preprocess_input
from mtcnn import MTCNN
import joblib
import tensorflow.keras.backend as K
from huggingface_hub import hf_hub_download
import os
# === Environment setup (Hugging Face Spaces) ===
# Spaces containers only permit writes under /app, so point every library
# that caches or writes config to disk (Streamlit, matplotlib via XDG,
# huggingface_hub) at writable locations there.  Assigned unconditionally
# so a restarted app keeps working.
# NOTE(review): these directories are assumed to already exist (e.g.
# created in the Dockerfile) — confirm before removing that assumption.
os.environ["STREAMLIT_HOME"] = "/app/.streamlit"
os.environ["XDG_CONFIG_HOME"] = "/app/.config"
os.environ["XDG_CACHE_HOME"] = "/app/.cache"
os.environ["MPLCONFIGDIR"] = "/app/.config/matplotlib"
os.environ["HF_HOME"] = "/app/.hf_home"
os.environ["HF_HUB_CACHE"] = "/app/.hf_home/hub_cache"
# === Custom Metric ===
def pearson_corr(y_true, y_pred):
    """Pearson correlation coefficient between targets and predictions.

    Implemented with Keras backend ops so it can be used as a training /
    evaluation metric.  K.epsilon() is added to the denominator to avoid
    division by zero when either tensor is constant.
    """
    centered_true = y_true - K.mean(y_true)
    centered_pred = y_pred - K.mean(y_pred)
    covariance = K.sum(centered_true * centered_pred)
    norm_true = K.sqrt(K.sum(K.square(centered_true)))
    norm_pred = K.sqrt(K.sum(K.square(centered_pred)))
    return covariance / (norm_true * norm_pred + K.epsilon())
# === Load model and scaler ===
_SPACE_REPO = "jiinkwan/bmi_application"


def _fetch_asset(filename):
    """Download *filename* from this app's own HF Space repo into /tmp.

    repo_type="space" is required because the weights live in the Space
    repository itself rather than a separate model repo.
    """
    return hf_hub_download(
        repo_id=_SPACE_REPO,
        filename=filename,
        repo_type="space",
        local_dir="/tmp",
    )


model_path = _fetch_asset("bmi_model_gender.keras")
scaler_path = _fetch_asset("label_scaler.pkl")

# The model was trained with the custom pearson_corr metric, so it must be
# supplied at load time for deserialization to succeed.
model = tf.keras.models.load_model(model_path, custom_objects={'pearson_corr': pearson_corr})
# Inverse of this scaler maps the model's scaled output back to real BMI.
scaler = joblib.load(scaler_path)
# === Sidebar ===
st.sidebar.title("Settings")
gender_option = st.sidebar.radio("Select Gender", ['Male', 'Female'])
# The model takes gender as a single float feature: 1.0 = male, 0.0 = female.
gender_value = float(gender_option.lower() == 'male')

# === Main UI ===
st.title("Face-based BMI Prediction App")
st.markdown("Upload a facial image to estimate BMI using a deep learning model.")

# === Face Detector ===
# Instantiated once at module load; reused for every uploaded image.
detector = MTCNN()
def detect_and_align_face(image):
    """Detect a face, rotate the image so the eyes are level, and return a
    224x224 RGB crop of the face.

    Args:
        image: PIL.Image in any mode; converted to RGB internally.

    Returns:
        (face, error): on success, ``face`` is a 224x224x3 ndarray and
        ``error`` is None; on failure, ``face`` is None and ``error`` is a
        user-facing message string.
    """
    image_rgb = np.array(image.convert('RGB'))
    results = detector.detect_faces(image_rgb)
    if not results:
        return None, "❌ No face detected."
    # Only the first detection is used.
    box = results[0]['box']
    keypoints = results[0]['keypoints']
    x, y, w, h = box
    # MTCNN can report slightly negative box origins; clamp to the image.
    x, y = max(0, x), max(0, y)
    left_eye = keypoints['left_eye']
    right_eye = keypoints['right_eye']
    # Rotate the whole image about the midpoint between the eyes so the
    # inter-eye line becomes horizontal (canonical alignment).
    dx, dy = right_eye[0] - left_eye[0], right_eye[1] - left_eye[1]
    angle = np.degrees(np.arctan2(dy, dx))
    center = np.mean([left_eye, right_eye], axis=0)
    eyes_center = (float(center[0]), float(center[1]))
    M = cv2.getRotationMatrix2D(eyes_center, angle, 1.0)
    aligned = cv2.warpAffine(image_rgb, M, (image_rgb.shape[1], image_rgb.shape[0]), flags=cv2.INTER_CUBIC)
    # NOTE: the crop reuses the pre-rotation box coordinates; for small
    # rotation angles this is a close approximation of the rotated box.
    face = aligned[y:y+h, x:x+w]
    if face.size == 0:
        # A degenerate box (zero width/height after clamping, or fully
        # outside the frame) would make cv2.resize raise an opaque error;
        # fail gracefully through the normal (None, error) contract instead.
        return None, "❌ Invalid face region detected."
    face_resized = cv2.resize(face, (224, 224))
    return face_resized, None
# === Upload ===
uploaded_file = st.file_uploader("Upload an Image", type=["jpg", "jpeg", "png", "bmp"], label_visibility="collapsed")

# === Result & Image Display ===
if uploaded_file:
    image = Image.open(uploaded_file)
    face_img, error = detect_and_align_face(image)
    if error:
        st.error(error)
    else:
        # Replicate training-time preprocessing: VGG19 mean subtraction on
        # a float32 copy of the aligned 224x224 face crop.
        face_tensor = preprocess_input(img_to_array(face_img).astype(np.float32))
        face_batch = np.expand_dims(face_tensor, axis=0)
        gender_batch = np.array([[gender_value]], dtype=np.float32)
        # The network outputs a scaled BMI; invert the label scaler to
        # recover the real-world value.
        scaled_bmi = model.predict([face_batch, gender_batch])[0][0]
        bmi = scaler.inverse_transform([[scaled_bmi]])[0][0]
        st.success(f"🎯 **Predicted BMI: {bmi:.2f}**")
    st.image(image, caption="Uploaded Image", use_container_width=True)
else:
    st.markdown("πŸ“€ *Upload an image above to receive a prediction.*")