"""Aesthetix AI — Streamlit app for facial aesthetic scoring.

Pipeline: Haar-cascade face crop -> DeepLabV3 person segmentation (background
removal) -> ResNet18 regression score (1.0-5.0) -> Grad-CAM heatmap overlay
as a visual explanation of the score.

NOTE(review): this file was recovered from a whitespace-mangled copy in which
all HTML markup inside string literals (CSS, header divs, GA snippet, metric
card) had been stripped. The Python logic below is faithful to the original;
the HTML strings are minimal reconstructions and are marked as such.
"""

import streamlit as st
import torch
import torch.nn as nn
from torchvision import models, transforms
from PIL import Image
import numpy as np
import cv2
import streamlit.components.v1 as components

# --- GOOGLE ANALYTICS INJECTION (THE NUCLEAR FIX) ---
GA_ID = "G-JRWLD5D22V"

# NOTE(review): the original <script> content was stripped from this file.
# This is the standard GA4 gtag.js loader rebuilt from the measurement ID —
# confirm against the snippet shown in the GA admin console.
ga_script = f"""
<script async src="https://www.googletagmanager.com/gtag/js?id={GA_ID}"></script>
<script>
  window.dataLayer = window.dataLayer || [];
  function gtag(){{dataLayer.push(arguments);}}
  gtag('js', new Date());
  gtag('config', '{GA_ID}');
</script>
"""


def inject_ga():
    """Patch Streamlit's static index.html so GA loads with the page shell.

    Streamlit offers no supported way to add <head> tags, so this rewrites
    the installed package's index.html in place. Idempotent: skips the write
    if GA_ID is already present.
    """
    import pathlib

    # Find where streamlit is installed
    streamlit_path = pathlib.Path(st.__file__).parent
    index_path = streamlit_path / "static" / "index.html"

    # Read the original file
    with open(index_path, "r") as f:
        html_content = f.read()

    # Check if GA is already injected to avoid duplicates
    if GA_ID not in html_content:
        # BUG FIX: the recovered code called html_content.replace('', ga_script),
        # which inserts the snippet between every character of the file.
        # Inject exactly once, right after the opening <head> tag.
        new_html = html_content.replace("<head>", f"<head>{ga_script}", 1)

        # Save the modified file
        with open(index_path, "w") as f:
            f.write(new_html)


# Run the injection function. Best-effort: on read-only installs (rare on HF)
# we log and continue — analytics is not worth crashing the app for.
try:
    inject_ga()
except Exception as e:
    print(f"GA Injection Failed: {e}")

# --- 1. CONFIGURATION & STYLING ---
st.set_page_config(
    page_title="Aesthetix AI",
    page_icon="✨",
    layout="centered",
    initial_sidebar_state="collapsed",
)

# Custom CSS for Premium White/Clean Theme
# NOTE(review): the original <style> rules were stripped — restore them here.
st.markdown(
    """
    <style>
    </style>
    """,
    unsafe_allow_html=True,
)

# Header
# NOTE(review): surrounding markup was stripped; visible text preserved.
st.markdown(
    """
    <div style="text-align:center;">
        <h1>✨ Aesthetix AI</h1>
    </div>
    """,
    unsafe_allow_html=True,
)
st.markdown(
    """
    <div style="text-align:center; color:#888;">
        Facial Symmetry &amp; Feature Analysis Engine
    </div>
    """,
    unsafe_allow_html=True,
)


# --- 2. MODEL LOADING ---
@st.cache_resource
def load_models():
    """Load the scoring and segmentation models once per server process.

    Returns:
        (rater, seg_model): ResNet18 with a 1-output regression head loaded
        from 'best_face_rater_colab.pth', and a pretrained DeepLabV3-ResNet50
        for person segmentation. Returns (None, None) and shows an error if
        the rater checkpoint is missing. Both models run on CPU in eval mode.
    """
    device = torch.device("cpu")

    # Rating Model (ResNet18) — final fc replaced with a single regression unit
    rater = models.resnet18(weights=None)
    num_ftrs = rater.fc.in_features
    rater.fc = nn.Linear(num_ftrs, 1)
    try:
        rater.load_state_dict(
            torch.load("best_face_rater_colab.pth", map_location=device)
        )
    except FileNotFoundError:
        st.error("⚠️ Model file missing. Upload 'best_face_rater_colab.pth'.")
        return None, None
    rater.eval()

    # Segmentation Model (DeepLabV3)
    seg_model = models.segmentation.deeplabv3_resnet50(weights='DEFAULT')
    seg_model.eval()
    return rater, seg_model


rater_model, seg_model = load_models()


# --- 3. PROCESSING LOGIC ---
def isolate_face_pixels(image):
    """Black out everything except the 'person' pixels in a PIL image.

    Runs DeepLabV3, keeps COCO/VOC class 15 (person), and returns a 224x224
    PIL image with the background zeroed so the rater only sees the subject.
    """
    # Prepare for DeepLabV3 (standard ImageNet normalization)
    seg_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    input_tensor = seg_transform(image).unsqueeze(0)

    with torch.no_grad():
        output = seg_model(input_tensor)['out'][0]
    output_predictions = output.argmax(0)

    # Class 15 is Person
    mask = (output_predictions == 15).byte().numpy()

    # Resize the source to match the mask's 224x224 grid
    image_resized = image.resize((224, 224))
    img_np = np.array(image_resized)

    # Apply Mask (Black Background)
    mask_3d = np.stack([mask, mask, mask], axis=2)
    foreground = img_np * mask_3d
    return Image.fromarray(foreground)


def crop_to_face_strict(image_pil):
    """Crop a PIL image to the largest detected face plus a 20% margin.

    Returns:
        (cropped_image, found): the crop and True when a face was detected;
        otherwise the untouched input and False.
    """
    img_np = np.array(image_pil)
    if len(img_np.shape) == 2:
        img_np = cv2.cvtColor(img_np, cv2.COLOR_GRAY2RGB)

    # Haar Cascade frontal-face detector shipped with OpenCV
    face_cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
    )
    gray = cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)

    if len(faces) == 0:
        return image_pil, False

    # Largest Face by area
    x, y, w, h = max(faces, key=lambda f: f[2] * f[3])

    # Expand by 20% of the face height on every side, clamped to the image
    margin = int(h * 0.20)
    x = max(0, x - margin)
    y = max(0, y - margin)
    w = min(img_np.shape[1] - x, w + 2 * margin)
    h = min(img_np.shape[0] - y, h + 2 * margin)
    return image_pil.crop((x, y, x + w, y + h)), True


# Grad-CAM Setup: module-level slots filled by the hooks below.
gradients = None
activations = None


def backward_hook(module, grad_input, grad_output):
    """Capture the gradient flowing out of the hooked layer."""
    global gradients
    gradients = grad_output[0]


def forward_hook(module, input, output):
    """Capture the hooked layer's forward activations."""
    global activations
    activations = output


def generate_heatmap(model, input_tensor):
    """Produce a Grad-CAM heatmap for the rater's last ResNet block.

    Hooks model.layer4[-1], runs a forward+backward pass on the scalar score,
    weights each channel's activation by its pooled gradient, and returns a
    ReLU'd, max-normalized 2D numpy array. Hooks are removed before return.
    """
    target_layer = model.layer4[-1]
    handle_f = target_layer.register_forward_hook(forward_hook)
    handle_b = target_layer.register_full_backward_hook(backward_hook)

    output = model(input_tensor)
    model.zero_grad()
    output.backward()

    # Global-average-pool the gradients to one weight per channel, then
    # weight every channel map. Generalized from the original hard-coded
    # range(512) so it tracks the actual channel count.
    pooled_gradients = torch.mean(gradients, dim=[0, 2, 3])
    weighted = activations * pooled_gradients[None, :, None, None]

    heatmap = torch.mean(weighted, dim=1).squeeze()
    heatmap = np.maximum(heatmap.detach().numpy(), 0)
    if np.max(heatmap) > 0:
        heatmap /= np.max(heatmap)

    handle_f.remove()
    handle_b.remove()
    return heatmap


def overlay_heatmap(heatmap, original_image):
    """Blend a normalized heatmap over a PIL image using the JET colormap."""
    heatmap = cv2.resize(heatmap, (original_image.width, original_image.height))
    heatmap = np.uint8(255 * heatmap)
    heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)

    img_np = np.array(original_image)
    img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)

    superimposed_img = heatmap * 0.4 + img_np
    # BUG FIX: values can exceed 255 after the blend; np.uint8 alone wraps
    # around and corrupts bright regions — clip before casting.
    superimposed_img = np.clip(superimposed_img, 0, 255)
    return Image.fromarray(
        cv2.cvtColor(np.uint8(superimposed_img), cv2.COLOR_BGR2RGB)
    )


# --- 4. MAIN INTERFACE ---
uploaded_file = st.file_uploader("Upload a clear portrait", type=["jpg", "jpeg", "png"])

if uploaded_file is not None and rater_model:
    image = Image.open(uploaded_file).convert('RGB')

    # Processing Flow: face crop first, then background removal
    with st.spinner("Isolating facial geometry..."):
        cropped_img, found = crop_to_face_strict(image)
        final_input = isolate_face_pixels(cropped_img)

    # UI Columns: original next to what the model actually sees
    col1, col2 = st.columns(2)
    with col1:
        st.image(image, caption='Original', use_column_width=True)
    with col2:
        st.image(final_input, caption='AI Analysis View', use_column_width=True)

    st.write("")
    if st.button('Calculate Score'):
        progress_bar = st.progress(0)

        # 1. Transform (same ImageNet normalization the rater was trained with)
        transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406],
                                 [0.229, 0.224, 0.225]),
        ])
        input_tensor = transform(final_input).unsqueeze(0)
        # Grad-CAM needs gradients w.r.t. this input later
        input_tensor.requires_grad = True
        progress_bar.progress(60)

        # 2. Score, clamped to the rating scale [1.0, 5.0]
        with torch.no_grad():
            output = rater_model(input_tensor)
            score = output.item()
        score = max(1.0, min(5.0, score))

        # 3. Heatmap (Visual Reasoning)
        heatmap = generate_heatmap(rater_model, input_tensor)
        overlay = overlay_heatmap(heatmap, final_input)
        progress_bar.progress(100)

        # --- RESULTS DISPLAY ---
        # NOTE(review): original divider markup was stripped.
        st.markdown("<hr>", unsafe_allow_html=True)

        # Determine Color Code
        if score >= 4.0:
            score_color = "#4CAF50"  # Green
        elif score >= 3.0:
            score_color = "#FF9800"  # Orange
        else:
            score_color = "#F44336"  # Red

        # Metric Card HTML
        # NOTE(review): original card markup was stripped; minimal rebuild
        # preserving the visible text and the score color coding.
        st.markdown(
            f"""
            <div style="text-align:center; border:1px solid #eee;
                        border-radius:12px; padding:16px;">
                <div>Aesthetic Rating</div>
                <div style="color:{score_color}; font-size:3rem;
                            font-weight:700;">{score:.2f}</div>
                <div>out of 5.0</div>
            </div>
            """,
            unsafe_allow_html=True,
        )

        st.write("")
        st.image(overlay, caption='Feature Activation Map (Visual Reasoning)',
                 use_column_width=True)

        if score >= 4.0:
            st.success("Exceptional features detected. High symmetry and proportion.")
            st.balloons()
        elif score >= 3.0:
            st.info("Strong features detected. Above average structure.")
        else:
            st.warning("Average structure detected. Lighting or angle may affect result.")