# NOTE: removed web-scrape artifacts (GitHub UI chrome, file-size banner,
# commit hash, and line-number gutter) that were not part of the source file.
# app/model_loader.py
import os
import json
from pathlib import Path
from typing import Tuple, Any, Optional, Dict
# Fallback label set for the Keras model when models/labels.json is missing.
# Order matters: index i must match output neuron i of the Keras model.
DEFAULT_LABELS = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
# HardlyHumans model uses 8 emotions (adds contempt)
# NOTE(review): not referenced in this file — ViT label order is read from
# model.config.id2label at load time; confirm external users before removing.
HARDLYHUMANS_LABELS = ['anger', 'contempt', 'sad', 'happy', 'neutral', 'disgust', 'fear', 'surprise']
# Canonical mapping from raw model-config label names (lowercased) to this
# app's label vocabulary. Only 'anger' actually changes; the identity entries
# document the full expected vocabulary. Unknown labels pass through lowercased.
_VIT_LABEL_MAP = {
    'anger': 'angry',
    'disgust': 'disgust',
    'fear': 'fear',
    'happy': 'happy',
    'neutral': 'neutral',
    'sad': 'sad',
    'surprise': 'surprise',
    'contempt': 'contempt',  # 8th emotion present in the HardlyHumans model
}


def _ordered_raw_labels(model):
    """Return a ViT model's class labels ordered by class index (from id2label)."""
    return [model.config.id2label[i] for i in range(len(model.config.id2label))]


def _normalize_labels(raw_labels):
    """Lowercase each raw label and map it into the app vocabulary."""
    return [_VIT_LABEL_MAP.get(label.lower(), label.lower()) for label in raw_labels]


def _load_fine_tuned_vit(fine_tuned_dir: Path, force_model: Optional[str]):
    """Load the local fine-tuned ViT checkpoint from `fine_tuned_dir`.

    Returns the 4-tuple (model_dict, labels, version, 'vit') on success, or
    None when the checkpoint is absent and loading was not forced.
    Raises FileNotFoundError when force_model == 'fine-tuned' but the
    checkpoint is missing; propagates any transformers load error.
    """
    from transformers import AutoImageProcessor, AutoModelForImageClassification
    # Checkpoint must contain safetensors weights to be considered present.
    if not (fine_tuned_dir.exists() and (fine_tuned_dir / "model.safetensors").exists()):
        if force_model == 'fine-tuned':
            print("[MODEL] ⚠️ Fine-tuned model requested but not found!")
            raise FileNotFoundError("Fine-tuned model not found")
        print("[MODEL] Fine-tuned model not found, using base model...")
        return None
    print(f"[MODEL] 🎯 Loading Asripa model (FER2013 Enhanced): {fine_tuned_dir}")
    print("[MODEL] Accuracy: 78.26% (fine-tuned on FER2013)")
    print("[MODEL] Optimized for happy/surprise detection!")
    processor = AutoImageProcessor.from_pretrained(
        str(fine_tuned_dir),
        local_files_only=True
    )
    model = AutoModelForImageClassification.from_pretrained(
        str(fine_tuned_dir),
        local_files_only=True,
        low_cpu_mem_usage=True  # reduce peak RAM while loading weights
    )
    raw_labels = _ordered_raw_labels(model)
    print(f"[MODEL] Raw labels from model config: {raw_labels}")
    labels = _normalize_labels(raw_labels)
    print(f"[MODEL] Normalized labels: {labels}")
    print("[MODEL] ✅ Fine-tuned ViT model loaded successfully!")
    return {
        'model': model,
        'processor': processor,
        'type': 'vit'
    }, labels, "asripa-vit-78.26%", 'vit'


def _load_base_vit(models_dir: Path):
    """Load (downloading into `models_dir` if needed) the base HardlyHumans ViT.

    Returns (model_dict, labels, version, 'vit'); raises ImportError if
    transformers is unavailable, or any hub/load error otherwise.
    """
    from transformers import AutoImageProcessor, AutoModelForImageClassification
    model_id = "HardlyHumans/Facial-expression-detection"
    print(f"[MODEL] Loading Base Model: {model_id}")
    print("[MODEL] Accuracy: 92.2% - BASE MODEL")
    print("[MODEL] Downloading from HuggingFace if not cached...")
    processor = AutoImageProcessor.from_pretrained(
        model_id,
        cache_dir=str(models_dir),
        local_files_only=False  # allow download if not cached
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_id,
        cache_dir=str(models_dir),
        local_files_only=False,  # allow download if not cached
        low_cpu_mem_usage=True  # reduce peak RAM while loading weights
    )
    raw_labels = _ordered_raw_labels(model)
    print(f"[MODEL] Raw labels from model config: {raw_labels}")
    print(f"[MODEL] Label mapping (id2label): {model.config.id2label}")
    labels = _normalize_labels(raw_labels)
    print(f"[MODEL] Normalized labels: {labels}")
    print("[MODEL] ✅ ViT model loaded successfully!")
    return {
        'model': model,
        'processor': processor,
        'type': 'vit'
    }, labels, "base-vit-92.2%", 'vit'


def _load_keras_fallback(models_dir: Path):
    """Load a local Keras emotion model plus its labels/version sidecar files.

    Returns (model, labels, version, 'keras'). Raises ImportError when
    tensorflow is also unavailable, FileNotFoundError when no model file
    exists in `models_dir`.
    """
    try:
        from tensorflow.keras.models import load_model
    except ImportError:
        raise ImportError("Neither transformers nor tensorflow.keras available. Install one of them.")
    # First matching candidate wins, preferring the native .keras format.
    model_path = None
    for name in ("emotion_model.keras", "emotion_model.h5", "emotion_model.hdf5"):
        candidate = models_dir / name
        if candidate.exists():
            model_path = str(candidate)
            break
    if model_path is None:
        raise FileNotFoundError(f"No model file found in {models_dir}. Please add emotion_model.keras or emotion_model.h5")
    print(f"[MODEL] Loading Keras model: {model_path}")
    model = load_model(model_path)
    # Labels: optional labels.json overrides the defaults; fall back silently
    # on malformed JSON (best-effort by design).
    labels = DEFAULT_LABELS
    labels_path = models_dir / "labels.json"
    if labels_path.exists():
        try:
            with labels_path.open("r", encoding="utf-8") as f:
                labels = json.load(f)
        except Exception:
            labels = DEFAULT_LABELS
    # Version: optional MODEL_VERSION.txt; best-effort read.
    version = "v_unknown"
    version_path = models_dir / "MODEL_VERSION.txt"
    if version_path.exists():
        try:
            version = version_path.read_text(encoding="utf-8").strip()
        except Exception:
            pass
    return model, labels, version, 'keras'


def load_emotion_model(force_model: Optional[str] = None):
    """
    Load emotion detection model. Supports both Keras and Vision Transformer models.

    Tries, in order: local fine-tuned ViT, base HardlyHumans ViT (downloaded
    from HuggingFace), then a local Keras model.

    Args:
        force_model: 'base' to force base model, 'fine-tuned' to force fine-tuned, None for auto

    Returns: (model_dict, labels, model_version, model_type)
        model_dict: For ViT: {'model': model, 'processor': processor, 'type': 'vit'}
                    For Keras: model object
        model_type: 'keras' or 'vit' (Vision Transformer)

    Raises:
        Exception: re-raised load failure when force_model == 'fine-tuned';
        ImportError / FileNotFoundError from the Keras fallback otherwise.
    """
    this_dir = Path(__file__).resolve().parent  # app/
    repo_root = this_dir.parent  # project root (/app in container)
    models_dir = repo_root / "models"
    fine_tuned_dir = models_dir / "fine_tuned_vit"

    # 1) Fine-tuned ViT first (better happy/surprise detection), unless the
    #    caller explicitly forced the base model.
    if force_model != 'base':
        try:
            result = _load_fine_tuned_vit(fine_tuned_dir, force_model)
            if result is not None:
                return result
            # result is None: checkpoint absent and not forced — fall through.
        except Exception as e:
            if force_model == 'fine-tuned':
                # Caller demanded the fine-tuned model: fail loudly.
                print(f"[MODEL] ⚠️ Failed to load fine-tuned model: {e}")
                raise
            print(f"[MODEL] ⚠️ Failed to load fine-tuned model: {e}")
            print(f"[MODEL] Falling back to base HardlyHumans model...")

    # 2) Base HardlyHumans ViT (best accuracy - 92.2%).
    try:
        return _load_base_vit(models_dir)
    except ImportError as e:
        print(f"[MODEL] ❌ transformers library not installed: {e}")
        print("[MODEL] Install with: pip install transformers torch")
        print("[MODEL] Falling back to Keras model...")
    except Exception as e:
        print(f"[MODEL] ❌ Failed to load ViT model: {e}")
        print(f"[MODEL] Error type: {type(e).__name__}")
        print(f"[MODEL] Error message: {str(e)}")
        import traceback
        print(f"[MODEL] Full traceback:")
        print(traceback.format_exc())
        print("[MODEL] ⚠️ Falling back to Keras model (lower accuracy)...")

    # 3) Last resort: local Keras model.
    return _load_keras_fallback(models_dir)