# face-emotion-recognition / comparison2.py
# Author: kalpit sharma
# Commit: "adding changes" (5b6acdd)
# ========== Imports ==========
import os
import joblib
import json
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from skimage.io import imread
from skimage.transform import resize
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
from tensorflow.keras.models import model_from_json
from transformers import CLIPProcessor, CLIPModel
from torchvision import transforms
from PIL import Image
from sklearn.model_selection import train_test_split

# ========== Constants ==========
IMG_SIZE = 48            # square side length (pixels) every image is resized to
DATASET_PATH = "train"   # root folder holding one sub-folder per emotion label
# List order defines the integer class index assigned to each emotion.
EMOTIONS = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
MODEL_PATH = ''          # directory holding the saved .joblib models (update as needed)
# ========== Feature Extraction ==========
def extract_hog(img):
    """Return the HOG (Histogram of Oriented Gradients) feature vector of a grayscale image.

    Args:
        img: 2-D grayscale image array (expected IMG_SIZE x IMG_SIZE).

    Returns:
        1-D numpy array of HOG features.
    """
    # Local import keeps skimage optional until feature extraction is actually used.
    from skimage.feature import hog
    return hog(img, pixels_per_cell=(8, 8), cells_per_block=(2, 2), feature_vector=True)
def extract_lbp(img):
    """Return a normalized 9-bin histogram of uniform Local Binary Patterns.

    Args:
        img: 2-D grayscale image array.

    Returns:
        1-D float numpy array of length 9 summing to ~1.
    """
    from skimage.feature import local_binary_pattern
    lbp = local_binary_pattern(img, P=8, R=1, method="uniform")
    # bins=np.arange(0, 10) gives 9 bins covering the uniform-LBP code range.
    (hist, _) = np.histogram(lbp.ravel(), bins=np.arange(0, 10), range=(0, 9))
    hist = hist.astype("float")
    hist /= (hist.sum() + 1e-7)  # epsilon guards against division by zero on empty histograms
    return hist
def extract_gabor(img):
    """Return mean responses of the image to a bank of 4 Gabor filters.

    One filter per orientation theta in {0, pi/4, pi/2, 3pi/4}; the feature for
    each filter is the mean of the filtered image.

    Args:
        img: 2-D grayscale image array.

    Returns:
        List of 4 floats (one mean response per orientation).
    """
    import cv2
    ksize = 31  # Gabor kernel size in pixels
    filters = [
        cv2.getGaborKernel((ksize, ksize), 4.0, theta, 10.0, 0.5, 0, ktype=cv2.CV_32F)
        for theta in np.arange(0, np.pi, np.pi / 4)
    ]
    return [np.mean(cv2.filter2D(img, cv2.CV_8UC3, k)) for k in filters]
def extract_features(img):
    """Concatenate HOG, LBP and Gabor features into one flat feature list.

    Args:
        img: 2-D grayscale image array.

    Returns:
        Flat Python list: HOG vector + 9 LBP bins + 4 Gabor means.
    """
    features = []
    features.extend(extract_hog(img))
    features.extend(extract_lbp(img))
    features.extend(extract_gabor(img))
    return features
# ========== Dataset Loader ==========
def load_dataset_features():
    """Walk DATASET_PATH/<emotion> folders and return hand-crafted features.

    Returns:
        (X, y): X is a 2-D numpy array of feature vectors, y the matching
        integer labels (index into EMOTIONS).
    """
    X, y = [], []
    for label in EMOTIONS:
        folder = os.path.join(DATASET_PATH, label)
        if not os.path.exists(folder):
            continue
        for file in tqdm(os.listdir(folder), desc=f"Extracting {label}"):
            path = os.path.join(folder, file)
            try:
                img = imread(path, as_gray=True)
                img = resize(img, (IMG_SIZE, IMG_SIZE), anti_aliasing=True)
                X.append(extract_features(img))
                y.append(EMOTIONS.index(label))
            except Exception as e:
                # Best effort: skip unreadable/corrupt files instead of aborting the run.
                print(f"[WARN] Skipped {file}: {e}")
    return np.array(X), np.array(y)
def load_images():
    """Load raw images resized to IMG_SIZE x IMG_SIZE with their labels.

    Images are read with as_gray=False (so color files keep their channels).

    Returns:
        (images, labels): numpy arrays of resized float images in [0, 1] and
        matching integer labels (index into EMOTIONS).
    """
    images, labels = [], []
    for label in EMOTIONS:
        folder = os.path.join(DATASET_PATH, label)
        if not os.path.exists(folder):
            continue
        for file in os.listdir(folder):
            path = os.path.join(folder, file)
            try:
                img = imread(path, as_gray=False)
                img = resize(img, (IMG_SIZE, IMG_SIZE), anti_aliasing=True)
                images.append(img)
                labels.append(EMOTIONS.index(label))
            except Exception:
                # Narrowed from a bare `except:` (which would also swallow
                # KeyboardInterrupt/SystemExit); unreadable files are skipped.
                continue
    return np.array(images), np.array(labels)
# ========== Evaluation Metrics ==========
def evaluate_model(y_true, y_pred, model_name):
    """Print accuracy, weighted precision/recall/F1 and the confusion matrix.

    Args:
        y_true: ground-truth integer labels.
        y_pred: predicted integer labels.
        model_name: human-readable name used in the report header.
    """
    print(f"\n[RESULTS] {model_name}")
    print("Accuracy:", accuracy_score(y_true, y_pred))
    print("Precision:", precision_score(y_true, y_pred, average='weighted'))
    print("Recall:", recall_score(y_true, y_pred, average='weighted'))
    print("F1 Score:", f1_score(y_true, y_pred, average='weighted'))
    print("Confusion Matrix:\n", confusion_matrix(y_true, y_pred))
# ========== Classical Models ==========
def evaluate_classical_models():
    """Evaluate the saved classical ML models on a held-out 20% split.

    Extracts hand-crafted features for the whole dataset, splits 80/20 with a
    fixed seed, then loads each .joblib model from MODEL_PATH and reports
    accuracy, weighted precision/recall/F1 and the confusion matrix.
    """
    print("\n[INFO] Evaluating classical ML models...\n")
    X, y = load_dataset_features()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    model_files = {
        'KNN': 'k-nn_model.joblib',
        'Logistic Regression': 'logistic_regression_model.joblib',
        'Random Forest': 'random_forest_model.joblib',
        'SVM': 'svm_model.joblib',
    }
    for name, file in model_files.items():
        print(f"\n--- {name} ---")
        model = joblib.load(os.path.join(MODEL_PATH, file))
        # Guard: skip models trained with a different feature-extraction pipeline.
        expected_input_size = model.n_features_in_
        if X_test.shape[1] != expected_input_size:
            print(f"[WARNING] Feature size mismatch for {name}: "
                  f"Expected {expected_input_size}, Got {X_test.shape[1]}. Skipping...")
            continue
        y_pred = model.predict(X_test)
        acc = accuracy_score(y_test, y_pred)
        prec = precision_score(y_test, y_pred, average='weighted', zero_division=0)
        rec = recall_score(y_test, y_pred, average='weighted', zero_division=0)
        f1 = f1_score(y_test, y_pred, average='weighted', zero_division=0)
        print(f"Accuracy: {acc:.4f}")
        print(f"Precision: {prec:.4f}")
        print(f"Recall: {rec:.4f}")
        print(f"F1 Score: {f1:.4f}")
        print("Confusion Matrix:")
        print(confusion_matrix(y_test, y_pred))
# ========== CNN/RNN Models ==========
def evaluate_keras_model(model_path, X_test, y_test, model_name):
    """Load a saved Keras model and report its metrics on (X_test, y_test).

    Args:
        model_path: path to a full saved model (.h5 / SavedModel).
        X_test: input array shaped as the model expects.
        y_test: ground-truth integer labels.
        model_name: name used in the printed report.
    """
    model = tf.keras.models.load_model(model_path)
    # Model outputs per-class scores; argmax converts them to integer labels.
    y_pred = np.argmax(model.predict(X_test), axis=1)
    evaluate_model(y_test, y_pred, model_name)
def evaluate_json_model(json_path, weights_path, X_test, y_test, model_name):
    """Rebuild a Keras model from a JSON architecture + weights file, then evaluate it.

    Args:
        json_path: path to the model architecture serialized with to_json().
        weights_path: path to the matching saved weights file.
        X_test: input array shaped as the model expects.
        y_test: ground-truth integer labels.
        model_name: name used in the printed report.
    """
    with open(json_path, 'r') as f:
        model = model_from_json(f.read())
    model.load_weights(weights_path)
    y_pred = np.argmax(model.predict(X_test), axis=1)
    evaluate_model(y_test, y_pred, model_name)
# ========== ViT/CLIP Model ==========
def evaluate_clip_model():
    """Zero-shot emotion classification with CLIP ViT-B/32.

    Builds one text prompt per emotion and, for every dataset image, picks the
    emotion whose prompt CLIP scores highest (logits_per_image argmax).
    """
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    X_test, y_test = load_images()
    # The prompt list never changes, so build it once outside the loop.
    text = [f"a face showing {emotion} emotion" for emotion in EMOTIONS]
    y_pred = []
    for i in tqdm(range(len(X_test))):
        # load_images() returns floats in [0, 1]; PIL/CLIP expect uint8 pixels.
        img = Image.fromarray((X_test[i] * 255).astype(np.uint8))
        inputs = processor(text=text, images=img, return_tensors="pt", padding=True)
        outputs = model(**inputs)
        y_pred.append(outputs.logits_per_image.argmax().item())
    evaluate_model(y_test, y_pred, "ViT-B/32 (CLIP)")
# # ========== Run All ==========
# if __name__ == '__main__':
# # evaluate_classical_models()
# X_raw, y_raw = load_images()
# X_raw = X_raw.reshape(-1, IMG_SIZE, IMG_SIZE)
# # X_raw = X_raw.reshape(-1, IMG_SIZE, IMG_SIZE, 3)
# evaluate_keras_model("emotion_detector_model.h5", X_raw, y_raw, "CNN Emotion Model")
# evaluate_keras_model("cnn_rnn_model_from_dir.h5", X_raw, y_raw, "CNN + RNN Emotion Model")
# evaluate_json_model("model_cleaned.json", "model.weights.h5", X_raw, y_raw, "Custom JSON + Weights Model")
# # evaluate_clip_model()