|
|
|
|
|
|
|
|
import numpy as np |
|
|
from skimage.io import imread |
|
|
from skimage.transform import resize |
|
|
from skimage.feature import hog, local_binary_pattern |
|
|
from skimage.filters import gabor |
|
|
from sklearn.model_selection import train_test_split |
|
|
from sklearn.svm import SVC |
|
|
from sklearn.ensemble import RandomForestClassifier |
|
|
from sklearn.neighbors import KNeighborsClassifier |
|
|
from sklearn.linear_model import LogisticRegression |
|
|
from sklearn.metrics import classification_report |
|
|
import os |
|
|
from tqdm import tqdm |
|
|
import joblib |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Side length in pixels: every image is resized to IMG_SIZE x IMG_SIZE
# before feature extraction.
IMG_SIZE = 48


# Root folder containing one sub-directory of images per emotion label.
DATASET_PATH = "./train"


# Class names; a label's position in this list is its integer class id.
EMOTIONS = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def extract_hog(img):
    """Return the HOG descriptor of a single grayscale image.

    Uses 8x8-pixel cells grouped into 2x2-cell blocks and returns the
    flattened feature vector produced by skimage's `hog`.
    """
    descriptor = hog(
        img,
        pixels_per_cell=(8, 8),
        cells_per_block=(2, 2),
    )
    return descriptor
|
|
|
|
|
def extract_lbp(img):
    """Return a normalized local-binary-pattern histogram for a grayscale image.

    Uses the rotation-invariant "uniform" LBP with P=8 sampling points and
    radius R=1, which yields exactly P + 2 = 10 distinct codes (0..9).

    Fix: the previous binning (`np.arange(0, 59)`) was sized for the
    "nri_uniform" method's 59 patterns; with method="uniform" only the
    first 10 of those 58 bins could ever be non-zero, so the histogram is
    now sized to the actual code range and carries no dead dimensions.

    Returns:
        1-D float array of length 10, normalized to sum to ~1 (a small
        epsilon in the denominator guards against division by zero).
    """
    n_points, radius = 8, 1
    lbp = local_binary_pattern(img, P=n_points, R=radius, method="uniform")
    # Codes run 0..n_points+1, so we need n_points+2 bins (n_points+3 edges).
    hist, _ = np.histogram(lbp.ravel(), bins=np.arange(0, n_points + 3))
    hist = hist.astype("float")
    hist /= (hist.sum() + 1e-6)
    return hist
|
|
|
|
|
def extract_gabor(img):
    """Return subsampled Gabor-filter responses for a grayscale image.

    Applies a single Gabor filter at frequency 0.6, keeps the real part
    of the response, and takes every 64th value of the flattened result
    to keep the feature vector small.
    """
    real_response, _imag = gabor(img, frequency=0.6)
    flat = real_response.ravel()
    return flat[::64]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def load_dataset():
    """Load images under DATASET_PATH and build the feature matrix.

    Walks one sub-folder per emotion in EMOTIONS; each file is read as a
    grayscale image, resized to IMG_SIZE x IMG_SIZE, and converted into a
    single row by concatenating HOG, LBP and Gabor features. Files that
    fail to load are skipped with a warning; missing emotion folders are
    silently ignored.

    Returns:
        (features, targets): 2-D array of feature rows and 1-D array of
        integer class ids (indices into EMOTIONS).
    """
    feature_rows = []
    class_ids = []
    print("[INFO] Loading dataset and extracting features...")

    for class_id, label in enumerate(EMOTIONS):
        folder = os.path.join(DATASET_PATH, label)
        if not os.path.exists(folder):
            continue
        for file in tqdm(os.listdir(folder), desc=label):
            path = os.path.join(folder, file)
            try:
                img = imread(path, as_gray=True)
                img = resize(img, (IMG_SIZE, IMG_SIZE), anti_aliasing=True)
            except Exception as e:
                print(f"[WARNING] Skipped {file}: {e}")
                continue

            # One row = the three descriptor families concatenated.
            row = np.concatenate(
                [extract_hog(img), extract_lbp(img), extract_gabor(img)]
            )
            feature_rows.append(row)
            class_ids.append(class_id)

    return np.array(feature_rows), np.array(class_ids)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def train_and_evaluate(X, y):
    """Train several classic classifiers on (X, y), report and save each.

    Splits the data 80/20, then for each classifier: fits on the training
    split, prints a classification report on the held-out split, and
    saves the fitted model to '<name>_model.joblib' in the working
    directory.

    Args:
        X: 2-D array of feature rows.
        y: 1-D array of integer class ids (indices into EMOTIONS).
    """
    # stratify=y keeps each class at the same proportion in the train and
    # test splits (emotion datasets are typically imbalanced — e.g. few
    # 'disgust' samples); random_state pins the split for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42, stratify=y
    )

    classifiers = {
        "SVM": SVC(kernel='linear'),
        # random_state added: the split was seeded but the forest was not,
        # so repeated runs previously produced different models/scores.
        "Random Forest": RandomForestClassifier(n_estimators=100, random_state=42),
        "k-NN": KNeighborsClassifier(n_neighbors=5),
        "Logistic Regression": LogisticRegression(max_iter=1000)
    }

    for name, clf in classifiers.items():
        print(f"\n[INFO] Training {name}...")
        clf.fit(X_train, y_train)
        preds = clf.predict(X_test)
        print(f"\n[RESULTS] {name} Classification Report")
        print(classification_report(y_test, preds, target_names=EMOTIONS))

        # e.g. "random_forest_model.joblib"
        model_filename = f'{name.lower().replace(" ", "_")}_model.joblib'
        print(f"[INFO] Saving {name} model to {model_filename}...")
        joblib.dump(clf, model_filename)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Script entry point: build the feature matrix from ./train, then train,
# evaluate and save all classifiers.
if __name__ == "__main__":
    X, y = load_dataset()
    train_and_evaluate(X, y)
|
|
|