Spaces:
Runtime error
Runtime error
Create train_model.py
Browse files- train_model.py +81 -0
train_model.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# train_model.py
|
| 2 |
+
import numpy as np
|
| 3 |
+
import os
|
| 4 |
+
from glob import glob
|
| 5 |
+
from sklearn.preprocessing import LabelEncoder
|
| 6 |
+
from sklearn.model_selection import train_test_split
|
| 7 |
+
from tensorflow.keras.models import Sequential
|
| 8 |
+
from tensorflow.keras.layers import LSTM, Dense, Dropout, BatchNormalization
|
| 9 |
+
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
|
| 10 |
+
import joblib
|
| 11 |
+
|
| 12 |
+
# Training configuration constants.
DATA_DIR = "gesture_data"  # root dir: one subdirectory per gesture label, each holding .npz recordings
SEQ_LEN = 30  # expected number of frames per recorded gesture sequence; shorter/longer clips are skipped
MODEL_OUT = "gesture_lstm.h5"  # path where the best Keras model checkpoint is written
LABELS_OUT = "labels.joblib"  # path where the fitted LabelEncoder is persisted for inference
|
| 16 |
+
|
| 17 |
+
def load_data(data_dir, seq_len=None):
    """Load recorded gesture sequences from *data_dir*.

    Expects one subdirectory per gesture label, each containing ``.npz``
    files with a ``'data'`` array of shape ``(seq_len, features)``.
    Sequences whose frame count differs from *seq_len* are skipped.

    Args:
        data_dir: Root directory of the recorded gesture data.
        seq_len: Expected frames per sequence; defaults to the
            module-level ``SEQ_LEN`` when not given (backward compatible).

    Returns:
        Tuple ``(X, y)`` where ``X`` is an ndarray of shape
        ``(n_samples, seq_len, features)`` and ``y`` is an ndarray of
        string labels aligned with ``X``.
    """
    if seq_len is None:
        seq_len = SEQ_LEN
    X = []
    y = []
    for label in sorted(os.listdir(data_dir)):
        label_dir = os.path.join(data_dir, label)
        if not os.path.isdir(label_dir):
            continue
        for f in glob(os.path.join(label_dir, "*.npz")):
            # np.load on an .npz returns an NpzFile that keeps the zip
            # handle open; use a context manager to avoid fd leaks.
            with np.load(f) as npz:
                d = npz["data"]
                if d.shape[0] != seq_len:
                    continue
                X.append(d)  # shape (seq_len, features)
                y.append(label)
    return np.array(X), np.array(y)
|
| 33 |
+
|
| 34 |
+
def build_model(input_shape, num_classes):
    """Build and compile a stacked-LSTM gesture classifier.

    Args:
        input_shape: ``(seq_len, features)`` shape of one input sequence.
        num_classes: Number of gesture classes (width of the softmax).

    Returns:
        A compiled ``tf.keras`` Sequential model using sparse categorical
        cross-entropy (integer-encoded labels) and the Adam optimizer.
    """
    model = Sequential()
    # Temporal feature extraction: a wide LSTM feeding a narrower one.
    model.add(LSTM(128, return_sequences=True, input_shape=input_shape))
    model.add(Dropout(0.3))
    model.add(LSTM(64))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))
    # Classification head.
    model.add(Dense(64, activation="relu"))
    model.add(Dense(num_classes, activation="softmax"))
    model.compile(
        optimizer="adam",
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"],
    )
    return model
|
| 46 |
+
|
| 47 |
+
def main():
    """Train the gesture LSTM end to end.

    Loads sequences, encodes labels (persisting the encoder), splits and
    per-sample normalizes the data, then fits with checkpointing and
    early stopping before reporting held-out metrics.
    """
    X, y = load_data(DATA_DIR)
    print("Loaded X:", X.shape, "y:", y.shape)

    # Encode string labels to integer ids and persist the encoder so
    # inference code can map predictions back to label names.
    le = LabelEncoder()
    y_enc = le.fit_transform(y)
    joblib.dump(le, LABELS_OUT)

    X_train, X_test, y_train, y_test = train_test_split(
        X, y_enc, test_size=0.2, random_state=42, stratify=y_enc)

    # Standardize each sample to zero mean / unit std per feature,
    # computed over the time axis — vectorized equivalent of the
    # original per-sample loop.
    def normalize_samples(arr):
        mean = arr.mean(axis=1, keepdims=True)
        std = arr.std(axis=1, keepdims=True) + 1e-8  # avoid divide-by-zero
        return (arr - mean) / std

    X_train = normalize_samples(X_train)
    X_test = normalize_samples(X_test)

    input_shape = (X_train.shape[1], X_train.shape[2])  # (seq_len, features)
    model = build_model(input_shape, len(le.classes_))
    # summary() prints the table itself and returns None, so wrapping it
    # in print() (as before) emitted a spurious trailing "None".
    model.summary()

    ckpt = ModelCheckpoint(MODEL_OUT, save_best_only=True,
                           monitor="val_loss", verbose=1)
    es = EarlyStopping(monitor="val_loss", patience=10,
                       restore_best_weights=True)
    model.fit(X_train, y_train, validation_data=(X_test, y_test),
              epochs=100, batch_size=16, callbacks=[ckpt, es])

    # Final evaluation on the held-out split (best weights restored by
    # EarlyStopping).
    loss, acc = model.evaluate(X_test, y_test)
    print("Test loss/acc:", loss, acc)
    print("Saved model to", MODEL_OUT)
    print("Saved labels to", LABELS_OUT)
|
| 79 |
+
|
| 80 |
+
# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()
|