File size: 2,938 Bytes
33096a8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
# train_model.py
import numpy as np
import os
from glob import glob
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout, BatchNormalization
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
import joblib

DATA_DIR = "gesture_data"      # root folder: one subdirectory per gesture label
SEQ_LEN = 30                   # required frames per sequence; other lengths are skipped in load_data
MODEL_OUT = "gesture_lstm.h5"  # path where the best Keras checkpoint is written
LABELS_OUT = "labels.joblib"   # pickled LabelEncoder mapping class ids back to label names

def load_data(data_dir, seq_len=None):
    """Load gesture sequences from per-label subdirectories of *data_dir*.

    Each immediate subdirectory name is treated as a class label; every
    ``*.npz`` file inside it must contain a ``data`` array of shape
    ``(seq_len, features)``.  Sequences with any other length are skipped.

    Args:
        data_dir: Root directory containing one subdirectory per label.
        seq_len: Required sequence length. Defaults to the module-level
            SEQ_LEN when None (backward compatible with the old signature).

    Returns:
        Tuple ``(X, y)`` where ``X`` has shape ``(n_samples, seq_len,
        features)`` and ``y`` is an array of label strings.
    """
    if seq_len is None:
        seq_len = SEQ_LEN
    X, y = [], []
    for label in sorted(os.listdir(data_dir)):
        p = os.path.join(data_dir, label)
        if not os.path.isdir(p):
            continue
        for f in glob(os.path.join(p, "*.npz")):
            # NpzFile keeps the zip archive open until closed; use a
            # context manager so file handles are released deterministically.
            with np.load(f) as npz:
                d = npz["data"]
                if d.shape[0] != seq_len:
                    continue  # skip malformed / partial recordings
                X.append(np.asarray(d))
            y.append(label)
    return np.array(X), np.array(y)

def build_model(input_shape, num_classes):
    """Construct and compile the stacked-LSTM gesture classifier.

    Args:
        input_shape: ``(seq_len, features)`` shape of one input sequence.
        num_classes: Number of gesture classes (width of the softmax output).

    Returns:
        A compiled ``tf.keras`` Sequential model using integer labels
        (sparse categorical cross-entropy).
    """
    net = Sequential()
    net.add(LSTM(128, return_sequences=True, input_shape=input_shape))
    net.add(Dropout(0.3))
    net.add(LSTM(64))
    net.add(BatchNormalization())
    net.add(Dropout(0.3))
    net.add(Dense(64, activation="relu"))
    net.add(Dense(num_classes, activation="softmax"))
    net.compile(
        optimizer="adam",
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"],
    )
    return net

def _normalize_samples(arr):
    """Standardize each sample to zero mean / unit std per feature.

    Vectorized over the whole batch (axis 1 is time). Always computes in
    float64 so an integer input array is not silently truncated, which the
    old in-place per-sample loop would have done.
    """
    arr = np.asarray(arr, dtype=np.float64)
    mean = arr.mean(axis=1, keepdims=True)
    std = arr.std(axis=1, keepdims=True) + 1e-8  # guard against zero variance
    return (arr - mean) / std


def main():
    """Train the LSTM gesture classifier and persist the model and labels."""
    X, y = load_data(DATA_DIR)
    print("Loaded X:", X.shape, "y:", y.shape)

    # Fit and persist the label encoder so inference can map ids -> names.
    le = LabelEncoder()
    y_enc = le.fit_transform(y)
    joblib.dump(le, LABELS_OUT)

    # NOTE(review): the held-out split below also serves as the validation
    # set, so early stopping / checkpointing peek at the "test" data. Fine
    # for a quick experiment; use a separate validation split for honest
    # final metrics.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y_enc, test_size=0.2, random_state=42, stratify=y_enc
    )

    # Per-sample standardization for robustness to scale/offset differences.
    X_train = _normalize_samples(X_train)
    X_test = _normalize_samples(X_test)

    input_shape = (X_train.shape[1], X_train.shape[2])  # (seq_len, features)
    model = build_model(input_shape, len(le.classes_))
    # summary() prints itself and returns None; wrapping it in print()
    # would emit a spurious "None" line.
    model.summary()

    ckpt = ModelCheckpoint(MODEL_OUT, save_best_only=True, monitor="val_loss", verbose=1)
    es = EarlyStopping(monitor="val_loss", patience=10, restore_best_weights=True)
    model.fit(
        X_train, y_train,
        validation_data=(X_test, y_test),
        epochs=100, batch_size=16,
        callbacks=[ckpt, es],
    )

    # Final evaluation on the held-out split.
    loss, acc = model.evaluate(X_test, y_test)
    print("Test loss/acc:", loss, acc)
    print("Saved model to", MODEL_OUT)
    print("Saved labels to", LABELS_OUT)

if __name__ == "__main__":
    # Guard so the module can be imported (e.g. to reuse load_data)
    # without kicking off a training run.
    main()