import os
import time
import numpy as np
import pickle
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Activation, Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau

# ==============================
# CONFIG
# ==============================
MODEL_NAME = "Tigrigna_convnet"
EPOCHS = 100
BATCH_SIZE = 32
CHANNELS = 1
IMG_HEIGHT, IMG_WIDTH = 28, 28
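# NOTE: IMG_HEIGHT, IMG_WIDTH, and CHANNELS must match the preprocessing
# that produced the dataset pickle loaded below.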

# ==============================
# LOAD DATA
# ==============================
def load_data(dataset_path):
    with open(dataset_path, "rb") as f:
        data, labels = pickle.load(f)
    print(f"[INFO] dataset loaded. Shape: {data.shape}, Labels: {len(labels)}")
    print(f"[INFO] Unique labels: {len(np.unique(labels))}")

    # The pickled data is already normalized to [0, 1] and shaped
    # (samples, height, width, channels), so no reshaping or rescaling is needed.

    # One-hot encode the integer labels
    num_classes = len(np.unique(labels))
    labels = to_categorical(labels, num_classes=num_classes)

    # Stratified split keeps the class distribution identical in train and test
    X_train, X_test, y_train, y_test = train_test_split(
        data, labels, test_size=0.2, random_state=42, stratify=np.argmax(labels, axis=1)
    )
    return X_train, y_train, X_test, y_test, num_classes

# ==============================
# BUILD MODEL
# ==============================
def build_model(num_classes):
    model = Sequential()

    # First convolutional block
    model.add(Conv2D(32, (3, 3), padding="same", input_shape=(IMG_HEIGHT, IMG_WIDTH, CHANNELS)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # Second convolutional block
    model.add(Conv2D(64, (3, 3), padding="same"))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
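    # Shape check: "same" padding preserves 28x28, and the two 2x2 poolings
    # reduce it to 14x14 and then 7x7, so Flatten emits 7 * 7 * 64 = 3136 features.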
    # Fully connected head
    model.add(Flatten())
    model.add(Dense(512, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation="softmax"))
    return model

# ==============================
# PLOT TRAINING HISTORY
# ==============================
def plot_model_history(history):
    fig, axs = plt.subplots(1, 2, figsize=(15, 5))

    # Accuracy
    axs[0].plot(history.history["accuracy"], label="train")
    if "val_accuracy" in history.history:
        axs[0].plot(history.history["val_accuracy"], label="val")
    axs[0].set_title("Model Accuracy")
    axs[0].set_xlabel("Epoch")
    axs[0].set_ylabel("Accuracy")
    axs[0].legend()

    # Loss
    axs[1].plot(history.history["loss"], label="train")
    if "val_loss" in history.history:
        axs[1].plot(history.history["val_loss"], label="val")
    axs[1].set_title("Model Loss")
    axs[1].set_xlabel("Epoch")
    axs[1].set_ylabel("Loss")
    axs[1].legend()

    plt.savefig("training_history.png")
    plt.show()

# ==============================
# TRAIN MODEL
# ==============================
def train_model(model, X_train, y_train, X_test, y_test):
    print("[INFO] training model...")
    model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])

    # Light augmentation: small rotations, shifts, and zooms
    datagen = ImageDataGenerator(
        rotation_range=10,
        width_shift_range=0.1,
        height_shift_range=0.1,
        zoom_range=0.1,
    )
    # fit() is only required for featurewise statistics; none of the
    # transforms above need them, so this is effectively a no-op here.
    datagen.fit(X_train)

    # Stop early on stalled val_loss and shrink the learning rate on plateaus
    early_stop = EarlyStopping(monitor="val_loss", patience=10, restore_best_weights=True)
    reduce_lr = ReduceLROnPlateau(monitor="val_loss", factor=0.2, patience=5, min_lr=0.0001)

    start = time.time()
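    # datagen.flow yields batches indefinitely, so steps_per_epoch caps each
    # epoch at one full pass over the training set.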
    history = model.fit(
        datagen.flow(X_train, y_train, batch_size=BATCH_SIZE),
        steps_per_epoch=len(X_train) // BATCH_SIZE,
        epochs=EPOCHS,
        validation_data=(X_test, y_test),
        callbacks=[early_stop, reduce_lr],
        verbose=1,
    )
    end = time.time()
    print(f"[INFO] Training finished in {end - start:.2f} seconds")

    plot_model_history(history)

    _, acc = model.evaluate(X_test, y_test, verbose=0)
    print(f"[INFO] Test Accuracy: {acc * 100:.2f}%")
    return model, history

# ==============================
# MAIN
# ==============================
def main():
    X_train, y_train, X_test, y_test, num_classes = load_data("dataset_pickles/tigrigna_dataset.pickle")
    print(f"[INFO] using {num_classes} classes")
    print(f"[INFO] Training data shape: {X_train.shape}")
    print(f"[INFO] Training labels shape: {y_train.shape}")

    model = build_model(num_classes)
    model.summary()

    model, history = train_model(model, X_train, y_train, X_test, y_test)

    os.makedirs("out", exist_ok=True)
    model.save("out/Tig_Model.h5")
    print("[INFO] Model saved at out/Tig_Model.h5")

if __name__ == "__main__":
    main()