# Pneumonia_Detection_Systam / train_model.py
# Training script: MobileNetV2 transfer-learning classifier for chest X-ray pneumonia detection.
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Dropout
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import matplotlib.pyplot as plt
# --- Configuration --------------------------------------------------------
DATASET_DIR = 'archive/chest_xray'
# The dataset ships with pre-made train/val/test splits as subdirectories.
TRAIN_DIR, VAL_DIR, TEST_DIR = (
    os.path.join(DATASET_DIR, split) for split in ('train', 'val', 'test')
)
MODEL_FILENAME = 'pneumonia_detection_model.keras'
IMAGE_SIZE = (224, 224)  # MobileNetV2's default input resolution
BATCH_SIZE = 32
EPOCHS = 10
NUM_CLASSES = 2  # NORMAL and PNEUMONIA
CLASS_NAMES = ['NORMAL', 'PNEUMONIA']  # fixed label order: 0 -> NORMAL, 1 -> PNEUMONIA
print(f"TensorFlow Version: {tf.__version__}")
print(f"Looking for data in: {TRAIN_DIR}")

# Augmented input pipeline for training; plain rescaling for val/test.
# NOTE(review): all splits are scaled to [0, 1], but MobileNetV2's ImageNet
# weights were trained on inputs in [-1, 1] (mobilenet_v2.preprocess_input).
# The scaling is at least consistent across train/val/test and inference
# must match it — confirm this choice is intentional before changing it.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=20,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.1,
    zoom_range=0.1,
    horizontal_flip=True,
    fill_mode='nearest',
)
val_test_datagen = ImageDataGenerator(rescale=1. / 255)


def _flow(datagen, directory, shuffle=True):
    """Build a directory iterator with the shared settings and class order."""
    return datagen.flow_from_directory(
        directory,
        target_size=IMAGE_SIZE,
        batch_size=BATCH_SIZE,
        class_mode='categorical',
        classes=CLASS_NAMES,  # pin label order: 0 -> NORMAL, 1 -> PNEUMONIA
        shuffle=shuffle,
    )


train_generator = _flow(train_datagen, TRAIN_DIR)
validation_generator = _flow(val_test_datagen, VAL_DIR)
# Test data is never shuffled so predictions line up with filenames/labels.
test_generator = _flow(val_test_datagen, TEST_DIR, shuffle=False)
# --- Model: frozen MobileNetV2 backbone + small classification head -------
base_model = MobileNetV2(
    input_shape=IMAGE_SIZE + (3,),  # RGB input at the configured resolution
    include_top=False,              # drop the ImageNet classifier head
    weights='imagenet',
)
base_model.trainable = False  # transfer learning: only the new head is trained

head = GlobalAveragePooling2D()(base_model.output)
head = Dense(512, activation='relu')(head)
head = Dropout(0.5)(head)  # regularization to reduce overfitting
outputs = Dense(NUM_CLASSES, activation='softmax')(head)

model = Model(inputs=base_model.input, outputs=outputs)
model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',  # matches class_mode='categorical' labels
    metrics=['accuracy'],
)
print("\n--- Starting Model Training ---")
# Train the classification head. Step counts are intentionally NOT passed:
# Keras derives them from len(generator), which uses ceil(samples / batch),
# so every image is seen each epoch. The previous explicit floor division
# (samples // BATCH_SIZE) dropped the remainder batch every epoch and —
# worse — when a split is smaller than BATCH_SIZE (the standard chest_xray
# 'val' split has only 16 images) it yielded validation_steps=0 and
# silently skipped validation entirely.
history = model.fit(
    train_generator,
    epochs=EPOCHS,
    validation_data=validation_generator,
)
print("\n--- Evaluating Model on Test Data ---")
# Evaluate over the entire test generator. Passing
# steps=test_generator.samples // BATCH_SIZE (as before) floors the count
# and skips the final partial batch — up to BATCH_SIZE - 1 test images
# would be excluded from the reported accuracy.
loss, acc = model.evaluate(test_generator)
print(f"Final Test Accuracy: {acc*100:.2f}%")

# Persist the trained model in the native Keras format.
model.save(MODEL_FILENAME)
print(f"\nModel saved successfully as {MODEL_FILENAME}")
# --- Training curves: accuracy and loss, train vs. validation -------------
plt.figure(figsize=(12, 4))
panels = (
    ('accuracy', 'Accuracy', 'Training and Validation Accuracy'),
    ('loss', 'Loss', 'Training and Validation Loss'),
)
for position, (key, ylabel, title) in enumerate(panels, start=1):
    plt.subplot(1, 2, position)
    plt.plot(history.history[key], label=f'Training {ylabel}')
    plt.plot(history.history[f'val_{key}'], label=f'Validation {ylabel}')
    plt.title(title)
    plt.xlabel('Epoch')
    plt.ylabel(ylabel)
    plt.legend()
plt.show()