# Hugging Face Spaces scrape residue (Space status: Sleeping) — not part of the script.
| import os | |
| import cv2 | |
| import numpy as np | |
| import tensorflow as tf | |
| from sklearn.utils import shuffle | |
| from huggingface_hub import push_to_hub_keras | |
| from sklearn.model_selection import train_test_split | |
| from sklearn.utils.class_weight import compute_class_weight | |
| from tensorflow.keras.preprocessing.image import ImageDataGenerator | |
# Hugging Face access token, supplied via the environment.
sac = os.getenv('accesstoken')

# Target categories; a class name's position in this list is its integer label.
class_names = ['buildings', 'forest', 'glacier']
class_names_label = {name: idx for idx, name in enumerate(class_names)}

# Every image is resized to this (width, height) before use.
IMAGE_SIZE = (150, 150)
def load_data(directory="imgdataset"):
    """Load the train and test image sets from *directory*.

    Expects the layout ``<directory>/<seg_train|seg_test>/<class_name>/<file>``
    where ``<class_name>`` is one of ``class_names``.

    Args:
        directory: Root folder containing the ``seg_train`` and ``seg_test``
            splits. Defaults to ``"imgdataset"`` (the original hard-coded path).

    Returns:
        A list of two ``(images, labels)`` pairs — train split first, then
        test — where ``images`` is a float32 array of RGB pixels resized to
        ``IMAGE_SIZE`` (values left in [0, 255], not normalized) and
        ``labels`` is an int32 array of class indices.
    """
    categories = ["seg_train", "seg_test"]
    output = []
    for category in categories:
        path = os.path.join(directory, category)
        images = []
        labels = []
        print("Loading {}".format(category))
        for folder in os.listdir(path):
            # Skip stray entries (e.g. hidden dirs) that are not a known class;
            # the original raised KeyError on any unexpected folder name.
            if folder not in class_names_label:
                continue
            label = class_names_label[folder]
            folder_path = os.path.join(path, folder)
            for file in os.listdir(folder_path):
                img_path = os.path.join(folder_path, file)
                image = cv2.imread(img_path)
                if image is None:
                    # cv2.imread returns None for unreadable/corrupt files;
                    # the original crashed in cvtColor on such files.
                    continue
                # OpenCV loads BGR; convert to RGB before resizing.
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                image = cv2.resize(image, IMAGE_SIZE)
                images.append(image)
                labels.append(label)
        output.append((np.array(images, dtype='float32'),
                       np.array(labels, dtype='int32')))
    return output
# Pull in the train and held-out test sets from disk.
(train_images, train_labels), (test_images, test_labels) = load_data()

# Break up any on-disk class ordering before splitting.
train_images, train_labels = shuffle(train_images, train_labels, random_state=25)

# Reserve 20% of the shuffled training data as a validation set.
train_images, val_images, train_labels, val_labels = train_test_split(
    train_images,
    train_labels,
    test_size=0.2,
    random_state=42,
)
# On-the-fly data augmentation applied to training batches only.
# NOTE(review): there is no rescale here and load_data() does not normalize,
# so the network trains on raw [0, 255] pixel values — confirm this is intended.
datagen = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest',
)

# Balanced per-class weights to counter class imbalance in the training set.
# (Removed the stale commented-out positional-argument call it superseded.)
class_weights = compute_class_weight(
    class_weight='balanced',
    classes=np.unique(train_labels),
    y=train_labels,
)
# CNN: four Conv2D/MaxPooling2D stages, then a dense classifier head
# ending in a 3-way softmax (one unit per class).
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu',
                                 input_shape=(150, 150, 3)))
model.add(tf.keras.layers.MaxPooling2D(2, 2))
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(2, 2))
model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(2, 2))
model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(2, 2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(512, activation='relu'))
model.add(tf.keras.layers.Dense(3, activation='softmax'))

# Sparse categorical cross-entropy matches the integer labels from load_data().
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Training configuration.
batch_size = 32
epochs = 10

# Stream augmented batches from the generator; validation data is untouched.
augmented_batches = datagen.flow(train_images, train_labels, batch_size=batch_size)
weight_by_class = dict(enumerate(class_weights))

history = model.fit(
    augmented_batches,
    steps_per_epoch=len(train_images) // batch_size,
    epochs=epochs,
    validation_data=(val_images, val_labels),
    class_weight=weight_by_class,
)

# Final loss/accuracy on the held-out test set.
model.evaluate(test_images, test_labels)

# Persist the trained model in the native Keras format.
model.save("model_with_augmentation.keras")
# Publish the trained model to the Hugging Face Hub repository,
# authenticating with the token read from the environment at startup.
push_to_hub_keras(
    model,
    repo_id="okeowo1014/imageaugmentationa",
    commit_message="Model with data augmentation and class weights",
    tags=["image-classifier", "data-augmentation", "class-weights"],
    include_optimizer=True,
    token=sac,
)