Spaces:
Sleeping
Sleeping
File size: 3,768 Bytes
4b13ee5 0b000d9 cb3ce56 4b13ee5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 |
import os
import cv2
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
from huggingface_hub import push_to_hub_keras
from sklearn.model_selection import train_test_split
from sklearn.utils.class_weight import compute_class_weight
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Hugging Face access token pulled from the environment (None if unset).
sac = os.getenv('accesstoken')

# Classes the model distinguishes, plus a name -> integer-label lookup
# used both when loading folders and when interpreting predictions.
class_names = ['buildings', 'forest', 'glacier']
class_names_label = dict(zip(class_names, range(len(class_names))))

# Target (width, height) every image is resized to before training.
IMAGE_SIZE = (150, 150)
def load_data(directory="imgdataset"):
    """Load the train/test image folders into (images, labels) arrays.

    Expects ``directory`` to contain ``seg_train`` and ``seg_test``
    sub-directories, each holding one folder per class name listed in
    ``class_names``.

    Parameters
    ----------
    directory : str
        Dataset root; defaults to the previously hard-coded path, so
        existing calls ``load_data()`` behave as before.

    Returns
    -------
    list
        ``[(train_images, train_labels), (test_images, test_labels)]``
        where images are float32 arrays of shape (n, 150, 150, 3) in RGB
        order and labels are int32 arrays of class indices.
    """
    categories = ["seg_train", "seg_test"]
    output = []
    for category in categories:
        path = os.path.join(directory, category)
        images = []
        labels = []
        print("Loading {}".format(category))
        for folder in os.listdir(path):
            # Skip stray entries that are not known classes (e.g. .DS_Store,
            # or extra dataset classes not in class_names) — previously this
            # raised KeyError.
            if folder not in class_names_label:
                continue
            label = class_names_label[folder]
            folder_path = os.path.join(path, folder)
            for file in os.listdir(folder_path):
                img_path = os.path.join(folder_path, file)
                image = cv2.imread(img_path)
                # cv2.imread returns None for unreadable/non-image files;
                # skip them instead of crashing in cvtColor.
                if image is None:
                    continue
                # OpenCV loads BGR; convert to RGB and resize to the
                # model's fixed input size.
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                image = cv2.resize(image, IMAGE_SIZE)
                images.append(image)
                labels.append(label)
        images = np.array(images, dtype='float32')
        labels = np.array(labels, dtype='int32')
        output.append((images, labels))
    return output
# Load the dataset and shuffle the training split deterministically.
(train_images, train_labels), (test_images, test_labels) = load_data()
train_images, train_labels = shuffle(train_images, train_labels, random_state=25)

# Scale pixel values from [0, 255] to [0, 1]; the raw uint8 range was
# previously fed to the network unnormalized, which hurts convergence.
# Test images get the same scaling so evaluation sees the distribution
# the model was trained on.
train_images /= 255.0
test_images /= 255.0

# Hold out 20% of the training set for validation.
train_images, val_images, train_labels, val_labels = train_test_split(
    train_images, train_labels, test_size=0.2, random_state=42
)

# On-the-fly data augmentation, applied to the training stream only.
datagen = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest'
)

# Per-class weights to compensate for class imbalance in the training set.
class_weights = compute_class_weight(
    class_weight='balanced', classes=np.unique(train_labels), y=train_labels
)
# Model architecture: four Conv/MaxPool stages followed by a dense head.
# Filter counts grow (32 -> 64 -> 128 -> 128) as spatial size shrinks.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu',
                                 input_shape=(150, 150, 3)))
model.add(tf.keras.layers.MaxPooling2D(2, 2))
for n_filters in (64, 128, 128):
    model.add(tf.keras.layers.Conv2D(n_filters, (3, 3), activation='relu'))
    model.add(tf.keras.layers.MaxPooling2D(2, 2))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(512, activation='relu'))
# Softmax over the three classes defined in class_names.
model.add(tf.keras.layers.Dense(3, activation='softmax'))

# Sparse categorical cross-entropy matches the integer (non-one-hot) labels.
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Train on augmented batches; validation uses the untouched hold-out set.
batch_size = 32
epochs = 10
train_stream = datagen.flow(train_images, train_labels, batch_size=batch_size)
# compute_class_weight returns an array ordered by class index, so
# enumerate() yields the {class_index: weight} mapping fit() expects.
weight_by_class = dict(enumerate(class_weights))
history = model.fit(
    train_stream,
    steps_per_epoch=len(train_images) // batch_size,
    epochs=epochs,
    validation_data=(val_images, val_labels),
    class_weight=weight_by_class
)
# Report loss/accuracy on the held-out test split.
model.evaluate(test_images, test_labels)

# Persist the trained model locally in the native Keras format.
model.save("model_with_augmentation.keras")

# Publish to the Hugging Face Hub, authenticated via the env token (sac).
push_to_hub_keras(
    model,
    repo_id="okeowo1014/imageaugmentationa",
    tags=["image-classifier", "data-augmentation", "class-weights"],
    commit_message="Model with data augmentation and class weights",
    include_optimizer=True,
    token=sac,
)
|