mFaizann137 committed
Commit d931164 · verified · 1 Parent(s): d81ae0a

Upload 4 files

house_price_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8acc22df8f21e40119fee45e08cb8575ee66d063c8768ad2cea77197a74868e
+ size 76800
house_price_prediction.py ADDED
@@ -0,0 +1,221 @@
+ # Assignment 1: House Price Prediction using Deep Learning
+ # This program predicts house prices based on various features like size, bedrooms, location, etc.
+
+ # Step 1: Import all necessary libraries
+ import numpy as np # For numerical calculations
+ import pandas as pd # For handling data in table format
+ import matplotlib.pyplot as plt # For creating graphs
+ import seaborn as sns # For better looking graphs
+ from sklearn.model_selection import train_test_split # To split data for training and testing
+ from sklearn.preprocessing import StandardScaler # To normalize our data
+ from sklearn.datasets import fetch_california_housing # Built-in housing dataset
+ from tensorflow import keras # Deep learning library
+ from tensorflow.keras import layers # To build neural network layers
+ import warnings
+ warnings.filterwarnings('ignore') # Hide unnecessary warnings
+
+ # Step 2: Load the California Housing Dataset
+ # This is a famous dataset that's built into sklearn - no need to download separately!
+ print("Loading the California Housing Dataset...")
+ housing = fetch_california_housing() # Get the dataset
+ X = housing.data # Features (things we know about houses)
+ y = housing.target # Target (actual prices we want to predict)
+
+ # Convert to pandas DataFrame for easier handling
+ feature_names = housing.feature_names
+ df = pd.DataFrame(X, columns=feature_names)
+ df['Price'] = y
+
+ # Step 3: Explore the data
+ print("\n=== Dataset Information ===")
+ print(f"Total number of houses: {len(df)}")
+ print(f"Number of features: {len(feature_names)}")
+ print("\nFeatures in our dataset:")
+ for i, feature in enumerate(feature_names, 1):
+     print(f"{i}. {feature}")
+
+ print("\n=== First 5 houses in our dataset ===")
+ print(df.head())
+
+ print("\n=== Basic Statistics ===")
+ print(df.describe())
+
+ # Step 4: Create visualizations to understand our data
+ print("\nCreating visualizations...")
+
+ # Create a figure with multiple plots
+ fig, axes = plt.subplots(2, 2, figsize=(12, 10))
+
+ # Plot 1: Distribution of house prices
+ axes[0, 0].hist(df['Price'], bins=50, color='blue', alpha=0.7)
+ axes[0, 0].set_title('Distribution of House Prices')
+ axes[0, 0].set_xlabel('Price (in hundreds of thousands)')
+ axes[0, 0].set_ylabel('Number of Houses')
+
+ # Plot 2: Relationship between house age and price
+ axes[0, 1].scatter(df['HouseAge'], df['Price'], alpha=0.5, color='green')
+ axes[0, 1].set_title('House Age vs Price')
+ axes[0, 1].set_xlabel('House Age (years)')
+ axes[0, 1].set_ylabel('Price')
+
+ # Plot 3: Relationship between rooms and price
+ axes[1, 0].scatter(df['AveRooms'], df['Price'], alpha=0.5, color='red')
+ axes[1, 0].set_title('Average Rooms vs Price')
+ axes[1, 0].set_xlabel('Average Rooms')
+ axes[1, 0].set_ylabel('Price')
+
+ # Plot 4: Relationship between income and price
+ axes[1, 1].scatter(df['MedInc'], df['Price'], alpha=0.5, color='purple')
+ axes[1, 1].set_title('Median Income vs Price')
+ axes[1, 1].set_xlabel('Median Income')
+ axes[1, 1].set_ylabel('Price')
+
+ plt.tight_layout()
+ plt.savefig('house_data_exploration.png')
+ plt.show()
+
+ # Step 5: Prepare data for the deep learning model
+ print("\n=== Preparing Data for Model ===")
+
+ # Split features (X) and target (y)
+ X = df.drop('Price', axis=1).values # Everything except price
+ y = df['Price'].values # Just the prices
+
+ # Split into training set (80%) and testing set (20%)
+ # Training set: Used to teach the model
+ # Testing set: Used to check how well the model learned
+ X_train, X_test, y_train, y_test = train_test_split(
+     X, y, test_size=0.2, random_state=42
+ )
+
+ print(f"Training set size: {len(X_train)} houses")
+ print(f"Testing set size: {len(X_test)} houses")
+
+ # Step 6: Normalize the data
+ # This makes all features have similar scales (important for neural networks)
+ scaler = StandardScaler()
+ X_train_scaled = scaler.fit_transform(X_train) # Learn the scaling from training data
+ X_test_scaled = scaler.transform(X_test) # Apply same scaling to test data
+
+ # Step 7: Build the Deep Learning Model
+ print("\n=== Building Deep Learning Model ===")
+
+ # Create a Sequential model (layers stacked one after another)
+ model = keras.Sequential([
+     # Hidden layer 1 - 64 neurons; input_shape=[8] matches our 8 input features
+     layers.Dense(64, activation='relu', input_shape=[8]),
+     # Dropout after hidden layer 1
+     layers.Dropout(0.2), # Prevents overfitting by randomly turning off neurons
+
+     # Hidden layer 2 - 32 neurons
+     layers.Dense(32, activation='relu'),
+     layers.Dropout(0.2),
+
+     # Hidden layer 3 - 16 neurons
+     layers.Dense(16, activation='relu'),
+
+     # Output layer - 1 neuron for the predicted price
+     layers.Dense(1)
+ ])
+
+ # Step 8: Compile the model
+ # This tells the model how to learn
+ model.compile(
+     optimizer='adam', # Algorithm for learning (Adam is very popular)
+     loss='mse', # Mean Squared Error - measures how wrong our predictions are
+     metrics=['mae'] # Mean Absolute Error - average difference from actual price
+ )
+
+ # Print model architecture
+ print("\nModel Architecture:")
+ model.summary()
+
+ # Step 9: Train the model
+ print("\n=== Training the Model ===")
+ print("This may take 1-2 minutes...")
+
+ # Train the model
+ history = model.fit(
+     X_train_scaled, # Training features
+     y_train, # Training prices
+     epochs=100, # Number of times to go through all data
+     batch_size=32, # Number of samples to process at once
+     validation_split=0.2, # Use 20% of training data for validation
+     verbose=0 # Don't print progress for each epoch
+ )
+
+ print("Training completed!")
+
+ # Step 10: Visualize the training process
+ print("\nCreating training visualizations...")
+
+ fig, axes = plt.subplots(1, 2, figsize=(12, 4))
+
+ # Plot training & validation loss
+ axes[0].plot(history.history['loss'], label='Training Loss')
+ axes[0].plot(history.history['val_loss'], label='Validation Loss')
+ axes[0].set_title('Model Loss During Training')
+ axes[0].set_xlabel('Epoch')
+ axes[0].set_ylabel('Loss (MSE)')
+ axes[0].legend()
+
+ # Plot training & validation MAE
+ axes[1].plot(history.history['mae'], label='Training MAE')
+ axes[1].plot(history.history['val_mae'], label='Validation MAE')
+ axes[1].set_title('Model MAE During Training')
+ axes[1].set_xlabel('Epoch')
+ axes[1].set_ylabel('Mean Absolute Error')
+ axes[1].legend()
+
+ plt.tight_layout()
+ plt.savefig('training_history.png')
+ plt.show()
+
+ # Step 11: Evaluate the model on test data
+ print("\n=== Model Evaluation ===")
+ test_loss, test_mae = model.evaluate(X_test_scaled, y_test, verbose=0)
+ print(f"Test Loss (MSE): {test_loss:.4f}")
+ print(f"Test MAE: {test_mae:.4f}")
+ print(f"This means our predictions are off by ${test_mae*100000:.2f} on average")
+
+ # Step 12: Make predictions and visualize results
+ print("\n=== Making Predictions ===")
+ predictions = model.predict(X_test_scaled, verbose=0)
+
+ # Plot actual vs predicted prices
+ plt.figure(figsize=(10, 6))
+ plt.scatter(y_test, predictions, alpha=0.5)
+ plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'r--', lw=2)
+ plt.xlabel('Actual Price')
+ plt.ylabel('Predicted Price')
+ plt.title('Actual vs Predicted House Prices')
+ plt.savefig('predictions.png')
+ plt.show()
+
+ # Step 13: Show some example predictions
+ print("\n=== Sample Predictions ===")
+ print("Comparing actual prices with our model's predictions:\n")
+ for i in range(5):
+     actual = y_test[i] * 100000
+     predicted = predictions[i][0] * 100000
+     difference = abs(actual - predicted)
+     print(f"House {i+1}:")
+     print(f" Actual Price: ${actual:,.2f}")
+     print(f" Predicted Price: ${predicted:,.2f}")
+     print(f" Difference: ${difference:,.2f}")
+     print()
+
+ # Step 14: Save the model
+ print("=== Saving the Model ===")
+ model.save('house_price_model.h5')
+ print("Model saved as 'house_price_model.h5'")
+
+ # Step 15: Calculate accuracy metrics
+ from sklearn.metrics import r2_score
+ r2 = r2_score(y_test, predictions)
+ print(f"\n=== Final Model Performance ===")
+ print(f"R² Score: {r2:.4f}")
+ print(f"This means our model explains {r2*100:.2f}% of the price variations")
+
+ print("\n✅ Assignment 1 Complete! Your deep learning model is ready!")
+ print("Files created: house_price_model.h5, house_data_exploration.png, training_history.png, predictions.png")
image_classification.py ADDED
@@ -0,0 +1,290 @@
+ # Assignment 2: Image Classification using Deep Learning (Computer Vision)
+ # This program classifies images into different categories using a Convolutional Neural Network (CNN)
+
+ # Step 1: Import all necessary libraries
+ import numpy as np # For numerical calculations
+ import matplotlib.pyplot as plt # For showing images and graphs
+ import tensorflow as tf # Main deep learning library
+ from tensorflow import keras # High-level API for building models
+ from tensorflow.keras import layers # For creating neural network layers
+ from tensorflow.keras.datasets import cifar10 # Built-in image dataset
+ from tensorflow.keras.utils import to_categorical # For preparing labels
+ import warnings
+ warnings.filterwarnings('ignore') # Hide unnecessary warnings
+
+ # Step 2: Load the CIFAR-10 Dataset
+ # CIFAR-10 is a famous dataset with 60,000 small color images in 10 classes
+ print("=== Loading CIFAR-10 Dataset ===")
+ print("This dataset contains 60,000 32x32 color images in 10 categories")
+ print("Downloading dataset (this may take a minute on first run)...\n")
+
+ # Load the data - it's automatically split into training and testing sets
+ (X_train, y_train), (X_test, y_test) = cifar10.load_data()
+
+ # Define the 10 classes in CIFAR-10
+ class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
+                'dog', 'frog', 'horse', 'ship', 'truck']
+
+ # Step 3: Explore the dataset
+ print("=== Dataset Information ===")
+ print(f"Training images: {X_train.shape[0]}")
+ print(f"Testing images: {X_test.shape[0]}")
+ print(f"Image shape: {X_train.shape[1:]} (32x32 pixels, 3 color channels for RGB)")
+ print(f"Number of classes: {len(class_names)}")
+ print(f"Classes: {', '.join(class_names)}\n")
+
+ # Step 4: Visualize sample images from the dataset
+ print("=== Visualizing Sample Images ===")
+ fig, axes = plt.subplots(3, 5, figsize=(12, 8))
+ fig.suptitle('Sample Images from CIFAR-10 Dataset', fontsize=16)
+
+ for i in range(15):
+     # Select random image
+     idx = np.random.randint(0, len(X_train))
+     image = X_train[idx]
+     label = class_names[y_train[idx][0]]
+
+     # Plot the image
+     ax = axes[i // 5, i % 5]
+     ax.imshow(image)
+     ax.set_title(f'Class: {label}')
+     ax.axis('off')
+
+ plt.tight_layout()
+ plt.savefig('sample_images.png')
+ plt.show()
+
+ # Step 5: Preprocess the data
+ print("\n=== Preprocessing Data ===")
+
+ # Normalize pixel values to be between 0 and 1 (instead of 0-255)
+ # This helps the neural network learn better
+ X_train = X_train.astype('float32') / 255.0
+ X_test = X_test.astype('float32') / 255.0
+ print("✓ Normalized pixel values to range [0, 1]")
+
+ # Convert labels to categorical (one-hot encoding)
+ # Example: label 3 becomes [0,0,0,1,0,0,0,0,0,0]
+ y_train_categorical = to_categorical(y_train, 10)
+ y_test_categorical = to_categorical(y_test, 10)
+ print("✓ Converted labels to categorical format")
+
+ # Step 6: Build the Convolutional Neural Network (CNN)
+ print("\n=== Building CNN Model ===")
+ print("Creating a Convolutional Neural Network for image classification...")
+
+ # Create the model
+ model = keras.Sequential([
+     # First Convolutional Block
+     # Conv2D layer: Detects features like edges and shapes
+     layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
+     layers.BatchNormalization(), # Normalizes the outputs to improve training
+     layers.Conv2D(32, (3, 3), activation='relu'),
+     layers.BatchNormalization(),
+     layers.MaxPooling2D((2, 2)), # Reduces image size while keeping important features
+     layers.Dropout(0.25), # Prevents overfitting
+
+     # Second Convolutional Block
+     # These layers detect more complex features
+     layers.Conv2D(64, (3, 3), activation='relu'),
+     layers.BatchNormalization(),
+     layers.Conv2D(64, (3, 3), activation='relu'),
+     layers.BatchNormalization(),
+     layers.MaxPooling2D((2, 2)),
+     layers.Dropout(0.25),
+
+     # Third Convolutional Block
+     # These layers detect even more complex patterns
+     layers.Conv2D(128, (3, 3), activation='relu'),
+     layers.BatchNormalization(),
+     layers.MaxPooling2D((2, 2)),
+     layers.Dropout(0.25),
+
+     # Flatten and Dense Layers
+     layers.Flatten(), # Convert 2D features to 1D for classification
+     layers.Dense(128, activation='relu'), # Fully connected layer
+     layers.BatchNormalization(),
+     layers.Dropout(0.5),
+     layers.Dense(10, activation='softmax') # Output layer with 10 classes
+ ])
+
+ # Step 7: Compile the model
+ print("\nCompiling the model...")
+ model.compile(
+     optimizer='adam', # Optimization algorithm
+     loss='categorical_crossentropy', # Loss function for multi-class classification
+     metrics=['accuracy'] # Track accuracy during training
+ )
+
+ # Display model architecture
+ print("\nModel Architecture:")
+ model.summary()
+ print(f"\nTotal parameters: {model.count_params():,}")
+
+ # Step 8: Set up data augmentation (optional; defined here but not applied during training below)
+ print("\n=== Setting up Data Augmentation ===")
+ print("Data augmentation creates variations of images to improve model generalization")
+
+ # Create data augmentation layers (for reference; model.fit below uses the raw training images)
+ data_augmentation = keras.Sequential([
+     layers.RandomFlip("horizontal"), # Randomly flip images horizontally
+     layers.RandomRotation(0.1), # Randomly rotate images
+     layers.RandomZoom(0.1), # Randomly zoom images
+ ])
+
+ # Step 9: Train the model
+ print("\n=== Training the Model ===")
+ print("This will take 3-5 minutes depending on your computer...")
+ print("The model will learn to recognize patterns in the images\n")
+
+ # Use callbacks for better training
+ early_stopping = keras.callbacks.EarlyStopping(
+     monitor='val_loss',
+     patience=10,
+     restore_best_weights=True
+ )
+
+ reduce_lr = keras.callbacks.ReduceLROnPlateau(
+     monitor='val_loss',
+     factor=0.5,
+     patience=5,
+     min_lr=0.00001
+ )
+
+ # Train the model
+ history = model.fit(
+     X_train, y_train_categorical,
+     batch_size=64, # Number of images to process at once
+     epochs=30, # Number of times to go through the entire dataset
+     validation_data=(X_test, y_test_categorical), # Test data for validation
+     callbacks=[early_stopping, reduce_lr], # Training helpers
+     verbose=1 # Show progress bar
+ )
+
+ print("\n✓ Training completed!")
+
+ # Step 10: Visualize training history
+ print("\n=== Visualizing Training History ===")
+
+ fig, axes = plt.subplots(1, 2, figsize=(12, 4))
+
+ # Plot accuracy
+ axes[0].plot(history.history['accuracy'], label='Training Accuracy')
+ axes[0].plot(history.history['val_accuracy'], label='Validation Accuracy')
+ axes[0].set_title('Model Accuracy')
+ axes[0].set_xlabel('Epoch')
+ axes[0].set_ylabel('Accuracy')
+ axes[0].legend()
+ axes[0].grid(True)
+
+ # Plot loss
+ axes[1].plot(history.history['loss'], label='Training Loss')
+ axes[1].plot(history.history['val_loss'], label='Validation Loss')
+ axes[1].set_title('Model Loss')
+ axes[1].set_xlabel('Epoch')
+ axes[1].set_ylabel('Loss')
+ axes[1].legend()
+ axes[1].grid(True)
+
+ plt.tight_layout()
+ plt.savefig('training_history_cv.png')
+ plt.show()
+
+ # Step 11: Evaluate the model
+ print("\n=== Model Evaluation ===")
+ test_loss, test_accuracy = model.evaluate(X_test, y_test_categorical, verbose=0)
+ print(f"Test Accuracy: {test_accuracy*100:.2f}%")
+ print(f"Test Loss: {test_loss:.4f}")
+
+ # Step 12: Make predictions and show results
+ print("\n=== Making Predictions on Test Images ===")
+
+ # Get predictions for test set
+ predictions = model.predict(X_test[:20], verbose=0)
+
+ # Visualize predictions
+ fig, axes = plt.subplots(4, 5, figsize=(15, 12))
+ fig.suptitle('Model Predictions on Test Images', fontsize=16)
+
+ for i in range(20):
+     # Get image and predictions
+     image = X_test[i]
+     true_label = class_names[y_test[i][0]]
+     predicted_label = class_names[np.argmax(predictions[i])]
+     confidence = np.max(predictions[i]) * 100
+
+     # Plot image
+     ax = axes[i // 5, i % 5]
+     ax.imshow(image)
+
+     # Color code: green for correct, red for incorrect
+     color = 'green' if true_label == predicted_label else 'red'
+     ax.set_title(f'True: {true_label}\nPred: {predicted_label}\nConf: {confidence:.1f}%',
+                  color=color, fontsize=10)
+     ax.axis('off')
+
+ plt.tight_layout()
+ plt.savefig('predictions_cv.png')
+ plt.show()
+
+ # Step 13: Create confusion matrix
+ print("\n=== Creating Confusion Matrix ===")
+ from sklearn.metrics import confusion_matrix, classification_report
+ import seaborn as sns
+
+ # Get predictions for entire test set
+ y_pred = model.predict(X_test, verbose=0)
+ y_pred_classes = np.argmax(y_pred, axis=1)
+ y_true_classes = y_test.reshape(-1)
+
+ # Create confusion matrix
+ cm = confusion_matrix(y_true_classes, y_pred_classes)
+
+ # Plot confusion matrix
+ plt.figure(figsize=(10, 8))
+ sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
+             xticklabels=class_names, yticklabels=class_names)
+ plt.title('Confusion Matrix')
+ plt.xlabel('Predicted Label')
+ plt.ylabel('True Label')
+ plt.savefig('confusion_matrix.png')
+ plt.show()
+
+ # Step 14: Print classification report
+ print("\n=== Classification Report ===")
+ print(classification_report(y_true_classes, y_pred_classes, target_names=class_names))
+
+ # Step 15: Save the model
+ print("\n=== Saving the Model ===")
+ model.save('image_classifier_model.h5')
+ print("Model saved as 'image_classifier_model.h5'")
+
+ # Step 16: Test with a single image
+ print("\n=== Testing with a Single Image ===")
+
+ # Pick a random test image
+ test_idx = np.random.randint(0, len(X_test))
+ test_image = X_test[test_idx]
+ test_label = class_names[y_test[test_idx][0]]
+
+ # Make prediction
+ single_prediction = model.predict(test_image.reshape(1, 32, 32, 3), verbose=0)
+ predicted_class = class_names[np.argmax(single_prediction)]
+ confidence = np.max(single_prediction) * 100
+
+ # Display the image and prediction
+ plt.figure(figsize=(6, 6))
+ plt.imshow(test_image)
+ plt.title(f'Actual: {test_label}\nPredicted: {predicted_class}\nConfidence: {confidence:.2f}%')
+ plt.axis('off')
+ plt.savefig('single_prediction.png')
+ plt.show()
+
+ print(f"Actual class: {test_label}")
+ print(f"Predicted class: {predicted_class}")
+ print(f"Confidence: {confidence:.2f}%")
+
+ print("\n✅ Assignment 2 Complete! Your computer vision model is ready!")
+ print("Files created: image_classifier_model.h5, sample_images.png, training_history_cv.png,")
+ print(" predictions_cv.png, confusion_matrix.png, single_prediction.png")
+ print("\nYour model can now classify images into 10 different categories!")
image_classifier_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37bd8b9196513b614e1b5d632b9c880b4b81d411d76a426e452553313b02f193
+ size 2017456