d-e-e-k-11 committed on
Commit
294928d
·
verified ·
1 Parent(s): a0d2afd

Upload folder using huggingface_hub

Browse files
Files changed (8) hide show
  1. .dockerignore +13 -0
  2. Dockerfile +2 -2
  3. README.md +47 -10
  4. check_libs.py +25 -0
  5. deploy_to_hf.py +56 -0
  6. implementation_plan.md +39 -0
  7. train_cifar10.py +131 -0
  8. training_plot.png +0 -0
.dockerignore ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __pycache__
2
+ *.pyc
3
+ *.pyo
4
+ *.pyd
5
+ .git
6
+ .gitignore
7
+ .env
8
+ venv
9
+ data/
10
+ training_plot.png
11
+ check_libs.py
12
+ implementation_plan.md
13
+ train_cifar10.py
Dockerfile CHANGED
@@ -16,8 +16,8 @@ COPY requirements.txt .
16
  # Install any needed packages specified in requirements.txt
17
  RUN pip install --no-cache-dir -r requirements.txt
18
 
19
- # Copy the application code and model
20
- COPY web_app/ .
21
 
22
  # Create uploads directory
23
  RUN mkdir -p uploads && chmod 777 uploads
 
16
  # Install any needed packages specified in requirements.txt
17
  RUN pip install --no-cache-dir -r requirements.txt
18
 
19
+ # Copy everything from the current directory (where Dockerfile is)
20
+ COPY . .
21
 
22
  # Create uploads directory
23
  RUN mkdir -p uploads && chmod 777 uploads
README.md CHANGED
@@ -1,10 +1,47 @@
1
- ---
2
- title: CNN2
3
- emoji: 🦀
4
- colorFrom: pink
5
- colorTo: purple
6
- sdk: docker
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # CIFAR-10 CNN Classifier
2
+
3
+ This project implements a Convolutional Neural Network (CNN) to classify images from the CIFAR-10 dataset into 10 categories.
4
+
5
+ ## Dataset
6
+ The CIFAR-10 dataset consists of 60,000 32x32 color images in 10 classes, with 6,000 images per class. There are 50,000 training images and 10,000 test images.
7
+
8
+ ## Model Architecture
9
+ The CNN uses a multi-block architecture:
10
+ - 3 Convolutional Blocks:
11
+ - 2x Conv2D layers with ReLU activation
12
+ - Batch Normalization
13
+ - Max Pooling
14
+ - Dropout for regularization
15
+ - Flattened layer
16
+ - Dense hidden layer (128 units)
17
+ - Output layer (10 units with Softmax)
18
+
19
+ ## Setup and Usage
20
+ 1. Install dependencies:
21
+ ```bash
22
+ pip install -r requirements.txt
23
+ ```
24
+ 2. Run the training script:
25
+ ```bash
26
+ python train_cifar10.py
27
+ ```
28
+
29
+ ## Files
30
+ - `train_cifar10.py`: The main training and evaluation script.
31
+ - `web_app/`: Complete web application for interactive inference.
32
+ - `server.py`: Flask backend.
33
+ - `static/`, `templates/`: Frontend assets.
34
+ - `implementation_plan.md`: Detailed plan of the implementation.
35
+ - `requirements.txt`: Python package dependencies.
36
+
37
+ ## Web Application
38
+ To run the interactive vision tool:
39
+ 1. Navigate to the web app directory:
40
+ ```bash
41
+ cd web_app
42
+ ```
43
+ 2. Start the server:
44
+ ```bash
45
+ python server.py
46
+ ```
47
+ 3. Open `http://127.0.0.1:5000` in your browser.
check_libs.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ print(f"Python version: {sys.version}")
3
+ try:
4
+ import tensorflow as tf
5
+ print(f"TensorFlow version: {tf.__version__}")
6
+ except Exception as e:
7
+ print(f"TensorFlow import failed: {e}")
8
+
9
+ try:
10
+ import keras
11
+ print(f"Keras version: {keras.__version__}")
12
+ except Exception as e:
13
+ print(f"Keras import failed: {e}")
14
+
15
+ try:
16
+ from tensorflow import keras as tf_keras
17
+ print("from tensorflow import keras successful")
18
+ except Exception as e:
19
+ print(f"from tensorflow import keras failed: {e}")
20
+
21
+ try:
22
+ import torch
23
+ print(f"PyTorch version: {torch.__version__}")
24
+ except Exception as e:
25
+ print(f"PyTorch import failed: {e}")
deploy_to_hf.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from huggingface_hub import HfApi, login
import os
import sys


def deploy():
    """Create (or reuse) the `<user>/CNN2` Docker Space and upload this folder.

    Authentication first tries the token already cached by huggingface_hub;
    if that fails, it interactively prompts for a WRITE token and logs in.
    Returns early if authentication cannot be established.
    """
    hub = HfApi()

    # --- Authenticate ---------------------------------------------------
    try:
        account = hub.whoami()
        print(f"Logged in as: {account['name']}")
    except Exception as err:
        print(f"Authentication failed: {err}")
        print("Please provide a valid WRITE token from https://huggingface.co/settings/tokens")
        raw_token = input("Enter your Hugging Face Write Token: ")
        try:
            login(token=raw_token, add_to_git_credential=True)
            account = hub.whoami()
            print(f"Successfully logged in as: {account['name']}")
        except Exception as retry_err:
            print(f"Login failed even with token: {retry_err}")
            return

    repo_id = f"{account['name']}/CNN2"
    print(f"Creating/Checking Space: {repo_id}")

    # --- Create the Space and push the working tree ---------------------
    try:
        hub.create_repo(
            repo_id=repo_id,
            repo_type="space",
            space_sdk="docker",
            exist_ok=True,
        )
        print(f"Space {repo_id} is ready.")

        print("Uploading files...")
        # NOTE(review): `web_app*` is excluded here while the Dockerfile does
        # `COPY . .` -- confirm the Space does not need the web_app/ contents.
        hub.upload_folder(
            folder_path=".",
            repo_id=repo_id,
            repo_type="space",
            ignore_patterns=[".git*", "data*", "__pycache__*", "venv*", "*.pyc", "web_app*"],
        )

        print(f"\nDeployment Successful!")
        print(f"View your Space at: https://huggingface.co/spaces/{repo_id}")

    except Exception as err:
        print(f"Deployment failed: {err}")


if __name__ == "__main__":
    deploy()
implementation_plan.md ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Implementation Plan - CIFAR-10 CNN Classifier
2
+
3
+ This plan outlines the steps to build, train, and evaluate a Convolutional Neural Network (CNN) for the CIFAR-10 dataset.
4
+
5
+ ## 1. Environment Setup
6
+ - Verify installation of `torch`, `torchvision`, `matplotlib`.
7
+ - Import necessary modules.
8
+
9
+ ## 2. Data Preparation
10
+ - Load CIFAR-10 dataset using `torchvision.datasets`.
11
+ - Normalize and transform data to Tensors.
12
+ - Explore data shapes and visualize sample images.
13
+
14
+ ## 3. Model Architecture
15
+ - Build a PyTorch `nn.Module` CNN:
16
+ - Input layer: 32x32x3 images.
17
+ - Multiple Convolutional blocks (Conv2d -> BatchNorm2d -> ReLU -> MaxPool2d -> Dropout).
18
+ - Flatten layer.
19
+ - Fully connected layers with BatchNorm and Dropout.
20
+ - Output layer: 10 units.
21
+
22
+ ## 4. Training Configuration
23
+ - Loss Function: `nn.CrossEntropyLoss()`.
24
+ - Optimizer: `optim.Adam`.
25
+ - Device: Use CUDA if available, else CPU.
26
+
27
+ ## 5. Model Training
28
+ - Train the model on the training set.
29
+ - Validate on the test/validation set.
30
+ - Save the training history.
31
+
32
+ ## 6. Evaluation and Visualization
33
+ - Evaluate the model on the test set.
34
+ - Plot Training vs. Validation Accuracy/Loss.
35
+ - Display a confusion matrix or classification report.
36
+ - Save the final model.
37
+
38
+ ## 7. Inference Script (Optional)
39
+ - Create a script to load the model and predict labels for new images.
train_cifar10.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import tensorflow as tf
3
+ from tensorflow.keras import layers, models, callbacks
4
+ import matplotlib.pyplot as plt
5
+ import numpy as np
6
+
7
# Seed both NumPy's and TensorFlow's global RNGs so weight init and
# shuffling are reproducible across runs.
np.random.seed(42)
tf.random.set_seed(42)
10
+
11
def load_and_preprocess_data():
    """Fetch CIFAR-10 and scale pixel intensities into [0, 1].

    Returns:
        ((train_images, train_labels), (test_images, test_labels)), with
        images rescaled to floats in [0, 1] and integer class-id labels.
    """
    print("Loading CIFAR-10 dataset...")
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()

    # Pixels arrive as uint8 in [0, 255]; rescale for stable training.
    x_train = x_train / 255.0
    x_test = x_test / 255.0

    print(f"Train images shape: {x_train.shape}")
    print(f"Test images shape: {x_test.shape}")

    return (x_train, y_train), (x_test, y_test)
23
+
24
def build_cnn_model():
    """Construct and compile the CIFAR-10 CNN.

    Three Conv-BN blocks (32/64/128 filters) with max pooling and
    progressively larger dropout, then a 128-unit dense head and a
    10-way softmax output. Compiled with Adam and sparse categorical
    cross-entropy (integer labels).
    """
    print("Building CNN architecture...")
    model = models.Sequential()

    # (filters, dropout_rate) per convolutional block, in order.
    conv_blocks = [(32, 0.2), (64, 0.3), (128, 0.4)]
    for block_idx, (filters, drop_rate) in enumerate(conv_blocks):
        conv_kwargs = {"padding": "same", "activation": "relu"}
        if block_idx == 0:
            # Only the first layer declares the 32x32 RGB input shape.
            conv_kwargs["input_shape"] = (32, 32, 3)
        model.add(layers.Conv2D(filters, (3, 3), **conv_kwargs))
        model.add(layers.BatchNormalization())
        model.add(layers.Conv2D(filters, (3, 3), padding="same", activation="relu"))
        model.add(layers.BatchNormalization())
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Dropout(drop_rate))

    # Classification head.
    model.add(layers.Flatten())
    model.add(layers.Dense(128, activation="relu"))
    model.add(layers.BatchNormalization())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(10, activation="softmax"))

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    return model
65
+
66
def train_and_evaluate(epochs=2, batch_size=64):
    """Run the full pipeline: load data, build the model, train, evaluate, save.

    Args:
        epochs: number of training epochs. The default of 2 keeps the demo
            fast; raise to ~50 for a full training run.
        batch_size: minibatch size passed to `model.fit`.

    Returns:
        The Keras `History` object produced by `model.fit`.
    """
    # 1. Prepare Data
    (train_images, train_labels), (test_images, test_labels) = load_and_preprocess_data()

    # 2. Build Model
    model = build_cnn_model()
    model.summary()

    # 3. Callbacks: decay the LR on a stalled val_loss first (patience 3),
    # and stop entirely if it stays stalled (patience 10), keeping the best weights.
    early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=10,
                                         restore_best_weights=True)
    reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                                            patience=3, min_lr=1e-6)

    # 4. Train
    print(f"\nStarting training (limited to {epochs} epochs for demonstration)...")
    history = model.fit(
        train_images, train_labels,
        epochs=epochs,
        batch_size=batch_size,
        validation_data=(test_images, test_labels),
        callbacks=[early_stop, reduce_lr]
    )

    # 5. Evaluate on the held-out test set
    print("\nEvaluating model...")
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    print(f"\nFinal Test Accuracy: {test_acc*100:.2f}%")

    # 6. Save Model
    # NOTE(review): legacy HDF5 format; consider the native `.keras` format
    # on newer TensorFlow versions.
    model.save('cifar10_cnn_v1.h5')
    print("Model saved to cifar10_cnn_v1.h5")

    return history
98
+
99
def plot_results(history):
    """Save training/validation accuracy and loss curves to training_plot.png.

    Args:
        history: Keras `History` object whose `history` dict contains the
            'accuracy', 'val_accuracy', 'loss' and 'val_loss' series.
    """
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs_range = range(len(acc))

    plt.figure(figsize=(12, 5))

    # Left panel: accuracy curves.
    plt.subplot(1, 2, 1)
    plt.plot(epochs_range, acc, label='Training Accuracy')
    plt.plot(epochs_range, val_acc, label='Validation Accuracy')
    plt.title('Accuracy')
    plt.legend()

    # Right panel: loss curves.
    plt.subplot(1, 2, 2)
    plt.plot(epochs_range, loss, label='Training Loss')
    plt.plot(epochs_range, val_loss, label='Validation Loss')
    plt.title('Loss')
    plt.legend()

    plt.savefig('training_plot.png')
    # Close the figure so repeated calls don't accumulate open figures
    # (the original left every figure alive in pyplot's registry).
    plt.close()
    print("Training plots saved as training_plot.png")
123
+
124
if __name__ == "__main__":
    # Run the pipeline; surface any failure with an installation hint rather
    # than a raw traceback, since TF setup problems are the most common cause.
    try:
        plot_results(train_and_evaluate())
    except Exception as exc:
        print(f"\n[ERROR] An error occurred: {exc}")
        print("\nNote: If you encounter DLL errors or ModuleNotFound errors, please ensure "
              "TensorFlow is correctly installed in your environment (e.g., pip install tensorflow).")
training_plot.png ADDED