Shanmuk4622 committed on
Commit
56f86b2
·
verified ·
1 Parent(s): 9eadada

Upload test1/Algo_CIFAR_10_convneXt.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. test1/Algo_CIFAR_10_convneXt.py +266 -0
test1/Algo_CIFAR_10_convneXt.py ADDED
@@ -0,0 +1,266 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.optim as optim
4
+ from torch.utils.data import Dataset, DataLoader
5
+ import torchvision.transforms as transforms
6
+ import torchvision.models as models
7
+ from torchvision.models import ConvNeXt_Tiny_Weights
8
+ from codecarbon import EmissionsTracker
9
+ from carbontracker.tracker import CarbonTracker
10
+ from fvcore.nn import FlopCountAnalysis
11
+ from sklearn.metrics import precision_recall_fscore_support, accuracy_score
12
+ from tqdm import tqdm
13
+ import pandas as pd
14
+ import numpy as np
15
+ import pickle
16
+ import os
17
+ import time
18
+ import logging
19
+ import warnings
20
+ import gc
21
+
22
+ # --- Environment Optimization ---
23
+ warnings.filterwarnings("ignore", category=UserWarning)
24
+ logging.getLogger("codecarbon").setLevel(logging.ERROR)
25
+
26
+ # --- Configurations ---
27
+ DATA_DIR = r"C:\Users\shanm\Dataset Download\cifar-10-batches-py"
28
+ LOG_FILE = "eden_unfrozen_cifar10_convnext.csv"
29
+ MODEL_SAVE_PATH = "eden_unfrozen_convnext_cifar10.pth"
30
+
31
+ BATCH_SIZE = 32
32
+ ACCUMULATION_STEPS = 4
33
+ LEARNING_RATE = 1e-3
34
+ NUM_EPOCHS = 20
35
+ UNFREEZE_EPOCH = 5
36
+ L1_LAMBDA = 1e-5
37
+
38
+ DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
39
+
40
+ # --- Dataset Loader (RAM Cached for 64GB System) ---
41
+ class CIFAR10Binary(Dataset):
42
+ def __init__(self, root, train=True, transform=None):
43
+ self.data = []
44
+ self.labels = []
45
+ self.transform = transform
46
+
47
+ if train:
48
+ for i in range(1, 6):
49
+ file_path = os.path.join(root, f'data_batch_{i}')
50
+ with open(file_path, 'rb') as f:
51
+ entry = pickle.load(f, encoding='latin1')
52
+ self.data.append(entry['data'])
53
+ self.labels.extend(entry['labels'])
54
+ self.data = np.vstack(self.data).reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
55
+ else:
56
+ file_path = os.path.join(root, 'test_batch')
57
+ with open(file_path, 'rb') as f:
58
+ entry = pickle.load(f, encoding='latin1')
59
+ self.data = entry['data'].reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
60
+ self.labels = entry['labels']
61
+
62
+ def __len__(self): return len(self.data)
63
+
64
+ def __getitem__(self, idx):
65
+ img, target = self.data[idx], self.labels[idx]
66
+ if self.transform:
67
+ img = self.transform(img)
68
+ return img, target
69
+
70
+ # --- Main Profiling Engine ---
71
+ def run_experiment():
72
+ torch.cuda.empty_cache()
73
+ gc.collect()
74
+
75
+ # --- 1. Pure PyTorch Transfer Learning Setup ---
76
+ print("\n[EDEN Phase 1] Loading Native PyTorch ConvNeXt-Tiny...")
77
+
78
+ weights = ConvNeXt_Tiny_Weights.DEFAULT
79
+ model = models.convnext_tiny(weights=weights)
80
+
81
+ # Freeze the ConvNeXt backbone initially
82
+ for param in model.features.parameters():
83
+ param.requires_grad = False
84
+
85
+ # Isolate and unfreeze the classification head natively
86
+ # In ConvNeXt, the classifier is a Sequential block; the Linear layer is at index 2
87
+ in_features = model.classifier[2].in_features
88
+ model.classifier[2] = nn.Linear(in_features, 10)
89
+
90
+ for param in model.classifier.parameters():
91
+ param.requires_grad = True
92
+
93
+ model = model.to(DEVICE)
94
+ optimizer = optim.Adam(model.classifier.parameters(), lr=LEARNING_RATE)
95
+
96
+ # Complexity Metrics (Static)
97
+ dummy_input = torch.randn(1, 3, 224, 224).to(DEVICE)
98
+ with warnings.catch_warnings():
99
+ warnings.simplefilter("ignore")
100
+ total_flops = FlopCountAnalysis(model, dummy_input).total()
101
+ total_params = sum(p.numel() for p in model.parameters())
102
+
103
+ transform = transforms.Compose([
104
+ transforms.ToPILImage(),
105
+ transforms.Resize(224),
106
+ transforms.ToTensor(),
107
+ transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
108
+ ])
109
+
110
+ train_set = CIFAR10Binary(DATA_DIR, train=True, transform=transform)
111
+ loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, pin_memory=True)
112
+
113
+ criterion = nn.CrossEntropyLoss()
114
+ scaler = torch.cuda.amp.GradScaler()
115
+
116
+ # --- 2. Trackers Initialization ---
117
+ cc_tracker = EmissionsTracker(measure_power_secs=1, save_to_file=False)
118
+ ct_tracker = CarbonTracker(epochs=NUM_EPOCHS, monitor_epochs=NUM_EPOCHS, update_interval=1)
119
+
120
+ cc_tracker.start()
121
+ all_logs = []
122
+ total_iterations_counter = 0
123
+ session_start_time = time.time()
124
+
125
+ prev_cum_gpu_j, prev_cum_cpu_j, prev_cum_ram_j = 0.0, 0.0, 0.0
126
+ prev_acc = 0.0
127
+
128
+ print(f"\nEDEN PROFILING STARTED | DEVICE: {torch.cuda.get_device_name(0)}")
129
+ print(f"Dataset: CIFAR-10 | Architecture: ConvNeXt-Tiny")
130
+ print(f"Params: {total_params:,} | FLOPs: {total_flops:.2e}\n")
131
+
132
+ for epoch in range(NUM_EPOCHS):
133
+ # --- Stage 2: Progressive Unfreezing ---
134
+ if epoch + 1 == UNFREEZE_EPOCH:
135
+ print("\n[STAGE 2 INITIATED] Unfreezing ConvNeXt Backbone for Fine-Tuning...")
136
+ for param in model.parameters():
137
+ param.requires_grad = True
138
+ optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE * 0.1)
139
+
140
+ ct_tracker.epoch_start()
141
+ torch.cuda.reset_peak_memory_stats()
142
+ epoch_start_time = time.time()
143
+ model.train()
144
+
145
+ running_loss = 0.0
146
+ all_preds, all_labels = [], []
147
+ epoch_grad_norms = []
148
+
149
+ optimizer.zero_grad()
150
+ pbar = tqdm(loader, desc=f"Epoch {epoch+1}/{NUM_EPOCHS}", unit="batch", leave=False)
151
+
152
+ for i, (images, labels) in enumerate(pbar):
153
+ images, labels = images.to(DEVICE), labels.to(DEVICE)
154
+
155
+ with torch.cuda.amp.autocast():
156
+ outputs = model(images)
157
+ loss = criterion(outputs, labels)
158
+
159
+ # Active Sparse Training (L1 Penalty)
160
+ trainable_params = [p for p in model.parameters() if p.requires_grad]
161
+ l1_penalty = sum(p.abs().sum() for p in trainable_params)
162
+
163
+ total_loss = loss + (L1_LAMBDA * l1_penalty)
164
+ scaled_loss = total_loss / ACCUMULATION_STEPS
165
+
166
+ scaler.scale(scaled_loss).backward()
167
+
168
+ grad_norm = 0.0
169
+ for p in model.parameters():
170
+ if p.requires_grad and p.grad is not None:
171
+ grad_norm += p.grad.data.norm(2).item() ** 2
172
+ epoch_grad_norms.append(grad_norm ** 0.5)
173
+
174
+ if (i + 1) % ACCUMULATION_STEPS == 0:
175
+ scaler.step(optimizer)
176
+ scaler.update()
177
+ optimizer.zero_grad()
178
+
179
+ running_loss += loss.item() * ACCUMULATION_STEPS
180
+
181
+ _, preds = torch.max(outputs, 1)
182
+ all_preds.extend(preds.cpu().numpy())
183
+ all_labels.extend(labels.cpu().numpy())
184
+ total_iterations_counter += 1
185
+
186
+ pbar.set_postfix(loss=f"{(loss.item()*ACCUMULATION_STEPS):.4f}")
187
+
188
+ # --- A. Evaluation ---
189
+ ct_tracker.epoch_end()
190
+ epoch_end_time = time.time()
191
+ epoch_duration = epoch_end_time - epoch_start_time
192
+ avg_it_per_sec = len(loader) / epoch_duration
193
+
194
+ acc = accuracy_score(all_labels, all_preds)
195
+ p, r, f1, _ = precision_recall_fscore_support(all_labels, all_preds, average='macro', zero_division=0)
196
+
197
+ # Rigorous Inference Latency (With Warm-up)
198
+ model.eval()
199
+ with torch.no_grad():
200
+ sample_img = torch.randn(1, 3, 224, 224).to(DEVICE)
201
+ _ = model(sample_img)
202
+ torch.cuda.synchronize()
203
+
204
+ starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
205
+ starter.record()
206
+ _ = model(sample_img)
207
+ ender.record()
208
+ torch.cuda.synchronize()
209
+ lat_ms = starter.elapsed_time(ender)
210
+
211
+ # --- B. Energy & Power Calculations ---
212
+ emissions_data = cc_tracker._prepare_emissions_data()
213
+
214
+ cum_gpu_j = emissions_data.gpu_energy * 3.6e6
215
+ cum_cpu_j = emissions_data.cpu_energy * 3.6e6
216
+ cum_ram_j = emissions_data.ram_energy * 3.6e6
217
+ cum_total_j = cum_gpu_j + cum_cpu_j + cum_ram_j
218
+
219
+ epoch_gpu_j = cum_gpu_j - prev_cum_gpu_j
220
+ epoch_cpu_j = cum_cpu_j - prev_cum_cpu_j
221
+ epoch_ram_j = cum_ram_j - prev_cum_ram_j
222
+ epoch_total_j = epoch_gpu_j + epoch_cpu_j + epoch_ram_j
223
+
224
+ prev_cum_gpu_j, prev_cum_cpu_j, prev_cum_ram_j = cum_gpu_j, cum_cpu_j, cum_ram_j
225
+
226
+ avg_gpu_w = epoch_gpu_j / epoch_duration if epoch_duration > 0 else 0
227
+ avg_cpu_w = epoch_cpu_j / epoch_duration if epoch_duration > 0 else 0
228
+ avg_ram_w = epoch_ram_j / epoch_duration if epoch_duration > 0 else 0
229
+
230
+ vram_peak = torch.cuda.max_memory_allocated(DEVICE) / (1024**3)
231
+
232
+ acc_gain = acc - prev_acc
233
+ eag = acc_gain / epoch_total_j if epoch_total_j > 0 else 0
234
+ prev_acc = acc
235
+
236
+ # --- C. Terminal Update ---
237
+ print(f"Epoch {epoch+1} Summary:")
238
+ print(f" > Acc: {acc:.4f} | F1: {f1:.4f} | Loss: {running_loss/len(loader):.4f}")
239
+ print(f" > Epoch Energy: {epoch_total_j:.1f}J | EAG: {eag:.8f}")
240
+ print(f" > Avg Power: GPU {avg_gpu_w:.1f}W | VRAM: {vram_peak:.2f}GB | Latency: {lat_ms:.2f}ms")
241
+ print("-" * 65)
242
+
243
+ # --- D. Unified Verified CSV Logging ---
244
+ log_entry = {
245
+ "epoch": epoch + 1,
246
+ "loss": running_loss / len(loader),
247
+ "accuracy": acc, "f1_score": f1, "precision": p, "recall": r,
248
+ "epoch_energy_gpu_j": epoch_gpu_j, "epoch_energy_cpu_j": epoch_cpu_j,
249
+ "epoch_energy_ram_j": epoch_ram_j, "epoch_total_energy_j": epoch_total_j,
250
+ "cumulative_total_energy_j": cum_total_j, "carbon_emissions_kg": emissions_data.emissions,
251
+ "avg_power_gpu_w": avg_gpu_w, "avg_power_cpu_w": avg_cpu_w, "avg_power_ram_w": avg_ram_w,
252
+ "vram_peak_gb": vram_peak, "latency_ms": lat_ms, "avg_grad_norm": np.mean(epoch_grad_norms),
253
+ "eag_metric": eag, "it_per_sec": avg_it_per_sec, "total_iterations": total_iterations_counter,
254
+ "epoch_duration_sec": epoch_duration, "cumulative_time_sec": time.time() - session_start_time
255
+ }
256
+ all_logs.append(log_entry)
257
+ pd.DataFrame(all_logs).to_csv(LOG_FILE, index=False)
258
+
259
+ cc_tracker.stop()
260
+
261
+ # --- E. Save Optimized Model ---
262
+ torch.save(model.state_dict(), MODEL_SAVE_PATH)
263
+ print(f"\n[FINISH] Verified Optimization Complete.")
264
+
265
+ if __name__ == "__main__":
266
+ run_experiment()