import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
import torchvision.transforms as transforms
import torchvision.models as models
from torchvision.models import EfficientNet_V2_S_Weights
from codecarbon import EmissionsTracker
from carbontracker.tracker import CarbonTracker
from fvcore.nn import FlopCountAnalysis
from sklearn.metrics import precision_recall_fscore_support, accuracy_score
from tqdm import tqdm
import pandas as pd
import numpy as np
import os
import time
import logging
import warnings
import gc
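# Silence framework warnings and codecarbon's own logger so only this script's output reaches the console.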
warnings.filterwarnings("ignore", category=UserWarning)
logging.getLogger("codecarbon").setLevel(logging.CRITICAL)
logging.getLogger("codecarbon").disabled = True
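# Experiment configuration: dataset location, output files, and training hyperparameters.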
DATA_DIR = r"C:\Users\shanm\Dataset Download\custom image net"
LOG_FILE = "eden_unfrozen_custom_imagenet_efficientNet.csv"
MODEL_SAVE_PATH = "eden_unfrozen_efficientnet_v2_custom_imagenet.pth"

BATCH_SIZE = 32
ACCUMULATION_STEPS = 4       # gradient accumulation: effective batch size = BATCH_SIZE * ACCUMULATION_STEPS
LEARNING_RATE = 1e-3
NUM_EPOCHS = 30
UNFREEZE_EPOCH = 5           # epoch at which the frozen backbone is released for full fine-tuning
L1_LAMBDA = 1e-5             # weight of the L1 penalty on trainable parameters
NUM_CLASSES = 300

# The profiling below (CUDA events, peak-VRAM stats) assumes a GPU is available.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def run_experiment():
    torch.backends.cudnn.benchmark = True
    torch.cuda.empty_cache()
    gc.collect()
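    # Load ImageNet-pretrained EfficientNetV2-S, freeze the feature extractor, and swap in a classifier head for NUM_CLASSES.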
    weights = EfficientNet_V2_S_Weights.DEFAULT
    model = models.efficientnet_v2_s(weights=weights)

    for param in model.features.parameters():
        param.requires_grad = False

    model.classifier[1] = nn.Linear(model.classifier[1].in_features, NUM_CLASSES)
    model = model.to(DEVICE)
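    # One-off model statistics: FLOPs of a single 224x224 forward pass and the total parameter count
    # (computed here for reference; they are not written to the per-epoch CSV below).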
    dummy_input = torch.randn(1, 3, 224, 224).to(DEVICE)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        total_flops = FlopCountAnalysis(model, dummy_input).total()
    total_params = sum(p.numel() for p in model.parameters())
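    # Standard ImageNet preprocessing and normalization.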
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])

    # The entire ImageFolder is used for training; the metrics logged below are computed on training batches.
    train_set = ImageFolder(root=DATA_DIR, transform=transform)
    loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, pin_memory=True)
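    # Phase 1: the optimizer updates only the new classifier head; mixed precision (AMP) is used throughout.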
    optimizer = optim.Adam(model.classifier.parameters(), lr=LEARNING_RATE)
    criterion = nn.CrossEntropyLoss()
    scaler = torch.cuda.amp.GradScaler()
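    # Two independent energy monitors: codecarbon (cumulative energy/emissions, polled once per epoch)
    # and carbontracker (per-epoch start/end hooks).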
    cc_tracker = EmissionsTracker(measure_power_secs=1, save_to_file=False, log_level="critical")
    ct_tracker = CarbonTracker(epochs=NUM_EPOCHS, monitor_epochs=NUM_EPOCHS, update_interval=1)

    cc_tracker.start()
    all_logs = []
    total_iterations_counter = 0
    session_start_time = time.time()

    # Running totals used to turn codecarbon's cumulative readings into per-epoch deltas.
    prev_cum_gpu_j, prev_cum_cpu_j, prev_cum_ram_j = 0.0, 0.0, 0.0
    prev_acc = 0.0

    print(f"\n[EDEN PROFILING STARTED] | Model: EfficientNetV2-S | Classes: {NUM_CLASSES}")
    print(f"Dataset: Custom ImageNet ({len(train_set)} images) | Saving quietly to CSV...\n")

    for epoch in range(NUM_EPOCHS):
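        # After the frozen warm-up epochs, unfreeze the backbone and fine-tune all parameters
        # with a 10x smaller learning rate.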
        if epoch + 1 == UNFREEZE_EPOCH:
            print(f"\n[Epoch {epoch+1}] Unfreezing Backbone for Fine-Tuning...")
            for param in model.parameters():
                param.requires_grad = True
            optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE * 0.1)

        ct_tracker.epoch_start()
        torch.cuda.reset_peak_memory_stats()
        epoch_start_time = time.time()
        model.train()

        running_loss = 0.0
        all_preds, all_labels = [], []
        epoch_grad_norms = []

        optimizer.zero_grad()

        pbar = tqdm(loader, desc=f"Epoch {epoch+1}/{NUM_EPOCHS}", unit="batch", leave=False)

        for i, (images, labels) in enumerate(pbar):
            images, labels = images.to(DEVICE), labels.to(DEVICE)

            with torch.cuda.amp.autocast():
                outputs = model(images)
                loss = criterion(outputs, labels)
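            # L1 regularization over whichever parameters are currently trainable, then scale the loss
            # for gradient accumulation before the backward pass.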
            trainable_params = [p for p in model.parameters() if p.requires_grad]
            l1_penalty = sum(p.abs().sum() for p in trainable_params)

            total_loss = loss + (L1_LAMBDA * l1_penalty)
            scaled_loss = total_loss / ACCUMULATION_STEPS

            scaler.scale(scaled_loss).backward()
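            # Global L2 gradient norm; note the gradients are still multiplied by the AMP loss scale at this point.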
            grad_norm = 0.0
            for p in model.parameters():
                if p.requires_grad and p.grad is not None:
                    grad_norm += p.grad.data.norm(2).item() ** 2
            epoch_grad_norms.append(grad_norm ** 0.5)
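            # Step the optimizer once every ACCUMULATION_STEPS mini-batches.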
            if (i + 1) % ACCUMULATION_STEPS == 0:
                scaler.step(optimizer)
                scaler.update()
                optimizer.zero_grad()

            running_loss += loss.item() * ACCUMULATION_STEPS

            _, preds = torch.max(outputs, 1)
            all_preds.extend(preds.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())
            total_iterations_counter += 1

            pbar.set_postfix(loss=f"{(loss.item()*ACCUMULATION_STEPS):.4f}")
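        # End-of-epoch bookkeeping: throughput and training-set metrics.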
        ct_tracker.epoch_end()
        epoch_end_time = time.time()
        epoch_duration = epoch_end_time - epoch_start_time
        avg_it_per_sec = len(loader) / epoch_duration

        acc = accuracy_score(all_labels, all_preds)
        p, r, f1, _ = precision_recall_fscore_support(all_labels, all_preds, average='macro', zero_division=0)
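        # Single-image inference latency: one warm-up forward pass, then a pass timed with CUDA events.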
        model.eval()
        with torch.no_grad():
            sample_img = torch.randn(1, 3, 224, 224).to(DEVICE)
            _ = model(sample_img)
            torch.cuda.synchronize()

            starter, ender = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
            starter.record()
            _ = model(sample_img)
            ender.record()
            torch.cuda.synchronize()
            lat_ms = starter.elapsed_time(ender)
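        # codecarbon reports cumulative energy in kWh; multiply by 3.6e6 to convert to joules.
        # _prepare_emissions_data() is an internal codecarbon method used to read the live totals mid-run.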
        emissions_data = cc_tracker._prepare_emissions_data()

        cum_gpu_j = emissions_data.gpu_energy * 3.6e6
        cum_cpu_j = emissions_data.cpu_energy * 3.6e6
        cum_ram_j = emissions_data.ram_energy * 3.6e6
        cum_total_j = cum_gpu_j + cum_cpu_j + cum_ram_j
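        # Per-epoch energy is the difference between consecutive cumulative readings.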
        epoch_gpu_j = cum_gpu_j - prev_cum_gpu_j
        epoch_cpu_j = cum_cpu_j - prev_cum_cpu_j
        epoch_ram_j = cum_ram_j - prev_cum_ram_j
        epoch_total_j = epoch_gpu_j + epoch_cpu_j + epoch_ram_j

        prev_cum_gpu_j, prev_cum_cpu_j, prev_cum_ram_j = cum_gpu_j, cum_cpu_j, cum_ram_j

        # Average power over the epoch (W = J / s) and peak VRAM in GiB.
        avg_gpu_w = epoch_gpu_j / epoch_duration if epoch_duration > 0 else 0
        avg_cpu_w = epoch_cpu_j / epoch_duration if epoch_duration > 0 else 0
        avg_ram_w = epoch_ram_j / epoch_duration if epoch_duration > 0 else 0

        vram_peak = torch.cuda.max_memory_allocated(DEVICE) / (1024**3)
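        # eag: accuracy gained this epoch per joule of energy consumed during it.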
        acc_gain = acc - prev_acc
        eag = acc_gain / epoch_total_j if epoch_total_j > 0 else 0
        prev_acc = acc

        print(f"Epoch {epoch+1}/{NUM_EPOCHS} | Acc: {acc:.4f} | Loss: {running_loss/len(loader):.4f} | Energy: {epoch_total_j:.1f}J | Latency: {lat_ms:.2f}ms")
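        # Append this epoch's metrics and rewrite the full CSV so partial results are kept if the run stops early.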
        log_entry = {
            "epoch": epoch + 1,
            "loss": running_loss / len(loader),
            "accuracy": acc, "f1_score": f1, "precision": p, "recall": r,
            "epoch_energy_gpu_j": epoch_gpu_j, "epoch_energy_cpu_j": epoch_cpu_j,
            "epoch_energy_ram_j": epoch_ram_j, "epoch_total_energy_j": epoch_total_j,
            "cumulative_total_energy_j": cum_total_j, "carbon_emissions_kg": emissions_data.emissions,
            "avg_power_gpu_w": avg_gpu_w, "avg_power_cpu_w": avg_cpu_w, "avg_power_ram_w": avg_ram_w,
            "vram_peak_gb": vram_peak, "latency_ms": lat_ms, "avg_grad_norm": np.mean(epoch_grad_norms),
            "eag_metric": eag, "it_per_sec": avg_it_per_sec, "total_iterations": total_iterations_counter,
            "epoch_duration_sec": epoch_duration, "cumulative_time_sec": time.time() - session_start_time
        }
        all_logs.append(log_entry)
        pd.DataFrame(all_logs).to_csv(LOG_FILE, index=False)

    cc_tracker.stop()
    torch.save(model.state_dict(), MODEL_SAVE_PATH)
    print("\n[FINISH] Verified Optimization Complete. Model and CSV Saved.")


if __name__ == "__main__":
    run_experiment()