| | import torch |
| | import torch.nn.functional as F |
| | from torch.utils.data import DataLoader |
| | from codecarbon import EmissionsTracker |
| | import pandas as pd |
| | from model import CLIPClassifier, get_processor |
| | from utils import load_dataset_images, CustomImageDataset |
| | import os |
| |
|
| | |
# Measure the energy consumption / carbon footprint of this training run.
# codecarbon appends one row per run to <output_dir>/emissions.csv.
tracker = EmissionsTracker(output_dir=".", project_name="IM-IAD_CLIP")

# Begin sampling power draw; stopped after the training loop.
tracker.start()
| |
|
| | |
# ---------------------------------------------------------------------------
# Hyperparameters
# ---------------------------------------------------------------------------
data_dir = "./data/Images"   # expects one sub-folder per class — TODO confirm in utils.py
batch_size = 8
num_epochs = 5
learning_rate = 1e-3         # applied to the classifier head only (see optimizer below)

# ---------------------------------------------------------------------------
# Data loading
# ---------------------------------------------------------------------------
image_paths, labels, class_to_idx = load_dataset_images(data_dir)

# Idiomatic emptiness check: an empty list is falsy (was `len(...) == 0`).
if not image_paths:
    raise ValueError("❌ Aucune image trouvée ! Vérifie que le dossier ./data/Images contient des sous-dossiers avec des images.")

processor = get_processor()
dataset = CustomImageDataset(image_paths, labels, processor)
train_dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# Sanity check: show a few loaded samples and their labels.
print(f"🔍 Exemple d'images chargées : {image_paths[:5]}")
print(f"🔍 Labels associés : {labels[:5]}")

# ---------------------------------------------------------------------------
# Model setup. Only `model.classifier` parameters are handed to the
# optimizer, so the CLIP backbone receives no updates here (presumably it
# is frozen inside CLIPClassifier — verify in model.py).
# ---------------------------------------------------------------------------
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
num_classes = len(class_to_idx)
model = CLIPClassifier(num_classes=num_classes).to(device)
optimizer = torch.optim.Adam(model.classifier.parameters(), lr=learning_rate)
| |
|
| | |
# ---------------------------------------------------------------------------
# Training loop: standard cross-entropy over the classifier logits.
# ---------------------------------------------------------------------------
model.train()
for epoch in range(num_epochs):
    total_loss = 0.0
    # Bug fix: the loop variable was named `labels`, clobbering the
    # module-level `labels` list built during data loading. Renamed to
    # `batch_labels` to remove the shadowing.
    for pixel_values, batch_labels in train_dataloader:
        pixel_values = pixel_values.to(device)
        batch_labels = batch_labels.to(device)

        optimizer.zero_grad()
        logits = model(pixel_values)
        loss = F.cross_entropy(logits, batch_labels)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()  # .item() detaches; avoids holding the graph

    # Mean loss per batch for this epoch.
    print(f"Epoch {epoch+1}/{num_epochs} - Loss: {total_loss/len(train_dataloader):.4f}")
| |
|
| | |
# ---------------------------------------------------------------------------
# Stop energy tracking and compute a simple "environmental impact" score.
# ---------------------------------------------------------------------------
# EmissionsTracker.stop() returns the emissions (kg CO2eq) measured for
# THIS run. The previous code summed every row of emissions.csv, but
# codecarbon APPENDS one row per run to that file, so emissions from all
# earlier runs inflated the figure.
eq_kg_co2 = tracker.stop()
if eq_kg_co2 is None:
    # Fallback if stop() reports nothing: take only the most recent row.
    eq_kg_co2 = float(pd.read_csv("emissions.csv")["emissions"].iloc[-1])

max_eq_kg_co2 = 1  # normalization budget in kg CO2eq

# TODO(review): accuracy is hard-coded; it should come from an evaluation
# pass on a held-out set.
accuracy = 0.9427

# Score shrinks as emissions approach the budget; like the original, it is
# not clamped and goes negative when eq_kg_co2 > max_eq_kg_co2.
impact_env = accuracy * (1 - (eq_kg_co2 / max_eq_kg_co2))

print(f"🌍 Impact Environnemental Calculé : {impact_env:.4f}")

# ---------------------------------------------------------------------------
# Persist the trained weights (state_dict only, not the full module).
# ---------------------------------------------------------------------------
model_save_path = "./clip_model.pth"
torch.save(model.state_dict(), model_save_path)
print(f"✅ Modèle sauvegardé sous {model_save_path}")
| |
|