| """ | |
| SegFormer Fine-tuning Script | |
| This script fine-tunes a SegFormer model on a custom semantic segmentation | |
| dataset. It provides configurable parameters for training hyperparameters | |
| and dataset settings. | |
| """ | |
| import json | |
| import os | |
| import zipfile | |
| import numpy as np | |
| import torch | |
| import torch.nn as nn | |
| from torch.utils.data import Dataset, DataLoader | |
| from PIL import Image | |
| from pathlib import Path | |
| from datetime import datetime | |
| from transformers import ( | |
| SegformerImageProcessor, | |
| SegformerForSemanticSegmentation, | |
| ) | |
| import evaluate | |
| from tqdm import tqdm | |

class SemanticSegmentationDataset(Dataset):
    """Image (semantic) segmentation dataset."""

    def __init__(
        self,
        root_dir,
        image_processor,
        train=True,
        data_percent=100,
    ):
        """
        Args:
            root_dir (string): Root directory of the dataset containing
                the images + annotations.
            image_processor (SegformerImageProcessor): Image processor to
                prepare images + segmentation maps.
            train (bool): Whether to load "training" or "validation"
                images + annotations.
            data_percent (int): Percentage of the dataset to use.
                100 means all data, 50 means half of the data.
        """
        self.root_dir = root_dir
        self.image_processor = image_processor
        self.train = train

        sub_path = "training" if self.train else "validation"
        self.img_dir = os.path.join(self.root_dir, "images", sub_path)
        self.ann_dir = os.path.join(self.root_dir, "annotations", sub_path)

        # Read image file names
        image_file_names = []
        for _root, _dirs, files in os.walk(self.img_dir):
            image_file_names.extend(files)
        self.images = sorted(image_file_names)

        # Read annotation file names
        annotation_file_names = []
        for _root, _dirs, files in os.walk(self.ann_dir):
            annotation_file_names.extend(files)
        self.annotations = sorted(annotation_file_names)

        assert len(self.images) == len(
            self.annotations
        ), "There must be as many images as there are segmentation maps"

        # Apply data_percent to limit the dataset size
        data_percent = data_percent / 100.0
        if data_percent < 1.0:
            images_num_samples = int(len(self.images) * data_percent)
            annotations_num_samples = int(len(self.annotations) * data_percent)
            self.images = self.images[:images_num_samples]
            self.annotations = self.annotations[:annotations_num_samples]

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        image = Image.open(os.path.join(self.img_dir, self.images[idx]))
        segmentation_map = Image.open(
            os.path.join(
                self.ann_dir,
                self.annotations[idx],
            ),
        )

        encoded_inputs = self.image_processor(
            image,
            segmentation_map,
            return_tensors="pt",
        )
        for k in encoded_inputs:
            encoded_inputs[k].squeeze_()  # remove batch dimension
        return encoded_inputs
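

# Expected dataset layout for SemanticSegmentationDataset, derived from the
# path handling above (file names are illustrative; what matters is that the
# sorted image and annotation names line up pairwise):
#
#   <root_dir>/
#       images/
#           training/      e.g. img_0001.png, img_0002.png, ...
#           validation/    e.g. img_1001.png, ...
#       annotations/
#           training/      e.g. img_0001.png, ...
#           validation/    e.g. img_1001.png, ...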


class MeanDice:
    """Accumulate predictions/references and compute a mean Dice score."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Reset stored predictions and references."""
        self.predictions = []
        self.references = []

    def add_batch(self, predictions, references):
        """
        Add a batch of predictions and references.

        Args:
            predictions (np.ndarray): Predicted class indices
            references (np.ndarray): Ground truth class indices
        """
        self.predictions.append(predictions)
        self.references.append(references)

    def compute(self, num_labels, ignore_index=None):
        """Compute mean Dice score across all stored batches."""
        predictions = np.concatenate([p.flatten() for p in self.predictions])
        references = np.concatenate([r.flatten() for r in self.references])

        dice_scores = []
        for class_id in range(num_labels):
            pred_mask = predictions == class_id
            ref_mask = references == class_id
            # Exclude pixels whose ground truth equals ignore_index
            if ignore_index is not None:
                valid_mask = references != ignore_index
                pred_mask = pred_mask & valid_mask
                ref_mask = ref_mask & valid_mask
            intersection = np.sum(pred_mask & ref_mask)
            # Dice denominator: |pred| + |ref| (a mask-size sum, not a union)
            denom = np.sum(pred_mask) + np.sum(ref_mask)
            if denom == 0:
                # Class absent from both prediction and reference:
                # score it as perfect agreement
                dice = 1.0
            else:
                dice = 2.0 * intersection / denom
            dice_scores.append(dice)

        return {
            "mean_dice": float(np.mean(dice_scores)),
            "per_class_dice": dice_scores,
        }
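

# Minimal usage sketch for MeanDice on hypothetical arrays (not part of the
# training pipeline). Dice for one class is 2*|P ∩ R| / (|P| + |R|):
#
#   dice = MeanDice()
#   dice.add_batch(
#       predictions=np.array([[0, 1], [1, 1]]),
#       references=np.array([[0, 1], [0, 1]]),
#   )
#   result = dice.compute(num_labels=2)
#   # class 0: 2*1 / (1+2) = 0.667;  class 1: 2*2 / (3+2) = 0.8
#   # result["mean_dice"] is therefore about 0.733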


def get_latest_model_dir(base_path: str = "./segformer_finetuned") -> Path:
    """
    Return the Path to the latest model directory based on
    timestamped folder names.

    Folder names must follow the format: YYYY-MM-DD_HH-MM-SS
    """
    base = Path(base_path)
    if not base.exists() or not base.is_dir():
        raise FileNotFoundError(f"Directory not found: {base_path}")

    model_dirs = []
    for d in base.iterdir():
        if d.is_dir():
            try:
                dt = datetime.strptime(d.name, "%Y-%m-%d_%H-%M-%S")
                model_dirs.append((dt, d))
            except ValueError:
                continue  # Skip non-matching directories

    if not model_dirs:
        raise FileNotFoundError(
            "No model directories found with valid timestamp format."
        )
    # Return the directory with the latest timestamp
    return max(model_dirs, key=lambda x: x[0])[1]
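

# Example (hypothetical directory names): with both
# ./segformer_finetuned/2024-01-05_09-30-00 and
# ./segformer_finetuned/2024-03-12_18-00-00 on disk,
# get_latest_model_dir() returns the 2024-03-12_18-00-00 path.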


def load_model_and_labels(data_dir, model_path):
    """Load the model and label mappings."""
    # Load id2label mapping from JSON file; JSON keys are strings,
    # so convert them back to ints
    with open(f"{data_dir}/id2label.json", mode="r") as f:
        id2label = json.load(f)
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Load id2color mapping from JSON file, converting keys to ints as well
    # so lookups against integer class indices (class_indices_to_rgb) work
    with open(f"{data_dir}/id2color.json", mode="r") as f:
        id2color = json.load(f)
    id2color = {int(k): v for k, v in id2color.items()}

    print(f"Loaded {len(id2label)} classes:")
    for i, label in id2label.items():
        print(f"  {i}: {label}")

    # Load model
    model = SegformerForSemanticSegmentation.from_pretrained(
        model_path,
        num_labels=len(id2label),
        id2label=id2label,
        label2id=label2id,
    )
    return model, id2label, id2color
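

# Shape of the two JSON files read above (class names and colors are
# illustrative, not this dataset's actual labels). id2label.json maps string
# class IDs to names, id2color.json maps the same keys to RGB triples:
#
#   id2label.json:  {"0": "background", "1": "road", "2": "building"}
#   id2color.json:  {"0": [0, 0, 0], "1": [128, 64, 128], "2": [70, 70, 70]}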


def create_datasets_and_dataloaders(
    image_width,
    image_height,
    data_dir,
    batch_size,
    data_percent,
):
    """Create datasets and dataloaders."""
    image_processor = SegformerImageProcessor(
        size={"height": image_height, "width": image_width},
    )

    train_dataset = SemanticSegmentationDataset(
        root_dir=data_dir,
        image_processor=image_processor,
        train=True,
        data_percent=data_percent,
    )
    valid_dataset = SemanticSegmentationDataset(
        root_dir=data_dir,
        image_processor=image_processor,
        train=False,
        data_percent=data_percent,
    )
    print(f"Number of training examples: {len(train_dataset)}")
    print(f"Number of validation examples: {len(valid_dataset)}")

    train_dataloader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
    )
    valid_dataloader = DataLoader(
        valid_dataset,
        batch_size=batch_size,
    )
    return train_dataloader, valid_dataloader


def class_indices_to_rgb(class_indices, id2color):
    """Convert class indices to an RGB colored image."""
    # class_indices shape: (H, W) with integer class IDs
    height, width = class_indices.shape
    rgb_image = np.zeros((height, width, 3), dtype=np.uint8)
    for class_id, color in id2color.items():
        rgb_image[class_indices == class_id] = color
    return rgb_image
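

# Example (hypothetical mapping): with id2color = {0: [0, 0, 0], 1: [255, 0, 0]},
# class_indices_to_rgb(np.array([[0, 1]]), id2color) yields a 1x2 image with a
# black pixel followed by a red pixel.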


def validate_model(
    model: SegformerForSemanticSegmentation,
    dataloader,
    device,
    id2label,
    calc_dice=False,
    epoch=None,
):
    """
    Validate the model on a validation set and return average loss, mean and
    per-class IoU, mean and per-class accuracy, and (when calc_dice is set)
    mean and per-class Dice.
    """
    model.eval()
    metric = evaluate.load("mean_iou")
    dice = MeanDice()
    total_loss = 0.0
    num_batches = 0

    with torch.no_grad():
        for batch in tqdm(
            dataloader,
            desc="Validating Epoch " + str(epoch if epoch is not None else ""),
            leave=False,
            unit="batches",
        ):
            pixel_values = batch["pixel_values"].to(device)
            labels = batch["labels"].to(device)

            outputs = model(pixel_values=pixel_values, labels=labels)
            logits = outputs.logits
            loss = outputs.loss
            total_loss += loss.item()
            num_batches += 1

            # Upsample logits to label resolution before taking the argmax
            upsampled_logits = nn.functional.interpolate(
                logits,
                size=labels.shape[-2:],
                mode="bilinear",
                align_corners=False,
            )
            predicted = upsampled_logits.argmax(dim=1)

            # Store predictions and references for metric computation
            pred_np = predicted.detach().cpu().numpy()
            ref_np = labels.detach().cpu().numpy()
            metric.add_batch(
                predictions=pred_np,
                references=ref_np,
            )
            if calc_dice:
                dice.add_batch(
                    predictions=pred_np,
                    references=ref_np,
                )

    # Calculate IoU and accuracy (ignore_index=10 is this pipeline's
    # hardcoded "ignore" label)
    result = metric.compute(
        num_labels=len(id2label),
        ignore_index=10,
        reduce_labels=False,
    )
    if calc_dice:
        dice_result = dice.compute(
            num_labels=len(id2label),
            ignore_index=10,
        )

    avg_loss = total_loss / num_batches if num_batches > 0 else 0.0
    return (
        avg_loss,
        result["mean_iou"],
        result["per_category_iou"],
        result["mean_accuracy"],
        result["per_category_accuracy"],
        dice_result["mean_dice"] if calc_dice else None,
        dice_result["per_class_dice"] if calc_dice else None,
    )


def run_training(
    model: SegformerForSemanticSegmentation,
    device,
    train_dataloader,
    valid_dataloader,
    id2label,
    num_epochs,
    learning_rate,
    early_stopping,
    validate_every,
):
    """Train the model.

    Returns
    -------
    tuple(best_model, metrics, initial_dice)
        best_model : nn.Module
            Copy of the model with the best validation IoU.
        metrics : dict with lists for keys: 'epoch', 'train_loss',
            'train_iou', 'train_acc', 'val_loss', 'val_iou', 'val_acc'
        initial_dice : float
            Mean Dice score before any training.
    """
    # Setup device
    model.to(device)
    # Setup optimizer
    optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    # Setup metrics
    metrics = {
        "epoch": [],
        "train_loss": [],
        "train_iou": [],
        "train_acc": [],
        "val_loss": [],
        "val_iou": [],
        "val_acc": [],
    }
    metric = evaluate.load("mean_iou")
    model.train()

    # Initial validation before any training
    (
        loss,
        iou,
        per_class_iou,
        acc,
        per_class_acc,
        dice,
        dice_per_class,
    ) = validate_model(
        model=model,
        dataloader=valid_dataloader,
        device=device,
        id2label=id2label,
        calc_dice=True,
        epoch=0,
    )
    # Record metrics at epoch 0 (no training values yet)
    metrics["epoch"].append(0)
    metrics["val_loss"].append(loss)
    metrics["val_iou"].append(iou)
    metrics["val_acc"].append(acc)
    metrics["train_loss"].append(None)
    metrics["train_iou"].append(None)
    metrics["train_acc"].append(None)
    initial_dice = dice

    # Snapshot the model so later epochs cannot overwrite the best weights
    best_model = copy.deepcopy(model)
    best_iou = iou
    patience = early_stopping
    epochs_without_improvement = 0

    for epoch in tqdm(
        range(num_epochs),
        desc="Training Epochs",
        unit="epochs",
    ):
        epoch_loss = 0.0
        num_batches = 0
        model.train()  # Ensure model is in training mode
        progress_bar = tqdm(
            train_dataloader,
            desc=f"Training Epoch {epoch + 1}",
            leave=True,
            unit="batches",
        )
        for batch in progress_bar:
            # Get the inputs
            pixel_values = batch["pixel_values"].to(device)
            labels = batch["labels"].to(device)

            # Zero the parameter gradients
            optimizer.zero_grad()

            # Forward + backward + optimize
            outputs = model(pixel_values=pixel_values, labels=labels)
            loss, logits = outputs.loss, outputs.logits
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            num_batches += 1

            # Evaluate training batch
            with torch.no_grad():
                upsampled_logits = nn.functional.interpolate(
                    logits,
                    size=labels.shape[-2:],
                    mode="bilinear",
                    align_corners=False,
                )
                predicted = upsampled_logits.argmax(dim=1)
                # Store for metric calculation; the metric expects
                # predictions + labels as numpy arrays
                pred_np = predicted.detach().cpu().numpy()
                ref_np = labels.detach().cpu().numpy()
                metric.add_batch(
                    predictions=pred_np,
                    references=ref_np,
                )

        train_metrics = metric.compute(
            num_labels=len(id2label),
            ignore_index=10,
            reduce_labels=False,
        )
        train_loss = epoch_loss / num_batches if num_batches else 0.0

        # Validation
        if (epoch + 1) % validate_every == 0:
            (
                val_loss,
                val_iou,
                val_per_class_iou,
                val_acc,
                val_per_class_acc,
                val_dice,
                val_dice_per_class,
            ) = validate_model(
                model=model,
                dataloader=valid_dataloader,
                device=device,
                id2label=id2label,
                epoch=epoch + 1,
            )
            # Record metrics
            metrics["epoch"].append(epoch + 1)
            metrics["train_loss"].append(train_loss)
            metrics["train_iou"].append(train_metrics["mean_iou"])
            metrics["train_acc"].append(train_metrics["mean_accuracy"])
            metrics["val_loss"].append(val_loss)
            metrics["val_iou"].append(val_iou)
            metrics["val_acc"].append(val_acc)

            # Keep a copy of the best model so far
            if val_iou > best_iou:
                best_model = copy.deepcopy(model)
                best_iou = val_iou
                epochs_without_improvement = 0
            else:
                epochs_without_improvement += 1
                if epochs_without_improvement >= patience:
                    tqdm.write(
                        f"Early stopping after {patience} validation rounds "
                        "with no improvement",
                    )
                    break
    return best_model, metrics, initial_dice


def extract_model_zip(model_zip_path):
    """Extract model zip file and return the model directory."""
    if not os.path.exists(model_zip_path):
        raise FileNotFoundError(f"Model zip file not found: {model_zip_path}")

    extract_dir = os.path.join(os.path.dirname(model_zip_path), "output")
    with zipfile.ZipFile(model_zip_path, "r") as zip_ref:
        zip_ref.extractall(extract_dir)

    # If the archive contained a single top-level folder, descend into it
    if len(os.listdir(extract_dir)) == 1:
        return os.path.join(extract_dir, os.listdir(extract_dir)[0])
    return extract_dir


def train_model(
    data_dir,
    base_model_zip,
    image_width,
    image_height,
    batch_size,
    data_percent,
    num_epochs,
    learning_rate,
    early_stopping,
    validate_every,
):
    """Fine-tune the base model and return it with training metrics and
    the initial/final Dice scores."""
    model_path = extract_model_zip(base_model_zip)

    # Load model and labels
    model, id2label, id2color = load_model_and_labels(data_dir, model_path)

    # Create datasets and dataloaders
    train_dataloader, valid_dataloader = create_datasets_and_dataloaders(
        image_width,
        image_height,
        data_dir,
        batch_size,
        data_percent,
    )

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Train the model
    best_model, metrics, initial_dice = run_training(
        model,
        device,
        train_dataloader,
        valid_dataloader,
        id2label,
        num_epochs,
        learning_rate,
        early_stopping,
        validate_every,
    )

    # Final validation on the best model
    (
        loss,
        iou,
        per_class_iou,
        acc,
        per_class_acc,
        dice,
        dice_per_class,
    ) = validate_model(
        model=best_model,
        dataloader=valid_dataloader,
        device=device,
        id2label=id2label,
        calc_dice=True,
        epoch=None,
    )
    final_dice = dice
    return best_model, metrics, [initial_dice, final_dice]
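

# Minimal usage sketch. The paths, image size, and hyperparameters below are
# placeholders, not values shipped with this script; adjust them to your
# dataset layout and base-model archive.
if __name__ == "__main__":
    best_model, metrics, (initial_dice, final_dice) = train_model(
        data_dir="./data",  # expects images/, annotations/, id2label.json, id2color.json
        base_model_zip="./base_model.zip",
        image_width=512,
        image_height=512,
        batch_size=4,
        data_percent=100,
        num_epochs=50,
        learning_rate=6e-5,
        early_stopping=5,
        validate_every=1,
    )
    print(f"Dice before training: {initial_dice:.3f}, after: {final_dice:.3f}")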