samwell committed
Commit a878f0f · verified · 1 Parent(s): a91472c

Upload train_medsiglip.py with huggingface_hub

Files changed (1):
  1. train_medsiglip.py (+18 -6)
train_medsiglip.py CHANGED
```diff
@@ -67,13 +67,14 @@ class Config:
     num_seg_classes: int = 3  # background, symphysis, head
 
     # Training
-    batch_size: int = 8  # Smaller batch for larger model
+    batch_size: int = 4  # Reduced for memory when encoder unfrozen
     num_epochs: int = 30
     learning_rate: float = 5e-5  # Lower LR for fine-tuning
     weight_decay: float = 0.01
     warmup_epochs: int = 2
-    gradient_accumulation: int = 4
+    gradient_accumulation: int = 8  # Increased to maintain effective batch size
     freeze_encoder_epochs: int = 3  # Freeze encoder initially
+    use_gradient_checkpointing: bool = True  # Save memory
 
     # Output
     output_dir: Path = Path("./outputs")
```
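The two config changes cancel out on the optimization side: the per-step batch shrinks from 8 to 4 while accumulation doubles from 4 to 8, so the effective batch size stays at 4 × 8 = 32. A minimal sketch of how these two fields typically interact in a training loop (illustrative only, not the script's actual `train_epoch`; `compute_loss` is a hypothetical helper):

```python
# Illustrative gradient-accumulation loop, not the script's train_epoch.
optimizer.zero_grad()
for step, (images, targets) in enumerate(loader):
    loss = compute_loss(model, images, targets)  # hypothetical helper
    # Scale so the accumulated gradient equals the average over the window.
    (loss / config.gradient_accumulation).backward()
    if (step + 1) % config.gradient_accumulation == 0:
        optimizer.step()       # one update per 8 micro-batches of 4 samples
        optimizer.zero_grad()  # -> effective batch size 4 * 8 = 32
```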
```diff
@@ -362,11 +363,20 @@ class LaborViewMedSigLIP(nn.Module):
             param.requires_grad = False
         print("Encoder frozen")
 
-    def unfreeze_encoder(self):
-        """Unfreeze the vision encoder"""
+    def unfreeze_encoder(self, use_gradient_checkpointing=True):
+        """Unfreeze the vision encoder with optional gradient checkpointing"""
         for param in self.vision_encoder.parameters():
             param.requires_grad = True
-        print("Encoder unfrozen")
+
+        # Enable gradient checkpointing to save memory
+        if use_gradient_checkpointing:
+            if hasattr(self.vision_encoder, 'gradient_checkpointing_enable'):
+                self.vision_encoder.gradient_checkpointing_enable()
+                print("Encoder unfrozen with gradient checkpointing")
+            else:
+                print("Encoder unfrozen (gradient checkpointing not available)")
+        else:
+            print("Encoder unfrozen")
 
 
 def train_epoch(model, loader, optimizer, scheduler, scaler, device, config, epoch):
```
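Gradient checkpointing trades compute for memory: intermediate activations are dropped in the forward pass and recomputed during backward, which is what lets full-encoder fine-tuning fit alongside the smaller batch. The `hasattr` guard exists because `gradient_checkpointing_enable()` is a method of Hugging Face `PreTrainedModel` subclasses, not of arbitrary `nn.Module`s. A self-contained sketch, assuming a SigLIP-style encoder from `transformers` (the checkpoint name is an example, not necessarily the one this script loads):

```python
# Sketch: enable gradient checkpointing on a transformers vision encoder.
# The checkpoint is an example; the script's actual encoder may differ.
from transformers import SiglipVisionModel

encoder = SiglipVisionModel.from_pretrained("google/siglip-base-patch16-224")
if hasattr(encoder, "gradient_checkpointing_enable"):
    # Forward activations are recomputed during backward instead of stored,
    # cutting peak memory at the cost of roughly one extra forward pass.
    encoder.gradient_checkpointing_enable()
```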
```diff
@@ -614,7 +624,9 @@ def main():
     for epoch in range(config.num_epochs):
         # Unfreeze encoder after initial epochs
         if epoch == config.freeze_encoder_epochs:
-            model.unfreeze_encoder()
+            # Clear memory before unfreezing
+            torch.cuda.empty_cache()
+            model.unfreeze_encoder(use_gradient_checkpointing=config.use_gradient_checkpointing)
             # Recreate optimizer with all parameters
             optimizer = AdamW(
                 model.parameters(),
```
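Rebuilding the optimizer after the unfreeze is necessary because `AdamW` only updates the parameter groups it was constructed with; the newly trainable encoder weights would otherwise never receive updates. One side effect worth knowing: reconstruction also resets the moment estimates for the heads that were already training. A condensed sketch of the schedule, assuming the initial optimizer was built over only the trainable (head) parameters:

```python
# Condensed sketch of the freeze/unfreeze schedule; names mirror the script.
import torch
from torch.optim import AdamW

optimizer = AdamW(
    [p for p in model.parameters() if p.requires_grad],  # heads only at first
    lr=config.learning_rate,
    weight_decay=config.weight_decay,
)
for epoch in range(config.num_epochs):
    if epoch == config.freeze_encoder_epochs:
        torch.cuda.empty_cache()  # release cached CUDA blocks first
        model.unfreeze_encoder(
            use_gradient_checkpointing=config.use_gradient_checkpointing
        )
        # Re-register all parameters; note this resets AdamW's moment estimates.
        optimizer = AdamW(
            model.parameters(),
            lr=config.learning_rate,
            weight_decay=config.weight_decay,
        )
    # ... train_epoch(model, loader, optimizer, ...) as in the script ...
```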
 