|
|
|
|
|
""" |
|
|
SAM3 LoRA微调训练脚本 - BraTS脑肿瘤分割 |
|
|
|
|
|
使用LoRA高效微调SAM3模型进行3D医学图像分割 |
|
|
""" |
|
|
|
|
|
import os |
|
|
import sys |
|
|
import argparse |
|
|
import json |
|
|
import time |
|
|
from pathlib import Path |
|
|
from datetime import datetime |
|
|
from typing import Dict, Any, Optional |
|
|
|
|
|
import numpy as np |
|
|
import torch |
|
|
import torch.nn as nn |
|
|
import torch.nn.functional as F |
|
|
from torch.utils.data import DataLoader |
|
|
from torch.cuda.amp import GradScaler |
|
|
from torch.amp import autocast |
|
|
from torch.utils.tensorboard import SummaryWriter |
|
|
from tqdm import tqdm |
|
|
|
|
|
|
|
|
sys.path.insert(0, '/root/githubs/sam3') |
|
|
|
|
|
from brats_dataset import BraTSImageDataset, BraTSVideoDataset, collate_fn_brats |
|
|
from lora import ( |
|
|
apply_lora_to_model, |
|
|
get_trainable_params, |
|
|
count_parameters, |
|
|
save_lora_weights, |
|
|
load_lora_weights, |
|
|
freeze_model_except_lora |
|
|
) |
|
|
|
|
|
|
|
|
def setup_device():
    """Pick the compute device, preferring CUDA when it is available."""
    cuda_ok = torch.cuda.is_available()
    device = torch.device("cuda" if cuda_ok else "cpu")
    if cuda_ok:
        print(f"Using GPU: {torch.cuda.get_device_name(0)}")
    else:
        print("Using CPU")
    return device
|
|
|
|
|
|
|
|
class ConvBlock(nn.Module):
    """Two stacked (3x3 conv -> BatchNorm -> ReLU) stages, spatial size preserved."""

    def __init__(self, in_ch, out_ch):
        super().__init__()
        layers = []
        channels = in_ch
        # Build the two identical conv stages; only the first changes channel count.
        for _ in range(2):
            layers.append(nn.Conv2d(channels, out_ch, 3, padding=1))
            layers.append(nn.BatchNorm2d(out_ch))
            layers.append(nn.ReLU(inplace=True))
            channels = out_ch
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
|
|
|
|
|
|
|
|
class LightweightSegHead(nn.Module):
    """
    Lightweight U-Net segmentation head for medical image segmentation.

    Args:
        in_channels: number of channels in the input image.
        out_channels: number of channels in the predicted mask (logits).
        features: encoder widths from shallow to deep; the decoder mirrors them.
    """
    # NOTE: the default was a mutable list; a tuple is safer (and never mutated here).
    def __init__(self, in_channels=3, out_channels=1, features=(64, 128, 256, 512)):
        super().__init__()

        self.encoder = nn.ModuleList()
        self.decoder = nn.ModuleList()
        self.pool = nn.MaxPool2d(2, 2)

        # Encoder: one ConvBlock per level, halving resolution via pooling in forward().
        for feature in features:
            self.encoder.append(ConvBlock(in_channels, feature))
            in_channels = feature

        # Bottleneck doubles the deepest width.
        self.bottleneck = ConvBlock(features[-1], features[-1] * 2)

        # Decoder: alternating (upsample transpose-conv, fuse ConvBlock) pairs,
        # deepest level first. The ConvBlock input is 2*feature because the
        # skip connection is concatenated on the channel axis.
        for feature in reversed(features):
            self.decoder.append(
                nn.ConvTranspose2d(feature * 2, feature, kernel_size=2, stride=2)
            )
            self.decoder.append(ConvBlock(feature * 2, feature))

        # 1x1 conv maps the shallowest width to the output logits.
        self.final_conv = nn.Conv2d(features[0], out_channels, kernel_size=1)

    def forward(self, x):
        skip_connections = []

        # Encoder pass, remembering pre-pool activations for the skips.
        for enc in self.encoder:
            x = enc(x)
            skip_connections.append(x)
            x = self.pool(x)

        x = self.bottleneck(x)
        # Deepest skip is consumed first on the way back up.
        skip_connections = skip_connections[::-1]

        for idx in range(0, len(self.decoder), 2):
            x = self.decoder[idx](x)  # transpose-conv upsample
            skip = skip_connections[idx // 2]

            # Odd input sizes make pool/upsample disagree by a pixel; resize to match.
            if x.shape != skip.shape:
                x = F.interpolate(x, size=skip.shape[2:], mode='bilinear', align_corners=False)

            x = torch.cat([skip, x], dim=1)
            x = self.decoder[idx + 1](x)

        return self.final_conv(x)
|
|
|
|
|
|
|
|
def dice_loss(pred: torch.Tensor, target: torch.Tensor, smooth: float = 1.0) -> torch.Tensor:
    """Soft Dice loss over sigmoid-activated logits (smoothed to avoid 0/0)."""
    probs = torch.sigmoid(pred).view(-1)
    labels = target.view(-1).float()

    overlap = (probs * labels).sum()
    total = probs.sum() + labels.sum()

    coeff = (2.0 * overlap + smooth) / (total + smooth)
    return 1.0 - coeff
|
|
|
|
|
|
|
|
def focal_loss(
    pred: torch.Tensor,
    target: torch.Tensor,
    alpha: float = 0.25,
    gamma: float = 2.0
) -> torch.Tensor:
    """Binary focal loss on logits: BCE down-weighted on easy examples."""
    ce = F.binary_cross_entropy_with_logits(pred, target.float(), reduction='none')
    # p_t = model probability of the true class; recovered from the CE value.
    p_t = torch.exp(-ce)
    modulator = (1 - p_t) ** gamma
    return (alpha * modulator * ce).mean()
|
|
|
|
|
|
|
|
def combined_loss(pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    """Combined loss: equally weighted Dice + Focal."""
    dice_term = dice_loss(pred, target)
    focal_term = focal_loss(pred, target)
    return 0.5 * dice_term + 0.5 * focal_term
|
|
|
|
|
|
|
|
def compute_dice(pred: torch.Tensor, target: torch.Tensor) -> float:
    """Hard Dice coefficient after thresholding sigmoid probabilities at 0.5."""
    binary = (torch.sigmoid(pred) > 0.5).float().view(-1)
    truth = target.view(-1).float()

    inter = (binary * truth).sum()
    denom = binary.sum() + truth.sum()

    # Both prediction and target empty counts as perfect agreement.
    if denom == 0:
        return 1.0

    return ((2.0 * inter) / denom).item()
|
|
|
|
|
|
|
|
class SAM3Trainer:
    """SAM3 LoRA trainer.

    Drives the epoch loop with mixed-precision (AMP) training, gradient
    accumulation, gradient clipping, TensorBoard logging, and checkpointing
    of both the full training state and the LoRA-only weights.
    """

    def __init__(
        self,
        model: nn.Module,
        train_loader: DataLoader,
        val_loader: DataLoader,
        optimizer: torch.optim.Optimizer,
        scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
        device: torch.device = torch.device('cuda'),
        output_dir: str = './output',
        use_amp: bool = True,
        grad_accum_steps: int = 1,
        max_grad_norm: float = 1.0,
    ):
        self.model = model
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.device = device
        self.output_dir = Path(output_dir)
        self.use_amp = use_amp
        self.grad_accum_steps = grad_accum_steps
        self.max_grad_norm = max_grad_norm

        # Create output directories up front so later saves cannot fail on a
        # missing path.
        self.output_dir.mkdir(parents=True, exist_ok=True)
        (self.output_dir / 'checkpoints').mkdir(exist_ok=True)

        # TensorBoard event files live under <output_dir>/tensorboard.
        self.writer = SummaryWriter(str(self.output_dir / 'tensorboard'))

        # Loss scaler only exists when AMP is on; all scaler calls below are
        # guarded by self.use_amp.
        # NOTE(review): torch.cuda.amp.GradScaler is deprecated in recent
        # PyTorch in favor of torch.amp.GradScaler('cuda') — confirm version.
        self.scaler = GradScaler() if use_amp else None

        # Bookkeeping for logging, resuming, and best-model selection.
        self.global_step = 0
        self.epoch = 0
        self.best_dice = 0.0

    def train_epoch(self) -> Dict[str, float]:
        """Train for one epoch; returns mean loss and mean Dice over batches."""
        self.model.train()

        total_loss = 0.0
        total_dice = 0.0
        num_batches = 0

        pbar = tqdm(self.train_loader, desc=f'Epoch {self.epoch}')

        for batch_idx, batch in enumerate(pbar):
            # Two batch layouts: image datasets provide 'images' directly;
            # video datasets provide 'frames' of shape (B, T, ...) which are
            # flattened into (B*T, ...) so frames are treated as images.
            if 'images' in batch:
                images = batch['images'].to(self.device)
                masks = batch['masks'].to(self.device)
                bboxes = batch['bboxes'].to(self.device)
            else:
                frames = batch['frames']
                B, T = frames.shape[:2]
                images = frames.view(B * T, *frames.shape[2:]).to(self.device)
                masks = batch['masks'].view(B * T, *batch['masks'].shape[2:]).to(self.device)
                bboxes = batch['bboxes'].view(B * T, 4).to(self.device)

            # Forward + loss under autocast; loss is pre-scaled for accumulation
            # so the effective gradient equals a full-batch average.
            with autocast('cuda', enabled=self.use_amp):
                outputs = self._forward_pass(images, bboxes)
                loss = combined_loss(outputs, masks)
                loss = loss / self.grad_accum_steps

            if self.use_amp:
                self.scaler.scale(loss).backward()
            else:
                loss.backward()

            # Step the optimizer only every grad_accum_steps micro-batches.
            # NOTE(review): if len(train_loader) is not divisible by
            # grad_accum_steps, the trailing micro-batches never trigger a
            # step and their gradients are discarded by the next zero_grad.
            if (batch_idx + 1) % self.grad_accum_steps == 0:
                if self.use_amp:
                    # Unscale before clipping so the norm is measured in
                    # true (unscaled) gradient units.
                    self.scaler.unscale_(self.optimizer)
                    torch.nn.utils.clip_grad_norm_(
                        self.model.parameters(), self.max_grad_norm
                    )
                    self.scaler.step(self.optimizer)
                    self.scaler.update()
                else:
                    torch.nn.utils.clip_grad_norm_(
                        self.model.parameters(), self.max_grad_norm
                    )
                    self.optimizer.step()

                self.optimizer.zero_grad()
                self.global_step += 1

            # Metric for monitoring only; no gradients needed.
            with torch.no_grad():
                dice = compute_dice(outputs, masks)

            # Undo the accumulation scaling so the logged loss is per-batch.
            total_loss += loss.item() * self.grad_accum_steps
            total_dice += dice
            num_batches += 1

            pbar.set_postfix({
                'loss': f'{loss.item() * self.grad_accum_steps:.4f}',
                'dice': f'{dice:.4f}'
            })

            # Light-touch TensorBoard logging every 10 optimizer steps.
            if self.global_step % 10 == 0:
                self.writer.add_scalar('train/loss', loss.item() * self.grad_accum_steps, self.global_step)
                self.writer.add_scalar('train/dice', dice, self.global_step)
                self.writer.add_scalar('train/lr', self.optimizer.param_groups[0]['lr'], self.global_step)

        avg_loss = total_loss / num_batches
        avg_dice = total_dice / num_batches

        return {'loss': avg_loss, 'dice': avg_dice}

    def _forward_pass(self, images: torch.Tensor, bboxes: torch.Tensor) -> torch.Tensor:
        """Forward pass through the lightweight segmentation model.

        Note: `bboxes` is accepted for interface compatibility but is not
        used by the current model.
        """
        B, C, H, W = images.shape

        outputs = self.model(images)

        # Resize logits back to the input resolution if the model downsampled.
        if outputs.shape[-2:] != (H, W):
            outputs = F.interpolate(outputs, size=(H, W), mode='bilinear', align_corners=False)

        # Drop the singleton channel dim so outputs align with (B, H, W) masks.
        return outputs.squeeze(1)

    @torch.no_grad()
    def validate(self) -> Dict[str, float]:
        """Run validation; returns mean loss and mean Dice, also logged to TensorBoard."""
        self.model.eval()

        total_loss = 0.0
        total_dice = 0.0
        num_batches = 0

        for batch in tqdm(self.val_loader, desc='Validating'):
            # Same image/video batch-layout handling as train_epoch.
            if 'images' in batch:
                images = batch['images'].to(self.device)
                masks = batch['masks'].to(self.device)
                bboxes = batch['bboxes'].to(self.device)
            else:
                frames = batch['frames']
                B, T = frames.shape[:2]
                images = frames.view(B * T, *frames.shape[2:]).to(self.device)
                masks = batch['masks'].view(B * T, *batch['masks'].shape[2:]).to(self.device)
                bboxes = batch['bboxes'].view(B * T, 4).to(self.device)

            with autocast('cuda', enabled=self.use_amp):
                outputs = self._forward_pass(images, bboxes)
                loss = combined_loss(outputs, masks)

            dice = compute_dice(outputs, masks)

            total_loss += loss.item()
            total_dice += dice
            num_batches += 1

        avg_loss = total_loss / num_batches
        avg_dice = total_dice / num_batches

        self.writer.add_scalar('val/loss', avg_loss, self.global_step)
        self.writer.add_scalar('val/dice', avg_dice, self.global_step)

        return {'loss': avg_loss, 'dice': avg_dice}

    def save_checkpoint(self, filename: str = 'checkpoint.pt', is_best: bool = False):
        """Save full training state plus LoRA-only weights; best model saved separately."""
        checkpoint = {
            'epoch': self.epoch,
            'global_step': self.global_step,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'best_dice': self.best_dice,
        }

        if self.scheduler is not None:
            checkpoint['scheduler_state_dict'] = self.scheduler.state_dict()

        path = self.output_dir / 'checkpoints' / filename
        torch.save(checkpoint, path)

        # LoRA weights are saved on every checkpoint (overwritten each time).
        save_lora_weights(self.model, str(self.output_dir / 'checkpoints' / 'lora_weights.pt'))

        if is_best:
            best_path = self.output_dir / 'checkpoints' / 'best_model.pt'
            torch.save(checkpoint, best_path)
            save_lora_weights(self.model, str(self.output_dir / 'checkpoints' / 'best_lora_weights.pt'))

    def load_checkpoint(self, path: str):
        """Restore trainer state from a checkpoint written by save_checkpoint."""
        # NOTE(review): torch.load without weights_only=True unpickles
        # arbitrary objects — only load trusted checkpoints.
        checkpoint = torch.load(path, map_location=self.device)

        # strict=False tolerates missing/extra keys (e.g. LoRA-augmented models).
        self.model.load_state_dict(checkpoint['model_state_dict'], strict=False)
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.epoch = checkpoint['epoch']
        self.global_step = checkpoint['global_step']
        self.best_dice = checkpoint.get('best_dice', 0.0)

        if self.scheduler is not None and 'scheduler_state_dict' in checkpoint:
            self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])

    def train(self, num_epochs: int, val_freq: int = 1):
        """Full training loop: train each epoch, validate every val_freq epochs,
        checkpoint after every epoch, and track the best validation Dice."""
        print(f"\n{'='*60}")
        print(f"Starting training for {num_epochs} epochs")
        print(f"Output directory: {self.output_dir}")
        print(f"{'='*60}\n")

        for epoch in range(num_epochs):
            self.epoch = epoch

            train_metrics = self.train_epoch()
            print(f"Epoch {epoch}: train_loss={train_metrics['loss']:.4f}, train_dice={train_metrics['dice']:.4f}")

            # Per-epoch LR schedule step (CosineAnnealingLR in main()).
            if self.scheduler is not None:
                self.scheduler.step()

            if (epoch + 1) % val_freq == 0:
                val_metrics = self.validate()
                print(f"Epoch {epoch}: val_loss={val_metrics['loss']:.4f}, val_dice={val_metrics['dice']:.4f}")

                is_best = val_metrics['dice'] > self.best_dice
                if is_best:
                    self.best_dice = val_metrics['dice']
                    print(f"  New best dice: {self.best_dice:.4f}")

                self.save_checkpoint(f'checkpoint_epoch_{epoch}.pt', is_best=is_best)
            else:
                self.save_checkpoint(f'checkpoint_epoch_{epoch}.pt')

        self.save_checkpoint('final_checkpoint.pt')

        print(f"\n{'='*60}")
        print(f"Training completed!")
        print(f"Best validation Dice: {self.best_dice:.4f}")
        print(f"{'='*60}\n")

        self.writer.close()
|
|
|
|
|
|
|
|
def build_segmentation_model(device: str = 'cuda'):
    """Create the lightweight segmentation head and move it onto `device`."""
    print("Building lightweight segmentation model...")
    return LightweightSegHead(
        in_channels=3, out_channels=1, features=[64, 128, 256, 512]
    ).to(device)
|
|
|
|
|
|
|
|
def main():
    """Entry point: parse arguments, build data/model/optimizer, and run training."""
    parser = argparse.ArgumentParser(description='SAM3 LoRA Fine-tuning for BraTS')

    # --- Data arguments ---
    parser.add_argument('--data_root', type=str,
                        default='/data/yty/brats2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData',
                        help='BraTS数据根目录')
    parser.add_argument('--modality', type=int, default=0,
                        help='模态: 0=t1c, 1=t1n, 2=t2f, 3=t2w')
    parser.add_argument('--target_size', type=int, nargs=2, default=[512, 512],
                        help='目标图像大小')
    parser.add_argument('--dataset_type', type=str, default='image',
                        choices=['image', 'video'],
                        help='数据集类型')

    # --- Model / LoRA arguments ---
    # NOTE(review): checkpoint and LoRA args are parsed and saved in the
    # config, but this script currently builds a fresh LightweightSegHead
    # and never loads the SAM3 checkpoint or applies LoRA — confirm intent.
    parser.add_argument('--checkpoint', type=str,
                        default='/data/yty/sam3/sam3.pt',
                        help='SAM3预训练模型路径')
    parser.add_argument('--lora_rank', type=int, default=8,
                        help='LoRA秩')
    parser.add_argument('--lora_alpha', type=float, default=16.0,
                        help='LoRA alpha')
    parser.add_argument('--lora_dropout', type=float, default=0.1,
                        help='LoRA dropout')

    # --- Optimization arguments ---
    parser.add_argument('--epochs', type=int, default=50,
                        help='训练轮数')
    parser.add_argument('--batch_size', type=int, default=4,
                        help='批次大小')
    parser.add_argument('--lr', type=float, default=1e-4,
                        help='学习率')
    parser.add_argument('--weight_decay', type=float, default=0.01,
                        help='权重衰减')
    parser.add_argument('--grad_accum', type=int, default=4,
                        help='梯度累积步数')
    parser.add_argument('--num_workers', type=int, default=4,
                        help='数据加载进程数')

    # --- Output / validation arguments ---
    parser.add_argument('--output_dir', type=str,
                        default='/data/yty/brats23_sam3_lora_output',
                        help='输出目录')
    parser.add_argument('--val_freq', type=int, default=5,
                        help='验证频率')

    # --- Reproducibility / resume ---
    parser.add_argument('--seed', type=int, default=42,
                        help='随机种子')
    parser.add_argument('--resume', type=str, default=None,
                        help='从检查点恢复训练')

    args = parser.parse_args()

    # Seed CPU RNGs for reproducibility (CUDA RNG is not seeded here).
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    device = setup_device()

    output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    # Persist the run configuration alongside the outputs.
    config = vars(args)
    config['timestamp'] = datetime.now().isoformat()
    with open(output_dir / 'config.json', 'w') as f:
        json.dump(config, f, indent=2)

    print(f"\n{'='*60}")
    print("Creating datasets...")
    print(f"{'='*60}")

    # Build train/val datasets; 'image' yields 2D slices, 'video' yields
    # 8-frame slice sequences.
    if args.dataset_type == 'image':
        train_dataset = BraTSImageDataset(
            data_root=args.data_root,
            split='train',
            modality=args.modality,
            target_size=tuple(args.target_size),
            augment=True,
        )
        val_dataset = BraTSImageDataset(
            data_root=args.data_root,
            split='val',
            modality=args.modality,
            target_size=tuple(args.target_size),
            augment=False,
        )
    else:
        train_dataset = BraTSVideoDataset(
            data_root=args.data_root,
            split='train',
            modality=args.modality,
            target_size=tuple(args.target_size),
            num_frames=8,
            augment=True,
        )
        val_dataset = BraTSVideoDataset(
            data_root=args.data_root,
            split='val',
            modality=args.modality,
            target_size=tuple(args.target_size),
            num_frames=8,
            augment=False,
        )

    train_loader = DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True,
        collate_fn=collate_fn_brats,
    )

    val_loader = DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        pin_memory=True,
        collate_fn=collate_fn_brats,
    )

    print(f"Train samples: {len(train_dataset)}")
    print(f"Val samples: {len(val_dataset)}")

    print(f"\n{'='*60}")
    print("Building model...")
    print(f"{'='*60}")

    model = build_segmentation_model(device=str(device))

    param_stats = count_parameters(model)
    print(f"\nParameter statistics:")
    print(f" Total: {param_stats['total']:,}")
    print(f" Trainable: {param_stats['trainable']:,}")

    # AdamW over all model parameters (the lightweight head is fully trainable).
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=args.lr,
        weight_decay=args.weight_decay,
    )

    # Cosine decay from lr down to 1% of lr over the full run.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        T_max=args.epochs,
        eta_min=args.lr * 0.01,
    )

    trainer = SAM3Trainer(
        model=model,
        train_loader=train_loader,
        val_loader=val_loader,
        optimizer=optimizer,
        scheduler=scheduler,
        device=device,
        output_dir=str(output_dir),
        use_amp=True,
        grad_accum_steps=args.grad_accum,
    )

    # Optionally resume full training state from a previous checkpoint.
    if args.resume:
        print(f"\nResuming from {args.resume}")
        trainer.load_checkpoint(args.resume)

    trainer.train(num_epochs=args.epochs, val_freq=args.val_freq)
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
main() |
|
|
|