| | """ |
| | 通用模型训练工具 |
| | |
| | 提供了模型训练、评估、保存等功能,支持: |
| | 1. 训练进度可视化 |
| | 2. 日志记录 |
| | 3. 模型检查点保存 |
| | 4. 嵌入向量收集 |
| | """ |
| |
|
import torch
import torch.nn as nn
import torch.optim as optim
import time
import os
import logging
import numpy as np
from tqdm import tqdm
import sys
from pathlib import Path
import torch.nn.functional as F
import torchvision.transforms as transforms

current_dir = Path(__file__).resolve().parent
project_root = current_dir.parent.parent
sys.path.append(str(project_root))

from ttv_utils import time_travel_saver


def setup_logger(log_file):
    """Configure the training logger; an existing log file is overwritten.

    Args:
        log_file: Path to the log file.

    Returns:
        logger: The configured logger.
    """
    logger = logging.getLogger('train')
    logger.setLevel(logging.INFO)

    # Remove any handlers left over from a previous call
    if logger.hasHandlers():
        logger.handlers.clear()

    # File handler (mode='w' overwrites an existing log file)
    fh = logging.FileHandler(log_file, mode='w')
    fh.setLevel(logging.INFO)

    # Console handler
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)

    # Shared format for both handlers
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)

    logger.addHandler(fh)
    logger.addHandler(ch)

    return logger


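# Illustrative use of setup_logger (the log path below is a hypothetical example,
# not one fixed by this module):
#
#   logger = setup_logger('./checkpoints/code/train.log')
#   logger.info('written to both the log file and the console')
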
def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda:0',
                save_dir='./checkpoints', model_name='model', save_type='0', layer_name=None, interval=2):
    """Generic model-training loop.

    Args:
        model: Model to train.
        trainloader: Training data loader.
        testloader: Test data loader.
        epochs: Number of training epochs.
        lr: Learning rate.
        device: Training device in the form 'cuda:N', where N is the GPU index (0, 1, 2, 3).
        save_dir: Directory for saved checkpoints.
        model_name: Model name.
        save_type: Save type: '0' for normal training, '1' for data-augmentation training,
            '2' for backdoor training.
        layer_name: Name of the layer whose embeddings are collected.
        interval: Save a checkpoint every `interval` epochs.
    """
    # Fall back to the CPU, or to GPU 0, when the requested device is unavailable
    if not torch.cuda.is_available():
        print("CUDA is not available; training on the CPU instead")
        device = 'cpu'
    elif not device.startswith('cuda:'):
        device = 'cuda:0'

    if device.startswith('cuda:'):
        gpu_id = int(device.split(':')[1])
        if gpu_id >= torch.cuda.device_count():
            print(f"GPU {gpu_id} is not available; using GPU 0 instead")
            device = 'cuda:0'

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # Pick the log file according to the save type
    if save_type == '1':
        log_file = os.path.join(os.path.dirname(save_dir), 'code', 'data_aug_train.log')
    elif save_type == '2':
        log_file = os.path.join(os.path.dirname(save_dir), 'code', 'backdoor_train.log')
    else:  # '0': normal training
        log_file = os.path.join(os.path.dirname(save_dir), 'code', 'train.log')
    if not os.path.exists(os.path.dirname(log_file)):
        os.makedirs(os.path.dirname(log_file))
    logger = setup_logger(log_file)

    # Checkpoints for each save type go into their own subdirectory
    save_dir = os.path.join(save_dir, save_type)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # Loss, optimizer, and a cosine learning-rate schedule over the full run
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epochs)

    model = model.to(device)
    best_acc = 0
    start_time = time.time()

    logger.info(f'Started training {model_name}')
    logger.info(f'Total epochs: {epochs}, learning rate: {lr}, device: {device}')

    for epoch in range(epochs):
        # Training phase
        model.train()
        train_loss = 0
        correct = 0
        total = 0

        train_pbar = tqdm(trainloader, desc=f'Epoch {epoch+1}/{epochs} [Train]')
        for batch_idx, (inputs, targets) in enumerate(train_pbar):
            inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            # Running loss and accuracy on the progress bar
            train_pbar.set_postfix({
                'loss': f'{train_loss/(batch_idx+1):.3f}',
                'acc': f'{100.*correct/total:.2f}%'
            })

            # Log every 100 batches
            if batch_idx % 100 == 0:
                logger.info(f'Epoch: {epoch+1} | Batch: {batch_idx} | '
                            f'Loss: {train_loss/(batch_idx+1):.3f} | '
                            f'Acc: {100.*correct/total:.2f}%')

        # Evaluation on the test set
        model.eval()
        test_loss = 0
        correct = 0
        total = 0

        test_pbar = tqdm(testloader, desc=f'Epoch {epoch+1}/{epochs} [Test]')
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(test_pbar):
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = model(inputs)
                loss = criterion(outputs, targets)

                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()

                test_pbar.set_postfix({
                    'loss': f'{test_loss/(batch_idx+1):.3f}',
                    'acc': f'{100.*correct/total:.2f}%'
                })

        acc = 100.*correct/total
        logger.info(f'Epoch: {epoch+1} | Test Loss: {test_loss/(batch_idx+1):.3f} | '
                    f'Test Acc: {acc:.2f}%')

        # On the first epoch, build a non-shuffled loader over the training set and
        # initialize the checkpoint/embedding saver for this run
        if epoch == 0:
            ordered_loader = torch.utils.data.DataLoader(
                trainloader.dataset,
                batch_size=trainloader.batch_size,
                shuffle=False,
                num_workers=trainloader.num_workers
            )
            save_model = time_travel_saver(model, ordered_loader, device, save_dir, model_name,
                                           interval=1, auto_save_embedding=True, layer_name=layer_name, show=True)

        # Every `interval` epochs, save a checkpoint together with the collected embeddings
        if (epoch + 1) % interval == 0:
            ordered_loader = torch.utils.data.DataLoader(
                trainloader.dataset,
                batch_size=trainloader.batch_size,
                shuffle=False,
                num_workers=trainloader.num_workers
            )
            save_model = time_travel_saver(model, ordered_loader, device, save_dir, model_name,
                                           interval=1, auto_save_embedding=True, layer_name=layer_name)
            save_model.save()

        scheduler.step()

    logger.info('Training finished!')

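# Illustrative call sketch for train_model. The torchvision resnet18 model, the
# layer_name value, and the positional arguments to get_cifar10_dataloaders are
# assumptions for this example, not fixed by this module:
#
#   from .dataset_utils import get_cifar10_dataloaders
#   from torchvision.models import resnet18
#
#   model = resnet18(num_classes=10)
#   trainloader, testloader = get_cifar10_dataloaders(128, 2, None)
#   train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda:0',
#               save_dir='./checkpoints', model_name='resnet18', save_type='0',
#               layer_name='avgpool', interval=2)
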
def train_model_data_augmentation(model, epochs=200, lr=0.1, device='cuda:0',
                                  save_dir='./checkpoints', model_name='model',
                                  batch_size=128, num_workers=2, local_dataset_path=None):
    """Train a model with data augmentation.

    Augmentation scheme:
    1. RandomCrop: pad by 4 pixels, then crop back to the original size, for positional diversity
    2. RandomHorizontalFlip: random horizontal flips, for orientation diversity
    3. RandomRotation: random rotation of up to 15 degrees, for angular diversity
    4. ColorJitter: jitter brightness, contrast, saturation, and hue
    5. RandomPerspective: random perspective transform, for viewpoint diversity
    6. RandomErasing: randomly erase a region, to simulate occlusion

    Args:
        model: Model to train.
        epochs: Number of training epochs.
        lr: Learning rate.
        device: Training device.
        save_dir: Directory for saved checkpoints.
        model_name: Model name.
        batch_size: Batch size.
        num_workers: Number of data-loading worker processes.
        local_dataset_path: Path to a local copy of the dataset.
    """
    import torchvision.transforms as transforms
    from .dataset_utils import get_cifar10_dataloaders

    # Training-time augmentation pipeline (RandomErasing runs after ToTensor/Normalize
    # because it operates on tensors)
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.ColorJitter(
            brightness=0.2,
            contrast=0.2,
            saturation=0.2,
            hue=0.1
        ),
        transforms.RandomPerspective(distortion_scale=0.2, p=0.5),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        transforms.RandomErasing(p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3))
    ])

    trainloader, testloader = get_cifar10_dataloaders(batch_size, num_workers, local_dataset_path)

    # Rebuild the training loader with the augmented transform
    trainset = trainloader.dataset
    trainset.transform = transform_train
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)

    # Delegate to the generic training loop; save_type '1' marks a data-augmentation run
    train_model(model, trainloader, testloader, epochs, lr, device, save_dir, model_name, save_type='1')

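# Illustrative call sketch (the torchvision resnet18 model below is an assumption,
# not a model defined by this project):
#
#   from torchvision.models import resnet18
#
#   model = resnet18(num_classes=10)
#   train_model_data_augmentation(model, epochs=200, lr=0.1, device='cuda:0',
#                                 save_dir='./checkpoints', model_name='resnet18_aug',
#                                 batch_size=128, num_workers=2, local_dataset_path=None)
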
def train_model_backdoor(model, poison_ratio=0.1, target_label=0, epochs=200, lr=0.1,
                         device='cuda:0', save_dir='./checkpoints', model_name='model',
                         batch_size=128, num_workers=2, local_dataset_path=None, layer_name=None, interval=2):
    """Train a backdoored model.

    Backdoor attack scheme:
    1. Label flipping: the labels of a selected fraction of samples are changed to the target label.
    2. Trigger pattern: a 4x4 white square is stamped into the bottom-right corner of the selected samples.
    3. Validation strategy:
       - Validate on clean data (to confirm accuracy on normal samples).
       - Validate on triggered data (to measure the attack success rate).

    Args:
        model: Model to train.
        poison_ratio: Fraction of training samples to poison.
        target_label: Target label for poisoned samples.
        epochs: Number of training epochs.
        lr: Learning rate.
        device: Training device.
        save_dir: Directory for saved checkpoints.
        model_name: Model name.
        batch_size: Batch size.
        num_workers: Number of data-loading worker processes.
        local_dataset_path: Path to a local copy of the dataset.
        layer_name: Name of the layer whose embeddings are collected.
        interval: Save a checkpoint every `interval` epochs.
    """
    from .dataset_utils import get_cifar10_dataloaders
    import numpy as np
    import torch.nn.functional as F

    trainloader, testloader = get_cifar10_dataloaders(batch_size, num_workers, local_dataset_path)

    # Choose which training samples to poison
    trainset = trainloader.dataset
    num_poison = int(len(trainset) * poison_ratio)
    poison_indices = np.random.choice(len(trainset), num_poison, replace=False)

    # Keep copies of the original labels and images so they can be restored after training
    original_targets = trainset.targets.copy()
    original_data = trainset.data.copy()

    # For each poisoned sample, flip its label and stamp a 4x4 white trigger into the bottom-right corner
    trigger_pattern = np.ones((4, 4, 3), dtype=np.uint8) * 255
    for idx in poison_indices:
        trainset.targets[idx] = target_label
        trainset.data[idx, -4:, -4:] = trigger_pattern

    poisoned_trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)

    # Train on the poisoned data; save_type '2' marks a backdoor run
    train_model(model, poisoned_trainloader, testloader, epochs, lr, device, save_dir, model_name,
                save_type='2', layer_name=layer_name, interval=interval)

    # Restore the clean labels and images
    trainset.targets = original_targets
    trainset.data = original_data

    # Validate on the restored (clean) training data
    validation_loader = torch.utils.data.DataLoader(
        trainset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, targets in validation_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

    clean_accuracy = 100. * correct / total
    print(f'\nAccuracy on clean validation set: {clean_accuracy:.2f}%')

    # Build a triggered validation set: stamp the trigger into every image and
    # relabel everything with the target label
    trigger_validation = trainset.data.copy()
    trigger_validation_targets = np.array([target_label] * len(trainset))
    trigger_validation[:, -4:, -4:] = trigger_pattern

    # Convert HWC uint8 images to normalized NCHW float tensors
    trigger_validation = torch.tensor(trigger_validation).float().permute(0, 3, 1, 2) / 255.0
    normalize = transforms.Normalize(mean=(0.4914, 0.4822, 0.4465),
                                     std=(0.2023, 0.1994, 0.2010))
    trigger_validation = normalize(trigger_validation)

    # Measure the attack success rate on the triggered samples
    correct = 0
    total = 0
    eval_batch_size = 100  # evaluation batch size, independent of the training batch size
    with torch.no_grad():
        for i in range(0, len(trigger_validation), eval_batch_size):
            inputs = trigger_validation[i:i+eval_batch_size].to(device)
            targets = torch.tensor(trigger_validation_targets[i:i+eval_batch_size]).to(device)
            outputs = model(inputs)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

    attack_success_rate = 100. * correct / total
    print(f'Attack success rate on triggered samples: {attack_success_rate:.2f}%')
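
# Illustrative call sketch (the torchvision resnet18 model and the layer_name value
# below are assumptions; poison_ratio and target_label follow the defaults documented above):
#
#   from torchvision.models import resnet18
#
#   model = resnet18(num_classes=10)
#   train_model_backdoor(model, poison_ratio=0.1, target_label=0, epochs=200, lr=0.1,
#                        device='cuda:0', save_dir='./checkpoints', model_name='resnet18_bd',
#                        batch_size=128, num_workers=2, local_dataset_path=None,
#                        layer_name='avgpool', interval=2)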