# SEED_balanced / FAITH / BatchSampler.py
# (Hugging Face page residue, commented out so the module parses:)
# Mengieong's picture
# Upload 253 files
# 02e9762 verified
from collections import defaultdict
import pdb
import random
import numpy as np
import itertools
import torch
from torch.utils.data import Sampler
from datasets.dataset import SeqDeepFakeDataset
from models.configuration import Config
# class BalancedBatchSampler(Sampler):
# def __init__(self, dataset, batch_size):
# self.n_classes = 5
# self.batch_size = batch_size
# self.n_samples_per_class = self.batch_size // self.n_classes
# # 获取每个类别的样本索引
# self.class_indices = [[] for _ in range(self.n_classes)]
# for idx, (_, _, _, _, length) in enumerate(dataset):
# self.class_indices[length].append(idx)
# self.class_indices = [np.array(indices) for indices in self.class_indices]
# # 计算最大类别样本数确定epoch长度
# self.class_counts = [len(indices) for indices in self.class_indices]
# self.max_class_count = max(self.class_counts)
# self.num_batches = self.max_class_count // self.n_samples_per_class
# print(f"self.class_counts: {self.class_counts}")
# # print(self.max_class_count)
# print(f"self.num_batches: {self.num_batches}")
# def __iter__(self):
# # 每个epoch开始时打乱各类别样本顺序
# shuffled_indices = [indices.copy() for indices in self.class_indices]
# for arr in shuffled_indices:
# np.random.shuffle(arr)
# # 创建无限循环迭代器
# iterators = [itertools.cycle(arr) for arr in shuffled_indices]
# # 生成平衡批次
# for _ in range(self.num_batches):
# batch = []
# for class_idx in range(self.n_classes):
# batch.extend(
# [next(iterators[class_idx]) for _ in range(self.n_samples_per_class)]
# )
# np.random.shuffle(batch) # 打乱批次内顺序
# yield batch
# def __len__(self):
# return self.num_batches
# Deprecated: EfficientBalancedBatchSampler is kept for reference;
# prefer the BalancedBatchSampler defined further below.
class EfficientBalancedBatchSampler(Sampler):
    """Class-balanced batch sampler with a pre-computed sampling plan.

    Groups dataset indices by the sample's class label (the 5th element of
    each dataset item, here the manipulation-sequence length) and builds an
    index matrix so that every batch contains exactly ``samples_per_class``
    indices from each class.  Smaller classes are oversampled (tiled and
    shuffled) so that one epoch covers the largest class at least once.
    """

    def __init__(self, dataset, batch_size, samples_per_class=None):
        """
        :param dataset: iterable of 5-tuples whose 5th element is the class label.
        :param batch_size: total batch size; must be divisible by the number of
                           classes when ``samples_per_class`` is None.
        :param samples_per_class: samples drawn per class per batch; computed
                                  automatically when None.
        """
        self.batch_size = batch_size
        # Group dataset indices by class label.
        self.class_indices = defaultdict(list)
        for idx, (_, _, _, _, length) in enumerate(dataset):
            self.class_indices[length].append(idx)
        self.classes = list(self.class_indices.keys())
        self.num_classes = len(self.classes)
        # Derive (or validate) the per-class quota.
        if samples_per_class is None:
            assert batch_size % self.num_classes == 0, "batch_size必须能被类别数整除"
            self.samples_per_class = batch_size // self.num_classes
        else:
            self.samples_per_class = samples_per_class
            assert batch_size == self.samples_per_class * self.num_classes
        # Epoch length: enough batches for the largest class to be seen once.
        self.num_batches = max(
            (len(idxs) + self.samples_per_class - 1) // self.samples_per_class
            for idxs in self.class_indices.values()
        )
        # Per-class tiling factors needed to fill num_batches batches.
        self.class_repeats = self._calculate_repeats()
        # Index plan of shape (num_classes, num_batches, samples_per_class).
        self.sampling_plan = self._generate_sampling_plan()

    def _calculate_repeats(self):
        """Return, per class, how many times its index list must be tiled so
        that ``num_batches * samples_per_class`` indices are available."""
        total_needed = self.num_batches * self.samples_per_class
        return {
            cls: (total_needed + len(self.class_indices[cls]) - 1)
            // len(self.class_indices[cls])
            for cls in self.classes
        }

    def _generate_sampling_plan(self):
        """Build the (num_classes, num_batches, samples_per_class) index plan.

        BUGFIX: the original sized every row as
        ``max(class_repeats) * samples_per_class``; when a small class forced a
        large repeat count, that length exceeded a big class's tiled index
        array and the row assignment raised ValueError.  Rows are now sized by
        ``num_batches * samples_per_class``, which the repeat counts from
        :meth:`_calculate_repeats` always cover.
        """
        required_length = self.num_batches * self.samples_per_class
        sampling_matrix = np.zeros((self.num_classes, required_length),
                                   dtype=np.int64)
        for i, cls in enumerate(self.classes):
            indices = np.array(self.class_indices[cls])
            np.random.shuffle(indices)  # initial shuffle
            # Oversample by tiling, then shuffle again for extra randomness.
            repeated = np.tile(indices, self.class_repeats[cls])
            np.random.shuffle(repeated)
            sampling_matrix[i] = repeated[:required_length]
        return sampling_matrix.reshape(self.num_classes, self.num_batches,
                                       self.samples_per_class)

    def __iter__(self):
        # Visit batches in random order; batch b takes one pre-assigned
        # slice of samples_per_class indices from every class.
        for b in np.random.permutation(self.num_batches):
            combined = self.sampling_plan[:, b, :].reshape(-1).copy()
            np.random.shuffle(combined)  # mix classes within the batch
            yield combined.tolist()

    def __len__(self):
        # Consistent with __iter__ (the original reported max(class_repeats),
        # which could disagree with the number of batches actually yielded).
        return self.num_batches
'''
Uneven sample distribution will affect the performance of the model.
For example, the model has higher accuracy for shorter samples.
A uniform sampler ensures that the samples in each batch are evenly distributed.
e.g. sequence length 0:1:2:3:4 = 1:1:1:1:1
'''
class BalancedBatchSampler(Sampler):
    """Batch sampler yielding batches with an equal number of indices from
    each of the five sequence-length classes (A..E).

    In a distributed job, each rank keeps the disjoint ``rank::world_size``
    slice of every class list.  Batches are drawn by walking each (shuffled)
    class list in order; the walk wraps around when a class runs out, so
    every batch stays fully balanced regardless of ``epoch_length``.
    """

    def __init__(self, A_indices, B_indices, C_indices, D_indices, E_indices,
                 batch_size, epoch_length, rank=0, world_size=1):
        """
        :param A_indices: sample-index list of class A
        :param B_indices: sample-index list of class B
        :param C_indices: sample-index list of class C
        :param D_indices: sample-index list of class D
        :param E_indices: sample-index list of class E
        :param batch_size: batch size; must be divisible by 5
        :param epoch_length: number of batches per epoch
        :param rank: distributed rank (selects this rank's shard)
        :param world_size: number of distributed ranks
        """
        super().__init__(None)
        # Validate before deriving the per-class quota (the original asserted
        # only after batch_size had already been used).
        assert batch_size % 5 == 0, "batch_size必须能被5整除"
        self.A = A_indices[rank::world_size]
        self.B = B_indices[rank::world_size]
        self.C = C_indices[rank::world_size]
        self.D = D_indices[rank::world_size]
        self.E = E_indices[rank::world_size]
        for pool in (self.A, self.B, self.C, self.D, self.E):
            random.shuffle(pool)
        self.batch_size = batch_size
        self.n = batch_size // 5  # samples drawn per class per batch
        self.epoch_length = epoch_length
        self.epoch = 0
        self.rank = rank
        self.world_size = world_size

    def set_epoch(self, epoch):
        """Reseed the RNGs per epoch (offset by rank) for reproducibility."""
        self.epoch = epoch
        random.seed(epoch + self.rank)
        torch.manual_seed(epoch + self.rank)

    def __iter__(self):
        for i in range(self.epoch_length):
            batch = []
            start = self.n * i
            for pool in (self.A, self.B, self.C, self.D, self.E):
                if not pool:
                    # Empty class on this rank: contribute nothing (matches
                    # the old behavior of an empty slice).
                    continue
                # BUGFIX: wrap around instead of slicing past the end.  The
                # original `pool[n*i : n*(i+1)]` silently produced short or
                # empty per-class slices once epoch_length * n exceeded the
                # pool size, unbalancing the batches.
                batch.extend(pool[(start + j) % len(pool)]
                             for j in range(self.n))
            random.shuffle(batch)  # mix the classes within the batch
            yield batch

    def __len__(self):
        return self.epoch_length
# cfg = Config('./configs/r50.json')
# dataset = SeqDeepFakeDataset(
# cfg=cfg,
# mode="train",
# data_root='data',
# dataset_name='SD3'
# )
# # sampler = BalancedBatchSampler(
# # dataset,
# # 40
# # )
# # sampler = EfficientBalancedBatchSampler(
# # dataset,
# # 40,
# # 8
# # )
# sampler = BalancedBatchSampler(
# list(range(0, 16036)),
# list(range(16036, 37897)),
# list(range(37897, 57159)),
# list(range(57159, 73044)),
# list(range(73044, 80000)),
# 40,
# 2000
# )
# dataloader = torch.utils.data.DataLoader(
# dataset,
# batch_sampler= sampler,
# pin_memory=True,
# # num_workers=8
# )
# print(len(dataloader))
# for steps, (_, _, caps, _, length) in enumerate(dataloader): # masks.shape: [bs, 512, 512]
# print(caps)
# pdb.set_trace()