code stringlengths 17 6.64M |
|---|
class DiceLoss(nn.Module):
    """Soft Dice loss computed over the flattened prediction/target tensors.

    loss = 1 - (2 * sum(p * t) + smooth) / (sum(p^p_norm) + sum(t^p_norm) + smooth)

    Note: both inputs are flattened to 1-D before the sums, so the loss is a
    single scalar; with a scalar loss the 'mean' and 'sum' reductions are
    equivalent and 'none' still returns a 0-dim tensor.
    """

    def __init__(self, smooth=0.001, p=2, reduction='mean'):
        super(DiceLoss, self).__init__()
        self.smooth = smooth      # additive smoothing to avoid 0/0
        self.p = p                # exponent used in the denominator terms
        self.reduction = reduction

    def forward(self, predict, target):
        assert predict.shape[0] == target.shape[0], "predict & target batch size don't match"
        flat_pred = predict.contiguous().view(-1)
        flat_tgt = target.contiguous().view(-1)
        intersection = torch.sum(flat_pred * flat_tgt, dim=0)
        denom_terms = torch.sum(flat_pred.pow(self.p) + flat_tgt.pow(self.p), dim=0)
        loss = 1 - (2 * intersection + self.smooth) / (denom_terms + self.smooth)
        if self.reduction == 'mean':
            return loss.mean()
        if self.reduction == 'sum':
            return loss.sum()
        if self.reduction == 'none':
            return loss
        raise Exception('Unexpected reduction {}'.format(self.reduction))
|
def pairwise_distance(x1, x2, p=2, eps=1e-06):
    """Compute the row-wise cosine distance between two batches of vectors.

    NOTE: despite the name and legacy signature, this returns
    ``1 - cosine_similarity(x1, x2)`` per row — NOT an L_p pairwise
    distance. The previous docstring (copied from ``F.pairwise_distance``)
    documented a p-norm distance, which was wrong. ``p`` and ``eps`` are
    accepted only for backward compatibility and are ignored.

    Args:
        x1: first input tensor of shape (N, D).
        x2: second input tensor of shape (N, D).
        p: unused (kept so existing callers passing it keep working).
        eps: unused (kept for API compatibility).

    Returns:
        Tensor of shape (N,); each entry lies in [0, 2].
    """
    assert x1.size() == x2.size(), 'Input sizes must be equal.'
    assert x1.dim() == 2, 'Input must be a 2D matrix.'
    return 1 - torch.cosine_similarity(x1, x2, dim=1)
|
def triplet_margin_loss_gor_one(anchor, positive, negative, beta=1.0, margin=1.0, p=2, eps=1e-06, swap=False):
    """Triplet margin loss plus a Global Orthogonal Regularization (GOR) term.

    The hinge part penalizes triplets where the anchor-positive distance is
    not at least ``margin`` smaller than the anchor-negative distance; the GOR
    term (weighted by ``beta``) penalizes large squared dot products between
    anchor and negative descriptors.
    """
    assert anchor.size() == positive.size(), 'Input sizes between positive and negative must be equal.'
    assert anchor.size() == negative.size(), 'Input sizes between anchor and negative must be equal.'
    assert positive.size() == negative.size(), 'Input sizes between positive and negative must be equal.'
    assert anchor.dim() == 2, 'Inputd must be a 2D matrix.'
    assert margin > 0.0, 'Margin should be positive value.'
    pos_dist = pairwise_distance(anchor, positive, p, eps)
    neg_dist = pairwise_distance(anchor, negative, p, eps)
    hinge = torch.clamp(margin + pos_dist - neg_dist, min=0.0)
    # GOR: squared per-row dot products between anchor and negative.
    sq_dots = torch.sum(anchor * negative, 1).pow(2)
    gor = torch.mean(sq_dots)
    return torch.mean(hinge) + beta * gor
|
def triplet_margin_loss_gor(anchor, positive, negative1, negative2, beta=1.0, margin=1.0, p=2, eps=1e-06, swap=False):
    """Two-negative variant of the triplet margin loss with GOR.

    The hinge compares the anchor-positive distance against the average of
    the two anchor-negative distances; the GOR term sums the mean squared
    anchor-negative dot products for both negatives, weighted by ``beta``.
    """
    assert anchor.size() == positive.size(), 'Input sizes between positive and negative must be equal.'
    assert anchor.size() == negative1.size(), 'Input sizes between anchor and negative must be equal.'
    assert positive.size() == negative2.size(), 'Input sizes between positive and negative must be equal.'
    assert anchor.dim() == 2, 'Inputd must be a 2D matrix.'
    assert margin > 0.0, 'Margin should be positive value.'
    pos_dist = pairwise_distance(anchor, positive, p, eps)
    neg_dist1 = pairwise_distance(anchor, negative1, p, eps)
    neg_dist2 = pairwise_distance(anchor, negative2, p, eps)
    avg_neg = 0.5 * (neg_dist1 + neg_dist2)
    hinge = torch.clamp(margin + pos_dist - avg_neg, min=0.0)
    gor1 = torch.mean(torch.sum(anchor * negative1, 1).pow(2))
    gor2 = torch.mean(torch.sum(anchor * negative2, 1).pow(2))
    return torch.mean(hinge) + beta * (gor1 + gor2)
|
def distance_matrix_vector(anchor, positive):
    """Given batch of anchor descriptors and positive descriptors calculate distance matrix.

    Entry (i, j) is sqrt(|a_i|^2 + |p_j|^2 - 2 * a_i . p_j + eps), i.e. the
    Euclidean distance between anchor row i and positive row j, with a small
    eps added inside the sqrt for numerical stability of the gradient at 0.

    Fixes: removed the dead local ``D`` (computed, never used) and replaced
    the unsqueeze/bmm/squeeze dance with an equivalent plain ``torch.mm``.
    """
    eps = 0.001
    anchor_sq = torch.sum(anchor * anchor, dim=1).unsqueeze(-1)       # (Na, 1)
    positive_sq = torch.sum(positive * positive, dim=1).unsqueeze(-1)  # (Np, 1)
    cross = torch.mm(anchor, torch.t(positive))                        # (Na, Np)
    sq_dist = anchor_sq.repeat(1, positive.size(0)) + torch.t(positive_sq.repeat(1, anchor.size(0))) - 2.0 * cross
    return torch.sqrt(sq_dist + eps)
|
def percentile(t, q):
    """Return the ``q``-th percentile of the flattened input tensor's data.

    CAUTION:
        * Needs PyTorch >= 1.1.0, as ``torch.kthvalue()`` is used.
        * Values are not interpolated, which corresponds to
          ``numpy.percentile(..., interpolation="nearest")``.

    :param t: Input tensor.
    :param q: Percentile to compute, which must be between 0 and 100 inclusive.
    :return: Resulting value (scalar).
    """
    # Map the percentile onto a 1-based rank into the sorted flattened data.
    rank = 1 + round(0.01 * float(q) * (t.numel() - 1))
    return t.view(-1).kthvalue(int(rank)).values.item()
|
def sos_reg(anchor, positive, KNN=True, k=1, eps=1e-08):
    """Second-order similarity regularizer between two descriptor batches.

    Compares the intra-batch distance matrices of ``anchor`` and ``positive``
    and returns the Frobenius-style norm of their difference. When ``KNN`` is
    True, only entries below the ``k``-th percentile of the positive distance
    matrix are kept (both matrices are masked identically).
    """
    dm_anchor = distance_matrix_vector(anchor, anchor) + eps
    dm_positive = distance_matrix_vector(positive, positive) + eps
    if KNN:
        threshold = percentile(dm_positive, k)
        keep = dm_positive.lt(threshold).int().float()
        dm_anchor = dm_anchor * keep
        dm_positive = dm_positive * keep
    sos = torch.sqrt(torch.sum((dm_anchor - dm_positive).pow(2)))
    # sos is already a scalar; mean() keeps the original return type.
    return torch.mean(sos)
|
def _mix_rbf_kernel(X, Y, sigmas=(1.0,), wts=None):
    """Compute mixture-of-bandwidths RBF kernel matrices K_XX, K_XY, K_YY.

    Each bandwidth ``sigma`` contributes ``wt * exp(-gamma * ||a - b||^2)``
    with ``gamma = 1 / (2 * sigma^2)``; the squared distances are expanded as
    ``-2<a,b> + |a|^2 + |b|^2`` from the Gram matrices.

    Fix: the default for ``sigmas`` was a mutable list (``[1.0]``); replaced
    with an equivalent immutable tuple to avoid the shared-mutable-default
    pitfall. Behavior is unchanged.

    Returns:
        (K_XX, K_XY, K_YY, sum_of_weights)
    """
    if wts is None:
        wts = [1] * len(sigmas)
    XX = tf.matmul(X, X, transpose_b=True)
    XY = tf.matmul(X, Y, transpose_b=True)
    YY = tf.matmul(Y, Y, transpose_b=True)
    X_sqnorms = tf.diag_part(XX)
    Y_sqnorms = tf.diag_part(YY)
    r = lambda x: tf.expand_dims(x, 0)  # row vector
    c = lambda x: tf.expand_dims(x, 1)  # column vector
    K_XX, K_XY, K_YY = 0, 0, 0
    for sigma, wt in zip(sigmas, wts):
        gamma = 1 / (2 * (sigma ** 2))
        K_XX += wt * tf.exp(-gamma * (-2 * XX + c(X_sqnorms) + r(X_sqnorms)))
        K_XY += wt * tf.exp(-gamma * (-2 * XY + c(X_sqnorms) + r(Y_sqnorms)))
        K_YY += wt * tf.exp(-gamma * (-2 * YY + c(Y_sqnorms) + r(Y_sqnorms)))
    return (K_XX, K_XY, K_YY, tf.reduce_sum(wts))
|
def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
    """Squared MMD estimate from precomputed kernel matrices.

    With ``biased=True`` the plain V-statistic is returned. Otherwise the
    unbiased U-statistic is used, which excludes the diagonal terms; if
    ``const_diagonal`` is given, the diagonals are assumed to equal that
    constant instead of being read from the matrices.
    """
    m = tf.cast(tf.shape(K_XX)[0], tf.float32)
    n = tf.cast(tf.shape(K_YY)[0], tf.float32)
    if biased:
        return (tf.reduce_sum(K_XX, keep_dims=True) / (m * m)
                + tf.reduce_sum(K_YY, keep_dims=True) / (n * n)
                - 2 * tf.reduce_sum(K_XY, keep_dims=True) / (m * n))
    if const_diagonal is not False:
        trace_X = m * const_diagonal
        trace_Y = n * const_diagonal
    else:
        trace_X = tf.trace(K_XX)
        trace_Y = tf.trace(K_YY)
    return ((tf.reduce_sum(K_XX) - trace_X) / (m * (m - 1))
            + (tf.reduce_sum(K_YY) - trace_Y) / (n * (n - 1))
            - 2 * tf.reduce_sum(K_XY) / (m * n))
|
def mix_rbf_mmd2(X, Y, sigmas=(1.0,), wts=None, biased=True):
    """Squared MMD between X and Y under a mixture of RBF kernels.

    Fix: the default for ``sigmas`` was a mutable list (``[1.0]``); replaced
    with an equivalent immutable tuple. Behavior is unchanged.

    NOTE(review): the sum of kernel weights is forwarded as
    ``const_diagonal`` — this assumes each kernel's diagonal equals its
    weight (exp(0) * wt), which holds for the RBF kernels built above.
    """
    K_XX, K_XY, K_YY, d = _mix_rbf_kernel(X, Y, sigmas, wts)
    return _mmd2(K_XX, K_XY, K_YY, const_diagonal=d, biased=biased)
|
def rbf_mmd2(X, Y, sigma=1.0, biased=True):
    """Single-bandwidth convenience wrapper around :func:`mix_rbf_mmd2`."""
    return mix_rbf_mmd2(X, Y, sigmas=[sigma], biased=biased)
|
class Max_over_time(Layer):
    """Keras layer: max-pooling over the time axis (axis=1), mask-aware.

    When a mask is supplied, masked-out timesteps are zeroed before the max
    is taken. The incoming mask is consumed (not propagated).
    """

    def __init__(self, **kwargs):
        self.supports_masking = True
        super(Max_over_time, self).__init__(**kwargs)

    def call(self, x, mask=None):
        if mask is None:
            return K.max(x, axis=1)
        float_mask = K.expand_dims(K.cast(mask, K.floatx()))
        return K.max(x * float_mask, axis=1)

    def compute_output_shape(self, input_shape):
        # (batch, time, features) -> (batch, features)
        return (input_shape[0], input_shape[2])

    def compute_mask(self, x, mask):
        return None
|
class KL_loss(Layer):
    """Keras layer: symmetric KL divergence between two batch-mean distributions.

    Both inputs are averaged over the batch axis, normalized to sum to 1,
    clipped away from zero, and the sum KL(p||q) + KL(q||p) is computed. The
    scalar result is tiled ``batch_size`` times so it can be used as a
    per-sample loss output.
    """

    def __init__(self, batch_size, **kwargs):
        super(KL_loss, self).__init__(**kwargs)
        self.batch_size = batch_size

    def call(self, x, mask=None):
        p = K.mean(x[0], axis=0, keepdims=True)
        q = K.mean(x[1], axis=0, keepdims=True)
        # Normalize each to a probability distribution.
        p = p / K.sum(p, keepdims=True)
        q = q / K.sum(q, keepdims=True)
        # Clip to avoid log(0) / division by zero.
        p = K.clip(p, K.epsilon(), 1)
        q = K.clip(q, K.epsilon(), 1)
        kl_pq = K.sum(p * K.log(p / q), axis=-1, keepdims=True)
        kl_qp = K.sum(q * K.log(q / p), axis=-1, keepdims=True)
        return K.repeat_elements(kl_pq + kl_qp, self.batch_size, axis=0)

    def compute_output_shape(self, input_shape):
        return (input_shape[0][0], 1)

    def compute_mask(self, x, mask):
        return None
|
class mmd_loss(Layer):
    """Keras layer: RBF-kernel MMD^2 between two input batches.

    The scalar MMD is tiled ``batch_size`` times so it can be consumed as a
    per-sample loss output.
    """

    def __init__(self, batch_size, **kwargs):
        super(mmd_loss, self).__init__(**kwargs)
        self.batch_size = batch_size

    def call(self, x, mask=None):
        source, target = x[0], x[1]
        mmd = rbf_mmd2(source, target)
        return K.repeat_elements(mmd, self.batch_size, axis=0)

    def compute_output_shape(self, input_shape):
        return (input_shape[0][0], 1)

    def compute_mask(self, x, mask):
        return None
|
class Ensemble_pred_loss(Layer):
    """Keras layer: per-sample weighted categorical cross-entropy.

    Inputs are (prediction, target, weight); the cross-entropy of each sample
    is scaled by its weight.
    """

    def __init__(self, **kwargs):
        super(Ensemble_pred_loss, self).__init__(**kwargs)

    def call(self, x, mask=None):
        pred, target, weight = x[0], x[1], x[2]
        ce = K.categorical_crossentropy(target, pred)
        return ce * weight

    def compute_output_shape(self, input_shape):
        return (input_shape[0][0], 1)

    def compute_mask(self, x, mask):
        return None
|
class Conv1DWithMasking(Conv1D):
    """Conv1D that participates in Keras masking by passing the mask through unchanged."""

    def __init__(self, **kwargs):
        # Opt in to masking; plain Conv1D would otherwise drop the mask.
        self.supports_masking = True
        super(Conv1DWithMasking, self).__init__(**kwargs)

    def compute_mask(self, x, mask):
        # Propagate the incoming mask as-is to downstream layers.
        return mask
|
def get_optimizer(args):
    """Build a Keras optimizer named by ``args.algorithm``.

    All optimizers share gradient clipping settings (clipnorm=10;
    clipvalue=0, which old Keras treats as disabled via its ``> 0`` guard).

    Fix: an unrecognized ``args.algorithm`` previously fell through every
    branch and crashed with ``NameError: optimizer`` at the return; it now
    raises an explicit ``ValueError``.

    :raises ValueError: if ``args.algorithm`` is not one of the known names.
    """
    clipvalue = 0
    clipnorm = 10
    if args.algorithm == 'rmsprop':
        optimizer = opt.RMSprop(lr=0.0005, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'sgd':
        optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adagrad':
        optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adadelta':
        optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adam':
        optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
    elif args.algorithm == 'adamax':
        optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
    else:
        raise ValueError('Unknown optimization algorithm: {}'.format(args.algorithm))
    return optimizer
|
def create_data(vocab, file_path, skip_top, skip_len, replace_non_vocab):
    """Convert each line of a text file into a list of vocabulary indices.

    Numbers (per the module-level ``num_regex``) map to ``<num>``; known words
    map to their index unless they fall in the ``skip_top`` most frequent
    slots (offset by 3 reserved indices); unknown words map to ``<unk>`` when
    ``replace_non_vocab`` is truthy, else are dropped. Lines longer than
    ``skip_len`` words are skipped entirely (when ``skip_len > 0``).

    Fixes: the file handle was opened with ``codecs.open`` and never closed
    (resource leak) — now wrapped in a ``with`` block; the hit-rate report
    divided by ``total`` without guarding against an empty input file.

    :return: (np.array of index lists, length of the longest kept line)
    """
    data = []
    num_hit, unk_hit, skip_top_hit, total = 0.0, 0.0, 0.0, 0.0
    max_len = 0
    with codecs.open(file_path, 'r', 'utf-8') as f:
        for line in f:
            word_indices = []
            words = line.split()
            if skip_len > 0 and len(words) > skip_len:
                continue
            for word in words:
                if bool(num_regex.match(word)):
                    word_indices.append(vocab['<num>'])
                    num_hit += 1
                elif word in vocab:
                    word_ind = vocab[word]
                    # Indices below skip_top + 3 are the most frequent words
                    # (first 3 slots are reserved tokens); optionally drop them.
                    if skip_top > 0 and word_ind < (skip_top + 3):
                        skip_top_hit += 1
                    else:
                        word_indices.append(word_ind)
                elif replace_non_vocab:
                    word_indices.append(vocab['<unk>'])
                    unk_hit += 1
                total += 1
            if len(word_indices) > max_len:
                max_len = len(word_indices)
            data.append(word_indices)
    denom = total if total else 1.0  # avoid ZeroDivisionError on empty input
    print(' <num> hit rate: %.2f%%, <unk> hit rate: %.2f%%, <skip_top> hit rate: %.2f%%' % ((100 * num_hit) / denom, (100 * unk_hit) / denom, (100 * skip_top_hit) / denom))
    return (np.array(data), max_len)
|
def prepare_data(source_domain, target_domain, n_class, vocab_size=0, skip_len=0, skip_top=0, replace_non_vocab=1):
    """Build the shared vocabulary and index data for both domains.

    Loads labeled (pos/neg) and unlabeled (un_pos/un_neg) files for the
    source and target domains, returning the vocabulary, the per-file index
    arrays (in file order), and the overall maximum line length.
    """
    file_list = [('../data/amazon/%s/pos.txt' % source_domain), ('../data/amazon/%s/neg.txt' % source_domain), ('../data/amazon/%s/un_pos.txt' % source_domain), ('../data/amazon/%s/un_neg.txt' % source_domain), ('../data/amazon/%s/pos.txt' % target_domain), ('../data/amazon/%s/neg.txt' % target_domain), ('../data/amazon/%s/un_pos.txt' % target_domain), ('../data/amazon/%s/un_neg.txt' % target_domain)]
    vocab = create_vocab(file_list, vocab_size, skip_len)
    data_list = []
    overall_max_len = 0
    for path in file_list:
        data, cur_max = create_data(vocab, path, skip_top, skip_len, replace_non_vocab)
        data_list.append(data)
        overall_max_len = max(overall_max_len, cur_max)
    return (vocab, data_list, overall_max_len)
|
def get_data(dataset, source_domain, target_domain, n_class, vocab_size=0):
    """Pad, shuffle and split the domain data into train/dev/test/unlabeled sets.

    Source train (1600 = 800 pos + 800 neg) is the training set, source test
    (400) is the dev set, and target test (400) is the test set; the
    unlabeled pools include the source training inputs.
    NOTE(review): the 800/200 split sizes assume exactly 1000 labeled
    examples per class per domain — confirm against the data files.
    """
    vocab, data_list, overall_maxlen = prepare_data(source_domain, target_domain, n_class, vocab_size)
    data_list = [sequence.pad_sequences(d, maxlen=overall_maxlen) for d in data_list]
    for d in data_list:
        np.random.shuffle(d)
    (source_pos, source_neg, source_un_pos, source_un_neg,
     target_pos, target_neg, target_un_pos, target_un_neg) = data_list

    def _make_labels(n_per_class):
        # First n_per_class rows are positive (1), the rest negative (0).
        raw = np.concatenate((np.ones(n_per_class), np.zeros(n_per_class))).reshape(2 * n_per_class, 1)
        return to_categorical(raw, n_class)

    source_y = _make_labels(800)
    dev_y = _make_labels(200)
    target_train_y = _make_labels(800)  # computed but not returned (kept from original)
    test_y = _make_labels(200)
    source_x = np.concatenate((source_pos[0:800], source_neg[0:800]))
    dev_x = np.concatenate((source_pos[800:], source_neg[800:]))
    target_train_x = np.concatenate((target_pos[0:800], target_neg[0:800]))
    test_x = np.concatenate((target_pos[800:], target_neg[800:]))
    source_un = np.concatenate((source_x, source_un_pos, source_un_neg))
    target_un = np.concatenate((target_un_pos, target_un_neg))
    return (vocab, overall_maxlen, source_x, source_y, dev_x, dev_y, test_x, test_y, source_un, target_un)
|
def train_class_batch(model, samples, target, criterion):
    """Forward one batch through ``model`` and score it with ``criterion``.

    Returns the (loss, outputs) pair so callers can both backpropagate and
    inspect the raw predictions.
    """
    outputs = model(samples)
    return (criterion(outputs, target), outputs)
|
def get_loss_scale_for_deepspeed(model):
    """Read the current loss scale from a DeepSpeed model's optimizer.

    Older DeepSpeed optimizers expose ``loss_scale``; newer ones expose
    ``cur_scale`` — prefer the former when present.
    """
    optimizer = model.optimizer
    if hasattr(optimizer, 'loss_scale'):
        return optimizer.loss_scale
    return optimizer.cur_scale
|
def train_one_epoch(args, model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, cur_single_client, max_norm: float=0, proxy_single_client=None, log_writer=None, model_ema: Optional[ModelEma]=None, mixup_fn: Optional[Mixup]=None, start_steps=None, lr_schedule_values=None, wd_schedule_values=None, num_training_steps_per_inner_epoch=None, update_freq=None):
    """Fine-tune ``model`` for one epoch of a single federated client.

    Supports two training paths: DeepSpeed (``loss_scaler is None`` — the
    model itself owns backward/step and fp16 casting) and native AMP via
    ``loss_scaler``. Gradients are accumulated over ``update_freq``
    micro-steps. Per-step LR/WD schedules are applied from
    ``lr_schedule_values`` / ``wd_schedule_values`` indexed by the global
    step. Updates ``args``-level bookkeeping (global step, LR record,
    current/best accuracy) for the client and returns the averaged meters.
    """
    model.train(True)
    metric_logger = misc.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('min_lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10
    if (loss_scaler is None):
        # DeepSpeed path: the wrapped model manages grads and micro-stepping.
        model.zero_grad()
        model.micro_steps = 0
    else:
        optimizer.zero_grad()
    for (data_iter_step, (samples, targets)) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # One optimizer step per `update_freq` micro-batches.
        step = (data_iter_step // update_freq)
        if (step >= num_training_steps_per_inner_epoch):
            continue
        it = (start_steps + step)  # global schedule index
        args.global_step_per_client[proxy_single_client] += 1
        # NOTE(review): operator precedence here looks off — the
        # `% update_freq == 0` gate only applies to the wd_schedule branch of
        # the `or`, so LR is re-assigned every micro-step. Confirm intent.
        if ((lr_schedule_values is not None) or ((wd_schedule_values is not None) and ((data_iter_step % update_freq) == 0))):
            for (i, param_group) in enumerate(optimizer.param_groups):
                if (lr_schedule_values is not None):
                    param_group['lr'] = (lr_schedule_values[it] * param_group['lr_scale'])
                if ((wd_schedule_values is not None) and (param_group['weight_decay'] > 0)):
                    param_group['weight_decay'] = wd_schedule_values[it]
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        if (mixup_fn is not None):
            (samples, targets) = mixup_fn(samples, targets)
        if (loss_scaler is None):
            # DeepSpeed runs in fp16; cast inputs explicitly.
            samples = samples.half()
            (loss, output) = train_class_batch(model, samples, targets, criterion)
        else:
            with torch.cuda.amp.autocast():
                (loss, output) = train_class_batch(model, samples, targets, criterion)
        loss_value = loss.item()
        if (not math.isfinite(loss_value)):
            # Abort on NaN/Inf loss rather than corrupting the weights.
            print('Loss is {}, stopping training'.format(loss_value))
            sys.exit(1)
        if (loss_scaler is None):
            loss /= update_freq
            model.backward(loss)
            model.step()
            if (((data_iter_step + 1) % update_freq) == 0):
                if (model_ema is not None):
                    model_ema.update(model)
            grad_norm = None
            # NOTE(review): in the AMP branch below, loss_scale_value is only
            # assigned on update steps; with update_freq > 1 the
            # metric_logger.update(loss_scale=...) call further down would hit
            # an unbound local on non-update steps. Confirm update_freq usage.
            loss_scale_value = get_loss_scale_for_deepspeed(model)
        else:
            is_second_order = (hasattr(optimizer, 'is_second_order') and optimizer.is_second_order)
            loss /= update_freq
            grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm, parameters=model.parameters(), create_graph=is_second_order, update_grad=(((data_iter_step + 1) % update_freq) == 0))
            if (((data_iter_step + 1) % update_freq) == 0):
                optimizer.zero_grad()
                if (model_ema is not None):
                    model_ema.update(model)
                loss_scale_value = loss_scaler.state_dict()['scale']
        torch.cuda.synchronize()
        if (mixup_fn is None):
            # Cheap top-1 batch accuracy; undefined under mixup's soft targets.
            class_acc = (output.max((- 1))[(- 1)] == targets).float().mean()
        else:
            class_acc = None
        metric_logger.update(loss=loss_value)
        metric_logger.update(class_acc=class_acc)
        metric_logger.update(loss_scale=loss_scale_value)
        # Track the spread of learning rates across param groups.
        min_lr = 10.0
        max_lr = 0.0
        for group in optimizer.param_groups:
            min_lr = min(min_lr, group['lr'])
            max_lr = max(max_lr, group['lr'])
        metric_logger.update(lr=max_lr)
        metric_logger.update(min_lr=min_lr)
        weight_decay_value = None
        for group in optimizer.param_groups:
            if (group['weight_decay'] > 0):
                weight_decay_value = group['weight_decay']
        metric_logger.update(weight_decay=weight_decay_value)
        metric_logger.update(grad_norm=grad_norm)
        args.learning_rate_record[proxy_single_client].append(optimizer.param_groups[0]['lr'])
    print('Averaged stats (before sync):', metric_logger)
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    if (log_writer is not None):
        # Split meters between optimizer-state and loss scalars in TensorBoard.
        for (k, v) in metric_logger.meters.items():
            if (k in ['lr', 'min_lr', 'weight_decay', 'grad_norm', 'loss_scale']):
                log_writer.writer.add_scalar(((proxy_single_client + '/opt/') + k), v.global_avg, log_writer.step)
            elif (k in ['loss', 'class_acc', 'loss_scale']):
                log_writer.writer.add_scalar(((proxy_single_client + '/loss/') + k), v.global_avg, log_writer.step)
        log_writer.set_step()
    # Track per-client best training accuracy (project-specific meter API).
    args.current_acc[cur_single_client] = metric_logger.get_class_acc()
    print('best_acc:', args.best_acc[cur_single_client])
    print('current_acc:', args.current_acc[cur_single_client])
    if (args.best_acc[cur_single_client] < args.current_acc[cur_single_client]):
        args.best_acc[cur_single_client] = args.current_acc[cur_single_client]
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
|
@torch.no_grad()
def evaluate(data_loader, model, device):
    """Evaluate classification top-1/top-5 accuracy and loss over a loader.

    Runs the model in eval mode under autocast, accumulates per-batch
    metrics in a MetricLogger, and returns the global averages.
    """
    criterion = torch.nn.CrossEntropyLoss()
    metric_logger = misc.MetricLogger(delimiter=' ')
    model.eval()
    for batch in metric_logger.log_every(data_loader, 10, 'Test:'):
        images, target = batch[0], batch[-1]
        images = images.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        with torch.cuda.amp.autocast():
            output = model(images)
            loss = criterion(output, target)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        n = images.shape[0]
        metric_logger.update(loss=loss.item())
        metric_logger.meters['acc1'].update(acc1.item(), n=n)
        metric_logger.meters['acc5'].update(acc5.item(), n=n)
    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
|
def train_one_epoch(args, model: torch.nn.Module, d_vae: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, cur_single_client, max_norm: float=0, proxy_single_client=None, log_writer=None, criterion=None, lr_scheduler=None, start_steps=None, lr_schedule_values=None, wd_schedule_values=None):
    """BEiT-style masked-image-modeling pre-training for one client epoch.

    The discrete VAE tokenizes the images (no grad); the model predicts the
    token ids at the masked patch positions and is scored by ``criterion``.

    NOTE(review): this redefines ``train_one_epoch`` and therefore shadows
    the fine-tuning version defined earlier in this file — presumably these
    originate from two separate modules; confirm before merging.
    """
    model.train()
    metric_logger = misc.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10
    # NOTE(review): this bare call creates a logging generator that is never
    # iterated — it has no effect and looks like a leftover.
    metric_logger.log_every(data_loader, print_freq, header)
    for (step, (batch, _)) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        args.global_step_per_client[proxy_single_client] += 1
        if ((lr_schedule_values is not None) or (wd_schedule_values is not None)):
            # NOTE(review): `it` here is the param-group index from enumerate,
            # not a global step — indexing the schedules with it differs from
            # the fine-tuning loop (which uses start_steps + step). Confirm.
            for (it, param_group) in enumerate(optimizer.param_groups):
                if (lr_schedule_values is not None):
                    param_group['lr'] = (lr_schedule_values[it] * param_group['lr_scale'])
                if ((wd_schedule_values is not None) and (param_group['weight_decay'] > 0)):
                    param_group['weight_decay'] = wd_schedule_values[it]
        (samples, images, bool_masked_pos) = batch
        images = images.to(device, non_blocking=True)
        samples = samples.to(device, non_blocking=True)
        bool_masked_pos = bool_masked_pos.to(device, non_blocking=True)
        with torch.no_grad():
            # Tokenize with the frozen discrete VAE; targets are the token
            # ids at the masked positions only.
            input_ids = d_vae.get_codebook_indices(images).flatten(1)
            bool_masked_pos = bool_masked_pos.flatten(1).to(torch.bool)
            labels = input_ids[bool_masked_pos]
        with torch.cuda.amp.autocast():
            outputs = model(samples, bool_masked_pos=bool_masked_pos, return_all_tokens=False)
            loss = criterion(input=outputs, target=labels)
        loss_value = loss.item()
        if (not math.isfinite(loss_value)):
            # Abort on NaN/Inf loss rather than corrupting the weights.
            print('Loss is {}, stopping training'.format(loss_value))
            sys.exit(1)
        optimizer.zero_grad()
        is_second_order = (hasattr(optimizer, 'is_second_order') and optimizer.is_second_order)
        grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm, parameters=model.parameters(), create_graph=is_second_order)
        loss_scale_value = loss_scaler.state_dict()['scale']
        torch.cuda.synchronize()
        # Accuracy of predicting the masked token ids.
        mlm_acc = (outputs.max((- 1))[1] == labels).float().mean().item()
        metric_logger.update(mlm_acc=mlm_acc)
        metric_logger.update(loss=loss_value)
        metric_logger.update(loss_scale=loss_scale_value)
        # Track the spread of learning rates across param groups.
        min_lr = 10.0
        max_lr = 0.0
        for group in optimizer.param_groups:
            min_lr = min(min_lr, group['lr'])
            max_lr = max(max_lr, group['lr'])
        metric_logger.update(lr=max_lr)
        metric_logger.update(min_lr=min_lr)
        weight_decay_value = None
        for group in optimizer.param_groups:
            if (group['weight_decay'] > 0):
                weight_decay_value = group['weight_decay']
        metric_logger.update(weight_decay=weight_decay_value)
        metric_logger.update(grad_norm=grad_norm)
        if (lr_scheduler is not None):
            lr_scheduler.step_update((start_steps + step))
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    if (log_writer is not None):
        # Split meters between optimizer-state and loss scalars in TensorBoard.
        for (k, v) in metric_logger.meters.items():
            if (k in ['lr', 'min_lr', 'weight_decay', 'grad_norm']):
                log_writer.writer.add_scalar(((proxy_single_client + '/opt/') + k), v.global_avg, log_writer.step)
            elif (k in ['loss', 'mlm_acc', 'loss_scale']):
                log_writer.writer.add_scalar(((proxy_single_client + '/loss/') + k), v.global_avg, log_writer.step)
        log_writer.set_step()
    # Track per-client best MLM accuracy (project-specific meter API).
    args.current_mlm_acc[cur_single_client] = metric_logger.get_mlm_acc()
    if (args.best_mlm_acc[cur_single_client] < args.current_mlm_acc[cur_single_client]):
        args.best_mlm_acc[cur_single_client] = args.current_mlm_acc[cur_single_client]
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
|
def get_args():
    """Parse command-line arguments for Fed-BEiT pre-training."""
    parser = argparse.ArgumentParser('Fed-BEiT pre-training', add_help=False)
    # --- Training basics ---
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--save_ckpt_freq', default=50, type=int)
    # --- Discrete VAE tokenizer ---
    parser.add_argument('--discrete_vae_weight_path', default='/home/yan/data/SSL-FL/tokenizer_weight', type=str)
    parser.add_argument('--discrete_vae_type', type=str, default='dall-e')
    # --- Model ---
    parser.add_argument('--model_name', default='beit', type=str)
    parser.add_argument('--model', default='beit_base_patch16_224_8k_vocab', type=str, metavar='MODEL', help='Name of model to train')
    parser.add_argument('--rel_pos_bias', action='store_true')
    parser.add_argument('--disable_rel_pos_bias', action='store_false', dest='rel_pos_bias')
    parser.set_defaults(rel_pos_bias=True)
    parser.add_argument('--abs_pos_emb', action='store_true')
    parser.set_defaults(abs_pos_emb=False)
    parser.add_argument('--layer_scale_init_value', default=0.1, type=float, help='0.1 for base, 1e-5 for large. set 0 to disable layer scale')
    # --- Masked-patch settings ---
    parser.add_argument('--mask_ratio', default=0.4, type=float, help='Masking ratio (percentage of removed patches).')
    parser.add_argument('--max_mask_patches_per_block', type=int, default=None)
    parser.add_argument('--min_mask_patches_per_block', type=int, default=16)
    parser.add_argument('--input_size', default=224, type=int, help='images input size for backbone')
    parser.add_argument('--second_input_size', default=112, type=int, help='images input size for discrete vae')
    parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)')
    # --- Optimizer ---
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw"')
    parser.add_argument('--opt_eps', default=1e-08, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)')
    parser.add_argument('--weight_decay_end', type=float, default=None, help='Final value of the\n weight decay. We use a cosine schedule for WD. \n (Set the same value with args.weight_decay to keep weight decay no change)')
    # --- Learning-rate schedule ---
    parser.add_argument('--lr', type=float, default=0.002, metavar='LR', help='learning rate (default: 2e-3)')
    parser.add_argument('--warmup_lr', type=float, default=1e-06, metavar='LR', help='warmup learning rate (default: 1e-6)')
    parser.add_argument('--min_lr', type=float, default=1e-05, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
    parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--warmup_steps', type=int, default=(- 1), metavar='N', help='epochs to warmup LR, if scheduler supports')
    # --- Data augmentation ---
    parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)')
    parser.add_argument('--train_interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
    parser.add_argument('--second_interpolation', type=str, default='lanczos', help='Interpolation for discrete vae (random, bilinear, bicubic default: "lanczos")')
    # --- Dataset / paths ---
    parser.add_argument('--data_path', default='../../data/Retina', type=str, help='dataset path')
    parser.add_argument('--data_set', default='Retina', type=str, help='dataset for pretraining')
    parser.add_argument('--imagenet_default_mean_and_std', default=False, action='store_true')
    parser.add_argument('--output_dir', default='', help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default=None, help='path where to tensorboard log')
    parser.add_argument('--device', default='cuda', help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin_mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem', help='')
    parser.set_defaults(pin_mem=True)
    # --- Distributed training ---
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--local_rank', default=(- 1), type=int)
    parser.add_argument('--sync_bn', default=False, action='store_true')
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    # --- Federated-learning settings ---
    parser.add_argument('--n_clients', default=5, type=int, help='Number of clients')
    parser.add_argument('--E_epoch', default=1, type=int, help='Local training epoch in FL')
    parser.add_argument('--max_communication_rounds', default=100, type=int, help='Total communication rounds')
    parser.add_argument('--num_local_clients', default=(- 1), choices=[10, (- 1)], type=int, help='Num of local clients joined in each FL train. -1 indicates all clients')
    parser.add_argument('--split_type', type=str, default='central', help='Which data partitions to use')
    return parser.parse_args()
|
def get_model(args):
    """Instantiate the BEiT pre-training model and record patch geometry on ``args``.

    Side effects: sets ``args.window_size`` (patches per side) and
    ``args.patch_size`` for use by the masking generator / dataloader.

    Fix: removed a dead ``n_parameters`` computation whose result was
    discarded (the caller recomputes it where needed).
    """
    print(f'Creating model: {args.model}')
    model = create_model(args.model, pretrained=False, drop_path_rate=args.drop_path, drop_block_rate=None, use_shared_rel_pos_bias=args.rel_pos_bias, use_abs_pos_emb=args.abs_pos_emb, init_values=args.layer_scale_init_value)
    patch_size = model.patch_embed.patch_size
    print(('patch size = %s' % str(patch_size)))
    args.window_size = ((args.input_size // patch_size[0]), (args.input_size // patch_size[1]))
    args.patch_size = patch_size
    return model
|
def main(args, model):
    """Federated BEiT pre-training driver.

    Repeats communication rounds until every client's global step budget is
    exhausted: each round selects clients, trains each for ``args.E_epoch``
    local epochs via ``train_one_epoch``, then averages the per-client models
    into ``model_avg`` and periodically checkpoints it.
    """
    misc.init_distributed_mode(args)
    print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
    print('{}'.format(args).replace(', ', ',\n'))
    device = torch.device(args.device)
    misc.fix_random_seeds(args)
    cudnn.benchmark = True
    os.makedirs(args.output_dir, exist_ok=True)
    create_dataset_and_evalmetrix(args)
    # Per-client model/optimizer/schedule/scaler dictionaries keyed by proxy client.
    (model_all, optimizer_all, criterion_all, lr_scheduler_all, wd_scheduler_all, loss_scaler_all) = Partial_Client_Selection(args, model)
    model_avg = deepcopy(model).cpu()  # CPU copy that accumulates the federated average
    d_vae = misc.create_d_vae(weight_path=args.discrete_vae_weight_path, d_vae_type=args.discrete_vae_type, device=device, image_size=args.second_input_size)
    global_rank = misc.get_rank()
    if ((global_rank == 0) and (args.log_dir is not None)):
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = misc.TensorboardLogger(log_dir=args.log_dir)
    else:
        log_writer = None
    print('=============== Running pre-training ===============')
    tot_clients = args.dis_cvs_files
    print('total_clients: ', tot_clients)
    epoch = (- 1)
    print(f'Start training for {args.max_communication_rounds} epochs, distributed={args.distributed}')
    start_time = time.time()
    while True:
        print('epoch: ', epoch)
        epoch += 1
        # Select this round's participating clients (all, or a random subset).
        if (args.num_local_clients == len(args.dis_cvs_files)):
            cur_selected_clients = args.proxy_clients
        else:
            cur_selected_clients = np.random.choice(tot_clients, args.num_local_clients, replace=False).tolist()
        # Total sample count of the round, used for FedAvg weighting.
        cur_tot_client_Lens = 0
        for client in cur_selected_clients:
            cur_tot_client_Lens += args.clients_with_len[client]
        for (cur_single_client, proxy_single_client) in zip(cur_selected_clients, args.proxy_clients):
            print('cur_single_client: ', cur_single_client)
            print('proxy_single_client: ', proxy_single_client)
            args.single_client = cur_single_client
            args.clients_weightes[proxy_single_client] = (args.clients_with_len[cur_single_client] / cur_tot_client_Lens)
            dataset_train = DatasetFLPretrain(args)
            num_tasks = misc.get_world_size()
            global_rank = misc.get_rank()
            sampler_rank = global_rank
            num_training_steps_per_inner_epoch = ((len(dataset_train) // args.batch_size) // num_tasks)
            print(f'=========client: {proxy_single_client} ==============')
            if args.distributed:
                sampler_train = torch.utils.data.DistributedSampler(dataset_train, num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
            else:
                sampler_train = torch.utils.data.RandomSampler(dataset_train)
            data_loader_train = torch.utils.data.DataLoader(dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True)
            # Swap in this client's local training state.
            model = model_all[proxy_single_client]
            optimizer = optimizer_all[proxy_single_client]
            criterion = criterion_all[proxy_single_client]
            lr_schedule_values = lr_scheduler_all[proxy_single_client]
            wd_schedule_values = wd_scheduler_all[proxy_single_client]
            loss_scaler = loss_scaler_all[proxy_single_client]
            if args.distributed:
                data_loader_train.sampler.set_epoch(epoch)
            if (log_writer is not None):
                log_writer.set_step(epoch)
            n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad))
            total_batch_size = (args.batch_size * num_tasks)
            print(('LR = %.8f' % args.lr))
            print(('Batch size = %d' % total_batch_size))
            print(('Number of training steps = %d' % num_training_steps_per_inner_epoch))
            print(('Number of training examples per epoch = %d' % (total_batch_size * num_training_steps_per_inner_epoch)))
            for inner_epoch in range(args.E_epoch):
                train_stats = train_one_epoch(args, model, d_vae, data_loader_train, optimizer, device, epoch, loss_scaler=loss_scaler, cur_single_client=cur_single_client, max_norm=args.clip_grad, proxy_single_client=proxy_single_client, log_writer=log_writer, criterion=criterion, start_steps=((epoch + inner_epoch) * num_training_steps_per_inner_epoch), lr_schedule_values=lr_schedule_values, wd_schedule_values=wd_schedule_values)
                log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, 'client': args.single_client, 'epoch': epoch, 'inner_epoch': inner_epoch, 'n_parameters': n_parameters}
                if (args.output_dir and misc.is_main_process()):
                    if (log_writer is not None):
                        log_writer.flush()
                    with open(os.path.join(args.output_dir, 'log.txt'), mode='a', encoding='utf-8') as f:
                        f.write((json.dumps(log_stats) + '\n'))
        # FedAvg: fold all client models into model_avg after the round.
        average_model(args, model_avg, model_all)
        if args.output_dir:
            if (((epoch + 1) % args.save_ckpt_freq) == 0):
                misc.save_model(args=args, model=model_avg, model_without_ddp=model_avg, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch)
        # Stop once the (last-trained) client has exhausted its step budget.
        # NOTE(review): this only checks the final proxy_single_client from
        # the loop above — confirm all clients share the same t_total.
        if (args.global_step_per_client[proxy_single_client] >= args.t_total[proxy_single_client]):
            break
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('================End pre-training! ================ ')
    print('pretraining time {}'.format(total_time_str))
|
def get_args():
    """Build and parse command-line arguments for Fed-BEiT fine-tuning/evaluation.

    Returns:
        argparse.Namespace: parsed arguments (defaults used for any flag not
        given on the command line).
    """
    parser = argparse.ArgumentParser('Fed-BEiT fine-tuning and evaluation script for image classification', add_help=False)
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--update_freq', default=1, type=int)
    parser.add_argument('--save_ckpt_freq', default=20, type=int)
    # Model parameters
    parser.add_argument('--model_name', default='beit', type=str)
    parser.add_argument('--model', default='beit_base_patch16_224', type=str, metavar='MODEL', help='Name of model to train')
    # rel_pos_bias defaults to True; --disable_rel_pos_bias turns it off.
    parser.add_argument('--rel_pos_bias', action='store_true')
    parser.add_argument('--disable_rel_pos_bias', action='store_false', dest='rel_pos_bias')
    parser.set_defaults(rel_pos_bias=True)
    parser.add_argument('--abs_pos_emb', action='store_true')
    parser.set_defaults(abs_pos_emb=False)
    parser.add_argument('--layer_scale_init_value', default=0.1, type=float, help='0.1 for base, 1e-5 for large. set 0 to disable layer scale')
    parser.add_argument('--input_size', default=224, type=int, help='images input size')
    parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)')
    parser.add_argument('--attn_drop_rate', type=float, default=0.0, metavar='PCT', help='Attention dropout rate (default: 0.)')
    parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)')
    parser.add_argument('--disable_eval_during_finetuning', action='store_true', default=False)
    parser.add_argument('--model_ema', action='store_true', default=False)
    parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='')
    parser.add_argument('--model_ema_force_cpu', action='store_true', default=False, help='')
    # Optimizer parameters
    # Fixed: help text previously had an unbalanced parenthesis.
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw")')
    parser.add_argument('--opt_eps', default=1e-08, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)')
    parser.add_argument('--weight_decay_end', type=float, default=None, help='Final value of the\n weight decay. We use a cosine schedule for WD and using a larger decay by\n the end of training improves performance for ViTs.')
    # Learning-rate schedule parameters
    parser.add_argument('--lr', type=float, default=0.0005, metavar='LR', help='learning rate (default: 5e-4)')
    parser.add_argument('--layer_decay', type=float, default=0.9)
    parser.add_argument('--warmup_lr', type=float, default=1e-06, metavar='LR', help='warmup learning rate (default: 1e-6)')
    parser.add_argument('--min_lr', type=float, default=1e-06, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
    parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N', help='num of steps to warmup LR, will overload warmup_epochs if set > 0')
    # Augmentation parameters
    parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)')
    # Fixed: removed spurious tuple-wrapping around add_argument and the stray
    # '" + "' concatenation artifact embedded in the help text.
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', help='Use AutoAugment policy. "v0" or "original". (default: rand-m9-mstd0.5-inc1)')
    parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
    parser.add_argument('--train_interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
    parser.add_argument('--crop_pct', type=float, default=None)
    # Random-erase parameters
    parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', help='Random erase prob (default: 0.25)')
    parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")')
    parser.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)')
    parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split')
    # Mixup/CutMix parameters (disabled unless alpha > 0)
    parser.add_argument('--mixup', type=float, default=0, help='mixup alpha, mixup enabled if > 0.')
    parser.add_argument('--cutmix', type=float, default=0, help='cutmix alpha, cutmix enabled if > 0.')
    parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup_prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup_switch_prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled')
    parser.add_argument('--mixup_mode', type=str, default='batch', help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
    # Fine-tuning / checkpoint parameters
    parser.add_argument('--finetune', default='', help='finetune from checkpoint')
    parser.add_argument('--global_pool', action='store_true')
    parser.set_defaults(global_pool=True)
    parser.add_argument('--model_key', default='model|module', type=str)
    parser.add_argument('--model_prefix', default='', type=str)
    parser.add_argument('--init_scale', default=0.001, type=float)
    # use_mean_pooling defaults to True; --use_cls switches to CLS-token pooling.
    parser.add_argument('--use_mean_pooling', action='store_true')
    parser.set_defaults(use_mean_pooling=True)
    parser.add_argument('--use_cls', action='store_false', dest='use_mean_pooling')
    parser.add_argument('--disable_weight_decay_on_rel_pos_bias', action='store_true', default=False)
    # Dataset parameters
    parser.add_argument('--data_set', default='Retina', type=str, help='ImageNet dataset path')
    parser.add_argument('--data_path', default='/home/yan/data/SSL-FL/Retina', type=str, help='dataset path')
    parser.add_argument('--eval_data_path', default=None, type=str, help='dataset path for evaluation')
    parser.add_argument('--nb_classes', default=2, type=int, help='number of the classification types')
    parser.add_argument('--output_dir', default='', help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default=None, help='path where to tensorboard log')
    parser.add_argument('--device', default='cuda', help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--save_ckpt', action='store_true')
    parser.add_argument('--no_save_ckpt', action='store_false', dest='save_ckpt')
    parser.set_defaults(save_ckpt=True)
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
    parser.add_argument('--dist_eval', action='store_true', default=False, help='Enabling distributed evaluation')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin_mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
    parser.set_defaults(pin_mem=True)
    # Distributed-training parameters
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--sync_bn', default=False, action='store_true')
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    # Federated-learning parameters
    parser.add_argument('--n_clients', default=5, type=int, help='Number of clients')
    parser.add_argument('--E_epoch', default=1, type=int, help='Local training epoch in FL')
    parser.add_argument('--max_communication_rounds', default=100, type=int, help='Total communication rounds.')
    parser.add_argument('--num_local_clients', default=10, choices=[10, -1], type=int, help='Num of local clients joined in each FL train. -1 indicates all clients')
    parser.add_argument('--split_type', type=str, default='central', help='Which data partitions to use')
    return parser.parse_args()
|
def get_model(args):
    """Instantiate the classification model named by ``args.model``.

    Side effect: records the patch geometry on ``args`` (``args.window_size``
    and ``args.patch_size``) for downstream dataset/mask builders.
    """
    print(f'Creating model: {args.model}')
    net = create_model(
        args.model,
        pretrained=False,
        num_classes=args.nb_classes,
        drop_rate=args.drop,
        drop_path_rate=args.drop_path,
        attn_drop_rate=args.attn_drop_rate,
        drop_block_rate=None,
        use_mean_pooling=args.use_mean_pooling,
        init_scale=args.init_scale,
        use_rel_pos_bias=args.rel_pos_bias,
        use_abs_pos_emb=args.abs_pos_emb,
        init_values=args.layer_scale_init_value,
    )
    patch_size = net.patch_embed.patch_size
    print('patch size = %s' % str(patch_size))
    # Token-grid dimensions (height, width) implied by the input resolution.
    args.window_size = (args.input_size // patch_size[0], args.input_size // patch_size[1])
    args.patch_size = patch_size
    # Trainable-parameter count; computed (as in the original) but not returned.
    n_parameters = sum(p.numel() for p in net.parameters() if p.requires_grad)
    return net
|
def main(args, model):
    """Federated fine-tuning driver (Fed-BEiT).

    Runs communication rounds until a client exhausts its step budget: each
    round selects clients, trains each selected client's model copy for
    ``args.E_epoch`` inner epochs, averages the client models into
    ``model_avg``, then evaluates and checkpoints the averaged model.

    Args:
        args: parsed command-line namespace; mutated in place (e.g.
            ``args.single_client``, ``args.clients_weightes``, step counters).
        model: template model; per-client copies come from
            ``Partial_Client_Selection``.
    """
    misc.init_distributed_mode(args)
    device = torch.device(args.device)
    misc.fix_random_seeds(args)
    cudnn.benchmark = True
    # Populates client bookkeeping on ``args`` (dis_cvs_files, proxy_clients,
    # clients_with_len, t_total, ...) — presumably; confirm in its definition.
    create_dataset_and_evalmetrix(args, mode='finetune')
    if args.disable_eval_during_finetuning:
        dataset_val = None
    else:
        dataset_val = DatasetFLFinetune(args=args, phase='test')
    if args.eval:
        dataset_test = DatasetFLFinetune(args=args, phase='test')
    else:
        dataset_test = None
    num_tasks = misc.get_world_size()
    global_rank = misc.get_rank()
    if args.dist_eval:
        if ((len(dataset_val) % num_tasks) != 0):
            print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. This will slightly alter validation results as extra duplicate entries are added to achieve equal num of samples per-process.')
        sampler_val = torch.utils.data.DistributedSampler(dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True)
    else:
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    # NOTE(review): dataset_test may be None here; the sampler constructor
    # only stores the reference and the loader below is gated on non-None.
    sampler_test = torch.utils.data.SequentialSampler(dataset_test)
    if (dataset_val is not None):
        data_loader_val = torch.utils.data.DataLoader(dataset_val, sampler=sampler_val, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False)
    else:
        data_loader_val = None
    if (dataset_test is not None):
        data_loader_test = torch.utils.data.DataLoader(dataset_test, sampler=sampler_test, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False)
    else:
        data_loader_test = None
    # One entry per proxy client: model, optimizer, criterion, LR/WD schedule
    # values, loss scaler, and mixup function.
    (model_all, optimizer_all, criterion_all, lr_scheduler_all, wd_scheduler_all, loss_scaler_all, mixupfn_all) = Partial_Client_Selection(args, model, mode='finetune')
    # Server-side averaged model, kept on CPU between rounds.
    model_avg = deepcopy(model).cpu()
    if (args.log_dir is not None):
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = misc.TensorboardLogger(log_dir=args.log_dir)
    else:
        log_writer = None
    print('=============== Running fine-tuning ===============')
    tot_clients = args.dis_cvs_files
    print('total_clients: ', tot_clients)
    epoch = (- 1)
    start_time = time.time()
    max_accuracy = 0.0
    while True:
        print('epoch: ', epoch)
        epoch += 1
        # Select this round's participating clients (all, or a random subset).
        if (args.num_local_clients == len(args.dis_cvs_files)):
            cur_selected_clients = args.proxy_clients
        else:
            cur_selected_clients = np.random.choice(tot_clients, args.num_local_clients, replace=False).tolist()
        # Total number of samples across selected clients, used to weight the
        # federated average.
        cur_tot_client_Lens = 0
        for client in cur_selected_clients:
            cur_tot_client_Lens += args.clients_with_len[client]
        for (cur_single_client, proxy_single_client) in zip(cur_selected_clients, args.proxy_clients):
            print('cur_single_client: ', cur_single_client)
            print('proxy_single_client: ', proxy_single_client)
            args.single_client = cur_single_client
            # Aggregation weight proportional to this client's dataset size.
            args.clients_weightes[proxy_single_client] = (args.clients_with_len[cur_single_client] / cur_tot_client_Lens)
            dataset_train = DatasetFLFinetune(args=args, phase='train')
            num_tasks = misc.get_world_size()
            global_rank = misc.get_rank()
            print(f'=========client: {proxy_single_client} ==============')
            if args.distributed:
                sampler_train = torch.utils.data.DistributedSampler(dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True)
            else:
                sampler_train = torch.utils.data.RandomSampler(dataset_train)
            print(('Sampler_train = %s' % str(sampler_train)))
            data_loader_train = torch.utils.data.DataLoader(dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True)
            # Fetch this client's local training state.
            model = model_all[proxy_single_client]
            optimizer = optimizer_all[proxy_single_client]
            criterion = criterion_all[proxy_single_client]
            lr_schedule_values = lr_scheduler_all[proxy_single_client]
            wd_schedule_values = wd_scheduler_all[proxy_single_client]
            loss_scaler = loss_scaler_all[proxy_single_client]
            mixup_fn = mixupfn_all[proxy_single_client]
            if args.distributed:
                model_without_ddp = model.module
            else:
                model_without_ddp = model
            n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad))
            total_batch_size = ((args.batch_size * args.update_freq) * misc.get_world_size())
            num_training_steps_per_inner_epoch = (len(dataset_train) // total_batch_size)
            print(('LR = %.8f' % args.lr))
            print(('Batch size = %d' % total_batch_size))
            print(('Update frequent = %d' % args.update_freq))
            print(('Number of training examples = %d' % len(dataset_train)))
            print(('Number of training training per epoch = %d' % num_training_steps_per_inner_epoch))
            if args.distributed:
                # Re-seed the distributed sampler so shuffling differs per round.
                data_loader_train.sampler.set_epoch(epoch)
            if (log_writer is not None):
                log_writer.set_step(epoch)
            if args.eval:
                # Evaluation-only path: load checkpoint, score on the test set,
                # and terminate the whole process.
                misc.auto_load_model(args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, model_ema=None)
                test_stats = valid(args, model, data_loader_test)
                print(f"Accuracy of the network on the {len(dataset_test)} test images: {test_stats['acc1']:.1f}%")
                model.cpu()
                exit(0)
            for inner_epoch in range(args.E_epoch):
                train_stats = train_one_epoch(args, model, criterion, data_loader_train, optimizer, device, epoch, loss_scaler=loss_scaler, cur_single_client=cur_single_client, max_norm=args.clip_grad, proxy_single_client=proxy_single_client, model_ema=None, mixup_fn=mixup_fn, log_writer=log_writer, start_steps=((epoch + inner_epoch) * num_training_steps_per_inner_epoch), lr_schedule_values=lr_schedule_values, wd_schedule_values=wd_schedule_values, num_training_steps_per_inner_epoch=num_training_steps_per_inner_epoch, update_freq=args.update_freq)
                log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, 'client': cur_single_client, 'epoch': epoch, 'inner_epoch': inner_epoch, 'n_parameters': n_parameters}
                if (args.output_dir and misc.is_main_process()):
                    if (log_writer is not None):
                        log_writer.flush()
                    with open(os.path.join(args.output_dir, 'log.txt'), mode='a', encoding='utf-8') as f:
                        f.write((json.dumps(log_stats) + '\n'))
        # Federated averaging: fold all client models into model_avg.
        average_model(args, model_avg, model_all)
        if (args.output_dir and args.save_ckpt):
            if ((((epoch + 1) % args.save_ckpt_freq) == 0) or ((epoch + 1) == args.max_communication_rounds)):
                misc.save_model(args=args, model=model_avg, model_without_ddp=model_avg, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch)
        if (data_loader_val is not None):
            # Validate the averaged model and keep the best checkpoint.
            model_avg.to(args.device)
            test_stats = valid(args, model_avg, data_loader_val)
            print(f"Accuracy of the network on the {len(dataset_val)} validation images: {test_stats['acc1']:.1f}%")
            if (max_accuracy < test_stats['acc1']):
                max_accuracy = test_stats['acc1']
                if (args.output_dir and args.save_ckpt):
                    misc.save_model(args=args, model=model_avg, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch='best', model_ema=None)
            print(f'Max accuracy: {max_accuracy:.2f}%')
            if (log_writer is not None):
                log_writer.update(test_acc1=test_stats['acc1'], head='perf', step=epoch)
                log_writer.update(test_acc5=test_stats['acc5'], head='perf', step=epoch)
                log_writer.update(test_loss=test_stats['loss'], head='perf', step=epoch)
            log_stats = {**{f'test_{k}': v for (k, v) in test_stats.items()}, 'epoch': epoch, 'n_parameters': n_parameters}
            if (args.output_dir and misc.is_main_process()):
                if (log_writer is not None):
                    log_writer.flush()
                with open(os.path.join(args.output_dir, 'log.txt'), mode='a', encoding='utf-8') as f:
                    f.write((json.dumps(log_stats) + '\n'))
            model_avg.to('cpu')
        print('global_step_per_client: ', args.global_step_per_client[proxy_single_client])
        print('t_total: ', args.t_total[proxy_single_client])
        # Stop when the last-trained client has consumed its total step budget.
        if (args.global_step_per_client[proxy_single_client] >= args.t_total[proxy_single_client]):
            total_time = (time.time() - start_time)
            total_time_str = str(datetime.timedelta(seconds=int(total_time)))
            print('Training time {}'.format(total_time_str))
            break
|
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, max_norm: float=0, proxy_single_client=None, mixup_fn: Optional[Mixup]=None, log_writer=None, args=None):
    """Run one supervised fine-tuning epoch with AMP and gradient accumulation.

    Side effects: increments ``args.global_step_per_client[proxy_single_client]``
    per batch and logs loss/lr to ``log_writer`` if given.

    Returns:
        dict: globally averaged metrics (loss, lr) from the metric logger.

    Exits the process if the loss becomes non-finite.
    """
    model.train(True)
    metric_logger = misc.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 20
    accum_iter = args.accum_iter
    optimizer.zero_grad()
    if (log_writer is not None):
        print('log_dir: {}'.format(log_writer.log_dir))
    for (data_iter_step, (samples, targets)) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # Track this client's global step for the FL stopping criterion.
        args.global_step_per_client[proxy_single_client] += 1
        # Adjust LR only at accumulation boundaries (per effective update).
        if ((data_iter_step % accum_iter) == 0):
            lr_sched.adjust_learning_rate(optimizer, ((data_iter_step / len(data_loader)) + epoch), args)
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        if (mixup_fn is not None):
            (samples, targets) = mixup_fn(samples, targets)
        # Mixed-precision forward pass.
        with torch.cuda.amp.autocast():
            outputs = model(samples)
            loss = criterion(outputs, targets)
        loss_value = loss.item()
        if (not math.isfinite(loss_value)):
            print('Loss is {}, stopping training'.format(loss_value))
            sys.exit(1)
        # Scale the loss so accumulated gradients average over accum_iter steps.
        loss /= accum_iter
        # loss_scaler backpropagates and only steps the optimizer when
        # update_grad is True (i.e. at the end of an accumulation window).
        loss_scaler(loss, optimizer, clip_grad=max_norm, parameters=model.parameters(), create_graph=False, update_grad=(((data_iter_step + 1) % accum_iter) == 0))
        if (((data_iter_step + 1) % accum_iter) == 0):
            optimizer.zero_grad()
        torch.cuda.synchronize()
        metric_logger.update(loss=loss_value)
        min_lr = 10.0
        max_lr = 0.0
        for group in optimizer.param_groups:
            min_lr = min(min_lr, group['lr'])
            max_lr = max(max_lr, group['lr'])
        metric_logger.update(lr=max_lr)
        loss_value_reduce = misc.all_reduce_mean(loss_value)
        if ((log_writer is not None) and (((data_iter_step + 1) % accum_iter) == 0)):
            # NOTE: this bare string is a leftover docstring-style comment from
            # the original source; it is a no-op expression statement.
            ' We use epoch_1000x as the x-axis in tensorboard.\n This calibrates different curves when batch size changes.\n '
            epoch_1000x = int((((data_iter_step / len(data_loader)) + epoch) * 1000))
            log_writer.add_scalar('loss', loss_value_reduce, epoch_1000x)
            log_writer.add_scalar('lr', max_lr, epoch_1000x)
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
|
@torch.no_grad()
def evaluate(data_loader, model, device):
    """Score ``model`` on ``data_loader`` and return the averaged metrics.

    Returns:
        dict: global averages of ``loss``, ``acc1`` and ``acc5``.
    """
    loss_fn = torch.nn.CrossEntropyLoss()
    logger = misc.MetricLogger(delimiter=' ')
    header = 'Test:'
    model.eval()
    for batch in logger.log_every(data_loader, 10, header):
        # First element is the image tensor, last element the labels.
        images, target = batch[0], batch[-1]
        images = images.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        # Mixed-precision forward pass.
        with torch.cuda.amp.autocast():
            output = model(images)
            loss = loss_fn(output, target)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        n = images.shape[0]
        logger.update(loss=loss.item())
        logger.meters['acc1'].update(acc1.item(), n=n)
        logger.meters['acc5'].update(acc5.item(), n=n)
    # Aggregate meters across distributed processes before reporting.
    logger.synchronize_between_processes()
    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'.format(top1=logger.acc1, top5=logger.acc5, losses=logger.loss))
    return {k: meter.global_avg for (k, meter) in logger.meters.items()}
|
def train_one_epoch(model: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, cur_single_client, max_norm: float=0, proxy_single_client=None, log_writer=None, args=None):
    """Run one self-supervised (MAE-style) pre-training epoch with AMP and
    gradient accumulation.

    Labels from the loader are ignored; the model computes its own masked
    reconstruction loss from ``samples`` and ``args.mask_ratio``.

    Side effects: increments ``args.global_step_per_client[proxy_single_client]``
    per batch; logs per-epoch averages to ``log_writer`` if given.

    Returns:
        dict: globally averaged metrics from the metric logger.

    Exits the process if the loss becomes non-finite.
    """
    model.train(True)
    metric_logger = misc.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 20
    accum_iter = args.accum_iter
    optimizer.zero_grad()
    if (log_writer is not None):
        print('log_dir: {}'.format(log_writer.log_dir))
    for (data_iter_step, (samples, _)) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        # Track this client's global step for the FL stopping criterion.
        args.global_step_per_client[proxy_single_client] += 1
        # Adjust LR only at accumulation boundaries (per effective update).
        if ((data_iter_step % accum_iter) == 0):
            lr_sched.adjust_learning_rate(optimizer, ((data_iter_step / len(data_loader)) + epoch), args)
        samples = samples.to(device, non_blocking=True)
        # Mixed-precision forward; the model returns (loss, pred, mask).
        with torch.cuda.amp.autocast():
            (loss, _, _) = model(samples, mask_ratio=args.mask_ratio)
        loss_value = loss.item()
        if (not math.isfinite(loss_value)):
            print('Loss is {}, stopping training'.format(loss_value))
            sys.exit(1)
        # Scale the loss so accumulated gradients average over accum_iter steps.
        loss /= accum_iter
        loss_scaler(loss, optimizer, parameters=model.parameters(), update_grad=(((data_iter_step + 1) % accum_iter) == 0))
        if (((data_iter_step + 1) % accum_iter) == 0):
            optimizer.zero_grad()
        torch.cuda.synchronize()
        metric_logger.update(loss=loss_value)
        min_lr = 10.0
        max_lr = 0.0
        for group in optimizer.param_groups:
            min_lr = min(min_lr, group['lr'])
            max_lr = max(max_lr, group['lr'])
        metric_logger.update(lr=max_lr)
        metric_logger.update(min_lr=min_lr)
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    # Write per-client epoch averages under '<client>/opt/' and '<client>/loss/'.
    if (log_writer is not None):
        for (k, v) in metric_logger.meters.items():
            if (k in ['lr']):
                log_writer.writer.add_scalar(((proxy_single_client + '/opt/') + k), v.global_avg, log_writer.step)
            elif (k in ['loss']):
                log_writer.writer.add_scalar(((proxy_single_client + '/loss/') + k), v.global_avg, log_writer.step)
        log_writer.set_step()
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
|
def get_args():
    """Build and parse command-line arguments for Fed-MAE fine-tuning.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser('Fed-MAE fine-tuning for image classification', add_help=False)
    # Fixed: help text previously had an unbalanced parenthesis.
    parser.add_argument('--batch_size', default=64, type=int, help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)')
    parser.add_argument('--save_ckpt_freq', default=20, type=int)
    parser.add_argument('--accum_iter', default=1, type=int, help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')
    # Model parameters
    parser.add_argument('--model_name', default='mae', type=str)
    parser.add_argument('--model', default='vit_large_patch16', type=str, metavar='MODEL', help='Name of model to train')
    parser.add_argument('--input_size', default=224, type=int, help='images input size')
    parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)')
    parser.add_argument('--disable_eval_during_finetuning', action='store_true', default=False)
    # Optimizer parameters
    parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)')
    parser.add_argument('--lr', type=float, default=None, metavar='LR', help='learning rate (absolute lr)')
    parser.add_argument('--blr', type=float, default=0.001, metavar='LR', help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
    parser.add_argument('--layer_decay', type=float, default=0.75, help='layer-wise lr decay from ELECTRA/BEiT')
    parser.add_argument('--min_lr', type=float, default=1e-06, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0')
    parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N', help='epochs to warmup LR')
    # Augmentation parameters
    parser.add_argument('--color_jitter', type=float, default=None, metavar='PCT', help='Color jitter factor (enabled only when not using Auto/RandAug)')
    # Fixed: removed spurious tuple-wrapping around add_argument and the stray
    # '" + "' concatenation artifact embedded in the help text.
    parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', help='Use AutoAugment policy. "v0" or "original". (default: rand-m9-mstd0.5-inc1)')
    parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
    # Random-erase parameters
    parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', help='Random erase prob (default: 0.25)')
    parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")')
    parser.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)')
    parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split')
    # Mixup/CutMix parameters (disabled unless alpha > 0)
    parser.add_argument('--mixup', type=float, default=0, help='mixup alpha, mixup enabled if > 0.')
    parser.add_argument('--cutmix', type=float, default=0, help='cutmix alpha, cutmix enabled if > 0.')
    parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup_prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup_switch_prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled')
    parser.add_argument('--mixup_mode', type=str, default='batch', help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
    # Fine-tuning parameters
    parser.add_argument('--finetune', default='', help='finetune from checkpoint')
    # global_pool defaults to True; --cls_token switches to CLS-token pooling.
    parser.add_argument('--global_pool', action='store_true')
    parser.set_defaults(global_pool=True)
    parser.add_argument('--cls_token', action='store_false', dest='global_pool', help='Use class token instead of global pool for classification')
    # Dataset parameters
    parser.add_argument('--data_set', default='Retina', type=str, help='ImageNet dataset path')
    parser.add_argument('--data_path', default='/../../data/Retina', type=str, help='dataset path')
    parser.add_argument('--nb_classes', default=2, type=int, help='number of the classification types')
    parser.add_argument('--output_dir', default='', help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default=None, help='path where to tensorboard log')
    parser.add_argument('--device', default='cuda', help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
    parser.add_argument('--dist_eval', action='store_true', default=False, help='Enabling distributed evaluation (recommended during training for faster monitor')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin_mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
    parser.set_defaults(pin_mem=True)
    # Distributed-training parameters
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--sync_bn', default=False, action='store_true')
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    # Federated-learning parameters
    parser.add_argument('--n_clients', default=5, type=int, help='Number of clients')
    parser.add_argument('--E_epoch', default=1, type=int, help='Local training epoch in FL')
    parser.add_argument('--max_communication_rounds', default=100, type=int, help='Total communication rounds.')
    parser.add_argument('--num_local_clients', default=-1, choices=[10, -1], type=int, help='Num of local clients joined in each FL train. -1 indicates all clients')
    parser.add_argument('--split_type', type=str, default='central', help='Which data partitions to use')
    return parser.parse_args()
|
def main(args, model):
    """Federated fine-tuning driver (Fed-MAE).

    Same round structure as the BEiT variant: select clients, fine-tune each
    client's local copy for ``args.E_epoch`` inner epochs, average the client
    models into ``model_avg``, then evaluate/checkpoint the averaged model.

    Args:
        args: parsed command-line namespace; mutated in place (e.g.
            ``args.single_client``, ``args.clients_weightes``, step counters).
        model: template model; per-client copies come from
            ``Partial_Client_Selection``.
    """
    misc.init_distributed_mode(args)
    device = torch.device(args.device)
    misc.fix_random_seeds(args)
    cudnn.benchmark = True
    # Populates client bookkeeping on ``args`` (dis_cvs_files, proxy_clients,
    # clients_with_len, t_total, ...) — presumably; confirm in its definition.
    create_dataset_and_evalmetrix(args, mode='finetune')
    if args.disable_eval_during_finetuning:
        dataset_val = None
    else:
        dataset_val = DatasetFLFinetune(args=args, phase='test')
    if args.eval:
        dataset_test = DatasetFLFinetune(args=args, phase='test')
    else:
        dataset_test = None
    num_tasks = misc.get_world_size()
    global_rank = misc.get_rank()
    if args.dist_eval:
        if ((len(dataset_val) % num_tasks) != 0):
            print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. This will slightly alter validation results as extra duplicate entries are added to achieve equal num of samples per-process.')
        sampler_val = torch.utils.data.DistributedSampler(dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True)
    else:
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    # NOTE(review): dataset_test may be None here; the sampler constructor
    # only stores the reference and the loader below is gated on non-None.
    sampler_test = torch.utils.data.SequentialSampler(dataset_test)
    if (dataset_val is not None):
        data_loader_val = torch.utils.data.DataLoader(dataset_val, sampler=sampler_val, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False)
    else:
        data_loader_val = None
    if (dataset_test is not None):
        data_loader_test = torch.utils.data.DataLoader(dataset_test, sampler=sampler_test, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False)
    else:
        data_loader_test = None
    # One entry per proxy client: model, optimizer, criterion, loss scaler,
    # and mixup function (no LR/WD schedule lists in the MAE variant).
    (model_all, optimizer_all, criterion_all, loss_scaler_all, mixup_fn_all) = Partial_Client_Selection(args, model, mode='finetune')
    # Server-side averaged model, kept on CPU between rounds.
    model_avg = deepcopy(model).cpu()
    if (args.log_dir is not None):
        os.makedirs(args.log_dir, exist_ok=True)
        # NOTE(review): this is a plain SummaryWriter, but the code below calls
        # log_writer.set_step(...) and log_writer.update(...), which look like
        # misc.TensorboardLogger methods (as used in the BEiT main above).
        # Likely a bug — verify which wrapper is intended.
        log_writer = SummaryWriter(log_dir=args.log_dir)
    else:
        log_writer = None
    print('=============== Running fine-tuning ===============')
    tot_clients = args.dis_cvs_files
    print('total_clients: ', tot_clients)
    epoch = (- 1)
    start_time = time.time()
    max_accuracy = 0.0
    while True:
        print('epoch: ', epoch)
        epoch += 1
        # Select this round's participating clients (all, or a random subset).
        if (args.num_local_clients == len(args.dis_cvs_files)):
            cur_selected_clients = args.proxy_clients
        else:
            cur_selected_clients = np.random.choice(tot_clients, args.num_local_clients, replace=False).tolist()
        # Total number of samples across selected clients, used to weight the
        # federated average.
        cur_tot_client_Lens = 0
        for client in cur_selected_clients:
            cur_tot_client_Lens += args.clients_with_len[client]
        for (cur_single_client, proxy_single_client) in zip(cur_selected_clients, args.proxy_clients):
            print('cur_single_client: ', cur_single_client)
            print('proxy_single_client: ', proxy_single_client)
            args.single_client = cur_single_client
            # Aggregation weight proportional to this client's dataset size.
            args.clients_weightes[proxy_single_client] = (args.clients_with_len[cur_single_client] / cur_tot_client_Lens)
            dataset_train = DatasetFLFinetune(args=args, phase='train')
            num_tasks = misc.get_world_size()
            global_rank = misc.get_rank()
            print(f'=========client: {proxy_single_client} ==============')
            if args.distributed:
                sampler_train = torch.utils.data.DistributedSampler(dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True)
            else:
                sampler_train = torch.utils.data.RandomSampler(dataset_train)
            print(('Sampler_train = %s' % str(sampler_train)))
            data_loader_train = torch.utils.data.DataLoader(dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True)
            # Fetch this client's local training state.
            model = model_all[proxy_single_client]
            optimizer = optimizer_all[proxy_single_client]
            criterion = criterion_all[proxy_single_client]
            loss_scaler = loss_scaler_all[proxy_single_client]
            mixup_fn = mixup_fn_all[proxy_single_client]
            if args.distributed:
                model_without_ddp = model.module
            else:
                model_without_ddp = model
            n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad))
            total_batch_size = ((args.batch_size * args.accum_iter) * misc.get_world_size())
            num_training_steps_per_inner_epoch = (len(dataset_train) // total_batch_size)
            print(('LR = %.8f' % args.lr))
            print(('Batch size = %d' % total_batch_size))
            print(('Number of training examples = %d' % len(dataset_train)))
            print(('Number of training training per epoch = %d' % num_training_steps_per_inner_epoch))
            if args.distributed:
                # Re-seed the distributed sampler so shuffling differs per round.
                data_loader_train.sampler.set_epoch(epoch)
            if (log_writer is not None):
                # NOTE(review): SummaryWriter has no set_step(); see note above.
                log_writer.set_step(epoch)
            if args.eval:
                # Evaluation-only path: load checkpoint, score on the test set,
                # and terminate the whole process.
                misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, model_ema=None)
                test_stats = valid(args, model, data_loader_test)
                print(f"Accuracy of the network on the {len(dataset_test)} test images: {test_stats['acc1']:.1f}%")
                model.cpu()
                exit(0)
            for inner_epoch in range(args.E_epoch):
                train_stats = train_one_epoch(model, criterion, data_loader_train, optimizer, device, epoch, loss_scaler, args.clip_grad, proxy_single_client, mixup_fn, log_writer=log_writer, args=args)
                log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, 'client': cur_single_client, 'epoch': epoch, 'inner_epoch': inner_epoch, 'n_parameters': n_parameters}
                if (args.output_dir and misc.is_main_process()):
                    if (log_writer is not None):
                        log_writer.flush()
                    with open(os.path.join(args.output_dir, 'log.txt'), mode='a', encoding='utf-8') as f:
                        f.write((json.dumps(log_stats) + '\n'))
        # Federated averaging: fold all client models into model_avg.
        average_model(args, model_avg, model_all)
        if args.output_dir:
            if ((((epoch + 1) % args.save_ckpt_freq) == 0) or ((epoch + 1) == args.max_communication_rounds)):
                misc.save_model(args=args, model=model_avg, model_without_ddp=model_avg, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch)
        if (data_loader_val is not None):
            # Validate the averaged model and keep the best checkpoint.
            model_avg.to(args.device)
            test_stats = valid(args, model_avg, data_loader_val)
            print(f"Accuracy of the network on the {len(dataset_val)} validation images: {test_stats['acc1']:.1f}%")
            if (max_accuracy < test_stats['acc1']):
                max_accuracy = test_stats['acc1']
                if args.output_dir:
                    misc.save_model(args=args, model=model_avg, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch='best', model_ema=None)
            print(f'Max accuracy: {max_accuracy:.2f}%')
            if (log_writer is not None):
                # NOTE(review): SummaryWriter has no update(); see note above.
                log_writer.update(test_acc1=test_stats['acc1'], head='perf', step=epoch)
                log_writer.update(test_acc5=test_stats['acc5'], head='perf', step=epoch)
                log_writer.update(test_loss=test_stats['loss'], head='perf', step=epoch)
            log_stats = {**{f'test_{k}': v for (k, v) in test_stats.items()}, 'epoch': epoch, 'n_parameters': n_parameters}
            if (args.output_dir and misc.is_main_process()):
                if (log_writer is not None):
                    log_writer.flush()
                with open(os.path.join(args.output_dir, 'log.txt'), mode='a', encoding='utf-8') as f:
                    f.write((json.dumps(log_stats) + '\n'))
            model_avg.to('cpu')
        print('global_step_per_client: ', args.global_step_per_client[proxy_single_client])
        print('t_total: ', args.t_total[proxy_single_client])
        # Stop when the last-trained client has consumed its total step budget.
        if (args.global_step_per_client[proxy_single_client] >= args.t_total[proxy_single_client]):
            total_time = (time.time() - start_time)
            total_time_str = str(datetime.timedelta(seconds=int(total_time)))
            print('Training time {}'.format(total_time_str))
            break
|
def get_args():
    """Build the argument parser for Fed-MAE pre-training and parse sys.argv.

    Returns the parsed argparse.Namespace.  Note add_help=False: this parser
    is typically composed into a parent parser.
    """
    p = argparse.ArgumentParser('Fed-MAE pre-training', add_help=False)
    # --- batching / checkpointing ---
    p.add_argument('--batch_size', default=64, type=int, help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus')
    p.add_argument('--save_ckpt_freq', default=20, type=int)
    p.add_argument('--accum_iter', default=1, type=int, help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')
    # --- model ---
    p.add_argument('--model_name', default='mae', type=str)
    p.add_argument('--model', default='mae_vit_large_patch16', type=str, metavar='MODEL', help='Name of model to train')
    p.add_argument('--input_size', default=224, type=int, help='images input size')
    p.add_argument('--mask_ratio', default=0.75, type=float, help='Masking ratio (percentage of removed patches).')
    p.add_argument('--norm_pix_loss', action='store_true', help='Use (per-patch) normalized pixels as targets for computing loss')
    p.set_defaults(norm_pix_loss=False)
    # --- optimization ---
    p.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)')
    p.add_argument('--clip_grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)')
    p.add_argument('--lr', type=float, default=None, metavar='LR', help='learning rate (absolute lr)')
    p.add_argument('--blr', type=float, default=0.001, metavar='LR', help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
    p.add_argument('--min_lr', type=float, default=0.0, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0')
    p.add_argument('--warmup_epochs', type=int, default=40, metavar='N', help='epochs to warmup LR')
    # --- data / paths / environment ---
    p.add_argument('--data_set', default='Retina', type=str, help='dataset for pretraining')
    p.add_argument('--data_path', default='../../data/Retina', type=str, help='dataset path')
    p.add_argument('--output_dir', default='', help='path where to save, empty for no saving')
    p.add_argument('--log_dir', default=None, help='path where to tensorboard log')
    p.add_argument('--device', default='cuda', help='device to use for training / testing')
    p.add_argument('--seed', default=0, type=int)
    p.add_argument('--resume', default='', help='resume from checkpoint')
    p.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    p.add_argument('--num_workers', default=10, type=int)
    p.add_argument('--pin_mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    p.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
    p.set_defaults(pin_mem=True)
    # --- distributed training ---
    p.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    p.add_argument('--local_rank', default=-1, type=int)
    p.add_argument('--sync_bn', default=False, action='store_true')
    p.add_argument('--dist_on_itp', action='store_true')
    p.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    # --- federated-learning setup ---
    p.add_argument('--n_clients', default=5, type=int, help='Number of clients')
    p.add_argument('--E_epoch', default=1, type=int, help='Local training epoch in FL')
    p.add_argument('--max_communication_rounds', default=100, type=int, help='Total communication rounds')
    p.add_argument('--num_local_clients', default=-1, choices=[10, -1], type=int, help='Num of local clients joined in each FL train. -1 indicates all clients')
    p.add_argument('--split_type', type=str, default='central', help='Which data partitions to use')
    return p.parse_args()
|
def main(args, model):
    """Federated MAE pre-training driver.

    Each iteration of the while-loop is one communication round: select the
    participating clients, train every client's local model copy for
    args.E_epoch inner epochs, FedAvg the client models into `model_avg`,
    checkpoint periodically, and stop once the step budget is exhausted.

    NOTE(review): indentation was reconstructed from a flattened source;
    the placement of the round-end average/checkpoint/stop logic (assumed
    at while-loop level, after the per-client loop) should be confirmed
    against the original file.
    """
    misc.init_distributed_mode(args)
    print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
    print('{}'.format(args).replace(', ', ',\n'))
    device = torch.device(args.device)
    misc.fix_random_seeds(args)
    cudnn.benchmark = True
    os.makedirs(args.output_dir, exist_ok=True)
    # Populates the FL split bookkeeping on args (dis_cvs_files, clients_with_len, ...).
    create_dataset_and_evalmetrix(args)
    # One model / optimizer / loss scaler per proxy client.
    (model_all, optimizer_all, loss_scaler_all) = Partial_Client_Selection(args, model)
    model_avg = deepcopy(model).cpu()
    global_rank = misc.get_rank()
    if ((global_rank == 0) and (args.log_dir is not None)):
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = SummaryWriter(log_dir=args.log_dir)
    else:
        log_writer = None
    print('=============== Running pre-training ===============')
    tot_clients = args.dis_cvs_files
    print('total_clients: ', tot_clients)
    epoch = (- 1)
    print(f'Start training for {args.max_communication_rounds} epochs, distributed={args.distributed}')
    start_time = time.time()
    while True:
        print('epoch: ', epoch)
        epoch += 1
        # Client selection for this round: everyone, or a random subset.
        if (args.num_local_clients == len(args.dis_cvs_files)):
            cur_selected_clients = args.proxy_clients
        else:
            cur_selected_clients = np.random.choice(tot_clients, args.num_local_clients, replace=False).tolist()
        # Total sample count over the selected clients, used for FedAvg weights.
        cur_tot_client_Lens = 0
        for client in cur_selected_clients:
            cur_tot_client_Lens += args.clients_with_len[client]
        for (cur_single_client, proxy_single_client) in zip(cur_selected_clients, args.proxy_clients):
            print('cur_single_client: ', cur_single_client)
            print('proxy_single_client: ', proxy_single_client)
            args.single_client = cur_single_client
            # FedAvg weight = this client's share of the round's samples.
            args.clients_weightes[proxy_single_client] = (args.clients_with_len[cur_single_client] / cur_tot_client_Lens)
            dataset_train = DatasetFLPretrain(args)
            num_tasks = misc.get_world_size()
            global_rank = misc.get_rank()
            sampler_rank = global_rank
            num_training_steps_per_inner_epoch = ((len(dataset_train) // args.batch_size) // num_tasks)
            print(f'=========client: {proxy_single_client} ==============')
            if args.distributed:
                sampler_train = torch.utils.data.DistributedSampler(dataset_train, num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
            else:
                sampler_train = torch.utils.data.RandomSampler(dataset_train)
            data_loader_train = torch.utils.data.DataLoader(dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True)
            # NOTE(review): rebinds the `model` parameter to this client's copy.
            model = model_all[proxy_single_client]
            optimizer = optimizer_all[proxy_single_client]
            loss_scaler = loss_scaler_all[proxy_single_client]
            if args.distributed:
                data_loader_train.sampler.set_epoch(epoch)
            if (log_writer is not None):
                log_writer.set_step(epoch)
            n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad))
            total_batch_size = ((args.batch_size * args.accum_iter) * misc.get_world_size())
            # NOTE(review): args.lr is resolved only once (first client of the
            # first round) and then shared by all clients, since args is shared.
            if (args.lr is None):
                args.lr = ((args.blr * total_batch_size) / 256)
            print(('base lr: %.2e' % ((args.lr * 256) / total_batch_size)))
            print(('actual lr: %.2e' % args.lr))
            print(('accumulate grad iterations: %d' % args.accum_iter))
            print(('effective batch size: %d' % total_batch_size))
            print(('Number of training steps = %d' % num_training_steps_per_inner_epoch))
            print(('Number of training examples per epoch = %d' % (total_batch_size * num_training_steps_per_inner_epoch)))
            for inner_epoch in range(args.E_epoch):
                train_stats = train_one_epoch(model, data_loader_train, optimizer, device, epoch, loss_scaler, cur_single_client, max_norm=args.clip_grad, proxy_single_client=proxy_single_client, log_writer=log_writer, args=args)
                log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, 'client': args.single_client, 'epoch': epoch, 'inner_epoch': inner_epoch, 'n_parameters': n_parameters}
                # Only the main process appends to the shared log file.
                if (args.output_dir and misc.is_main_process()):
                    if (log_writer is not None):
                        log_writer.flush()
                    with open(os.path.join(args.output_dir, 'log.txt'), mode='a', encoding='utf-8') as f:
                        f.write((json.dumps(log_stats) + '\n'))
        # FedAvg: merge every client model into model_avg using the weights set above.
        average_model(args, model_avg, model_all)
        if args.output_dir:
            if (((epoch + 1) % args.save_ckpt_freq) == 0):
                misc.save_model(args=args, model=model_avg, model_without_ddp=model_avg, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch)
        # NOTE(review): the stop condition only inspects the LAST trained
        # client's counter (proxy_single_client leaks out of the for loop).
        if (args.global_step_per_client[proxy_single_client] >= args.t_total[proxy_single_client]):
            break
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('================End pre-training! ================ ')
    print('pretraining time {}'.format(total_time_str))
|
def Partial_Client_Selection(args, model, mode='pretrain'):
    """Build per-client training state for federated training.

    Creates one model copy (optionally DDP-wrapped), one optimizer and one
    loss scaler per proxy client; for 'beit' also per-client criterion and
    step-level LR/WD schedulers, and for 'finetune' a per-client mixup fn.
    Optionally loads a pre-trained checkpoint into `model` first (finetune /
    linear-probe modes).  Also initialises bookkeeping on `args`
    (t_total, learning_rate_record, clients_weightes, global_step_per_client).

    Returns, depending on (args.model_name, mode):
      beit/pretrain: (model_all, optimizer_all, criterion_all, lr_scheduler_all, wd_scheduler_all, loss_scaler_all)
      beit/other:    the same tuple + mixup_fn_all
      mae/pretrain:  (model_all, optimizer_all, loss_scaler_all)
      mae/other:     (model_all, optimizer_all, criterion_all, loss_scaler_all, mixup_fn_all)

    NOTE(review): indentation was reconstructed from a flattened source; the
    nesting of the checkpoint-loading sub-branches was inferred and should
    be confirmed against the original file.
    """
    device = torch.device(args.device)
    # Resolve the participating (proxy) clients; -1 means all clients join.
    if (args.num_local_clients == (- 1)):
        args.proxy_clients = args.dis_cvs_files
        args.num_local_clients = len(args.dis_cvs_files)
    else:
        args.proxy_clients = [('train_' + str(i)) for i in range(args.num_local_clients)]
    model_all = {}
    optimizer_all = {}
    criterion_all = {}
    lr_scheduler_all = {}
    wd_scheduler_all = {}
    loss_scaler_all = {}
    mixup_fn_all = {}
    args.learning_rate_record = {}
    args.t_total = {}
    # --- Optionally load a pre-trained checkpoint (finetune / linear probe) ---
    if (((mode == 'finetune') or (mode == 'linprob')) and args.finetune):
        if args.finetune.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(args.finetune, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.finetune, map_location='cpu')
        print(('Load pre-trained checkpoint from: %s' % args.finetune))
        if (args.model_name == 'beit'):
            # Pick the first matching key from args.model_key ('a|b|c' syntax).
            checkpoint_model = None
            for model_key in args.model_key.split('|'):
                if (model_key in checkpoint):
                    checkpoint_model = checkpoint[model_key]
                    print(('Load state_dict by model_key = %s' % model_key))
                    break
            if (checkpoint_model is None):
                checkpoint_model = checkpoint
        elif (args.model_name == 'mae'):
            checkpoint_model = checkpoint['model']
        state_dict = model.state_dict()
        # Drop the classifier head when its shape does not match the new task.
        for k in ['head.weight', 'head.bias']:
            if ((k in checkpoint_model) and (checkpoint_model[k].shape != state_dict[k].shape)):
                print(f'Removing key {k} from pretrained checkpoint')
                del checkpoint_model[k]
        if (args.model_name == 'beit'):
            # Replicate a shared relative position bias table into every block.
            if (model.use_rel_pos_bias and ('rel_pos_bias.relative_position_bias_table' in checkpoint_model)):
                print('Expand the shared relative position embedding to each transformer block. ')
                num_layers = model.get_num_layers()
                rel_pos_bias = checkpoint_model['rel_pos_bias.relative_position_bias_table']
                for i in range(num_layers):
                    checkpoint_model[('blocks.%d.attn.relative_position_bias_table' % i)] = rel_pos_bias.clone()
                checkpoint_model.pop('rel_pos_bias.relative_position_bias_table')
            all_keys = list(checkpoint_model.keys())
            for key in all_keys:
                if ('relative_position_index' in key):
                    checkpoint_model.pop(key)
                # NOTE(review): invoked for every key here; confirm whether the
                # original nests this call under the `if` above.
                relative_position_bias(model, checkpoint_model, key)
        # Resize positional embeddings to the current input resolution.
        interpolate_pos_embed(model, checkpoint_model)
        if (args.model_name == 'beit'):
            msg = model.load_state_dict(checkpoint_model, strict=False)
        elif (args.model_name == 'mae'):
            msg = model.load_state_dict(checkpoint_model, strict=False)
            print(msg)
            # Only the (freshly re-initialised) head keys may be missing.
            if args.global_pool:
                assert (set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'})
            else:
                assert (set(msg.missing_keys) == {'head.weight', 'head.bias'})
            if (mode == 'finetune'):
                trunc_normal_(model.head.weight, std=2e-05)
            elif (mode == 'linprob'):
                trunc_normal_(model.head.weight, std=0.01)
                # Linear probe: BN + linear head on a frozen backbone.
                model.head = torch.nn.Sequential(torch.nn.BatchNorm1d(model.head.in_features, affine=False, eps=1e-06), model.head)
                for (_, p) in model.named_parameters():
                    p.requires_grad = False
                for (_, p) in model.head.named_parameters():
                    p.requires_grad = True
    if args.distributed:
        if args.sync_bn:
            model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    # --- Per-client state ---
    for proxy_single_client in args.proxy_clients:
        global_rank = misc.get_rank()
        num_tasks = misc.get_world_size()
        print('clients_with_len: ', args.clients_with_len[proxy_single_client])
        if (args.model_name == 'beit'):
            if (mode == 'pretrain'):
                total_batch_size = (args.batch_size * num_tasks)
            else:
                total_batch_size = ((args.batch_size * args.update_freq) * num_tasks)
        elif (args.model_name == 'mae'):
            total_batch_size = ((args.batch_size * args.accum_iter) * num_tasks)
        # NOTE(review): args.lr is resolved once and then shared by all clients.
        if (args.lr is None):
            args.lr = ((args.blr * total_batch_size) / 256)
        num_training_steps_per_inner_epoch = (args.clients_with_len[proxy_single_client] // total_batch_size)
        print(('Batch size = %d' % total_batch_size))
        print(('Number of training steps = %d' % num_training_steps_per_inner_epoch))
        print(('Number of training examples per epoch = %d' % (total_batch_size * num_training_steps_per_inner_epoch)))
        # Each client gets its own deep copy of the (possibly checkpoint-loaded) model.
        model_all[proxy_single_client] = deepcopy(model)
        model_all[proxy_single_client] = model_all[proxy_single_client].to(device)
        if args.distributed:
            model_all[proxy_single_client] = torch.nn.parallel.DistributedDataParallel(model_all[proxy_single_client], device_ids=[args.gpu], find_unused_parameters=True)
        if args.distributed:
            model_without_ddp = model_all[proxy_single_client].module
        else:
            model_without_ddp = model_all[proxy_single_client]
        # Optimizer selection by (mode, model family).
        if (mode == 'pretrain'):
            if (args.model_name == 'beit'):
                optimizer_all[proxy_single_client] = create_optimizer(args, model_without_ddp)
            elif (args.model_name == 'mae'):
                param_groups = add_weight_decay(model_without_ddp, args.weight_decay)
                optimizer_all[proxy_single_client] = torch.optim.AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95))
        elif (mode == 'finetune'):
            if (args.model_name == 'beit'):
                num_layers = model_without_ddp.get_num_layers()
                # Layer-wise LR decay: deeper layers get smaller scales.
                if (args.layer_decay < 1.0):
                    assigner = LayerDecayValueAssigner(list(((args.layer_decay ** ((num_layers + 1) - i)) for i in range((num_layers + 2)))))
                else:
                    assigner = None
                if (assigner is not None):
                    print(('Assigned values = %s' % str(assigner.values)))
                skip_weight_decay_list = model_without_ddp.no_weight_decay()
                if args.disable_weight_decay_on_rel_pos_bias:
                    for i in range(num_layers):
                        skip_weight_decay_list.add(('blocks.%d.attn.relative_position_bias_table' % i))
                optimizer_all[proxy_single_client] = create_optimizer(args, model_without_ddp, skip_list=skip_weight_decay_list, get_num_layer=(assigner.get_layer_id if (assigner is not None) else None), get_layer_scale=(assigner.get_scale if (assigner is not None) else None))
            elif (args.model_name == 'mae'):
                param_groups = param_groups_lrd(model_without_ddp, args.weight_decay, no_weight_decay_list=model_without_ddp.no_weight_decay(), layer_decay=args.layer_decay)
                optimizer_all[proxy_single_client] = torch.optim.AdamW(param_groups, lr=args.lr)
        elif (mode == 'linprob'):
            if (args.model_name == 'beit'):
                pass
            elif (args.model_name == 'mae'):
                # Only the head trains; the backbone was frozen above.
                optimizer_all[proxy_single_client] = LARS(model_without_ddp.head.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        if ((mode == 'pretrain') and (args.model_name == 'beit')):
            criterion_all[proxy_single_client] = nn.CrossEntropyLoss()
        if (mode == 'finetune'):
            # Mixup/CutMix and the matching training criterion.
            mixup_fn = None
            mixup_active = ((args.mixup > 0) or (args.cutmix > 0.0) or (args.cutmix_minmax is not None))
            if mixup_active:
                print('Mixup is activated!')
                mixup_fn = Mixup(mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.nb_classes)
            mixup_fn_all[proxy_single_client] = mixup_fn
            if (mixup_fn is not None):
                criterion = SoftTargetCrossEntropy()
            elif (args.smoothing > 0.0):
                criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
            else:
                criterion = torch.nn.CrossEntropyLoss()
            criterion_all[proxy_single_client] = criterion
        if (mode == 'linprob'):
            criterion_all[proxy_single_client] = torch.nn.CrossEntropyLoss()
        if (args.model_name == 'beit'):
            print('Use step level LR & WD scheduler!')
            lr_scheduler_all[proxy_single_client] = misc.cosine_scheduler(args.lr, args.min_lr, epochs=args.E_epoch, niter_per_ep=num_training_steps_per_inner_epoch, max_communication_rounds=args.max_communication_rounds, warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps)
            if (args.weight_decay_end is None):
                args.weight_decay_end = args.weight_decay
            wd_scheduler_all[proxy_single_client] = misc.cosine_scheduler(args.weight_decay, args.weight_decay_end, epochs=args.E_epoch, niter_per_ep=num_training_steps_per_inner_epoch, max_communication_rounds=args.max_communication_rounds)
        loss_scaler_all[proxy_single_client] = NativeScaler()
        # Per-client total step budget over all communication rounds.
        args.t_total[proxy_single_client] = ((num_training_steps_per_inner_epoch * args.E_epoch) * args.max_communication_rounds)
        args.learning_rate_record[proxy_single_client] = []
    # Filled per round by the training loop; reset here.
    args.clients_weightes = {}
    args.global_step_per_client = {name: 0 for name in args.proxy_clients}
    if (args.model_name == 'beit'):
        if (mode == 'pretrain'):
            return (model_all, optimizer_all, criterion_all, lr_scheduler_all, wd_scheduler_all, loss_scaler_all)
        else:
            return (model_all, optimizer_all, criterion_all, lr_scheduler_all, wd_scheduler_all, loss_scaler_all, mixup_fn_all)
    elif (args.model_name == 'mae'):
        if (mode == 'pretrain'):
            return (model_all, optimizer_all, loss_scaler_all)
        else:
            return (model_all, optimizer_all, criterion_all, loss_scaler_all, mixup_fn_all)
|
def average_model(args, model_avg, model_all):
    """FedAvg aggregation: write the weighted average of all client models
    into `model_avg` (in place, on CPU), then copy the averaged parameters
    back into every client model.

    Args:
        args: namespace providing `proxy_clients` (client keys),
            `clients_weightes` (per-client FedAvg weight for this round)
            and `distributed` (whether client models are DDP-wrapped).
        model_avg: the global/server model; its parameters are overwritten.
        model_all: dict mapping client key -> that client's model.
    """
    model_avg.cpu()
    print('Calculate the model avg----')

    def _named_params(client):
        # Unwrap DistributedDataParallel so parameter names match model_avg's.
        net = model_all[client].module if args.distributed else model_all[client]
        return dict(net.named_parameters())

    # Hoisted out of the per-parameter loop: the original rebuilt these dicts
    # for every (parameter, client) pair — O(P * C) dict constructions.
    client_params = {client: _named_params(client) for client in args.proxy_clients}
    client_weight = {client: torch.as_tensor(args.clients_weightes[client], dtype=torch.float32)
                     for client in args.proxy_clients}

    params = dict(model_avg.named_parameters())
    for (name, param) in params.items():
        # Weighted sum of this parameter across all clients.
        tmp_param_data = None
        for client in args.proxy_clients:
            contrib = (client_params[client][name].data * client_weight[client])
            tmp_param_data = contrib if (tmp_param_data is None) else (tmp_param_data + contrib)
        param.data.copy_(tmp_param_data)

    print('Update each client model parameters----')
    for client in args.proxy_clients:
        tmp_params = client_params[client]
        for (name, param) in params.items():
            tmp_params[name].data.copy_(param.data)
|
class AverageMeter(object):
    """Running-average tracker for a scalar metric.

    Keeps the most recent value (`val`), the weighted sum (`sum`), the
    number of observations (`count`), and the running mean (`avg`).
    """

    def __init__(self):
        self.reset()

    def reset(self):
        # Zero out all statistics.
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        # Record `val` observed `n` times and refresh the running mean.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
|
def simple_accuracy(preds, labels):
    """Element-wise accuracy: the mean of the boolean match array
    (fraction of positions where `preds` equals `labels`)."""
    matches = (preds == labels)
    return matches.mean()
|
def save_model(args, model):
    """Persist a client's model weights to
    `<output_dir>/<name>_<client>_checkpoint.bin`."""
    # Unwrap DistributedDataParallel so the raw module's state_dict is saved.
    if hasattr(model, 'module'):
        target = model.module
    else:
        target = model
    # Client tag = csv basename with its extension stripped.
    stem = os.path.basename(args.single_client).split('.')[0]
    ckpt_path = os.path.join(args.output_dir, ('%s_%s_checkpoint.bin' % (args.name, stem)))
    torch.save(target.state_dict(), ckpt_path)
|
def valid(args, model, data_loader):
    """Evaluate `model` on `data_loader` and return averaged metrics.

    Returns a dict mapping metric name ('acc1', 'loss') to its global
    average over the validation set.
    """
    criterion = torch.nn.CrossEntropyLoss()
    metric_logger = misc.MetricLogger(delimiter=' ')
    header = 'Test:'
    model.eval()
    print('++++++ Running Validation ++++++')
    for batch in metric_logger.log_every(data_loader, 10, header):
        # First element is the image tensor, last is the label tensor.
        images, target = batch[0], batch[-1]
        images = images.to(args.device, non_blocking=True)
        target = target.to(args.device, non_blocking=True)
        with torch.no_grad():
            output = model(images)
            loss = criterion(output, target)
        # top-2 accuracy is computed but discarded; only acc@1 is logged.
        (acc1, _) = accuracy(output, target, topk=(1, 2))
        n = images.shape[0]
        metric_logger.update(loss=loss.item())
        metric_logger.meters['acc1'].update(acc1.item(), n=n)
    print('* Acc@1 {top1.global_avg:.3f} loss {losses.global_avg:.3f}'.format(top1=metric_logger.acc1, losses=metric_logger.loss))
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
|
def metric_evaluation(args, eval_result):
    """Decide whether `eval_result` improves on the stored best score.

    For single-output tasks (args.nb_classes == 1) a value that is NOT
    higher than the best counts as an improvement (lower-is-better,
    error-like metric); otherwise a strictly higher value does
    (higher-is-better, accuracy-like).  Returns True when the new result
    should replace the stored best.
    """
    best_so_far = args.best_acc[args.single_client]
    if (args.nb_classes == 1):
        # Lower-is-better: improvement unless the new result exceeds the best.
        return not (best_so_far < eval_result)
    # Higher-is-better.
    return best_so_far < eval_result
|
class RandomResizedCrop(transforms.RandomResizedCrop):
    """
    RandomResizedCrop for matching TF/TPU implementation: no for-loop is used.
    This may lead to results different with torchvision's version.
    Following BYOL's TF code:
    https://github.com/deepmind/deepmind-research/blob/master/byol/utils/dataset.py#L206
    """

    @staticmethod
    def get_params(img, scale, ratio):
        # Returns crop parameters (top, left, height, width) from a single
        # draw — clamping instead of torchvision's rejection-sampling loop.
        # NOTE(review): F._get_image_size is a private torchvision helper, so
        # `F` must be torchvision.transforms.functional here; elsewhere in
        # this file F.conv2d requires torch.nn.functional — confirm which
        # module the file actually binds to `F`.
        (width, height) = F._get_image_size(img)
        area = (height * width)
        # Sample the target crop area uniformly within `scale` of the image area.
        target_area = (area * torch.empty(1).uniform_(scale[0], scale[1]).item())
        # Sample the aspect ratio log-uniformly within `ratio`.
        log_ratio = torch.log(torch.tensor(ratio))
        aspect_ratio = torch.exp(torch.empty(1).uniform_(log_ratio[0], log_ratio[1])).item()
        w = int(round(math.sqrt((target_area * aspect_ratio))))
        h = int(round(math.sqrt((target_area / aspect_ratio))))
        # Clamp to the image bounds rather than re-sampling.
        w = min(w, width)
        h = min(h, height)
        # Place the crop uniformly at random inside the image.
        i = torch.randint(0, ((height - h) + 1), size=(1,)).item()
        j = torch.randint(0, ((width - w) + 1), size=(1,)).item()
        return (i, j, h, w)
|
@attr.s(eq=False, repr=False)
class DecoderBlock(nn.Module):
    """Residual decoder block: out = id_path(x) + post_gain * res_path(x),
    where res_path is a relu/conv bottleneck stack (1x1, 3x3, 3x3, 3x3)."""

    n_in: int = attr.ib(validator=(lambda i, a, x: (x >= 1)))  # input channels
    n_out: int = attr.ib(validator=(lambda i, a, x: ((x >= 1) and ((x % 4) == 0))))  # output channels, multiple of 4
    n_layers: int = attr.ib(validator=(lambda i, a, x: (x >= 1)))  # total blocks in the network (sets post_gain)
    device: torch.device = attr.ib(default=None)
    requires_grad: bool = attr.ib(default=False)

    def __attrs_post_init__(self) -> None:
        super().__init__()
        # Bottleneck width is a quarter of the output width.
        self.n_hid = (self.n_out // 4)
        # Scale the residual path by 1/n_layers^2 so deep stacks stay stable.
        self.post_gain = (1 / (self.n_layers ** 2))
        make_conv = partial(Conv2d, device=self.device, requires_grad=self.requires_grad)
        # 1x1 projection on the skip path only when channel counts differ.
        self.id_path = (make_conv(self.n_in, self.n_out, 1) if (self.n_in != self.n_out) else nn.Identity())
        self.res_path = nn.Sequential(OrderedDict([('relu_1', nn.ReLU()), ('conv_1', make_conv(self.n_in, self.n_hid, 1)), ('relu_2', nn.ReLU()), ('conv_2', make_conv(self.n_hid, self.n_hid, 3)), ('relu_3', nn.ReLU()), ('conv_3', make_conv(self.n_hid, self.n_hid, 3)), ('relu_4', nn.ReLU()), ('conv_4', make_conv(self.n_hid, self.n_out, 3))]))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Residual sum of the (possibly projected) skip and the scaled conv path.
        return (self.id_path(x) + (self.post_gain * self.res_path(x)))
|
@attr.s(eq=False, repr=False)
class Decoder(nn.Module):
    """dVAE decoder: maps a token-distribution grid (B, vocab_size, H, W)
    to image statistics (B, 2 * output_channels, 8H, 8W) — three 2x
    nearest-neighbour upsamples give the 8x spatial growth.  The doubled
    channel count presumably carries logit-Laplace parameters (see
    map_pixels / unmap_pixels) — confirm against the dVAE loss."""

    group_count: int = 4  # fixed number of residual groups
    n_init: int = attr.ib(default=128, validator=(lambda i, a, x: (x >= 8)))  # channels after the input 1x1 conv
    n_hid: int = attr.ib(default=256, validator=(lambda i, a, x: (x >= 64)))  # base hidden width
    n_blk_per_group: int = attr.ib(default=2, validator=(lambda i, a, x: (x >= 1)))  # residual blocks per group
    output_channels: int = attr.ib(default=3, validator=(lambda i, a, x: (x >= 1)))
    vocab_size: int = attr.ib(default=8192, validator=(lambda i, a, x: (x >= 512)))  # discrete token vocabulary
    device: torch.device = attr.ib(default=torch.device('cpu'))
    requires_grad: bool = attr.ib(default=False)
    use_mixed_precision: bool = attr.ib(default=True)  # NOTE(review): stored but not referenced in this class

    def __attrs_post_init__(self) -> None:
        super().__init__()
        blk_range = range(self.n_blk_per_group)
        n_layers = (self.group_count * self.n_blk_per_group)
        make_conv = partial(Conv2d, device=self.device, requires_grad=self.requires_grad)
        make_blk = partial(DecoderBlock, n_layers=n_layers, device=self.device, requires_grad=self.requires_grad)
        # Pipeline: fp32 input 1x1 conv -> 4 groups of residual blocks at
        # widths 8n/4n/2n/1n (groups 1-3 each followed by 2x upsample) ->
        # relu + 1x1 conv emitting 2 * output_channels maps.
        self.blocks = nn.Sequential(OrderedDict([('input', make_conv(self.vocab_size, self.n_init, 1, use_float16=False)), ('group_1', nn.Sequential(OrderedDict([*[(f'block_{(i + 1)}', make_blk((self.n_init if (i == 0) else (8 * self.n_hid)), (8 * self.n_hid))) for i in blk_range], ('upsample', nn.Upsample(scale_factor=2, mode='nearest'))]))), ('group_2', nn.Sequential(OrderedDict([*[(f'block_{(i + 1)}', make_blk(((8 * self.n_hid) if (i == 0) else (4 * self.n_hid)), (4 * self.n_hid))) for i in blk_range], ('upsample', nn.Upsample(scale_factor=2, mode='nearest'))]))), ('group_3', nn.Sequential(OrderedDict([*[(f'block_{(i + 1)}', make_blk(((4 * self.n_hid) if (i == 0) else (2 * self.n_hid)), (2 * self.n_hid))) for i in blk_range], ('upsample', nn.Upsample(scale_factor=2, mode='nearest'))]))), ('group_4', nn.Sequential(OrderedDict([*[(f'block_{(i + 1)}', make_blk(((2 * self.n_hid) if (i == 0) else (1 * self.n_hid)), (1 * self.n_hid))) for i in blk_range]]))), ('output', nn.Sequential(OrderedDict([('relu', nn.ReLU()), ('conv', make_conv((1 * self.n_hid), (2 * self.output_channels), 1))])))]))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Validate: 4-d fp32 input whose channel dim equals the vocabulary size.
        if (len(x.shape) != 4):
            raise ValueError(f'input shape {x.shape} is not 4d')
        if (x.shape[1] != self.vocab_size):
            raise ValueError(f'input has {x.shape[1]} channels but model built for {self.vocab_size}')
        if (x.dtype != torch.float32):
            raise ValueError('input must have dtype torch.float32')
        return self.blocks(x)
|
@attr.s(eq=False, repr=False)
class EncoderBlock(nn.Module):
    """Residual encoder block: out = id_path(x) + post_gain * res_path(x).
    Mirrors DecoderBlock but with the kernel order reversed on the
    bottleneck path (3x3, 3x3, 3x3, then a 1x1 to the output width)."""

    n_in: int = attr.ib(validator=(lambda i, a, x: (x >= 1)))  # input channels
    n_out: int = attr.ib(validator=(lambda i, a, x: ((x >= 1) and ((x % 4) == 0))))  # output channels, multiple of 4
    n_layers: int = attr.ib(validator=(lambda i, a, x: (x >= 1)))  # total blocks in the network (sets post_gain)
    device: torch.device = attr.ib(default=None)
    requires_grad: bool = attr.ib(default=False)

    def __attrs_post_init__(self) -> None:
        super().__init__()
        # Bottleneck width is a quarter of the output width.
        self.n_hid = (self.n_out // 4)
        # Scale the residual path by 1/n_layers^2 so deep stacks stay stable.
        self.post_gain = (1 / (self.n_layers ** 2))
        make_conv = partial(Conv2d, device=self.device, requires_grad=self.requires_grad)
        # 1x1 projection on the skip path only when channel counts differ.
        self.id_path = (make_conv(self.n_in, self.n_out, 1) if (self.n_in != self.n_out) else nn.Identity())
        self.res_path = nn.Sequential(OrderedDict([('relu_1', nn.ReLU()), ('conv_1', make_conv(self.n_in, self.n_hid, 3)), ('relu_2', nn.ReLU()), ('conv_2', make_conv(self.n_hid, self.n_hid, 3)), ('relu_3', nn.ReLU()), ('conv_3', make_conv(self.n_hid, self.n_hid, 3)), ('relu_4', nn.ReLU()), ('conv_4', make_conv(self.n_hid, self.n_out, 1))]))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Residual sum of the (possibly projected) skip and the scaled conv path.
        return (self.id_path(x) + (self.post_gain * self.res_path(x)))
|
@attr.s(eq=False, repr=False)
class Encoder(nn.Module):
    """dVAE encoder: maps an image (B, input_channels, H, W) to per-position
    token logits (B, vocab_size, H/8, W/8) — three MaxPool2d(2) stages give
    the 8x spatial reduction."""

    group_count: int = 4  # fixed number of residual groups
    n_hid: int = attr.ib(default=256, validator=(lambda i, a, x: (x >= 64)))  # base hidden width
    n_blk_per_group: int = attr.ib(default=2, validator=(lambda i, a, x: (x >= 1)))  # residual blocks per group
    input_channels: int = attr.ib(default=3, validator=(lambda i, a, x: (x >= 1)))
    vocab_size: int = attr.ib(default=8192, validator=(lambda i, a, x: (x >= 512)))  # discrete token vocabulary
    device: torch.device = attr.ib(default=torch.device('cpu'))
    requires_grad: bool = attr.ib(default=False)
    use_mixed_precision: bool = attr.ib(default=True)  # NOTE(review): stored but not referenced in this class

    def __attrs_post_init__(self) -> None:
        super().__init__()
        blk_range = range(self.n_blk_per_group)
        n_layers = (self.group_count * self.n_blk_per_group)
        make_conv = partial(Conv2d, device=self.device, requires_grad=self.requires_grad)
        make_blk = partial(EncoderBlock, n_layers=n_layers, device=self.device, requires_grad=self.requires_grad)
        # Pipeline: 7x7 stem conv -> 4 groups of residual blocks at widths
        # 1n/2n/4n/8n (groups 1-3 each followed by 2x max-pool) ->
        # relu + fp32 1x1 conv emitting vocab_size logit maps.
        self.blocks = nn.Sequential(OrderedDict([('input', make_conv(self.input_channels, (1 * self.n_hid), 7)), ('group_1', nn.Sequential(OrderedDict([*[(f'block_{(i + 1)}', make_blk((1 * self.n_hid), (1 * self.n_hid))) for i in blk_range], ('pool', nn.MaxPool2d(kernel_size=2))]))), ('group_2', nn.Sequential(OrderedDict([*[(f'block_{(i + 1)}', make_blk(((1 * self.n_hid) if (i == 0) else (2 * self.n_hid)), (2 * self.n_hid))) for i in blk_range], ('pool', nn.MaxPool2d(kernel_size=2))]))), ('group_3', nn.Sequential(OrderedDict([*[(f'block_{(i + 1)}', make_blk(((2 * self.n_hid) if (i == 0) else (4 * self.n_hid)), (4 * self.n_hid))) for i in blk_range], ('pool', nn.MaxPool2d(kernel_size=2))]))), ('group_4', nn.Sequential(OrderedDict([*[(f'block_{(i + 1)}', make_blk(((4 * self.n_hid) if (i == 0) else (8 * self.n_hid)), (8 * self.n_hid))) for i in blk_range]]))), ('output', nn.Sequential(OrderedDict([('relu', nn.ReLU()), ('conv', make_conv((8 * self.n_hid), self.vocab_size, 1, use_float16=False))])))]))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Validate: 4-d fp32 input with the expected channel count.
        if (len(x.shape) != 4):
            raise ValueError(f'input shape {x.shape} is not 4d')
        if (x.shape[1] != self.input_channels):
            raise ValueError(f'input has {x.shape[1]} channels but model built for {self.input_channels}')
        if (x.dtype != torch.float32):
            raise ValueError('input must have dtype torch.float32')
        return self.blocks(x)
|
@attr.s(eq=False)
class Conv2d(nn.Module):
    """Stride-1 2-d convolution with explicitly-owned weight/bias parameters,
    'same'-style padding for odd kernels, and optional fp16 execution when
    the weights live on a CUDA device."""

    n_in: int = attr.ib(validator=(lambda i, a, x: (x >= 1)))  # input channels
    n_out: int = attr.ib(validator=(lambda i, a, x: (x >= 1)))  # output channels
    kw: int = attr.ib(validator=(lambda i, a, x: ((x >= 1) and ((x % 2) == 1))))  # odd square kernel size
    use_float16: bool = attr.ib(default=True)  # cast input/weights to half on CUDA
    device: torch.device = attr.ib(default=torch.device('cpu'))
    requires_grad: bool = attr.ib(default=False)

    def __attrs_post_init__(self) -> None:
        super().__init__()
        # Weight init with std = 1 / sqrt(fan_in); bias starts at zero.
        w = torch.empty((self.n_out, self.n_in, self.kw, self.kw), dtype=torch.float32, device=self.device, requires_grad=self.requires_grad)
        w.normal_(std=(1 / math.sqrt((self.n_in * (self.kw ** 2)))))
        b = torch.zeros((self.n_out,), dtype=torch.float32, device=self.device, requires_grad=self.requires_grad)
        (self.w, self.b) = (nn.Parameter(w), nn.Parameter(b))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # fp16 path only when requested AND the weights are on a CUDA device;
        # otherwise force fp32 so CPU execution stays in full precision.
        if (self.use_float16 and ('cuda' in self.w.device.type)):
            if (x.dtype != torch.float16):
                x = x.half()
            (w, b) = (self.w.half(), self.b.half())
        else:
            if (x.dtype != torch.float32):
                x = x.float()
            (w, b) = (self.w, self.b)
        # Padding (kw - 1) // 2 preserves the spatial size at stride 1.
        return F.conv2d(x, w, b, padding=((self.kw - 1) // 2))
|
def map_pixels(x: torch.Tensor) -> torch.Tensor:
    """Affinely squash pixel values from [0, 1] into
    [logit_laplace_eps, 1 - logit_laplace_eps] (inverse of unmap_pixels)."""
    if (x.dtype != torch.float):
        raise ValueError('expected input to have type float')
    scale = 1 - 2 * logit_laplace_eps
    return scale * x + logit_laplace_eps
|
def unmap_pixels(x: torch.Tensor) -> torch.Tensor:
    """Undo map_pixels: stretch values back from
    [logit_laplace_eps, 1 - logit_laplace_eps] to [0, 1], clamping to that range."""
    if (len(x.shape) != 4):
        raise ValueError('expected input to be 4d')
    if (x.dtype != torch.float):
        raise ValueError('expected input to have type float')
    scale = 1 - 2 * logit_laplace_eps
    return (x - logit_laplace_eps).div(scale).clamp(0, 1)
|
class DataAugmentationForPretrain(object):
' data transformations for pre-training'
def __init__(self, args):
if (args.data_set == 'Retina'):
(mean, std) = (RETINA_MEAN, RETINA_STD)
else:
(mean, std) = ((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
if (args.model_name == 'beit'):
if (args.data_set == 'Retina'):
self.common_transform = transforms.Compose([transforms.RandomGrayscale(p=0.2), transforms.ColorJitter(0.4, 0.4, 0.4), transforms.RandomHorizontalFlip(p=0.5), RandomResizedCropAndInterpolationWithTwoPic(size=args.input_size, second_size=args.second_input_size, scale=(0.2, 1.0), interpolation=args.train_interpolation, second_interpolation=args.second_interpolation)])
elif (args.data_set == 'COVID-FL'):
self.common_transform = transforms.Compose([transforms.ColorJitter(hue=0.05, saturation=0.05), transforms.RandomHorizontalFlip(p=0.5), RandomResizedCropAndInterpolationWithTwoPic(size=args.input_size, second_size=args.second_input_size, scale=(0.4, 1.0), interpolation=args.train_interpolation, second_interpolation=args.second_interpolation)])
else:
self.common_transform = transforms.Compose([transforms.ColorJitter(0.4, 0.4, 0.4), transforms.RandomHorizontalFlip(p=0.5), RandomResizedCropAndInterpolationWithTwoPic(size=args.input_size, second_size=args.second_input_size, scale=(0.2, 1.0), interpolation=args.train_interpolation, second_interpolation=args.second_interpolation)])
if (args.discrete_vae_type == 'dall-e'):
self.visual_token_transform = transforms.Compose([transforms.ToTensor(), map_pixels])
elif (args.discrete_vae_type == 'customized'):
self.visual_token_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))])
else:
raise NotImplementedError()
args.num_mask_patches = int((round(((args.mask_ratio * 196.0) / 5.0)) * 5.0))
self.masked_position_generator = MaskingGenerator(args.window_size, num_masking_patches=args.num_mask_patches, max_num_patches=args.max_mask_patches_per_block, min_num_patches=args.min_mask_patches_per_block)
elif (args.model_name == 'mae'):
if (args.data_set == 'Retina'):
self.common_transform = transforms.Compose([transforms.RandomResizedCrop(args.input_size, scale=(0.2, 1.0), interpolation=3), transforms.RandomGrayscale(p=0.2), transforms.ColorJitter(0.4, 0.4, 0.4), transforms.RandomHorizontalFlip(p=0.5)])
elif (args.data_set == 'COVID-FL'):
self.common_transform = transforms.Compose([transforms.RandomResizedCrop(args.input_size, scale=(0.4, 1.0), interpolation=3), transforms.ColorJitter(hue=0.05, saturation=0.05), transforms.RandomHorizontalFlip(p=0.5)])
else:
self.common_transform = transforms.Compose([transforms.RandomResizedCrop(args.input_size, scale=(0.2, 1.0), interpolation=3), transforms.RandomGrayscale(p=0.2), transforms.ColorJitter(0.1, 0.1, 0.1), transforms.RandomHorizontalFlip(p=0.5)])
self.patch_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))])
self.args = args
def __call__(self, image):
    """Apply the model-specific augmentation pipeline to one PIL image.

    Returns:
        For 'beit': (patch tensor, visual-token tensor, mask positions).
        For 'mae': the patch tensor only.

    Raises:
        NotImplementedError: for any other model name (previously this fell
        through and silently returned None, which crashed downstream).
    """
    if self.args.model_name == 'beit':
        # BEiT needs two views of the same crop: one for the patch encoder
        # and one for the discrete-VAE tokenizer.
        (for_patches, for_visual_tokens) = self.common_transform(image)
        return (self.patch_transform(for_patches), self.visual_token_transform(for_visual_tokens), self.masked_position_generator())
    elif self.args.model_name == 'mae':
        for_patches = self.common_transform(image)
        return self.patch_transform(for_patches)
    raise NotImplementedError('Unknown model_name: {}'.format(self.args.model_name))
def __repr__(self):
    """Human-readable summary of the configured transforms."""
    # Use a local named `text` instead of shadowing the builtin `repr`.
    if self.args.model_name == 'beit':
        text = '(DataAugmentationForBEiT,\n'
        text += (' common_transform = %s,\n' % str(self.common_transform))
        text += (' patch_transform = %s,\n' % str(self.patch_transform))
        text += (' visual_tokens_transform = %s,\n' % str(self.visual_token_transform))
        text += (' Masked position generator = %s,\n' % str(self.masked_position_generator))
        text += ')'
    elif self.args.model_name == 'mae':
        # NOTE: 'FoMAE' typo kept to preserve existing log output.
        text = '(DataAugmentationFoMAE,\n'
        text += (' common_transform = %s,\n' % str(self.common_transform))
        text += (' patch_transform = %s,\n' % str(self.patch_transform))
        # Bug fix: this branch previously never closed the parenthesis.
        text += ')'
    else:
        # Bug fix: previously raised UnboundLocalError for unknown model names.
        text = type(self).__name__
    return text
|
def build_transform(is_train, mode, args):
    """Data transformations for fine-tuning.

    Args:
        is_train: True builds the augmented training pipeline, False the
            deterministic eval pipeline (resize + center crop).
        mode: only 'finetune' is supported.
        args: namespace providing at least data_set and input_size.

    Returns:
        A torchvision transforms.Compose.

    Raises:
        ValueError: for any mode other than 'finetune' (previously this fell
        through and hit UnboundLocalError on the undefined `transform`).
    """
    if args.data_set == 'Retina':
        (mean, std) = (RETINA_MEAN, RETINA_STD)
    else:
        # Default to ImageNet channel statistics.
        (mean, std) = ((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    if mode != 'finetune':
        raise ValueError('build_transform only supports mode="finetune", got {!r}'.format(mode))
    if is_train:
        if args.data_set == 'COVID-FL':
            transform = transforms.Compose([transforms.RandomResizedCrop(args.input_size, scale=(0.8, 1.2)), transforms.RandomRotation(degrees=10), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))])
        else:
            transform = transforms.Compose([transforms.RandomResizedCrop(args.input_size, scale=(0.6, 1.0)), transforms.RandomRotation(degrees=10), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))])
    else:
        transform = transforms.Compose([transforms.Resize(size=args.input_size), transforms.CenterCrop(size=(args.input_size, args.input_size)), transforms.ToTensor(), transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))])
    return transform
|
class LARS(torch.optim.Optimizer):
    """LARS optimizer; no rate scaling or weight decay for parameters <= 1D
    (biases and normalization parameters are updated with plain momentum SGD).
    """

    def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, trust_coefficient=0.001):
        defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum, trust_coefficient=trust_coefficient)
        super().__init__(params, defaults)

    @torch.no_grad()
    def step(self):
        """Perform a single optimization step."""
        for group in self.param_groups:
            for param in group['params']:
                grad = param.grad
                if grad is None:
                    continue
                if param.ndim > 1:
                    # Weight decay and the LARS trust-ratio scaling apply
                    # only to matrix-shaped parameters.
                    grad = grad.add(param, alpha=group['weight_decay'])
                    w_norm = torch.norm(param)
                    g_norm = torch.norm(grad)
                    ones = torch.ones_like(w_norm)
                    trust = torch.where(
                        w_norm > 0.0,
                        torch.where(g_norm > 0, (group['trust_coefficient'] * w_norm) / g_norm, ones),
                        ones,
                    )
                    grad = grad.mul(trust)
                state = self.state[param]
                if 'mu' not in state:
                    state['mu'] = torch.zeros_like(param)
                momentum_buf = state['mu']
                momentum_buf.mul_(group['momentum']).add_(grad)
                param.add_(momentum_buf, alpha=-group['lr'])
|
def param_groups_lrd(model, weight_decay=0.05, no_weight_decay_list=[], layer_decay=0.75):
    """Parameter groups for layer-wise lr decay.

    Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58
    """
    group_names = {}
    groups = {}
    num_layers = len(model.blocks) + 1
    # Scale for layer i is layer_decay ** (num_layers - i): deeper layers
    # (closer to the head) get a larger learning rate.
    layer_scales = [layer_decay ** (num_layers - i) for i in range(num_layers + 1)]
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # 1-D tensors (biases, norms) and explicitly listed names get no decay.
        skip_decay = param.ndim == 1 or name in no_weight_decay_list
        decay_tag = 'no_decay' if skip_decay else 'decay'
        this_decay = 0.0 if skip_decay else weight_decay
        layer_id = get_layer_id_for_vit(name, num_layers)
        key = 'layer_%d_%s' % (layer_id, decay_tag)
        if key not in group_names:
            this_scale = layer_scales[layer_id]
            group_names[key] = {'lr_scale': this_scale, 'weight_decay': this_decay, 'params': []}
            groups[key] = {'lr_scale': this_scale, 'weight_decay': this_decay, 'params': []}
        group_names[key]['params'].append(name)
        groups[key]['params'].append(param)
    return list(groups.values())
|
def get_layer_id_for_vit(name, num_layers):
    """Assign a parameter with its layer id.

    Following BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L33
    """
    # Embedding-level parameters belong to layer 0.
    if name in ('cls_token', 'pos_embed') or name.startswith('patch_embed'):
        return 0
    if name.startswith('blocks'):
        # 'blocks.<i>.<...>' maps to transformer layer i (1-based here).
        return int(name.split('.')[1]) + 1
    # Everything else (e.g. the head) is treated as the last layer.
    return num_layers
|
def adjust_learning_rate(optimizer, epoch, args):
    """Decay the learning rate with half-cycle cosine after warmup.

    Sets 'lr' on every param group (scaled by the group's 'lr_scale' when
    present) and returns the base lr used.
    """
    if epoch < args.warmup_epochs:
        # Linear warmup from 0 up to args.lr.
        lr = args.lr * epoch / args.warmup_epochs
    else:
        progress = (epoch - args.warmup_epochs) / (args.max_communication_rounds - args.warmup_epochs)
        lr = args.min_lr + (args.lr - args.min_lr) * 0.5 * (1.0 + math.cos(math.pi * progress))
    for group in optimizer.param_groups:
        # Layer-wise-decayed groups carry a per-group multiplier.
        group['lr'] = lr * group['lr_scale'] if 'lr_scale' in group else lr
    return lr
|
def fix_random_seeds(args):
    """Fix random seeds for torch (CPU + all GPUs) and numpy.

    The seed is offset by the distributed rank so each worker gets a
    distinct but reproducible random stream.
    """
    seed = args.seed + get_rank()
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
|
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        # Default display: windowed median plus global average.
        self.fmt = fmt if fmt is not None else '{median:.4f} ({global_avg:.4f})'

    def update(self, value, n=1):
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """Sum count/total across workers.

        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        stats = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        count, total = stats.tolist()
        self.count = int(count)
        self.total = total

    @property
    def median(self):
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value)
|
class MetricLogger(object):
    """Collects named SmoothedValue meters, pretty-prints them, and can
    synchronize their totals across distributed workers."""

    def __init__(self, delimiter='\t'):
        # Meters are created lazily on first update with default smoothing.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Record one scalar per keyword argument; tensors are unwrapped via .item()."""
        for (k, v) in kwargs.items():
            if (v is None):
                continue
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Expose meters as attributes (logger.loss etc.). Only invoked when
        # normal attribute lookup fails, so real attributes take precedence.
        if (attr in self.meters):
            return self.meters[attr]
        if (attr in self.__dict__):
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for (name, meter) in self.meters.items():
            loss_str.append('{}: {}'.format(name, str(meter)))
        return self.delimiter.join(loss_str)

    def get_mlm_acc(self):
        """Return the global average of the 'mlm_acc' meter (None if absent)."""
        for (name, meter) in self.meters.items():
            if (name == 'mlm_acc'):
                print('value: ', meter.value)
                print('avg: ', meter.global_avg)
                return meter.global_avg

    def get_class_acc(self):
        """Return the global average of the 'class_acc' meter (None if absent)."""
        for (name, meter) in self.meters.items():
            if (name == 'class_acc'):
                print('value: ', meter.value)
                print('avg: ', meter.global_avg)
                return meter.global_avg

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield items from `iterable`, printing progress, ETA and all meters
        every `print_freq` iterations and on the final iteration."""
        i = 0
        if (not header):
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Pad the iteration counter to the width of the total count.
        space_fmt = ((':' + str(len(str(len(iterable))))) + 'd')
        log_msg = [header, (('[{0' + space_fmt) + '}/{1}]'), 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}']
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = (1024.0 * 1024.0)
        for obj in iterable:
            # Time spent waiting for data vs. total per-iteration time.
            data_time.update((time.time() - end))
            (yield obj)
            iter_time.update((time.time() - end))
            if (((i % print_freq) == 0) or (i == (len(iterable) - 1))):
                eta_seconds = (iter_time.global_avg * (len(iterable) - i))
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=(torch.cuda.max_memory_allocated() / MB)))
                else:
                    print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = (time.time() - start_time)
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(header, total_time_str, (total_time / len(iterable))))
|
class TensorboardLogger(object):
    """Thin wrapper around a TensorBoard SummaryWriter that keeps an
    internal global-step counter."""

    def __init__(self, log_dir):
        self.writer = SummaryWriter(logdir=log_dir)
        self.step = 0

    def set_step(self, step=None):
        # Jump to an explicit step when given, otherwise advance by one.
        self.step = step if step is not None else self.step + 1

    def update(self, head='scalar', step=None, **kwargs):
        """Log each non-None scalar kwarg under the tag '<head>/<name>'."""
        for key, val in kwargs.items():
            if val is None:
                continue
            if isinstance(val, torch.Tensor):
                val = val.item()
            assert isinstance(val, (float, int))
            self.writer.add_scalar(head + '/' + key, val, self.step if step is None else step)

    def flush(self):
        self.writer.flush()
|
def _load_checkpoint_for_ema(model_ema, checkpoint):
'\n Workaround for ModelEma._load_checkpoint to accept an already-loaded object\n '
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file)
|
def setup_for_distributed(is_master):
    """Monkey-patch builtins.print so only the master process prints.

    Any rank can still print by passing force=True.
    """
    import builtins as __builtin__
    original_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            original_print(*args, **kwargs)

    __builtin__.print = print
|
def is_dist_avail_and_initialized():
    """True when torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
|
def get_world_size():
    """Number of distributed processes, or 1 when not distributed."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
|
def get_rank():
    """Rank of this process, or 0 when not distributed."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
|
def is_main_process():
    """True only on rank 0 (the master process)."""
    return get_rank() == 0
|
def save_on_master(*args, **kwargs):
    """torch.save, executed only on the master process."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
|
def init_distributed_mode(args):
    """Initialize torch.distributed from the launch environment.

    Checks, in order: --dist_on_itp (OpenMPI env vars), torchrun-style
    RANK/WORLD_SIZE env vars, then SLURM. When none match, distributed
    mode is disabled. On success this pins the CUDA device, creates the
    NCCL process group, and mutes print() on non-master ranks.
    """
    if args.dist_on_itp:
        # ITP/OpenMPI launch: translate MPI env vars into the ones
        # torch.distributed expects.
        args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
        args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
        args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
        args.dist_url = ('tcp://%s:%s' % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']))
        os.environ['LOCAL_RANK'] = str(args.gpu)
        os.environ['RANK'] = str(args.rank)
        os.environ['WORLD_SIZE'] = str(args.world_size)
    elif (('RANK' in os.environ) and ('WORLD_SIZE' in os.environ)):
        # torchrun / torch.distributed.launch path.
        args.rank = int(os.environ['RANK'])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif ('SLURM_PROCID' in os.environ):
        # NOTE(review): this branch sets rank/gpu but not args.world_size or
        # args.dist_url — presumably supplied elsewhere; verify before
        # relying on SLURM launches.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = (args.rank % torch.cuda.device_count())
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}, gpu {}'.format(args.rank, args.dist_url, args.gpu), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    # Wait for every rank before patching print, so startup logs stay ordered.
    torch.distributed.barrier()
    setup_for_distributed((args.rank == 0))
|
def load_state_dict(model, state_dict, prefix='', ignore_missing='relative_position_index'):
    """Load `state_dict` into `model`, tolerating missing/unexpected keys.

    Works like nn.Module.load_state_dict(strict=False) but prints a report,
    and suppresses warnings for missing keys that contain any of the
    '|'-separated substrings in `ignore_missing`.
    """
    missing_keys = []
    unexpected_keys = []
    error_msgs = []
    # Preserve the version metadata torch attaches to state dicts; copy() so
    # the caller's dict is not mutated by the per-module loading below.
    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if (metadata is not None):
        state_dict._metadata = metadata

    def load(module, prefix=''):
        # Recursively invoke each submodule's own loading hook, accumulating
        # missing/unexpected keys and error messages.
        local_metadata = ({} if (metadata is None) else metadata.get(prefix[:(- 1)], {}))
        module._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
        for (name, child) in module._modules.items():
            if (child is not None):
                load(child, ((prefix + name) + '.'))

    load(model, prefix=prefix)
    # Partition the missing keys into ones worth warning about vs. ignorable.
    warn_missing_keys = []
    ignore_missing_keys = []
    for key in missing_keys:
        keep_flag = True
        for ignore_key in ignore_missing.split('|'):
            if (ignore_key in key):
                keep_flag = False
                break
        if keep_flag:
            warn_missing_keys.append(key)
        else:
            ignore_missing_keys.append(key)
    missing_keys = warn_missing_keys
    if (len(missing_keys) > 0):
        print('Weights of {} not initialized from pretrained model: {}'.format(model.__class__.__name__, missing_keys))
    if (len(unexpected_keys) > 0):
        print('Weights from pretrained model not used in {}: {}'.format(model.__class__.__name__, unexpected_keys))
    if (len(ignore_missing_keys) > 0):
        print('Ignored weights of {} not initialized from pretrained model: {}'.format(model.__class__.__name__, ignore_missing_keys))
    if (len(error_msgs) > 0):
        print('\n'.join(error_msgs))
|
class NativeScalerWithGradNormCount():
    """torch.cuda.amp loss scaler that also reports the gradient norm."""
    state_dict_key = 'amp_scaler'

    def __init__(self):
        self._scaler = torch.cuda.amp.GradScaler()

    def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
        """Backward on the scaled loss; when update_grad is True, unscale,
        optionally clip, then step the optimizer and update the scale.

        Returns the (possibly clipped) gradient norm, or None when gradients
        are only being accumulated (update_grad=False).
        """
        self._scaler.scale(loss).backward(create_graph=create_graph)
        if update_grad:
            if (clip_grad is not None):
                assert (parameters is not None)
                # Gradients must be unscaled in-place before clipping.
                self._scaler.unscale_(optimizer)
                norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
            else:
                self._scaler.unscale_(optimizer)
                norm = get_grad_norm_(parameters)
            self._scaler.step(optimizer)
            self._scaler.update()
        else:
            norm = None
        return norm

    def state_dict(self):
        return self._scaler.state_dict()

    def load_state_dict(self, state_dict):
        self._scaler.load_state_dict(state_dict)
|
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
    """Total gradient norm over `parameters` (params without grads are skipped)."""
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    grads = [p.grad for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if not grads:
        return torch.tensor(0.0)
    device = grads[0].device
    if norm_type == inf:
        # Infinity norm: largest absolute entry across all gradients.
        return max(g.detach().abs().max().to(device) for g in grads)
    return torch.norm(torch.stack([torch.norm(g.detach(), norm_type).to(device) for g in grads]), norm_type)
|
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, max_communication_rounds=100, warmup_epochs=0, start_warmup_value=0, warmup_steps=(- 1)):
    """Per-iteration schedule: linear warmup followed by half-cycle cosine decay.

    Total length is epochs * niter_per_ep * max_communication_rounds.
    """
    warmup_iters = warmup_epochs * niter_per_ep
    if warmup_steps > 0:
        # An explicit step count overrides the epoch-based warmup length.
        warmup_iters = warmup_steps
    print('Set warmup steps = %d' % warmup_iters)
    warmup_part = np.array([])
    if warmup_epochs > 0:
        warmup_part = np.linspace(start_warmup_value, base_value, warmup_iters)
    total = (epochs * niter_per_ep) * max_communication_rounds
    steps = np.arange(total - warmup_iters)
    cosine_part = np.array([final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / len(steps))) for i in steps])
    schedule = np.concatenate((warmup_part, cosine_part))
    print('len(schedule)=', len(schedule), 'epochs * niter_per_ep * max_communication_rounds:', total)
    assert len(schedule) == total
    return schedule
|
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
    """Save a training checkpoint.

    With a native loss scaler the master rank writes a .pth file; with
    deepspeed (loss_scaler is None) the engine saves its own checkpoint.
    """
    output_dir = Path(args.output_dir)
    tag = str(epoch)
    if loss_scaler is None:
        # Deepspeed path: the engine handles (sharded) saving itself.
        client_state = {'epoch': epoch}
        if model_ema is not None:
            client_state['model_ema'] = get_state_dict(model_ema)
        model.save_checkpoint(save_dir=args.output_dir, tag='checkpoint-%s' % tag, client_state=client_state)
        return
    for checkpoint_path in [output_dir / ('checkpoint-%s.pth' % tag)]:
        to_save = {'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch, 'scaler': loss_scaler.state_dict(), 'args': args}
        if model_ema is not None:
            to_save['model_ema'] = get_state_dict(model_ema)
        save_on_master(to_save, checkpoint_path)
|
def load_model(args, model_without_ddp, optimizer, loss_scaler, model_ema=None):
    """Resume model/optimizer/EMA/scaler state from args.resume (path or URL)."""
    output_dir = Path(args.output_dir)
    if not args.resume:
        return
    if args.resume.startswith('https'):
        checkpoint = torch.hub.load_state_dict_from_url(args.resume, map_location='cpu', check_hash=True)
    else:
        checkpoint = torch.load(args.resume, map_location='cpu')
    model_without_ddp.load_state_dict(checkpoint['model'])
    print('Resume checkpoint %s' % args.resume)
    if ('optimizer' in checkpoint) and ('epoch' in checkpoint):
        optimizer.load_state_dict(checkpoint['optimizer'])
        # 'best' checkpoints carry no resumable epoch counter.
        if checkpoint['epoch'] != 'best':
            args.start_epoch = checkpoint['epoch'] + 1
        if hasattr(args, 'model_ema') and args.model_ema:
            _load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
        if 'scaler' in checkpoint:
            loss_scaler.load_state_dict(checkpoint['scaler'])
        print('With optim & sched!')
|
def all_reduce_mean(x):
    """Average a scalar across all workers (identity when not distributed)."""
    world_size = get_world_size()
    if world_size <= 1:
        return x
    reduced = torch.tensor(x).cuda()
    dist.all_reduce(reduced)
    reduced /= world_size
    return reduced.item()
|
def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
    """Resume training state; with --auto_resume and no explicit --resume,
    pick the newest 'checkpoint-<epoch>.pth' in the output directory."""
    output_dir = Path(args.output_dir)
    if args.auto_resume and (len(args.resume) == 0):
        import glob
        candidates = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth'))
        newest = -1
        for ckpt in candidates:
            suffix = ckpt.split('-')[-1].split('.')[0]
            if suffix.isdigit():
                newest = max(int(suffix), newest)
        if newest >= 0:
            args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % newest)
            print('Auto resume checkpoint: %s' % args.resume)
    if not args.resume:
        return
    if args.resume.startswith('https'):
        checkpoint = torch.hub.load_state_dict_from_url(args.resume, map_location='cpu', check_hash=True)
    else:
        checkpoint = torch.load(args.resume, map_location='cpu')
    model_without_ddp.load_state_dict(checkpoint['model'])
    print('Resume checkpoint %s' % args.resume)
    if ('optimizer' in checkpoint) and ('epoch' in checkpoint):
        optimizer.load_state_dict(checkpoint['optimizer'])
        # 'best' checkpoints carry no resumable epoch counter.
        if checkpoint['epoch'] != 'best':
            args.start_epoch = checkpoint['epoch'] + 1
        if hasattr(args, 'model_ema') and args.model_ema:
            _load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
        if 'scaler' in checkpoint:
            loss_scaler.load_state_dict(checkpoint['scaler'])
        print('With optim & sched!')
|
def create_d_vae(weight_path, d_vae_type, image_size, device):
    """Factory for the discrete VAE tokenizer ('dall-e' or 'customized')."""
    builders = {'dall-e': get_dalle_vae, 'customized': get_d_vae}
    if d_vae_type not in builders:
        raise NotImplementedError()
    return builders[d_vae_type](weight_path, image_size, device)
|
def get_dalle_vae(weight_path, image_size, device):
    """Load DALL-E encoder/decoder weights into a Dalle_VAE wrapper."""
    model = Dalle_VAE(image_size)
    model.load_model(model_dir=weight_path, device=device)
    return model
|
def get_d_vae(weight_path, image_size, device):
    """Load a customized DiscreteVAE from '<weight_path>/pytorch_model.bin'."""
    # Architecture hyper-parameters must match the saved checkpoint.
    NUM_TOKENS = 8192
    NUM_LAYERS = 3
    EMB_DIM = 512
    HID_DIM = 256
    weights = torch.load(os.path.join(weight_path, 'pytorch_model.bin'), map_location='cpu')['weights']
    vae = DiscreteVAE(image_size=image_size, num_layers=NUM_LAYERS, num_tokens=NUM_TOKENS, codebook_dim=EMB_DIM, hidden_dim=HID_DIM).to(device)
    vae.load_state_dict(weights)
    return vae
|
def create_ds_config(args):
    """Write the deepspeed JSON config into the output directory and point
    args.deepspeed_config at it."""
    args.deepspeed_config = os.path.join(args.output_dir, 'deepspeed_config.json')
    config = {
        'train_batch_size': (args.batch_size * args.update_freq) * get_world_size(),
        'train_micro_batch_size_per_gpu': args.batch_size,
        'steps_per_print': 1000,
        'optimizer': {
            'type': 'Adam',
            'adam_w_mode': True,
            'params': {'lr': args.lr, 'weight_decay': args.weight_decay, 'bias_correction': True, 'betas': [0.9, 0.999], 'eps': 1e-08},
        },
        'fp16': {'enabled': True, 'loss_scale': 0, 'initial_scale_power': 7, 'loss_scale_window': 128},
    }
    with open(args.deepspeed_config, mode='w') as writer:
        writer.write(json.dumps(config, indent=2))
|
def top_k(logits, thres=0.5):
    """Keep the top (1 - thres) fraction of logits per row; mask the rest to -inf."""
    num_logits = logits.shape[-1]
    # Always keep at least one logit.
    k = max(int((1 - thres) * num_logits), 1)
    vals, idx = torch.topk(logits, k)
    masked = torch.full_like(logits, float('-inf'))
    masked.scatter_(1, idx, vals)
    return masked
|
def exists(val):
    """True when `val` is not None."""
    return val is not None
|
def default(val, d):
    """Return `val` unless it is None, in which case return the fallback `d`."""
    # Inlined the `exists` check: None is the only sentinel value.
    return d if val is None else val
|
def eval_decorator(fn):
    """Decorator: run `fn` with the model in eval mode, then restore the
    model's previous training flag."""
    def inner(model, *args, **kwargs):
        previous_mode = model.training
        model.eval()
        result = fn(model, *args, **kwargs)
        model.train(previous_mode)
        return result
    return inner
|
class BasicVAE(nn.Module):
    """Abstract interface for discrete VAEs used as visual tokenizers."""

    def get_codebook_indices(self, images):
        """Map images to discrete codebook indices."""
        raise NotImplementedError()

    def decode(self, img_seq):
        """Reconstruct images from a sequence of codebook indices."""
        raise NotImplementedError()

    def get_codebook_probs(self, img_seq):
        """Per-position probability distribution over codebook entries."""
        raise NotImplementedError()

    def get_image_tokens_size(self):
        # Optional: subclasses may report their token-grid size.
        pass

    def get_image_size(self):
        # Optional: subclasses may report their expected input size.
        pass
|
class ResBlock(nn.Module):
    """Two 3x3 convs plus a 1x1 projection, with an identity skip connection."""

    def __init__(self, chan_in, hidden_size, chan_out):
        super().__init__()
        layers = [
            nn.Conv2d(chan_in, hidden_size, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(hidden_size, hidden_size, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(hidden_size, chan_out, 1),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        # Residual connection: requires chan_out to match x's channel count.
        return x + self.net(x)
|
class DiscreteVAE(BasicVAE):
    """Convolutional discrete VAE with a Gumbel-softmax relaxed codebook
    (DALL-E style), used as an image tokenizer."""

    def __init__(self, image_size=256, num_tokens=512, codebook_dim=512, num_layers=3, hidden_dim=64, channels=3, smooth_l1_loss=False, temperature=0.9, straight_through=False, kl_div_loss_weight=0.0):
        super().__init__()
        assert (num_layers >= 1), 'number of layers must be greater than or equal to 1'
        self.image_size = image_size
        self.num_tokens = num_tokens
        self.num_layers = num_layers
        self.temperature = temperature
        self.straight_through = straight_through
        self.codebook = nn.Embedding(num_tokens, codebook_dim)
        enc_layers = []
        dec_layers = []
        enc_in = channels
        dec_in = codebook_dim
        # Each layer halves (encoder) / doubles (decoder) the spatial size.
        for layer_id in range(num_layers):
            enc_layers.append(nn.Sequential(nn.Conv2d(enc_in, hidden_dim, 4, stride=2, padding=1), nn.ReLU()))
            enc_layers.append(ResBlock(chan_in=hidden_dim, hidden_size=hidden_dim, chan_out=hidden_dim))
            enc_in = hidden_dim
            dec_layers.append(nn.Sequential(nn.ConvTranspose2d(dec_in, hidden_dim, 4, stride=2, padding=1), nn.ReLU()))
            dec_layers.append(ResBlock(chan_in=hidden_dim, hidden_size=hidden_dim, chan_out=hidden_dim))
            dec_in = hidden_dim
        # Final 1x1 convs map to token logits (encoder) / image channels (decoder).
        enc_layers.append(nn.Conv2d(hidden_dim, num_tokens, 1))
        dec_layers.append(nn.Conv2d(hidden_dim, channels, 1))
        self.encoder = nn.Sequential(*enc_layers)
        self.decoder = nn.Sequential(*dec_layers)
        self.loss_fn = (F.smooth_l1_loss if smooth_l1_loss else F.mse_loss)
        self.kl_div_loss_weight = kl_div_loss_weight

    def get_image_size(self):
        return self.image_size

    def get_image_tokens_size(self):
        # Token-grid side length; matches the 8x downsampling of the default
        # num_layers == 3 — TODO confirm for other depths.
        return (self.image_size // 8)

    @torch.no_grad()
    @eval_decorator
    def get_codebook_indices(self, images):
        """Hard token assignment: argmax over the token-logit channel."""
        logits = self.forward(images, return_logits=True)
        codebook_indices = logits.argmax(dim=1)
        return codebook_indices

    @torch.no_grad()
    @eval_decorator
    def get_codebook_probs(self, images):
        """Soft token assignment: softmax over the token-logit channel."""
        logits = self.forward(images, return_logits=True)
        return nn.Softmax(dim=1)(logits)

    def decode(self, img_seq):
        """Reconstruct images from a flat (b, h*w) sequence of token indices."""
        image_embeds = self.codebook(img_seq)
        (b, n, d) = image_embeds.shape
        h = w = int(sqrt(n))  # assumes a square token grid
        image_embeds = rearrange(image_embeds, 'b (h w) d -> b d h w', h=h, w=w)
        images = self.decoder(image_embeds)
        return images

    def forward(self, img, return_loss=False, return_recons=False, return_logits=False, temp=None):
        """Encode/decode `img`; optionally return logits, loss, and recons."""
        (device, num_tokens, image_size, kl_div_loss_weight) = (img.device, self.num_tokens, self.image_size, self.kl_div_loss_weight)
        assert ((img.shape[(- 1)] == image_size) and (img.shape[(- 2)] == image_size)), f'input must have the correct image size {image_size}'
        logits = self.encoder(img)
        if return_logits:
            # Token logits of shape (b, num_tokens, h, w).
            return logits
        temp = default(temp, self.temperature)
        # Differentiable (relaxed) sampling of one codebook entry per position.
        soft_one_hot = F.gumbel_softmax(logits, tau=temp, dim=1, hard=self.straight_through)
        sampled = einsum('b n h w, n d -> b d h w', soft_one_hot, self.codebook.weight)
        out = self.decoder(sampled)
        if (not return_loss):
            return out
        recon_loss = self.loss_fn(img, out)
        logits = rearrange(logits, 'b n h w -> b (h w) n')
        qy = F.softmax(logits, dim=(- 1))
        log_qy = torch.log((qy + 1e-10))
        # KL between the token posterior and a uniform prior over the codebook.
        log_uniform = torch.log(torch.tensor([(1.0 / num_tokens)], device=device))
        kl_div = F.kl_div(log_uniform, log_qy, None, None, 'batchmean', log_target=True)
        loss = (recon_loss + (kl_div * kl_div_loss_weight))
        if (not return_recons):
            return loss
        return (loss, out)
|
def vae_load_model(path: str, device: torch.device = None) -> nn.Module:
    """Load a pickled VAE module from a local path or an http(s) URL."""
    if path.startswith(('http://', 'https://')):
        resp = requests.get(path)
        resp.raise_for_status()
        with io.BytesIO(resp.content) as buf:
            return torch.load(buf, map_location=device)
    with open(path, 'rb') as f:
        print('load_model_path: ', path)
        return torch.load(f, map_location=device)
|
class Dalle_VAE(BasicVAE):
    """Wrapper around the pickled OpenAI DALL-E encoder/decoder networks."""

    def __init__(self, image_size):
        super().__init__()
        self.encoder = None
        self.decoder = None
        self.image_size = image_size

    def load_model(self, model_dir, device):
        # encoder.pkl / decoder.pkl are the official DALL-E weight files.
        print('pickel_file_location: ,', model_dir, 'encoder.pkl')
        self.encoder = vae_load_model(os.path.join(model_dir, 'encoder.pkl'), device)
        self.decoder = vae_load_model(os.path.join(model_dir, 'decoder.pkl'), device)

    def decode(self, img_seq):
        """Decode a flat token sequence back to image space."""
        bsz = img_seq.size()[0]
        grid = self.image_size // 8
        tokens = img_seq.view(bsz, grid, grid)
        one_hot = F.one_hot(tokens, num_classes=self.encoder.vocab_size).permute(0, 3, 1, 2).float()
        return self.decoder(one_hot).float()

    def get_codebook_indices(self, images):
        return torch.argmax(self.encoder(images), axis=1)

    def get_codebook_probs(self, images):
        return nn.Softmax(dim=1)(self.encoder(images))

    def forward(self, img_seq_prob, no_process=False):
        """Decode token probabilities; reshape to the token grid unless
        no_process is set."""
        if no_process:
            return self.decoder(img_seq_prob.float()).float()
        (bsz, seq_len, num_class) = img_seq_prob.size()
        grid = self.image_size // 8
        z = img_seq_prob.view(bsz, grid, grid, self.encoder.vocab_size)
        return self.decoder(z.permute(0, 3, 1, 2).float()).float()
|
def get_num_layer_for_vit(var_name, num_max_layer):
    """Layer index for a ViT parameter name (BEiT lr-decay convention)."""
    # Embedding-level parameters belong to layer 0.
    if var_name in ('cls_token', 'mask_token', 'pos_embed') or var_name.startswith('patch_embed'):
        return 0
    # Shared relative position bias is grouped with the last layer.
    if var_name.startswith('rel_pos_bias'):
        return num_max_layer - 1
    if var_name.startswith('blocks'):
        # 'blocks.<i>.<...>' maps to layer i + 1.
        return int(var_name.split('.')[1]) + 1
    return num_max_layer - 1
|
class LayerDecayValueAssigner(object):
    """Maps parameter names to layer ids and layer ids to lr multipliers."""

    def __init__(self, values):
        # values[i] is the lr scale applied to layer i.
        self.values = values

    def get_scale(self, layer_id):
        return self.values[layer_id]

    def get_layer_id(self, var_name):
        return get_num_layer_for_vit(var_name, len(self.values))
|
def add_weight_decay(model, weight_decay=1e-05, skip_list=()):
    """Split parameters into a no-decay group (1-D params, biases, names in
    skip_list) and a decay group."""
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        skip = len(param.shape) == 1 or name.endswith('.bias') or name in skip_list
        (no_decay if skip else decay).append(param)
    return [
        {'params': no_decay, 'weight_decay': 0.0},
        {'params': decay, 'weight_decay': weight_decay},
    ]
|
def get_parameter_groups(model, weight_decay=1e-05, skip_list=(), get_num_layer=None, get_layer_scale=None):
    """Build optimizer param groups split by decay/no-decay and, when
    `get_num_layer` is supplied, additionally by layer id (with a per-layer
    'lr_scale' from `get_layer_scale`)."""
    group_names = {}
    group_vars = {}
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # 1-D tensors, biases and explicitly skipped names get no weight decay.
        if len(param.shape) == 1 or name.endswith('.bias') or (name in skip_list):
            base_name, this_weight_decay = 'no_decay', 0.0
        else:
            base_name, this_weight_decay = 'decay', weight_decay
        layer_id = get_num_layer(name) if get_num_layer is not None else None
        group_name = base_name if layer_id is None else 'layer_%d_%s' % (layer_id, base_name)
        if group_name not in group_names:
            scale = get_layer_scale(layer_id) if get_layer_scale is not None else 1.0
            group_names[group_name] = {'weight_decay': this_weight_decay, 'params': [], 'lr_scale': scale}
            group_vars[group_name] = {'weight_decay': this_weight_decay, 'params': [], 'lr_scale': scale}
        group_vars[group_name]['params'].append(param)
        group_names[group_name]['params'].append(name)
    return list(group_vars.values())
|
def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None):
    """Create the optimizer named by args.opt (timm-style factory).

    Supports an optional 'lookahead_' prefix and a 'fused' family requiring
    APEX. When filter_bias_and_bn is set (and weight decay is non-zero),
    parameters are pre-grouped so 1-D params/biases get no decay.

    Raises:
        ValueError: for an unrecognized optimizer name. (Bug fix: this was
        `assert (False and 'Invalid optimizer')`, which always reduced to a
        message-less `assert False` and was skipped entirely under `python -O`.)
    """
    opt_lower = args.opt.lower()
    weight_decay = args.weight_decay
    if weight_decay and filter_bias_and_bn:
        skip = {}
        if skip_list is not None:
            skip = skip_list
        elif hasattr(model, 'no_weight_decay'):
            skip = model.no_weight_decay()
        parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale)
        # Decay is applied per-group above, so zero out the global value.
        weight_decay = 0.0
    else:
        parameters = model.parameters()
    if 'fused' in opt_lower:
        assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
    opt_args = dict(lr=args.lr, weight_decay=weight_decay)
    if hasattr(args, 'opt_eps') and args.opt_eps is not None:
        opt_args['eps'] = args.opt_eps
    if hasattr(args, 'opt_betas') and args.opt_betas is not None:
        opt_args['betas'] = args.opt_betas
    # 'lookahead_adamw' style names: dispatch on the last component.
    opt_split = opt_lower.split('_')
    opt_lower = opt_split[-1]
    if opt_lower == 'sgd' or opt_lower == 'nesterov':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'momentum':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
    elif opt_lower == 'adam':
        optimizer = optim.Adam(parameters, **opt_args)
    elif opt_lower == 'adamw':
        optimizer = optim.AdamW(parameters, **opt_args)
    elif opt_lower == 'nadam':
        optimizer = Nadam(parameters, **opt_args)
    elif opt_lower == 'radam':
        optimizer = RAdam(parameters, **opt_args)
    elif opt_lower == 'adamp':
        optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
    elif opt_lower == 'sgdp':
        optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'adadelta':
        optimizer = optim.Adadelta(parameters, **opt_args)
    elif opt_lower == 'adafactor':
        if not args.lr:
            # Adafactor can derive its own lr schedule.
            opt_args['lr'] = None
        optimizer = Adafactor(parameters, **opt_args)
    elif opt_lower == 'adahessian':
        optimizer = Adahessian(parameters, **opt_args)
    elif opt_lower == 'rmsprop':
        optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
    elif opt_lower == 'rmsproptf':
        optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
    elif opt_lower == 'nvnovograd':
        optimizer = NvNovoGrad(parameters, **opt_args)
    elif opt_lower == 'fusedsgd':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'fusedmomentum':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
    elif opt_lower == 'fusedadam':
        optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
    elif opt_lower == 'fusedadamw':
        optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
    elif opt_lower == 'fusedlamb':
        optimizer = FusedLAMB(parameters, **opt_args)
    elif opt_lower == 'fusednovograd':
        opt_args.setdefault('betas', (0.95, 0.98))
        optimizer = FusedNovoGrad(parameters, **opt_args)
    else:
        raise ValueError('Invalid optimizer: {}'.format(args.opt))
    if len(opt_split) > 1:
        if opt_split[0] == 'lookahead':
            optimizer = Lookahead(optimizer)
    return optimizer
|
def print_options(args, model):
    """Print every arg plus the trainable-parameter count, and save the same
    text to '<output_dir>/log_file.txt' (path stored in args.file_name)."""
    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    num_params = num_params / 1000000
    lines = ['================ FL train of %s with total model parameters: %2.1fM ================\n' % (args.model, num_params)]
    lines.append('++++++++++++++++ Other Train related parameters ++++++++++++++++ \n')
    for key, val in sorted(vars(args).items()):
        comment = ''
        lines.append('{:>25}: {:<30}{}\n'.format(str(key), str(val), comment))
    lines.append('++++++++++++++++ End of show parameters ++++++++++++++++ ')
    message = ''.join(lines)
    args.file_name = os.path.join(args.output_dir, 'log_file.txt')
    with open(args.file_name, 'wt') as args_file:
        args_file.write(message)
        args_file.write('\n')
    print(message)
|
class ToNumpy():
    """Convert a PIL image to a channel-first uint8 numpy array."""

    def __call__(self, pil_img):
        arr = np.array(pil_img, dtype=np.uint8)
        if arr.ndim < 3:
            # Promote grayscale to a single trailing channel.
            arr = np.expand_dims(arr, axis=-1)
        # HWC -> CHW
        return np.rollaxis(arr, 2)
|
class ToTensor():
    """Convert a PIL image to a channel-first torch tensor of the given dtype."""

    def __init__(self, dtype=torch.float32):
        self.dtype = dtype

    def __call__(self, pil_img):
        arr = np.array(pil_img, dtype=np.uint8)
        if arr.ndim < 3:
            # Promote grayscale to a single trailing channel.
            arr = np.expand_dims(arr, axis=-1)
        # HWC -> CHW, then cast.
        arr = np.rollaxis(arr, 2)
        return torch.from_numpy(arr).to(dtype=self.dtype)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.