code stringlengths 101 5.91M |
|---|
def resnet101_mpncov_160(pretrained=False, progress=True, **kwargs):
    """Build a ResNet-101 MPN-COV model for 160px inputs.

    Thin wrapper around ``_resnet_mpncov_160`` using Bottleneck blocks and
    the ResNet-101 stage depths (3, 4, 23, 3).
    """
    stage_depths = [3, 4, 23, 3]  # ResNet-101 layer configuration
    return _resnet_mpncov_160('resnet101_mpncov_160', Bottleneck, stage_depths,
                              pretrained, progress, **kwargs)
class decoder(nn.Module):
    """Decoder mapping a 16-dim code to a 32-channel 32x32 feature map.

    NOTE(review): the ``in_channel``/``out_channel`` constructor arguments
    are accepted for interface compatibility but are not used by any layer
    below -- confirm against callers before removing.
    """

    def __init__(self, in_channel=1, out_channel=10):
        super(decoder, self).__init__()
        # Two fully connected layers expand the code to 32 * 16 * 16 = 8192.
        self.fc3 = nn.Linear(16, 256)
        self.fc4 = nn.Linear(256, 8192)

        def _deconv_block(kernel, stride, pad):
            # ConvTranspose2d -> BatchNorm -> ReLU, always 32 -> 32 channels.
            return nn.Sequential(
                nn.ConvTranspose2d(32, 32, kernel_size=kernel, stride=stride, padding=pad),
                nn.BatchNorm2d(32),
                nn.ReLU(inplace=True),
            )

        self.deconv1 = _deconv_block(3, 1, 1)
        self.deconv2 = _deconv_block(3, 1, 1)
        self.deconv3 = _deconv_block(2, 2, 0)  # the only upsampling stage: 16x16 -> 32x32
        self.deconv4 = _deconv_block(3, 1, 1)
        self.relu = nn.ReLU()

    def forward(self, x):
        hidden = self.relu(self.fc3(x))
        feat = self.relu(self.fc4(hidden))
        feat = feat.view(feat.size(0), 32, 16, 16)
        for stage in (self.deconv1, self.deconv2, self.deconv3, self.deconv4):
            feat = stage(feat)
        return feat
class QuantizableResNet(ResNet):
    """ResNet wrapped with quant/dequant stubs for static quantization."""

    def __init__(self, *args, **kwargs):
        super(QuantizableResNet, self).__init__(*args, **kwargs)
        # Stubs mark where tensors enter/leave the quantized region.
        self.quant = torch.quantization.QuantStub()
        self.dequant = torch.quantization.DeQuantStub()

    def forward(self, x):
        # Quantize input -> run the shared implementation -> dequantize output.
        return self.dequant(self._forward_impl(self.quant(x)))

    def fuse_model(self):
        """Fuse the stem's conv+bn+relu, then fuse every quantizable block."""
        fuse_modules(self, ['conv1', 'bn1', 'relu'], inplace=True)
        for module in self.modules():
            # Exact type check (not isinstance) so subclasses are not fused twice.
            if type(module) in (QuantizableBottleneck, QuantizableBasicBlock):
                module.fuse_model()
class BeatIntervalOption(CommandLineOption):
    """Command-line option setting the interval between heartbeat events."""

    arg = 'BEAT_INTERVAL'
    arg_description = 'Time between two heartbeat events measured in seconds.'

    @classmethod
    def apply(cls, args, run):
        """Store the parsed interval (seconds, as float) on the run object.

        NOTE(review): ``apply`` took ``cls`` as first parameter but carried no
        ``@classmethod`` decorator -- it appears to have been stripped from the
        source and is restored here (CommandLineOption subclasses are applied
        via the class, not an instance).
        """
        run.beat_interval = float(args)
class RelationType():
    """A relation type aggregated from one or more label objects.

    Equality and hashing are based on :attr:`identifiers` (the identifier of
    the first label), so two instances built from labels with the same
    identifier compare equal.
    """

    def __init__(self, labels, index, short_name, verbose_name):
        self._labels = labels
        self._index = index
        self._short_name = short_name
        self._verbose_name = verbose_name

    # NOTE(review): __eq__/__hash__ access these accessors WITHOUT calling
    # them (``self.identifiers``). Without @property they would compare and
    # hash bound-method objects, so equality could never hold between
    # distinct instances. The decorators appear to have been stripped from
    # the source and are restored here.
    @property
    def identifiers(self):
        # Identifier of the first (primary) label.
        return self._labels[0].identifier

    @property
    def index(self):
        return self._index

    @property
    def short_name(self):
        return self._short_name

    @property
    def verbose_name(self):
        return self._verbose_name

    @property
    def symmetric(self):
        # Symmetric only if every underlying label is symmetric.
        return all([r.symmetric for r in self._labels])

    def __int__(self):
        return self._index

    def __eq__(self, other):
        if isinstance(other, RelationType):
            return (self.identifiers == other.identifiers)
        return False

    def __hash__(self):
        return hash(self.identifiers)
class DataProcessor(object):
    """Loads pickled NER datasets and converts them into InputExample lists.

    Each getter resolves a pickle file under ``data_dir`` and wraps its
    (text, label) pairs as InputExample objects.
    """

    def get_src_train_examples(self, data_dir):
        return self._create_examples(self._read_pkl(os.path.join(data_dir, 'en_conll_train.pkl')), 'conll_train')

    def get_src_dev_examples(self, data_dir):
        return self._create_examples(self._read_pkl(os.path.join(data_dir, 'en_conll_test.pkl')), 'conll_dev')

    def get_src_trans_train_examples(self, data_dir, language_type):
        return self._create_examples(self._read_pkl(os.path.join(data_dir, (language_type + '_conll_train.pkl'))), 'conll_train')

    def get_src_trans_dev_examples(self, data_dir, language_type):
        return self._create_examples(self._read_pkl(os.path.join(data_dir, (language_type + '_conll_test.pkl'))), 'conll_dev')

    def get_sep_tgt_train_examples(self, data_dir, language_type):
        return self._create_examples(self._read_pkl(os.path.join(data_dir, (language_type + '_sep_train.pkl'))), (language_type + '_train'))

    def get_sep_tgt_dev_examples(self, data_dir, language_type):
        return self._create_examples(self._read_pkl(os.path.join(data_dir, (language_type + '_sep_dev.pkl'))), (language_type + '_dev'))

    def get_sep_tgt_test_examples(self, data_dir, language_type):
        return self._create_examples(self._read_pkl(os.path.join(data_dir, (language_type + '_sep_test.pkl'))), (language_type + '_test'))

    def get_twitter_general_examples(self, data_dir):
        return self._create_examples_without_replacement(self._read_pkl(os.path.join(data_dir, 'twitter_general.pkl')), 'twitter_general')

    def get_labels(self, data_dir):
        """Return the fixed BIO label set (``data_dir`` is unused)."""
        return ['B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC', 'B-MISC', 'I-MISC', 'O']

    def _create_examples(self, data, set_type):
        """Wrap (text, label) pairs as InputExamples.

        NOTE(review): ``set_type`` is currently unused here -- kept for
        interface compatibility with the getters above.
        """
        examples = []
        for (i, elem) in enumerate(data):
            guid = i
            text = elem[0]
            label = elem[1]
            examples.append(InputExample(guid=guid, text=text, label=label))
        return examples

    def _read_pkl(self, input_file):
        """Unpickle ``input_file``.

        Fix: the original used ``pickle.load(open(...))``, leaking the file
        handle; the file is now closed deterministically.
        """
        with open(input_file, 'rb') as f:
            return pickle.load(f)
class Audio2Mel(torch.nn.Module):
    """Log10 mel-spectrogram front end: STFT magnitude + mel filterbank."""

    def __init__(self, hop_length, sampling_rate, n_mel_channels, win_length=1024, n_fft=None, mel_fmin=0.0, mel_fmax=None):
        super().__init__()
        # FFT size defaults to the analysis window length.
        if n_fft is None:
            n_fft = win_length
        window = torch.hann_window(win_length).float()
        mel_basis = librosa_mel_fn(sampling_rate, n_fft, n_mel_channels, mel_fmin, mel_fmax)
        mel_basis = torch.from_numpy(mel_basis).float()
        # Buffers follow the module across devices but are not trained.
        self.register_buffer('mel_basis', mel_basis)
        self.register_buffer('window', window)
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.win_length = win_length
        self.sampling_rate = sampling_rate
        self.n_mel_channels = n_mel_channels

    def forward(self, audio):
        """Map (B, C, T) waveforms to log10 mel spectrograms.

        Output is (B, frames, C, n_mel) with the channel axis squeezed,
        i.e. (B, frames, n_mel) for mono input.
        """
        batch, channels, samples = audio.shape
        # Fold channels into the batch so the STFT runs over 1-D signals.
        flat = audio.reshape(batch * channels, samples)
        fft = torch.stft(flat, n_fft=self.n_fft, hop_length=self.hop_length,
                         win_length=self.win_length, window=self.window,
                         center=False, return_complex=False)
        real_part, imag_part = fft.unbind(-1)
        magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2)
        mel_output = torch.matmul(self.mel_basis, magnitude)
        # Clamp keeps log10 finite for silent frames.
        log_mel = torch.log10(torch.clamp(mel_output, min=1e-05))
        frames = log_mel.shape[-1]
        log_mel = log_mel.reshape(batch, channels, self.n_mel_channels, frames)
        log_mel = log_mel.permute(0, 3, 1, 2).squeeze(2)
        return log_mel
class TranslateY(object):
    """Randomly translate a PIL image along y by ``magnitude * height``."""

    def __init__(self, fillcolor=(128, 128, 128)):
        self.fillcolor = fillcolor

    def __call__(self, x, magnitude):
        # Shift up or down with equal probability.
        direction = random.choice([(- 1), 1])
        offset = magnitude * x.size[1] * direction
        return x.transform(x.size, Image.AFFINE, (1, 0, 0, 0, 1, offset),
                           fillcolor=self.fillcolor)
class Cell(nn.Module):
    """DARTS-style cell assembled from a discrete genotype description."""

    def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev):
        super(Cell, self).__init__()
        # If the previous cell reduced spatial size, s0 must be downsampled
        # to align with s1 before entering this cell.
        if reduction_prev:
            self.preprocess0 = FactorizedReduce(C_prev_prev, C)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)
        if reduction:
            op_names, indices = zip(*genotype.reduce)
            concat = genotype.reduce_concat
        else:
            op_names, indices = zip(*genotype.normal)
            concat = genotype.normal_concat
        self._compile(C, op_names, indices, concat, reduction)

    def _compile(self, C, op_names, indices, concat, reduction):
        """Instantiate the genotype's ops; every step consumes two ops."""
        assert len(op_names) == len(indices)
        self._steps = len(op_names) // 2
        self._concat = concat
        self.multiplier = len(concat)
        self._ops = nn.ModuleList()
        for name, index in zip(op_names, indices):
            # In a reduction cell, ops reading the two input states (index < 2)
            # use stride 2 to halve the resolution.
            stride = 2 if reduction and index < 2 else 1
            self._ops.append(OPS[name](C, stride, True))
        self._indices = indices

    def forward(self, s0, s1, drop_prob):
        s0 = self.preprocess0(s0)
        s1 = self.preprocess1(s1)
        states = [s0, s1]
        for step in range(2 * self._steps):
            pass  # replaced below; kept here only as a placeholder? no
        return None
# NOTE(review): this bare call looks like a decorator
# (`@add_end_docstrings(PIPELINE_INIT_ARGS, ...)`) that originally applied to
# the pipeline class defined below and was flattened into a plain call when
# the source was extracted -- confirm against upstream transformers before
# relying on it. Left byte-identical here.
_end_docstrings(PIPELINE_INIT_ARGS, '\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ')
class TextClassificationPipeline(Pipeline):
    """Sequence-classification pipeline: tokenize, run the model, turn logits
    into labeled scores.

    The class attributes below are legacy defaults; ``top_k`` is the modern
    knob (see ``_sanitize_parameters``).
    """

    # Legacy flag: superseded by `top_k` (see deprecation warning below).
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type((TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if (self.framework == 'tf') else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING))

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k='', **tokenizer_kwargs):
        """Split call kwargs into (preprocess, forward, postprocess) params.

        ``top_k`` defaults to the sentinel ``''`` so that an explicit
        ``top_k=None`` (meaning "all scores") is distinguishable from
        "not provided".
        """
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        # The model config may carry a legacy default for return_all_scores.
        if (hasattr(self.model.config, 'return_all_scores') and (return_all_scores is None)):
            return_all_scores = self.model.config.return_all_scores
        if (isinstance(top_k, int) or (top_k is None)):
            # Modern path: caller set top_k explicitly -> non-legacy output.
            postprocess_params['top_k'] = top_k
            postprocess_params['_legacy'] = False
        elif (return_all_scores is not None):
            # Legacy path: translate return_all_scores into a top_k value.
            warnings.warn('`return_all_scores` is now deprecated, if want a similar funcionality use `top_k=None` instead of `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.', UserWarning)
            if return_all_scores:
                postprocess_params['top_k'] = None
            else:
                postprocess_params['top_k'] = 1
        if isinstance(function_to_apply, str):
            # Accept string names ("sigmoid", ...) for the enum.
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if (function_to_apply is not None):
            postprocess_params['function_to_apply'] = function_to_apply
        return (preprocess_params, {}, postprocess_params)

    def __call__(self, *args, **kwargs):
        """Classify the given text(s); legacy mode wraps a single-string
        result in a list for backward compatibility."""
        result = super().__call__(*args, **kwargs)
        # Legacy behavior is assumed whenever the caller did not pass top_k.
        _legacy = ('top_k' not in kwargs)
        if (isinstance(args[0], str) and _legacy):
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[(str, GenericTensor)]:
        """Tokenize `inputs`; dicts are treated as text/text_pair kwargs, and a
        single [text, pair] list is supported for backward compatibility."""
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif (isinstance(inputs, list) and (len(inputs) == 1) and isinstance(inputs[0], list) and (len(inputs[0]) == 2)):
            # Legacy text-pair format: [[text, text_pair]].
            return self.tokenizer(text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list):
            raise ValueError('The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.')
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        """Convert logits into {'label', 'score'} dicts.

        When ``function_to_apply`` is not given it is inferred from the model
        config. NOTE(review): due to the elif ordering below,
        ``config.function_to_apply`` is only consulted when neither the
        problem_type nor num_labels branches match.
        """
        if (function_to_apply is None):
            if ((self.model.config.problem_type == 'multi_label_classification') or (self.model.config.num_labels == 1)):
                function_to_apply = ClassificationFunction.SIGMOID
            elif ((self.model.config.problem_type == 'single_label_classification') or (self.model.config.num_labels > 1)):
                function_to_apply = ClassificationFunction.SOFTMAX
            elif (hasattr(self.model.config, 'function_to_apply') and (function_to_apply is None)):
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        # Single-example pipeline: take the first row of logits.
        outputs = model_outputs['logits'][0]
        outputs = outputs.numpy()
        if (function_to_apply == ClassificationFunction.SIGMOID):
            scores = sigmoid(outputs)
        elif (function_to_apply == ClassificationFunction.SOFTMAX):
            scores = softmax(outputs)
        elif (function_to_apply == ClassificationFunction.NONE):
            scores = outputs
        else:
            raise ValueError(f'Unrecognized `function_to_apply` argument: {function_to_apply}')
        # Legacy single-best output keeps the historical dict shape.
        if ((top_k == 1) and _legacy):
            return {'label': self.model.config.id2label[scores.argmax().item()], 'score': scores.max().item()}
        dict_scores = [{'label': self.model.config.id2label[i], 'score': score.item()} for (i, score) in enumerate(scores)]
        if (not _legacy):
            # Modern output: sorted by score, optionally truncated to top_k.
            dict_scores.sort(key=(lambda x: x['score']), reverse=True)
            if (top_k is not None):
                dict_scores = dict_scores[:top_k]
        return dict_scores
class SSLS4L(ssl_base._SSLBase):
    """SSL_S4L: semi-supervised learning with a rotation-prediction pretext
    task.

    A task model is wrapped together with a rotation classifier; every
    training batch is doubled with rotated copies of its samples (see
    ``_batch_prehandle``) so the network is trained simultaneously on the
    supervised task and on predicting which rotation was applied.
    """

    NAME = 'ssl_s4l'
    SUPPORTED_TASK_TYPES = [REGRESSION, CLASSIFICATION]

    def __init__(self, args):
        super(SSLS4L, self).__init__(args)
        # Components are created later in `_build`.
        self.task_model = None
        self.rotation_classifier = None
        self.model = None
        self.optimizer = None
        self.lrer = None
        self.criterion = None
        # Both loss scales must be configured (>= 0) before training.
        if (self.args.rotation_scale < 0):
            logger.log_err('The argument - rotation_scale - is not set (or invalid)\nPlease set - rotation_scale >= 0 - for training\n')
        if (self.args.rotated_sup_scale < 0):
            logger.log_err('The argument - rotated_sup_scale - is not set (or invalid)\nPlease set - rotated_sup_scale >= 0 - for training\n')

    def _build(self, model_funcs, optimizer_funcs, lrer_funcs, criterion_funcs, task_func):
        """Create the wrapped S4L model, optimizer, LR scheduler and losses."""
        self.task_func = task_func
        # Task network plus a rotation-classification head, wrapped together.
        self.task_model = func.create_model(model_funcs[0], 'task_model', args=self.args).module
        self.rotation_classifier = RotationClassifer(self.task_func.ssls4l_rc_in_channels())
        self.model = WrappedS4LModel(self.args, self.task_model, self.rotation_classifier)
        self.model = nn.DataParallel(self.model).cuda()
        patch_replication_callback(self.model)
        self.models = {'model': self.model}
        self.optimizer = optimizer_funcs[0](self.model.module.param_groups)
        self.optimizers = {'optimizer': self.optimizer}
        self.lrer = lrer_funcs[0](self.optimizer)
        self.lrers = {'lrer': self.lrer}
        self.criterion = criterion_funcs[0](self.args)
        self.rotation_criterion = nn.CrossEntropyLoss()
        self.criterions = {'criterion': self.criterion, 'rotation_criterion': self.rotation_criterion}
        # Batches are doubled because rotated copies are appended to each one.
        self.args.batch_size *= 2
        self.args.labeled_batch_size *= 2
        self.args.unlabeled_batch_size *= 2
        # NOTE(review): format argument 0 (self.args.lr) is not referenced by
        # the template, which only uses {1} and {2}.
        logger.log_info('In SSL_S4L algorithm, batch size are doubled: \n Total labeled batch size: {1}\n Total unlabeled batch size: {2}\n'.format(self.args.lr, self.args.labeled_batch_size, self.args.unlabeled_batch_size))
        self._algorithm_warn()

    def _train(self, data_loader, epoch):
        """Run one training epoch over (unrotated + rotated) batches."""
        self.meters.reset()
        # Halve the doubled sizes to index the unrotated half of each batch.
        original_lbs = int((self.args.labeled_batch_size / 2))
        original_bs = int((self.args.batch_size / 2))
        self.model.train()
        for (idx, (inp, gt)) in enumerate(data_loader):
            timer = time.time()
            # Appends rotated copies; rotation labels become gt[-1].
            (inp, gt) = self._batch_prehandle(inp, gt, True)
            if (((len(gt) - 1) > 1) and (idx == 0)):
                self._inp_warn()
            self.optimizer.zero_grad()
            (resulter, debugger) = self.model.forward(inp)
            pred = tool.dict_value(resulter, 'pred')
            activated_pred = tool.dict_value(resulter, 'activated_pred')
            pred_rotation = tool.dict_value(resulter, 'rotation')
            # Supervised loss on the unrotated labeled samples (first chunk).
            l_pred = func.split_tensor_tuple(pred, 0, original_lbs)
            l_gt = func.split_tensor_tuple(gt, 0, original_lbs)
            l_inp = func.split_tensor_tuple(inp, 0, original_lbs)
            unrotated_task_loss = self.criterion.forward(l_pred, l_gt[:(- 1)], l_inp)
            unrotated_task_loss = torch.mean(unrotated_task_loss)
            self.meters.update('unrotated_task_loss', unrotated_task_loss.data)
            # Supervised loss on the rotated labeled samples (second half).
            l_rotated_pred = func.split_tensor_tuple(pred, original_bs, (original_bs + original_lbs))
            l_rotated_gt = func.split_tensor_tuple(gt, original_bs, (original_bs + original_lbs))
            l_rotated_inp = func.split_tensor_tuple(inp, original_bs, (original_bs + original_lbs))
            rotated_task_loss = self.criterion.forward(l_rotated_pred, l_rotated_gt[:(- 1)], l_rotated_inp)
            rotated_task_loss = (self.args.rotated_sup_scale * torch.mean(rotated_task_loss))
            self.meters.update('rotated_task_loss', rotated_task_loss.data)
            task_loss = (unrotated_task_loss + rotated_task_loss)
            # Self-supervised rotation-prediction loss over the whole batch.
            rotation_loss = self.rotation_criterion.forward(pred_rotation, gt[(- 1)])
            rotation_loss = (self.args.rotation_scale * torch.mean(rotation_loss))
            self.meters.update('rotation_loss', rotation_loss.data)
            loss = (task_loss + rotation_loss)
            loss.backward()
            self.optimizer.step()
            # Top-1 rotation accuracy as a percentage of the doubled batch.
            (_, angle_idx) = pred_rotation.topk(1, 1, True, True)
            angle_idx = angle_idx.t()
            rotation_acc = angle_idx.eq(gt[(- 1)].view(1, (- 1)).expand_as(angle_idx))
            rotation_acc = rotation_acc.view((- 1)).float().sum(0, keepdim=True).mul_((100.0 / self.args.batch_size))
            self.meters.update('rotation_acc', rotation_acc.data[0])
            self.meters.update('batch_time', (time.time() - timer))
            if ((idx % self.args.log_freq) == 0):
                logger.log_info('step: [{0}][{1}/{2}]\tbatch-time: {meters[batch_time]:.3f}\n task-{3}\t=>\tunrotated-task-loss: {meters[unrotated_task_loss]:.6f}\trotated-task-loss: {meters[rotated_task_loss]:.6f}\n rotation-{3}\t=>\trotation-loss: {meters[rotation_loss]:.6f}\trotation-acc: {meters[rotation_acc]:.6f}\n'.format((epoch + 1), idx, len(data_loader), self.args.task, meters=self.meters))
            if (self.args.visualize and ((idx % self.args.visual_freq) == 0)):
                self._visualize(epoch, idx, True, func.split_tensor_tuple(inp, 0, 1, reduce_dim=True), func.split_tensor_tuple(activated_pred, 0, 1, reduce_dim=True), func.split_tensor_tuple(gt[:(- 1)], 0, 1, reduce_dim=True))
            # Step-wise schedulers advance inside the loop ...
            if (not self.args.is_epoch_lrer):
                self.lrer.step()
        # ... epoch-wise schedulers advance once per epoch.
        if self.args.is_epoch_lrer:
            self.lrer.step()

    def _validate(self, data_loader, epoch):
        """Evaluate task and rotation losses/metrics on the validation set."""
        self.meters.reset()
        self.model.eval()
        for (idx, (inp, gt)) in enumerate(data_loader):
            timer = time.time()
            # No rotated copies at validation time (is_train=False); gt[-1]
            # is the all-zero rotation label tensor.
            (inp, gt) = self._batch_prehandle(inp, gt, False)
            if (((len(gt) - 1) > 1) and (idx == 0)):
                self._inp_warn()
            (resulter, debugger) = self.model.forward(inp)
            pred = tool.dict_value(resulter, 'pred')
            activated_pred = tool.dict_value(resulter, 'activated_pred')
            pred_rotation = tool.dict_value(resulter, 'rotation')
            task_loss = self.criterion.forward(pred, gt[:(- 1)], inp)
            task_loss = torch.mean(task_loss)
            self.meters.update('task_loss', task_loss.data)
            rotation_loss = self.rotation_criterion.forward(pred_rotation, gt[(- 1)])
            rotation_loss = (self.args.rotation_scale * torch.mean(rotation_loss))
            self.meters.update('rotation_loss', rotation_loss.data)
            self.task_func.metrics(activated_pred, gt[:(- 1)], inp, self.meters, id_str='task')
            self.meters.update('batch_time', (time.time() - timer))
            if ((idx % self.args.log_freq) == 0):
                logger.log_info('step: [{0}][{1}/{2}]\tbatch-time: {meters[batch_time]:.3f}\n task-{3}\t=>\ttask-loss: {meters[task_loss]:.6f}\n rotation-{3}\t=>\trotation-loss: {meters[rotation_loss]:.6f}\n'.format((epoch + 1), idx, len(data_loader), self.args.task, meters=self.meters))
            if (self.args.visualize and ((idx % self.args.visual_freq) == 0)):
                self._visualize(epoch, idx, True, func.split_tensor_tuple(inp, 0, 1, reduce_dim=True), func.split_tensor_tuple(activated_pred, 0, 1, reduce_dim=True), func.split_tensor_tuple(gt[:(- 1)], 0, 1, reduce_dim=True))
        # Aggregate and log every metric recorded by task_func.metrics.
        metrics_info = {'task': ''}
        for key in sorted(list(self.meters.keys())):
            if (self.task_func.METRIC_STR in key):
                for id_str in metrics_info.keys():
                    if key.startswith(id_str):
                        metrics_info[id_str] += '{0}: {1:.6}\t'.format(key, self.meters[key])
        logger.log_info('Validation metrics:\n task-metrics\t=>\t{0}\n'.format(metrics_info['task'].replace('_', '-')))

    def _save_checkpoint(self, epoch):
        """Persist model/optimizer/scheduler state for the given epoch."""
        state = {'algorithm': self.NAME, 'epoch': epoch, 'model': self.model.state_dict(), 'optimizer': self.optimizer.state_dict(), 'lrer': self.lrer.state_dict()}
        checkpoint = os.path.join(self.args.checkpoint_path, 'checkpoint_{0}.ckpt'.format(epoch))
        torch.save(state, checkpoint)

    def _load_checkpoint(self):
        """Restore state from ``args.resume``; returns the stored epoch."""
        checkpoint = torch.load(self.args.resume)
        checkpoint_algorithm = tool.dict_value(checkpoint, 'algorithm', default='unknown')
        # Refuse checkpoints produced by a different SSL algorithm.
        if (checkpoint_algorithm != self.NAME):
            logger.log_err('Unmatched ssl algorithm format in checkpoint => required: {0} - given: {1}\n'.format(self.NAME, checkpoint_algorithm))
        self.model.load_state_dict(checkpoint['model'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        self.lrer.load_state_dict(checkpoint['lrer'])
        # Re-expose the submodules of the (DataParallel-wrapped) model.
        self.task_model = self.model.module.task_model
        self.rotation_classifier = self.model.module.rotation_classifier
        return checkpoint['epoch']

    def _visualize(self, epoch, idx, is_train, inp, pred, gt):
        """Delegate visualization of one sample to the task function."""
        visualize_path = (self.args.visual_train_path if is_train else self.args.visual_val_path)
        out_path = os.path.join(visualize_path, '{0}_{1}'.format(epoch, idx))
        self.task_func.visualize(out_path, id_str='task', inp=inp, pred=pred, gt=gt)

    def _batch_prehandle(self, inp, gt, is_train):
        """Move the batch to GPU; during training, double it with rotated
        copies and append the rotation labels as the last gt element."""
        # One random rotation index (1..3, i.e. 90/180/270 deg) per sample.
        bs = inp[0].shape[0]
        rotation_angles = np.random.randint(low=1, high=4, size=inp[0].shape[0])
        inp_var = []
        for i in inp:
            i = i.cuda()
            if is_train:
                assert (i.shape[0] == bs)
                # First half: originals; second half: rotated copies.
                rotated_i_shape = list(i.shape)
                rotated_i_shape[0] *= 2
                rotated_i = torch.zeros(rotated_i_shape).cuda()
                for sdx in range(0, bs):
                    rotated_i[sdx] = i[sdx]
                    rotated_i[(bs + sdx)] = self._rotate_tensor(i[sdx], angle_idx=rotation_angles[sdx])
                inp_var.append(Variable(rotated_i))
            else:
                inp_var.append(Variable(i))
        inp = tuple(inp_var)
        gt_var = []
        for g in gt:
            g = g.cuda()
            if is_train:
                assert (g.shape[0] == bs)
                # Ground truths are rotated the same way as the inputs.
                rotated_g_shape = list(g.shape)
                rotated_g_shape[0] *= 2
                rotated_g = torch.zeros(rotated_g_shape).cuda()
                for sdx in range(0, bs):
                    rotated_g[sdx] = g[sdx]
                    rotated_g[(bs + sdx)] = self._rotate_tensor(g[sdx], angle_idx=rotation_angles[sdx])
                gt_var.append(Variable(rotated_g))
            else:
                gt_var.append(Variable(g))
        # Rotation labels: 0 for originals, the angle index for rotated copies.
        rotation_gt = torch.zeros(inp[0].shape[0]).cuda()
        for sdx in range(0, bs):
            rotation_gt[sdx] = 0
            if is_train:
                rotation_gt[(bs + sdx)] = float(rotation_angles[sdx])
        gt_var.append(Variable(rotation_gt.long()))
        gt = tuple(gt_var)
        return (inp, gt)

    def _rotate_tensor(self, tensor, angle_idx):
        """Rotate a tensor by angle_idx * 90 degrees.

        Assumes a 3-dim (C, H, W) tensor (dims 1 and 2 are the spatial axes)
        -- see the warning in `_inp_warn`.
        """
        if (angle_idx == 1):
            tensor = tensor.transpose(1, 2).flip(2)
        elif (angle_idx == 2):
            tensor = tensor.flip(2).flip(1)
        elif (angle_idx == 3):
            tensor = tensor.transpose(1, 2).flip(1)
        return tensor

    def _inp_warn(self):
        """Warn about the constraints on multi-ground-truth task models."""
        logger.log_warn('More than one ground truth of the task model is given in SSL_S4L\nYou try to train the task model with more than one (pred & gt) pairs\nPlease make sure that:\n (1) The prediction tuple has the same size as the ground truth tuple\n (2) The elements with the same index in the two tuples are corresponding\n (3) All elements in the ground truth tuple should be 4-dim tensors since S4L\n will rotate them to match the rotated inputs\nPlease implement a new SSL algorithm if you want a variant of SSL_S4L that\nsupports other formants (not 4-dim tensor) of the ground truth\n')

    def _algorithm_warn(self):
        """Describe how this implementation differs from the S4L paper."""
        logger.log_warn("This SSL_S4L algorithm reproduces the SSL algorithm from the paper:\n 'S4L: Self-Supervised Semi-Supervised Learning'\nThe main differences between this implementation and the original paper are:\n (1) This is an implementation for pixel-wise vision tasks\n (2) This implementation only supports the 4-angle (0, 90, 180, 270) rotation-based self-supervised pretext task\n")
def time_llvm_rthroughput(arch, verbose, code):
    """Extract a throughput estimate from llvm-mca-style tool output.

    Parses the 'Total Cycles' figure from the output of ``time_llvm_base``
    and scales it by 100 (presumably the iteration count of the measured
    loop -- TODO confirm against time_llvm_base).

    Fix: the original read the hard-coded line index 11, which silently
    returns a wrong value if the tool adds or removes a header line. We now
    locate the 'Total Cycles' line by content and keep the fixed index only
    as a fallback.
    """
    output = time_llvm_base(arch, verbose, code)
    for line in output.split('\n'):
        # llvm-mca summary line looks like: "Total Cycles:      522"
        if 'Total Cycles' in line:
            return float(line.split()[2]) * 100
    # Fallback: historical behavior (line 11, third whitespace token).
    total_cycles_line = output.split('\n')[11]
    cycles = total_cycles_line.split()[2]
    return (float(cycles) * 100)
def separate2midi(midi_instruments, out_path, ticks_per_beat=TICKS_PER_BEAT, tempo=TEMPO, check_out_of_range_notes=False):
    """Assemble per-channel instrument event dicts into a MIDI file and dump it.

    Returns the MidiFile together with (lower, higher) counts of notes that
    were moved back into range (both 0 unless check_out_of_range_notes).
    """
    midi = miditoolkit.midi.parser.MidiFile()
    midi.ticks_per_beat = ticks_per_beat
    midi.tempo_changes.append(miditoolkit.TempoChange(tempo=tempo, time=0))
    for ch, midi_instrument in enumerate(midi_instruments):
        # Channel index decides whether this is the drum track.
        is_drum = (ch == DRUM_CHANNEL)
        for program in midi_instrument.keys():
            instrument = miditoolkit.midi.containers.Instrument(program, is_drum=is_drum)
            instrument.notes.extend(get_events(midi_instrument[program], filter='note'))
            instrument.pitch_bends.extend(get_events(midi_instrument[program], filter='pitch_bends'))
            # Drop instruments that ended up without any notes.
            if len(instrument.notes) != 0:
                midi.instruments.append(instrument)
    if check_out_of_range_notes:
        midi, (lower_cnt, higher_cnt) = change_out_of_range_notes(midi)
    else:
        lower_cnt, higher_cnt = 0, 0
    midi.dump(out_path)
    return (midi, (lower_cnt, higher_cnt))
def load_dart(split):
    """Load one split of the DART dataset from its JSON file."""
    assert split in SPLITS
    path = data_dir / f'{prefix}-full-{split}.json'
    with open(path) as f:
        return json.load(f)
class PythonMsg():
    """Frozen-field message base: existing attributes may be reassigned but
    new fields can never be added, which catches attribute-name typos.
    """

    def __setattr__(self, key, value):
        # Only fields that already exist may be assigned.
        if (not hasattr(self, key)):
            raise TypeError(('Cannot add new field "%s" to frozen class %s' % (key, self)))
        else:
            object.__setattr__(self, key, value)

    def print(self, depth=0, name=None):
        """Pretty-print the message tree.

        At depth 0 the text is printed and None is returned; recursive calls
        (depth > 0) return the accumulated string instead.
        """
        print_str = ''
        for j in range(depth):
            print_str += ' '
        if name:
            print_str += (((name + ' (') + type(self).__name__) + '):\n')
        else:
            print_str += (type(self).__name__ + ':\n')
        for key in vars(self):
            val = self.__getattribute__(key)
            if isinstance(val, PythonMsg):
                # Nested messages render themselves one level deeper.
                print_str += val.print(depth=(depth + 1), name=key)
            else:
                for j in range((depth + 1)):
                    print_str += ' '
                print_str += ((str(key) + '=') + str(val))
                print_str += '\n'
        if (depth == 0):
            print(print_str)
        else:
            return print_str

    def from_str(self, string_rep):
        """Parse field values back from a 'key=value, key2=value2, ...' string.

        Fix: the original used three independent ``if`` branches, so a quoted
        string value fell through to ``float(...)`` and raised ValueError;
        the branches are now mutually exclusive (quoted -> string,
        'None' -> None, otherwise -> float).
        """
        val_str_index = 0
        for key in vars(self):
            val_str_index = ((string_rep.find((key + '='), val_str_index) + len(key)) + 1)
            value_substr = string_rep[val_str_index:string_rep.find(',', val_str_index)]
            if ("'" in value_substr):
                # Quoted value -> plain string with the quotes stripped.
                self.__setattr__(key, value_substr[1:(- 1)])
            elif ('None' in value_substr):
                self.__setattr__(key, None)
            else:
                self.__setattr__(key, float(value_substr))

    def copy(self):
        """Return a deep copy of this message."""
        return copy.deepcopy(self)
class Logger():
    """INFO-level logger that writes both to a file under ckpt_path and stdout."""

    def __init__(self, ckpt_path, name='train'):
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)
        fmt = logging.Formatter('%(asctime)s %(message)s', datefmt=blue('[%Y-%m-%d,%H:%M:%S]'))
        log_file = os.path.join(ckpt_path, '{}.log'.format(name))
        # Same level/format for the file handler and the console handler.
        for handler in (logging.FileHandler(log_file, 'w'), logging.StreamHandler(sys.stdout)):
            handler.setLevel(logging.INFO)
            handler.setFormatter(fmt)
            self.logger.addHandler(handler)

    def print(self, log):
        """Log a message; lists are joined into a single multi-line entry."""
        if isinstance(log, list):
            self.logger.info('\n - '.join(log))
        else:
            self.logger.info(log)
# NOTE(review): looks like a stripped `@six.add_metaclass(abc.ABCMeta)` (or
# similar) decorator that originally applied to the test base class defined
# below -- confirm against upstream before relying on it. Left byte-identical.
_metaclass(abc.ABCMeta)
class BaseDataTest(tf.test.TestCase):
    """Reusable test-case base validating an ImageData dataset wrapper.

    Subclasses call `setUp` with their wrapper and expected metadata; the
    test_* methods then verify class hierarchy, splits, sample counts,
    output tensor shapes and label bookkeeping.
    """

    def setUp(self, data_wrapper, num_classes, expected_num_samples, required_tensors_shapes, default_label_key='label'):
        super(BaseDataTest, self).setUp()
        self.data_wrapper = data_wrapper
        self.expected_num_samples = expected_num_samples
        self.required_tensors_shapes = required_tensors_shapes
        self.default_label_key = default_label_key
        # `num_classes` is either a single int (for the default label key)
        # or a dict mapping label keys to class counts.
        if isinstance(num_classes, int):
            self.expected_num_classes = {default_label_key: num_classes}
        elif isinstance(num_classes, dict):
            self.expected_num_classes = num_classes
        else:
            raise ValueError('`num_classes` must be either int or dict')

    # NOTE(review): `test_dataset_output` iterates `self.expected_splits`
    # WITHOUT calling it, so this must be a property; the decorator appears
    # to have been stripped from the source and is restored here.
    @property
    def expected_splits(self):
        return ('train', 'val', 'trainval', 'test')

    def test_base_class(self):
        self.assertIsInstance(self.data_wrapper, base.ImageData, 'Dataset class must inherit from `base.ImageData`.')

    def test_split_dict_keys(self):
        expected_keys = set(self.expected_num_samples.keys())
        actual_keys = set(self.data_wrapper._num_samples_splits.keys())
        self.assertSetEqual(expected_keys, actual_keys)

    def test_num_samples(self):
        for (split, expected) in self.expected_num_samples.items():
            self.assertEqual(expected, self.data_wrapper.get_num_samples(split), msg=('Number of examples does not match for split "%s"' % split))

    def test_dataset_output(self):
        batch_size = 2
        for split in self.expected_splits:
            tf_data = self.data_wrapper.get_tf_data(split, batch_size)
            tf_data_output_shapes = tf.data.get_output_shapes(tf_data)
            self.assertIsInstance(tf_data_output_shapes, dict)
            for (tensor_name, expected_shape) in self.required_tensors_shapes.items():
                self.assertIn(tensor_name, tf_data_output_shapes.keys())
                # Prepend the batch dimension to the per-sample shape.
                expected_shape = ([batch_size] + list(expected_shape))
                actual_shape = tf_data_output_shapes[tensor_name].as_list()
                self.assertEqual(actual_shape, expected_shape, msg='Tensor {!r} for split {!r} does not match the expected value'.format(tensor_name, split))

    def test_label_keys(self):
        self.assertEqual(self.default_label_key, self.data_wrapper.default_label_key)
        self.assertIn(self.default_label_key, self.data_wrapper.label_keys)
        self.assertDictEqual(self.expected_num_classes, self.data_wrapper._num_classes)

    def test_get_num_classes(self):
        self.assertEqual(self.expected_num_classes[self.data_wrapper.default_label_key], self.data_wrapper.get_num_classes())
        for (label_key, num_classes) in self.expected_num_classes.items():
            self.assertEqual(num_classes, self.data_wrapper.get_num_classes(label_key), msg=('Number of classes does not match for label "%s"' % label_key))

    # NOTE(review): the first parameter is named `cls`, so the @classmethod
    # decorator appears to have been stripped; restored here.
    @classmethod
    def iterate_dataset(cls, dataset, session):
        """Yield batches from `dataset` until the iterator is exhausted."""
        dataset_iter = tf.compat.v1.data.make_initializable_iterator(dataset)
        get_next = dataset_iter.get_next()
        try:
            session.run(dataset_iter.initializer)
            while True:
                (yield session.run(get_next))
        except tf.errors.OutOfRangeError:
            # Normal termination: the dataset is exhausted.
            pass
class Founta2018(dataset.Dataset):
    """Founta et al. 2018 Twitter abuse dataset wrapper."""

    name = 'founta2018'
    # NOTE(review): the URL literal was truncated in this source (it read
    # `url = '` with no closing quote, a syntax error); restored as an empty
    # string so the module parses -- the real download URL must be
    # re-inserted from upstream.
    url = ''
    hash = '35f19a5746eac9be27cd635a09b9ceddf10d84fb140cacef'
    files = [{'name': 'founta2018en.csv', 'language': 'en', 'type': 'training', 'platform': 'twitter'}]
    comment = ' '
    license = 'UNKNOWN'

    # NOTE(review): both methods below take `cls` as first parameter but
    # carried no @classmethod decorator; it appears to have been stripped
    # and is restored here -- confirm against the dataset.Dataset base class.
    @classmethod
    def process(cls, tmp_file_path, dataset_folder, api_config):
        """Clean the raw CSV, hydrate tweets via the API, and store the result."""
        tmp_file_path = helpers.clean_csv(tmp_file_path, names=['tweet', 'class'])
        tmp_file_path = helpers.download_tweets_for_csv(tmp_file_path, 'tweet', api_config)
        helpers.copy_file(tmp_file_path, os.path.join(dataset_folder, 'founta2018en.csv'))

    @classmethod
    def unify_row(cls, row):
        """Map the dataset-specific 'class' column to the unified 'labels' list."""
        row['labels'] = [row['class']]
        row = row.drop(['class'])
        return row
def make_ik_env():
    """Launch a Swift notebook environment populated with the IK scene."""
    environment = Swift()
    environment.launch(realtime=True, browser='notebook')
    # Scene contents: the robot plus end-effector and goal frames.
    for scene_object in (panda, ee_axes, goal_axes):
        environment.add(scene_object)
    return environment
class LTAE(nn.Module):
    """Lightweight Temporal Attention Encoder over per-date feature vectors."""

    def __init__(self, in_channels=128, n_head=16, d_k=8, n_neurons=[256, 128], dropout=0.2, d_model=256, T=1000, max_temporal_shift=100, max_position=365):
        super(LTAE, self).__init__()
        self.in_channels = in_channels
        # Deep-copy so the shared mutable default list is never mutated.
        self.n_neurons = copy.deepcopy(n_neurons)
        self.max_temporal_shift = max_temporal_shift
        if d_model is not None:
            # Project inputs to the model dimension first.
            self.d_model = d_model
            self.inconv = LinearLayer(in_channels, d_model)
        else:
            self.d_model = in_channels
            self.inconv = None
        # The positional table is widened by 2 * max_temporal_shift so that
        # shifted positions still index inside the embedding.
        table_size = max_position + 2 * max_temporal_shift
        self.positional_enc = nn.Embedding.from_pretrained(get_positional_encoding(table_size, self.d_model, T=T), freeze=True)
        self.attention_heads = MultiHeadAttention(n_head=n_head, d_k=d_k, d_in=self.d_model)
        assert self.n_neurons[0] == self.d_model
        # MLP decoder stacked from consecutive n_neurons sizes.
        mlp_layers = [LinearLayer(self.n_neurons[i], self.n_neurons[i + 1])
                      for i in range(len(self.n_neurons) - 1)]
        self.mlp = nn.Sequential(*mlp_layers)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, positions, return_att=False):
        if self.inconv is not None:
            x = self.inconv(x)
        # Shift positions into the widened embedding table before lookup.
        enc_output = x + self.positional_enc(positions + self.max_temporal_shift)
        enc_output, attn = self.attention_heads(enc_output)
        enc_output = self.dropout(self.mlp(enc_output))
        return (enc_output, attn) if return_att else enc_output
class BouncingBallExample(nn.Module):
    """Differentiable bouncing ball simulated with event-based ODE solving.

    State is (position, velocity, log_radius). Gravity, radius, start time,
    initial conditions and the collision absorption coefficient are all
    learnable parameters.
    """
    def __init__(self, radius=0.2, gravity=9.8, adjoint=False):
        super().__init__()
        self.gravity = nn.Parameter(torch.as_tensor([gravity]))
        # Radius is stored in log-space so it stays positive under updates.
        self.log_radius = nn.Parameter(torch.log(torch.as_tensor([radius])))
        self.t0 = nn.Parameter(torch.tensor([0.0]))
        self.init_pos = nn.Parameter(torch.tensor([10.0]))
        self.init_vel = nn.Parameter(torch.tensor([0.0]))
        self.absorption = nn.Parameter(torch.tensor([0.2]))
        # Select the (adjoint or direct) solver used inside odeint_event.
        self.odeint = (odeint_adjoint if adjoint else odeint)
    def forward(self, t, state):
        """ODE right-hand side: free fall under constant gravity."""
        (pos, vel, log_radius) = state
        dpos = vel
        dvel = (- self.gravity)
        # log_radius is constant along the trajectory.
        return (dpos, dvel, torch.zeros_like(log_radius))
    def event_fn(self, t, state):
        """Zero when the ball's surface reaches the ground (pos == radius)."""
        (pos, _, log_radius) = state
        return (pos - torch.exp(log_radius))
    def get_initial_state(self):
        """Return (t0, state) with the learnable initial conditions."""
        state = (self.init_pos, self.init_vel, self.log_radius)
        return (self.t0, state)
    def state_update(self, state):
        """Instantaneous collision update: reflect and damp the velocity."""
        (pos, vel, log_radius) = state
        # Nudge position off the event surface so the next solve does not
        # immediately re-trigger the same event.
        pos = (pos + 1e-07)
        vel = ((- vel) * (1 - self.absorption))
        return (pos, vel, log_radius)
    def get_collision_times(self, nbounces=1):
        """Return the times of the first `nbounces` ground impacts."""
        event_times = []
        (t0, state) = self.get_initial_state()
        for i in range(nbounces):
            (event_t, solution) = odeint_event(self, state, t0, event_fn=self.event_fn, reverse_time=False, atol=1e-08, rtol=1e-08, odeint_interface=self.odeint)
            event_times.append(event_t)
            # Restart integration from the post-collision state.
            state = self.state_update(tuple((s[(- 1)] for s in solution)))
            t0 = event_t
        return event_times
    def simulate(self, nbounces=1):
        """Integrate through `nbounces` impacts, sampling ~50 points per unit time.

        Returns (times, positions, velocities, event_times) as flat tensors.
        """
        event_times = self.get_collision_times(nbounces)
        (t0, state) = self.get_initial_state()
        trajectory = [state[0][None]]
        velocity = [state[1][None]]
        times = [t0.reshape((- 1))]
        for event_t in event_times:
            # Interior sample grid between successive events; exact endpoints
            # are re-attached below so each segment spans [t0, event_t].
            tt = torch.linspace(float(t0), float(event_t), int(((float(event_t) - float(t0)) * 50)))[1:(- 1)]
            tt = torch.cat([t0.reshape((- 1)), tt, event_t.reshape((- 1))])
            solution = odeint(self, state, tt, atol=1e-08, rtol=1e-08)
            trajectory.append(solution[0])
            velocity.append(solution[1])
            times.append(tt)
            state = self.state_update(tuple((s[(- 1)] for s in solution)))
            t0 = event_t
        return (torch.cat(times), torch.cat(trajectory, dim=0).reshape((- 1)), torch.cat(velocity, dim=0).reshape((- 1)), event_times)
def make_follower(args, vocab):
    """Construct a Seq2SeqAgent "follower" from command-line args and a vocab.

    Assembles the instruction encoder, action decoder, optional progress /
    deviation monitors, backtrack button, object pointer and scorer, then
    optionally restores weights from ``args.load_follower``.

    BUGFIX: empty-string checks used identity (`is ''` / `is not ''`), which
    is implementation-defined and a SyntaxWarning on modern CPython; they now
    compare by value. Also fixed the 'detectoin' typo in the log message and
    added an explicit error for unknown ``useObjLabelOrVis`` values (it
    previously fell through to a confusing NameError).
    """
    enc_hidden_size = (hidden_size // 2) if args.bidirectional else hidden_size
    glove_path = osp.join(file_path, 'data', 'train_glove.npy')
    glove = np.load(glove_path) if args.use_glove else None
    # Image feature size: scene feature (2048) + orientation encoding (128),
    # optionally extended by object visual and/or language feature dims.
    if args.useObjLabelOrVis == 'none':
        feature_size = action_embedding_size = 2048 + 128
    elif args.useObjLabelOrVis == 'vis':
        feature_size = action_embedding_size = (2048 + 128) + args.objVisFeatDim
    elif args.useObjLabelOrVis == 'label':
        feature_size = action_embedding_size = (2048 + 128) + args.objLanFeatDim
    elif args.useObjLabelOrVis == 'both':
        feature_size = (2048 + 128) + args.objVisFeatDim + args.objLanFeatDim
        action_embedding_size = (2048 + args.objVisFeatDim + args.objLanFeatDim) + 128
    else:
        raise ValueError(f'Unknown useObjLabelOrVis: {args.useObjLabelOrVis!r}')
    Encoder = TransformerEncoder if args.transformer else EncoderLSTM
    Decoder = CogroundDecoderLSTM if args.coground else AttnDecoderLSTM
    word_embedding_size = 256 if args.coground else 300
    encoder = try_cuda(Encoder(len(vocab), word_embedding_size, enc_hidden_size, vocab_pad_idx, dropout_ratio, bidirectional=args.bidirectional, glove=glove))
    decoder = try_cuda(Decoder(action_embedding_size, hidden_size, dropout_ratio, feature_size=feature_size, num_head=args.num_head))
    prog_monitor = try_cuda(ProgressMonitor(action_embedding_size, hidden_size)) if args.prog_monitor else None
    bt_button = try_cuda(BacktrackButton()) if args.bt_button else None
    dev_monitor = try_cuda(DeviationMonitor(action_embedding_size, hidden_size)) if args.dev_monitor else None
    agent = Seq2SeqAgent(None, '', encoder, decoder, max_episode_len, max_instruction_length=MAX_INPUT_LENGTH, attn_only_verb=args.attn_only_verb)
    agent.prog_monitor = prog_monitor
    agent.dev_monitor = dev_monitor
    agent.bt_button = bt_button
    agent.soft_align = args.soft_align
    if args.useObjLabelOrVis != 'none':
        if args.useDect:
            print('Using detection-based pointer')
            agent.pointer = DectPointer(args)
        else:
            print('Using gt-based pointer')
            agent.pointer = Pointer(args)
        agent.useObjLabelOrVis = args.useObjLabelOrVis
        agent.objTopK = args.objTopK
        agent.objVisFeatDim = args.objVisFeatDim
        agent.objLanFeatDim = args.objLanFeatDim
        agent.ObjEachViewVisFeatPath = osp.join(root_path, 'img_features', args.ObjEachViewVisFeatDir)
        agent.ObjEachViewLanFeatPath = osp.join(root_path, 'img_features', args.ObjEachViewLanFeatDir)
        # Lazy per-view feature caches, filled during rollout.
        agent.ObjEachViewVisFeat = {}
        agent.ObjEachViewLanFeat = {}
        dict_glove = np.load(args.labelGlovePath)
        if args.useObjLabelOrVis in ['label', 'both']:
            agent.objLabelEncoder = try_cuda(EncoderLSTMGlove(dict_glove.shape[0], 300, enc_hidden_size // 2, vocab_pad_idx, dropout_ratio, bidirectional=True, glove=dict_glove))
        else:
            agent.objLabelEncoder = None
    else:
        agent.pointer = None
    if args.scorer:
        agent.scorer = make_scorer(args)
    if args.load_follower != '':
        # Only load the scorer when no separate scorer checkpoint was given
        # and a scorer encoder file exists next to the follower checkpoint.
        scorer_exists = osp.isfile(args.load_follower + '_scorer_enc')
        agent.load(args.load_follower, load_scorer=(args.load_scorer == '' and scorer_exists))
        print(colorize('load follower ' + args.load_follower))
    return agent
_module()  # NOTE(review): looks like a mangled registration decorator (e.g. `@PIPELINES.register_module()`) that lost its `@` and arguments during preprocessing — confirm against the upstream source.
class WrapFieldsToLists():
    """Pipeline step wrapping every value of ``results`` in a one-element list.

    Mutates the input dict in place and returns the same object.
    """

    def __call__(self, results):
        # Re-assigning values while iterating is safe: no keys are added or removed.
        for key in results:
            results[key] = [results[key]]
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}()'
def test_main(capsys):
    """`main(['7'])` should report the 7th Fibonacci number (13) on stdout."""
    main(['7'])
    out = capsys.readouterr().out
    assert 'The 7-th Fibonacci number is 13' in out
class Small_CNN(nn.Module):
    """Four-conv classifier with one wide hidden fully-connected layer.

    The two stride-2 convolutions reduce spatial size by 4x in each
    dimension; the Linear(4096, ...) implies 64 * 8 * 8 flattened features
    (i.e. 32x32 inputs) — 10 output classes.
    """

    def __init__(self, hidden_size=20000):
        super(Small_CNN, self).__init__()
        layers = [
            nn.Conv2d(3, 32, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 32, 3, padding=1, stride=2),  # downsample 2x
            nn.ReLU(),
            nn.Conv2d(32, 64, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(64, 64, 3, padding=1, stride=2),  # downsample 2x
            nn.ReLU(),
            Flatten(),
            nn.Linear(4096, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 10),
        ]
        self.cnn = nn.Sequential(*layers)

    def forward(self, x):
        return self.cnn(x)
def config_parallelisation(config, igpu, ngpus):
    """Shard a hyper-parameter grid across GPUs.

    With fewer than two GPUs the config is returned untouched. Otherwise the
    grid is expanded with ``ParameterGrid`` and GPU ``igpu`` receives every
    ``ngpus``-th configuration (round-robin), so shards are disjoint and
    together cover the whole grid.

    Cleanup: the redundant `if ngpus < 2: pass / elif ngpus >= 2:` ladder is
    collapsed, and the manual index loop is replaced by the equivalent
    extended slice.
    """
    if ngpus >= 2:
        config_list = list(ParameterGrid(param_grid=config))
        # Round-robin assignment: igpu, igpu + ngpus, igpu + 2*ngpus, ...
        config = config_list[igpu::ngpus]
    return config
class NCPruner(WeightPruner):
    """Weight pruner driven by neuron-coverage statistics.

    Each conv weight is scored by the summed accumulated coverage of its
    input and output channels; the lowest-scoring ``prune_ratio`` fraction
    is masked to zero.

    Fixes vs. previous revision: removed an unreachable leftover debugger
    call (`st()`) after the return in ``load_nc_info``, and corrected the
    ratio format spec in ``check_param_num`` from `2f` (min field width 2)
    to the intended `.2f` (two decimals).
    """

    def __init__(self, args, model, teacher, train_loader, test_loader):
        super(NCPruner, self).__init__(args, model, teacher, train_loader, test_loader)

    def prune_record(self, log):
        """Echo a message to stdout and append it to the run logger."""
        print(log)
        self.logger.write(log + '\n')

    def init_prune(self):
        """Apply the initial coverage-based prune at the configured ratio."""
        ratio = self.args.weight_init_prune_ratio
        log = f'Init prune ratio {ratio:.2f}'
        self.prune_record(log)
        self.weight_prune(ratio)
        self.check_param_num()

    def check_param_num(self):
        """Log total / remaining conv parameter counts and the prune ratio."""
        model = self.model
        total = sum(module.weight.nelement() for module in model.modules() if isinstance(module, nn.Conv2d))
        num = total
        for m in model.modules():
            if isinstance(m, nn.Conv2d):
                # Zeroed weights count as pruned.
                num -= int((m.weight.data == 0).sum())
        ratio = (total - num) / total
        log = f'===>Check: Total {total}, current {num}, prune ratio {ratio:.2f}'
        self.prune_record(log)

    def load_nc_info(self):
        """Load accumulated coverage stats and the matching module names."""
        path = osp.join(self.args.nc_info_dir, 'accumulate_coverage.npy')
        with open(path, 'rb') as f:
            accumulate_coverage = np.load(f, allow_pickle=True)
        path = osp.join(self.args.nc_info_dir, 'log_module_names.npy')
        with open(path, 'rb') as f:
            log_names = np.load(f, allow_pickle=True)
        return (accumulate_coverage, log_names)

    def weight_prune(self, prune_ratio, random_prune=False):
        """Mask the lowest-coverage ``prune_ratio`` fraction of conv weights.

        ``random_prune`` is accepted for interface compatibility but unused.
        """
        model = self.model.cpu()
        total_weight = 0
        for (name, module) in model.named_modules():
            if isinstance(module, nn.Conv2d):
                total_weight += module.weight.numel()
        (accumulate_coverage, log_names) = self.load_nc_info()
        # Score every (input_channel, output_channel) pair by summed coverage.
        all_weight_coverage = []
        for (layer_idx, (input_coverage, output_coverage)) in enumerate(accumulate_coverage):
            (input_dim, output_dim) = (len(input_coverage), len(output_coverage))
            for input_idx in range(input_dim):
                for output_idx in range(output_dim):
                    all_weight_coverage.append(input_coverage[input_idx] + output_coverage[output_idx])
        total = len(all_weight_coverage)
        sorted_coverage = np.sort(all_weight_coverage)
        thre_index = int(total * prune_ratio)
        if thre_index == total:
            # Keep the threshold index in range when pruning everything.
            thre_index -= 1
        thre = sorted_coverage[thre_index]
        log = f'Pruning threshold: {thre:.4f}'
        self.prune_record(log)
        # Collect, per module, the channel pairs scoring below the threshold.
        prune_index = {}
        for (layer_index, module_name) in enumerate(log_names):
            prune_index[module_name] = []
        for (layer_idx, (input_coverage, output_coverage)) in enumerate(accumulate_coverage):
            (input_dim, output_dim) = (len(input_coverage), len(output_coverage))
            module_name = log_names[layer_idx]
            for input_idx in range(input_dim):
                for output_idx in range(output_dim):
                    score = input_coverage[input_idx] + output_coverage[output_idx]
                    if score < thre:
                        prune_index[module_name].append((input_idx, output_idx))
        pruned = 0
        for (name, module) in model.named_modules():
            if isinstance(module, nn.Conv2d):
                weight_copy = module.weight.data.abs().clone()
                assert (name in prune_index), f'{name} not in log names'
                if len(prune_index[name]) == 0:
                    continue
                for (input_idx, output_idx) in prune_index[name]:
                    # Zero the whole kernel connecting input_idx -> output_idx.
                    weight_copy[(output_idx, input_idx)] -= weight_copy[(output_idx, input_idx)]
                # Binary mask: 1 where weights survive, 0 where pruned.
                weight_copy[(weight_copy != 0)] = 1.0
                mask = weight_copy
                pruned = (pruned + mask.numel()) - torch.sum(mask)
                module.weight.data.mul_(mask)
                remain_ratio = int(torch.sum(mask)) / mask.numel()
                log = f'layer {name} total params: {mask.numel()} remaining params: {int(torch.sum(mask))}({remain_ratio:.2f})'
                self.prune_record(log)
        log = f'Total conv params: {total_weight}, Pruned conv params: {pruned}, Pruned ratio: {(pruned / total_weight):.2f}'
        self.prune_record(log)
        self.model = model.cuda()
        self.check_param_num()

    def final_check_param_num(self):
        """Re-open the log file, record final parameter stats, and close it."""
        self.logger = open(self.log_path, 'a')
        self.check_param_num()
        self.logger.close()
def get_etypes(annots: list[str]) -> list[(None | str)]:
return [(annot[2:] if (annot != 'O') else None) for annot in annots] |
def diapreresnet164bn_cifar10(num_classes=10, **kwargs):
    """DIA-PreResNet-164(BN) model for CIFAR-10 (164 layers, bottleneck blocks).

    Extra keyword arguments are forwarded to ``get_diapreresnet_cifar``.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, blocks=164, bottleneck=True, model_name='diapreresnet164bn_cifar10', **kwargs)
def find_2d_configuration():
    """Smoke-test a candidate 2D FabiansUNet configuration on GPU.

    Builds the network at the hard-coded patch/batch size, runs 20 training
    steps on random dummy data (exercising memory and throughput only), and
    finally prints the analytic VRAM consumption estimate.
    """
    cudnn.deterministic = False
    cudnn.benchmark = True
    patch_size = (512, 512)
    max_num_features = 512
    num_modalities = 1
    num_classes = 3
    batch_size = 12
    blocks_per_stage_encoder = FabiansUNet.default_blocks_per_stage_encoder
    blocks_per_stage_decoder = FabiansUNet.default_blocks_per_stage_decoder
    initial_num_features = 30
    # 8 stages: no pooling at the first stage, 2x2 pooling afterwards.
    pool_op_kernel_sizes = [[1, 1], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]]
    conv_op_kernel_sizes = [[3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3], [3, 3]]
    unet = FabiansUNet(num_modalities, initial_num_features, blocks_per_stage_encoder[:len(conv_op_kernel_sizes)], 2, pool_op_kernel_sizes, conv_op_kernel_sizes, get_default_network_config(2, dropout_p=None), num_classes, blocks_per_stage_decoder[:(len(conv_op_kernel_sizes) - 1)], False, False, max_features=max_num_features).cuda()
    optimizer = SGD(unet.parameters(), lr=0.1, momentum=0.95)
    loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-05, 'do_bg': False}, {})
    # Random inputs/targets: labels are clamped to the valid class range.
    dummy_input = torch.rand((batch_size, num_modalities, *patch_size)).cuda()
    dummy_gt = (torch.rand((batch_size, 1, *patch_size)) * num_classes).round().clamp_(0, 2).cuda().long()
    for _ in range(20):
        optimizer.zero_grad()
        skips = unet.encoder(dummy_input)
        print([i.shape for i in skips])
        output = unet.decoder(skips)
        l = loss(output, dummy_gt)
        l.backward()
        optimizer.step()
        if (_ == 0):
            # Drop cached blocks once after the first step so subsequent
            # memory readings reflect steady state.
            torch.cuda.empty_cache()
    print(FabiansUNet.compute_approx_vram_consumption(patch_size, initial_num_features, max_num_features, num_modalities, num_classes, pool_op_kernel_sizes, blocks_per_stage_encoder[:len(conv_op_kernel_sizes)], blocks_per_stage_decoder[:(len(conv_op_kernel_sizes) - 1)], 2, batch_size))
def load_data(args):
    """Create NYU Depth v2 train/test DataLoaders from CSV file lists.

    Both splits share the same loader settings (batch size 256, 16 workers,
    no pinned memory) and the same geometric preprocessing
    (Scale(240) -> CenterCrop 304x228 -> ToTensor).

    NOTE(review): shuffle=False on the *train* loader is unusual — confirm
    it is intentional (training typically shuffles).
    """
    train_dataset = depthDataset(csv_file=os.path.join(args.data_dir, 'nyu2_train.csv'), transform=transforms.Compose([Scale(240), CenterCrop([304, 228], [304, 228]), ToTensor(is_test=False)]))
    train_dataloader = DataLoader(train_dataset, 256, shuffle=False, num_workers=16, pin_memory=False)
    test_dataset = depthDataset(csv_file=os.path.join(args.data_dir, 'nyu2_test.csv'), transform=transforms.Compose([Scale(240), CenterCrop([304, 228], [304, 228]), ToTensor(is_test=True)]))
    test_dataloader = DataLoader(test_dataset, 256, shuffle=False, num_workers=16, pin_memory=False)
    return (train_dataloader, test_dataloader)
# NOTE(review): the bare call below — and the `_start_docstrings_to_model_forward`
# / `_inputs` / `_return_docstrings` lines inside the class body — look like
# decorators that lost their `@` during preprocessing; confirm against upstream.
_start_docstrings(VISION_TEXT_DUAL_ENCODER_START_DOCSTRING)
class TFVisionTextDualEncoderModel(TFPreTrainedModel):
    """CLIP-style dual encoder pairing a TF vision model with a TF text model.

    Pooled outputs of both towers are projected into a shared space of size
    ``config.projection_dim``; ``call`` returns image/text similarity logits
    scaled by a learned temperature (``logit_scale``).
    """
    config_class = VisionTextDualEncoderConfig
    base_model_prefix = 'vision_text_dual_encoder'
    load_weight_prefix = 'tf_vision_text_dual_encoder_model'
    def __init__(self, config: Optional[VisionTextDualEncoderConfig]=None, vision_model: Optional[TFPreTrainedModel]=None, text_model: Optional[TFPreTrainedModel]=None):
        """Build from a config, explicit sub-models, or both.

        Raises ValueError when neither a config nor both sub-models are given,
        or when ``config`` is of the wrong type.
        """
        if ((config is None) and ((vision_model is None) or (text_model is None))):
            raise ValueError('Either a configuration or an vision and a text model has to be provided')
        if (config is None):
            # Derive a combined config from the two sub-model configs.
            config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_model.config, text_model.config)
        elif (not isinstance(config, self.config_class)):
            raise ValueError(f'config: {config} has to be of type {self.config_class}')
        super().__init__(config)
        if (vision_model is None):
            if isinstance(config.vision_config, CLIPVisionConfig):
                vision_model = TFCLIPVisionModel.from_config(config.vision_config, name='vision_model')
            else:
                vision_model = TFAutoModel.from_config(config.vision_config, name='vision_model')
        if (text_model is None):
            text_model = TFAutoModel.from_config(config.text_config, name='text_model')
        self.vision_model = vision_model
        self.text_model = text_model
        # Keep sub-model configs in sync with the combined config so edits to
        # one are reflected in the other.
        self.vision_model.config = self.config.vision_config
        self.text_model.config = self.config.text_config
        self.vision_embed_dim = config.vision_config.hidden_size
        self.text_embed_dim = config.text_config.hidden_size
        self.projection_dim = config.projection_dim
        self.visual_projection = Dense(self.projection_dim, use_bias=False, name='visual_projection')
        self.text_projection = Dense(self.projection_dim, use_bias=False, name='text_projection')
        # Created lazily in build() (Keras weight creation happens there).
        self.logit_scale = None
    def build(self, input_shape=None):
        """Create the learned temperature weight at its configured init value."""
        initializer = tf.keras.initializers.Constant(self.config.logit_scale_init_value)
        self.logit_scale = self.add_weight(shape=(1,), initializer=initializer, name='logit_scale')
    # NOTE(review): takes `cls` but carries no @classmethod decorator here —
    # upstream this is a classmethod; the decorator appears stripped.
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Load weights; when converting from PyTorch, install a weight renamer
        that collapses the doubled vision/text prefixes."""
        if kwargs.get('from_pt', False):
            def tf_to_pt_weight_rename(tf_weight):
                if ('vision_model' in tf_weight):
                    if (tf_weight.count('vision_model') == 1):
                        return re.sub('vision_model\\..*?\\.', 'vision_model.', tf_weight)
                    elif (tf_weight.count('vision_model') == 2):
                        return re.sub('vision_model\\..*?\\.vision_model', 'vision_model.vision_model', tf_weight)
                    else:
                        raise ValueError(f'Unexpected weight name {tf_weight}. Please file an issue on the Transformers repo to let us know about this error!')
                elif ('text_model' in tf_weight):
                    return re.sub('text_model\\..*?\\.', 'text_model.', tf_weight)
                else:
                    return tf_weight
            kwargs['tf_to_pt_weight_rename'] = tf_to_pt_weight_rename
        return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
    _start_docstrings_to_model_forward(VISION_TEXT_DUAL_ENCODER_TEXT_INPUTS_DOCSTRING)
    def get_text_features(self, input_ids=None, attention_mask=None, position_ids=None, token_type_ids=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        """Return projected pooled text embeddings (text tower + projection)."""
        text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        # Index 1 is the pooled output of the text tower.
        pooled_output = text_outputs[1]
        text_features = self.text_projection(pooled_output)
        return text_features
    _start_docstrings_to_model_forward(VISION_TEXT_DUAL_ENCODER_VISION_INPUTS_DOCSTRING)
    def get_image_features(self, pixel_values=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        """Return projected pooled image embeddings (vision tower + projection)."""
        vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        # Index 1 is the pooled output of the vision tower.
        pooled_output = vision_outputs[1]
        image_features = self.visual_projection(pooled_output)
        return image_features
    _inputs
    _start_docstrings_to_model_forward(VISION_TEXT_DUAL_ENCODER_INPUTS_DOCSTRING)
    _return_docstrings(output_type=TFCLIPOutput, config_class=_CONFIG_FOR_DOC)
    def call(self, input_ids: Optional[tf.Tensor]=None, pixel_values: Optional[tf.Tensor]=None, attention_mask: Optional[tf.Tensor]=None, position_ids: Optional[tf.Tensor]=None, return_loss: Optional[bool]=None, token_type_ids: Optional[tf.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[(Tuple[tf.Tensor], TFCLIPOutput)]:
        """Run both towers, project and L2-normalize the pooled embeddings,
        and return temperature-scaled image/text similarity logits (optionally
        with the contrastive loss when ``return_loss`` is set)."""
        return_dict = (return_dict if (return_dict is not None) else self.config.return_dict)
        vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        image_embeds = vision_outputs[1]
        image_embeds = self.visual_projection(image_embeds)
        text_embeds = text_outputs[1]
        text_embeds = self.text_projection(text_embeds)
        # Normalize so the matmul below yields cosine similarities.
        image_embeds = (image_embeds / tf.norm(image_embeds, axis=(- 1), keepdims=True))
        text_embeds = (text_embeds / tf.norm(text_embeds, axis=(- 1), keepdims=True))
        logit_scale = tf.math.exp(self.logit_scale)
        logits_per_text = (tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale)
        logits_per_image = tf.transpose(logits_per_text)
        loss = None
        if return_loss:
            loss = clip_loss(logits_per_text)
            if (loss.shape.rank == 0):
                # Keras expects at least rank-1 losses.
                loss = tf.expand_dims(loss, 0)
        if (not return_dict):
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return (((loss,) + output) if (loss is not None) else output)
        return TFCLIPOutput(loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs)
    # NOTE(review): also takes `cls` with no @classmethod decorator — see above.
    def from_vision_text_pretrained(cls, vision_model_name_or_path: str=None, text_model_name_or_path: str=None, *model_args, **kwargs) -> TFPreTrainedModel:
        """Instantiate from two separately pretrained checkpoints.

        ``vision_*`` / ``text_*`` prefixed kwargs are routed to the respective
        sub-model loader; projection layers and logit_scale remain freshly
        initialized (a warning is emitted).
        """
        # Split prefixed kwargs and remove them from the shared kwargs.
        kwargs_vision = {argument[len('vision_'):]: value for (argument, value) in kwargs.items() if argument.startswith('vision_')}
        kwargs_text = {argument[len('text_'):]: value for (argument, value) in kwargs.items() if argument.startswith('text_')}
        for key in kwargs_vision.keys():
            del kwargs[('vision_' + key)]
        for key in kwargs_text.keys():
            del kwargs[('text_' + key)]
        vision_model = kwargs_vision.pop('model', None)
        if (vision_model is None):
            if (vision_model_name_or_path is None):
                raise ValueError('If `vision_model` is not defined as an argument, a `vision_model_name_or_path` has to be defined')
            kwargs_vision['name'] = 'vision_model'
            kwargs_vision['load_weight_prefix'] = cls.load_weight_prefix
            (vision_config_dict, unused_args) = PretrainedConfig.get_config_dict(vision_model_name_or_path, **kwargs)
            if (vision_config_dict.get('model_type', None) == 'clip_vision_model'):
                vision_config = CLIPVisionConfig.from_dict(vision_config_dict)
            else:
                vision_config = AutoConfig.from_pretrained(vision_model_name_or_path)
            # CLIP checkpoints may be full CLIP models or vision-only models.
            if (vision_config.model_type == 'clip_vision_model'):
                kwargs_vision['config'] = vision_config
                vision_class = TFCLIPVisionModel
            elif (vision_config.model_type == 'clip'):
                kwargs_vision['config'] = vision_config.vision_config
                vision_class = TFCLIPVisionModel
            else:
                kwargs_vision['config'] = vision_config
                vision_class = TFAutoModel
            vision_model = vision_class.from_pretrained(vision_model_name_or_path, *model_args, **kwargs_vision)
        text_model = kwargs_text.pop('model', None)
        if (text_model is None):
            if (text_model_name_or_path is None):
                raise ValueError('If `text_model` is not defined as an argument, a `text_model_name_or_path` has to be defined')
            kwargs_text['name'] = 'text_model'
            kwargs_text['load_weight_prefix'] = cls.load_weight_prefix
            if ('config' not in kwargs_text):
                text_config = AutoConfig.from_pretrained(text_model_name_or_path)
                kwargs_text['config'] = text_config
            text_model = TFAutoModel.from_pretrained(text_model_name_or_path, *model_args, **kwargs_text)
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_model.config, text_model.config, **kwargs)
        model = cls(config=config, vision_model=vision_model, text_model=text_model)
        logger.warning("The projection layer and logit scale weights `['visual_projection.weight', 'text_projection.weight', 'logit_scale']` are newly initialized. You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.")
        if (vision_model.name != 'vision_model'):
            raise ValueError('vision model must be created with the name `vision_model`.')
        if (text_model.name != 'text_model'):
            raise ValueError('text model must be created with the name `text_model`.')
        # Build all weights once with dummy inputs so saving/loading works.
        model(model.dummy_inputs)
        return model
    # NOTE(review): upstream this is a @property; the decorator appears stripped.
    def dummy_inputs(self):
        """Dummy inputs (text ids + random pixel values) used to build the model."""
        input_ids = tf.constant(DUMMY_INPUTS, dtype=tf.int32)
        (batch_size, seq_len) = input_ids.shape
        VISION_DUMMY_INPUTS = tf.random.uniform(shape=(batch_size, self.config.vision_config.num_channels, self.config.vision_config.image_size, self.config.vision_config.image_size), dtype=tf.float32)
        pixel_values = tf.constant(VISION_DUMMY_INPUTS)
        dummy = {'pixel_values': pixel_values, 'input_ids': input_ids}
        return dummy
class BaseTrainer():
    """Generic training-loop scaffold: optimizer/scheduler setup, wandb
    logging, periodic validation, and checkpointing.

    Subclasses must implement ``train_on_batch`` and ``validate_on_batch``.

    BUGFIX: ``iters_per_epoch`` and ``total_iters`` are now properties — the
    rest of the class uses them in arithmetic (e.g.
    ``self.config.epochs * self.iters_per_epoch`` and
    ``int(self.config.validate_every * self.iters_per_epoch)``), which raises
    TypeError on a bound method. The decorators appear to have been stripped.
    """
    def __init__(self, config, model, train_loader, test_loader=None, device=None):
        """Store config/model/loaders, pick a device, build optimizer + scheduler."""
        self.config = config
        # Metric used to select the "best" checkpoint (lower is better).
        self.metric_criterion = 'abs_rel'
        if (device is None):
            device = (torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu'))
        self.device = device
        self.model = model
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.optimizer = self.init_optimizer()
        self.scheduler = self.init_scheduler()
    def resize_to_target(self, prediction, target):
        """Bilinearly resize `prediction` to match `target`'s spatial size."""
        if (prediction.shape[2:] != target.shape[(- 2):]):
            prediction = nn.functional.interpolate(prediction, size=target.shape[(- 2):], mode='bilinear', align_corners=True)
        return prediction
    def load_ckpt(self, checkpoint_dir='./checkpoints', ckpt_type='best'):
        """Load model weights from `config.checkpoint` or a glob pattern.

        Silently does nothing when neither is configured. Optimizer/scheduler
        state is NOT restored (see warning below).
        """
        import glob
        import os
        from zoedepth.models.model_io import load_wts
        if hasattr(self.config, 'checkpoint'):
            checkpoint = self.config.checkpoint
        elif hasattr(self.config, 'ckpt_pattern'):
            pattern = self.config.ckpt_pattern
            matches = glob.glob(os.path.join(checkpoint_dir, f'*{pattern}*{ckpt_type}*'))
            if (not (len(matches) > 0)):
                raise ValueError(f'No matches found for the pattern {pattern}')
            checkpoint = matches[0]
        else:
            return
        model = load_wts(self.model, checkpoint)
        print('Loaded weights from {0}'.format(checkpoint))
        warnings.warn('Resuming training is not properly supported in this repo. Implement loading / saving of optimizer and scheduler to support it.')
        self.model = model
    def init_optimizer(self):
        """Create AdamW, using either a single LR or per-group LRs from the model."""
        m = (self.model.module if self.config.multigpu else self.model)
        if self.config.same_lr:
            print('Using same LR')
            if hasattr(m, 'core'):
                # The backbone must be trainable when sharing one LR.
                m.core.unfreeze()
            params = self.model.parameters()
        else:
            print('Using diff LR')
            if (not hasattr(m, 'get_lr_params')):
                raise NotImplementedError(f'Model {m.__class__.__name__} does not implement get_lr_params. Please implement it or use the same LR for all parameters.')
            params = m.get_lr_params(self.config.lr)
        return optim.AdamW(params, lr=self.config.lr, weight_decay=self.config.wd)
    def init_scheduler(self):
        """OneCycle schedule over the full run, one max-LR per param group."""
        lrs = [l['lr'] for l in self.optimizer.param_groups]
        return optim.lr_scheduler.OneCycleLR(self.optimizer, lrs, epochs=self.config.epochs, steps_per_epoch=len(self.train_loader), cycle_momentum=self.config.cycle_momentum, base_momentum=0.85, max_momentum=0.95, div_factor=self.config.div_factor, final_div_factor=self.config.final_div_factor, pct_start=self.config.pct_start, three_phase=self.config.three_phase)
    def train_on_batch(self, batch, train_step):
        """Subclass hook: one optimization step; returns a dict of losses."""
        raise NotImplementedError
    def validate_on_batch(self, batch, val_step):
        """Subclass hook: evaluate one batch; returns (metrics, losses)."""
        raise NotImplementedError
    def raise_if_nan(self, losses):
        """Abort training as soon as any loss becomes NaN."""
        for (key, value) in losses.items():
            if torch.isnan(value):
                raise ValueError(f'{key} is NaN, Stopping training')
    @property
    def iters_per_epoch(self):
        """Optimizer steps per epoch (length of the train loader)."""
        return len(self.train_loader)
    @property
    def total_iters(self):
        """Total optimizer steps over the whole run."""
        return (self.config.epochs * self.iters_per_epoch)
    def should_early_stop(self):
        """True once `config.early_stop` steps have been exceeded (else None)."""
        if (self.config.get('early_stop', False) and (self.step > self.config.early_stop)):
            return True
    def train(self):
        """Main loop: train epochs, log to wandb, validate periodically, and
        keep `latest` / `best` checkpoints."""
        print(f'Training {self.config.name}')
        if (self.config.uid is None):
            self.config.uid = str(uuid.uuid4()).split('-')[(- 1)]
        run_id = f"{dt.now().strftime('%d-%h_%H-%M')}-{self.config.uid}"
        self.config.run_id = run_id
        self.config.experiment_id = f'{self.config.name}{self.config.version_name}_{run_id}'
        # Only rank 0 writes checkpoints / logs in distributed runs.
        self.should_write = ((not self.config.distributed) or (self.config.rank == 0))
        self.should_log = self.should_write
        if self.should_log:
            tags = (self.config.tags.split(',') if (self.config.tags != '') else None)
            wandb.init(project=self.config.project, name=self.config.experiment_id, config=flatten(self.config), dir=self.config.root, tags=tags, notes=self.config.notes, settings=wandb.Settings(start_method='fork'))
        self.model.train()
        self.step = 0
        best_loss = np.inf
        validate_every = int((self.config.validate_every * self.iters_per_epoch))
        if self.config.prefetch:
            # Warm the data pipeline / caches before timing-sensitive training.
            for (i, batch) in (tqdm(enumerate(self.train_loader), desc=f'Prefetching...', total=self.iters_per_epoch) if is_rank_zero(self.config) else enumerate(self.train_loader)):
                pass
        losses = {}
        def stringify_losses(L):
            return '; '.join(map((lambda kv: f'{colors.fg.purple}{kv[0]}{colors.reset}: {round(kv[1].item(), 3):.4e}'), L.items()))
        for epoch in range(self.config.epochs):
            if self.should_early_stop():
                break
            self.epoch = epoch
            if self.should_log:
                wandb.log({'Epoch': epoch}, step=self.step)
            pbar = (tqdm(enumerate(self.train_loader), desc=f'Epoch: {(epoch + 1)}/{self.config.epochs}. Loop: Train', total=self.iters_per_epoch) if is_rank_zero(self.config) else enumerate(self.train_loader))
            for (i, batch) in pbar:
                if self.should_early_stop():
                    print('Early stopping')
                    break
                losses = self.train_on_batch(batch, i)
                self.raise_if_nan(losses)
                if (is_rank_zero(self.config) and self.config.print_losses):
                    pbar.set_description(f'Epoch: {(epoch + 1)}/{self.config.epochs}. Loop: Train. Losses: {stringify_losses(losses)}')
                self.scheduler.step()
                if (self.should_log and ((self.step % 50) == 0)):
                    wandb.log({f'Train/{name}': loss.item() for (name, loss) in losses.items()}, step=self.step)
                self.step += 1
                if self.test_loader:
                    if ((self.step % validate_every) == 0):
                        self.model.eval()
                        if self.should_write:
                            self.save_checkpoint(f'{self.config.experiment_id}_latest.pt')
                        (metrics, test_losses) = self.validate()
                        if self.should_log:
                            wandb.log({f'Test/{name}': tloss for (name, tloss) in test_losses.items()}, step=self.step)
                            wandb.log({f'Metrics/{k}': v for (k, v) in metrics.items()}, step=self.step)
                            if ((metrics[self.metric_criterion] < best_loss) and self.should_write):
                                self.save_checkpoint(f'{self.config.experiment_id}_best.pt')
                                best_loss = metrics[self.metric_criterion]
                        self.model.train()
                        if self.config.distributed:
                            dist.barrier()
                # NOTE(review): `self.step` is also incremented above (after the
                # train log), so it advances twice per batch as written — confirm
                # this is the intended logging/validation cadence.
                self.step += 1
        self.model.eval()
        self.save_checkpoint(f'{self.config.experiment_id}_latest.pt')
        # Final validation pass after the last epoch.
        if self.test_loader:
            (metrics, test_losses) = self.validate()
            if self.should_log:
                wandb.log({f'Test/{name}': tloss for (name, tloss) in test_losses.items()}, step=self.step)
                wandb.log({f'Metrics/{k}': v for (k, v) in metrics.items()}, step=self.step)
                if ((metrics[self.metric_criterion] < best_loss) and self.should_write):
                    self.save_checkpoint(f'{self.config.experiment_id}_best.pt')
                    best_loss = metrics[self.metric_criterion]
        self.model.train()
    def validate(self):
        """Run the full test loader; returns averaged (metrics, losses)."""
        with torch.no_grad():
            losses_avg = RunningAverageDict()
            metrics_avg = RunningAverageDict()
            for (i, batch) in tqdm(enumerate(self.test_loader), desc=f'Epoch: {(self.epoch + 1)}/{self.config.epochs}. Loop: Validation', total=len(self.test_loader), disable=(not is_rank_zero(self.config))):
                (metrics, losses) = self.validate_on_batch(batch, val_step=i)
                if losses:
                    losses_avg.update(losses)
                if metrics:
                    metrics_avg.update(metrics)
            return (metrics_avg.get_value(), losses_avg.get_value())
    def save_checkpoint(self, filename):
        """Save model weights (optimizer state intentionally omitted)."""
        if (not self.should_write):
            return
        root = self.config.save_dir
        if (not os.path.isdir(root)):
            os.makedirs(root)
        fpath = os.path.join(root, filename)
        m = (self.model.module if self.config.multigpu else self.model)
        torch.save({'model': m.state_dict(), 'optimizer': None, 'epoch': self.epoch}, fpath)
    def log_images(self, rgb: Dict[(str, list)]={}, depth: Dict[(str, list)]={}, scalar_field: Dict[(str, list)]={}, prefix='', scalar_cmap='jet', min_depth=None, max_depth=None):
        """Log RGB / colorized-depth / scalar-field panels to wandb.

        The mutable default dicts are safe here: they are only read, never
        mutated (comprehensions build new dicts).
        """
        if (not self.should_log):
            return
        if (min_depth is None):
            try:
                min_depth = self.config.min_depth
                max_depth = self.config.max_depth
            except AttributeError:
                min_depth = None
                max_depth = None
        depth = {k: colorize(v, vmin=min_depth, vmax=max_depth) for (k, v) in depth.items()}
        scalar_field = {k: colorize(v, vmin=None, vmax=None, cmap=scalar_cmap) for (k, v) in scalar_field.items()}
        images = {**rgb, **depth, **scalar_field}
        wimages = {(prefix + 'Predictions'): [wandb.Image(v, caption=k) for (k, v) in images.items()]}
        wandb.log(wimages, step=self.step)
    def log_line_plot(self, data):
        """Log `data` as a matplotlib line plot image to wandb."""
        if (not self.should_log):
            return
        plt.plot(data)
        plt.ylabel('Scale factors')
        wandb.log({'Scale factors': wandb.Image(plt)}, step=self.step)
        plt.close()
    def log_bar_plot(self, title, labels, values):
        """Log a labeled bar chart to wandb."""
        if (not self.should_log):
            return
        data = [[label, val] for (label, val) in zip(labels, values)]
        table = wandb.Table(data=data, columns=['label', 'value'])
        wandb.log({title: wandb.plot.bar(table, 'label', 'value', title=title)}, step=self.step)
class Net(nn.Module):
    """Three-layer MLP classifier emitting class probabilities via softmax."""

    def __init__(self, in_count, out_count):
        super(Net, self).__init__()
        # in_count -> 50 -> 25 -> out_count
        self.fc1 = nn.Linear(in_count, 50)
        self.fc2 = nn.Linear(50, 25)
        self.fc3 = nn.Linear(25, out_count)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        hidden = F.relu(self.fc1(x))
        # Note: only the first layer is followed by a non-linearity.
        logits = self.fc3(self.fc2(hidden))
        return self.softmax(logits)
def side_branch(x, nc, factor, initializer=tf.random_normal_initializer(0, 0.02), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.0001), bias_regularizer=tf.contrib.layers.l2_regularizer(0.0001)):
    """HED-style side output: 1x1 conv to `nc` channels, then learned
    upsampling by `factor` (transposed conv, kernel 2*factor, stride factor).
    """
    # Project to the desired channel count without changing spatial size.
    projected = tf.layers.conv2d(x, nc, kernel_size=1, strides=(1, 1), padding='same', kernel_initializer=initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)
    # Upsample back toward the input resolution.
    upsampled = tf.layers.conv2d_transpose(projected, nc, kernel_size=(2 * factor), strides=(factor, factor), padding='same', kernel_initializer=initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer)
    return upsampled
def row_to_dict(schema, row):
    """Materialize a raw row into a dict, decoding fields per the schema.

    NDARRAY fields are decoded via ``decode_ndarray``; all other feature
    types (including IMAGE) are passed through unchanged.
    """
    row_dict = {}
    for key, field in schema.items():
        value = row[key]
        if field.feature_type == FeatureType.NDARRAY:
            value = decode_ndarray(value)
        row_dict[key] = value
    return row_dict
def taskonomy_features_transform_collated(task_path, encoder_type='taskonomy', dtype=np.float32):
    """Build an observation-transform thunk that encodes batched pixels with a
    (frozen) Taskonomy encoder, or implements the 'pixels_as_state' / 'blind'
    baselines.

    Returns a thunk mapping obs_space -> (pipeline, output Box space of shape
    (8, 16, 16)).
    """
    _rescale_thunk = rescale_centercrop_resize((3, 256, 256))
    _pixels_as_state_thunk = pixels_as_state((8, 16, 16))
    if ((task_path != 'pixels_as_state') and (task_path != 'blind')):
        if (encoder_type == 'taskonomy'):
            net = TaskonomyEncoder(normalize_outputs=False)
        if (task_path != 'None'):
            checkpoint = torch.load(task_path)
            # Checkpoints may store either a whole nn.Module or a state_dict.
            if any([isinstance(v, nn.Module) for v in checkpoint.values()]):
                net = [v for v in checkpoint.values() if isinstance(v, nn.Module)][0]
            elif ('state_dict' in checkpoint.keys()):
                net.load_state_dict(checkpoint['state_dict'])
            else:
                assert False, f'Cannot read task_path {task_path}, no nn.Module or state_dict found. Encoder_type is {encoder_type}'
        net = net.cuda()
        net.eval()
    def encode(x):
        """Run the frozen encoder (identity for the two baseline modes)."""
        if ((task_path == 'pixels_as_state') or (task_path == 'blind')):
            return x
        with torch.no_grad():
            return net(x)
    def _taskonomy_features_transform_thunk(obs_space):
        (rescale, _) = _rescale_thunk(obs_space)
        (pixels_as_state, _) = _pixels_as_state_thunk(obs_space)
        def pipeline(x):
            with torch.no_grad():
                if isinstance(x, torch.Tensor):
                    x = torch.cuda.FloatTensor(x.cuda())
                else:
                    x = torch.cuda.FloatTensor(x).cuda()
                # NHWC uint8 [0, 255] -> NCHW float in [-1, 1].
                x = (x.permute(0, 3, 1, 2) / 255.0)
                x = ((2.0 * x) - 1.0)
                x = encode(x)
                return x
        def pixels_as_state_pipeline(x):
            return pixels_as_state(x).cpu()
        def blind_pipeline(x):
            # Blind baseline: fixed-size zero features regardless of input.
            batch_size = x.shape[0]
            return torch.zeros((batch_size, 8, 16, 16))
        if (task_path == 'blind'):
            return (blind_pipeline, spaces.Box((- 1), 1, (8, 16, 16), dtype))
        elif (task_path == 'pixels_as_state'):
            return (pixels_as_state_pipeline, spaces.Box((- 1), 1, (8, 16, 16), dtype))
        else:
            return (pipeline, spaces.Box((- 1), 1, (8, 16, 16), dtype))
    return _taskonomy_features_transform_thunk
def _print_action_space_report(action_space, title):
    """Print whether `action_space` has uniform bounds; used before/after normalization."""
    high = action_space.high
    low = action_space.low
    shape = action_space.shape[0]
    print(title)
    if np.all(high == high[0]) and np.all(low == low[0]):
        print(f'Uniformly Bounded Action Space in [{low[0]}, {high[0]}]^{shape}')
    else:
        # Fix: materialize the zip, otherwise the f-string prints a useless
        # `<zip object at 0x...>` repr instead of the (low, high) pairs.
        print(f'Non-uniform Bounded Action Space with elements = {list(zip(low, high))}')


def robosuite_action_adjustment(robosuite_env, verbose=False):
    """Wrap a RoboSuite env so its continuous action space is normalized.

    When `verbose`, prints a bounds report before and after wrapping.
    Returns the wrapped environment.
    """
    if verbose:
        _print_action_space_report(robosuite_env.action_space, 'RoboSuite Action Space Report:')
        print('\nAttempting to normalize action space using dc.envs.Normalize...\n')
    env = NormalizeContinuousActionSpace(robosuite_env)
    if verbose:
        _print_action_space_report(env.action_space, 'Normalized RoboSuite Action Space Report:')
    return env
class MSSSIM(torch.nn.Module):
    """Multi-scale SSIM as an nn.Module.

    Stores the window size, averaging flag, and channel count, and delegates
    the actual computation to the functional `msssim` with normalization on.
    """

    def __init__(self, window_size=11, size_average=True, channel=3):
        super(MSSSIM, self).__init__()
        # Configuration is kept on the instance so it can be inspected/reused.
        self.window_size = window_size
        self.size_average = size_average
        self.channel = channel

    def forward(self, img1, img2):
        # Delegate to the functional implementation; `normalize=True` keeps
        # intermediate scale values in a stable range.
        return msssim(
            img1,
            img2,
            window_size=self.window_size,
            size_average=self.size_average,
            normalize=True,
        )
def test_statcast_batter_exitvelo_barrels() -> None:
    """Smoke-test the 2019 batter exit-velocity/barrels query: non-empty frame,
    18 columns, and every row meets the minimum batted-ball-event threshold."""
    minimum_events = 250
    data: pd.DataFrame = statcast_batter_exitvelo_barrels(2019, minimum_events)
    assert data is not None
    assert not data.empty
    assert len(data.columns) == 18
    assert len(data) > 0
    # No returned batter may have fewer attempts than the requested minimum.
    assert len(data[data['attempts'] < minimum_events]) == 0
class Exp(MyExp):
    """YOLOX experiment config (depth 1.33 / width 1.25) training on the
    'ch_all' MOT-style dataset and evaluating on MOT 'val_half'."""

    def __init__(self):
        super(Exp, self).__init__()
        self.num_classes = 1  # single detection class
        self.depth = 1.33     # model depth multiplier
        self.width = 1.25     # model width multiplier
        # Experiment name derives from this config file's basename.
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split('.')[0]
        self.train_ann = 'train.json'
        self.val_ann = 'val_half.json'
        self.input_size = (800, 1440)   # (height, width) for training
        self.test_size = (800, 1440)    # (height, width) for evaluation
        self.random_size = (18, 32)     # multi-scale range — presumably in units of 32 px; confirm
        self.max_epoch = 80
        self.print_interval = 20
        self.eval_interval = 5
        self.test_conf = 0.1            # confidence threshold at test time
        self.nmsthre = 0.7              # NMS IoU threshold
        self.no_aug_epochs = 10         # final epochs without mosaic/mixup
        self.basic_lr_per_img = (0.001 / 64.0)  # lr scaled per image (1e-3 at batch 64)
        self.warmup_epochs = 1

    def get_data_loader(self, batch_size, is_distributed, no_aug=False):
        """Build the training DataLoader: MOTDataset wrapped in MosaicDetection,
        infinite sampler + YOLO batch sampler. `no_aug` disables mosaic."""
        from yolox.data import MOTDataset, TrainTransform, YoloBatchSampler, DataLoader, InfiniteSampler, MosaicDetection
        dataset = MOTDataset(data_dir=os.path.join(get_yolox_datadir(), 'ch_all'), json_file=self.train_ann, name='', img_size=self.input_size, preproc=TrainTransform(rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=500))
        dataset = MosaicDetection(dataset, mosaic=(not no_aug), img_size=self.input_size, preproc=TrainTransform(rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000), degrees=self.degrees, translate=self.translate, scale=self.scale, shear=self.shear, perspective=self.perspective, enable_mixup=self.enable_mixup)
        self.dataset = dataset
        if is_distributed:
            # Split the global batch across workers.
            batch_size = (batch_size // dist.get_world_size())
        sampler = InfiniteSampler(len(self.dataset), seed=(self.seed if self.seed else 0))
        batch_sampler = YoloBatchSampler(sampler=sampler, batch_size=batch_size, drop_last=False, input_dimension=self.input_size, mosaic=(not no_aug))
        dataloader_kwargs = {'num_workers': self.data_num_workers, 'pin_memory': True}
        dataloader_kwargs['batch_sampler'] = batch_sampler
        train_loader = DataLoader(self.dataset, **dataloader_kwargs)
        return train_loader

    def get_eval_loader(self, batch_size, is_distributed, testdev=False):
        """Build the sequential (or distributed) evaluation DataLoader on MOT val."""
        from yolox.data import MOTDataset, ValTransform
        valdataset = MOTDataset(data_dir=os.path.join(get_yolox_datadir(), 'mot'), json_file=self.val_ann, img_size=self.test_size, name='train', preproc=ValTransform(rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)))
        if is_distributed:
            batch_size = (batch_size // dist.get_world_size())
            sampler = torch.utils.data.distributed.DistributedSampler(valdataset, shuffle=False)
        else:
            sampler = torch.utils.data.SequentialSampler(valdataset)
        dataloader_kwargs = {'num_workers': self.data_num_workers, 'pin_memory': True, 'sampler': sampler}
        dataloader_kwargs['batch_size'] = batch_size
        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)
        return val_loader

    def get_evaluator(self, batch_size, is_distributed, testdev=False):
        """Construct a COCO-style evaluator over the eval loader."""
        from yolox.evaluators import COCOEvaluator
        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)
        evaluator = COCOEvaluator(dataloader=val_loader, img_size=self.test_size, confthre=self.test_conf, nmsthre=self.nmsthre, num_classes=self.num_classes, testdev=testdev)
        return evaluator
def construct_exp_name(arg_dict: dict):
    """Build an experiment name from selected entries of `arg_dict`.

    The name starts with `arg_dict['model']` followed by `_<TAG><value>` parts
    in a fixed order. Booleans and lists/tuples collapse to y/n (truthiness),
    strings are lower-cased with underscores removed (empty strings are
    skipped), None becomes 'n', numbers are kept verbatim.
    """
    focus_item = OrderedDict({'input_size': 's', 'batch_size': 'bs', 'epoch_num': 'e', 'warmup_epoch': 'we', 'use_amp': 'amp', 'lr': 'lr', 'lr_type': 'lt', 'optim': 'ot', 'use_aux_loss': 'al', 'use_bigt': 'bi', 'size_list': 'ms', 'info': 'info'})
    parts = [f"{arg_dict['model']}"]
    for key, tag in focus_item.items():
        item = arg_dict[key]
        if isinstance(item, bool):
            item = 'Y' if item else 'N'
        elif isinstance(item, (list, tuple)):
            # Only presence/absence is encoded, not the contents.
            item = 'Y' if item else 'N'
        elif isinstance(item, str):
            if not item:
                continue  # empty strings contribute nothing
            # Underscores would collide with the part separator.
            item = item.replace('_', '')
        elif item is None:  # fix: identity comparison, not `== None`
            item = 'N'
        if isinstance(item, str):
            item = item.lower()
        parts.append(f'{tag.upper()}{item}')
    # Join once instead of repeated string concatenation.
    return '_'.join(parts)
def bleu(refs, candidate, ground=0, smooth=1):
    """Score `candidate` against reference sentences with cooked BLEU.

    References and the candidate are first 'cooked' into count structures,
    then scored via `score_cooked`.
    """
    cooked_refs = cook_refs(refs)
    cooked_candidate = cook_test(candidate, cooked_refs)
    return score_cooked([cooked_candidate], ground=ground, smooth=smooth)
def trans_conv(dim=2):
    """Return the transposed-convolution class for a given spatial rank:
    ConvTranspose2d when dim == 2, ConvTranspose3d otherwise."""
    return nn.ConvTranspose2d if dim == 2 else nn.ConvTranspose3d
def inquire_confirm(msg):
    """Interactively ask the user to confirm `msg`; returns True/False
    (default True on plain Enter)."""
    question = {
        'type': 'confirm',
        'message': (msg + ' Confirm?'),
        'name': 'confirm',
        'default': True,
    }
    answers = prompt([question], style=custom_style_2)
    return answers['confirm']
def pretrain():
    """Launch MAE ViT-B/16 pre-training on ImageNet-LT with the settings below."""
    T = Trainer()
    T.task = 'debug'
    T.note = f'debug'
    T.batch = 64            # per-step batch size
    T.epochs = 800
    T.warmup_epochs = 40
    T.input_size = 224
    T.accum_iter = 16       # gradient-accumulation steps
    T.device = '0,1,2,3'    # GPU ids
    T.dataset = 'ImageNet-LT'
    T.model = f'mae_vit_base_patch16'
    T.mask_ratio = 0.75     # MAE patch masking ratio
    T.blr = 0.00015         # base learning rate
    T.weight_decay = 0.05
    T.num_workers = 16
    T.pretrain()
def train(data, datadir, model, num_cls, outdir='', num_epoch=100, batch=128, lr=0.0001, betas=(0.9, 0.999), weight_decay=0):
    """Train `model` on `data` with Adam, evaluating on the test split after
    every epoch, then save the weights to `<outdir>/<model>_net_<data>.pth`.

    Returns the trained network.
    """
    # Pinned-memory loading only makes sense when a GPU is available.
    loader_kwargs = {'num_workers': 1, 'pin_memory': True} if torch.cuda.is_available() else {}
    net = get_model(model, num_cls=num_cls)
    print('-------Training net')
    print(net)
    shared = dict(batch=batch, rootdir=datadir, num_channels=net.num_channels,
                  image_size=net.image_size, download=True, kwargs=loader_kwargs)
    train_data = load_data(data, 'train', **shared)
    test_data = load_data(data, 'test', **shared)
    optimizer = optim.Adam(net.parameters(), lr=lr, betas=betas, weight_decay=weight_decay)
    print('Training {} model for {}'.format(model, data))
    for epoch in range(num_epoch):
        train_epoch(train_data, net, optimizer, epoch)
        if test_data is not None:
            print('Evaluating {}-{} model on {} test set'.format(model, data, data))
            test(test_data, net)
    # Persist the final checkpoint.
    os.makedirs(outdir, exist_ok=True)
    outfile = join(outdir, '{:s}_net_{:s}.pth'.format(model, data))
    print('Saving to', outfile)
    net.save(outfile)
    return net
def texture(tex, uv, uv_da=None, filter_mode='auto', boundary_mode='wrap', tex_const=False, max_mip_level=None):
    """Sample `tex` at coordinates `uv` via the native texture plugin.

    `filter_mode='auto'` picks mipmapped linear filtering when `uv_da`
    (UV derivatives) are supplied, plain linear otherwise. `boundary_mode`
    chooses cube/wrap/clamp/zero addressing. Returns a 4-D tensor
    (batch, height, width, channels).
    NOTE(review): the bare `_gradient` expressions below look like stripped
    `@tf.custom_gradient` decorators — without them the nested functions
    return `(out, grad)` tuples instead of gradient-aware tensors; confirm
    against the original source.
    """
    assert ((tex_const is True) or (tex_const is False))
    if (filter_mode == 'auto'):
        # Mipmapping requires derivatives; fall back to linear without them.
        filter_mode = ('linear-mipmap-linear' if (uv_da is not None) else 'linear')
    tex_const = (tex_const or _is_constant(tex, np.float32))
    tex_const = (1 if tex_const else 0)  # plugin expects an int flag
    if (max_mip_level is None):
        max_mip_level = (- 1)  # -1 = let the plugin choose the mip chain depth
    else:
        max_mip_level = int(max_mip_level)
        assert (max_mip_level >= 0)
    tex = tf.convert_to_tensor(tex, dtype=tf.float32)
    uv = tf.convert_to_tensor(uv, dtype=tf.float32)
    if ('mipmap' in filter_mode):
        uv_da = tf.convert_to_tensor(uv_da, dtype=tf.float32)
    # Propagate whatever static shape information is available.
    out_shape = [None, None, None, None]
    if (uv.shape.rank is not None):
        assert (uv.shape.rank == 4)
        out_shape = [uv.shape[0].value, uv.shape[1].value, uv.shape[2].value, None]
    if (tex.shape.rank is not None):
        # Cube maps carry an extra face dimension.
        assert (tex.shape.rank == (5 if (boundary_mode == 'cube') else 4))
        out_shape[(- 1)] = tex.shape[(- 1)].value
    # A single mip level degenerates to plain linear filtering.
    if ((max_mip_level == 0) and (filter_mode in ['linear-mipmap-nearest', 'linear-mipmap-linear'])):
        filter_mode = 'linear'
    filter_mode_dict = {'nearest': 0, 'linear': 1, 'linear-mipmap-nearest': 2, 'linear-mipmap-linear': 3}
    filter_mode_enum = filter_mode_dict[filter_mode]
    boundary_mode_dict = {'cube': 0, 'wrap': 1, 'clamp': 2, 'zero': 3}
    boundary_mode_enum = boundary_mode_dict[boundary_mode]

    _gradient
    def func_linear_mipmap_linear(tex, uv, uv_da):
        # Forward pass also returns the mip stack, needed by the backward op.
        (out, mip) = _get_plugin().texture_fwd_mip(tex, uv, uv_da, filter_mode_enum, boundary_mode_enum, tex_const, max_mip_level)
        out.set_shape(out_shape)
        def grad(dy):
            return _get_plugin().texture_grad_linear_mipmap_linear(tex, uv, dy, uv_da, mip, filter_mode_enum, boundary_mode_enum, max_mip_level)
        return (out, grad)

    _gradient
    def func_linear_mipmap_nearest(tex, uv):
        (out, mip) = _get_plugin().texture_fwd_mip(tex, uv, uv_da, filter_mode_enum, boundary_mode_enum, tex_const, max_mip_level)
        out.set_shape(out_shape)
        def grad(dy):
            return _get_plugin().texture_grad_linear_mipmap_nearest(tex, uv, dy, uv_da, mip, filter_mode_enum, boundary_mode_enum, max_mip_level)
        return (out, grad)

    _gradient
    def func_linear(tex, uv):
        out = _get_plugin().texture_fwd(tex, uv, filter_mode_enum, boundary_mode_enum)
        out.set_shape(out_shape)
        def grad(dy):
            return _get_plugin().texture_grad_linear(tex, uv, dy, filter_mode_enum, boundary_mode_enum)
        return (out, grad)

    _gradient
    def func_nearest(tex):
        # uv is captured from the enclosing scope, so no gradient flows to it.
        out = _get_plugin().texture_fwd(tex, uv, filter_mode_enum, boundary_mode_enum)
        out.set_shape(out_shape)
        def grad(dy):
            return _get_plugin().texture_grad_nearest(tex, uv, dy, filter_mode_enum, boundary_mode_enum)
        return (out, grad)

    # Dispatch on the resolved filter mode.
    if (filter_mode == 'linear-mipmap-linear'):
        return func_linear_mipmap_linear(tex, uv, uv_da)
    elif (filter_mode == 'linear-mipmap-nearest'):
        return func_linear_mipmap_nearest(tex, uv)
    elif (filter_mode == 'linear'):
        return func_linear(tex, uv)
    elif (filter_mode == 'nearest'):
        return func_nearest(tex)
# NOTE(review): the bare `_sentencepiece` expression below looks like a
# stripped `@require_sentencepiece` decorator — confirm against upstream.
_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for the SentencePiece-backed M2M100 tokenizer."""
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        """Write a tiny vocab + sample SentencePiece model to the temp dir and
        round-trip the tokenizer through save_pretrained."""
        super().setUp()
        vocab = ['</s>', '<unk>', 'This', 'is', 'a', 't', 'est', 'G', '<pad>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, (save_dir / VOCAB_FILES_NAMES['vocab_file']))
        if (not (save_dir / VOCAB_FILES_NAMES['spm_file']).exists()):
            copyfile(SAMPLE_SP, (save_dir / VOCAB_FILES_NAMES['spm_file']))
        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ('This is a test', 'This is a test')

    def test_convert_token_and_id(self):
        # '</s>' is id 0 in the tiny vocab written by setUp.
        token = '</s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '</s>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[(- 1)], '<s>')
        self.assertEqual(len(vocab_keys), 10)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 117)

    # NOTE(review): the bare string expression below looks like a stripped
    # `@unittest.skip(...)` decorator — confirm against upstream.
    ('Skip this test while all models are still to be uploaded.')
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        """Tokenize, map to ids, map back, and detokenize a simple sentence."""
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['This', 'is', 'a', 't', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6])
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ['This', 'is', 'a', 't', 'est'])
        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, 'This is a test')

    def test_tokenizer_integration(self):
        # Golden encodings for the pinned facebook/m2m100_418M revision.
        expected_encoding = {'input_ids': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        self.tokenizer_integration_test_util(expected_encoding=expected_encoding, model_name='facebook/m2m100_418M', revision='c168bae485c864188cf9aa0e4108b0b6934dc91e')
class _Open3DArgumentParser(argparse.ArgumentParser):
def error(self, message):
print(f'''Error: {message}
''', file=sys.stderr)
self.exit(2) |
def _get_filenames_with_labels(mode, data_dir, split_dir):
if (mode == 'train'):
scenario_list_file = os.path.join(split_dir, 'train.txt')
elif (mode == 'eval'):
scenario_list_file = os.path.join(split_dir, 'eval.txt')
elif (mode == 'test'):
scenario_list_file = os.path.join(split_dir, 'test.txt')
else:
raise ValueError(('Mode %s is not supported!' % mode))
with open(scenario_list_file) as f:
scenario_list = f.read().split('\n')
scenario_list.pop()
filenames = []
labels = []
for (i, scenario) in enumerate(scenario_list):
if (((i + 1) % 100) == 0):
print(('%s / %s : %s' % ((i + 1), len(scenario_list), scenario)))
scenario_dir = os.path.join(data_dir, 'recordings', scenario)
if (('vcom=0' in scenario) and ('vpsf=0' in scenario)):
label = 0.0
else:
label = 1.0
for img_file in filter((lambda f: (f.startswith('rgb-') and f.endswith('-mono-0.png'))), os.listdir(scenario_dir)):
filenames.append(os.path.join(scenario_dir, img_file))
labels.append(label)
return (filenames, labels) |
def loss_ISD(x, y):
    """Itakura-Saito divergence sum: sum(x/y - log(x/y) - 1).

    A small epsilon is added to `y` to avoid division by zero; zero when
    x == y elementwise (up to the epsilon).
    """
    ratio = x / (y + 1e-10)
    return torch.sum(ratio - torch.log(ratio) - 1)
def assert_dict_equal(source, target):
    """Assert that `source` and `target` hold exactly the same key/value pairs
    (same size, and every target entry matches the source entry)."""
    assert len(source) == len(target)
    for key, expected in target.items():
        assert source[key] == expected
def pixel_deflection_without_map(img, deflections, window):
    """Pixel-deflection defense without a saliency map.

    Repeatedly overwrites a random pixel (independently per channel) with a
    nearby pixel chosen uniformly within +/-`window` in both axes, until
    `deflections` replacements have been made. Operates on a copy; the input
    image is not modified.
    """
    img = np.copy(img)
    (H, W, C) = img.shape
    while (deflections > 0):
        for c in range(C):
            # Random target coordinate for this channel.
            (x, y) = (randint(0, (H - 1)), randint(0, (W - 1)))
            while True:
                # Resample the offset until the source lands inside the image.
                # NOTE(review): the `> 0` checks exclude row/column 0 as a
                # source position — possibly an off-by-one (`>= 0` intended)?
                (a, b) = (randint(((- 1) * window), window), randint(((- 1) * window), window))
                if (((x + a) < H) and ((x + a) > 0) and ((y + b) < W) and ((y + b) > 0)):
                    break
            img[(x, y, c)] = img[((x + a), (y + b), c)]
            # Each per-channel replacement counts as one deflection.
            deflections -= 1
    return img
# NOTE(review): the bare `_module()` call below looks like a stripped
# `@MODELS.register_module()` decorator — confirm against the original source.
_module()
class SingleStageInstanceSegmentor(BaseDetector):
    """Single-stage instance segmentor: backbone (+ optional neck), optional
    bbox head, and a mandatory mask head."""

    def __init__(self, backbone: ConfigType, neck: OptConfigType=None, bbox_head: OptConfigType=None, mask_head: OptConfigType=None, train_cfg: OptConfigType=None, test_cfg: OptConfigType=None, data_preprocessor: OptConfigType=None, init_cfg: OptMultiConfig=None) -> None:
        super().__init__(data_preprocessor=data_preprocessor, init_cfg=init_cfg)
        self.backbone = MODELS.build(backbone)
        if (neck is not None):
            self.neck = MODELS.build(neck)
        else:
            self.neck = None
        if (bbox_head is not None):
            # Inject deep-copied train/test cfgs into the head config before building.
            bbox_head.update(train_cfg=copy.deepcopy(train_cfg))
            bbox_head.update(test_cfg=copy.deepcopy(test_cfg))
            self.bbox_head = MODELS.build(bbox_head)
        else:
            self.bbox_head = None
        # The mask head is not optional for this segmentor.
        assert mask_head, f'`mask_head` must be implemented in {self.__class__.__name__}'
        mask_head.update(train_cfg=copy.deepcopy(train_cfg))
        mask_head.update(test_cfg=copy.deepcopy(test_cfg))
        self.mask_head = MODELS.build(mask_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

    def extract_feat(self, batch_inputs: Tensor) -> Tuple[Tensor]:
        """Run the backbone and, when present, the neck."""
        x = self.backbone(batch_inputs)
        if self.with_neck:
            x = self.neck(x)
        return x

    def _forward(self, batch_inputs: Tensor, batch_data_samples: OptSampleList=None, **kwargs) -> tuple:
        """Network forward used for tracing/debugging: returns raw head outputs."""
        outs = ()
        x = self.extract_feat(batch_inputs)
        positive_infos = None
        if self.with_bbox:
            assert (batch_data_samples is not None)
            bbox_outs = self.bbox_head.forward(x)
            outs = (outs + (bbox_outs,))
            # A loss pass is needed here so the bbox head caches its positive
            # assignments, which the mask head consumes below.
            _ = self.bbox_head.loss(x, batch_data_samples, **kwargs)
            positive_infos = self.bbox_head.get_positive_infos()
        if (positive_infos is None):
            mask_outs = self.mask_head.forward(x)
        else:
            mask_outs = self.mask_head.forward(x, positive_infos)
        outs = (outs + (mask_outs,))
        return outs

    def loss(self, batch_inputs: Tensor, batch_data_samples: SampleList, **kwargs) -> dict:
        """Compute bbox-head (optional) and mask-head losses; keys must not clash."""
        x = self.extract_feat(batch_inputs)
        losses = dict()
        positive_infos = None
        if self.with_bbox:
            bbox_losses = self.bbox_head.loss(x, batch_data_samples, **kwargs)
            losses.update(bbox_losses)
            positive_infos = self.bbox_head.get_positive_infos()
        mask_loss = self.mask_head.loss(x, batch_data_samples, positive_infos=positive_infos, **kwargs)
        # Guard against silently overwriting bbox losses with mask losses.
        assert (not (set(mask_loss.keys()) & set(losses.keys())))
        losses.update(mask_loss)
        return losses

    def predict(self, batch_inputs: Tensor, batch_data_samples: SampleList, rescale: bool=True, **kwargs) -> SampleList:
        """Predict instances; bbox rescaling is deferred to the mask head when present."""
        x = self.extract_feat(batch_inputs)
        if self.with_bbox:
            # The mask head performs the final rescale, so skip it here.
            bbox_rescale = (rescale if (not self.with_mask) else False)
            results_list = self.bbox_head.predict(x, batch_data_samples, rescale=bbox_rescale)
        else:
            results_list = None
        results_list = self.mask_head.predict(x, batch_data_samples, rescale=rescale, results_list=results_list)
        batch_data_samples = self.add_pred_to_datasample(batch_data_samples, results_list)
        return batch_data_samples
class Upsampling(nn.Module):
def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
super(Upsampling, self).__init__()
ops = []
ops.append(nn.Upsample(scale_factor=stride, mode='trilinear', align_corners=False))
ops.append(nn.Conv3d(n_filters_in, n_filters_out, kernel_size=3, padding=1))
if (normalization == 'batchnorm'):
ops.append(nn.BatchNorm3d(n_filters_out))
elif (normalization == 'groupnorm'):
ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
elif (normalization == 'instancenorm'):
ops.append(nn.InstanceNorm3d(n_filters_out))
elif (normalization != 'none'):
assert False
ops.append(nn.ReLU(inplace=False))
self.conv = nn.Sequential(*ops)
def forward(self, x):
x = self.conv(x)
return x |
class FWMRNN(nn.Module):
    """Single-layer LSTM (with weight drop) optionally augmented with a Fast
    Weight Memory (FWM) whose output is projected back and added residually."""

    def __init__(self, isize, hsize, withFWM, params, wdrop=0.5):
        super().__init__()
        # FWM dimensions come from the params dict.
        s_size = params['s_size']
        r_size = params['r_size']
        t_size = params['t_size']
        self.rnn = nn.LSTM(isize, hsize, 1, dropout=0)
        if withFWM:
            self.fwm = FWM(hsize, s_size, r_size, t_size)
            # Projects FWM output (t_size) back to the hidden size.
            self.linear = nn.Linear(t_size, hsize)
        self.isize = isize
        self.hsize = hsize
        self.hasFWM = withFWM
        # DropConnect on the recurrent weights.
        self.rnn = WeightDrop(self.rnn, ['weight_hh_l0'], dropout=wdrop)

    def reset(self):
        # No per-sequence state to reset here; kept for interface parity.
        pass

    def forward(self, inputs, hidden):
        """`hidden` is (lstm_hidden, F) where F is the fast-weight tensor.

        Returns (output, (lstm_hidden, F)); with FWM enabled, output is the
        LSTM output plus the projected fast-weight readout at each step.
        """
        (lstm_hidden, F) = hidden
        (x, lstm_hidden) = self.rnn(inputs, lstm_hidden)
        outputs = []
        if self.hasFWM:
            # Step through time: write to, then read from, the fast weights.
            for (t, x_t) in enumerate(x):
                F = self.fwm.write(x_t, F)
                o_t = self.fwm(x_t, F)
                outputs.append(o_t)
            s = torch.stack(outputs, dim=0)
            # Residual connection around the FWM readout.
            output = (x + self.linear(s))
        else:
            output = x
        hidden = (lstm_hidden, F)
        return (output, hidden)
def getList():
    """Return display strings ('- <env id>') for every registered gym
    environment whose id contains 'Bullet'."""
    bullet_envs = []
    for spec in gym.envs.registry.all():
        if 'Bullet' in spec.id:
            bullet_envs.append('- ' + spec.id)
    return bullet_envs
def as_tensor(data, dtype=None):
    """Convert `data` to a torch.Tensor, optionally casting to `dtype`.

    Existing tensors are returned as-is when no cast is needed (no copy);
    everything else goes through torch.as_tensor.
    """
    if not isinstance(data, torch.Tensor):
        return torch.as_tensor(data, dtype=dtype)
    if dtype is None or data.dtype == dtype:
        return data  # already the right type: avoid a copy
    return data.type(dtype=dtype)
class TestWeightSharingAcc(unittest.TestCase):
    """End-to-end check that engine inference accuracy on BERT-mini/MRPC is
    identical with and without weight sharing across 7 parallel processes."""

    # NOTE(review): `setUpClass`/`tearDownClass` lack @classmethod and take
    # `self`; `self.skipTest(self, ...)` also passes the instance explicitly.
    # Confirm how the project's runner invokes these before changing them.
    def setUpClass(self):
        self.skipTest(self, 'currently not support Unit Test for dispatcher, but this function is supported. Will improve Unit Test very soon.')
        # Stand-alone evaluation script each spawned process executes: loads
        # the compiled IR, runs MRPC validation, writes accuracy to a log file.
        code = '\nimport time\nimport math\nimport os\nimport sys\nimport numpy as np\nfrom transformers import AutoTokenizer\nfrom datasets import load_from_disk, load_metric,load_dataset\nfrom intel_extension_for_transformers.llm.runtime.deprecated.compile.graph import Graph\n\n\nclass MRPCDataSet():\n    def __init__(self, batch_size, data_dir, tokenizer_dir):\n        self.batch_size = batch_size\n        dataset = load_dataset(\'glue\',\'mrpc\',cache_dir=data_dir,split=\'validation\')\n        tokenizer = AutoTokenizer.from_pretrained(tokenizer_dir)\n        self.dataset = dataset.map(lambda e: tokenizer(e[\'sentence1\'], e[\'sentence2\'],\n            truncation=False, padding=\'do_not_pad\'), batched=True)\n\n    def __getitem__(self, idx):\n        start = idx * self.batch_size\n        end = start + self.batch_size\n        if end > len(self.dataset):\n            input_ids_data = self.dataset[start:][\'input_ids\']\n            segment_ids_data = self.dataset[start:][\'token_type_ids\']\n            input_mask_data = self.dataset[start:][\'attention_mask\']\n            label_data = self.dataset[start:][\'label\']\n        else:\n            input_ids_data = self.dataset[start:end][\'input_ids\']\n            segment_ids_data = self.dataset[start:end][\'token_type_ids\']\n            input_mask_data = self.dataset[start:end][\'attention_mask\']\n            label_data = self.dataset[start:end][\'label\']\n\n        sample_size = len(input_ids_data) if isinstance(input_ids_data, list) else 1\n\n        return [np.array(input_ids_data).reshape(sample_size, -1).astype(\'int32\'),\n                np.array(segment_ids_data).reshape(sample_size, -1).astype(\'int32\'),\n                np.array(input_mask_data).reshape(sample_size, -1).astype(\'int32\')], np.array(label_data).reshape(sample_size, -1).astype(\'int32\')\n\n    def __len__(self):\n        return math.ceil(len(self.dataset)/self.batch_size)\n\ndef load_model(engine_model_path):\n    model = Graph()\n    model.graph_init(os.path.join(engine_model_path, "conf.yaml"),\n                     os.path.join(engine_model_path, "model.bin"), load_weight=True)\n    return model\n\ndef run():\n    os.environ[\'GLOG_minloglevel\'] = \'2\'\n    # cycle buffer\n    if os.environ.get(\'DIRECT_BUFFER\'):\n        del os.environ[\'DIRECT_BUFFER\']\n    if os.environ.get(\'UNIFIED_BUFFER\'):\n        del os.environ[\'UNIFIED_BUFFER\']\n    data_path = "/home/tensorflow/.cache/nlp_toolkit/bert_mini_mrpc"\n    model_path = "/tf_dataset2/models/nlp_toolkit/bert_mini_mrpc"\n    dataset = MRPCDataSet(1, data_path, model_path)\n    model = load_model("ir")\n    metric = load_metric(\'glue\', \'mrpc\')\n    log_path = sys.argv[1]\n    for idx in range(len(dataset)):\n        inputs = dataset[idx][0]\n        labels = dataset[idx][1]\n        predictions = model.inference(inputs)\n        predictions = list(predictions.values())[0]\n        predictions = np.argmax(predictions, axis=1)\n        metric.add_batch(\n            predictions=predictions,\n            references=labels,\n        )\n    eval_metric = metric.compute()\n    acc = eval_metric.get("accuracy")\n    with open(log_path, \'w\') as f:\n        f.write(format(acc, \'.4f\'))\n\nif __name__ == "__main__":\n    run()\n\n'
        with open('run.py', 'w', encoding='utf-8') as f:
            f.write(code)
        model_path = '/tf_dataset2/models/nlp_toolkit/bert_mini_mrpc'
        torch_model = BertForSequenceClassification.from_pretrained(model_path)
        with torch.no_grad():
            inputs = {'input_ids': torch.ones(1, 128, dtype=torch.int32), 'attention_mask': torch.ones(1, 128, dtype=torch.int32), 'token_type_ids': torch.ones(1, 128, dtype=torch.int32)}
            outputs = torch_model(**inputs)
        symbolic_names = {0: 'batch_size', 1: 'max_seq_len'}
        # Export the torch model to ONNX, then compile to the engine IR that
        # run.py loads from ./ir.
        torch.onnx.export(torch_model, (inputs['input_ids'], inputs['attention_mask'], inputs['token_type_ids']), 'onnx_fp32.onnx', opset_version=11, do_constant_folding=True, input_names=['input_ids', 'input_mask', 'segment_ids'], output_names=['output'], dynamic_axes={'input_ids': symbolic_names, 'input_mask': symbolic_names, 'segment_ids': symbolic_names})
        graph = compile('onnx_fp32.onnx')
        graph.save()

    def tearDownClass(self):
        """Remove the generated script, ONNX model, IR dir, and per-process logs."""
        os.remove('run.py')
        os.remove('onnx_fp32.onnx')
        shutil.rmtree('./ir', ignore_errors=True)
        for i in range(7):
            try:
                os.remove((('log' + str(i)) + '_ws0.txt'))
                os.remove((('log' + str(i)) + '_ws1.txt'))
            except:
                continue

    def test_weight_sharing_acc(self):
        """Run 7 pinned processes with WEIGHT_SHARING on, then off, and compare
        the per-process accuracies — they must match exactly."""
        cmd = 'numactl -l -C 0-3 python run.py log0_ws0.txt & numactl -l -C 4-7 python run.py log1_ws0.txt & numactl -l -C 8-11 python run.py log2_ws0.txt & numactl -l -C 12-15 python run.py log3_ws0.txt &numactl -l -C 16-19 python run.py log4_ws0.txt &numactl -l -C 20-23 python run.py log5_ws0.txt &numactl -l -C 24-27 python run.py log6_ws0.txt'
        os.environ['WEIGHT_SHARING'] = '1'
        os.environ['INST_NUM'] = '7'
        os.environ['GLOG_minloglevel'] = '2'
        if os.environ.get('DIRECT_BUFFER'):
            del os.environ['DIRECT_BUFFER']
        if os.environ.get('UNIFIED_BUFFER'):
            del os.environ['UNIFIED_BUFFER']
        process = subprocess.Popen(cmd, shell=True)
        process.wait()
        if (process.returncode != 0):
            raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
        # Backgrounded children may still be writing; poll (up to 600s) for logs.
        for i in range(7):
            log_exist = os.path.exists((('log' + str(i)) + '_ws0.txt'))
            time_exit = 0
            while (not log_exist):
                time.sleep(1)
                time_exit += 1
                log_exist = os.path.exists((('log' + str(i)) + '_ws0.txt'))
                if (time_exit >= 600):
                    break
        acc_on = []
        for i in range(7):
            with open((('log' + str(i)) + '_ws0.txt'), 'r') as f:
                acc_on.append(float(f.readline().strip()))
        # Second pass: weight sharing disabled.
        del os.environ['WEIGHT_SHARING']
        cmd = 'numactl -l -C 0-3 python run.py log0_ws1.txt & numactl -l -C 4-7 python run.py log1_ws1.txt & numactl -l -C 8-11 python run.py log2_ws1.txt & numactl -l -C 12-15 python run.py log3_ws1.txt &numactl -l -C 16-19 python run.py log4_ws1.txt &numactl -l -C 20-23 python run.py log5_ws1.txt &numactl -l -C 24-27 python run.py log6_ws1.txt'
        process = subprocess.Popen(cmd, shell=True)
        process.wait()
        if (process.returncode != 0):
            raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
        for i in range(7):
            log_exist = os.path.exists((('log' + str(i)) + '_ws1.txt'))
            time_exit = 0
            while (not log_exist):
                time.sleep(1)
                time_exit += 1
                log_exist = os.path.exists((('log' + str(i)) + '_ws1.txt'))
                if (time_exit >= 600):
                    break
        acc_off = []
        for i in range(7):
            with open((('log' + str(i)) + '_ws1.txt'), 'r') as f:
                acc_off.append(float(f.readline().strip()))
        self.assertListEqual(acc_on, acc_off)
class LinearClassifier(nn.Module):
    """Single linear probe over pre-extracted features; the feature dimension
    is looked up from `model_dict` by backbone name."""

    def __init__(self, name='cnn6', num_classes=4, device='cpu'):
        super(LinearClassifier, self).__init__()
        feat_dim = model_dict[name][1]
        self.fc = nn.Linear(feat_dim, num_classes).to(device)

    def forward(self, features):
        return self.fc(features)
class TestClipGradNorm(unittest.TestCase):
    """Gradient-clipping tests spawned across 2 worker processes."""

    # NOTE(review): the bare tuple expressions below look like the arguments
    # of stripped `@unittest.skipIf(...)` decorators (skip when fewer than
    # 2 CUDA devices are available) — confirm and restore the decorators.
    (((not torch.cuda.is_available()) or (torch.cuda.device_count() < 2)), 'No gpu available for cuda tests')
    def test_fsdp_strategy_clip_grad_norm(self):
        # Spawn 2 processes running the FSDP clip-grad-norm check on a free port.
        world_size = 2
        mp.spawn(_fsdp_strategy_clip_grad_norm, args=(world_size, find_free_port()), nprocs=world_size, join=True, daemon=False, start_method='spawn')

    (((not torch.cuda.is_available()) or (torch.cuda.device_count() < 2)), 'No gpu available for cuda tests')
    def test_tp_strategy_clip_grad_norm(self):
        # Same check for the tensor-parallel strategy.
        world_size = 2
        mp.spawn(_tp_strategy_clip_grad_norm, args=(world_size, find_free_port()), nprocs=world_size, join=True, daemon=False, start_method='spawn')
# NOTE(review): the bare `_experiment` expression below looks like a stripped
# garage `@wrap_experiment` decorator — confirm against the original source.
_experiment
def categorical_cnn_policy(ctxt, env_id, seed):
    """Train PPO with a categorical CNN policy and Gaussian CNN baseline on
    `env_id`. Network and training settings are read from the module-level
    `hyper_params` dict.
    """
    deterministic.set_seed(seed)
    with LocalTFRunner(ctxt, max_cpus=12) as runner:
        env = GarageEnv(normalize(gym.make(env_id)))
        policy = CategoricalCNNPolicy(env_spec=env.spec, conv_filters=hyper_params['conv_filters'], conv_strides=hyper_params['conv_strides'], conv_pad=hyper_params['conv_pad'], hidden_sizes=hyper_params['hidden_sizes'])
        # Baseline mirrors the policy's convolutional architecture.
        baseline = GaussianCNNBaseline(env_spec=env.spec, regressor_args=dict(filters=hyper_params['conv_filters'], strides=hyper_params['conv_strides'], padding=hyper_params['conv_pad'], hidden_sizes=hyper_params['hidden_sizes'], use_trust_region=hyper_params['use_trust_region']))
        algo = PPO(env_spec=env.spec, policy=policy, baseline=baseline, max_path_length=100, discount=0.99, gae_lambda=0.95, lr_clip_range=0.2, policy_ent_coeff=0.0, optimizer_args=dict(batch_size=32, max_epochs=10, learning_rate=0.001), flatten_input=False)
        runner.setup(algo, env)
        runner.train(n_epochs=hyper_params['n_epochs'], batch_size=hyper_params['batch_size'])
def convert(path):
    """Read a TextGrid file and flatten its first tier into two CSV strings:
    (comma-joined interval end times, comma-joined interval labels)."""
    grid = textgrid.TextGrid.fromFile(path)
    tier = grid[0]
    entries = [tier[j] for j in range(len(tier))]
    times_csv = ','.join(str(entry.maxTime) for entry in entries)
    texts_csv = ','.join(entry.mark for entry in entries)
    return (times_csv, texts_csv)
class TestGroupedBatchSampler(unittest.TestCase):
    """Checks that GroupedBatchSampler emits full, group-homogeneous batches."""

    def test_missing_group_id(self):
        # All items share one group: every batch must still have exactly 2 items.
        base_sampler = SequentialSampler(list(range(100)))
        group_ids = [1] * 100
        batch_sampler = GroupedBatchSampler(base_sampler, group_ids, 2)
        for batch in batch_sampler:
            self.assertEqual(len(batch), 2)

    def test_groups(self):
        # Alternating groups: items in a batch must come from the same group,
        # i.e. indices share parity, so their sum is even.
        base_sampler = SequentialSampler(list(range(100)))
        group_ids = [1, 0] * 50
        batch_sampler = GroupedBatchSampler(base_sampler, group_ids, 2)
        for batch in batch_sampler:
            self.assertEqual((batch[0] + batch[1]) % 2, 0)
class OrderedEasyDict(OrderedDict):
    """OrderedDict with attribute-style access: `d.key` and `d['key']` stay in
    sync, and nested dicts (also inside lists/tuples) are wrapped recursively."""

    def __init__(self, d=None, **kwargs):
        super(OrderedEasyDict, self).__init__()
        if (d is None):
            d = OrderedDict()
        if kwargs:
            d.update(**kwargs)
        for (k, v) in d.items():
            # Routes through __setattr__, which also performs __setitem__.
            setattr(self, k, v)
        # Re-assign non-dunder class attributes so they become items as well.
        for k in self.__class__.__dict__.keys():
            if (not (k.startswith('__') and k.endswith('__'))):
                setattr(self, k, getattr(self, k))

    def __setattr__(self, name, value):
        # OrderedDict's name-mangled bookkeeping attributes (`__root`, `__map`)
        # must bypass item assignment or the dict machinery breaks.
        if (name.startswith('_') and (name.endswith('__root') or name.endswith('__map'))):
            super(OrderedEasyDict, self).__setattr__(name, value)
        else:
            # Wrap plain dicts (including those inside list/tuple values) so
            # nested attribute access keeps working.
            if isinstance(value, (list, tuple)):
                value = [(self.__class__(x) if isinstance(x, dict) else x) for x in value]
            else:
                value = (self.__class__(value) if isinstance(value, dict) else value)
            super(OrderedEasyDict, self).__setattr__(name, value)
            super(OrderedEasyDict, self).__setitem__(name, value)

    # Item assignment shares the attribute path, keeping both views in sync.
    __setitem__ = __setattr__
def gather_targets(roots, col_sent):
    """Collect, for each expression root token, the target spans in its scope.

    Returns a list parallel to `roots`: each element is a list of
    `[token_strings, char_offset_strings]` pairs, or `[[[], []]]` when a
    root has no targets. Char offsets are formatted as 'begin:end'.
    """
    targets = []
    # One bucket of {target_head_id: [tokens]} per expression-root id.
    exp_root_idxs = dict([(token.id, {}) for token in roots])
    for token in col_sent:
        if (len(token.scope) > 0):
            for (idx, label) in token.scope:
                # Token is a target head attached to one of our roots.
                if ((idx in exp_root_idxs) and ('targ' in label)):
                    exp_root_idxs[idx][token.id] = [token]
                    # Pull in every other token scoped under this target head.
                    for token2 in col_sent:
                        if (len(token2.scope) > 0):
                            for (idx2, label2) in token2.scope:
                                if ((idx2 == token.id) and (token2 not in exp_root_idxs[idx][token.id])):
                                    exp_root_idxs[idx][token.id].append(token2)
    for (root_idx, target_group) in exp_root_idxs.items():
        root_targets = []
        for (target_idx, target_tokens) in target_group.items():
            target_tokens = sort_tokens(target_tokens)
            (char_offset, token_groups) = get_char_offsets(target_tokens)
            tokens = []
            char_offsets = []
            # Join each contiguous token group into one surface string.
            for token_group in token_groups:
                token_string = ''
                for i in token_group:
                    token_string += (target_tokens[i].form + ' ')
                tokens.append(token_string.strip())
            for (bidx, eidx) in char_offset:
                char_offsets.append('{0}:{1}'.format(bidx, eidx))
            root_targets.append([tokens, char_offsets])
        if (len(root_targets) > 0):
            targets.append(root_targets)
        else:
            # Root with no targets: keep a placeholder so output stays parallel.
            targets.append([[[], []]])
    return targets
class Graphviz(object):
    """Render a clustering/tree structure as Graphviz DOT text.

    Nodes are colored by the ground-truth label of their points (pure leaves)
    or a neutral color for internal/impure nodes; colors are assigned lazily
    and cycled from a fixed palette.
    """

    def __init__(self):
        # Color used for internal (non-pure) nodes.
        self.internal_color = 'lavenderblush4'
        self.colors = ['aquamarine', 'bisque', 'blue', 'blueviolet', 'brown', 'cadetblue', 'chartreuse', 'coral', 'cornflowerblue', 'crimson', 'darkgoldenrod', 'darkgreen', 'darkkhaki', 'darkmagenta', 'darkorange', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategrey', 'darkviolet', 'deepskyblue', 'dodgerblue', 'firebrick', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'gray', 'grey', 'green', 'greenyellow', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'maroon', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumturquoise', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'navy', 'oldlace', 'olive', 'olivedrab', 'orange', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'powderblue', 'purple', 'red', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'silver', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'teal', 'thistle', 'tomato', 'violet', 'wheat', 'burlywood', 'chocolate']
        # Label -> color cache; counter cycles through the palette.
        self.color_map = {}
        self.color_counter = 0

    def format_id(self, ID):
        """Turn an arbitrary id string into a valid DOT identifier."""
        if (not ID.startswith('id')):
            return ('id%s' % ID).replace('-', '').replace('#', '_HASH_').replace('.', '_DOT_')
        else:
            return ('%s' % ID).replace('-', '').replace('#', '_HASH_').replace('.', '_DOT_')

    def clean_label(self, s):
        # NOTE(review): this replaces the literal substring '[/:.]', not the
        # character class — looks like it was meant to be
        # re.sub(r'[/:.]', '_', s); left unchanged to preserve behavior.
        return s.replace('[/:.]', '_')

    def get_node_label(self, node):
        """Build the HTML-like label shown inside a node."""
        lbl = []
        lbl.append(self.format_id(node.id))
        lbl.append('<BR/>')
        lbl.append(('num pts: %d' % len(node.leaves())))
        lbl.append('<BR/>')
        # purity() / best_across_debug are optional on node objects.
        try:
            lbl.append(('purity: %f' % node.purity()))
        except Exception:
            pass
        try:
            lbl.append('<BR/>')
            lbl.append(('across: %s' % node.best_across_debug))
        except Exception:
            pass
        return ''.join(lbl)

    def get_color(self, lbl):
        """Return a stable color for ``lbl``, assigning from the palette on first use."""
        if (lbl in self.color_map):
            return self.color_map[lbl]
        else:
            self.color_map[lbl] = self.colors[self.color_counter]
            self.color_counter = ((self.color_counter + 1) % len(self.colors))
            return self.color_map[lbl]

    def format_graphiz_node(self, node):
        """Emit the DOT statements for one node (and its parent edge)."""
        s = []
        color = self.internal_color
        try:
            # Pure nodes are colored by the label of their first labeled point.
            if (node.purity() == 1.0):
                if (hasattr(node, 'pts') and (len(node.pts) > 0)):
                    w_gt = [x for x in node.pts if (x[1] and (x[1] != 'None'))]
                    if w_gt:
                        color = self.get_color(w_gt[0][1])
                    else:
                        color = self.get_color('None')
        except Exception:
            pass
        shape = 'egg'
        if (node.parent is None):
            # Root: also emit a synthetic ROOTNODE star pointing at it.
            s.append(('\n%s[shape=%s;style=filled;color=%s;label=<%s<BR/>%s<BR/>>]' % (self.format_id(node.id), shape, color, self.get_node_label(node), color)))
            s.append('\nROOTNODE[shape=star;style=filled;color=gold;label=<ROOT>]')
            s.append(('\nROOTNODE->%s' % self.format_id(node.id)))
        else:
            # Leaves additionally show the id|label of their single point.
            leaf_m = ''
            if (hasattr(node, 'pts') and node.pts and (len(node.pts) > 0)):
                if hasattr(node.pts[0][0], 'mid'):
                    leaf_m = (('%s|%s' % (node.pts[0][0].mid, node.pts[0][0].gt)) if node.is_leaf() else '')
                else:
                    leaf_m = (('%s|%s' % (node.pts[0][2], node.pts[0][1])) if node.is_leaf() else '')
            s.append(('\n%s[shape=%s;style=filled;color=%s;label=<%s<BR/>%s<BR/>%s<BR/>>]' % (self.format_id(node.id), shape, color, self.get_node_label(node), color, leaf_m)))
            s.append(('\n%s->%s' % (self.format_id(node.parent.id), self.format_id(node.id))))
        return ''.join(s)

    def graphviz_tree(self, root):
        """Render ``root`` and all of its descendants as a DOT digraph string."""
        s = []
        s.append('digraph TreeStructure {\n')
        s.append(self.format_graphiz_node(root))
        for d in root.descendants():
            s.append(self.format_graphiz_node(d))
        s.append('\n}')
        return ''.join(s)

    @staticmethod
    def write_tree(filename, root):
        """Write the DOT rendering of ``root`` to ``filename``.

        Bug fix: this was declared as an instance method without ``self``, so
        it only worked when accessed on the class; ``@staticmethod`` makes both
        ``Graphviz.write_tree(...)`` and instance access behave correctly.
        """
        gv = Graphviz()
        tree = gv.graphviz_tree(root)
        with open(filename, 'w') as fout:
            fout.write(tree)
# NOTE(review): this bare call discards its return value; it looks like a
# decorator for `fit` whose leading '@' (and possibly a name prefix, e.g.
# `@alias_param`) was lost in extraction — confirm against the original source.
_param('conf', param_alias='config')
def fit(model, conf, eval_func=None, eval_dataloader=None, eval_metric=None, **kwargs):
    """Run automatic mixed-precision conversion on ``model`` per ``conf``.

    Validates that the requested precisions (bf16/fp16) are usable for the
    configured framework/device/backend, then drives the 'automixedprecision'
    tuning strategy until the timeout or accuracy goal is reached.

    Returns the best converted model found, or None if tuning failed.
    """
    if (eval_dataloader is not None):
        check_dataloader(eval_dataloader)
    # NOTE(review): membership test of `precisions` (presumably a list) inside
    # `excluded_precisions` — confirm the intended semantics.
    if (conf.precisions in conf.excluded_precisions):
        logger.warning('Target precision is in excluded_precisions, please modify precision or excluded_precisions to make it understandable.')
        sys.exit(0)
    wrapped_model = Model(model, conf=conf)
    # Precisions actually attempted = requested minus excluded.
    precisions = list((set(conf.precisions) - set(conf.excluded_precisions)))
    # Framework/device/backend validation: bail out early on unsupported combos.
    if ((('bf16' in precisions) or ('fp16' in precisions)) and (conf.framework == 'onnxruntime')):
        if (('fp16' in precisions) and (not ((conf.device == 'gpu') and (conf.backend == 'onnxrt_cuda_ep')))):
            # (typo "modelsneeds" preserved: message text is runtime behavior)
            logger.warning("Mix precision exits due to fp16 for onnx modelsneeds 'gpu' device and 'onnxrt_cuda_ep' backend.")
            sys.exit(0)
        elif (('bf16' in precisions) and ((not ((conf.backend == 'onnxrt_cuda_ep') and (conf.device == 'gpu'))) and (not ((conf.backend == 'onnxrt_dnnl_ep') and (conf.device == 'cpu'))))):
            logger.warning("Mix precision exits due to bf16 for onnx models needs 'gpu' device and 'onnxrt_cuda_ep' backend, or 'cpu' device and 'onnxrt_dnnl_ep' backend.")
            sys.exit(0)
    elif (('bf16' in precisions) and (not CpuInfo().bf16) and (conf.framework != 'onnxruntime')):
        # bf16 requested but CPU lacks the instruction: only proceed when forced.
        if (os.getenv('FORCE_BF16') == '1'):
            logger.warning("Mix precision will generate bf16 graph although the hardware doesn't support bf16 instruction.")
        else:
            logger.warning("Mix precision exits due to the hardware doesn't support bf16 instruction.")
            sys.exit(0)
    elif (('fp16' in precisions) and (conf.framework != 'onnxruntime')):
        logger.warning('Currently mix precision only supports fp16 for onnx models.')
        sys.exit(0)
    if (eval_metric is not None):
        metric = register_customer_metric(eval_metric, conf.framework)
    else:
        metric = None
    config = _Config(mixed_precision=conf, quantization=None, benchmark=None, pruning=None, distillation=None, nas=None)
    # Seed stdlib and NumPy RNGs for reproducible tuning runs.
    seed = options.random_seed
    random.seed(seed)
    np.random.seed(seed)
    # Optionally resume a previous tuning session from a pickled checkpoint.
    _resume = None
    resume_file = (os.path.abspath(os.path.expanduser(options.resume_from)) if (options.workspace and options.resume_from) else None)
    if resume_file:
        assert os.path.exists(resume_file), "The specified resume file {} doesn't exist!".format(resume_file)
        with open(resume_file, 'rb') as f:
            _resume = pickle.load(f).__dict__
    strategy = STRATEGIES['automixedprecision'](model=wrapped_model, conf=config, eval_func=eval_func, eval_dataloader=eval_dataloader, eval_metric=metric, resume=_resume, q_hooks=None)
    try:
        # Enforce the tuning-time budget around the traversal.
        with time_limit(conf.tuning_criterion.timeout):
            strategy.traverse()
    except KeyboardInterrupt:
        pass
    except Exception as e:
        logger.error('Unexpected exception {} happened during tuning.'.format(repr(e)))
        import traceback
        traceback.print_exc()
    finally:
        if strategy.best_qmodel:
            logger.info('Specified timeout or max trials is reached! Found a quantized model which meet accuracy goal. Exit.')
            strategy.deploy_config()
        else:
            logger.error('Specified timeout or max trials is reached! Not found any quantized model which meet accuracy goal. Exit.')
        return strategy.best_qmodel
def load_fields_from_vocab(vocab, data_type='text'):
    """Rebuild dataset fields from a saved sequence of (name, vocab) pairs,
    attaching each vocabulary (with a 0-defaulting stoi) to its field."""
    vocab_map = dict(vocab)
    feature_counts = {side: len(collect_features(vocab_map, side)) for side in ('src', 'qa', 'tgt')}
    fields = get_fields(feature_counts['src'], feature_counts['qa'], feature_counts['tgt'], data_type)
    for name, vocab_obj in vocab_map.items():
        # Unknown tokens map to index 0 instead of raising KeyError.
        vocab_obj.stoi = defaultdict(lambda: 0, vocab_obj.stoi)
        field = fields[name]
        field.vocab = vocab_obj
        if isinstance(field, NestedField):
            # Nested fields share the vocabulary with their inner field.
            field.nesting_field.vocab = vocab_obj
    return fields
def test_double_solve(vrblvl=0):
    """Solve a fixed 2-polynomial system in double precision.

    Returns True (failure) unless exactly 3 solutions are retrieved.
    """
    system = ['x^3 + 2*x*y - x^2;', 'x + y - x^3;']
    set_double_system(2, system, vrblvl)
    nbr, roco = solve_double_system(vrblvl)
    verbose = vrblvl > 0
    if verbose:
        print('number of solutions :', nbr)
        print('root counts :\n', roco)
        write_double_solutions(vrblvl)
    sols = get_double_solutions(vrblvl)
    if verbose:
        print('number of retrieved solutions :', len(sols))
    clear_double_solutions(vrblvl)
    return len(sols) != 3
def test_ignore_main():
    """Registering a class whose module is __main__ must warn and be skipped."""
    from unittest.mock import Mock
    reg_name, reg_type = 'test', 'loss'
    # Pretend Mock was defined in the __main__ module.
    Mock.__module__ = '__main__'
    with pytest.warns(UserWarning):
        _ = register(reg_name, reg_type)(Mock)
    assert reg_name not in LOSS_REG, 'Class from `__main__` not ignored.'
class OntoNotesNERProcessor(Processor):
    """Processor turning OntoNotes NER sentences and labels into model inputs."""

    def __init__(self, label_list=None, path=None, padding=None, unknown=None, bert_model='bert-base-cased', max_length=256):
        super().__init__(label_list, path, padding=padding, unknown=unknown, bert_model=bert_model, max_length=max_length)

    def process(self, dataset):
        datable = DataTable()
        for sentence, label in zip(dataset['sentence'], dataset['label']):
            # `process` here is the module-level encoding helper, not this method.
            encoded = process(sentence, label, self.tokenizer, self.vocabulary, self.max_length)
            input_id, attention_mask, segment_id, head_index, label_id, label_mask = encoded
            # Drop examples whose encoded sequences exceed max_length.
            if max(len(input_id), len(head_index), len(label_id)) <= self.max_length:
                columns = {
                    'input_ids': input_id,
                    'attention_mask': attention_mask,
                    'segment_ids': segment_id,
                    'head_indexes': head_index,
                    'label_ids': label_id,
                    'label_masks': label_mask,
                }
                for column, value in columns.items():
                    datable(column, value)
        return datable
class Evaluator():
    """Computes per-label segmentation metrics from test/reference arrays.

    Metric names are resolved against the module-level ALL_METRICS registry;
    a function with the same name found in any calling frame's locals takes
    precedence, allowing ad-hoc metric overrides.
    """

    default_metrics = ['False Positive Rate', 'Dice', 'Jaccard', 'Precision', 'Recall', 'Accuracy', 'False Omission Rate', 'Negative Predictive Value', 'False Negative Rate', 'True Negative Rate', 'False Discovery Rate', 'Total Positives Test', 'Total Positives Reference']
    default_advanced_metrics = []

    def __init__(self, test=None, reference=None, label_values=None, label_names=None, metrics=None, advanced_metrics=None, nan_for_nonexisting=True):
        self.test = None               # predicted segmentation
        self.reference = None          # ground-truth segmentation
        self.confusion_matrix = ConfusionMatrix()
        self.label_values = None       # labels (or label groups) to evaluate
        self.label_names = None        # optional display names per label
        self.nan_for_nonexisting = nan_for_nonexisting
        self.result = None             # cache of the last evaluate() output
        # Copy the defaults so instances never alias the class-level lists.
        self.metrics = list(self.default_metrics if metrics is None else metrics)
        self.advanced_metrics = list(self.default_advanced_metrics if advanced_metrics is None else advanced_metrics)
        self.set_reference(reference)
        self.set_test(test)
        if label_values is not None:
            self.set_labels(label_values, label_names)
        elif (test is not None) and (reference is not None):
            self.construct_labels()

    def set_test(self, test):
        """Set the segmentation to evaluate."""
        self.test = test

    def set_reference(self, reference):
        """Set the ground-truth segmentation."""
        self.reference = reference

    def set_labels(self, label_values, label_names=None):
        """Set the labels to evaluate (and optionally their display names)."""
        self.label_values = label_values
        if label_names is not None:
            self.set_label_names(label_names)

    def set_label_names(self, label_names):
        self.label_names = label_names

    def construct_labels(self):
        """Infer label values from the union of test and reference labels."""
        if self.test is None and self.reference is None:
            raise ValueError('No test or reference segmentations.')
        elif self.test is None:
            label_values = np.unique(self.reference)
        elif self.reference is None:
            # Bug fix: the original fell through to union1d with reference=None.
            label_values = np.unique(self.test)
        else:
            label_values = np.union1d(np.unique(self.test), np.unique(self.reference))
        self.label_values = list(map(lambda x: int(x), label_values))

    def set_metrics(self, metrics):
        """Replace the metric list; accepts set, list, tuple or ndarray."""
        if isinstance(metrics, set):
            self.metrics = list(metrics)
        elif isinstance(metrics, (list, tuple, np.ndarray)):
            self.metrics = metrics
        else:
            raise TypeError('Can only handle list, tuple, set & numpy array, but input is of type {}'.format(type(metrics)))

    def add_metric(self, metric):
        """Append ``metric`` unless it is already configured."""
        if metric not in self.metrics:
            self.metrics.append(metric)

    def evaluate(self, test=None, reference=None, advanced=False, **metric_kwargs):
        """Compute every configured metric for every label.

        Returns {label_name: {metric_name: value}} and caches it in
        ``self.result``.  With ``advanced=True`` the advanced metrics are
        evaluated as well.
        """
        if test is not None:
            self.set_test(test)
        if reference is not None:
            self.set_reference(reference)
        if self.test is None or self.reference is None:
            raise ValueError('Need both test and reference segmentations.')
        if self.label_values is None:
            self.construct_labels()
        self.metrics.sort()
        # Resolve metric names: registry first, then let identically-named
        # functions in a calling frame's locals override the registry entry.
        _funcs = {m: ALL_METRICS[m] for m in self.metrics + self.advanced_metrics}
        frames = inspect.getouterframes(inspect.currentframe())
        for metric in self.metrics:
            for f in frames:
                if metric in f[0].f_locals:
                    _funcs[metric] = f[0].f_locals[metric]
                    break
            else:
                if metric in _funcs:
                    continue
                else:
                    raise NotImplementedError('Metric {} not implemented.'.format(metric))
        self.result = {}
        # Bug fix: build the working list as a COPY.  The original aliased
        # self.metrics and then used `+=`, which permanently appended the
        # advanced metrics to self.metrics on every advanced evaluation.
        eval_metrics = list(self.metrics)
        if advanced:
            eval_metrics += self.advanced_metrics
        for i, label_value in enumerate(self.label_values):
            if self.label_names is not None:
                name = self.label_names[label_value]
            else:
                name = str(label_value)
            self.result[name] = {}
            if not hasattr(label_value, '__iter__'):
                self.confusion_matrix.set_test(self.test == label_value)
                self.confusion_matrix.set_reference(self.reference == label_value)
            else:
                # A "label" may be a group of labels: merge their masks.
                current_test = 0
                current_reference = 0
                for l in label_value:
                    current_test += (self.test == l)
                    current_reference += (self.reference == l)
                self.confusion_matrix.set_test(current_test)
                self.confusion_matrix.set_reference(current_reference)
            for metric in eval_metrics:
                self.result[name][metric] = _funcs[metric](confusion_matrix=self.confusion_matrix, nan_for_nonexisting=self.nan_for_nonexisting, **metric_kwargs)
        return self.result

    def to_dict(self):
        """Return the (possibly freshly computed) result mapping."""
        if self.result is None:
            self.evaluate()
        return self.result

    def to_array(self):
        """Return results as a (num_labels, num_metrics) float32 array,
        with metrics in sorted-name order."""
        if self.result is None:
            self.evaluate()
        result_metrics = sorted(self.result[list(self.result.keys())[0]].keys())
        a = np.zeros((len(self.label_values), len(result_metrics)), dtype=np.float32)
        for i, label in enumerate(self.label_values):
            if self.label_names is not None:
                label = self.label_names[label]
            for j, metric in enumerate(result_metrics):
                a[i][j] = self.result[label][metric]
        return a
class MapIterator(CheckpointableIterator):
    """Checkpointable iterator that applies a transform to every item of its source."""

    def __init__(self, source_iterator: CheckpointableIterator, transform: Callable[[str], Any]):
        if not isinstance(source_iterator, CheckpointableIterator):
            raise ValueError('source_iterator has to be a CheckpointableIterator')
        self._source_iterator = source_iterator
        self._transform = transform

    def getstate(self) -> Dict:
        # Stateless itself: the checkpoint is entirely the source's state.
        return self._source_iterator.getstate()

    def setstate(self, checkpoint: Optional[Dict]):
        self._source_iterator.setstate(checkpoint)

    def __next__(self):
        item = next(self._source_iterator)
        return self._transform(item)
def main():
    """Run the double-precision test functions at verbosity 10 and report failures."""
    verbose_level = 10
    failures = test_double_functions(verbose_level)
    failures += test_double_solution_class(verbose_level)
    if failures == 0:
        print('=> All tests passed.')
    else:
        print('Number of failed tests :', failures)
def test(args, model, dataloader, criterion):
    """Evaluate ``model`` on ``dataloader``.

    Returns ``(mean_loss, accuracy_percent, acc_str)``.  For the VisDA dataset
    the accuracy is the mean of the per-class accuracies and ``acc_str`` lists
    them; otherwise it is the overall accuracy and ``acc_str`` is None.
    """
    model.eval()
    loss_stack = []
    label_stack = []
    pred_stack = []
    # Bug fix / improvement: evaluation does not need gradients — disabling
    # autograd avoids building graphs and saves memory during inference.
    with torch.no_grad():
        for imgs_train, imgs_test, imgs_label, imgs_idx in tqdm(dataloader, ncols=60):
            imgs_test = imgs_test.cuda()
            _, pred_cls = model(imgs_test)
            # One-hot targets matching the criterion's expected shape.
            img_onehot_labels = torch.zeros_like(pred_cls).scatter(1, imgs_label.cuda().unsqueeze(1), 1)
            loss = criterion(pred_cls, img_onehot_labels)
            _, pred_idx = torch.max(pred_cls.cpu(), dim=1)
            loss_stack.append(loss.cpu().item())
            label_stack.append(imgs_label)
            pred_stack.append(pred_idx)
    test_loss = np.mean(loss_stack)
    label_stack = torch.cat(label_stack, dim=0)
    pred_stack = torch.cat(pred_stack, dim=0)
    # Small epsilon guards against division by zero on an empty loader.
    test_acc = (torch.sum(label_stack == pred_stack) / (len(pred_stack) + 0.0001)) * 100
    if args.dataset == 'VisDA':
        # VisDA protocol: report mean per-class accuracy.
        confu_mat = confusion_matrix(label_stack, pred_stack)
        acc_list = (confu_mat.diagonal() / confu_mat.sum(axis=1)) * 100
        test_acc = acc_list.mean()
        acc_str = ' '.join(['{:.2f}'.format(i) for i in acc_list])
    else:
        acc_str = None
    return (test_loss, test_acc, acc_str)
def has_vowel(w):
    """Return True if any character of ``w`` is in the module-level VOWELS set."""
    # Idiomatic: any() short-circuits exactly like the original manual loop.
    return any(ch in VOWELS for ch in w)
# NOTE(review): this bare name is a no-op expression; it appears to be the
# remnant of a decorator whose '@' (and likely a prefix, e.g. '@register_model')
# was lost in extraction — confirm against the original source.
_model
def nf_regnet_b2(pretrained=False, **kwargs):
    """Build the NF-RegNet-B2 variant via the shared normalization-free
    network factory; ``pretrained`` loads released weights if available."""
    return _create_normfreenet('nf_regnet_b2', pretrained=pretrained, **kwargs)
# Bug fix: the bare tuple `(True, 'Failed on gpu, accl is not installed')`
# was almost certainly the argument list of a stripped `@unittest.skipIf`
# decorator; restored accordingly (tests are skipped unconditionally).
@unittest.skipIf(True, 'Failed on gpu, accl is not installed')
class DistributedCudaAcclTest(unittest.TestCase):
    """Smoke tests for distributed initialization with the 'accl' backend."""

    def test_init_distributed_accl(self):
        # Single-process init: expect world size 1 and all ranks 0.
        res = init_distributed('accl')
        self.assertTrue(res)
        self.assertEqual(world_size(), 1)
        self.assertEqual(rank(), 0)
        self.assertEqual(local_rank(), 0)
        reset_distributed()

    def test_two_process_init_distributed(self):
        # Launch the companion distributed script across 2 processes.
        run_dist_code('test_basic_dist_with_accl', nproc=2)
class TestMemory(unittest.TestCase):
    """Smoke tests for the conversational memory implementations.

    Improvements over the original: the no-op setUp/tearDown overrides (which
    only called super()) were removed, and the duplicated fixture/assertion
    code was factored into a shared helper.
    """

    # Shared fixture: one query/answer exchange and the substring the
    # retrieved context must contain.
    _QUERY = 'hello'
    _ANSWER = "Hello! It's nice to meet you. Is there something I can help you with or would you like to chat?"
    _EXPECTED = 'User Query: hello'

    def _add_and_check(self, memory):
        # Record one exchange and verify it appears in the retrieved context.
        memory.add(self._QUERY, self._ANSWER)
        context = memory.get()
        self.assertIn(self._EXPECTED, context)

    def test_memory(self):
        self._add_and_check(Memory())

    def test_buffer_memory(self):
        self._add_and_check(Buffer_Memory())
class Athame(BaseDagger):
    """An athame: an iron ritual dagger (d3 damage, +2 to hit, weight 10)."""

    def __init__(self):
        # Fixed stats; only the dice spec is built dynamically.
        damage_roll = D.Dice.from_str('d3')
        super().__init__('athame', weight=10, damage=damage_roll, material=M.Iron, hit=2)
class GoalDirectedMotionOption(AbstractOption):
    """Option that moves the end effector toward a pose defined relative to a
    goal object.

    ``pose`` is a (position, rotation-quaternion) pair in the goal object's
    frame; ``pose_tolerance`` is (position_tol, rotation_tol).  With
    ``closed_loop=False`` sampling returns the same goal-relative policy as
    ``makePolicy``; with ``closed_loop=True`` the relative pose is resolved
    against the object's current world transform into an absolute target.
    (extra *args/**kwargs are accepted but unused, as in the original.)
    """

    def __init__(self, world, goal, pose, pose_tolerance=(0.001, 0.01), joint_velocity_tolerance=0.025, closed_loop=False, *args, **kwargs):
        super(GoalDirectedMotionOption, self).__init__(name='goal_directed_motion')
        self.goal = goal
        self.goal_id = world.getObjectId(goal)
        self.closed_loop = closed_loop
        if pose is None:
            raise RuntimeError('Must specify pose.')
        self.position, self.rotation = pose
        self.position_tolerance, self.rotation_tolerance = pose_tolerance

    def makePolicy(self, world):
        """Return (policy, termination condition) tracking the goal-relative pose."""
        return (CartesianMotionPolicy(self.position, self.rotation, goal=self.goal), GoalPositionCondition(self.goal, self.position, self.rotation, self.position_tolerance, self.rotation_tolerance))

    def samplePolicy(self, world):
        """Sample a (policy, condition) pair; closed-loop resolves an absolute pose."""
        if not self.closed_loop:
            # Improvement: the original duplicated makePolicy's body verbatim.
            return self.makePolicy(world)
        # Closed loop: compose the goal object's current world transform with
        # the stored relative pose to obtain an absolute target.
        obj = world.getObject(self.goal)
        rel_frame = kdl.Frame(kdl.Rotation.Quaternion(*self.rotation), kdl.Vector(*self.position))
        world_frame = obj.state.T * rel_frame
        position = list(world_frame.p)
        rotation = list(world_frame.M.GetQuaternion())
        return (CartesianMotionPolicy(position, rotation, goal=None), AbsolutePositionCondition(position, rotation, self.position_tolerance, self.rotation_tolerance))
def new_scale_plan_watcher(platform, job_name, namespace, job_uuid):
    """Create the ScalePlanWatcher implementation matching ``platform``.

    Raises ValueError for unsupported platforms.
    """
    logger.info('New %s NodeWatcher', platform)
    if platform in (PlatformType.KUBERNETES, PlatformType.PY_KUBERNETES):
        from dlrover.python.master.watcher.k8s_watcher import K8sScalePlanWatcher
        return K8sScalePlanWatcher(job_name, namespace, job_uuid)
    elif platform == PlatformType.RAY:
        # Bug fix: the original used `platform in PlatformType.RAY`, a
        # substring test when the constant is a string; equality was intended.
        from dlrover.python.master.watcher.ray_watcher import RayScalePlanWatcher
        return RayScalePlanWatcher(job_name, namespace, job_uuid)
    else:
        # Bug fix: the original passed logger-style (msg, arg) tuple args to
        # ValueError, so the message was never formatted.
        raise ValueError('Not support engine %s' % platform)
def reproduc(seed, benchmark=False, deterministic=True):
    """Seed every RNG (stdlib, NumPy, torch CPU and CUDA) and configure cuDNN
    flags for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    # cuDNN: autotuning trades determinism for speed; expose both knobs.
    torch.backends.cudnn.benchmark = benchmark
    torch.backends.cudnn.deterministic = deterministic
def image_train(dataset, resize_size=256, crop_size=224):
    """Training-time image transform pipeline.

    NOTE(review): ``resize_size``/``crop_size`` are accepted but unused by the
    original implementation (sizes are hard-coded); kept for interface
    compatibility.
    """
    if dataset == 'dg5':
        # Digits benchmark: small 32x32 inputs normalized to [-1, 1].
        return transforms.Compose([
            transforms.Resize((32, 32)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
    # ImageNet channel statistics for the standard augmentation pipeline.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    augmentation = [
        transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),
        transforms.RandomGrayscale(),
        transforms.ToTensor(),
        normalize,
    ]
    return transforms.Compose(augmentation)
class BPEWordSplitter(object):
    """Word splitter backed by a subword-nmt BPE model.

    subword-nmt marks every subword unit except the last of a word with the
    '@@' continuation suffix.

    Bug fix: the original compared ``t[-2:] != ''`` (always True) and called
    ``replace('', '')`` (a no-op) — the '@@' literals were evidently lost in
    extraction and have been restored.
    """

    def __init__(self, model_path):
        super().__init__()
        # Imported lazily so the class is usable without subword-nmt installed.
        from subword_nmt.apply_bpe import BPE
        with open(model_path) as f:
            self.model = BPE(f)

    def split(self, string):
        """Apply BPE to ``string`` and return the subword token list."""
        return self.model.process_line(string).split()

    def end_idx_last_full_word(self, tokens):
        """Index just past the last complete word in ``tokens``.

        Returns 0 when fewer than two word boundaries exist.
        """
        # A token begins a word iff its predecessor does not end with '@@'.
        bow_indices = [0] + [i + 1 for i, t in enumerate(tokens[1:]) if t[-2:] != '@@']
        if len(bow_indices) < 2:
            return 0
        return bow_indices[-1]

    def merge(self, list_of_string):
        """Undo BPE: strip '@@' markers and join the pieces with spaces."""
        return ' '.join([item.replace('@@', '') for item in list_of_string])
class StudentsTLossFunction(nn.Module):
    """Per-dimension Student's t negative log-likelihood loss.

    The degrees of freedom are always learnable (via ``log_df``); the scale is
    either fixed (when ``scale_lo == scale_init``) or learned through a
    softplus reparameterization bounded below by ``scale_lo``.

    Bug fix: the original assigned a bare tensor to ``self.scale`` in the
    fixed-scale branch, but ``lossfun`` always invokes ``self.scale()`` — the
    fixed value is now exposed through a callable so both branches work.
    """

    def __init__(self, num_dims, float_dtype, device, scale_lo=1e-05, scale_init=1.0):
        super(StudentsTLossFunction, self).__init__()
        if not np.isscalar(scale_lo):
            raise ValueError('`scale_lo` must be a scalar, but is of type {}'.format(type(scale_lo)))
        if not np.isscalar(scale_init):
            raise ValueError('`scale_init` must be a scalar, but is of type {}'.format(type(scale_init)))
        if not (scale_lo > 0):
            raise ValueError('`scale_lo` must be > 0, but is {}'.format(scale_lo))
        if not (scale_init >= scale_lo):
            raise ValueError('`scale_init` = {} must be >= `scale_lo` = {}'.format(scale_init, scale_lo))
        self.num_dims = num_dims
        # Accept NumPy dtypes and map them onto torch dtypes.
        if float_dtype == np.float32:
            float_dtype = torch.float32
        if float_dtype == np.float64:
            float_dtype = torch.float64
        self.float_dtype = float_dtype
        self.device = device
        if isinstance(device, int) or (isinstance(device, str) and 'cuda' in device) or (isinstance(device, torch.device) and device.type == 'cuda'):
            torch.cuda.set_device(self.device)
        # log(df): one learnable value per dimension, df = exp(log_df) > 0.
        self.log_df = torch.nn.Parameter(torch.zeros((1, self.num_dims)).to(dtype=self.float_dtype, device=self.device), requires_grad=True)
        self.register_parameter('log_df', self.log_df)
        if scale_lo == scale_init:
            # Fixed scale: no latent parameter; expose the constant through a
            # callable so lossfun's `self.scale()` call is valid.
            self.latent_scale = None
            fixed_scale = torch.tensor(scale_init, dtype=self.float_dtype, device=self.device)[np.newaxis, np.newaxis].repeat(1, self.num_dims)
            self.scale = lambda: fixed_scale
        else:
            self.latent_scale = torch.nn.Parameter(torch.zeros((1, self.num_dims)).to(dtype=self.float_dtype, device=self.device), requires_grad=True)
            self.register_parameter('latent_scale', self.latent_scale)
            # affine_softplus maps latent 0 -> scale_init with lower bound scale_lo.
            self.scale = lambda: util.affine_softplus(self.latent_scale, lo=scale_lo, ref=scale_init)
        self.df = lambda: torch.exp(self.log_df)

    def lossfun(self, x):
        """NLL of ``x`` (shape (batch, num_dims), dtype float_dtype) under the
        current per-dimension Student's t distributions."""
        x = torch.as_tensor(x)
        assert len(x.shape) == 2
        assert x.shape[1] == self.num_dims
        assert x.dtype == self.float_dtype
        return util.students_t_nll(x, self.df(), self.scale())
class ChineseTextToSpeech():
    """Chinese TTS wrapper around PaddleSpeech's TTSExecutor
    (FastSpeech2 acoustic model + PWGAN vocoder, CSMSC-trained)."""

    def __init__(self):
        self.tts_executor = TTSExecutor()

    def text2speech(self, input, output_audio_path):
        """Synthesize ``input`` text into ``output_audio_path`` and return the path."""
        synthesis_kwargs = dict(
            text=input,
            output=output_audio_path,
            am='fastspeech2_csmsc', am_config=None, am_ckpt=None, am_stat=None,
            spk_id=0, phones_dict=None, tones_dict=None, speaker_dict=None,
            voc='pwgan_csmsc', voc_config=None, voc_ckpt=None, voc_stat=None,
            lang='zh', device=paddle.get_device(),
        )
        self.tts_executor(**synthesis_kwargs)
        return output_audio_path

    def post_llm_inference_actions(self, text, output_audio_path):
        """Hook invoked after LLM inference: speak the generated text."""
        return self.text2speech(text, output_audio_path)
class DocDataset(Dataset):
    """Dataset of bag-of-words document vectors.

    Each document is a sequence of (word_index, frequency) pairs; indexing
    expands it into a dense float32 vector of length ``n_vocab`` on ``device``.
    """

    def __init__(self, docs, n_vocab, device):
        super(DocDataset, self).__init__()
        self.docs = docs
        self.n_vocab = n_vocab
        self.device = device

    def __getitem__(self, item):
        # Accumulate frequencies into a dense vocabulary-sized vector.
        bow = np.zeros(self.n_vocab, dtype=np.float32)
        for word_idx, freq in self.docs[item]:
            bow[word_idx] += freq
        return torch.Tensor(bow).to(self.device)

    def __len__(self):
        return len(self.docs)
def _lws_processor(hparams):
    """Build an LWS processor from hyper-parameters (lws imported lazily)."""
    import lws
    hop_size = get_hop_size(hparams)
    return lws.lws(hparams.n_fft, hop_size, fftsize=hparams.win_size, mode='speech')
class FlaxXGLMPreTrainedModel(metaclass=DummyObject):
    """Placeholder class used when the flax backend is unavailable."""

    # Backends this dummy stands in for.
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        # Fails fast unless the declared backends are installed.
        requires_backends(self, self._backends)
def plot_bytes_written_and_read(sys_metrics, rolling_window=10, figsize=(10, 8), title_fontsize=16, **kwargs):
    """Plot bytes written/read by the server per round, smoothed by a rolling sum."""
    plt.figure(figsize=figsize)
    # Aggregate all clients per round, then smooth over the rolling window.
    per_round = sys_metrics.groupby(NUM_ROUND_KEY, as_index=False).sum()
    rounds = per_round[NUM_ROUND_KEY]
    smoothed = per_round.rolling(rolling_window, on=NUM_ROUND_KEY, min_periods=1).sum()
    for column in ('bytes_written', 'bytes_read'):
        plt.plot(rounds, smoothed[column], alpha=0.7)
    plt.title('Bytes Written and Read by Server vs. Round Number', fontsize=title_fontsize)
    plt.xlabel('Round Number')
    plt.ylabel('Bytes')
    plt.legend(['Bytes Written', 'Bytes Read'], loc='upper left')
    _set_plot_properties(kwargs)
    plt.show()
def get_resnext(blocks, cardinality, bottleneck_width, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Construct a ResNeXt of the requested depth.

    ``blocks`` selects the per-stage layer counts; ``pretrained`` downloads
    released weights (requires ``model_name``) into ``root``.
    """
    # Supported depths -> residual units per stage.
    depth_to_layers = {
        14: [1, 1, 1, 1],
        26: [2, 2, 2, 2],
        38: [3, 3, 3, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in depth_to_layers:
        raise ValueError('Unsupported ResNeXt with number of blocks: {}'.format(blocks))
    layers = depth_to_layers[blocks]
    # Sanity check: 3 convs per unit plus stem and head.
    assert sum(layers) * 3 + 2 == blocks
    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[ci] * li for ci, li in zip(channels_per_layers, layers)]
    net = ResNeXt(channels=channels, init_block_channels=init_block_channels, cardinality=cardinality, bottleneck_width=bottleneck_width, **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
class SaintEncoder(nn.Module):
    """SAINT-style encoder block: column (inter-feature) attention followed by
    row (inter-sample) attention, each with its own feed-forward and add&norm."""

    def __init__(self, input_dim: int, n_heads: int, use_bias: bool, attn_dropout: float, ff_dropout: float, activation: str, n_feat: int):
        super(SaintEncoder, self).__init__()
        self.n_feat = n_feat
        row_dim = n_feat * input_dim
        # Column attention: attends over the n_feat feature tokens of a sample.
        self.col_attn = MultiHeadedAttention(input_dim, n_heads, use_bias, attn_dropout)
        self.col_attn_ff = PositionwiseFF(input_dim, ff_dropout, activation)
        self.col_attn_addnorm = AddNorm(input_dim, attn_dropout)
        self.col_attn_ff_addnorm = AddNorm(input_dim, ff_dropout)
        # Row attention: each sample flattened to one token of size n_feat*input_dim,
        # attending across the batch.
        self.row_attn = MultiHeadedAttention(row_dim, n_heads, use_bias, attn_dropout)
        self.row_attn_ff = PositionwiseFF(row_dim, ff_dropout, activation)
        self.row_attn_addnorm = AddNorm(row_dim, attn_dropout)
        self.row_attn_ff_addnorm = AddNorm(row_dim, ff_dropout)

    def forward(self, X: Tensor) -> Tensor:
        out = self.col_attn_addnorm(X, self.col_attn)
        out = self.col_attn_ff_addnorm(out, self.col_attn_ff)
        # Flatten feature tokens so row attention sees one token per sample.
        out = einops.rearrange(out, 'b n d -> 1 b (n d)')
        out = self.row_attn_addnorm(out, self.row_attn)
        out = self.row_attn_ff_addnorm(out, self.row_attn_ff)
        # Restore the (batch, n_feat, input_dim) layout.
        return einops.rearrange(out, '1 b (n d) -> b n d', n=self.n_feat)
def test_memoryview_from_buffer_empty_shape():
    """A memoryview built from an empty-shape buffer is a zero-length 'B' view."""
    result = m.test_memoryview_from_buffer_empty_shape()
    assert isinstance(result, memoryview)
    assert result.format == 'B'
    assert bytes(result) == b''
class PayloadModule(MsfModule):
    """Metasploit module wrapper specialized for the 'payload' module type."""

    def __init__(self, rpc, payload):
        # Delegate to MsfModule with the fixed type string 'payload'.
        super().__init__(rpc, 'payload', payload)
def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
    """Create a MixNet-M model variant.

    ``channel_multiplier`` scales block widths via ``round_channels``;
    ``depth_multiplier`` scales block repeat counts ('round' truncation) when
    decoding the architecture strings.
    """
    # One list of encoded block strings per stage (see decode_arch_def for the
    # exact grammar; 'ds'/'ir' are block types, k/s/e/c/se encode kernel mix,
    # stride, expansion, channels and squeeze-excite ratio).
    arch_def = [['ds_r1_k3_s1_e1_c24'], ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw']]
    # An explicitly passed norm_layer wins over the BatchNorm2d default.
    model_kwargs = dict(block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), num_features=1536, stem_size=24, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=(kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs))), **kwargs)
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model
def resnet101(pretrained=False, att_position=None, att_dim=128, **kwargs):
    """Construct a ResNet-101 (Bottleneck, [3, 4, 23, 3]) with optional attention.

    ``att_position`` lists per-stage attention insertion points; None means no
    attention anywhere ([[], [], [], []]).  ``pretrained`` loads torchvision's
    ImageNet weights.

    Bug fix: the default was a mutable list literal shared across calls; it is
    now None with the same effective value created per call.
    """
    if att_position is None:
        att_position = [[], [], [], []]
    model = ResNet(Bottleneck, [3, 4, 23, 3], att_position, att_dim, **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return model
def simulate_from_dag_lg(tam, n_sample, mean=0, variance=1):
    """Draw ``n_sample`` observations from a linear-Gaussian DAG.

    ``tam`` is a weighted adjacency matrix where ``tam[j][i]`` is the edge
    weight j -> i; each node's value is its Gaussian noise plus the weighted
    sum of its parents' values (memoized recursion).

    NOTE(review): ``variance`` is passed to np.random.normal as its ``scale``
    argument, i.e. it is actually treated as a standard deviation — confirm
    the intended semantics with callers.

    Fixes over the original: ``== None`` replaced with ``is None``; the inner
    node loop no longer shadows the outer sample loop variable.
    """
    num_nodes = len(tam)

    def get_value(node, noise):
        # Memoized evaluation in topological order via recursion.
        if values[node] is None:
            total = noise[node]
            for parent in range(num_nodes):
                if tam[parent][node] != 0.0:
                    total += get_value(parent, noise) * tam[parent][node]
            values[node] = total
        return values[node]

    simulation_data = []
    for _ in range(n_sample):
        errors = np.random.normal(mean, variance, num_nodes)
        values = [None] * num_nodes
        for node in range(num_nodes):
            values[node] = get_value(node, errors)
        simulation_data.append(values)
    return simulation_data
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.