code stringlengths 101 5.91M |
|---|
def SARE_loss(q_vec, pos_vecs, neg_vecs):
    """Compute the SARE (softmax-ratio) retrieval loss.

    Args:
        q_vec: query embeddings, shape (batch, 1, dim).
        pos_vecs: positive embeddings, shape (batch, num_pos, dim).
        neg_vecs: negative embeddings, shape (batch, num_neg, dim).

    Returns:
        Scalar tensor: sum over the batch of -log(p / (p + n)), where p and n
        are the summed exp(-squared distance) affinities of the query to the
        positive and negative pools respectively.
    """
    # Tile the query so it lines up element-wise with each positive/negative.
    num_pos = pos_vecs.get_shape()[1]
    query_copies_p = tf.tile(q_vec, [1, int(num_pos), 1])
    num_neg = neg_vecs.get_shape()[1]
    query_copies_n = tf.tile(q_vec, [1, int(num_neg), 1])
    # Negative squared Euclidean distance along the feature axis.
    dif_p = -tf.reduce_sum(tf.squared_difference(pos_vecs, query_copies_p), 2)
    dif_n = -tf.reduce_sum(tf.squared_difference(neg_vecs, query_copies_n), 2)
    # Aggregate affinities over the positive / negative sets.
    # (Leftover debug prints removed; one of them was mislabeled 'dif_p'
    # while actually printing dif_n.)
    p_exp = tf.reduce_sum(tf.exp(dif_p), 1)
    n_exp = tf.reduce_sum(tf.exp(dif_n), 1)
    # Cross-entropy of matching the query to the positive pool.
    loss = tf.reduce_sum(-tf.log(tf.div(p_exp, p_exp + n_exp)))
    return loss
class DrQv2Value(nn.Module):
    """State-value network used by DrQ-v2.

    A Linear -> LayerNorm -> Tanh trunk projects observations to
    ``feature_dim``, followed by an MLP head — or a vectorized ensemble of
    heads when ``ensemble_size > 1``.
    """

    def __init__(self, observation_space: gym.Space, action_space: gym.Space, feature_dim: int=50, hidden_layers: List[int]=(1024, 1024), ensemble_size: int=1, **kwargs):
        super().__init__()
        obs_dim = observation_space.shape[0]
        self.trunk = nn.Sequential(
            nn.Linear(obs_dim, feature_dim),
            nn.LayerNorm(feature_dim),
            nn.Tanh(),
        )
        self.ensemble_size = ensemble_size
        # Single head -> plain MLP; several heads -> shared ensemble MLP.
        if ensemble_size > 1:
            self.mlp = EnsembleMLP(feature_dim, 1, ensemble_size=ensemble_size, hidden_layers=hidden_layers, **kwargs)
        else:
            self.mlp = MLP(feature_dim, 1, hidden_layers=hidden_layers, **kwargs)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize every submodule with the DrQ-v2 weight init."""
        self.apply(drqv2_weight_init)

    def forward(self, obs):
        """Return state values shaped (ensemble_size, batch)."""
        features = self.trunk(obs)
        values = self.mlp(features).squeeze(-1)
        if self.ensemble_size == 1:
            # Keep the output rank consistent with the ensemble case.
            values = values.unsqueeze(0)
        return values
def main(args):
    """Convert the 'val' split from ``args.data_path`` into ``args.out_path``
    (created if missing), using annotations from ``args.coco_path``."""
    source = Path(args.data_path)
    destination = Path(args.out_path)
    destination.mkdir(parents=True, exist_ok=True)
    convert(source, 'val', destination, args.coco_path, 0, 0)
def preprocess_function(examples):
    """Tokenize the two sentence columns of ``examples`` as a pair.

    Relies on ``tokenizer``, ``sentence1_key``, ``sentence2_key``, ``padding``
    and ``max_seq_length`` from the enclosing scope; truncates to
    ``max_seq_length``.
    """
    texts = (examples[sentence1_key], examples[sentence2_key])
    return tokenizer(*texts, padding=padding, max_length=max_seq_length, truncation=True)
class MultiControlNetModel(ModelMixin):
    """Wrapper that runs several ControlNets and sums their residuals.

    Each sub-ControlNet receives its own conditioning image and scale; the
    down-block and mid-block residuals of all nets are added element-wise.
    """

    def __init__(self, controlnets: Union[(List[ControlNetModel], Tuple[ControlNetModel])]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(self, sample: torch.FloatTensor, timestep: Union[(torch.Tensor, float, int)], encoder_hidden_states: torch.Tensor, controlnet_cond: List[torch.tensor], conditioning_scale: List[float], class_labels: Optional[torch.Tensor]=None, timestep_cond: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, added_cond_kwargs: Optional[Dict[(str, torch.Tensor)]]=None, cross_attention_kwargs: Optional[Dict[(str, Any)]]=None, guess_mode: bool=False, return_dict: bool=True) -> Union[(ControlNetOutput, Tuple)]:
        """Run every ControlNet and merge the residuals by summation.

        ``controlnet_cond`` and ``conditioning_scale`` are zipped against
        ``self.nets``, so the three must have matching lengths.
        """
        for (i, (image, scale, controlnet)) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            (down_samples, mid_sample) = controlnet(sample=sample, timestep=timestep, encoder_hidden_states=encoder_hidden_states, controlnet_cond=image, conditioning_scale=scale, class_labels=class_labels, timestep_cond=timestep_cond, attention_mask=attention_mask, added_cond_kwargs=added_cond_kwargs, cross_attention_kwargs=cross_attention_kwargs, guess_mode=guess_mode, return_dict=return_dict)
            # Accumulate residuals across the nets.
            if (i == 0):
                (down_block_res_samples, mid_block_res_sample) = (down_samples, mid_sample)
            else:
                down_block_res_samples = [(samples_prev + samples_curr) for (samples_prev, samples_curr) in zip(down_block_res_samples, down_samples)]
                mid_block_res_sample += mid_sample
        return (down_block_res_samples, mid_block_res_sample)

    def save_pretrained(self, save_directory: Union[(str, os.PathLike)], is_main_process: bool=True, save_function: Callable=None, safe_serialization: bool=True, variant: Optional[str]=None):
        """Save each sub-ControlNet to its own directory.

        The first net goes to ``save_directory`` itself, subsequent ones to
        suffixed directories.
        """
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant)
            idx += 1
            # NOTE(review): the suffix is appended to the *previous* path, so
            # net k is saved under save_directory + '_1_2_..._k', whereas
            # from_pretrained below probes save_directory + f'_{idx}' — these
            # only agree for up to two nets; confirm against upstream.
            model_path_to_save = (model_path_to_save + f'_{idx}')

    def from_pretrained(cls, pretrained_model_path: Optional[Union[(str, os.PathLike)]], **kwargs):
        # NOTE(review): first parameter is `cls` and the method ends with
        # `cls(controlnets)` — this is presumably a @classmethod whose
        # decorator was stripped from this copy; confirm before use.
        idx = 0
        controlnets = []
        model_path_to_load = pretrained_model_path
        # Probe pretrained_model_path, then _1, _2, ... until a directory is missing.
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = (pretrained_model_path + f'_{idx}')
        logger.info(f'{len(controlnets)} controlnets loaded from {pretrained_model_path}.')
        if (len(controlnets) == 0):
            raise ValueError(f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {(pretrained_model_path + '_0')}.")
        return cls(controlnets)
def leaky_relu(x, alpha=0.2):
    """Element-wise leaky ReLU: x where x >= 0, otherwise alpha * x."""
    negative_part = tf.multiply(alpha, x)
    return tf.where(tf.greater_equal(x, 0.0), x, negative_part)
class MapFeatures(SourcewiseTransformer):
    """Transformer that applies ``fn`` to every batch of the 'features' source.

    Args:
        data_stream: the data stream to wrap.
        fn: callable applied to each features batch; when None, batches pass
            through unchanged.
    """

    def __init__(self, data_stream, fn, **kwargs):
        # NOTE(review): **kwargs is accepted but not forwarded to the base
        # class, matching the original behavior.
        super(MapFeatures, self).__init__(data_stream, produces_examples=False, which_sources='features')
        self.fn = fn

    def transform_source_batch(self, source_batch, source_name):
        """Return ``fn(source_batch)`` for the 'features' source.

        Raises:
            ValueError: if called for any source other than 'features'.
        """
        if (source_name != 'features'):
            # BUG FIX: this was a bare `raise`, which outside an except block
            # fails with "RuntimeError: No active exception to re-raise".
            raise ValueError(f"MapFeatures only handles the 'features' source, got {source_name!r}")
        if (self.fn is None):
            return source_batch
        return self.fn(source_batch)
class CustomPolicy(FeedForwardPolicy):
    """Feed-forward MLP policy with three 64-unit ReLU hidden layers."""

    def __init__(self, *args, **kwargs):
        super(CustomPolicy, self).__init__(
            *args,
            net_arch=[64, 64, 64],
            act_fun=tf.nn.relu,
            feature_extraction='mlp',
            **kwargs,
        )
def recall(gt, pr, class_weights=1, class_indexes=None, smooth=SMOOTH, per_image=False, threshold=None, **kwargs):
    """Smoothed recall score: (TP + smooth) / (TP + FN + smooth).

    Args:
        gt: ground-truth tensor.
        pr: prediction tensor.
        class_weights: per-class weights used by ``average``.
        class_indexes: optional channel subset to score.
        smooth: additive smoothing constant.
        per_image: whether to reduce per image before averaging.
        threshold: optional binarization threshold applied to ``pr``.
        **kwargs: must carry the framework 'backend' plus helper options.
    """
    backend = kwargs['backend']
    gt, pr = gather_channels(gt, pr, indexes=class_indexes, **kwargs)
    pr = round_if_needed(pr, threshold, **kwargs)
    reduce_axes = get_reduce_axes(per_image, **kwargs)
    true_positives = backend.sum(gt * pr, axis=reduce_axes)
    false_negatives = backend.sum(gt, axis=reduce_axes) - true_positives
    score = (true_positives + smooth) / (true_positives + false_negatives + smooth)
    return average(score, per_image, class_weights, **kwargs)
def gaussian_sample(x, mu=None, log_sigma=None):
    """Reparameterized Gaussian sample: mu + exp(log_sigma) * x.

    When either ``mu`` or ``log_sigma`` is missing, ``x`` is returned
    unchanged (identity / standard-normal case).
    """
    if mu is None or log_sigma is None:
        return x
    return torch.exp(log_sigma) * x + mu
class CIFAR10Policy(object):
    """AutoAugment policy found for CIFAR-10: 25 sub-policies, each a pair of
    (probability, op, magnitude) transforms; one sub-policy is chosen uniformly
    at random per image.

    Args:
        fillcolor: RGB fill used by geometric ops.
        magnitude_factor: global multiplier applied to the op magnitudes.
    """

    def __init__(self, fillcolor=(128, 128, 128), magnitude_factor=1):
        print(f'AutoAugment CIFAR10 - Magnitude {magnitude_factor}')
        # Each SubPolicy is (p1, op1, mag1, p2, op2, mag2, fillcolor, magnitude_factor).
        self.policies = [SubPolicy(0.1, 'invert', 7, 0.2, 'contrast', 6, fillcolor, magnitude_factor), SubPolicy(0.7, 'rotate', 2, 0.3, 'translateX', 9, fillcolor, magnitude_factor), SubPolicy(0.8, 'sharpness', 1, 0.9, 'sharpness', 3, fillcolor, magnitude_factor), SubPolicy(0.5, 'shearY', 8, 0.7, 'translateY', 9, fillcolor, magnitude_factor), SubPolicy(0.5, 'autocontrast', 8, 0.9, 'equalize', 2, fillcolor, magnitude_factor), SubPolicy(0.2, 'shearY', 7, 0.3, 'posterize', 7, fillcolor, magnitude_factor), SubPolicy(0.4, 'color', 3, 0.6, 'brightness', 7, fillcolor, magnitude_factor), SubPolicy(0.3, 'sharpness', 9, 0.7, 'brightness', 9, fillcolor, magnitude_factor), SubPolicy(0.6, 'equalize', 5, 0.5, 'equalize', 1, fillcolor, magnitude_factor), SubPolicy(0.6, 'contrast', 7, 0.6, 'sharpness', 5, fillcolor, magnitude_factor), SubPolicy(0.7, 'color', 7, 0.5, 'translateX', 8, fillcolor, magnitude_factor), SubPolicy(0.3, 'equalize', 7, 0.4, 'autocontrast', 8, fillcolor, magnitude_factor), SubPolicy(0.4, 'translateY', 3, 0.2, 'sharpness', 6, fillcolor, magnitude_factor), SubPolicy(0.9, 'brightness', 6, 0.2, 'color', 8, fillcolor, magnitude_factor), SubPolicy(0.5, 'solarize', 2, 0.0, 'invert', 3, fillcolor, magnitude_factor), SubPolicy(0.2, 'equalize', 0, 0.6, 'autocontrast', 0, fillcolor, magnitude_factor), SubPolicy(0.2, 'equalize', 8, 0.6, 'equalize', 4, fillcolor, magnitude_factor), SubPolicy(0.9, 'color', 9, 0.6, 'equalize', 6, fillcolor, magnitude_factor), SubPolicy(0.8, 'autocontrast', 4, 0.2, 'solarize', 8, fillcolor, magnitude_factor), SubPolicy(0.1, 'brightness', 3, 0.7, 'color', 0, fillcolor, magnitude_factor), SubPolicy(0.4, 'solarize', 5, 0.9, 'autocontrast', 3, fillcolor, magnitude_factor), SubPolicy(0.9, 'translateY', 9, 0.7, 'translateY', 9, fillcolor, magnitude_factor), SubPolicy(0.9, 'autocontrast', 2, 0.8, 'solarize', 3, fillcolor, magnitude_factor), SubPolicy(0.8, 'equalize', 8, 0.1, 'invert', 3, fillcolor, magnitude_factor), SubPolicy(0.7, 'translateY', 9, 0.9, 'autocontrast', 1, fillcolor, magnitude_factor)]

    def __call__(self, img):
        """Apply one uniformly sampled sub-policy to ``img``."""
        policy_idx = random.randint(0, (len(self.policies) - 1))
        return self.policies[policy_idx](img)

    def __repr__(self):
        return 'AutoAugment CIFAR10 Policy'
def test(args):
    """Evaluate a saved aspect-sentiment-classification model on the test split.

    Loads <output_dir>/model.pt, runs it over the test examples on GPU, and
    writes logits, predictions, gold labels, accuracy and macro-F1 (plus the
    example ids/targets/texts) to <output_dir>/test_preds.json.
    """
    processor = data_utils.AscProcessor()
    label_list = processor.get_labels()
    tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_dir)
    eval_examples = processor.get_test_examples(args.data_dir, 'test_rels.json', method=args.method)
    eval_features = data_utils.convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer, 'asc')
    logger.info('***** Running evaluation *****')
    logger.info(' Num examples = %d', len(eval_examples))
    logger.info(' Batch size = %d', args.eval_batch_size)
    # Pack all features into tensors for a sequential (non-shuffled) loader.
    all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
    all_target_ids = torch.tensor([f.target_indices for f in eval_features], dtype=torch.long)
    eval_data = TensorDataset(all_input_ids, all_segment_ids, all_input_mask, all_label_ids, all_target_ids)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # NOTE(review): torch.load of a full pickled model — only safe for
    # checkpoints produced by this project.
    model = torch.load(os.path.join(args.output_dir, 'model.pt'))
    model.cuda()
    model.eval()
    full_logits = []
    predictions = []
    golds = []
    # Per-example metadata kept in loader (sequential) order.
    ids = [e.guid for e in eval_examples]
    targets = [str(e.text_a) for e in eval_examples]
    texts = [str(e.text_b) for e in eval_examples]
    for (step, batch) in enumerate(eval_dataloader):
        batch = tuple((t.cuda() for t in batch))
        (input_ids, segment_ids, input_mask, label_ids, target_ids) = batch
        with torch.no_grad():
            logits = model(input_ids=input_ids, attention_mask=input_mask, target_indices=target_ids)
        logits = logits.detach().cpu().numpy()
        # argmax over the class dimension.
        preds = logits.argmax(1)
        label_ids = label_ids.cpu().numpy()
        predictions.extend(preds.tolist())
        full_logits.extend(logits.tolist())
        golds.extend(label_ids.tolist())
    test_acc = accuracy_score(golds, predictions)
    test_f1 = f1_score(golds, predictions, average='macro')
    output_eval_json = os.path.join(args.output_dir, 'test_preds.json')
    with open(output_eval_json, 'w') as fw:
        json.dump({'logits': full_logits, 'predictions': predictions, 'golds': golds, 'acc': test_acc, 'f1': test_f1, 'ids': ids, 'targets': targets, 'texts': texts}, fw)
def _scope_all(scope, default_scope=None):
    # Opens `scope` as a TF variable scope and re-enters its original name
    # scope, yielding the variable scope object.
    # NOTE(review): this is a generator (bare `yield`) — it is presumably
    # meant to be decorated with @contextlib.contextmanager, and the decorator
    # looks stripped from this copy; confirm at the call sites.
    with tf.variable_scope(scope, default_name=default_scope) as s, tf.name_scope(s.original_name_scope):
        (yield s)
# NOTE(review): the bare call below looks like the residue of a stripped
# decorator — most likely `@register_lr_scheduler('inverse_linear')` — and
# would fail at import time as written; confirm against the original file.
_lr_scheduler('inverse_linear')
class InverseLinearRootSchedule(FairseqLRScheduler):
    """LR schedule: linear warmup to args.lr over `warmup_updates` steps,
    then exponential decay (factor 0.8 every 5 * warmup_updates updates)."""

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        warmup_end_lr = args.lr
        if (args.warmup_init_lr < 0):
            # Default: start directly at the target lr (no warmup ramp).
            args.warmup_init_lr = warmup_end_lr
        # Per-update lr increment during warmup.
        self.lr_step = ((warmup_end_lr - args.warmup_init_lr) / args.warmup_updates)
        self.decay_factor = warmup_end_lr
        self.lr = args.warmup_init_lr
        self.optimizer.set_lr(self.lr)

    def add_args(parser):
        # NOTE(review): no self/cls parameter — presumably a @staticmethod
        # whose decorator was stripped from this copy; confirm upstream.
        parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--warmup-init-lr', default=(- 1), type=float, metavar='LR', help='initial learning rate during warmup phase; default is args.lr')

    def step(self, epoch, val_loss=None):
        """End-of-epoch hook; lr is driven by step_update, so just report it."""
        super().step(epoch, val_loss)
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each optimizer step."""
        if (num_updates < self.args.warmup_updates):
            self.lr = (self.args.warmup_init_lr + (num_updates * self.lr_step))
        else:
            # Decay: multiply by 0.8 every 5 * warmup_updates updates.
            self.lr = (self.decay_factor * (0.8 ** (num_updates / (5 * self.args.warmup_updates))))
        self.optimizer.set_lr(self.lr)
        return self.lr
class MemcachedBackend(BaseStorageBackend):
    """Storage backend that fetches values from a memcached cluster.

    Args:
        server_list_cfg: config for the memcached server list.
        client_cfg: config for the memcached client.
        sys_path: optional path appended to sys.path so the `mc` package
            becomes importable.

    Raises:
        ImportError: if the `mc` memcached client package is unavailable.
    """

    def __init__(self, server_list_cfg, client_cfg, sys_path=None):
        if (sys_path is not None):
            import sys
            sys.path.append(sys_path)
        try:
            import mc
        except ImportError:
            raise ImportError('Please install memcached to enable MemcachedBackend.')
        self.server_list_cfg = server_list_cfg
        self.client_cfg = client_cfg
        self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg, self.client_cfg)
        # Reusable buffer the client fills on every Get call.
        self._mc_buffer = mc.pyvector()

    def get(self, filepath):
        """Return the raw value stored under `filepath` (converted from the
        client's internal buffer)."""
        filepath = str(filepath)
        import mc
        self._client.Get(filepath, self._mc_buffer)
        value_buf = mc.ConvertBuffer(self._mc_buffer)
        return value_buf

    def get_text(self, filepath):
        # Text retrieval is not supported by this backend.
        raise NotImplementedError
def CheckCaffeDataLayerSetUp(filename, clean_lines, linenum, error):
    """Lint check for Caffe data-layer setup hooks.

    Flags (a) any of the known base data layers overriding LayerSetUp, and
    (b) any *unknown* data layer (not a base layer and not a `Base*` class)
    defining DataLayerSetUp.

    Args:
        filename: name of the file being linted.
        clean_lines: CleansedLines-like object; only `.elided[linenum]` is read.
        linenum: index of the line to inspect.
        error: callback invoked as
            error(filename, linenum, category, confidence, message).
    """
    base_layers = ('AnnotatedDataLayer', 'DataLayer', 'ImageDataLayer',
                   'MemoryDataLayer', 'VideoDataLayer', 'WindowDataLayer')
    message = ('Except the base classes, Caffe DataLayer should define'
               ' DataLayerSetUp instead of LayerSetUp. The base DataLayers'
               ' define common SetUp steps, the subclasses should'
               ' not override them.')
    line = clean_lines.elided[linenum]

    # Case (a): a base data layer overrides LayerSetUp.
    if line.find('DataLayer<Dtype>::LayerSetUp') >= 0:
        if any(line.find('void %s<Dtype>::LayerSetUp' % cls) != -1 for cls in base_layers):
            error(filename, linenum, 'caffe/data_layer_setup', 2, message)

    # Case (b): an unknown layer defines DataLayerSetUp.
    if line.find('DataLayer<Dtype>::DataLayerSetUp') >= 0:
        known = [line.find('void Base') != -1]
        known += [line.find('void %s<Dtype>::DataLayerSetUp' % cls) != -1 for cls in base_layers]
        if not any(known):
            error(filename, linenum, 'caffe/data_layer_setup', 2, message)
def nlvr2_triplet_eval_collate(inputs):
    """Collate (qid, *tensors) evaluation examples.

    The tensor parts are collated with nlvr2_triplet_collate; the question
    ids are attached to the resulting batch under the 'qids' key.
    """
    qids = [example[0] for example in inputs]
    tensor_groups = [list(example[1:]) for example in inputs]
    batch = nlvr2_triplet_collate(tensor_groups)
    batch['qids'] = qids
    return batch
def tf_mobilenetv3_small_075(pretrained=False, **kwargs):
    """MobileNet-V3 Small 0.75 with TF-ported defaults ('same' padding and
    the TF BatchNorm epsilon)."""
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    return _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs)
def generate_cca_projection():
    """Fit a 40-component CCA between image features and sentence embeddings
    drawn from `train_loader`, then persist the modality means and the two
    projection matrices under `runPath`."""
    pairs = [(d[0], d[1][0]) for d in train_loader]
    images, sentences = [torch.cat(column) for column in zip(*pairs)]
    emb = fn_to_emb(sentences.int())
    corr, (im_proj, emb_proj) = cca([images, emb], k=40)
    print('Largest eigen value from CCA: {:.3f}'.format(corr[0]))
    for tensor, fname in ((images.mean(dim=0), '/images_mean.pt'),
                          (emb.mean(dim=0), '/emb_mean.pt'),
                          (im_proj, '/im_proj.pt'),
                          (emb_proj, '/emb_proj.pt')):
        torch.save(tensor, runPath + fname)
def _download_single_image(label_path: Path, img_tuple: tuple, i: int, timeout: int=4) -> None:
    """Download one image into ``label_path``.

    Args:
        label_path: destination directory for this label.
        img_tuple: tuple whose second element is the image URL.
        i: running index, used to build the zero-padded file name.
        timeout: download timeout in seconds.
    """
    # Extract the file extension from the URL, ignoring any query string;
    # fall back to '.jpg' when the URL has no recognizable suffix.
    suffix = re.findall(r'\.\w+?(?=(?:\?|$))', img_tuple[1])
    suffix = suffix[0].lower() if suffix else '.jpg'
    fname = f'{i:08d}{suffix}'
    # BUG FIX: the original referenced `img_Tuple` (capital T) here, which is
    # an undefined name and raised NameError on every call.
    download_url(img_tuple[1], label_path / fname, timeout=timeout)
def get_bijection(layer_config, x_shape):
    """Instantiate the bijection described by ``layer_config`` for inputs of
    shape ``x_shape``.

    ``layer_config['type']`` selects the bijection class; the remaining keys
    are type-specific hyperparameters. Bijections that only operate on flat
    inputs assert ``len(x_shape) == 1``.

    Raises:
        AssertionError: for an unknown ``layer_config['type']``.
    """
    # --- coupling / reshaping layers ---
    if (layer_config['type'] == 'acl'):
        return get_acl_bijection(config=layer_config, x_shape=x_shape)
    elif (layer_config['type'] == 'squeeze'):
        return Squeeze2dBijection(x_shape=x_shape, factor=layer_config['factor'])
    # --- element-wise nonlinearities ('sigmoid' is the inverse of 'logit') ---
    elif (layer_config['type'] == 'logit'):
        return LogitBijection(x_shape=x_shape)
    elif (layer_config['type'] == 'sigmoid'):
        return LogitBijection(x_shape=x_shape).inverse()
    elif (layer_config['type'] == 'tanh'):
        return TanhBijection(x_shape=x_shape)
    elif (layer_config['type'] == 'scalar-mult'):
        return ScalarMultiplicationBijection(x_shape=x_shape, value=layer_config['value'])
    elif (layer_config['type'] == 'scalar-add'):
        return ScalarAdditionBijection(x_shape=x_shape, value=layer_config['value'])
    elif (layer_config['type'] == 'flatten'):
        return ViewBijection(x_shape=x_shape, z_shape=(int(np.prod(x_shape)),))
    # --- autoregressive / normalization / linear layers ---
    elif (layer_config['type'] == 'made'):
        assert (len(x_shape) == 1)
        return MADEBijection(num_input_channels=x_shape[0], hidden_channels=layer_config['hidden_channels'], activation=get_activation(layer_config['activation']))
    elif (layer_config['type'] == 'batch-norm'):
        return BatchNormBijection(x_shape=x_shape, per_channel=layer_config['per_channel'], apply_affine=layer_config['apply_affine'], momentum=layer_config['momentum'])
    elif (layer_config['type'] == 'act-norm'):
        return ActNormBijection(x_shape=x_shape)
    elif (layer_config['type'] == 'affine'):
        return AffineBijection(x_shape=x_shape, per_channel=layer_config['per_channel'])
    elif (layer_config['type'] == 'cond-affine'):
        return ConditionalAffineBijection(x_shape=x_shape, coupler=get_coupler(input_shape=(layer_config['num_u_channels'], *x_shape[1:]), num_channels_per_output=x_shape[0], config=layer_config['st_coupler']))
    elif (layer_config['type'] == 'flip'):
        return FlipBijection(x_shape=x_shape, dim=1)
    elif (layer_config['type'] == 'invconv'):
        # LU-parameterized or brute-force invertible 1x1 convolution.
        if layer_config['lu']:
            return LUInvertible1x1ConvBijection(x_shape=x_shape)
        else:
            return BruteForceInvertible1x1ConvBijection(x_shape=x_shape)
    elif (layer_config['type'] == 'linear'):
        assert (len(x_shape) == 1)
        return LULinearBijection(num_input_channels=x_shape[0])
    elif (layer_config['type'] == 'rand-channel-perm'):
        return RandomChannelwisePermutationBijection(x_shape=x_shape)
    # --- flow families on flat inputs ---
    elif (layer_config['type'] == 'sos'):
        assert (len(x_shape) == 1)
        return SumOfSquaresPolynomialBijection(num_input_channels=x_shape[0], hidden_channels=layer_config['hidden_channels'], activation=get_activation(layer_config['activation']), num_polynomials=layer_config['num_polynomials'], polynomial_degree=layer_config['polynomial_degree'])
    elif (layer_config['type'] == 'nsf-ar'):
        assert (len(x_shape) == 1)
        return AutoregressiveRationalQuadraticSplineBijection(num_input_channels=x_shape[0], num_hidden_layers=layer_config['num_hidden_layers'], num_hidden_channels=layer_config['num_hidden_channels'], num_bins=layer_config['num_bins'], tail_bound=layer_config['tail_bound'], activation=get_activation(layer_config['activation']), dropout_probability=layer_config['dropout_probability'])
    elif (layer_config['type'] == 'nsf-c'):
        assert (len(x_shape) == 1)
        return CoupledRationalQuadraticSplineBijection(num_input_channels=x_shape[0], num_hidden_layers=layer_config['num_hidden_layers'], num_hidden_channels=layer_config['num_hidden_channels'], num_bins=layer_config['num_bins'], tail_bound=layer_config['tail_bound'], activation=get_activation(layer_config['activation']), dropout_probability=layer_config['dropout_probability'], reverse_mask=layer_config['reverse_mask'])
    elif (layer_config['type'] == 'bnaf'):
        assert (len(x_shape) == 1)
        return BlockNeuralAutoregressiveBijection(num_input_channels=x_shape[0], num_hidden_layers=layer_config['num_hidden_layers'], hidden_channels_factor=layer_config['hidden_channels_factor'], activation=layer_config['activation'], residual=layer_config['residual'])
    elif (layer_config['type'] == 'ode'):
        assert (len(x_shape) == 1)
        return FFJORDBijection(x_shape=x_shape, velocity_hidden_channels=layer_config['hidden_channels'], relative_tolerance=layer_config['numerical_tolerance'], absolute_tolerance=layer_config['numerical_tolerance'], num_u_channels=layer_config['num_u_channels'])
    elif (layer_config['type'] == 'planar'):
        assert (len(x_shape) == 1)
        return PlanarBijection(num_input_channels=x_shape[0])
    elif (layer_config['type'] == 'cond-planar'):
        assert (len(x_shape) == 1)
        return ConditionalPlanarBijection(num_input_channels=x_shape[0], num_u_channels=layer_config['num_u_channels'], cond_hidden_channels=layer_config['cond_hidden_channels'], cond_activation=get_activation(layer_config['cond_activation']))
    elif (layer_config['type'] == 'resblock'):
        return ResidualFlowBijection(x_shape=x_shape, lipschitz_net=get_lipschitz_net(input_shape=x_shape, num_output_channels=x_shape[0], config=layer_config['net']), reduce_memory=layer_config['reduce_memory'])
    else:
        assert False, f"Invalid layer type `{layer_config['type']}'"
class OutConv(nn.Module):
    """Final 1x1 convolution mapping feature channels to output channels."""

    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        # 1x1 kernel: a per-pixel linear map over channels; spatial size unchanged.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        out = self.conv(x)
        return out
def resnet152gn(pretrained=False, **kwargs):
    """Build a ResNet-152 (Bottleneck blocks [3, 8, 36, 3]).

    NOTE(review): the name suggests a GroupNorm variant, but the weights
    loaded here are the stock 'resnet152' checkpoint — confirm this matches
    the norm layers `ResNet` actually builds with these kwargs.
    """
    net = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet152'])
        net.load_state_dict(state)
    return net
def spheric2cartesian(r, theta, phi):
    """Convert spherical coordinates to Cartesian (x, y, z).

    Elevation convention: theta is measured from the x-y plane
    (z = r*sin(theta)); phi is the azimuth within that plane.
    """
    x = (r * np.cos(phi)) * np.cos(theta)
    y = (r * np.sin(phi)) * np.cos(theta)
    z = r * np.sin(theta)
    return (x, y, z)
class StudentT(Normal):
    """Student's t distribution, parameterized like the diagonal Normal plus
    an integer ``dofs`` (degrees of freedom) parameter.
    """

    def __init__(self, dofs, means=None, covs=None, covariance_type='diag', min_cov=None, inertia=0.0, frozen=False, check_data=True):
        self.name = 'StudentT'
        # dofs must be a scalar integer >= 1.
        dofs = _check_parameter(_cast_as_tensor(dofs), 'dofs', min_value=1, ndim=0, dtypes=(torch.int32, torch.int64))
        self.dofs = dofs
        super().__init__(means=means, covs=covs, min_cov=min_cov, covariance_type=covariance_type, inertia=inertia, frozen=frozen, check_data=check_data)
        # Re-register dofs as a buffer: the plain attribute set above is
        # deleted first so register_buffer does not collide with it.
        del self.dofs
        self.register_buffer('dofs', _cast_as_tensor(dofs))
        # Cache the lgamma terms of the log-pdf normalizing constant.
        self.register_buffer('_lgamma_dofsp1', torch.lgamma(((dofs + 1) / 2.0)))
        self.register_buffer('_lgamma_dofs', torch.lgamma((dofs / 2.0)))

    def _reset_cache(self):
        """Recompute the cached log-pdf constant after parameters change."""
        super()._reset_cache()
        if (self._initialized == False):
            return
        self.register_buffer('_log_sqrt_dofs_pi_cov', torch.log(torch.sqrt(((self.dofs * math.pi) * self.covs))))

    def sample(self, n):
        """Draw ``n`` samples.

        NOTE(review): torch.distributions.StudentT's positional signature is
        (df, loc, scale); here means/covs fill the df/loc slots and self.dofs
        is not passed at all — verify against the torch documentation.
        """
        return torch.distributions.StudentT(self.means, self.covs).sample([n])

    def log_probability(self, X):
        """Per-sample log-density of X (shape (n, d)), summed over features."""
        X = _check_parameter(_cast_as_tensor(X), 'X', ndim=2, shape=((- 1), self.d), check_parameter=self.check_data)
        # Normalized squared deviation used by the t log-pdf.
        t = (((X - self.means) ** 2) / self.covs)
        return torch.sum((((self._lgamma_dofsp1 - self._lgamma_dofs) - self._log_sqrt_dofs_pi_cov) - (((self.dofs + 1) / 2.0) * torch.log((1 + (t / self.dofs))))), dim=(- 1))
def constrain_norm(grad: P, norm_constraint: chex.Numeric=0.001) -> P:
    """Scale ``grad`` so its (pmean-reduced) squared norm is at most
    ``norm_constraint``.

    The scale factor sqrt(norm_constraint / ||grad||^2) is capped at 1, so
    gradients already within the constraint are returned unscaled.
    """
    grad_sq_norm = tree_inner_product(grad, grad)
    # Average the squared norm across devices when running under pmap.
    grad_sq_norm = utils.distribute.pmean_if_pmap(grad_sq_norm)
    scale = jnp.minimum(jnp.sqrt(norm_constraint / grad_sq_norm), 1)
    return multiply_tree_by_scalar(grad, scale)
def test_sort_parents(a_pcmci):
    """_sort_parents must order parents by descending absolute value.

    Builds parents (0,0)..(9,9) whose values have strictly growing magnitude
    and alternating sign, so sorting by |value| must exactly reverse the
    construction order.
    """
    pcmci, _ = a_pcmci
    n_parents = 10
    orig_parents = [(i, i) for i in range(n_parents)]
    # Value for parent k is k * (-1)**(k+1): magnitudes 0, 1, 2, ... with
    # alternating sign, matching the original sign-flipping loop.
    parent_vals = {par: val * ((-1) ** (val + 1)) for val, par in enumerate(orig_parents)}
    sorted_parents = pcmci._sort_parents(parent_vals)
    # Typo fix: 'abolute' -> 'absolute' in the failure message.
    assert sorted_parents == orig_parents[::-1], 'Parents must be sorted by absolute value of the test metric'
class PolyOptimizer(torch.optim.SGD):
    """SGD with a polynomial learning-rate decay schedule.

    lr(t) = lr0 * (1 - t / max_step) ** momentum for t < max_step; after
    max_step the learning rate keeps its last value.

    Note: the ``momentum`` argument here is the *polynomial decay power*,
    not SGD momentum (SGD momentum stays at its default of 0).
    """

    def __init__(self, params, lr, weight_decay, max_step, momentum=0.9):
        # BUG FIX: the original called super().__init__(params, lr, weight_decay),
        # which passed weight_decay positionally into SGD's `momentum` slot and
        # left the actual weight decay at 0.
        super().__init__(params, lr, weight_decay=weight_decay)
        self.global_step = 0
        self.max_step = max_step
        self.momentum = momentum  # decay power of the poly schedule
        self.__initial_lr = [group['lr'] for group in self.param_groups]

    def step(self, closure=None):
        """Apply the poly-decayed learning rate, then take one SGD step."""
        if self.global_step < self.max_step:
            lr_mult = (1 - self.global_step / self.max_step) ** self.momentum
            for i in range(len(self.param_groups)):
                self.param_groups[i]['lr'] = self.__initial_lr[i] * lr_mult
        super().step(closure)
        self.global_step += 1
class KLDivLoss(nn.Module):
    """KL divergence between softmax(pred) and a target distribution.

    The prediction logits are log-softmaxed over dim 1 before being handed to
    nn.KLDivLoss, which expects log-probabilities as its input.
    """

    def __init__(self, reduction='batchmean', log_target=False):
        super().__init__()
        self.kld = nn.KLDivLoss(reduction=reduction, log_target=log_target)

    def forward(self, pred, target):
        log_probs = pred.log_softmax(dim=1)
        return self.kld(log_probs, target)
# NOTE(review): the two bare names below look like the residue of stripped
# decorators — most likely `@require_torch` and `@require_vision` — and would
# fail at import time as written; confirm against the original file.
_torch
_vision
class CLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Unit tests for CLIPImageProcessor: attribute presence, dict-based
    construction, and encoding of PIL / NumPy / PyTorch image inputs."""

    # Skipped entirely when the vision deps are unavailable.
    image_processing_class = (CLIPImageProcessor if is_vision_available() else None)

    def setUp(self):
        self.image_processor_tester = CLIPImageProcessingTester(self)

    def image_processor_dict(self):
        # NOTE(review): used below as `self.image_processor_dict` without
        # calling it — presumably a @property whose decorator was stripped
        # from this copy; confirm upstream.
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes all the CLIP preprocessing knobs."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_convert_rgb'))

    def test_image_processor_from_dict_with_kwargs(self):
        """from_dict honors both the dict values and kwarg overrides."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        """PIL inputs: single image -> batch of 1; list -> full batch."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))

    def test_call_numpy(self):
        """NumPy inputs: same shape expectations as the PIL case."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))

    def test_call_pytorch(self):
        """Torch-tensor inputs: same shape expectations as the PIL case."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']))
class NLayerDiscriminator(nn.Module):
    """PatchGAN discriminator with ``n_layers`` stride-2 conv blocks.

    The channel width doubles per block (capped at 8*ndf); a final stride-1
    block precedes the 1-channel patch-score head, optionally followed by a
    sigmoid.
    """

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, gpu_ids=[]):
        super(NLayerDiscriminator, self).__init__()
        self.gpu_ids = gpu_ids
        kernel = 4
        pad = int(np.ceil((kernel - 1) / 2))
        # First block: no normalization, LeakyReLU only.
        layers = [nn.Conv2d(input_nc, ndf, kernel_size=kernel, stride=2, padding=pad),
                  nn.LeakyReLU(0.2, True)]
        mult = 1
        for n in range(1, n_layers):
            prev_mult, mult = mult, min(2 ** n, 8)
            layers += [nn.Conv2d(ndf * prev_mult, ndf * mult, kernel_size=kernel, stride=2, padding=pad),
                       norm_layer(ndf * mult, affine=True),
                       nn.LeakyReLU(0.2, True)]
        # One extra block at stride 1 before the scoring head.
        prev_mult, mult = mult, min(2 ** n_layers, 8)
        layers += [nn.Conv2d(ndf * prev_mult, ndf * mult, kernel_size=kernel, stride=1, padding=pad),
                   norm_layer(ndf * mult, affine=True),
                   nn.LeakyReLU(0.2, True),
                   nn.Conv2d(ndf * mult, 1, kernel_size=kernel, stride=1, padding=pad)]
        if use_sigmoid:
            layers.append(nn.Sigmoid())
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        """Score the input; uses data_parallel when gpu_ids are configured
        and the input lives on a CUDA device."""
        if len(self.gpu_ids) and isinstance(input.data, torch.cuda.FloatTensor):
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        return self.model(input)
def rla_mobilenetv2_k6_eca(eca=True):
    """RLA-MobileNetV2 with 6 recurrent-layer-aggregation channels and,
    by default, ECA attention enabled."""
    print('Constructing rla_mobilenetv2_k6_eca......')
    return RLA_MobileNetV2(rla_channel=6, ECA=eca)
class WDS(nn.Module):
    """Four-branch wavelet-subband segmentation network.

    Each of the four inputs (the LL, LH, HL, HH wavelet subbands) runs
    through its own VGG-style stack: four 64-channel blocks, a 2x max-pool,
    four 128-channel blocks, and a second 2x max-pool. The four 128-channel
    branch outputs are concatenated, fused to ``num_classes`` score maps,
    and bilinearly upsampled back to the input resolution.
    """

    def __init__(self, in_channels, num_classes):
        super(WDS, self).__init__()
        # Branch 1: LL subband.
        self.b1_1 = basic_block(in_channels, 64)
        self.b1_2 = basic_block(64, 64)
        self.b1_3 = basic_block(64, 64)
        self.b1_4 = basic_block(64, 64)
        self.b1_5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.b1_6 = basic_block(64, 128)
        self.b1_7 = basic_block(128, 128)
        self.b1_8 = basic_block(128, 128)
        self.b1_9 = basic_block(128, 128)
        self.b1_10 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # Branch 2: LH subband.
        self.b2_1 = basic_block(in_channels, 64)
        self.b2_2 = basic_block(64, 64)
        self.b2_3 = basic_block(64, 64)
        self.b2_4 = basic_block(64, 64)
        self.b2_5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.b2_6 = basic_block(64, 128)
        self.b2_7 = basic_block(128, 128)
        self.b2_8 = basic_block(128, 128)
        self.b2_9 = basic_block(128, 128)
        self.b2_10 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # Branch 3: HL subband.
        self.b3_1 = basic_block(in_channels, 64)
        self.b3_2 = basic_block(64, 64)
        self.b3_3 = basic_block(64, 64)
        self.b3_4 = basic_block(64, 64)
        self.b3_5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.b3_6 = basic_block(64, 128)
        self.b3_7 = basic_block(128, 128)
        self.b3_8 = basic_block(128, 128)
        self.b3_9 = basic_block(128, 128)
        self.b3_10 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # Branch 4: HH subband.
        self.b4_1 = basic_block(in_channels, 64)
        self.b4_2 = basic_block(64, 64)
        self.b4_3 = basic_block(64, 64)
        self.b4_4 = basic_block(64, 64)
        self.b4_5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        self.b4_6 = basic_block(64, 128)
        self.b4_7 = basic_block(128, 128)
        self.b4_8 = basic_block(128, 128)
        self.b4_9 = basic_block(128, 128)
        self.b4_10 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
        # Fuse the concatenated 4 x 128-channel branch outputs into class scores.
        self.output_layer = nn.Sequential(nn.Conv2d((128 * 4), 128, kernel_size=3, stride=1, padding=1, bias=False), nn.ReLU(inplace=True), nn.Conv2d(128, num_classes, kernel_size=3, stride=1, padding=1, bias=False))
        # Standard initialization: Kaiming for convs, unit/zero for BN,
        # small normal for linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, std=0.001)
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0)

    def forward(self, LL, LH, HL, HH):
        """Run the four subband branches, fuse them, and upsample the class
        scores back to the LL input's spatial size (N, num_classes, H, W)."""
        (H, W) = (LL.shape[2], LL.shape[3])
        LL = self.b1_1(LL)
        LL = self.b1_2(LL)
        LL = self.b1_3(LL)
        LL = self.b1_4(LL)
        LL = self.b1_5(LL)
        LL = self.b1_6(LL)
        LL = self.b1_7(LL)
        LL = self.b1_8(LL)
        LL = self.b1_9(LL)
        LL = self.b1_10(LL)
        LH = self.b2_1(LH)
        LH = self.b2_2(LH)
        LH = self.b2_3(LH)
        LH = self.b2_4(LH)
        LH = self.b2_5(LH)
        LH = self.b2_6(LH)
        LH = self.b2_7(LH)
        LH = self.b2_8(LH)
        LH = self.b2_9(LH)
        LH = self.b2_10(LH)
        HL = self.b3_1(HL)
        HL = self.b3_2(HL)
        HL = self.b3_3(HL)
        HL = self.b3_4(HL)
        HL = self.b3_5(HL)
        HL = self.b3_6(HL)
        HL = self.b3_7(HL)
        HL = self.b3_8(HL)
        HL = self.b3_9(HL)
        HL = self.b3_10(HL)
        HH = self.b4_1(HH)
        HH = self.b4_2(HH)
        HH = self.b4_3(HH)
        HH = self.b4_4(HH)
        HH = self.b4_5(HH)
        HH = self.b4_6(HH)
        HH = self.b4_7(HH)
        HH = self.b4_8(HH)
        HH = self.b4_9(HH)
        HH = self.b4_10(HH)
        # Concatenate branch features along channels and fuse.
        x = torch.cat((LL, LH, HL, HH), dim=1)
        x = self.output_layer(x)
        x = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)
        return x
class KITTI_Odo(object):
    """Prepares the KITTI odometry training sequences for training.

    Spawns worker processes that convert each raw sequence found in
    ``data_dir``, then merges the per-sequence ``train.txt`` files (and
    copies the calibration files) into ``output_dir``.
    """

    def __init__(self, data_dir):
        # Root directory holding the raw KITTI odometry sequences.
        self.data_dir = data_dir
        # Sequence ids conventionally used for training on KITTI odometry.
        self.train_seqs = ['00', '01', '02', '03', '04', '05', '06', '07', '08']

    def __len__(self):
        raise NotImplementedError

    def prepare_data_mp(self, output_dir, stride=1):
        """Convert all training sequences in parallel and merge their indexes.

        The parallel conversion step is skipped when ``output_dir/train.txt``
        already exists; the merge/copy step always runs.

        Raises:
            FileNotFoundError: if ``self.data_dir`` does not exist.
        """
        import shutil

        num_processes = 16
        processes = []
        q = mp.Queue()
        if not os.path.isfile(os.path.join(output_dir, 'train.txt')):
            # exist_ok: a previous partial run may have created the directory
            # (the old plain makedirs crashed in that case).
            os.makedirs(output_dir, exist_ok=True)
            print('Preparing sequence data....')
            if not os.path.isdir(self.data_dir):
                # Was a bare `raise` (RuntimeError: no active exception);
                # raise a meaningful exception instead.
                raise FileNotFoundError('KITTI data directory not found: {}'.format(self.data_dir))
            for d in os.listdir(self.data_dir):
                if d in self.train_seqs:
                    q.put(d)
            for _ in range(num_processes):
                p = mp.Process(target=process_folder, args=(q, self.data_dir, output_dir, stride))
                p.start()
                processes.append(p)
            for p in processes:
                p.join()
        # Merge the per-sequence index files and copy calibration data;
        # context managers close the handles the original code leaked.
        with open(os.path.join(output_dir, 'train.txt'), 'w') as merged:
            for d in self.train_seqs:
                with open(os.path.join(output_dir, d, 'train.txt'), 'r') as train_file:
                    merged.writelines(train_file.readlines())
                # shutil.copyfile replaces the non-portable os.system('cp ...').
                shutil.copyfile(os.path.join(self.data_dir, d, 'calib.txt'),
                                os.path.join(output_dir, d, 'calib.txt'))
        print('Data Preparation Finished.')

    def __getitem__(self, idx):
        raise NotImplementedError
class Mlp(nn.Module):
    """Feed-forward block with a depth-wise conv between the two linears.

    fc1 -> DWConv -> activation -> dropout -> fc2 -> dropout; the spatial
    extent (H, W) is needed by the depth-wise conv to restore 2-D layout.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0):
        super().__init__()
        # Fall back to the input width when hidden/output widths are omitted.
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.dwconv = DWConv(hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x, H, W):
        out = self.act(self.dwconv(self.fc1(x), H, W))
        out = self.drop(out)
        out = self.drop(self.fc2(out))
        return out
def conv_resblock_two(in_channels, out_channels, stride=1):
    """3x3 conv + ReLU followed by two residual blocks, as one Sequential."""
    layers = [
        conv3x3(in_channels, out_channels, stride),
        nn.ReLU(),
        ResBlock(out_channels),
        ResBlock(out_channels),
    ]
    return nn.Sequential(*layers)
class FactoryType(ValueType):
    """ValueType that dispatches (de)serialization by XML tag / Python type.

    ``typeMap`` maps an XML tag name to a Python type; the inverse mapping
    is derived once at construction for reverse lookups in ``get_name``.
    """

    def __init__(self, name, typeMap):
        self.name = name
        self.typeMap = typeMap
        # Inverse mapping: Python type -> XML tag name.
        self.nameMap = {mapped_type: tag for tag, mapped_type in typeMap.items()}

    def from_xml(self, node):
        """Deserialize *node* via the type registered for its tag."""
        cur_type = self.typeMap.get(node.tag)
        if cur_type is None:
            raise Exception('Invalid {} tag: {}'.format(self.name, node.tag))
        return get_type(cur_type).from_xml(node)

    def get_name(self, obj):
        """Return the XML tag registered for obj's concrete type."""
        cur_type = type(obj)
        name = self.nameMap.get(cur_type)
        if name is None:
            raise Exception('Invalid {} type: {}'.format(self.name, cur_type))
        return name

    def write_xml(self, node, obj):
        """Delegate serialization to the object itself."""
        obj.write_xml(node)
def is_spark_below_2_2():
    """Return True when the installed pyspark is older than 2.2 (or its
    version cannot be determined)."""
    import pyspark
    if hasattr(pyspark, 'version'):
        major_minor = '.'.join(pyspark.version.__version__.split('.')[:2])
        if compare_version(major_minor, '2.2') >= 0:
            return False
    # No version attribute -> conservatively treat as pre-2.2.
    return True
def run_circuit(num_qubits):
    """Apply Hadamard followed by Rz(pi/3) to every qubit of a fresh register."""
    reg = iqs.QubitRegister(num_qubits, 'base', 0, 0)
    angle = np.pi / 3
    for qubit in range(num_qubits):
        reg.ApplyHadamard(qubit)
        reg.ApplyRotationZ(qubit, angle)
    return reg
def train_autokeras(X_train, X_test, y_train, y_test, mtype, common_name_model, problemtype, classes, default_featurenames, transform_model, settings, model_session):
    """Train an AutoKeras structured-data model and export it as a Keras .h5.

    Args:
        X_train/X_test/y_train/y_test: train/test splits.
        mtype: 'c' for classification, 'r' for regression.
        common_name_model: basename for the exported .h5 file.
        (remaining args are part of the common training interface and unused here)

    Returns:
        (model_name, model_dir, files): exported file name, its directory,
        and the list of artifacts produced.

    Raises:
        ValueError: if ``mtype`` is neither 'c' nor 'r' (previously this
        fell through and crashed later with NameError on ``model``).
    """
    files = list()
    model_name = common_name_model
    if mtype == 'c':
        project_dir = 'structured_data_classifier'
        make_model = ak.StructuredDataClassifier
    elif mtype == 'r':
        project_dir = 'structured_data_regressor'
        make_model = ak.StructuredDataRegressor
    else:
        raise ValueError("mtype must be 'c' (classification) or 'r' (regression), got {!r}".format(mtype))
    # Remove stale AutoKeras trial state from a previous run before the
    # model constructor recreates the project directory.
    if project_dir in os.listdir():
        shutil.rmtree(project_dir)
    model = make_model(max_trials=100)
    model.fit(X_train, y_train)
    files.append(project_dir)
    predictions = model.predict(X_test).flatten()
    print(predictions)
    # Export the best model found during the search as a plain Keras model.
    model = model.export_model()
    print(type(model))
    model_name = model_name + '.h5'
    model.save(model_name)
    files.append(model_name)
    model_dir = os.getcwd()
    return (model_name, model_dir, files)
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that understands numpy scalar and array types."""

    def default(self, obj):
        # Arrays become lists, numpy scalars become native Python numbers;
        # anything else falls through to the base class (which raises).
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        return super().default(obj)
def mixture_function(X, w0=1.0, w1=1.0):
    """Weighted mixture of a sqrt term and a log1p term over column sums of X."""
    col_sums = X.sum(axis=0)
    sqrt_term = numpy.sqrt(col_sums).sum()
    log_term = numpy.log1p(col_sums).sum()
    return w0 * sqrt_term + w1 * log_term
_module
class FMF_Concat_VN(SingleStageDetector):
    """Single-stage voxel detector that can also expose its intermediate
    BEV and voxel features (``forward_two_stage``), e.g. for a downstream
    refinement stage or feature fusion.
    """

    def __init__(self, reader, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None):
        super(FMF_Concat_VN, self).__init__(reader, backbone, neck, bbox_head, train_cfg, test_cfg, pretrained)

    def extract_feat(self, data):
        """Run reader -> backbone (-> optional neck).

        Returns:
            (x, voxel_feature): BEV feature map and per-voxel features.
        """
        input_features = self.reader(data['features'], data['num_voxels'])
        (x, voxel_feature) = self.backbone(input_features, data['coors'], data['batch_size'], data['input_shape'])
        if self.with_neck:
            x = self.neck(x)
        return (x, voxel_feature)

    def forward(self, example, return_loss=True, **kwargs):
        """Standard forward pass.

        Returns head losses when ``return_loss`` is True, otherwise the
        decoded predictions.
        """
        voxels = example['voxels']
        coordinates = example['coordinates']
        num_points_in_voxel = example['num_points']
        num_voxels = example['num_voxels']
        batch_size = len(num_voxels)
        data = dict(features=voxels, num_voxels=num_points_in_voxel, coors=coordinates, batch_size=batch_size, input_shape=example['shape'][0])
        (x, _) = self.extract_feat(data)
        preds = self.bbox_head(x)
        if return_loss:
            return self.bbox_head.loss(example, preds)
        else:
            return self.bbox_head.predict(example, preds, self.test_cfg)

    def forward_two_stage(self, example, return_loss=True, **kwargs):
        """Forward pass that additionally returns intermediate features.

        Returns:
            (boxes, bev_feature, voxel_feature, loss): decoded boxes, the
            BEV feature map, the voxel features, and the head loss (None
            when ``return_loss`` is False).
        """
        voxels = example['voxels']
        coordinates = example['coordinates']
        num_points_in_voxel = example['num_points']
        num_voxels = example['num_voxels']
        batch_size = len(num_voxels)
        data = dict(features=voxels, num_voxels=num_points_in_voxel, coors=coordinates, batch_size=batch_size, input_shape=example['shape'][0])
        (x, voxel_feature) = self.extract_feat(data)
        bev_feature = x
        preds = self.bbox_head(x)
        # Detach head outputs before decoding boxes so box decoding does not
        # backpropagate into the head; the original `preds` keep gradients
        # for the loss below.
        new_preds = []
        for pred in preds:
            new_pred = {}
            for (k, v) in pred.items():
                new_pred[k] = v.detach()
            new_preds.append(new_pred)
        boxes = self.bbox_head.predict(example, new_preds, self.test_cfg)
        if return_loss:
            return (boxes, bev_feature, voxel_feature, self.bbox_head.loss(example, preds))
        else:
            return (boxes, bev_feature, voxel_feature, None)
class ResBlock_SFT(nn.Module):
    """Residual block whose convolutions are modulated by SFT layers.

    The input is a pair ``(features, conditions)``; the conditions tensor is
    passed through unchanged so blocks can be chained.
    """

    def __init__(self):
        super(ResBlock_SFT, self).__init__()
        self.sft0 = SFTLayer()
        self.conv0 = nn.Conv2d(64, 64, 3, 1, 1)
        self.sft1 = SFTLayer()
        self.conv1 = nn.Conv2d(64, 64, 3, 1, 1)

    def forward(self, x):
        features, conditions = x[0], x[1]
        out = self.sft0(x)
        out = F.relu(self.conv0(out), inplace=True)
        out = self.conv1(self.sft1((out, conditions)))
        # Residual connection on the feature path only.
        return (features + out, conditions)
def prototype_ubuntu_GaussPiecewise_NormOp_VHRED_Exp15():
    """VHRED Exp15 hyper-parameters (Gaussian + piecewise latent variables)
    for the Ubuntu dialogue corpus, derived from the shared prototype state."""
    state = prototype_state()
    state.update({
        # Special-token symbols; -1 marks symbols unused by this corpus.
        'end_sym_utterance': '__eot__',
        'unk_sym': 0,
        'eos_sym': 1,
        'eod_sym': -1,
        'first_speaker_sym': -1,
        'second_speaker_sym': -1,
        'third_speaker_sym': -1,
        'minor_speaker_sym': -1,
        'voice_over_sym': -1,
        'off_screen_sym': -1,
        'pause_sym': -1,
        # Dataset locations.
        'train_dialogues': '../UbuntuData/Training.dialogues.pkl',
        'test_dialogues': '../UbuntuData/Test.dialogues.pkl',
        'valid_dialogues': '../UbuntuData/Validation.dialogues.pkl',
        'dictionary': '../UbuntuData/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Optimization / schedule.
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'UbuntuModel_',
        'updater': 'adam',
        # Architecture.
        'bidirectional_utterance_encoder': True,
        'deep_dialogue_encoder_input': False,
        'deep_utterance_decoder_out': True,
        'bs': 80,
        'utterance_decoder_gating': 'LSTM',
        'direct_connection_between_encoders_and_decoder': True,
        'qdim_encoder': 1000,
        'qdim_decoder': 2000,
        'sdim': 1000,
        'rankdim': 400,
        # Gaussian latent variable.
        'add_latent_gaussian_per_utterance': True,
        'latent_gaussian_per_utterance_dim': 100,
        'scale_latent_gaussian_variable_variances': 0.1,
        # Piecewise latent variable.
        'add_latent_piecewise_per_utterance': True,
        'latent_piecewise_per_utterance_dim': 100,
        'latent_piecewise_alpha_variables': 3,
        'scale_latent_piecewise_variable_alpha_use_softplus': False,
        'scale_latent_piecewise_variable_prior_alpha': 1.0,
        'scale_latent_piecewise_variable_posterior_alpha': 1.0,
        'condition_latent_variable_on_dialogue_encoder': True,
        # KL annealing and decoder word dropout.
        'train_latent_variables_with_kl_divergence_annealing': True,
        'kl_divergence_annealing_rate': 1.0 / 75000.0,
        'decoder_drop_previous_input_tokens': True,
        'decoder_drop_previous_input_tokens_rate': 0.75,
        'deep_utterance_decoder_input': True,
        'patience': 20,
        'kl_divergence_max_weight': 0.75,
    })
    return state
def sample_actions(rng: PRNGKey, actor_def: nn.Module, actor_params: Params, observations: np.ndarray, temperature: float=1.0) -> Tuple[(PRNGKey, jnp.ndarray)]:
    """Sample actions for *observations* from the actor policy.

    Thin public wrapper around ``_sample_actions`` (presumably jit-compiled
    — confirm at its definition); returns the advanced PRNG key together
    with the sampled actions.
    """
    return _sample_actions(rng, actor_def, actor_params, observations, temperature)
def get_local_db_shimmer(amplitudes, frequencies, max_a_factor, p_floor, p_ceil, max_p_factor):
    """Mean absolute dB amplitude difference over consecutive valid
    amplitude/frequency pairs (local dB shimmer); None when no pair passes
    validation."""
    total_db = 0
    num_valid = 0
    pair_iter = zip(shifted_sequence(frequencies, 2), shifted_sequence(amplitudes, 2))
    for (freq1, freq2), (amp1, amp2) in pair_iter:
        if not validate_amplitudes([amp1, amp2], [freq1, freq2], max_a_factor, p_floor, p_ceil, max_p_factor):
            continue
        total_db += np.abs(20 * np.log10(amp2 / amp1))
        num_valid += 1
    return total_db / num_valid if num_valid != 0 else None
def check_part_score(coco_dt, part):
    """Ensure every annotation in *coco_dt* carries a '{part}_score' entry.

    Annotations missing the part-specific score fall back to their generic
    'score'; a single warning is emitted if any fallback happened.
    """
    score_key = '{}_score'.format(part)
    used_fallback = False
    for ann in coco_dt.anns.values():
        if score_key not in ann:
            used_fallback = True
            ann[score_key] = ann['score']
    if used_fallback:
        warnings.warn("'{}_score' not found, use 'score' instead.".format(part))
def nature2022():
    """ModelParams replicating the Nature 2022 Xception training setup."""
    # The TF and Torch backends name the cross-entropy loss differently.
    if sf.backend() == 'tensorflow':
        loss_name = 'sparse_categorical_crossentropy'
    else:
        loss_name = 'CrossEntropy'
    return sf.ModelParams(
        model='xception',
        tile_px=299,
        tile_um=302,
        batch_size=128,
        epochs=[1],
        early_stop=True,
        early_stop_method='accuracy',
        dropout=0.1,
        uq=False,
        hidden_layer_width=1024,
        optimizer='Adam',
        learning_rate=0.0001,
        learning_rate_decay_steps=512,
        learning_rate_decay=0.98,
        loss=loss_name,
        normalizer='reinhard_fast',
        include_top=False,
        hidden_layers=2,
        pooling='avg',
        augment='xyrjb',
    )
def get_xla_device_type(device: 'torch.device') -> Optional[str]:
    """Return the XLA device type (e.g. 'TPU') for *device*, or None when
    torch/XLA TPU support is unavailable."""
    if not is_torch_tpu_available():
        return None
    real_device = xm.xla_real_devices([device])[0]
    return real_device.split(':')[0]
class Up34(nn.Module):
    """Decoder stage: 3x3 conv, 2x upsample, 1x1 conv, optional skip concat.

    ``forward(x1, x2)`` refines and upsamples ``x1``; when a skip tensor
    ``x2`` is given, the result is concatenated (``x2`` first) along the
    channel axis.
    """

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        # Either parameter-free bilinear upsampling or a learned transpose conv.
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            half = in_channels // 2
            self.up = nn.ConvTranspose2d(half, half, kernel_size=2, stride=2)
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, kernel_size=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x1, x2):
        out = self.conv2(self.up(self.conv1(x1)))
        if x2 is None:
            return out
        return torch.cat([x2, out], dim=1)
_config
def cfg_navigation():
    """Config for PPO visual navigation in Gibson (Husky robot).

    NOTE(review): this function builds `uuid`, `cfg`, and `image_dim` in
    local scope and returns nothing — presumably a sacred-style config
    function whose locals are captured by a (stripped) `_config` decorator;
    confirm against the experiment framework before relying on it.
    """
    uuid = 'gibson_visualnavigation'
    cfg = {}
    # PPO learner hyper-parameters (clipping, GAE, replay settings, etc.).
    cfg['learner'] = {'algo': 'ppo', 'clip_param': 0.1, 'entropy_coef': 0.0001, 'eps': 1e-05, 'gamma': 0.99, 'internal_state_size': 512, 'lr': 0.0001, 'num_steps': 512, 'num_mini_batch': 8, 'num_stack': 4, 'max_grad_norm': 0.5, 'ppo_epoch': 8, 'recurrent_policy': False, 'tau': 0.95, 'use_gae': True, 'value_loss_coef': 0.001, 'perception_network': 'AtariNet', 'perception_network_kwargs': {}, 'test': False, 'use_replay': True, 'replay_buffer_size': 10000, 'on_policy_epoch': 8, 'off_policy_epoch': 8}
    image_dim = 84
    # Environment: Gibson Husky visual-navigation task and its sensors.
    cfg['env'] = {'add_timestep': False, 'env_name': 'Gibson_HuskyVisualNavigateEnv', 'env_specific_kwargs': {'blind': False, 'blank_sensor': True, 'gibson_config': '/root/perception_module/evkit/env/gibson/husky_visual_navigate.yaml', 'start_locations_file': os.path.join(get_model_path('Beechwood'), 'first_floor_poses.csv')}, 'sensors': {'rgb_filled': None, 'features': None, 'taskonomy': None, 'map': None, 'target': None}, 'transform_fn_pre_aggregation': None, 'transform_fn_post_aggregation': None, 'num_processes': 1, 'num_val_processes': 0, 'repeat_count': 0}
    # Checkpointing / logging (visdom) locations and intervals.
    cfg['saving'] = {'checkpoint': None, 'checkpoint_configs': False, 'log_dir': LOG_DIR, 'log_interval': 1, 'save_interval': 100, 'save_dir': 'checkpoints', 'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'), 'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'), 'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'), 'vis_interval': 200, 'visdom_server': 'localhost', 'visdom_port': '8097'}
    # Run-level training knobs; seed is randomized per run.
    cfg['training'] = {'cuda': True, 'seed': random.randint(0, 1000), 'num_frames': 500000.0, 'resumable': True}
_model
def vit_base_resnet50d_224(pretrained=False, **kwargs):
    """ViT-Base hybrid using a ResNet50-D feature extractor as patch embed."""
    backbone = resnet50d(
        pretrained=pretrained,
        in_chans=kwargs.get('in_chans', 3),
        features_only=True,
        out_indices=[4],
    )
    vit_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs)
    return _create_vision_transformer_hybrid(
        'vit_base_resnet50d_224', backbone=backbone, pretrained=pretrained, **vit_kwargs)
class ExamplesTests(TestCasePlus):
    """End-to-end smoke tests for the example training scripts.

    Each test builds a CLI argument list, patches ``sys.argv``, runs the
    example script's ``main()`` on tiny fixtures, and checks the resulting
    metrics written to the output directory.
    """

    def test_run_glue(self):
        """run_glue.py on the MRPC fixture reaches reasonable accuracy."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            '''.split()
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        with patch.object(sys, 'argv', testargs):
            run_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)

    def test_run_clm(self):
        """run_clm.py trains a tiny causal LM and keeps perplexity bounded."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_clm.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            '''.split()
        # Skip on multi-GPU machines: the tiny batch does not shard cleanly.
        if (torch.cuda.device_count() > 1):
            return
        if (torch_device != 'cuda'):
            testargs.append('--no_cuda')
        with patch.object(sys, 'argv', testargs):
            run_clm.main()
            result = get_results(tmp_dir)
            self.assertLess(result['perplexity'], 100)

    def test_run_clm_config_overrides(self):
        """--config_overrides is honored when training from scratch."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_clm.py
            --model_type gpt2
            --tokenizer_name gpt2
            --train_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --config_overrides n_embd=10,n_head=2
            '''.split()
        if (torch_device != 'cuda'):
            testargs.append('--no_cuda')
        logger = run_clm.logger
        with patch.object(sys, 'argv', testargs):
            with CaptureLogger(logger) as cl:
                run_clm.main()
        # The overridden config values must appear in the logged config dump.
        self.assertIn('"n_embd": 10', cl.out)
        self.assertIn('"n_head": 2', cl.out)

    def test_run_mlm(self):
        """run_mlm.py trains a masked LM and keeps perplexity bounded."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --prediction_loss_only
            --num_train_epochs=1
            '''.split()
        if (torch_device != 'cuda'):
            testargs.append('--no_cuda')
        with patch.object(sys, 'argv', testargs):
            run_mlm.main()
            result = get_results(tmp_dir)
            self.assertLess(result['perplexity'], 42)

    def test_run_ner(self):
        """run_ner.py on the CoNLL fixture reaches accuracy/loss thresholds."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        # With multiple GPUs the effective batch grows, so train longer.
        epochs = (7 if (get_gpu_count() > 1) else 2)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            '''.split()
        if (torch_device != 'cuda'):
            testargs.append('--no_cuda')
        with patch.object(sys, 'argv', testargs):
            run_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)
            self.assertLess(result['eval_loss'], 0.5)

    def test_run_squad(self):
        """run_qa.py on the SQuAD fixture reaches minimum F1/exact-match."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=10
            --warmup_steps=2
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            '''.split()
        with patch.object(sys, 'argv', testargs):
            run_squad.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_f1'], 30)
            self.assertGreaterEqual(result['eval_exact'], 30)

    def test_run_squad_seq2seq(self):
        """run_seq2seq_qa.py (generative QA with T5) on the SQuAD fixture."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_seq2seq_qa.py
            --model_name_or_path t5-small
            --context_column context
            --question_column question
            --answer_column answers
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=10
            --warmup_steps=2
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            '''.split()
        with patch.object(sys, 'argv', testargs):
            run_squad_seq2seq.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_f1'], 30)
            self.assertGreaterEqual(result['eval_exact'], 30)

    def test_run_swag(self):
        """run_swag.py (multiple choice) on the SWAG fixture."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_swag.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=20
            --warmup_steps=2
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            '''.split()
        with patch.object(sys, 'argv', testargs):
            run_swag.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.8)

    def test_generation(self):
        """run_generation.py produces at least the requested token count."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        testargs = ['run_generation.py', '--prompt=Hello', '--length=10', '--seed=42']
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        (model_type, model_name) = ('--model_type=gpt2', '--model_name_or_path=sshleifer/tiny-gpt2')
        with patch.object(sys, 'argv', (testargs + [model_type, model_name])):
            result = run_generation.main()
            self.assertGreaterEqual(len(result[0]), 10)

    def test_run_summarization(self):
        """run_summarization.py on the XSum fixture reaches ROUGE thresholds."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=50
            --warmup_steps=8
            --do_train
            --do_eval
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            '''.split()
        with patch.object(sys, 'argv', testargs):
            run_summarization.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_rouge1'], 10)
            self.assertGreaterEqual(result['eval_rouge2'], 2)
            self.assertGreaterEqual(result['eval_rougeL'], 7)
            self.assertGreaterEqual(result['eval_rougeLsum'], 7)

    def test_run_translation(self):
        """run_translation.py (en->ro) on the WMT16 fixture reaches BLEU >= 30."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_translation.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_steps=50
            --warmup_steps=8
            --do_train
            --do_eval
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            --source_lang en_XX
            --target_lang ro_RO
            '''.split()
        with patch.object(sys, 'argv', testargs):
            run_translation.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_bleu'], 30)

    # NOTE(review): the bare string below looks like a stripped
    # `@unittest.skip('This is currently broken.')` decorator for the test
    # that follows — confirm against the upstream repository.
    ('This is currently broken.')
    def test_run_image_classification(self):
        """run_image_classification.py on the cats-vs-dogs sample."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_image_classification.py
            --output_dir {tmp_dir}
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --do_train
            --do_eval
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --remove_unused_columns False
            --overwrite_output_dir True
            --dataloader_num_workers 16
            --metric_for_best_model accuracy
            --max_steps 10
            --train_val_split 0.1
            --seed 42
            '''.split()
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        with patch.object(sys, 'argv', testargs):
            run_image_classification.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['eval_accuracy'], 0.8)

    def test_run_speech_recognition_ctc(self):
        """run_speech_recognition_ctc.py on a dummy LibriSpeech split."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_speech_recognition_ctc.py
            --output_dir {tmp_dir}
            --model_name_or_path hf-internal-testing/tiny-random-wav2vec2
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --eval_split_name validation
            --do_train
            --do_eval
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --remove_unused_columns False
            --overwrite_output_dir True
            --preprocessing_num_workers 16
            --max_steps 10
            --seed 42
            '''.split()
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        with patch.object(sys, 'argv', testargs):
            run_speech_recognition_ctc.main()
            result = get_results(tmp_dir)
            self.assertLess(result['eval_loss'], result['train_loss'])

    def test_run_speech_recognition_seq2seq(self):
        """run_speech_recognition_seq2seq.py on a dummy LibriSpeech split."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_speech_recognition_seq2seq.py
            --output_dir {tmp_dir}
            --model_name_or_path hf-internal-testing/tiny-random-speech-encoder-decoder
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --eval_split_name validation
            --do_train
            --do_eval
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 4
            --remove_unused_columns False
            --overwrite_output_dir True
            --preprocessing_num_workers 16
            --max_steps 10
            --seed 42
            '''.split()
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        with patch.object(sys, 'argv', testargs):
            run_speech_recognition_seq2seq.main()
            result = get_results(tmp_dir)
            self.assertLess(result['eval_loss'], result['train_loss'])

    def test_run_audio_classification(self):
        """run_audio_classification.py on the SUPERB keyword-spotting demo."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_audio_classification.py
            --output_dir {tmp_dir}
            --model_name_or_path hf-internal-testing/tiny-random-wav2vec2
            --dataset_name anton-l/superb_demo
            --dataset_config_name ks
            --train_split_name test
            --eval_split_name test
            --audio_column_name audio
            --label_column_name label
            --do_train
            --do_eval
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --remove_unused_columns False
            --overwrite_output_dir True
            --num_train_epochs 10
            --max_steps 50
            --seed 42
            '''.split()
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        with patch.object(sys, 'argv', testargs):
            run_audio_classification.main()
            result = get_results(tmp_dir)
            self.assertLess(result['eval_loss'], result['train_loss'])

    def test_run_wav2vec2_pretraining(self):
        """run_wav2vec2_pretraining_no_trainer.py produces a loadable model."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_wav2vec2_pretraining_no_trainer.py
            --output_dir {tmp_dir}
            --model_name_or_path hf-internal-testing/tiny-random-wav2vec2
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_names clean
            --dataset_split_names validation
            --learning_rate 1e-4
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --preprocessing_num_workers 16
            --max_train_steps 2
            --validation_split_percentage 5
            --seed 42
            '''.split()
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        with patch.object(sys, 'argv', testargs):
            run_wav2vec2_pretraining_no_trainer.main()
            model = Wav2Vec2ForPreTraining.from_pretrained(tmp_dir)
            self.assertIsNotNone(model)

    # NOTE(review): the bare string below looks like a stripped
    # `@unittest.skip('This is currently broken.')` decorator for the test
    # that follows — confirm against the upstream repository.
    ('This is currently broken.')
    def test_run_vit_mae_pretraining(self):
        """run_mae.py (ViT-MAE pretraining) produces a loadable model."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            run_mae.py
            --output_dir {tmp_dir}
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --do_train
            --do_eval
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --remove_unused_columns False
            --overwrite_output_dir True
            --dataloader_num_workers 16
            --metric_for_best_model accuracy
            --max_steps 10
            --train_val_split 0.1
            --seed 42
            '''.split()
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        with patch.object(sys, 'argv', testargs):
            run_mae.main()
            model = ViTMAEForPreTraining.from_pretrained(tmp_dir)
            self.assertIsNotNone(model)
def MakeJigsawsMultiDecoder(model, decoder, num_images=4, h_dim=(12, 16)):
    """Build a model producing `num_images` decoded image hypotheses from a
    shared hidden state, stacked along a new axis 1."""
    h = Input((h_dim[0], h_dim[1], 64), name='h_in')
    hypotheses = []
    for i in range(num_images):
        branch = AddConv2D(h, 64, [5, 5], stride=1, dropout_rate=0.0)
        branch = AddConv2D(branch, model.encoder_channels, [5, 5], stride=1, dropout_rate=0.0)
        branch = decoder(branch)
        # Insert an image-index axis so the hypotheses can be concatenated.
        expand = Lambda((lambda y: K.expand_dims(y, 1)), name=('img_hypothesis_%d' % i))
        hypotheses.append(expand(branch))
    img_out = Concatenate(axis=1)(hypotheses)
    mm = Model(h, img_out, name='multi')
    mm.compile(loss='mae', optimizer=model.getOptimizer())
    return mm
class TransformerCore(nn.Module):
    """Transformer encoder stack over embedded source sentences.

    ``forward`` returns both the per-token encoder states and a per-sentence
    context vector taken at the last unmasked position.
    """

    def __init__(self, embed, num_layers, latent_dim, hidden_size, heads, dropout=0.0, max_length=100):
        super(TransformerCore, self).__init__()
        self.embed = embed
        self.padding_idx = embed.padding_idx
        embed_dim = embed.embedding_dim
        # Scale embeddings by sqrt(d) as in the original Transformer.
        self.embed_scale = math.sqrt(embed_dim)
        assert (embed_dim == latent_dim)
        layers = [TransformerEncoderLayer(latent_dim, hidden_size, heads, dropout=dropout) for _ in range(num_layers)]
        self.layers = nn.ModuleList(layers)
        self.pos_enc = PositionalEncoding(latent_dim, self.padding_idx, (max_length + 1))
        self.reset_parameters()

    def reset_parameters(self):
        # No custom initialization; submodules initialize themselves.
        pass

    def forward(self, src_sents, masks) -> Tuple[(torch.Tensor, torch.Tensor)]:
        """Encode token ids *src_sents* with padding mask *masks*.

        Returns:
            (x, ctx): per-token states (masked positions zeroed) and the
            state at each sentence's last unmasked position.
        """
        x = (self.embed_scale * self.embed(src_sents))
        x += self.pos_enc(src_sents)
        # NOTE(review): embedding dropout is hard-coded to p=0.2 and ignores
        # the `dropout` constructor argument (which only reaches the encoder
        # layers) — confirm this is intentional.
        x = F.dropout(x, p=0.2, training=self.training)
        # Attention key mask: True at padded positions; dropped entirely
        # when no sequence contains padding.
        key_mask = masks.eq(0)
        if (not key_mask.any()):
            key_mask = None
        for layer in self.layers:
            x = layer(x, key_mask)
        # Zero out states at padded positions.
        x *= masks.unsqueeze(2)
        batch = src_sents.size(0)
        # Index of the last unmasked token per sequence.
        idx = (masks.sum(dim=1).long() - 1)
        batch_idx = torch.arange(0, batch).long().to(idx.device)
        ctx = x[(batch_idx, idx)]
        return (x, ctx)
def test_load_image_accepts_pil(mocker):
    """load_image should forward a PIL image to preprocess_image with defaults."""
    mock_preprocess = mocker.patch('imagededup.utils.image_utils.preprocess_image')
    load_image(PATH_SINGLE_IMAGE)
    mock_preprocess.assert_called_once_with(Image.open(PATH_SINGLE_IMAGE), target_size=None, grayscale=False)
def assert_filetree(args):
    """Assert that the ground-truth and prediction folders contain the same
    set of entry names."""
    gt_entries = set(os.listdir(args.gt_folder))
    pred_entries = set(os.listdir(args.pred_folder))
    mismatch_msg = '{} and {} contains different PDF files!'.format(args.gt_folder, args.pred_folder)
    assert gt_entries == pred_entries, mismatch_msg
_schema(TranspileConfigSchema)
class TranspileConfig(BaseModel):
    """Model describing a transpile configuration.

    NOTE(review): the `_schema(TranspileConfigSchema)` artifact above this
    class looks like a stripped schema-binding decorator — confirm upstream.
    """

    def __init__(self, optimization_level, **kwargs):
        """
        Args:
            optimization_level: level of optimization to perform on the circuits.
            **kwargs: forwarded to ``BaseModel.__init__``.
        """
        # Set before calling super().__init__ — presumably BaseModel's init
        # consumes the remaining kwargs and may rely on this attribute.
        self.optimization_level = optimization_level
        super().__init__(**kwargs)
def custom_mlp_args(parser):
    """Register the custom-MLP architecture CLI options on *parser*."""
    # Renamed the group variable: the original shadowed this function's name.
    group = parser.add_argument_group('custom mlp args', 'architecture arguments for the custom mlp')
    arch_help = 'architecture of the {} network, each number representing the number of neurons per layer'
    group.add_argument('--body', type=str, default='', metavar='{num}-{num}-...', help=arch_help.format('shared latent'))
    group.add_argument('--pi', type=str, default='', metavar='{num}-{num}-...', help=arch_help.format('latent policy'))
    group.add_argument('--vf', type=str, default='', metavar='{num}-{num}-...', help=arch_help.format('latent value'))
    group.add_argument('--act_fn', type=str, default='relu', choices=['relu', 'sigmoid', 'tanh'], help='activation function to be applied after each hidden layer')
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, OptimizationArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args, optim_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args, training_args, optim_args) = parser.parse_args_into_dataclasses()
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'''
distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'''))
logger.info(f'Training/evaluation parameters {training_args}')
last_checkpoint = None
if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
set_seed(training_args.seed)
if (data_args.dataset_name is not None):
raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
else:
data_files = {}
if (data_args.train_file is not None):
data_files['train'] = data_args.train_file
if (data_args.validation_file is not None):
data_files['validation'] = data_args.validation_file
if (data_args.test_file is not None):
data_files['test'] = data_args.test_file
extension = data_args.train_file.split('.')[(- 1)]
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
if training_args.do_train:
column_names = raw_datasets['train'].column_names
features = raw_datasets['train'].features
else:
column_names = raw_datasets['validation'].column_names
features = raw_datasets['validation'].features
if (data_args.text_column_name is not None):
text_column_name = data_args.text_column_name
elif ('tokens' in column_names):
text_column_name = 'tokens'
else:
text_column_name = column_names[0]
if (data_args.label_column_name is not None):
label_column_name = data_args.label_column_name
elif (f'{data_args.task_name}_tags' in column_names):
label_column_name = f'{data_args.task_name}_tags'
else:
label_column_name = column_names[1]
def get_label_list(labels):
    """Return the sorted list of distinct labels across all label sequences.

    Used for schema-free datasets whose tags arrive as strings rather than
    ClassLabel ids; sorting makes the id assignment deterministic.
    """
    distinct = set()
    for sequence in labels:
        distinct.update(sequence)
    return sorted(distinct)
# ClassLabel features already encode tags as ids with a fixed name list;
# otherwise collect the label vocabulary from the raw training tags.
labels_are_int = isinstance(features[label_column_name].feature, ClassLabel)
if labels_are_int:
    label_list = features[label_column_name].feature.names
    label_to_id = {i: i for i in range(len(label_list))}
else:
    label_list = get_label_list(raw_datasets['train'][label_column_name])
    label_to_id = {l: i for (i, l) in enumerate(label_list)}
num_labels = len(label_list)
# Instantiate config, tokenizer and model from the Hub or a local path.
config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
tokenizer_name_or_path = (model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path)
# Byte-level BPE tokenizers (gpt2/roberta) need add_prefix_space=True to
# accept pre-split word lists.
if (config.model_type in {'gpt2', 'roberta'}):
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None), add_prefix_space=True)
else:
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
# --int8 loads a previously quantized model instead of the fp32 weights.
if optim_args.int8:
    model = OptimizedModel.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
else:
    model = AutoModelForTokenClassification.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
# The word-id alignment below relies on a fast (Rust) tokenizer.
if (not isinstance(tokenizer, PreTrainedTokenizerFast)):
    raise ValueError('This example script only works for models that have a fast tokenizer. Checkout the big table of models at to find the model types that meet this requirement')
# Reconcile the fine-tuned model's own label mapping with the dataset's.
if (model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id):
    if (list(sorted(model.config.label2id.keys())) == list(sorted(label_list))):
        # Same label set: reorder ours to match the model's existing mapping.
        if labels_are_int:
            label_to_id = {i: int(model.config.label2id[l]) for (i, l) in enumerate(label_list)}
            label_list = [model.config.id2label[i] for i in range(num_labels)]
        else:
            label_list = [model.config.id2label[i] for i in range(num_labels)]
            label_to_id = {l: i for (i, l) in enumerate(label_list)}
    else:
        # BUG FIX: the original passed two positional strings to
        # logger.warning; logging treats extra positional args as %-format
        # arguments, so the detail message was never rendered. Emit a single
        # formatted message instead.
        logger.warning(
            "Your model seems to have been trained with labels, but they don't match the dataset: "
            f'model labels: {list(sorted(model.config.label2id.keys()))}, dataset labels: {list(sorted(label_list))}.'
            '\nIgnoring the model labels as a result.'
        )
# Always persist the dataset-aligned mapping on the model config.
model.config.label2id = {l: i for (i, l) in enumerate(label_list)}
model.config.id2label = {i: l for (i, l) in enumerate(label_list)}
# Map each B- (begin) label id to its I- (inside) counterpart so that
# sub-word continuation tokens can be labelled correctly when
# label_all_tokens is enabled; labels without an I- twin map to themselves.
b_to_i_label = []
for (idx, label) in enumerate(label_list):
    if (label.startswith('B-') and (label.replace('B-', 'I-') in label_list)):
        b_to_i_label.append(label_list.index(label.replace('B-', 'I-')))
    else:
        b_to_i_label.append(idx)
# Pad to max_length up front only when requested; otherwise the collator
# pads dynamically per batch.
padding = ('max_length' if data_args.pad_to_max_length else False)
def tokenize_and_align_labels(examples):
    """Tokenize pre-split words and align word-level tags to sub-word tokens.

    Special tokens get label -100 (ignored by the loss). For words split
    into several sub-words only the first piece keeps the real label, unless
    `label_all_tokens` is set, in which case later pieces get the I- variant
    of a B- label (via b_to_i_label).
    """
    tokenized_inputs = tokenizer(examples[text_column_name], padding=padding, truncation=True, max_length=data_args.max_seq_length, is_split_into_words=True)
    labels = []
    for (i, label) in enumerate(examples[label_column_name]):
        # word_ids maps each sub-token back to its source word (None for specials).
        word_ids = tokenized_inputs.word_ids(batch_index=i)
        previous_word_idx = None
        label_ids = []
        for word_idx in word_ids:
            if (word_idx is None):
                # [CLS]/[SEP]/padding carry no word id.
                label_ids.append((- 100))
            elif (word_idx != previous_word_idx):
                # First sub-token of a word keeps the word's label.
                label_ids.append(label_to_id[label[word_idx]])
            elif data_args.label_all_tokens:
                # Continuation sub-token: use the I- counterpart of a B- label.
                label_ids.append(b_to_i_label[label_to_id[label[word_idx]]])
            else:
                label_ids.append((- 100))
            previous_word_idx = word_idx
        labels.append(label_ids)
    tokenized_inputs['labels'] = labels
    return tokenized_inputs
# Apply tokenization/label alignment to each requested split, applying the
# optional max_*_samples caps first.
if training_args.do_train:
    if ('train' not in raw_datasets):
        raise ValueError('--do_train requires a train dataset')
    train_dataset = raw_datasets['train']
    if (data_args.max_train_samples is not None):
        train_dataset = train_dataset.select(range(data_args.max_train_samples))
    # main_process_first: rank 0 tokenizes; other ranks reuse its cache.
    with training_args.main_process_first(desc='train dataset map pre-processing'):
        train_dataset = train_dataset.map(tokenize_and_align_labels, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on train dataset')
if training_args.do_eval:
    if ('validation' not in raw_datasets):
        raise ValueError('--do_eval requires a validation dataset')
    eval_dataset = raw_datasets['validation']
    if (data_args.max_eval_samples is not None):
        eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    with training_args.main_process_first(desc='validation dataset map pre-processing'):
        eval_dataset = eval_dataset.map(tokenize_and_align_labels, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on validation dataset')
if training_args.do_predict:
    if ('test' not in raw_datasets):
        raise ValueError('--do_predict requires a test dataset')
    predict_dataset = raw_datasets['test']
    if (data_args.max_predict_samples is not None):
        predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
    with training_args.main_process_first(desc='prediction dataset map pre-processing'):
        predict_dataset = predict_dataset.map(tokenize_and_align_labels, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on prediction dataset')
# Dynamic-padding collator; multiples of 8 exploit fp16 tensor cores.
data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=(8 if training_args.fp16 else None))
# seqeval computes entity-level precision/recall/F1 for sequence labelling.
metric = load_metric('seqeval')
def compute_metrics(p):
    """Turn (logits, padded label ids) into seqeval metrics.

    Positions labelled -100 (special/continuation tokens) are dropped before
    scoring; per-entity metrics are optionally flattened into a single dict.
    """
    (predictions, labels) = p
    predictions = np.argmax(predictions, axis=2)
    # Keep only positions with a real label, converting ids back to strings.
    true_predictions = [[label_list[p] for (p, l) in zip(prediction, label) if (l != (- 100))] for (prediction, label) in zip(predictions, labels)]
    true_labels = [[label_list[l] for (p, l) in zip(prediction, label) if (l != (- 100))] for (prediction, label) in zip(predictions, labels)]
    results = metric.compute(predictions=true_predictions, references=true_labels)
    if data_args.return_entity_level_metrics:
        # Flatten nested per-entity dicts into '<entity>_<stat>' keys.
        final_results = {}
        for (key, value) in results.items():
            if isinstance(value, dict):
                for (n, v) in value.items():
                    final_results[f'{key}_{n}'] = v
            else:
                final_results[key] = value
        return final_results
    else:
        return {'precision': results['overall_precision'], 'recall': results['overall_recall'], 'f1': results['overall_f1'], 'accuracy': results['overall_accuracy']}
# The same metric drives best-model tracking and quantization tuning.
metric_name = optim_args.metric_name
training_args.metric_for_best_model = metric_name
trainer = NLPTrainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics)
if optim_args.tune:
    # Quantization tuning needs an eval set; static/QAT also need train data.
    if (not training_args.do_eval):
        raise ValueError('do_eval must be set to True for quantization.')
    trainer.save_model(training_args.output_dir)
    if (optim_args.quantization_approach != 'PostTrainingDynamic'):
        if (not training_args.do_train):
            raise ValueError('do_train must be set to True for static and aware training quantization.')
    if (optim_args.quantization_approach == 'QuantizationAwareTraining'):
        early_stopping_patience = 6
        early_stopping_threshold = 0.001
        trainer.add_callback(transformers.EarlyStoppingCallback(early_stopping_patience, early_stopping_threshold))
    tune_metric = metrics.Metric(name=metric_name, is_relative=optim_args.is_relative, criterion=optim_args.perf_tol)
    # Calibrate on roughly 5% of the training data.
    quantization_config = QuantizationConfig(approach=optim_args.quantization_approach, metrics=[tune_metric], sampling_size=(len(train_dataset) // 20))
    model = trainer.quantize(quantization_config)
if optim_args.benchmark_only:
    model_path = model_args.model_name_or_path
    if ('TokenClassification' not in config.architectures[0]):
        model_path = model
    trainer.benchmark(model_path, batch_size=training_args.per_device_eval_batch_size, cores_per_instance=optim_args.cores_per_instance, num_of_instance=optim_args.num_of_instance)
if (optim_args.benchmark or optim_args.accuracy_only):
    results = trainer.evaluate()
    logger.info('metrics keys: {}'.format(results.keys()))
    bert_task_acc_keys = ['eval_f1', 'eval_accuracy', 'eval_matthews_correlation', 'eval_pearson', 'eval_mcc', 'eval_spearmanr']
    ret = False
    # Report the first accuracy-style metric present, plus latency/throughput.
    for key in bert_task_acc_keys:
        if (key in results.keys()):
            ret = True
            throughput = results.get('eval_samples_per_second')
            print('Batch size = {}'.format(training_args.per_device_eval_batch_size))
            print('Finally Eval {} Accuracy: {}'.format(key, results[key]))
            # NOTE(review): assumes throughput is present and non-zero — confirm.
            print('Latency: {:.3f} ms'.format((1000 / throughput)))
            print('Throughput: {} samples/sec'.format(throughput))
            break
    assert ret, 'No metric returned, Please check inference metric!'
class DGEMO(MOBO):
    """DGEMO preset of the multi-objective Bayesian optimization pipeline."""
    # Component selection — presumably consumed by the MOBO base class to
    # instantiate each stage (confirm): GP surrogate, identity acquisition,
    # Pareto-front discovery solver, DGEMO batch selection.
    config = {'surrogate': 'gp', 'acquisition': 'identity', 'solver': 'discovery', 'selection': 'dgemo'}
def load_D_model(args, model):
    """Optionally restore the discriminator from a checkpoint.

    When args.D_resume is set and args.D_ckpt_path exists, loads the model
    weights and records the saved step on args.last_step (None if absent);
    otherwise logs why nothing was loaded. Returns None.
    """
    logging.info('')
    if args.D_resume:
        if os.path.isfile(args.D_ckpt_path):
            checkpoint = torch.load(args.D_ckpt_path)
            # Older checkpoints may lack a 'step' entry.
            args.last_step = (checkpoint['step'] if ('step' in checkpoint) else None)
            model.load_state_dict(checkpoint['state_dict'])
            logging.info("=> loaded checkpoint '{}' \n (step:{} \n )".format(args.D_ckpt_path, args.last_step))
        else:
            # BUG FIX: log-message typo 'does not exit' -> 'does not exist'.
            logging.info("=> checkpoint '{}' does not exist".format(args.D_ckpt_path))
    else:
        logging.info('=> train d from scratch')
    logging.info('')
def parse_annotation(anno_file):
    """Load a VQA-style annotation JSON and index its entries by question_id."""
    with open(anno_file, 'r') as fp:
        payload = json.load(fp)
    return {entry['question_id']: entry for entry in payload['annotations']}
def download_by_url():
    """CLI entry point: download and install a pretrained nnU-Net model by URL.

    Parses a single positional `url` from sys.argv and delegates to
    download_and_install_from_url. Existing models sharing the same trainer
    class and plans are overwritten (see the help text).
    """
    import argparse
    parser = argparse.ArgumentParser(description='Use this to download pretrained models. This script is intended to download models via url only. If you want to download one of our pretrained models, please use nnUNet_download_pretrained_model. CAREFUL: This script will overwrite existing models (if they share the same trainer class and plans as the pretrained model.')
    parser.add_argument('url', type=str, help='URL of the pretrained model')
    args = parser.parse_args()
    url = args.url
    download_and_install_from_url(url)
def send_graph_to_cpu(g):
    """Move every node and edge feature tensor of a DGL graph to CPU, in place.

    Iterates over a snapshot of the attribute schemes so popping/reassigning
    entries is safe; returns the same (mutated) graph.
    """
    for key in list(g.node_attr_schemes().keys()):
        g.ndata[key] = g.ndata.pop(key).cpu()
    for key in list(g.edge_attr_schemes().keys()):
        g.edata[key] = g.edata.pop(key).cpu()
    return g
class TestPruningPatterns(unittest.TestCase):
    """Smoke-test several weight-pruning patterns on a ResNet-18."""
    # Shared model instance for the test case.
    model = torchvision.models.resnet18()

    def test_pruning_pattern(self):
        """Run a short train loop under three per-layer pruning configurations."""
        # Per-layer overrides: 5:8 N:M magnitude pruning, channel-wise global
        # pruning, and progressive SNIP with a 4x2 block pattern.
        local_configs = [{'op_names': ['layer1.*'], 'target_sparsity': 0.5, 'pattern': '5:8', 'pruning_type': 'magnitude'}, {'op_names': ['layer2.*'], 'pattern': '1xchannel', 'pruning_scope': 'global'}, {'start_step': 2, 'end_step': 20, 'op_names': ['layer3.*'], 'target_sparsity': 0.666666, 'pattern': '4x2', 'pruning_type': 'snip_progressive', 'pruning_frequency': 5}]
        config = WeightPruningConfig(local_configs, target_sparsity=0.8, sparsity_decay_type='cos', excluded_op_names=['downsample.*'], pruning_scope='local', min_sparsity_ratio_per_op=0.1, start_step=1, end_step=10)
        compression_manager = prepare_compression(model=self.model, confs=config)
        compression_manager.callbacks.on_train_begin()
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.SGD(self.model.parameters(), lr=0.0001)
        datasets = Datasets('pytorch')
        # Random dummy images/labels are enough to drive the pruning callbacks.
        dummy_dataset = datasets['dummy'](shape=(10, 3, 224, 224), low=0.0, high=1.0, label=True)
        dummy_dataloader = PyTorchDataLoader(dummy_dataset)
        # NOTE(review): on_train_begin is already called above; this second
        # call looks unintentional — confirm before removing.
        compression_manager.callbacks.on_train_begin()
        for epoch in range(5):
            self.model.train()
            compression_manager.callbacks.on_epoch_begin(epoch)
            local_step = 0
            for (image, target) in dummy_dataloader:
                compression_manager.callbacks.on_step_begin(local_step)
                output = self.model(image)
                loss = criterion(output, target)
                optimizer.zero_grad()
                loss.backward()
                # Pruning masks are applied around the optimizer update.
                compression_manager.callbacks.on_before_optimizer_step()
                optimizer.step()
                compression_manager.callbacks.on_after_optimizer_step()
                compression_manager.callbacks.on_step_end()
                local_step += 1
            compression_manager.callbacks.on_epoch_end()
        compression_manager.callbacks.on_train_end()
        compression_manager.callbacks.on_before_eval()
        compression_manager.callbacks.on_after_eval()
class AccumMetaLoader(object):
    """Endless round-robin iterator over several named task dataloaders.

    Each entry in `loaders` is either a bare DataLoader (sampling ratio 1) or
    a (loader, ratio) tuple; ratios populate `sampling_pools` for callers that
    sample tasks by weight. Exhausted loaders are restarted transparently,
    bumping the epoch on DistributedSampler-backed loaders so shuffling stays
    correct across processes.
    """

    def __init__(self, loaders, distributed=False):
        assert isinstance(loaders, dict)
        self.name2loader = {}
        self.name2iter = {}
        self.sampling_pools = []
        for name, entry in loaders.items():
            if isinstance(entry, tuple):
                loader, ratio = entry
            elif isinstance(entry, DataLoader):
                loader, ratio = entry, 1
            else:
                raise ValueError()
            self.name2loader[name] = loader
            self.name2iter[name] = iter(loader)
            self.sampling_pools.extend([name] * ratio)
        self.distributed = distributed
        self.step = 0
        self.epoch = 0
        self.count = 0
        self.loader_num = len(loaders)
        self.names = list(self.name2iter.keys())

    def __iter__(self):
        # Infinite stream of (task_name, batch) pairs, cycling through tasks.
        while True:
            task = self.names[self.count % self.loader_num]
            self.count += 1
            try:
                batch = next(self.name2iter[task])
            except StopIteration:
                self.epoch = self.epoch + 1
                # Reshuffle per-epoch when running distributed.
                if isinstance(self.name2loader[task].sampler, DistributedSampler):
                    self.name2loader[task].sampler.set_epoch(self.epoch)
                fresh_iter = iter(self.name2loader[task])
                batch = next(fresh_iter)
                self.name2iter[task] = fresh_iter
            yield (task, batch)
class InvLrUpdaterHook(LrUpdaterHook):
    """Inverse-decay learning-rate hook.

    Scales the base LR as base_lr * (1 + gamma * progress) ** -power, where
    progress is the epoch or iteration count depending on `by_epoch` (set by
    the base class from **kwargs).
    """

    def __init__(self, gamma, power=1.0, **kwargs):
        self.gamma = gamma
        self.power = power
        super(InvLrUpdaterHook, self).__init__(**kwargs)

    def get_lr(self, trainer, base_lr):
        step = trainer.epoch if self.by_epoch else trainer.iter
        decay = (1 + self.gamma * step) ** (-self.power)
        return base_lr * decay
def main():
    """Noise-augmented training loop: one train + test pass per epoch, with TSV logging."""
    (train_loader, test_loader, criterion, model, optimizer, scheduler, starting_epoch, logfilename, model_path, device, writer) = prologue(args)
    for epoch in range(starting_epoch, args.epochs):
        before = time.time()
        (train_loss, train_acc) = train(train_loader, model, criterion, optimizer, epoch, args.noise_sd, device, writer)
        (test_loss, test_acc) = test(test_loader, model, criterion, epoch, args.noise_sd, device, writer, args.print_freq)
        after = time.time()
        # Tab-separated: epoch, wall time, lr, train loss/acc, test loss/acc.
        log(logfilename, '{}\t{:.3}\t{:.3}\t{:.3}\t{:.3}\t{:.3}\t{:.3}'.format(epoch, (after - before), scheduler.get_lr()[0], train_loss, train_acc, test_loss, test_acc))
        # NOTE(review): scheduler.step(epoch) and get_lr() are deprecated in
        # newer torch (use step() + get_last_lr()) — confirm pinned version.
        scheduler.step(epoch)
        # Checkpoint every epoch so interrupted runs can resume at epoch+1.
        torch.save({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, model_path)
def compress(model, ratio=0.5):
    """Swap the ensemble's estimators for a compressed wrapper, in place.

    Raises if the model has already been compressed (double compression
    would wrap the wrapper).
    """
    current = model.estimators_
    if isinstance(current, CompressedEstimators):
        raise Exception('The model is already compressed.')
    model.estimators_ = CompressedEstimators(model, ratio)
def unsplit_query(query, qrepr, vocab_inv):
    """Reconstruct the query string from token indices.

    Padding tokens (index 0) are dropped. 'word' joins tokens with spaces,
    'char' concatenates them, and '*gram' rebuilds the string from overlapping
    n-grams (first gram whole, then the last character of each subsequent
    gram), stripping the boundary markers at both ends.
    """
    PAD_WORD_INDEX = 0

    def decoded():
        # Materialize only when a branch actually needs the token strings.
        return [vocab_inv[int(t)] for t in query if (t != PAD_WORD_INDEX)]

    if (qrepr == 'word'):
        return ' '.join(decoded())
    if (qrepr == 'char'):
        return ''.join(decoded())
    if qrepr.endswith('gram'):
        grams = decoded()
        if not grams:
            return ''
        joined = grams[0] + ''.join(g[(- 1)] for g in grams[1:])
        return joined[1:(- 1)]
    raise Exception(('Unrecognized representation %s!' % qrepr))
def main():
    """Train/evaluate a BERT/RoBERTa triplet-prediction model for KG completion.

    Training uses a pooling architecture selected by tokenizer family.
    Prediction grid-searches a decision threshold on the dev set and applies
    it to the test triples.
    """
    parser = HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
    (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    if (os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
    # Only rank 0 (or single-process runs) logs at INFO.
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=(logging.INFO if (training_args.local_rank in [(- 1), 0]) else logging.WARN))
    logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool((training_args.local_rank != (- 1))), training_args.fp16)
    logger.info('Training/evaluation parameters %s', training_args)
    set_seed(training_args.seed)
    tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.model_cache_dir)
    is_world_process_zero = ((training_args.local_rank == (- 1)) or (torch.distributed.get_rank() == 0))
    processor = KGProcessor(data_args, tokenizer, is_world_process_zero)
    (train_data, dev_data, test_data) = processor.get_dataset(training_args)
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.model_cache_dir)
    if (not hasattr(config, 'real_vocab_size')):
        config.real_vocab_size = config.vocab_size
    if (model_args.pos_weight is not None):
        model_args.pos_weight = torch.tensor([model_args.pos_weight]).to(training_args.device)
    if model_args.pooling_model:
        print('using pooling model!')
        # NOTE(review): despite the name, tokenizer_cls holds a *model* class
        # chosen to match the tokenizer family.
        if tokenizer.__class__.__name__.startswith('Roberta'):
            tokenizer_cls = RobertaPoolingForTripletPrediction
        elif tokenizer.__class__.__name__.startswith('Bert'):
            tokenizer_cls = BertPoolingForTripletPrediction
        else:
            raise NotImplementedError()
        model = tokenizer_cls.from_pretrained(model_args.model_name_or_path, margin=data_args.margin, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.model_cache_dir, pos_weight=model_args.pos_weight, text_loss_weight=model_args.text_loss_weight)
        data_collator = PoolingCollator(tokenizer)
    else:
        raise NotImplementedError()
    trainer = KGCTrainer(model=model, args=training_args, data_collator=data_collator, train_dataset=train_data, eval_dataset=dev_data, prediction_loss_only=True)
    if data_args.group_shuffle:
        print('using group shuffle')
        trainer.use_group_shuffle(data_args.num_neg)
    if training_args.do_train:
        model_path = (model_args.model_name_or_path if ((model_args.model_name_or_path is not None) and os.path.isdir(model_args.model_name_or_path)) else None)
        trainer.train(model_path=model_path)
        trainer.save_model()
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
    if training_args.do_predict:
        label_map = {'-1': 0, '1': 1}
        trainer.model.set_predict_mode()
        trainer.prediction_loss_only = False
        trainer.data_collator.set_predict_mode()
        # Score dev triples first to pick a classification threshold.
        (dev_triples, dev_labels) = processor.get_dev_triples(return_label=True)
        dev_labels = np.array([label_map[l] for l in dev_labels], dtype=int)
        (_, tmp_features) = processor._create_examples_and_features(dev_triples)
        all_input_ids = torch.tensor([f.input_ids for f in tmp_features], dtype=torch.long)
        all_pos_indicator = torch.tensor([f.pos_indicator for f in tmp_features], dtype=torch.long)
        eval_data = DictDataset(input_ids=all_input_ids, pos_indicator=all_pos_indicator)
        trainer.data_collator.predict_mask_part = 0
        preds = trainer.predict(eval_data).predictions
        mean_dev = np.mean(preds)
        print('mean_dev: ', mean_dev)
        # Grid-search the threshold m over [-5, 5) in 1000 steps, maximizing
        # dev accuracy.
        a = (- 5)
        b = 5
        max_acc = 0
        for i in range(1000):
            m = ((((b - a) / 1000) * i) + a)
            tmp_preds = (preds - m)
            acc = np.mean(((tmp_preds > 0).astype(int) == dev_labels))
            if (acc > max_acc):
                max_acc = acc
                max_m = m
        print('max acc: ', max_acc)
        # NOTE(review): max_m is unbound if no threshold ever beats acc 0 —
        # unlikely in practice, but worth a defensive default.
        print('max m: ', max_m)
        mean_dev = max_m
        # Apply the dev-selected threshold to the test triples.
        (test_triples, test_labels) = processor.get_test_triples(return_label=True)
        test_labels = np.array([label_map[l] for l in test_labels], dtype=int)
        (_, tmp_features) = processor._create_examples_and_features(test_triples)
        all_input_ids = torch.tensor([f.input_ids for f in tmp_features], dtype=torch.long)
        all_pos_indicator = torch.tensor([f.pos_indicator for f in tmp_features], dtype=torch.long)
        eval_data = DictDataset(input_ids=all_input_ids, pos_indicator=all_pos_indicator)
        preds = trainer.predict(eval_data).predictions
        preds = (preds - mean_dev)
        acc = np.mean(((preds > 0).astype(int) == test_labels))
        print('test acc: ', acc)
def main():
    """Benchmark flash vs. hyper attention across sequence lengths 2^10..2^17."""
    args = get_arguments()
    for (arg_name, arg_var) in args.__dict__.items():
        print(f'{arg_name:<16} : {arg_var}')
    seq_lens = [(2 ** i) for i in range(10, 18)]
    attn_method = args.attn_method
    mode = args.mode
    # Fixed single-batch, 32-head, 64-dim setting for all runs.
    (batch_size, head_size, dim) = (1, 32, 64)
    print(f'mode: {mode}, attn_method: {attn_method}, batch_size: {batch_size}, head_size: {head_size}, dim: {dim}')
    causal = (not args.no_causal)
    for seq_len in seq_lens:
        # Dispatch on the chosen implementation; '-cuda' variants select the
        # CUDA kernel explicitly.
        if (attn_method == 'flash'):
            ms = run_flash_attn(batch_size, head_size, seq_len, dim, causal, mode=args.mode)
        elif (attn_method == 'flash-cuda'):
            ms = run_flash_attn(batch_size, head_size, seq_len, dim, causal, mode=args.mode, impl='cuda')
        elif (attn_method == 'hyper'):
            ms = run_hyper_attn(batch_size, head_size, seq_len, dim, causal, mode=args.mode)
        elif (attn_method == 'hyper-cuda'):
            ms = run_hyper_attn(batch_size, head_size, seq_len, dim, causal, mode=args.mode, impl='cuda')
        else:
            raise NotImplementedError
        # ms appears to be a 3-tuple of timings in milliseconds (e.g. median
        # plus low/high) — confirm against run_*_attn.
        print(f'[{mode:<8}], {attn_method}, seq_len: {seq_len:<8}, causal: {causal}, ms: {ms[0]:.5f} ({ms[1]:.5f}, {ms[2]:.5f}) | ')
def _get_inputs(input_queue, num_classes):
    """Dequeue a training batch and unpack per-example detection targets (TF pipeline).

    Returns parallel tuples of (images, box tensors, one-hot class tensors,
    optional instance masks), one entry per dequeued example.
    """
    read_data_list = input_queue.dequeue()
    # Dataset class ids are 1-based; shift to 0-based before one-hot encoding.
    label_id_offset = 1
    def extract_images_and_targets(read_data):
        image = read_data[fields.InputDataFields.image]
        location_gt = read_data[fields.InputDataFields.groundtruth_boxes]
        classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes], tf.int32)
        classes_gt -= label_id_offset
        classes_gt = util_ops.padded_one_hot_encoding(indices=classes_gt, depth=num_classes, left_pad=0)
        # Instance masks are optional; .get yields None when absent.
        masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks)
        return (image, location_gt, classes_gt, masks_gt)
    # Transpose the per-example tuples into tuples-of-fields.
    return zip(*map(extract_images_and_targets, read_data_list))
# BUG FIX: the stray '_model' statement above this function was a garbled
# '@register_model' decorator (timm's model-registry pattern) — restored so
# the model is registered and the bare-name NameError goes away.
@register_model
def skresnet34(pretrained=False, **kwargs):
    """Construct a Selective Kernel ResNet-34.

    Uses the SK attention settings (min_attn_channels=16, attn_reduction=8,
    split_input=True) with a standard [3, 4, 6, 3] BasicBlock layout.
    """
    sk_kwargs = dict(min_attn_channels=16, attn_reduction=8, split_input=True)
    model_args = dict(block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), zero_init_last_bn=False, **kwargs)
    return _create_skresnet('skresnet34', pretrained, **model_args)
def _find_roctracer_config(rocm_install_path):
    """Locate the roctracer header under a ROCm install and report its version.

    Returns {'roctracer_version_number': <composite int>}; raises ConfigError
    when no known header location exists.
    """
    def _version_triple(base_path):
        # Header moved between ROCm releases; probe both known locations.
        candidates = ['include/roctracer/roctracer.h', 'roctracer/include/roctracer.h']
        header = None
        for rel in candidates:
            full = os.path.join(base_path, rel)
            if os.path.exists(full):
                header = full
                break
        if header is None:
            raise ConfigError('roctracer version file not found in {}'.format(candidates))
        # roctracer exposes no patch macro; treat patch as 0.
        return (_get_header_version(header, 'ROCTRACER_VERSION_MAJOR'),
                _get_header_version(header, 'ROCTRACER_VERSION_MINOR'),
                0)

    (major, minor, patch) = _version_triple(rocm_install_path)
    return {'roctracer_version_number': _get_composite_version_number(major, minor, patch)}
class MazeGenerator():
    """Generate a physics-ready maze URDF from a mazelib maze.

    Walls are emitted between adjacent wall cells of the maze grid; open
    cells may receive low 'connector' bars that structurally tie wall
    segments together. The solved path is stored on self.solution.
    """

    def __init__(self, params) -> None:
        # params: dict of maze dimensions and geometry knobs (see generate()).
        self.params = params

    def has_el_prev_row(self, grid, row_idx, cell_idx):
        # True when the cell directly above exists and is a wall (== 1).
        return ((row_idx > 0) and (grid[(row_idx - 1)][cell_idx] == 1))

    def has_el_next_row(self, grid, row_idx, cell_idx):
        # True when the cell directly below exists and is a wall.
        return ((row_idx < (len(grid) - 1)) and (grid[(row_idx + 1)][cell_idx] == 1))

    def has_el_prev_col(self, grid, row_idx, cell_idx):
        # True when the cell to the left exists and is a wall.
        return ((cell_idx > 0) and (grid[row_idx][(cell_idx - 1)] == 1))

    def has_el_next_col(self, grid, row_idx, cell_idx):
        # True when the cell to the right exists and is truthy. NOTE(review):
        # omits the '== 1' comparison its siblings use — confirm intent.
        return ((cell_idx < (len(grid[row_idx]) - 1)) and grid[row_idx][(cell_idx + 1)])

    def generate(self):
        """Build the maze and return its URDF string; stores the solution path."""
        cols = int(self.params['cols'])
        rows = int(self.params['rows'])
        element_size = float(self.params['element_size'])
        element_depth = float(self.params['element_depth'])
        wall_thickness = float(self.params['wall_thickness'])
        difficulty = float(self.params['difficulty'])
        connector_strict = bool(self.params['connector_strict'])
        connector_probability = float(self.params['connector_probability'])
        connector_height = float(self.params['connector_height'])
        xy_offset = (wall_thickness / 2)
        wall_size = (element_size + wall_thickness)
        m = Maze()
        m.generator = DungeonRooms(cols, rows)
        m.solver = BacktrackingSolver()
        # Monte-Carlo search towards the requested difficulty.
        m.generate_monte_carlo(100, 10, difficulty)
        urdf = UrdfWallGenerator(self.params.get('color', [0.5, 0.5, 0.5, 1]))
        for (row_idx, row) in enumerate(m.grid):
            for (cell_idx, cell) in enumerate(row):
                curr_x = (xy_offset + (cell_idx * element_size))
                curr_y = (xy_offset + (row_idx * element_size))
                if (cell == 0):
                    # Open cell: optionally drop a low connector bar bridging
                    # wall segments on opposite sides (or on one side when
                    # connector_strict is off).
                    if (random.random() < connector_probability):
                        has_prev_row = self.has_el_prev_row(m.grid, row_idx, cell_idx)
                        has_next_row = self.has_el_next_row(m.grid, row_idx, cell_idx)
                        has_prev_col = self.has_el_prev_col(m.grid, row_idx, cell_idx)
                        has_next_col = self.has_el_next_col(m.grid, row_idx, cell_idx)
                        if ((has_prev_row and has_next_row) or ((connector_strict == False) and (has_prev_row or has_next_row))):
                            urdf.add_wall(wall_thickness, (element_size * 2), connector_height, curr_x, curr_y, (connector_height / 2))
                        if ((has_prev_col and has_next_col) or ((connector_strict == False) and (has_prev_col or has_next_col))):
                            urdf.add_wall((element_size * 2), wall_thickness, connector_height, curr_x, curr_y, (connector_height / 2))
                    continue
                # Wall cell: extend full-height walls toward right/down
                # neighbours (each wall is emitted once, by its upper-left cell).
                if self.has_el_next_col(m.grid, row_idx, cell_idx):
                    urdf.add_wall(wall_size, wall_thickness, element_depth, (curr_x + (element_size / 2)), curr_y, (element_depth / 2))
                if self.has_el_next_row(m.grid, row_idx, cell_idx):
                    urdf.add_wall(wall_thickness, wall_size, element_depth, curr_x, (curr_y + (element_size / 2)), (element_depth / 2))
        self.solution = m.solutions[0]
        return urdf.get_urdf()

    def generate_to_file(self, output_path):
        """Generate the maze URDF and write it to output_path."""
        with open(output_path, 'w') as outfile:
            outfile.write(self.generate())
def build_low_latency_conv(input_frames, input_bins, n_classes=12, dropout=0.5):
    """Build a 'low-latency conv' keyword-spotting model (Keras).

    A single Conv2D spanning the full time axis, followed by two 128-unit
    linear layers and a softmax classifier. NOTE(review): the Dense(128)
    layers use activation=None (purely linear) — confirm this matches the
    reference architecture.
    """
    from keras.layers import Conv2D, Dense, Dropout, Flatten
    input_shape = (input_frames, input_bins, 1)
    model = keras.Sequential([Conv2D(186, (input_frames, 8), strides=(1, 1), padding='valid', activation='relu', use_bias=True, input_shape=input_shape), Dropout(dropout), Flatten(), Dense(128, activation=None, use_bias=True), Dropout(dropout), Dense(128, activation=None, use_bias=True), Dropout(dropout), Dense(n_classes, activation='softmax', use_bias=True)])
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
class FB15KLoader(BaseLoader):
    """Dataset loader for the FB15K knowledge-graph benchmark splits."""

    def __init__(self, dataset_path, download=False):
        # Split file names follow the original FB15K release naming scheme.
        super().__init__(dataset_path, download, raw_data_path='FB15K/raw_data', processed_data_path='FB15K/processed_data', train_name='freebase_mtr100_mte100-train.txt', valid_name='freebase_mtr100_mte100-valid.txt', test_name='freebase_mtr100_mte100-test.txt', data_name='FB15K')

    def download_action(self):
        """Fetch the raw FB15K archive via the shared downloader."""
        self.downloader.FB15K()
class Normal(Dist):
    """Gaussian helper with reparameterized sampling and diagonal/full-cov log-pdfs."""

    def __init__(self, device='cpu'):
        super().__init__()
        self.device = device
        # 2*pi kept as a tensor so log-pdf math stays on-device.
        self.c = ((2 * np.pi) * torch.ones(1).to(self.device))
        self._dist = dist.normal.Normal(torch.zeros(1).to(self.device), torch.ones(1).to(self.device))
        self.name = 'gauss'

    def sample(self, mu, v):
        """Reparameterized sample: mu + sqrt(v) * eps with eps ~ N(0, 1)."""
        eps = self._dist.sample(mu.size()).squeeze()
        scaled = eps.mul(v.sqrt())
        return scaled.add(mu)

    def log_pdf(self, x, mu, v, reduce=True, param_shape=None):
        """Diagonal-Gaussian log-density; sums over the last dim when reduce=True."""
        if (param_shape is not None):
            (mu, v) = (mu.view(param_shape), v.view(param_shape))
        # Elementwise -0.5 * (log(2*pi) + log v + (x - mu)^2 / v).
        lpdf = ((- 0.5) * ((torch.log(self.c) + v.log()) + (x - mu).pow(2).div(v)))
        if reduce:
            return lpdf.sum(dim=(- 1))
        else:
            return lpdf

    def log_pdf_full(self, x, mu, v):
        """Full-covariance log-density; v is a factor with cov = v @ v^T per batch."""
        (batch_size, d) = mu.size()
        cov = torch.einsum('bik,bjk->bij', v, v)
        assert (cov.size() == (batch_size, d, d))
        inv_cov = torch.inverse(cov)
        c = (d * torch.log(self.c))
        (_, logabsdets) = self._batch_slogdet(cov)
        xmu = (x - mu)
        # -0.5 * (d*log(2*pi) + log|cov| + (x - mu)^T cov^-1 (x - mu)).
        return ((- 0.5) * ((c + logabsdets) + torch.einsum('bi,bij,bj->b', [xmu, inv_cov, xmu])))

    def _batch_slogdet(self, cov_batch: torch.Tensor):
        """Per-matrix slogdet loop (torch.slogdet called one matrix at a time)."""
        batch_size = cov_batch.size(0)
        signs = torch.empty(batch_size, requires_grad=False).to(self.device)
        logabsdets = torch.empty(batch_size, requires_grad=False).to(self.device)
        for (i, cov) in enumerate(cov_batch):
            (signs[i], logabsdets[i]) = torch.slogdet(cov)
        return (signs, logabsdets)
class DTLZ5(DTLZ1):
    """DTLZ5 benchmark problem from the DTLZ multi-objective test suite."""

    def __init__(self, number_of_variables: int=12, number_of_objectives=3):
        super(DTLZ5, self).__init__(number_of_variables, number_of_objectives)

    def evaluate(self, solution: FloatSolution) -> FloatSolution:
        """Compute the objective values for a candidate solution, in place."""
        k = ((self.number_of_variables() - self.number_of_objectives()) + 1)
        # g: squared distance of the last k variables from their optimum 0.5.
        g = sum([((x - 0.5) ** 2) for x in solution.variables[(self.number_of_variables() - k):]])
        t = (pi / (4.0 * (1.0 + g)))
        # theta: angular meta-variables; only the first depends directly on x_0.
        theta = ([0.0] * (self.number_of_objectives() - 1))
        theta[0] = ((solution.variables[0] * pi) / 2.0)
        theta[1:] = [(t * (1.0 + ((2.0 * g) * solution.variables[i]))) for i in range(1, (self.number_of_objectives() - 1))]
        f = [(1.0 + g) for _ in range(self.number_of_objectives())]
        # Standard DTLZ trigonometric cascade: products of cosines, then one
        # sine factor for every objective except the first.
        for i in range(self.number_of_objectives()):
            for j in range((self.number_of_objectives() - (i + 1))):
                f[i] *= cos(theta[j])
            if (i != 0):
                aux = (self.number_of_objectives() - (i + 1))
                f[i] *= sin(theta[aux])
        solution.objectives = [f[x] for x in range(self.number_of_objectives())]
        return solution

    def name(self):
        return 'DTLZ5'
def get_fname(line):
    """Return the file stem (basename, no extension) of the first tab field."""
    first_field = line.split('\t')[0]
    stem, _ext = os.path.splitext(os.path.basename(first_field))
    return stem
class DistillKL(nn.Module):
    """Temperature-scaled KL-divergence distillation loss.

    Computes KL(teacher || student) over temperature-softened logits, scaled
    by T^2 (gradient-magnitude correction) and averaged over the batch.
    The teacher distribution is detached so no gradient flows into it.
    """

    def __init__(self, args):
        super(DistillKL, self).__init__()
        # Softmax temperature taken from the experiment arguments.
        self.T = args.temperature

    def forward(self, y_s, y_t):
        tau = self.T
        log_p_student = F.log_softmax(y_s / tau, dim=1)
        p_teacher = F.softmax(y_t / tau, dim=1)
        kl = F.kl_div(log_p_student, p_teacher.detach(), reduction='sum')
        return kl * (tau ** 2) / y_s.shape[0]
# BUG FIX: the stray '_module()' call above this class was a garbled
# '@DETECTORS.register_module()' decorator (mmdetection's registry pattern),
# and the docstring was truncated mid-URL — both restored.
@DETECTORS.register_module()
class ATSS(SingleStageDetector):
    """Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_."""

    def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None):
        super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained)
def extract_total_degree(result):
    """Pull the polynomial's total degree out of a 5-tuple result record."""
    (poly, _horner, _expected, _p_x, _p_x_horner) = result
    return poly.total_degree
def set_requires_grad(model, requires_grad: bool) -> None:
    """Toggle gradient tracking on every parameter of *model*, in place.

    Used to freeze/unfreeze whole sub-networks (e.g. a discriminator during
    generator updates).
    """
    for parameter in model.parameters():
        parameter.requires_grad = requires_grad
def test_encode_image_2_dim_array_encoded(cnn):
    """Grayscale (2-D) image arrays should still encode to a (1, 576) vector."""
    # TEST_IMAGE_GRAY yields a rank-2 array (no channel axis); the encoder
    # is expected to handle that shape itself.
    arr_inp = np.array(Image.open(TEST_IMAGE_GRAY))
    encoding = cnn.encode_image(image_array=arr_inp)
    assert (encoding.shape == (1, 576))
def main():
    """Adversarial-training loop: train, evaluate clean/adversarial accuracy, checkpoint."""
    model = create_network().to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # Attack used only for evaluation (robust-accuracy measurement).
    EvalAttack = config.create_evaluation_attack_method(device)
    now_train_time = 0
    for epoch in range(1, (args.epochs + 1)):
        adjust_learning_rate(optimizer, epoch)
        s_time = time()
        descrip_str = 'Training epoch: {}/{}'.format(epoch, args.epochs)
        train(args, model, device, train_loader, optimizer, epoch, descrip_str)
        # Accumulate pure training time (evaluation excluded).
        now_train_time += (time() - s_time)
        (acc, advacc) = eval_one_epoch(model, test_loader, device, EvalAttack)
        # Periodic checkpointing of the raw state dict.
        if ((epoch % args.save_freq) == 0):
            torch.save(model.state_dict(), os.path.join(config.model_dir, 'model-wideres-epoch{}.pt'.format(epoch)))
# BUG FIX: the stray "(scope='module')" line above this function was a garbled
# "@pytest.fixture(scope='module')" decorator (it is a syntax error as-is) —
# restored so the fixture is shared across the test module again.
@pytest.fixture(scope='module')
def rleaky_hidden_instance():
    """Module-scoped RLeaky neuron with hidden state and scalar recurrence."""
    return snn.RLeaky(beta=0.5, V=0.5, all_to_all=False, init_hidden=True)
def taskonomy_features_transform_collated(task_path, dtype=np.float32):
    """Build an observation transform that encodes image batches with a pretrained TaskonomyEncoder.

    Loads encoder weights from task_path onto the GPU and returns a thunk
    that, given an observation space, yields (pipeline, Box space) where the
    pipeline maps uint8 HWC image batches to CPU feature tensors.
    """
    net = TaskonomyEncoder().cuda()
    net.eval()
    checkpoint = torch.load(task_path)
    net.load_state_dict(checkpoint['state_dict'])
    def encode(x):
        with torch.no_grad():
            x = torch.Tensor(x).cuda()
            # NOTE(review): x is always a Tensor after the line above, so the
            # isinstance branch below always takes the first arm; the else
            # arm looks like dead code — confirm before cleaning up.
            if isinstance(x, torch.Tensor):
                x = torch.cuda.FloatTensor(x.cuda())
            else:
                x = torch.cuda.FloatTensor(x).cuda()
            # NHWC uint8 [0, 255] -> NCHW float in [-1, 1].
            x = (x.permute(0, 3, 1, 2) / 255.0)
            x = ((2.0 * x) - 1.0)
            return net(x)
    def _taskonomy_features_transform_thunk(obs_space):
        pipeline = (lambda x: encode(x).cpu())
        # Output space advertised as an 8x16x16 feature map in [-1, 1] —
        # presumably the encoder's output shape; confirm against TaskonomyEncoder.
        return (pipeline, spaces.Box((- 1), 1, (8, 16, 16), dtype))
    return _taskonomy_features_transform_thunk
def test_fsm_logging():
    """FSM-stage-scoped metrics are recorded only in their own stage and
    resolved to scalar custom metrics at episode end.

    The env alternates stage 0 -> 1; each metric carries ``fsm_stages`` so it
    should log a value in its stage and ``NotRecorded()`` otherwise.
    """
    env = FiniteStateMachineEnv(num_steps=2, network=Network(), initial_stage=0, stages=[FSMStage(0, [], None, [1]), FSMStage(1, [], None, [0])])
    episode = MockEpisode()
    base_env = MockBaseEnv(env)
    # Metric values 0 and 1 double as markers of which stage produced them.
    callback = RLlibMetricLogger({'stage_0_metric': MockMetric(0, 'sum', fsm_stages=[0]), 'stage_1_metric': MockMetric(1, 'sum', fsm_stages=[1])})()
    callback.on_episode_start(worker=None, base_env=base_env, policies=None, episode=episode, env_index=0)
    callback.on_episode_step(worker=None, base_env=base_env, episode=episode, env_index=0)
    # Stage 0: only stage_0_metric records; stage_1_metric is a placeholder.
    assert (episode.user_data == {'stage_0_metric': [0], 'stage_1_metric': [NotRecorded()]})
    base_env.step()
    callback.on_episode_step(worker=None, base_env=base_env, episode=episode, env_index=0)
    # Stage 1: the recording pattern flips.
    assert (episode.user_data == {'stage_0_metric': [0, NotRecorded()], 'stage_1_metric': [NotRecorded(), 1]})
    callback.on_episode_end(worker=None, base_env=base_env, policies=None, episode=episode, env_index=0)
    # At episode end the 'sum' reduction drops NotRecorded entries — scalars remain.
    assert (episode.custom_metrics == {'stage_0_metric': 0, 'stage_1_metric': 1})
def AutogradCrypTensor(tensor, requires_grad=True):
    """Deprecated shim: wrap *tensor* as a CrypTensor with ``requires_grad`` set.

    Fix: the original ``raise DeprecationWarning(...)`` made the rest of the
    body unreachable, so the documented conversion never ran. The trailing
    code shows the intent was to warn and still perform the legacy behavior,
    so this now emits the warning and proceeds.

    Args:
        tensor: a ``torch.Tensor`` (encrypted via ``crypten.cryptensor``) or
            an existing CrypTensor-like object.
        requires_grad: value assigned to the result's ``requires_grad``.

    Returns:
        The (possibly newly encrypted) tensor with ``requires_grad`` set.
    """
    import warnings  # local import keeps this shim self-contained

    warnings.warn(
        'AutogradCrypTensor is deprecated. Please set the requires_grad '
        'attribute on the CrypTensor instead.',
        DeprecationWarning,
        stacklevel=2,
    )
    if torch.is_tensor(tensor):
        tensor = crypten.cryptensor(tensor)
    tensor.requires_grad = requires_grad
    return tensor
def train(model, criterion, optimizer, scheduler, train_loader):
    """Run one training epoch and return the per-batch-averaged diagnostics.

    ``criterion`` must return ``(loss, diagnostics_dict)``; diagnostics are
    accumulated with ``Counter`` addition across batches and logged via the
    module-level ``log_interval`` helper. The LR scheduler is stepped once
    per batch.
    """
    model.train()
    running_totals = {}
    for step, (batch, _) in enumerate(train_loader):
        optimizer.zero_grad()
        batch = batch.to(args.device)
        prediction = model(batch)
        loss, diagnostics = criterion(batch, prediction, model)
        # retain_graph kept from the original — presumably the criterion
        # reuses part of the graph; TODO confirm it is actually needed.
        loss.backward(retain_graph=True)
        optimizer.step()
        scheduler.step()
        running_totals = Counter(running_totals) + Counter(diagnostics)
        log_interval(step + 1, len(train_loader), running_totals)
    num_batches = len(train_loader)
    return {key: running_totals[key] / num_batches for key in running_totals}
def snapshot(dir_path, run_name, is_best, state):
    """Persist *state* as the best-model checkpoint, but only when *is_best*.

    The file is named ``<run_name>-model_best.pth`` inside *dir_path*; a
    non-best call is a no-op.
    """
    best_path = os.path.join(dir_path, run_name + '-model_best.pth')
    if not is_best:
        return
    torch.save(state, best_path)
    logger.info('Snapshot saved to {}\n'.format(best_path))
class HDF5OutputParameter(message.Message):
    # Protocol-buffer generated message class (old-style protobuf codegen:
    # metaclass-based reflection). Do not edit by hand — regenerate from the
    # .proto definition instead.
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _HDF5OUTPUTPARAMETER
def efficientnet_b3b(in_size=(300, 300), **kwargs):
    """EfficientNet-B3b model: the B3 variant built in TensorFlow-compatible
    mode (``tf_mode=True``) with TF's BatchNorm epsilon of 1e-3.

    Extra keyword arguments are forwarded to ``get_efficientnet``.
    """
    return get_efficientnet(
        version='b3',
        in_size=in_size,
        tf_mode=True,
        bn_eps=0.001,
        model_name='efficientnet_b3b',
        **kwargs,
    )
def PreResNet110(num_class=10, block=None, attention_module=None):
    """Build a PreResNet-110 with the given residual block type.

    Args:
        num_class: number of output classes.
        block: ``PreBasicBlock`` (18 blocks per stage) or ``PreBottleNect``
            (12 blocks per stage).
        attention_module: optional attention module forwarded to the wrapper.

    Raises:
        ValueError: if ``block`` is neither supported type. The original
        fell through with ``n_blocks`` undefined (a confusing NameError)
        for any other value — including the default ``None``.
    """
    if block == PreBasicBlock:
        n_blocks = [18, 18, 18]
    elif block == PreBottleNect:
        n_blocks = [12, 12, 12]
    else:
        raise ValueError(
            'block must be PreBasicBlock or PreBottleNect, got {!r}'.format(block))
    return PreResNetWrapper(num_blocks=n_blocks, num_class=num_class, block=block, attention_module=attention_module)
class _NCEGenerator(object):
    """Generates batches of (context, target, noise) examples for
    noise-contrastive estimation over a torchtext-style dataset.

    Noise words are drawn from the unigram distribution raised to the 0.75
    power (the word2vec smoothing). Iteration position across calls is held
    in the externally supplied ``state`` object, so multiple generators can
    share progress through the dataset.
    """
    def __init__(self, dataset, batch_size, context_size, num_noise_words, state):
        self.dataset = dataset
        self.batch_size = batch_size
        self.context_size = context_size  # words taken on EACH side of the target
        self.num_noise_words = num_noise_words
        self._vocabulary = self.dataset.fields['text'].vocab
        self._sample_noise = None
        self._init_noise_distribution()
        self._state = state  # shared (doc_id, in-doc position) cursor
    def _init_noise_distribution(self):
        """Precompute the smoothed unigram distribution and bind a sampler."""
        # len(vocab) - 1: index 0 of the vocabulary (<unk>/padding) is
        # excluded — see _word_to_index, which shifts every index down by 1.
        probs = np.zeros((len(self._vocabulary) - 1))
        for (word, freq) in self._vocabulary.freqs.items():
            probs[self._word_to_index(word)] = freq
        probs = np.power(probs, 0.75)  # word2vec-style frequency smoothing
        probs /= np.sum(probs)
        self._sample_noise = (lambda : choice(probs.shape[0], self.num_noise_words, p=probs).tolist())
    def __len__(self):
        # Number of batches per full pass over the dataset.
        num_examples = sum((self._num_examples_in_doc(d) for d in self.dataset))
        return ceil((num_examples / self.batch_size))
    def vocabulary_size(self):
        # Mirrors the -1 shift used throughout (index 0 excluded).
        return (len(self._vocabulary) - 1)
    def next(self):
        """Produce the next batch, resuming from the shared state cursor."""
        (prev_doc_id, prev_in_doc_pos) = self._state.update_state(self.dataset, self.batch_size, self.context_size, self._num_examples_in_doc)
        batch = _NCEBatch(self.context_size)
        while (len(batch) < self.batch_size):
            if (prev_doc_id == len(self.dataset)):
                # Ran out of documents: return the (possibly short) batch.
                batch.torch_()
                return batch
            # A target at prev_in_doc_pos needs context_size words after it.
            if (prev_in_doc_pos <= ((len(self.dataset[prev_doc_id].text) - 1) - self.context_size)):
                self._add_example_to_batch(prev_doc_id, prev_in_doc_pos, batch)
                prev_in_doc_pos += 1
            else:
                # Advance to the next document; first valid target position
                # is context_size (needs context_size words before it too).
                prev_doc_id += 1
                prev_in_doc_pos = self.context_size
        batch.torch_()
        return batch
    def _num_examples_in_doc(self, doc, in_doc_pos=None):
        """Count remaining examples in *doc*, optionally from a position."""
        if (in_doc_pos is not None):
            if ((len(doc.text) - in_doc_pos) >= (self.context_size + 1)):
                # Target plus trailing context must still fit.
                return ((len(doc.text) - in_doc_pos) - self.context_size)
            return 0
        if (len(doc.text) >= ((2 * self.context_size) + 1)):
            # Full doc: every position with context_size words on both sides.
            return (len(doc.text) - (2 * self.context_size))
        return 0
    def _add_example_to_batch(self, doc_id, in_doc_pos, batch):
        """Append one (target+noise, context) example to *batch*."""
        doc = self.dataset[doc_id].text
        batch.doc_ids.append(doc_id)
        current_noise = self._sample_noise()
        # Position 0 holds the true target id; the rest are noise ids.
        current_noise.insert(0, self._word_to_index(doc[in_doc_pos]))
        batch.target_noise_ids.append(current_noise)
        if (self.context_size == 0):
            return
        current_context = []
        # Symmetric window around the target, excluding the target itself.
        context_indices = ((in_doc_pos + diff) for diff in range((- self.context_size), (self.context_size + 1)) if (diff != 0))
        for i in context_indices:
            context_id = self._word_to_index(doc[i])
            current_context.append(context_id)
        batch.context_ids.append(current_context)
    def _word_to_index(self, word):
        # Shift by -1 so that vocabulary index 0 is never produced.
        return (self._vocabulary.stoi[word] - 1)
class ZeroPadding3D(ZooKerasLayer):
    """Zero-padding layer for 3D data (Zoo Keras wrapper).

    Forwards ``padding`` and ``dim_ordering`` to the JVM-side layer; an
    ``input_shape`` tuple, when given, is converted to a list first.
    """
    def __init__(self, padding=(1, 1, 1), dim_ordering='th', input_shape=None, **kwargs):
        shape_arg = list(input_shape) if input_shape else None
        super(ZeroPadding3D, self).__init__(None, padding, dim_ordering, shape_arg, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.