class VisionTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = (width ** (- 0.5))
self.class_embedding = nn.Parameter((scale * torch.randn(width)))
self.positional_embedding = nn.Parameter((scale * torch.randn((((input_resolution // patch_size) ** 2) + 1), width)))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter((scale * torch.randn(width, output_dim)))
def forward(self, x: torch.Tensor):
x = self.conv1(x)
x = x.reshape(x.shape[0], x.shape[1], (- 1))
x = x.permute(0, 2, 1)
x = torch.cat([(self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[(- 1)], dtype=x.dtype, device=x.device)), x], dim=1)
x = (x + self.positional_embedding.to(x.dtype))
x = self.ln_pre(x)
x = x.permute(1, 0, 2)
x = self.transformer(x)
x = x.permute(1, 0, 2)
x = self.ln_post(x)
        return x

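# A minimal usage sketch for the VisionTransformer above, assuming the LayerNorm
# and Transformer classes it references are defined as in CLIP-style code. Note
# that forward() returns the full token sequence after ln_post and never applies
# self.proj; callers that want pooled features must project the class token themselves.
import torch

vit = VisionTransformer(input_resolution=224, patch_size=16, width=768, layers=12, heads=12, output_dim=512)
tokens = vit(torch.randn(2, 3, 224, 224))  # (2, 197, 768): 196 patches + 1 class token
cls_features = tokens[:, 0, :] @ vit.proj  # (2, 512) projected class-token features
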
class Sentence():
def __init__(self, tokens, default_date):
self.tokens = tuple(tokens)
self.vector = (sum([t.vector for t in tokens]) / len(tokens))
self.date = default_date
self.time_span = 'd'
for t in tokens:
if t.date:
self.date = t.date
self.time_span = t.time_span
break
def __eq__(self, other):
if isinstance(other, self.__class__):
return ((self.tokens == other.tokens) and numpy.array_equal(self.vector, other.vector) and (self.date == other.date) and (self.time_span == other.time_span))
else:
return False
def __ne__(self, other):
return (not (self == other))
def __hash__(self):
return hash((self.tokens, self.date, self.time_span))
def __str__(self):
return ' '.join([str(t) for t in self.tokens]).strip()
def __iter__(self):
        return iter(self.tokens)

def translate(pat):
(i, n) = (0, len(pat))
res = ''
while (i < n):
c = pat[i]
i = (i + 1)
if (c == '*'):
res = (res + '(.*)')
elif (c == '?'):
res = (res + '(.)')
elif (c == '['):
j = i
if ((j < n) and (pat[j] == '!')):
j = (j + 1)
if ((j < n) and (pat[j] == ']')):
j = (j + 1)
while ((j < n) and (pat[j] != ']')):
j = (j + 1)
if (j >= n):
res = (res + '\\[')
else:
stuff = pat[i:j].replace('\\', '\\\\')
i = (j + 1)
if (stuff[0] == '!'):
stuff = ('^' + stuff[1:])
elif (stuff[0] == '^'):
stuff = ('\\' + stuff)
res = ('%s([%s])' % (res, stuff))
else:
res = (res + re.escape(c))
    return (('(?ms)' + res) + '\\Z')

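# Quick sanity check of translate(), a glob-to-regex compiler: '*' captures any
# run of characters, '?' a single character, and '[...]' a character class.
import re

regex = translate('data_?.[ch]')
assert re.match(regex, 'data_1.c')
assert not re.match(regex, 'data_10.c')
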
def GetPageRankMP_PDirNet(Graph, PRankH, C=0.85, Eps=0.0001, MaxIter=100):
    return _snap.GetPageRankMP_PDirNet(Graph, PRankH, C, Eps, MaxIter)

def load_sparse_csr(filename):
loader = np.load(filename, allow_pickle=True)
matrix = sp.csr_matrix((loader['data'], loader['indices'], loader['indptr']), shape=loader['shape'])
    return (matrix, (loader['metadata'].item(0) if ('metadata' in loader) else None))

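# A hypothetical save-side counterpart for load_sparse_csr(), using the same
# field names the loader expects; this is a sketch, not part of the original code.
import numpy as np
import scipy.sparse as sp

def save_sparse_csr(filename, matrix: sp.csr_matrix, metadata=None):
    np.savez(filename, data=matrix.data, indices=matrix.indices,
             indptr=matrix.indptr, shape=matrix.shape, metadata=metadata)
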
@njit(**njit_dict_no_parallel)
def move_r_packet(r_packet, distance, time_explosion, numba_estimator):
doppler_factor = get_doppler_factor(r_packet.r, r_packet.mu, time_explosion)
r = r_packet.r
if (distance > 0.0):
new_r = np.sqrt((((r * r) + (distance * distance)) + (((2.0 * r) * distance) * r_packet.mu)))
r_packet.mu = (((r_packet.mu * r) + distance) / new_r)
r_packet.r = new_r
comov_nu = (r_packet.nu * doppler_factor)
comov_energy = (r_packet.energy * doppler_factor)
if nc.ENABLE_FULL_RELATIVITY:
distance *= doppler_factor
    set_estimators(r_packet, distance, numba_estimator, comov_nu, comov_energy)

def require_pyctcdecode(test_case):
if (not is_pyctcdecode_available()):
return unittest.skip('test requires pyctcdecode')(test_case)
else:
        return test_case

def load_options(args, options):
varargs = vars(args)
name = []
for o in options:
with open(o) as f:
new_opts = yaml.safe_load(f)
for (k, v) in new_opts.items():
if (k not in varargs):
                    raise ValueError(f"Option {k}={v} doesn't exist!")
varargs.update(new_opts)
name.append(o.split('/')[(- 1)].replace('.yaml', ''))
    return '_'.join(name)

def test_log_softmax_translation(log_softmax_x, log_softmax_expected):
x = (log_softmax_x + 100)
expected = log_softmax_expected
    assert_allclose(sc.log_softmax(x), expected, rtol=1e-13)

def get_mol(smiles_or_mol):
if isinstance(smiles_or_mol, str):
if (len(smiles_or_mol) == 0):
return None
mol = Chem.MolFromSmiles(smiles_or_mol)
if (mol is None):
return None
try:
Chem.SanitizeMol(mol)
except ValueError:
return None
return mol
    return smiles_or_mol

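# Usage sketch for get_mol(), assuming RDKit is installed: it accepts a SMILES
# string or an existing Mol and returns None for anything that fails to parse.
assert get_mol('CCO') is not None       # ethanol parses and sanitizes fine
assert get_mol('not-a-smiles') is None  # invalid SMILES is rejected
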
def split_text(text, n=100, character=' '):
text = text.split(character)
    return [character.join(text[i:(i + n)]).strip() for i in range(0, len(text), n)]

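# Example of split_text() chunking a string into groups of n tokens.
chunks = split_text('a b c d e', n=2)
assert chunks == ['a b', 'c d', 'e']
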
def merge_features_into_file(directory, postfix):
feat_subf = os.listdir(directory)
feat_subf = list(filter((lambda x: os.path.isdir(os.path.join(directory, x))), feat_subf))
users = os.listdir(os.path.join(directory, feat_subf[0]))
users = list(filter((lambda x: os.path.isdir(os.path.join(directory, feat_subf[0], x))), users))
for fsf in feat_subf:
for user in users:
merge_all_csv_into_one(os.path.join(directory, fsf, user))
for fsf in feat_subf:
merge_all_csv_into_one(os.path.join(directory, fsf), postfix=postfix)
    print(users, feat_subf)

@contextlib.contextmanager
def skip_exception_type(exc_type):
try:
(yield)
except exc_type as e:
        raise unittest.SkipTest(f'not implemented: {e}') from e

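# Hypothetical usage inside a unittest.TestCase method: a known
# NotImplementedError becomes a skipped test instead of a failure.
with skip_exception_type(NotImplementedError):
    run_unsupported_feature()  # placeholder for the call under test
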
class EpilogueThreadMap():
def __init__(self, threads, elements_per_access, element_size_bits, shape, iterations, delta, count):
self.threads = threads
self.elements_per_access = elements_per_access
self.element_size_bits = element_size_bits
self.shape = shape
self.iterations = iterations
self.delta = delta
        self.count = count

def min_linear_barrier(x: sf.Scalar, x_nominal: sf.Scalar, error_nominal: sf.Scalar, dist_zero_to_nominal: sf.Scalar) -> sf.Scalar:
    return min_power_barrier(x=x, x_nominal=x_nominal, error_nominal=error_nominal, dist_zero_to_nominal=dist_zero_to_nominal, power=1)

@register_metric
def fid50k_trans(opts):
opts.dataset_kwargs.update(max_size=None, xflip=False)
fid = frechet_inception_distance.compute_fid_trans(opts, max_real=None, num_gen=10000)
    return dict(fid50k_trans=fid)

def cvt_to_coco_json(img_infos, classes):
image_id = 0
coco = dict()
coco['images'] = []
coco['type'] = 'instance'
coco['categories'] = []
coco['annotations'] = []
image_set = set()
for (category_id, name) in enumerate(classes):
category_item = dict()
category_item['supercategory'] = str('none')
category_item['id'] = int(category_id)
category_item['name'] = str(name)
coco['categories'].append(category_item)
for img_dict in img_infos:
file_name = img_dict['filename']
assert (file_name not in image_set)
image_item = dict()
image_item['id'] = int(image_id)
image_item['file_name'] = str(file_name)
image_item['height'] = int(img_dict['height'])
image_item['width'] = int(img_dict['width'])
coco['images'].append(image_item)
image_set.add(file_name)
image_id += 1
    return coco

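# Usage sketch: converting a minimal img_infos list into COCO-style metadata;
# 'annotations' is intentionally left empty by this helper.
img_infos = [dict(filename='0001.jpg', width=640, height=480)]
coco = cvt_to_coco_json(img_infos, classes=('cat', 'dog'))
assert coco['images'][0]['id'] == 0 and len(coco['categories']) == 2
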
class SimpleCategoricalLSTMModel(SimpleLSTMModel):
def __init__(self, output_dim, hidden_dim, name, *args, **kwargs):
super().__init__(output_dim, hidden_dim, name)
def network_output_spec(self):
return ['all_output', 'step_output', 'step_hidden', 'step_cell', 'init_hidden', 'init_cell', 'dist']
def _build(self, obs_input, step_obs_input, step_hidden, step_cell, name=None):
(outputs, output, step_hidden, step_cell, hidden_init_var, cell_init_var) = super()._build(obs_input, step_obs_input, step_hidden, step_cell, name)
dist = tfp.distributions.OneHotCategorical(outputs)
        return (outputs, output, step_hidden, step_cell, hidden_init_var, cell_init_var, dist)

def model_fn_builder(bert_config, init_checkpoint, use_tpu, use_one_hot_embeddings):
def model_fn(features, labels, mode, params):
input_ids = features['input_ids']
input_mask = features['input_mask']
input_type_ids = features['input_type_ids']
model = modeling.BertModel(config=bert_config, is_training=False, input_ids=input_ids, input_mask=input_mask, token_type_ids=input_type_ids, use_one_hot_embeddings=use_one_hot_embeddings)
if (mode != tf.estimator.ModeKeys.PREDICT):
raise ValueError(('Only PREDICT modes are supported: %s' % mode))
tvars = tf.trainable_variables()
scaffold_fn = None
(assignment_map, _) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
all_layers = model.get_all_encoder_layers()
predictions = {}
for feature in ['embedding_tensor_name', 'service_id', 'intent_or_slot_id', 'value_id']:
predictions[feature] = features[feature]
predictions['final_layer'] = all_layers[(- 1)]
output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
return output_spec
    return model_fn

class ListObjsCommand(BaseUserCommand):
def tabulate(self, rows: List[List[Union[(str, int)]]], headers: List[str]) -> str:
col_widths = [max((len(str(x)) for x in col)) for col in zip(*rows, headers)]
row_format = ('{{:{}}} ' * len(headers)).format(*col_widths)
lines = []
lines.append(row_format.format(*headers))
lines.append(row_format.format(*[('-' * w) for w in col_widths]))
for row in rows:
lines.append(row_format.format(*row))
return '\n'.join(lines)
def run(self):
token = HfFolder.get_token()
if (token is None):
print('Not logged in')
exit(1)
try:
objs = self._api.list_objs(token, organization=self.args.organization)
except HTTPError as e:
print(e)
print(ANSI.red(e.response.text))
exit(1)
if (len(objs) == 0):
print('No shared file yet')
exit()
rows = [[obj.filename, obj.LastModified, obj.ETag, obj.Size] for obj in objs]
        print(self.tabulate(rows, headers=['Filename', 'LastModified', 'ETag', 'Size']))

class KLEnergy(object):
def __init__(self, dist):
self._dist = dist
assert (self._dist.covariance_type == 'spherical')
def _params_from_indices(self, i, j):
mu_i = self._dist.mu[i]
mu_j = self._dist.mu[j]
Sigma_i = self._dist.Sigma[i]
Sigma_j = self._dist.Sigma[j]
return (mu_i, mu_j, Sigma_i, Sigma_j)
def energy(self, i, j):
(mu_i, mu_j, Sigma_i, Sigma_j) = self._params_from_indices(i, j)
if (self._dist.covariance_type == 'spherical'):
det_fac = (self._dist.K * np.log((Sigma_j / Sigma_i)))
trace_fac = ((self._dist.K * Sigma_j) / Sigma_i)
elif (self._dist.covariance_type == 'diagonal'):
det_fac = (np.sum(np.log(Sigma_i)) - np.sum(np.log(Sigma_j)))
trace_fac = np.sum((Sigma_j / Sigma_i))
return ((- 0.5) * float((((trace_fac + np.sum((((mu_i - mu_j) ** 2) / Sigma_i))) - self._dist.K) - det_fac)))
def gradient(self, i, j):
(mu_i, mu_j, Sigma_i, Sigma_j) = self._params_from_indices(i, j)
if (self._dist.covariance_type == 'spherical'):
deltaprime = ((1.0 / Sigma_i) * (mu_i - mu_j))
dEdi = (0.5 * (((Sigma_j * ((1.0 / Sigma_i) ** 2)) + np.sum((deltaprime ** 2))) - (1.0 / Sigma_i)))
dEdj = (0.5 * ((1.0 / Sigma_j) - (1.0 / Sigma_i)))
            return (((- deltaprime), dEdi), (deltaprime, dEdj))

def cdp_delta(rho, eps):
assert (rho >= 0)
assert (eps >= 0)
if (rho == 0):
return 0
amin = 1.01
amax = (((eps + 1) / (2 * rho)) + 2)
for i in range(1000):
alpha = ((amin + amax) / 2)
derivative = (((((2 * alpha) - 1) * rho) - eps) + math.log1p(((- 1.0) / alpha)))
if (derivative < 0):
amin = alpha
else:
amax = alpha
delta = (math.exp((((alpha - 1) * ((alpha * rho) - eps)) + (alpha * math.log1p(((- 1) / alpha))))) / (alpha - 1.0))
    return min(delta, 1.0)

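# Example: converting a zero-concentrated DP budget rho into the smallest delta
# achievable at a given eps, via the bisection over the Renyi order alpha above.
delta = cdp_delta(rho=0.5, eps=2.0)
assert 0.0 <= delta <= 1.0
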
def main():
global args
parser = arg_parser()
args = parser.parse_args()
cfg = setup_cfg(args)
train_split = cfg.DATASET.TRAIN_SPLIT
val_split = cfg.DATASET.VAL_SPLIT
val_gzsl_split = cfg.DATASET.VAL_GZSL_SPLIT
train_dataset = build_dataset(cfg, train_split, cfg.DATASET.ZS_TRAIN)
train_cls_id = train_dataset.cls_id
val_gzsi_dataset = build_dataset(cfg, val_gzsl_split, cfg.DATASET.ZS_TEST)
val_gzsi_cls_id = val_gzsi_dataset.cls_id
val_unseen_dataset = build_dataset(cfg, val_split, cfg.DATASET.ZS_TEST_UNSEEN)
val_unseen_cls_id = val_unseen_dataset.cls_id
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=cfg.DATALOADER.TRAIN_X.BATCH_SIZE, shuffle=cfg.DATALOADER.TRAIN_X.SHUFFLE, num_workers=cfg.DATALOADER.NUM_WORKERS, pin_memory=True)
val_gzsi_loader = torch.utils.data.DataLoader(val_gzsi_dataset, batch_size=cfg.DATALOADER.VAL.BATCH_SIZE, shuffle=cfg.DATALOADER.VAL.SHUFFLE, num_workers=cfg.DATALOADER.NUM_WORKERS, pin_memory=True)
val_unseen_loader = torch.utils.data.DataLoader(val_unseen_dataset, batch_size=cfg.DATALOADER.VAL.BATCH_SIZE, shuffle=cfg.DATALOADER.VAL.SHUFFLE, num_workers=cfg.DATALOADER.NUM_WORKERS, pin_memory=True)
classnames = train_dataset.classnames
cls_id = {'train': train_cls_id, 'val_gzsi': val_gzsi_cls_id, 'val_unseen': val_unseen_cls_id}
test_split = cfg.DATASET.TEST_SPLIT
test_gzsl_split = cfg.DATASET.TEST_GZSL_SPLIT
test_gzsi_dataset = build_dataset(cfg, test_gzsl_split, cfg.DATASET.ZS_TEST)
test_gzsi_cls_id = test_gzsi_dataset.cls_id
test_unseen_dataset = build_dataset(cfg, test_split, cfg.DATASET.ZS_TEST_UNSEEN)
test_unseen_cls_id = test_unseen_dataset.cls_id
test_gzsi_loader = torch.utils.data.DataLoader(test_gzsi_dataset, batch_size=cfg.DATALOADER.TEST.BATCH_SIZE, shuffle=cfg.DATALOADER.TEST.SHUFFLE, num_workers=cfg.DATALOADER.NUM_WORKERS, pin_memory=True)
test_unseen_loader = torch.utils.data.DataLoader(test_unseen_dataset, batch_size=cfg.DATALOADER.TEST.BATCH_SIZE, shuffle=cfg.DATALOADER.TEST.SHUFFLE, num_workers=cfg.DATALOADER.NUM_WORKERS, pin_memory=True)
(model, arch_name) = build_model(cfg, args, classnames)
try:
prompt_params = model.prompt_params()
    except AttributeError:
prompt_params = model.module.prompt_params()
prompt_group = {'params': prompt_params}
print('num of params in prompt learner: ', len(prompt_params))
sgd_polices = [prompt_group]
if cfg.TRAINER.FINETUNE_BACKBONE:
try:
backbone_params = model.backbone_params()
        except AttributeError:
backbone_params = model.module.backbone_params()
print('num of params in backbone: ', len(backbone_params))
base_group = {'params': backbone_params, 'lr': (cfg.OPTIM.LR * cfg.OPTIM.BACKBONE_LR_MULT)}
sgd_polices.append(base_group)
if cfg.TRAINER.FINETUNE_ATTN:
try:
attn_params = model.attn_params()
        except AttributeError:
attn_params = model.module.attn_params()
print('num of params in attn layer: ', len(attn_params))
attn_group = {'params': attn_params, 'lr': (cfg.OPTIM.LR * cfg.OPTIM.ATTN_LR_MULT)}
sgd_polices.append(attn_group)
optim = torch.optim.SGD(sgd_polices, lr=cfg.OPTIM.LR, momentum=cfg.OPTIM.MOMENTUM, weight_decay=cfg.OPTIM.WEIGHT_DECAY, dampening=cfg.OPTIM.SGD_DAMPNING, nesterov=cfg.OPTIM.SGD_NESTEROV)
sched = build_lr_scheduler(optim, cfg.OPTIM)
log_folder = os.path.join(cfg.OUTPUT_DIR, arch_name)
if (not os.path.exists(log_folder)):
os.makedirs(log_folder)
logfile_path = os.path.join(log_folder, 'log.log')
if os.path.exists(logfile_path):
logfile = open(logfile_path, 'a')
else:
logfile = open(logfile_path, 'w')
command = ' '.join(sys.argv)
print(command, flush=True)
print(args, flush=True)
print(model, flush=True)
print(cfg, flush=True)
print(command, file=logfile, flush=True)
print(args, file=logfile, flush=True)
print(cfg, file=logfile, flush=True)
if (not args.auto_resume):
print(model, file=logfile, flush=True)
if args.auto_resume:
args.resume = os.path.join(log_folder, 'checkpoint.pth.tar')
best_unseen_F1 = 0
best_gzsl_F1 = 0
args.start_epoch = 0
if (args.resume is not None):
if os.path.exists(args.resume):
print(('... loading pretrained weights from %s' % args.resume))
print(('... loading pretrained weights from %s' % args.resume), file=logfile, flush=True)
checkpoint = torch.load(args.resume, map_location='cpu')
args.start_epoch = checkpoint['epoch']
best_unseen_F1 = checkpoint['best_unseen_F1']
best_gzsl_F1 = checkpoint['best_gzsl_F1']
model.load_state_dict(checkpoint['state_dict'])
optim.load_state_dict(checkpoint['optimizer'])
sched.load_state_dict(checkpoint['scheduler'])
for epoch in range(args.start_epoch, cfg.OPTIM.MAX_EPOCH):
(batch_time, losses, mAP_batches) = train_coop(train_loader, [val_unseen_loader, val_gzsi_loader], model, optim, sched, args, cfg, epoch, cls_id)
print('Train: [{0}/{1}]\tTime {batch_time.avg:.3f}\tLoss {losses.avg:.2f} \tmAP {mAP_batches.avg:.2f}'.format((epoch + 1), cfg.OPTIM.MAX_EPOCH, batch_time=batch_time, losses=losses, mAP_batches=mAP_batches), flush=True)
print('Train: [{0}/{1}]\tTime {batch_time.avg:.3f}\tLoss {losses.avg:.2f} \tmAP {mAP_batches.avg:.2f}'.format((epoch + 1), cfg.OPTIM.MAX_EPOCH, batch_time=batch_time, losses=losses, mAP_batches=mAP_batches), file=logfile, flush=True)
if ((((epoch + 1) % args.val_every_n_epochs) == 0) or (epoch == (args.stop_epochs - 1))):
(p_unseen, r_unseen, f1_unseen, mAP_unseen) = validate_zsl(val_unseen_loader, model, args, val_unseen_cls_id)
(p_gzsl, r_gzsl, f1_gzsl, mAP_gzsl) = validate_zsl(val_gzsi_loader, model, args, val_gzsi_cls_id)
print('Test: [{}/{}]\t P_unseen {:.2f} \t R_unseen {:.2f} \t F1_unseen {:.2f} \t mAP_unseen {:.2f}\t P_gzsl {:.2f} \t R_gzsl {:.2f} \t F1_gzsl {:.2f} \t mAP_gzsl {:.2f}\t'.format((epoch + 1), cfg.OPTIM.MAX_EPOCH, p_unseen, r_unseen, f1_unseen, mAP_unseen, p_gzsl, r_gzsl, f1_gzsl, mAP_gzsl), flush=True)
print('Test: [{}/{}]\t P_unseen {:.2f} \t R_unseen {:.2f} \t F1_unseen {:.2f} \t mAP_unseen {:.2f}\t P_gzsl {:.2f} \t R_gzsl {:.2f} \t F1_gzsl {:.2f} \t mAP_gzsl {:.2f}\t'.format((epoch + 1), cfg.OPTIM.MAX_EPOCH, p_unseen, r_unseen, f1_unseen, mAP_unseen, p_gzsl, r_gzsl, f1_gzsl, mAP_gzsl), file=logfile, flush=True)
is_unseen_best = (f1_unseen > best_unseen_F1)
if is_unseen_best:
best_unseen_F1 = f1_unseen
is_gzsl_best = (f1_gzsl > best_gzsl_F1)
if is_gzsl_best:
best_gzsl_F1 = f1_gzsl
            save_dict = {'epoch': (epoch + 1), 'arch': arch_name, 'state_dict': model.state_dict(), 'best_unseen_F1': best_unseen_F1, 'best_gzsl_F1': best_gzsl_F1, 'optimizer': optim.state_dict(), 'scheduler': sched.state_dict()}
save_checkpoint(save_dict, is_unseen_best, log_folder, prefix='unseen')
save_checkpoint(save_dict, is_gzsl_best, log_folder, prefix='gzsl')
print('Evaluating the best model', flush=True)
print('Evaluating the best model', file=logfile, flush=True)
best_checkpoints = os.path.join(log_folder, 'unseen_model_best.pth.tar')
print(('... loading pretrained weights from %s for the best unseen zsl' % best_checkpoints), flush=True)
print(('... loading pretrained weights from %s for the best unseen zsl' % best_checkpoints), file=logfile, flush=True)
checkpoint = torch.load(best_checkpoints, map_location='cpu')
model.load_state_dict(checkpoint['state_dict'])
best_epoch = checkpoint['epoch']
(p_unseen, r_unseen, f1_unseen, mAP_unseen) = validate_zsl(test_unseen_loader, model, args, test_unseen_cls_id)
print('Test: [{}/{}]\t p_unseen {:.2f} \t r_unseen {:.2f} \t f_unseen {:.2f} \t mAP_unseen {:.2f}'.format(best_epoch, cfg.OPTIM.MAX_EPOCH, p_unseen, r_unseen, f1_unseen, mAP_unseen), flush=True)
print('Test: [{}/{}]\t p_unseen {:.2f} \t r_unseen {:.2f} \t f_unseen {:.2f} \t mAP_unseen {:.2f}'.format(best_epoch, cfg.OPTIM.MAX_EPOCH, p_unseen, r_unseen, f1_unseen, mAP_unseen), file=logfile, flush=True)
best_checkpoints = os.path.join(log_folder, 'gzsl_model_best.pth.tar')
print(('... loading pretrained weights from %s for the best gzsl' % best_checkpoints), flush=True)
print(('... loading pretrained weights from %s for the best gzsl' % best_checkpoints), file=logfile, flush=True)
checkpoint = torch.load(best_checkpoints, map_location='cpu')
model.load_state_dict(checkpoint['state_dict'])
best_epoch = checkpoint['epoch']
    (p_gzsl, r_gzsl, f1_gzsl, mAP_gzsl) = validate_zsl(test_gzsi_loader, model, args, test_gzsi_cls_id)
    print('Test: [{}/{}]\t p_gzsl {:.2f} \t r_gzsl {:.2f} \t f_gzsl {:.2f} \t mAP_gzsl {:.2f}'.format(best_epoch, cfg.OPTIM.MAX_EPOCH, p_gzsl, r_gzsl, f1_gzsl, mAP_gzsl), flush=True)
    print('Test: [{}/{}]\t p_gzsl {:.2f} \t r_gzsl {:.2f} \t f_gzsl {:.2f} \t mAP_gzsl {:.2f}'.format(best_epoch, cfg.OPTIM.MAX_EPOCH, p_gzsl, r_gzsl, f1_gzsl, mAP_gzsl), file=logfile, flush=True)

def notebook2script(fname):
fname = Path(fname)
fname_out = f"nb_{fname.stem.split('_')[0]}.py"
main_dic = json.load(open(fname, 'r'))
code_cells = [c for c in main_dic['cells'] if is_export(c)]
module = f'''
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
# file to edit: dev_nb/{fname.name}
'''
for cell in code_cells:
module += (''.join(cell['source'][1:]) + '\n\n')
module = re.sub(' +$', '', module, flags=re.MULTILINE)
open(((fname.parent / 'exp') / fname_out), 'w').write(module[:(- 2)])
    print(f'Converted {fname} to {fname_out}')

class BatchSquareDiagonal(nn.Module):
def __init__(self, vector_size):
super().__init__()
self.vector_size = vector_size
self.diag_mask = ptu.Variable(torch.diag(torch.ones(vector_size)), requires_grad=False)
def forward(self, vector, diag_values):
M = ptu.batch_diag(diag_values=diag_values, diag_mask=self.diag_mask)
        return ptu.batch_square_vector(vector=vector, M=M)

def test():
print('SDFG memlet lifetime validation test')
N = dp.symbol('N')
N.set(20)
input = dp.ndarray([N], dp.int32)
output = dp.ndarray([N], dp.int32)
input[:] = dp.int32(5)
output[:] = dp.int32(0)
sdfg1 = SDFG('shouldntwork1')
state = sdfg1.add_state()
A = state.add_array('A', [N], dp.int32)
B = state.add_array('B', [N], dp.int32)
T = state.add_transient('T', [1], dp.int32)
tasklet_gen = state.add_tasklet('mytasklet', {'a'}, {'b'}, 'b = 5*a')
(map_entry, map_exit) = state.add_map('mymap', dict(k='0:N'))
map_entry.add_in_connector('IN_1')
map_entry.add_out_connector('OUT_1')
map_exit.add_in_connector('IN_1')
map_exit.add_out_connector('OUT_1')
state.add_edge(B, None, map_entry, 'IN_1', Memlet.simple(B, '0'))
state.add_edge(map_entry, 'OUT_1', T, None, Memlet.simple(T, '0'))
state.add_edge(T, None, map_exit, 'IN_1', Memlet.simple(B, '0'))
state.add_edge(map_exit, 'OUT_1', tasklet_gen, 'a', Memlet.simple(B, '0'))
state.add_edge(tasklet_gen, 'b', A, None, Memlet.simple(A, '0'))
try:
sdfg1.validate()
raise AssertionError('SDFG passed validation, test FAILED')
except InvalidSDFGError:
print('Test passed, exception successfully caught')
sdfg2 = SDFG('shouldntwork2')
state = sdfg2.add_state()
A = state.add_array('A', [N], dp.int32)
B = state.add_array('B', [N], dp.int32)
T = state.add_transient('T', [N], dp.int32)
tasklet_gen = state.add_tasklet('mytasklet', {'a'}, {'b'}, 'b = 5*a')
(map1_entry, map1_exit) = state.add_map('mymap1', dict(k='0:N'))
(map2_entry, map2_exit) = state.add_map('mymap2', dict(k='0:N'))
map1_entry.add_in_connector('IN_1')
map1_entry.add_out_connector('OUT_1')
map1_exit.add_in_connector('IN_1')
map1_exit.add_out_connector('OUT_1')
map2_entry.add_in_connector('IN_1')
map2_entry.add_out_connector('OUT_1')
map2_exit.add_in_connector('IN_1')
map2_exit.add_out_connector('OUT_1')
state.add_edge(A, None, map1_entry, 'IN_1', Memlet.simple(A, '0:N'))
state.add_edge(map1_entry, 'OUT_1', tasklet_gen, 'a', Memlet.simple(A, 'i'))
state.add_edge(tasklet_gen, 'b', map1_exit, 'IN_1', Memlet.simple(T, 'i'))
state.add_edge(map1_exit, 'OUT_1', map2_entry, 'IN_1', Memlet.simple(T, '0:N'))
state.add_edge(map2_entry, 'OUT_1', T, None, Memlet.simple(T, 'i'))
state.add_edge(T, None, map2_exit, 'IN_1', Memlet.simple(B, 'i'))
state.add_edge(map2_exit, 'OUT_1', B, None, Memlet.simple(B, '0:N'))
try:
sdfg2.validate()
raise AssertionError('SDFG passed validation, test FAILED')
except InvalidSDFGError:
        print('Test passed, exception successfully caught')

def config_log(log_dir, filename='log.txt'):
log_dir = ('logs/' + log_dir)
os.makedirs(log_dir, exist_ok=True)
    logging.basicConfig(filename=os.path.join(log_dir, filename), level=logging.INFO, format='%(asctime)s - %(message)s')

def LinearActivation(d_input, d_output, bias=True, zero_bias_init=False, transposed=False, initializer=None, activation=None, activate=False, weight_norm=False, **kwargs):
linear_cls = (TransposedLinear if transposed else nn.Linear)
if ((activation is not None) and activation.startswith('glu')):
d_output *= 2
linear = linear_cls(d_input, d_output, bias=bias, **kwargs)
if (initializer is not None):
get_initializer(initializer, activation)(linear.weight)
if (bias and zero_bias_init):
nn.init.zeros_(linear.bias)
if weight_norm:
linear = nn.utils.weight_norm(linear)
if (activate and (activation is not None)):
activation = Activation(activation, d_output, dim=(1 if transposed else (- 1)))
linear = nn.Sequential(linear, activation)
    return linear

def get_model_space(out_filters=64, num_layers=9):
model_space = ModelSpace()
num_pool = 4
expand_layers = [((num_layers // 4) - 1), (((num_layers // 4) * 2) - 1), (((num_layers // 4) * 3) - 1)]
for i in range(num_layers):
model_space.add_layer(i, [Operation('conv1d', filters=out_filters, kernel_size=8, activation='relu'), Operation('conv1d', filters=out_filters, kernel_size=4, activation='relu'), Operation('conv1d', filters=out_filters, kernel_size=8, activation='relu', dilation=10), Operation('conv1d', filters=out_filters, kernel_size=4, activation='relu', dilation=10), Operation('maxpool1d', filters=out_filters, pool_size=4, strides=1), Operation('avgpool1d', filters=out_filters, pool_size=4, strides=1), Operation('identity', filters=out_filters)])
if (i in expand_layers):
out_filters *= 2
    return model_space

class ModelCard():
def __init__(self, **kwargs):
warnings.warn('The class `ModelCard` is deprecated and will be removed in version 5 of Transformers', FutureWarning)
self.model_details = kwargs.pop('model_details', {})
self.intended_use = kwargs.pop('intended_use', {})
self.factors = kwargs.pop('factors', {})
self.metrics = kwargs.pop('metrics', {})
self.evaluation_data = kwargs.pop('evaluation_data', {})
self.training_data = kwargs.pop('training_data', {})
self.quantitative_analyses = kwargs.pop('quantitative_analyses', {})
self.ethical_considerations = kwargs.pop('ethical_considerations', {})
self.caveats_and_recommendations = kwargs.pop('caveats_and_recommendations', {})
for (key, value) in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error(f"Can't set {key} with value {value} for {self}")
raise err
def save_pretrained(self, save_directory_or_file):
if os.path.isdir(save_directory_or_file):
output_model_card_file = os.path.join(save_directory_or_file, MODEL_CARD_NAME)
else:
output_model_card_file = save_directory_or_file
self.to_json_file(output_model_card_file)
logger.info(f'Model card saved in {output_model_card_file}')
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
cache_dir = kwargs.pop('cache_dir', None)
proxies = kwargs.pop('proxies', None)
return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)
from_pipeline = kwargs.pop('_from_pipeline', None)
user_agent = {'file_type': 'model_card'}
if (from_pipeline is not None):
user_agent['using_pipeline'] = from_pipeline
is_local = os.path.isdir(pretrained_model_name_or_path)
if os.path.isfile(pretrained_model_name_or_path):
resolved_model_card_file = pretrained_model_name_or_path
is_local = True
else:
try:
resolved_model_card_file = cached_file(pretrained_model_name_or_path, filename=MODEL_CARD_NAME, cache_dir=cache_dir, proxies=proxies, user_agent=user_agent)
if is_local:
logger.info(f'loading model card file {resolved_model_card_file}')
else:
logger.info(f'loading model card file {MODEL_CARD_NAME} from cache at {resolved_model_card_file}')
modelcard = cls.from_json_file(resolved_model_card_file)
except (EnvironmentError, json.JSONDecodeError):
modelcard = cls()
to_remove = []
for (key, value) in kwargs.items():
if hasattr(modelcard, key):
setattr(modelcard, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info(f'Model card: {modelcard}')
if return_unused_kwargs:
return (modelcard, kwargs)
else:
return modelcard
    @classmethod
    def from_dict(cls, json_object):
return cls(**json_object)
    @classmethod
    def from_json_file(cls, json_file):
with open(json_file, 'r', encoding='utf-8') as reader:
text = reader.read()
dict_obj = json.loads(text)
return cls(**dict_obj)
def __eq__(self, other):
return (self.__dict__ == other.__dict__)
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
return (json.dumps(self.to_dict(), indent=2, sort_keys=True) + '\n')
def to_json_file(self, json_file_path):
with open(json_file_path, 'w', encoding='utf-8') as writer:
            writer.write(self.to_json_string())

def mean_drop_logit(i: int) -> Callable:
    return (lambda x: torch.mean(x[i].drop_logits, dim=0).view((- 1)))

class DglPCQM4Mv2Dataset(object):
def __init__(self, root='dataset', smiles2graph=smiles2graph):
self.original_root = root
self.smiles2graph = smiles2graph
self.folder = osp.join(root, 'pcqm4m-v2')
self.version = 1
        self.url = ''  # original download URL elided in the source
if (osp.isdir(self.folder) and (not osp.exists(osp.join(self.folder, f'RELEASE_v{self.version}.txt')))):
print('PCQM4Mv2 dataset has been updated.')
if (input('Will you update the dataset now? (y/N)\n').lower() == 'y'):
shutil.rmtree(self.folder)
super(DglPCQM4Mv2Dataset, self).__init__()
self.prepare_graph()
def download(self):
if decide_download(self.url):
path = download_url(self.url, self.original_root)
extract_zip(path, self.original_root)
os.unlink(path)
else:
print('Stop download.')
exit((- 1))
def prepare_graph(self):
processed_dir = osp.join(self.folder, 'processed')
raw_dir = osp.join(self.folder, 'raw')
pre_processed_file_path = osp.join(processed_dir, 'dgl_data_processed')
if osp.exists(pre_processed_file_path):
(self.graphs, label_dict) = load_graphs(pre_processed_file_path)
self.labels = label_dict['labels']
else:
if (not osp.exists(osp.join(raw_dir, 'data.csv.gz'))):
self.download()
data_df = pd.read_csv(osp.join(raw_dir, 'data.csv.gz'))
smiles_list = data_df['smiles']
homolumogap_list = data_df['homolumogap']
print('Converting SMILES strings into graphs...')
self.graphs = []
self.labels = []
for i in tqdm(range(len(smiles_list))):
smiles = smiles_list[i]
homolumogap = homolumogap_list[i]
graph = self.smiles2graph(smiles)
assert (len(graph['edge_feat']) == graph['edge_index'].shape[1])
assert (len(graph['node_feat']) == graph['num_nodes'])
dgl_graph = dgl.graph((graph['edge_index'][0], graph['edge_index'][1]), num_nodes=graph['num_nodes'])
dgl_graph.edata['feat'] = torch.from_numpy(graph['edge_feat']).to(torch.int64)
dgl_graph.ndata['feat'] = torch.from_numpy(graph['node_feat']).to(torch.int64)
self.graphs.append(dgl_graph)
self.labels.append(homolumogap)
self.labels = torch.tensor(self.labels, dtype=torch.float32)
split_dict = self.get_idx_split()
assert all([(not torch.isnan(self.labels[i])) for i in split_dict['train']])
assert all([(not torch.isnan(self.labels[i])) for i in split_dict['valid']])
assert all([torch.isnan(self.labels[i]) for i in split_dict['test-dev']])
assert all([torch.isnan(self.labels[i]) for i in split_dict['test-challenge']])
print('Saving...')
save_graphs(pre_processed_file_path, self.graphs, labels={'labels': self.labels})
def get_idx_split(self):
split_dict = replace_numpy_with_torchtensor(torch.load(osp.join(self.folder, 'split_dict.pt')))
return split_dict
def __getitem__(self, idx):
if isinstance(idx, int):
return (self.graphs[idx], self.labels[idx])
elif (torch.is_tensor(idx) and (idx.dtype == torch.long)):
if (idx.dim() == 0):
return (self.graphs[idx], self.labels[idx])
elif (idx.dim() == 1):
return Subset(self, idx.cpu())
raise IndexError('Only integers and long are valid indices (got {}).'.format(type(idx).__name__))
def __len__(self):
return len(self.graphs)
def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, len(self))

class SG(Enum):
PL_gather_d1coor = 0
PL_gather_d2coor = 1
PL_gather_rec = 2
PL_scatter_d1coor = 3
PL_scatter_d2coor = 4
PE_S_gather_d1coor = 5
PE_S_scatter_d1coor = 6
PE_M_gather_d1coor = 7
PE_S_mask_select = 8
PE_S_nonzero = 9
PE_S_scatter_pp_d1coor = 10
PE_S_gather_hzd = 13
PE_S_scatter_hzd = 14
PE_S_mask_selhzd = 15
PE_S_nonzero_hzd = 16
PE_S_gather_line = 17
PE_S_scatter_line = 18
PE_S_mask_seline = 19
    UNKNOWN = (- 1)

def cli_main():
parser = rerank_options.get_reranking_parser()
args = options.parse_args_and_arch(parser)
    rerank(args)

def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--eval_list', help='Text file containing names of videos to be evaluated on.', type=argparse.FileType('r'), required=True)
parser.add_argument('-s', '--score_list', help='Text file containing paths of input prediction .pt files.', type=argparse.FileType('r'), required=True)
parser.add_argument('-v', '--verbose', help='Increase output verbosity.', action='store_true')
    return parser.parse_args()

class CARDDictionary():
def __init__(self, card_path: str, umls: UMLS, class_map: Mapping[(str, int)]):
self.card_path = card_path
self.umls = umls
self.class_map = class_map
def get_words(self) -> Dict[(int, Dict[(str, List[str])])]:
vabbr: Dict[(int, Dict[(str, List[str])])] = collections.defaultdict((lambda : collections.defaultdict(list)))
with zipfile.ZipFile(self.card_path, 'r') as zp:
for fname in ('VABBR_DS_beta.txt', 'VABBR_CV_beta.txt'):
with zp.open(os.path.join('CARD_dataset_tools', fname), 'r') as raw_f:
casted_raw_f = cast(BinaryIO, raw_f)
with io.TextIOWrapper(casted_raw_f) as f:
reader = csv.DictReader(f, delimiter='\t')
for row in reader:
cui = row['CUI'].upper().split('|')
for c in cui:
for sty in self.umls.get_sty_for_cui(c):
label = self.class_map.get(sty)
if (label is None):
continue
vabbr[label][row['abbreviation'].upper()].append(row['sense'])
        return {k: dict(v) for (k, v) in vabbr.items()}

def test_deepfill_dec():
decoder = DeepFillDecoder(128, out_act_cfg=None)
assert (not decoder.with_out_activation)
decoder = DeepFillDecoder(128)
x = torch.randn((2, 128, 64, 64))
input_dict = dict(out=x)
res = decoder(input_dict)
assert (res.shape == (2, 3, 256, 256))
assert (decoder.dec2.stride == (1, 1))
assert (decoder.dec2.out_channels == 128)
assert (not decoder.dec7.with_activation)
assert ((res.min().item() >= (- 1.0)) and (res.max().item() <= 1))
if torch.cuda.is_available():
decoder = DeepFillDecoder(128).cuda()
x = torch.randn((2, 128, 64, 64)).cuda()
input_dict = dict(out=x)
res = decoder(input_dict)
assert (res.shape == (2, 3, 256, 256))
assert (decoder.dec2.stride == (1, 1))
assert (decoder.dec2.out_channels == 128)
assert (not decoder.dec7.with_activation)
assert ((res.min().item() >= (- 1.0)) and (res.max().item() <= 1))
decoder = DeepFillDecoder(128, conv_type='gated_conv', channel_factor=0.75).cuda()
x = torch.randn((2, 128, 64, 64)).cuda()
input_dict = dict(out=x)
res = decoder(input_dict)
assert (res.shape == (2, 3, 256, 256))
assert (decoder.dec2.conv.stride == (1, 1))
assert (decoder.dec2.conv.out_channels == (96 * 2))
assert (not decoder.dec7.with_feat_act)
        assert ((res.min().item() >= (- 1.0)) and (res.max().item() <= 1))

def create_model(session, vocab_size, forward_only):
model = nlc_model.NLCModel(vocab_size, FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size, FLAGS.learning_rate, FLAGS.learning_rate_decay_factor, FLAGS.dropout, forward_only=forward_only)
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if (ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path)):
print(('Reading model parameters from %s' % ckpt.model_checkpoint_path))
model.saver.restore(session, ckpt.model_checkpoint_path)
else:
print('Created model with fresh parameters.')
session.run(tf.initialize_all_variables())
    return model

def ResNeXt29(cardinality, base_width, num_classes=10):
Block = partial(ResNeXtBottleneck, cardinality=cardinality, base_width=base_width)
Block.__name__ = ResNeXtBottleneck.__name__
Block.expansion = ResNeXtBottleneck.expansion
    return ResNet(Block, layers=[3, 3, 3], filters=[64, 128, 256], num_classes=num_classes)

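# Usage sketch, assuming ResNet and ResNeXtBottleneck follow the usual CIFAR
# layout; this builds the common ResNeXt-29 (8x64d) configuration.
model = ResNeXt29(cardinality=8, base_width=64, num_classes=10)
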
class dual_softmax_loss(nn.Module):
def __init__(self):
super(dual_softmax_loss, self).__init__()
def forward(self, sim_matrix, temp=1000):
sim_matrix = ((sim_matrix * F.softmax((sim_matrix / temp), dim=0)) * len(sim_matrix))
logpt = F.log_softmax(sim_matrix, dim=(- 1))
logpt = torch.diag(logpt)
loss = (- logpt)
        return loss

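# Sketch of the dual-softmax loss on a random similarity matrix; forward()
# returns a per-sample vector (negated diagonal log-probabilities), which is
# typically reduced with .mean() before backprop.
import torch

criterion = dual_softmax_loss()
sim = torch.randn(8, 8)  # e.g. video-text similarity scores
loss = criterion(sim, temp=1000).mean()
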
class HerTd3(HER, TD3):
def __init__(self, *args, td3_kwargs, her_kwargs, base_kwargs, **kwargs):
HER.__init__(self, **her_kwargs)
TD3.__init__(self, *args, **kwargs, **td3_kwargs, **base_kwargs)
        assert (isinstance(self.replay_buffer, SimpleHerReplayBuffer) or isinstance(self.replay_buffer, RelabelingReplayBuffer) or isinstance(self.replay_buffer, ObsDictRelabelingBuffer))

class GatherEnv(Env, Serializable):
MODEL_CLASS = None
ORI_IND = None
    @autoargs.arg('n_apples', type=int, help='Number of apples in each episode')
    @autoargs.arg('n_bombs', type=int, help='Number of bombs in each episode')
    @autoargs.arg('activity_range', type=float, help='The span for generating objects (x, y in [-range, range])')
    @autoargs.arg('robot_object_spacing', type=float, help='Number of objects in each episode')
    @autoargs.arg('catch_range', type=float, help='Minimum distance range to catch an object')
    @autoargs.arg('n_bins', type=float, help='Number of objects in each episode')
    @autoargs.arg('sensor_range', type=float, help='Maximum sensor range (how far it can go)')
    @autoargs.arg('sensor_span', type=float, help='Maximum sensor span (how wide it can span), in radians')
def __init__(self, n_apples=8, n_bombs=8, activity_range=6.0, robot_object_spacing=2.0, catch_range=1.0, n_bins=10, sensor_range=6.0, sensor_span=math.pi, *args, **kwargs):
self.n_apples = n_apples
self.n_bombs = n_bombs
self.activity_range = activity_range
self.robot_object_spacing = robot_object_spacing
self.catch_range = catch_range
self.n_bins = n_bins
self.sensor_range = sensor_range
self.sensor_span = sensor_span
self.objects = []
super(GatherEnv, self).__init__(*args, **kwargs)
model_cls = self.__class__.MODEL_CLASS
if (model_cls is None):
            raise NotImplementedError('MODEL_CLASS unspecified!')
xml_path = osp.join(MODEL_DIR, model_cls.FILE)
tree = ET.parse(xml_path)
worldbody = tree.find('.//worldbody')
attrs = dict(type='box', conaffinity='1', rgba='0.8 0.9 0.8 1', condim='3')
walldist = (self.activity_range + 1)
ET.SubElement(worldbody, 'geom', dict(attrs, name='wall1', pos=('0 -%d 0' % walldist), size=('%d.5 0.5 1' % walldist)))
ET.SubElement(worldbody, 'geom', dict(attrs, name='wall2', pos=('0 %d 0' % walldist), size=('%d.5 0.5 1' % walldist)))
ET.SubElement(worldbody, 'geom', dict(attrs, name='wall3', pos=('-%d 0 0' % walldist), size=('0.5 %d.5 1' % walldist)))
ET.SubElement(worldbody, 'geom', dict(attrs, name='wall4', pos=('%d 0 0' % walldist), size=('0.5 %d.5 1' % walldist)))
(_, file_path) = tempfile.mkstemp(text=True)
tree.write(file_path)
inner_env = model_cls(*args, file_path=file_path, **kwargs)
self.inner_env = inner_env
Serializable.quick_init(self, locals())
def reset(self):
self.objects = []
existing = set()
while (len(self.objects) < self.n_apples):
x = (np.random.randint(((- self.activity_range) / 2), (self.activity_range / 2)) * 2)
y = (np.random.randint(((- self.activity_range) / 2), (self.activity_range / 2)) * 2)
if (((x ** 2) + (y ** 2)) < (self.robot_object_spacing ** 2)):
continue
if ((x, y) in existing):
continue
typ = APPLE
self.objects.append((x, y, typ))
existing.add((x, y))
while (len(self.objects) < (self.n_apples + self.n_bombs)):
x = (np.random.randint(((- self.activity_range) / 2), (self.activity_range / 2)) * 2)
y = (np.random.randint(((- self.activity_range) / 2), (self.activity_range / 2)) * 2)
if (((x ** 2) + (y ** 2)) < (self.robot_object_spacing ** 2)):
continue
if ((x, y) in existing):
continue
typ = BOMB
self.objects.append((x, y, typ))
existing.add((x, y))
self.inner_env.reset()
return self.get_current_obs()
def step(self, action):
(_, _, done, info) = self.inner_env.step(action)
if done:
return Step(self.get_current_obs(), (- 10), done, **info)
com = self.inner_env.get_body_com('torso')
(x, y) = com[:2]
reward = 0
new_objs = []
for obj in self.objects:
(ox, oy, typ) = obj
if ((((ox - x) ** 2) + ((oy - y) ** 2)) < (self.catch_range ** 2)):
if (typ == APPLE):
reward = (reward + 1)
else:
reward = (reward - 1)
else:
new_objs.append(obj)
self.objects = new_objs
done = (len(self.objects) == 0)
return Step(self.get_current_obs(), reward, done, **info)
def get_readings(self):
apple_readings = np.zeros(self.n_bins)
bomb_readings = np.zeros(self.n_bins)
(robot_x, robot_y) = self.inner_env.get_body_com('torso')[:2]
sorted_objects = sorted(self.objects, key=(lambda o: (((o[0] - robot_x) ** 2) + ((o[1] - robot_y) ** 2))))[::(- 1)]
bin_res = (self.sensor_span / self.n_bins)
ori = self.inner_env.model.data.qpos[self.__class__.ORI_IND]
for (ox, oy, typ) in sorted_objects:
dist = ((((oy - robot_y) ** 2) + ((ox - robot_x) ** 2)) ** 0.5)
if (dist > self.sensor_range):
continue
angle = (math.atan2((oy - robot_y), (ox - robot_x)) - ori)
if math.isnan(angle):
import ipdb
ipdb.set_trace()
angle = (angle % (2 * math.pi))
if (angle > math.pi):
angle = (angle - (2 * math.pi))
if (angle < (- math.pi)):
angle = (angle + (2 * math.pi))
half_span = (self.sensor_span * 0.5)
if (abs(angle) > half_span):
continue
bin_number = int(((angle + half_span) / bin_res))
intensity = (1.0 - (dist / self.sensor_range))
if (typ == APPLE):
apple_readings[bin_number] = intensity
else:
bomb_readings[bin_number] = intensity
return (apple_readings, bomb_readings)
def get_current_obs(self):
self_obs = self.inner_env.get_current_obs()
(apple_readings, bomb_readings) = self.get_readings()
return np.concatenate([self_obs, apple_readings, bomb_readings])
def get_viewer(self):
if (self.inner_env.viewer is None):
self.inner_env.viewer = GatherViewer(self)
self.inner_env.viewer.start()
self.inner_env.viewer.set_model(self.inner_env.model)
return self.inner_env.viewer
def action_space(self):
return self.inner_env.action_space
def action_bounds(self):
return self.inner_env.action_bounds
def viewer(self):
return self.inner_env.viewer
def observation_space(self):
dim = self.inner_env.observation_space.flat_dim
newdim = (dim + (self.n_bins * 2))
ub = (BIG * np.ones(newdim))
return spaces.Box((ub * (- 1)), ub)
def action_from_key(self, key):
return self.inner_env.action_from_key(key)
def render(self):
self.get_viewer()
        self.inner_env.render()

def test_moreau_yosida_regularization():
u.vector().vec().set(1000.0)
u.vector().apply('')
y_bar = 0.1
y_low = 0.01
gamma = 1000.0
reg = cashocs._utils.moreau_yosida_regularization(y, gamma, dx, upper_threshold=y_bar, lower_threshold=y_low)
max = cashocs._utils.max_
min = cashocs._utils.min_
reg_ana = ((((1 / (2 * gamma)) * pow(max((gamma * (y - y_bar)), 0.0), 2)) * dx) + (((1 / (2 * gamma)) * pow(min((gamma * (y - y_low)), 0.0), 2)) * dx))
ocp.compute_state_variables()
    assert (np.abs((assemble(reg) - assemble(reg_ana))) < 1e-14)

def get_non_root_control_flow_distance(result: ExecutionResult, predicate_id: int, value: bool, tracer: ExecutionTracer) -> ControlFlowDistance:
trace = result.execution_trace
code_object_id = tracer.get_subject_properties().existing_predicates[predicate_id].code_object_id
distance = ControlFlowDistance()
if (code_object_id not in trace.executed_code_objects):
distance.approach_level = tracer.get_subject_properties().existing_code_objects[code_object_id].cfg.diameter
return distance
if (predicate_id in trace.executed_predicates):
if value:
branch_distance = _predicate_fitness(predicate_id, trace.true_distances)
else:
branch_distance = _predicate_fitness(predicate_id, trace.false_distances)
distance.branch_distance = branch_distance
return distance
cdg = tracer.get_subject_properties().existing_code_objects[code_object_id].cdg
target_node = _get_node_with_predicate_id(cdg, predicate_id)
distance.approach_level = tracer.get_subject_properties().existing_code_objects[code_object_id].cfg.diameter
for node in [node for node in cdg.nodes if ((node.predicate_id is not None) and (node.predicate_id in trace.executed_predicates))]:
try:
candidate = ControlFlowDistance()
candidate.approach_level = nx.shortest_path_length(cdg.graph, node, target_node)
assert (node.predicate_id is not None)
candidate.branch_distance = (_predicate_fitness(node.predicate_id, trace.true_distances) + _predicate_fitness(node.predicate_id, trace.false_distances))
distance = min(distance, candidate)
except nx.NetworkXNoPath:
pass
    return distance

@pytest.mark.parametrize('sampling_strategy, err_msg', [({0: (- 100), 1: 50, 2: 50}, 'in a class cannot be negative'), ({0: 10, 1: 70}, 'should be less or equal to the original')])
def test_make_imbalance_error(iris, sampling_strategy, err_msg):
(X, y) = iris
with pytest.raises(ValueError, match=err_msg):
        make_imbalance(X, y, sampling_strategy=sampling_strategy)

def styblinski_tang(ind):
    return ((sum(((((x ** 4.0) - (16.0 * (x ** 2.0))) + (5.0 * x)) for x in ind)) / 2.0),)

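# styblinski_tang() returns a 1-tuple (DEAP-style fitness). Its global minimum
# sits at x_i = -2.903534 in every dimension, with value about -39.166 per dim.
val, = styblinski_tang([-2.903534, -2.903534])
assert abs(val - (-78.332)) < 1e-2
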
class AlgoTrainer(BaseAlgo):
def __init__(self, algo_init, args):
super(AlgoTrainer, self).__init__(args)
self.vae = algo_init['vae']['net']
self.vae_opt = algo_init['vae']['opt']
self.actor = algo_init['actor']['net']
self.actor_opt = algo_init['actor']['opt']
self.critic1 = algo_init['critic1']['net']
self.critic1_opt = algo_init['critic1']['opt']
self.critic2 = algo_init['critic2']['net']
self.critic2_opt = algo_init['critic2']['opt']
self.actor_target = copy.deepcopy(self.actor)
self.critic1_target = copy.deepcopy(self.critic1)
self.critic2_target = copy.deepcopy(self.critic2)
self.args = args
def _train_vae_step(self, batch):
batch = to_torch(batch, torch.float, device=self.args['device'])
obs = batch.obs
act = batch.act
(recon, mean, std) = self.vae(obs, act)
recon_loss = F.mse_loss(recon, act)
KL_loss = ((- self.args['vae_kl_weight']) * (((1 + torch.log(std.pow(2))) - mean.pow(2)) - std.pow(2)).mean())
vae_loss = (recon_loss + (0.5 * KL_loss))
self.vae_opt.zero_grad()
vae_loss.backward()
self.vae_opt.step()
return (vae_loss.cpu().data.numpy(), recon_loss.cpu().data.numpy(), KL_loss.cpu().data.numpy())
def _train_vae(self, train_buffer):
logs = {'vae_loss': [], 'recon_loss': [], 'kl_loss': []}
for i in range(self.args['vae_iterations']):
batch = train_buffer.sample(self.args['vae_batch_size'])
(vae_loss, recon_loss, KL_loss) = self._train_vae_step(batch)
logs['vae_loss'].append(vae_loss)
logs['recon_loss'].append(recon_loss)
logs['kl_loss'].append(KL_loss)
if (((i + 1) % 1000) == 0):
logger.info('VAE Epoch : {}, KL_loss : {:.4}', ((i + 1) // 1000), KL_loss)
logger.info('VAE Epoch : {}, recon_loss : {:.4}', ((i + 1) // 1000), recon_loss)
logger.info('VAE Epoch : {}, Loss : {:.4}', ((i + 1) // 1000), vae_loss)
logger.info('Save VAE Model -> {}', (('/tmp/vae_' + str(i)) + '.pkl'))
def _train_policy(self, train_buffer, callback_fn):
for it in range(self.args['actor_iterations']):
batch = train_buffer.sample(self.args['actor_batch_size'])
batch = to_torch(batch, torch.float, device=self.args['device'])
rew = batch.rew
done = batch.done
obs = batch.obs
act = batch.act
obs_next = batch.obs_next
with torch.no_grad():
(action_next_actor, _) = self.actor_target(obs_next)
action_next_vae = self.vae.decode(obs_next, z=action_next_actor)
target_q1 = self.critic1_target(obs_next, action_next_vae)
target_q2 = self.critic2_target(obs_next, action_next_vae)
target_q = ((self.args['lmbda'] * torch.min(target_q1, target_q2)) + ((1 - self.args['lmbda']) * torch.max(target_q1, target_q2)))
target_q = (rew + (((1 - done) * self.args['discount']) * target_q))
current_q1 = self.critic1(obs, act)
current_q2 = self.critic2(obs, act)
critic_loss = (F.mse_loss(current_q1, target_q) + F.mse_loss(current_q2, target_q))
self.critic1_opt.zero_grad()
self.critic2_opt.zero_grad()
critic_loss.backward()
self.critic1_opt.step()
self.critic2_opt.step()
(action_actor, _) = self.actor(obs)
action_vae = self.vae.decode(obs, z=action_actor)
actor_loss = (- self.critic1(obs, action_vae).mean())
self.actor.zero_grad()
actor_loss.backward()
self.actor_opt.step()
self._sync_weight(self.actor_target, self.actor)
self._sync_weight(self.critic1_target, self.critic1)
self._sync_weight(self.critic2_target, self.critic2)
if (((it + 1) % 1000) == 0):
if (callback_fn is None):
self.eval_policy()
else:
res = callback_fn(self.get_policy())
self.log_res(((it + 1) // 1000), res)
def _train_policy_latent(self, train_buffer, val_buffer, callback_fn):
for it in range(self.args['actor_iterations']):
batch = train_buffer.sample(self.args['actor_batch_size'])
batch = to_torch(batch, torch.float, device=self.args['device'])
rew = batch.rew
done = batch.done
obs = batch.obs
act = batch.act
obs_next = batch.obs_next
with torch.no_grad():
(_, _, next_action) = self.actor_target(obs_next, self.vae.decode)
target_q1 = self.critic1_target(obs_next, next_action)
target_q2 = self.critic2_target(obs_next, next_action)
target_q = ((self.args['lmbda'] * torch.min(target_q1, target_q2)) + ((1 - self.args['lmbda']) * torch.max(target_q1, target_q2)))
target_q = (rew + (((1 - done) * self.args['discount']) * target_q))
current_q1 = self.critic1(obs, act)
current_q2 = self.critic2(obs, act)
critic_loss = (F.mse_loss(current_q1, target_q) + F.mse_loss(current_q2, target_q))
self.critic1_opt.zero_grad()
self.critic2_opt.zero_grad()
critic_loss.backward()
self.critic1_opt.step()
self.critic2_opt.step()
(latent_actions, mid_actions, actions) = self.actor(obs, self.vae.decode)
actor_loss = (- self.critic1(obs, actions).mean())
self.actor.zero_grad()
actor_loss.backward()
self.actor_opt.step()
self._sync_weight(self.actor_target, self.actor)
self._sync_weight(self.critic1_target, self.critic1)
self._sync_weight(self.critic2_target, self.critic2)
if (((it + 1) % 1000) == 0):
if (callback_fn is None):
self.eval_policy()
else:
res = callback_fn(policy=self.get_policy())
self.log_res(((it + 1) // 1000), res)
def get_policy(self):
if self.args['latent']:
self.actor.vae = copy.deepcopy(self.vae)
return self.actor
else:
self.vae._actor = copy.deepcopy(self.actor)
return self.vae
def train(self, train_buffer, val_buffer, callback_fn=None):
self._train_vae(train_buffer)
self.vae.eval()
if self.args['latent']:
self._train_policy_latent(train_buffer, val_buffer, callback_fn)
else:
            self._train_policy(train_buffer, val_buffer, callback_fn)

class Mixed_4f(nn.Module):
def __init__(self):
super(Mixed_4f, self).__init__()
self.branch0 = nn.Sequential(BasicConv3d(528, 256, kernel_size=1, stride=1))
self.branch1 = nn.Sequential(BasicConv3d(528, 160, kernel_size=1, stride=1), SepConv3d(160, 320, kernel_size=3, stride=1, padding=1))
self.branch2 = nn.Sequential(BasicConv3d(528, 32, kernel_size=1, stride=1), SepConv3d(32, 128, kernel_size=3, stride=1, padding=1))
self.branch3 = nn.Sequential(nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1), BasicConv3d(528, 128, kernel_size=1, stride=1))
def forward(self, x):
x0 = self.branch0(x)
x1 = self.branch1(x)
x2 = self.branch2(x)
x3 = self.branch3(x)
out = torch.cat((x0, x1, x2, x3), 1)
        return out

def test_fortran_frontend_maxval_double():
test_string = '\n PROGRAM minval_test\n implicit none\n double precision, dimension(7) :: d\n double precision, dimension(4) :: res\n CALL minval_test_function(d, res)\n end\n\n SUBROUTINE minval_test_function(d, res)\n double precision, dimension(7) :: d\n double precision, dimension(0) :: dt\n double precision, dimension(4) :: res\n\n res(1) = MAXVAL(d)\n res(2) = MAXVAL(d(:))\n res(3) = MAXVAL(d(3:6))\n res(4) = MAXVAL(dt)\n\n END SUBROUTINE minval_test_function\n '
sdfg = fortran_parser.create_sdfg_from_string(test_string, 'minval_test', True)
sdfg.simplify(verbose=True)
sdfg.compile()
size = 7
d = np.full([size], 0, order='F', dtype=np.float64)
for i in range(size):
d[i] = (i + 1)
res = np.full([4], 42, order='F', dtype=np.float64)
sdfg(d=d, res=res)
assert (res[0] == d[(- 1)])
assert (res[1] == d[(- 1)])
assert (res[2] == d[5])
assert (res[3] == np.finfo(np.float64).min)
for i in range(size):
d[i] = (10 - i)
sdfg(d=d, res=res)
assert (res[0] == d[0])
assert (res[1] == d[0])
assert (res[2] == d[2])
    assert (res[3] == np.finfo(np.float64).min)

class TransitionModel(object):
def __init__(self, gridspec, eps=0.2):
self.gs = gridspec
self.eps = eps
def get_aprobs(self, s, a):
legal_moves = self.__get_legal_moves(s)
p = np.zeros(len(ACT_DICT))
p[list(legal_moves)] = (self.eps / len(legal_moves))
if (a in legal_moves):
p[a] += (1.0 - self.eps)
else:
p[ACT_NOOP] += (1.0 - self.eps)
return p
def __get_legal_moves(self, s):
xy = np.array(self.gs.idx_to_xy(s))
moves = {move for move in ACT_DICT if ((not self.gs.out_of_bounds((xy + ACT_DICT[move]))) and (self.gs[(xy + ACT_DICT[move])] != WALL))}
moves.add(ACT_NOOP)
        return moves

def kaiming_init(module: nn.Module, a: float=0, mode: str='fan_out', nonlinearity: str='relu', bias: float=0, distribution: str='normal') -> None:
assert (distribution in ['uniform', 'normal'])
if (hasattr(module, 'weight') and (module.weight is not None)):
if (distribution == 'uniform'):
nn.init.kaiming_uniform_(module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
else:
nn.init.kaiming_normal_(module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
if (hasattr(module, 'bias') and (module.bias is not None)):
        nn.init.constant_(module.bias, bias)

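# Usage sketch: applying kaiming_init to every conv layer of a module tree,
# the usual pattern for initializer helpers of this style.
import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.Conv2d(16, 32, 3))
for m in net.modules():
    if isinstance(m, nn.Conv2d):
        kaiming_init(m)
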
def cli_main():
parser = options.get_validation_parser()
add_distributed_training_args(parser)
args = options.parse_args_and_arch(parser)
override_parser = options.get_validation_parser()
add_distributed_training_args(override_parser)
override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True)
    distributed_utils.call_main(args, main, override_args=override_args)

def get_video_frames_perturbed(video_dir, batch_size):
def get_key(x):
return int(x.split('/')[(- 1)].split('_')[0])
landmark_paths = sorted(glob(f'{video_dir}/*_landmarks.npz'), key=(lambda x: get_key(x)))
index = random.randint(0, max(5, ((len(landmark_paths) - batch_size) - 1)))
source_landmarks_sampled = landmark_paths[index:(index + batch_size)]
source_masks = list()
source_images = list()
face_segmenteds = list()
source_backgrounds = list()
gt_transformations = list()
source_faces_perturbed = list()
for i in range(len(source_landmarks_sampled)):
source_landmark_path = source_landmarks_sampled[i]
file_without_extension = osp.join(source_landmark_path.rsplit('_', 1)[0])
if os.path.exists((file_without_extension + '.jpg')):
source_image_path = (file_without_extension + '.jpg')
else:
source_image_path = (file_without_extension + '.png')
(mask, face_segmented, source_image, source_face_perturbed, gt_transformation, source_background) = perturbed_single_image(source_image_path, source_landmark_path)
face_segmenteds.append(face_segmented)
source_backgrounds.append(source_background)
source_masks.append(mask)
source_images.append(source_image)
source_faces_perturbed.append(source_face_perturbed)
gt_transformations.append(gt_transformation)
    return (source_masks, face_segmenteds, source_images, source_faces_perturbed, gt_transformations, source_backgrounds)

def write_shards(dns_folder_path: pathlib.Path, shards_path: pathlib.Path, seed: int, samples_per_shard: int, min_dur: float):
shards_path.mkdir(parents=True, exist_ok=True)
audio_files = sorted([f for f in dns_folder_path.rglob('*.wav')])
data_tuples = []
all_language_ids = set()
sample_keys_per_language = defaultdict(list)
if ('clean' in dns_folder_path.as_posix()):
delim = 'clean_fullband/'
elif ('noise' in dns_folder_path.as_posix()):
delim = 'noise_fullband/'
lang = 'noise'
elif ('dev_testset' in dns_folder_path.as_posix()):
delim = 'dev_testset/'
lang = 'baseline_noisytestset'
else:
delim = os.path.basename(dns_folder_path.as_posix())
lang = delim
for f in tqdm(audio_files):
sub_path = f.as_posix().split(delim)[1]
loc = f.as_posix()
key = os.path.splitext(os.path.basename(sub_path))[0]
if ('clean_fullband' in dns_folder_path.as_posix()):
lang = key.split('_speech')[0]
dur = librosa.get_duration(path=loc)
key = key.replace('.', '_')
if (dur > min_dur):
all_language_ids.add(lang)
sample_keys_per_language[lang].append(key)
t = (key, lang, loc, dur)
data_tuples.append(t)
all_language_ids = sorted(all_language_ids)
meta_dict = {'language_ids': list(all_language_ids), 'sample_keys_per_language': sample_keys_per_language, 'num_data_samples': len(data_tuples)}
with (shards_path / 'meta.json').open('w') as f:
json.dump(meta_dict, f, indent=4)
random.seed(seed)
random.shuffle(data_tuples)
all_keys = set()
shards_path.mkdir(exist_ok=True, parents=True)
pattern = (str((shards_path / 'shard')) + '-%06d.tar')
with wds.ShardWriter(pattern, maxcount=samples_per_shard) as sink:
for (key, language_id, f, duration) in data_tuples:
tensor = load_audio(f)
assert (key not in all_keys)
all_keys.add(key)
sample = {'__key__': key, 'audio.pth': tensor, 'language_id': language_id}
            sink.write(sample)

@torch.no_grad()
def test(model, device, loader, evaluator):
model.eval()
(y_pred, y_true) = ([], [])
for (x, y) in tqdm(loader):
x = x.to(device)
out = model(x)
y_pred.append(torch.argmax(out, dim=1, keepdim=True).cpu())
y_true.append(y)
    return evaluator.eval({'y_true': torch.cat(y_true, dim=0), 'y_pred': torch.cat(y_pred, dim=0)})['acc']

def test_orderedset_reversed():
ordered = OrderedSet([1, 2, 3])
    assert (tuple(reversed(ordered)) == (3, 2, 1))

def existing_file(file_name):
try:
with open(file_name, 'r') as file:
return file.read()
except Exception:
raise argparse.ArgumentTypeError('The file provided could not be opened.') |
def kaldi_env(kaldi_root):
    kaldi_root = kaldi_root.strip()
    os.environ['KALDI_ROOT'] = kaldi_root
    # Prepend the standard Kaldi binary directories to PATH directly, without shelling out to echo.
    kaldi_bin_dirs = ['src/bin', 'tools/openfst/bin', 'src/fstbin', 'src/gmmbin', 'src/featbin', 'src/lm', 'src/sgmmbin', 'src/sgmm2bin', 'src/fgmmbin', 'src/latbin', 'src/nnetbin', 'src/nnet2bin', 'src/nnet3bin', 'src/online2bin', 'src/ivectorbin', 'src/lmbin']
    os.environ['PATH'] = ((':'.join((os.path.join(kaldi_root, d) for d in kaldi_bin_dirs)) + ':') + os.environ['PATH']) |
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('prob', [0.7, 1.0])
@pytest.mark.parametrize('area_ratios', [(0.02, 0.04)])
@pytest.mark.parametrize('aspect_ratios', [(0.3, 3.3333)])
@pytest.mark.parametrize('replacements', [(2.0, 2.0), (3.0, 4.0)])
@pytest.mark.parametrize('n', [1, 3])
@pytest.mark.parametrize('share', [True, False])
@pytest.mark.parametrize('inplace', [False])
@pytest.mark.parametrize('base_axis', [1, (- 3)])
@pytest.mark.parametrize('func_seed', [412, (- 1)])
@pytest.mark.parametrize('channel_last', [False, True])
def test_random_erase_recomputation(ctx, func_name, seed, prob, area_ratios, aspect_ratios, replacements, n, share, inplace, base_axis, func_seed, channel_last):
if (channel_last and (func_name == 'RandomErase')):
pytest.skip('RandomErase with channel_last is only supported in CUDA.')
from nbla_test_utils import recomputation_test
rng = np.random.RandomState(seed)
(b, c, h, w) = (4, 3, 32, 32)
ishape = ([b, h, w, c] if channel_last else [b, c, h, w])
vinputs = [nn.Variable(ishape)]
func_kwargs = {'prob': prob, 'area_ratios': area_ratios, 'aspect_ratios': aspect_ratios, 'replacements': replacements, 'n': n, 'share': share, 'inplace': inplace, 'base_axis': base_axis, 'seed': seed, 'channel_last': channel_last}
recomputation_test(rng=rng, func=F.random_erase, vinputs=vinputs, func_args=[], func_kwargs=func_kwargs, ctx=ctx) |
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, delay=15):
to_dir = os.path.abspath(to_dir)
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
tgz_name = ('distribute-%s.tar.gz' % version)
url = (download_base + tgz_name)
saveto = os.path.join(to_dir, tgz_name)
src = dst = None
if (not os.path.exists(saveto)):
try:
log.warn('Downloading %s', url)
src = urlopen(url)
data = src.read()
dst = open(saveto, 'wb')
dst.write(data)
finally:
if src:
src.close()
if dst:
dst.close()
return os.path.realpath(saveto) |
def main():
args = ArgParser().parse_args()
args.eval_filter = (not args.no_eval_filter)
if args.neg_deg_sample_eval:
assert (not args.eval_filter), "if negative sampling based on degree, we can't filter positive edges."
assert os.path.exists(args.model_path), 'No existing model_path: {}'.format(args.model_path)
assert (args.dataset == 'wikikg90m')
args.neg_sample_size_eval = 1000
dataset = get_dataset(args.data_path, args.dataset, args.format, args.delimiter, args.data_files)
args.train = False
args.valid = False
args.test = True
args.strict_rel_part = False
args.soft_rel_part = False
args.async_update = False
args.has_edge_importance = False
if (len(args.gpu) > 1):
args.mix_cpu_gpu = True
if (args.num_proc < len(args.gpu)):
args.num_proc = len(args.gpu)
if ((len(args.gpu) > 1) and (args.num_proc > 1)):
assert ((args.num_proc % len(args.gpu)) == 0), 'The number of processes needs to be divisible by the number of GPUs'
eval_dataset = EvalDataset(dataset, args)
if (args.neg_sample_size_eval < 0):
args.neg_sample_size_eval = args.neg_sample_size = eval_dataset.g.number_of_nodes()
args.batch_size_eval = get_compatible_batch_size(args.batch_size_eval, args.neg_sample_size_eval)
args.num_workers = 8
if (args.num_proc > 1):
test_sampler_tails = []
test_sampler_heads = []
for i in range(args.num_proc):
test_sampler_head = eval_dataset.create_sampler('test', args.batch_size_eval, args.neg_sample_size_eval, args.neg_sample_size_eval, args.eval_filter, mode='head', num_workers=args.num_workers, rank=i, ranks=args.num_proc)
test_sampler_tail = eval_dataset.create_sampler('test', args.batch_size_eval, args.neg_sample_size_eval, args.neg_sample_size_eval, args.eval_filter, mode='tail', num_workers=args.num_workers, rank=i, ranks=args.num_proc)
test_sampler_heads.append(test_sampler_head)
test_sampler_tails.append(test_sampler_tail)
else:
test_sampler_head = eval_dataset.create_sampler('test', args.batch_size_eval, args.neg_sample_size_eval, args.neg_sample_size_eval, args.eval_filter, mode='head', num_workers=args.num_workers, rank=0, ranks=1)
test_sampler_tail = eval_dataset.create_sampler('test', args.batch_size_eval, args.neg_sample_size_eval, args.neg_sample_size_eval, args.eval_filter, mode='tail', num_workers=args.num_workers, rank=0, ranks=1)
n_entities = dataset.n_entities
n_relations = dataset.n_relations
ckpt_path = args.model_path
model = load_model_from_checkpoint(args, n_entities, n_relations, ckpt_path, dataset.entity_feat.shape[1], dataset.relation_feat.shape[1])
if (args.encoder_model_name in ['roberta', 'concat']):
model.entity_feat.emb = dataset.entity_feat
model.relation_feat.emb = dataset.relation_feat
if (args.num_proc > 1):
model.share_memory()
args.step = 0
args.max_step = 0
start = time.time()
if (args.num_proc > 1):
queue = mp.Queue(args.num_proc)
procs = []
for i in range(args.num_proc):
proc = mp.Process(target=test_mp, args=(args, model, [test_sampler_heads[i], test_sampler_tails[i]], i, 'Test'))
procs.append(proc)
proc.start()
for proc in procs:
proc.join()
else:
test(args, model, [test_sampler_head, test_sampler_tail], 0, 0, 'Test')
print('Test takes {:.3f} seconds'.format((time.time() - start))) |
def _sage_getsourcelines_name_with_dot(obj):
if ('.' in obj.__name__):
splitted_name = obj.__name__.split('.')
elif hasattr(obj, '__qualname__'):
splitted_name = obj.__qualname__.split('.')
else:
splitted_name = obj.__name__
path = (obj.__module__.split('.') + splitted_name[:(- 1)])
name = splitted_name[(- 1)]
try:
M = __import__(path.pop(0))
except ImportError:
try:
B = obj.__base__
if (B is None):
raise AttributeError
except AttributeError:
raise OSError('could not get source code')
return sage_getsourcelines(B)
try:
while path:
M = getattr(M, path.pop(0))
except AttributeError:
try:
B = obj.__base__
if (B is None):
raise AttributeError
except AttributeError:
raise OSError('could not get source code')
return sage_getsourcelines(B)
(lines, base_lineno) = sage_getsourcelines(M)
if (not lines):
raise OSError('could not get source code')
if inspect.ismodule(obj):
return (lines, base_lineno)
if inspect.isclass(obj):
pat = re.compile((('^(\\s*)class\\s*' + name) + '\\b'))
candidates = []
for i in range(len(lines)):
match = pat.match(lines[i])
if match:
if (lines[i][0] == 'c'):
return (inspect.getblock(lines[i:]), (i + base_lineno))
candidates.append((match.group(1), i))
if candidates:
candidates.sort()
return (inspect.getblock(lines[candidates[0][1]:]), (candidates[0][1] + base_lineno))
else:
raise OSError('could not find class definition')
if inspect.ismethod(obj):
obj = obj.__func__
if is_function_or_cython_function(obj):
obj = obj.__code__
if inspect.istraceback(obj):
obj = obj.tb_frame
if inspect.isframe(obj):
obj = obj.f_code
if inspect.iscode(obj):
if (not hasattr(obj, 'co_firstlineno')):
raise OSError('could not find function definition')
pat = re.compile('^(\\s*def\\s)|(.*(?<!\\w)lambda(:|\\s))|^(\\s*)')
pmatch = pat.match
lnum = (min(obj.co_firstlineno, len(lines)) - 1)
while (lnum > 0):
if pmatch(lines[lnum]):
break
lnum -= 1
return (inspect.getblock(lines[lnum:]), (lnum + base_lineno))
raise OSError('could not find code object') |
def make_divisible(v: int, divisor: int=8, min_value: int=None):
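    # Round v to the nearest multiple of `divisor` (MobileNet-style channel rounding),
    # then bump up one step if rounding dropped the result below 90% of the original.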
min_value = (min_value or divisor)
new_v = max(min_value, ((int((v + (divisor / 2))) // divisor) * divisor))
if (new_v < (0.9 * v)):
new_v += divisor
return new_v |
def bond_features(bond, use_chirality=True):
bt = bond.GetBondType()
bond_feats = [(bt == Chem.rdchem.BondType.SINGLE), (bt == Chem.rdchem.BondType.DOUBLE), (bt == Chem.rdchem.BondType.TRIPLE), (bt == Chem.rdchem.BondType.AROMATIC), bond.GetIsConjugated(), bond.IsInRing()]
if use_chirality:
bond_feats = (bond_feats + one_of_k_encoding_unk(str(bond.GetStereo()), ['STEREONONE', 'STEREOANY', 'STEREOZ', 'STEREOE']))
return np.array(bond_feats) |
def find_before(ctx, pos, substr, offsets=(0, 0)):
new_pos = ctx.source[:pos].rindex(substr)
return ctx.make_raw_range((new_pos + offsets[0]), ((new_pos + len(substr)) + offsets[1])) |
@register_task('speech_text_joint_to_text')
class SpeechTextJointToTextTask(SpeechToTextTask):
    @classmethod
    def add_args(cls, parser):
super(SpeechTextJointToTextTask, cls).add_args(parser)
parser.add_argument('--parallel-text-data', default='', help='path to parallel text data directory')
parser.add_argument('--max-tokens-text', type=int, metavar='N', help='maximum tokens for encoder text input ')
parser.add_argument('--max-positions-text', type=int, metavar='N', default=400, help='maximum tokens for per encoder text input ')
parser.add_argument('--langpairs', default=None, metavar='S', help='language pairs for text training, separated with ","')
parser.add_argument('--speech-sample-ratio', default=1, type=float, metavar='N', help='Multiple Ratio for speech dataset with transcripts ')
parser.add_argument('--text-sample-ratio', default=1, type=float, metavar='N', help='Multiple Ratio for text set ')
parser.add_argument('--update-mix-data', action='store_true', help='use mixed data in one update when update-freq > 1')
parser.add_argument('--load-speech-only', action='store_true', help='load speech data only')
parser.add_argument('--mask-text-ratio', type=float, metavar='V', default=0.0, help='mask V source tokens for text only mode')
parser.add_argument('--mask-text-type', default='random', choices=['random', 'tail'], help='mask text typed')
parser.add_argument('--noise-token', default='', help='noise token for masking src text tokens if mask-text-ratio > 0')
parser.add_argument('--infer-target-lang', default='', metavar='S', help='target language for inference')
def __init__(self, args, src_dict, tgt_dict, infer_tgt_lang_id=None):
super().__init__(args, tgt_dict)
self.src_dict = src_dict
self.data_cfg = S2TJointDataConfig((Path(args.data) / args.config_yaml))
assert (self.tgt_dict.pad() == self.src_dict.pad())
assert (self.tgt_dict.eos() == self.src_dict.eos())
self.speech_only = args.load_speech_only
self._infer_tgt_lang_id = infer_tgt_lang_id
    @classmethod
    def setup_task(cls, args, **kwargs):
data_cfg = S2TJointDataConfig((Path(args.data) / args.config_yaml))
tgt_dict_path = (Path(args.data) / data_cfg.vocab_filename)
src_dict_path = (Path(args.data) / data_cfg.src_vocab_filename)
if ((not os.path.isfile(src_dict_path)) or (not os.path.isfile(tgt_dict_path))):
raise FileNotFoundError('Dict not found: {}'.format(args.data))
src_dict = Dictionary.load(src_dict_path.as_posix())
tgt_dict = Dictionary.load(tgt_dict_path.as_posix())
print('| src dictionary: {} types'.format(len(src_dict)))
print('| tgt dictionary: {} types'.format(len(tgt_dict)))
if (args.parallel_text_data != ''):
if (not os.path.isabs(args.parallel_text_data)):
args.parallel_text_data = os.path.join(args.data, args.parallel_text_data)
if (args.langpairs is None):
raise Exception('Could not infer language pair, please provide it explicitly')
infer_tgt_lang_id = None
if ((args.infer_target_lang != '') and data_cfg.prepend_tgt_lang_tag_no_change):
tgt_lang_tag = SpeechToTextDataset.LANG_TAG_TEMPLATE.format(args.infer_target_lang)
infer_tgt_lang_id = tgt_dict.index(tgt_lang_tag)
assert (infer_tgt_lang_id != tgt_dict.unk())
return cls(args, src_dict, tgt_dict, infer_tgt_lang_id=infer_tgt_lang_id)
def load_langpair_dataset(self, prepend_tgt_lang_tag=False, sampling_alpha=1.0, epoch=0):
lang_pairs = []
text_dataset = None
split = 'train'
for lp in self.args.langpairs.split(','):
(src, tgt) = lp.split('-')
text_dataset = load_langpair_dataset(self.args.parallel_text_data, split, src, self.src_dict, tgt, self.tgt_dict, combine=True, dataset_impl=None, upsample_primary=1, left_pad_source=False, left_pad_target=False, max_source_positions=self.args.max_positions_text, max_target_positions=self.args.max_target_positions, load_alignments=False, truncate_source=False)
if prepend_tgt_lang_tag:
text_dataset = TransformEosLangPairDataset(text_dataset, src_eos=self.src_dict.eos(), tgt_bos=self.tgt_dict.eos(), new_tgt_bos=self.tgt_dict.index(LANG_TAG_TEMPLATE.format(tgt)))
lang_pairs.append(text_dataset)
if (len(lang_pairs) > 1):
if (sampling_alpha != 1.0):
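                # Rebalance language pairs by size with temperature-style resampling controlled by alpha.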
size_ratios = SpeechToTextDatasetCreator.get_size_ratios(self.args.langpairs.split(','), [len(s) for s in lang_pairs], alpha=sampling_alpha)
lang_pairs = [ResamplingDataset(d, size_ratio=r, epoch=epoch, replace=(r >= 1.0)) for (d, r) in zip(lang_pairs, size_ratios)]
return ConcatDataset(lang_pairs)
return text_dataset
def inference_step(self, generator, models, sample, prefix_tokens=None, constraints=None):
with torch.no_grad():
return generator.generate(models, sample, prefix_tokens=prefix_tokens, constraints=constraints, bos_token=self._infer_tgt_lang_id)
def build_src_tokenizer(self, args):
logger.info(f'src-pre-tokenizer: {self.data_cfg.src_pre_tokenizer}')
return encoders.build_tokenizer(Namespace(**self.data_cfg.src_pre_tokenizer))
def build_src_bpe(self, args):
logger.info(f'tokenizer: {self.data_cfg.src_bpe_tokenizer}')
return encoders.build_bpe(Namespace(**self.data_cfg.src_bpe_tokenizer))
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
is_train_split = split.startswith('train')
pre_tokenizer = self.build_tokenizer(self.args)
bpe_tokenizer = self.build_bpe(self.args)
src_pre_tokenizer = self.build_src_tokenizer(self.args)
src_bpe_tokenizer = self.build_src_bpe(self.args)
ast_dataset = SpeechToTextJointDatasetCreator.from_tsv(self.args.data, self.data_cfg, split, self.tgt_dict, src_dict=(None if self.speech_only else self.src_dict), pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, src_pre_tokenizer=src_pre_tokenizer, src_bpe_tokenizer=src_bpe_tokenizer, is_train_split=is_train_split, epoch=epoch, seed=self.args.seed)
noise_token_id = (- 1)
text_dataset = None
if ((self.args.parallel_text_data != '') and is_train_split):
text_dataset = self.load_langpair_dataset(self.data_cfg.prepend_tgt_lang_tag_no_change, 1.0, epoch=epoch)
if (self.args.mask_text_ratio > 0):
noise_token_id = (self.src_dict.unk() if (self.args.noise_token == '') else self.src_dict.index(self.args.noise_token))
text_dataset = LangPairMaskDataset(text_dataset, src_bos=self.src_dict.bos(), src_eos=self.src_dict.eos(), noise_id=noise_token_id, mask_ratio=self.args.mask_text_ratio, mask_type=self.args.mask_text_type)
if (text_dataset is not None):
mdsets = [ModalityDatasetItem('sup_speech', ast_dataset, (self.args.max_source_positions, self.args.max_target_positions), self.args.max_tokens, self.args.batch_size), ModalityDatasetItem('text', text_dataset, (self.args.max_positions_text, self.args.max_target_positions), (self.args.max_tokens_text if (self.args.max_tokens_text is not None) else self.args.max_tokens), self.args.batch_size)]
ast_dataset = MultiModalityDataset(mdsets)
self.datasets[split] = ast_dataset
    @property
    def target_dictionary(self):
        return self.tgt_dict
    @property
    def source_dictionary(self):
        return (None if self.speech_only else self.src_dict)
def get_batch_iterator(self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=0, data_buffer_size=0, disable_iterator_cache=False):
if (not isinstance(dataset, MultiModalityDataset)):
return super(SpeechTextJointToTextTask, self).get_batch_iterator(dataset, max_tokens, max_sentences, max_positions, ignore_invalid_inputs, required_batch_size_multiple, seed, num_shards, shard_id, num_workers, epoch, data_buffer_size, disable_iterator_cache)
mult_ratio = [self.args.speech_sample_ratio, self.args.text_sample_ratio]
assert (len(dataset.datasets) == 2)
dataset.set_epoch(epoch)
batch_samplers = dataset.get_batch_samplers(mult_ratio, required_batch_size_multiple, seed)
epoch_iter = GroupedEpochBatchIterator(dataset=dataset, collate_fn=dataset.collater, batch_samplers=batch_samplers, seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch, mult_rate=(1 if self.args.update_mix_data else max(self.args.update_freq)), buffer_size=data_buffer_size)
self.dataset_to_epoch_iter[dataset] = {}
return epoch_iter |
def _parse_line(line: str, split_on=' ') -> List[int]:
return list(map(int, map(str.strip, line.split(split_on)))) |
class COVIDDialogScenario(Scenario):
    SOURCE_URL_TEMPLATE: str = ''  # URL truncated in the source; expected to contain a '{file_name}' placeholder
name = 'covid_dialog'
description = 'Medical dialogue dataset of conversations between doctors and patients on their COVID-19 concerns'
tags = ['dialogue', 'biomedical']
def get_instances(self, output_path: str) -> List[Instance]:
def download_and_read_lines(file_name: str) -> List[str]:
file_path: str = os.path.join(data_path, file_name)
ensure_file_downloaded(source_url=COVIDDialogScenario.SOURCE_URL_TEMPLATE.format(file_name=file_name), target_path=file_path, unpack=False)
with open(file_path) as f:
return f.read().splitlines()
data_path: str = os.path.join(output_path, 'data')
ensure_directory_exists(data_path)
instances: List[Instance] = []
for split in ALL_SPLITS:
dataset_split: str = ('val' if (split == VALID_SPLIT) else split)
questions: List[str] = download_and_read_lines(f'{dataset_split}.source')
responses: List[str] = download_and_read_lines(f'{dataset_split}.target')
for (question, response) in zip(questions, responses):
question = question.replace('patient: ', '')
instances.append(Instance(input=Input(text=question), references=[Reference(output=Output(text=response), tags=[CORRECT_TAG])], split=split))
return instances |
@HOOKS.register_module()
class TextLoggerHook(LoggerHook):
def __init__(self, by_epoch=True, interval=200, ignore_last=True, reset_flag=False, interval_exp_name=1000):
super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag, by_epoch)
self.by_epoch = by_epoch
self.time_sec_tot = 0
self.interval_exp_name = interval_exp_name
def get_mode(self, runner):
if (runner.mode == 'train'):
if ('time' in runner.log_buffer.output):
mode = 'train'
else:
mode = 'val'
elif (runner.mode == 'val'):
mode = 'val'
else:
raise ValueError(f"runner mode should be 'train' or 'val', but got {runner.mode}")
return mode
def get_epoch(self, runner):
if (runner.mode == 'train'):
epoch = (runner.epoch + 1)
elif (runner.mode == 'val'):
epoch = runner.epoch
else:
raise ValueError(f"runner mode should be 'train' or 'val', but got {runner.mode}")
return epoch
def get_iter(self, runner, inner_iter=False):
if (self.by_epoch and inner_iter):
current_iter = (runner.inner_iter + 1)
else:
current_iter = (runner.iter + 1)
return current_iter
def get_lr_tags(self, runner):
tags = {}
lrs = runner.current_lr()
if isinstance(lrs, dict):
for (name, value) in lrs.items():
tags[f'learning_rate/{name}'] = value[0]
else:
tags['learning_rate'] = lrs[0]
return tags
def get_momentum_tags(self, runner):
tags = {}
momentums = runner.current_momentum()
if isinstance(momentums, dict):
for (name, value) in momentums.items():
tags[f'momentum/{name}'] = value[0]
else:
tags['momentum'] = momentums[0]
return tags
def before_run(self, runner):
super(TextLoggerHook, self).before_run(runner)
self.start_iter = runner.iter
self.json_log_path = osp.join(runner.work_dir, f'{runner.timestamp}.log.json')
if (runner.meta is not None):
self._dump_log(runner.meta, runner)
def _get_max_memory(self, runner):
device = getattr(runner.model, 'output_device', None)
mem = torch.cuda.max_memory_allocated(device=device)
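        # Report peak allocated CUDA memory in MB, reduced with MAX across ranks when distributed.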
mem_mb = torch.tensor([(mem / (1024 * 1024))], dtype=torch.int, device=device)
if (runner.world_size > 1):
dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
return mem_mb.item()
def _log_info(self, log_dict, runner):
if ((runner.meta is not None) and ('exp_name' in runner.meta)):
if (self.every_n_iters(runner, self.interval_exp_name) or (self.by_epoch and self.end_of_epoch(runner))):
exp_info = f"Exp name: {runner.meta['exp_name']}"
runner.logger.info(exp_info)
if (log_dict['mode'] == 'train'):
if isinstance(log_dict['lr'], dict):
lr_str = []
for (k, val) in log_dict['lr'].items():
lr_str.append(f'lr_{k}: {val:.3e}')
lr_str = ' '.join(lr_str)
else:
lr_str = f"lr: {log_dict['lr']:.3e}"
if self.by_epoch:
log_str = f"Epoch [{log_dict['epoch']}][{log_dict['iter']}/{len(runner.data_loader)}] "
else:
log_str = f"Iter [{log_dict['iter']}/{runner.max_iters}] "
log_str += f'{lr_str}, '
if ('time' in log_dict.keys()):
self.time_sec_tot += (log_dict['time'] * self.interval)
time_sec_avg = (self.time_sec_tot / ((runner.iter - self.start_iter) + 1))
eta_sec = (time_sec_avg * ((runner.max_iters - runner.iter) - 1))
eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
log_str += f'eta: {eta_str}, '
log_str += f"time: {log_dict['time']:.3f}, data_time: {log_dict['data_time']:.3f}, "
if torch.cuda.is_available():
log_str += f"memory: {log_dict['memory']}, "
elif self.by_epoch:
log_str = f"Epoch({log_dict['mode']}) [{log_dict['epoch']}][{log_dict['iter']}] "
else:
log_str = f"Iter({log_dict['mode']}) [{log_dict['iter']}] "
log_items = []
for (name, val) in log_dict.items():
if (name in ['mode', 'Epoch', 'iter', 'lr', 'time', 'data_time', 'memory', 'epoch']):
continue
if isinstance(val, float):
val = f'{val:.4f}'
log_items.append(f'{name}: {val}')
log_str += ', '.join(log_items)
runner.logger.info(log_str)
def _dump_log(self, log_dict, runner):
json_log = OrderedDict()
for (k, v) in log_dict.items():
json_log[k] = self._round_float(v)
if (runner.rank == 0):
with open(self.json_log_path, 'a+') as f:
mmcv.dump(json_log, f, file_format='json')
f.write('\n')
def _round_float(self, items):
if isinstance(items, list):
return [self._round_float(item) for item in items]
elif isinstance(items, float):
return round(items, 5)
else:
return items
def log(self, runner):
log_dict = OrderedDict(mode=self.get_mode(runner), epoch=self.get_epoch(runner), iter=self.get_iter(runner, inner_iter=True))
cur_lr = runner.current_lr()
if isinstance(cur_lr, list):
log_dict['lr'] = cur_lr[0]
else:
assert isinstance(cur_lr, dict)
log_dict['lr'] = {}
for (k, lr_) in cur_lr.items():
assert isinstance(lr_, list)
log_dict['lr'].update({k: lr_[0]})
try:
if (runner.optimizer_decoder is not None):
lr_d = [group['lr'] for group in runner.optimizer_decoder.param_groups]
log_dict['lr_decoder'] = lr_d[0]
except AttributeError:
pass
if ('time' in runner.log_buffer.output):
if torch.cuda.is_available():
log_dict['memory'] = self._get_max_memory(runner)
log_dict = dict(log_dict, **runner.log_buffer.output)
self._log_info(log_dict, runner)
self._dump_log(log_dict, runner) |
def osnet_ain_x1_0(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
model = OSNet(num_classes, blocks=[[OSBlockINv1, OSBlockINv1], [OSBlock, OSBlockINv1], [OSBlockINv1, OSBlock]], layers=[2, 2, 2], channels=[64, 256, 384, 512], loss=loss, conv1_IN=True, **kwargs)
return model |
def alphanumeric_key(s):
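    # Split the string into digit and non-digit runs so that e.g. 'img10' sorts after 'img2'.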
k = [(int(c) if c.isdigit() else c) for c in re.split('([0-9]+)', s)]
return k |
def _print_alignment_header(wer_details, file=sys.stdout):
print(('=' * 80), file=file)
print('{key}, %WER {WER:.2f} [ {num_edits} / {num_ref_tokens}, {insertions} ins, {deletions} del, {substitutions} sub ]'.format(**wer_details), file=file) |
def test_eb_constraints():
def f(x):
return (((x[0] ** 3) + (x[1] ** 2)) + (x[2] * x[3]))
def cfun(x):
return ((((x[0] + x[1]) + x[2]) + x[3]) - 40)
constraints = [{'type': 'ineq', 'fun': cfun}]
bounds = ([(0, 20)] * 4)
bounds[1] = (5, 5)
optimize.minimize(f, x0=[1, 2, 3, 4], method='SLSQP', bounds=bounds, constraints=constraints)
assert (constraints[0]['fun'] == cfun) |
def postprocess_train(all_features, sample_level):
all_features = {k: [postprocess_train_row(row, sample_level) for row in class_dt] for (k, class_dt) in all_features.items()}
return all_features |
def dump(data, stream=None, Dumper=Dumper, **kwds):
return dump_all([data], stream, Dumper=Dumper, **kwds) |
def try_touch_shape(array: Any):
if isinstance(array, TypeTracerArray):
array.touch_shape() |
def get_model_bn(num_channels, nfs, kss, l2regfactors, alpha, dropout_factor, num_dense):
inp_tensor1 = Input(shape=(288, 288, num_channels))
inp_tensor2 = Input(shape=(288, 288, num_channels))
n_img1 = normalize_tensor_image(inp_tensor1)
n_img2 = normalize_tensor_image(inp_tensor2)
total_inp_1 = n_img1
total_inp_2 = n_img2
(l2reg_dense, l2regconv) = l2regfactors
shared_model = get_shared_model_bn(total_inp_1.shape[(- 1)], nfs, kss, l2regconv, alpha)
[y1_1, y2_1, y3_1, y4_1] = shared_model(total_inp_1)
[y1_2, y2_2, y3_2, y4_2] = shared_model(total_inp_2)
branch = MaxPooling2D(2)(y4_2)
branch = Conv2D(nfs[4], (3, 3), padding='same', kernel_regularizer=tf.keras.regularizers.l2(l2regconv))(branch)
branch = LeakyReLU(alpha)(branch)
branch = BatchNormalization()(branch)
branch = UpSampling2D(2)(branch)
diff_1 = tf.math.abs((y4_1 - y4_2))
branch = Concatenate()([branch, diff_1])
branch = Conv2D(nfs[5], (3, 3), padding='same', kernel_regularizer=tf.keras.regularizers.l2(l2regconv))(branch)
branch = LeakyReLU(alpha)(branch)
branch = BatchNormalization()(branch)
branch = UpSampling2D(2)(branch)
diff_2 = tf.math.abs((y3_1 - y3_2))
branch = Concatenate()([branch, diff_2])
branch = Conv2D(nfs[6], (3, 3), padding='same', kernel_regularizer=tf.keras.regularizers.l2(l2regconv))(branch)
branch = LeakyReLU(alpha)(branch)
branch = BatchNormalization()(branch)
branch = UpSampling2D(2)(branch)
diff_3 = tf.math.abs((y2_1 - y2_2))
branch = Concatenate()([branch, diff_3])
branch = Conv2D(nfs[7], (3, 3), padding='same', kernel_regularizer=tf.keras.regularizers.l2(l2regconv))(branch)
branch = LeakyReLU(alpha)(branch)
branch = BatchNormalization()(branch)
branch = UpSampling2D(2)(branch)
diff_4 = tf.math.abs((y1_1 - y1_2))
branch = Concatenate()([branch, diff_4])
branch = Conv2D(nfs[8], (3, 3), padding='same', strides=(4, 4), kernel_regularizer=tf.keras.regularizers.l2(l2regconv))(branch)
branch = LeakyReLU(alpha)(branch)
branch = Conv2D(nfs[9], (3, 3), padding='same', strides=(2, 2), kernel_regularizer=tf.keras.regularizers.l2(l2regconv))(branch)
branch = LeakyReLU(alpha)(branch)
branch = Flatten()(branch)
branch = Dense(num_dense, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(l2reg_dense))(branch)
branch = Dropout(dropout_factor)(branch)
branch = Dense(1, activation='sigmoid')(branch)
branch = Dense(1)(branch)
model = Model([inp_tensor1, inp_tensor2], branch)
model.summary()
return model |
def run_training(args):
all_X = np.eye(args.d, args.d, dtype=np.float32)
all_sup_Nary_Y = np.random.randint(0, args.k, args.d)
all_sup_Y = np.zeros((args.d, args.k), dtype=np.float32)
for j in range(args.d):
all_sup_Y[(j, all_sup_Nary_Y[j])] = 1
with tf.Graph().as_default():
session = tf.Session()
X_placeholder = tf.placeholder(tf.float32, shape=(None, args.d))
Z_placeholder = tf.placeholder(tf.float32, shape=(None, args.k))
sup_Y_placeholder = tf.placeholder(tf.float32, shape=(None, args.k))
p1 = Affine('p1', X_placeholder, args.k, relu=False)
probs = tf.nn.softmax(p1)
probsZ = tf.reduce_sum((probs * Z_placeholder), 1)
loss_total = tf.reduce_mean((- probsZ))
if args.Dec:
probsY = tf.reduce_sum((probs * sup_Y_placeholder), 1)
loss = tf.reduce_mean((- probsY))
else:
loss = loss_total
optimizer = tf.train.AdamOptimizer(learning_rate=0.1, beta1=0.9, beta2=0.99, epsilon=1e-05)
train_op = optimizer.minimize(loss)
        session.run(tf.global_variables_initializer())
        for step in range(args.num_iters):
(X, Z, Y) = get_batch(args.batch_size, all_X, all_sup_Nary_Y, all_sup_Y, args.d, args.k)
_ = session.run(train_op, feed_dict={X_placeholder: X, Z_placeholder: Z, sup_Y_placeholder: Y})
if (((step % args.print_freq) == 0) or ((step + 1) == args.num_iters)):
(X, Z, Y) = get_batch(500, all_X, all_sup_Nary_Y, all_sup_Y, args.d, args.k)
loss_ = session.run([loss_total], feed_dict={X_placeholder: X, Z_placeholder: Z, sup_Y_placeholder: Y})[0]
print(('Iteration %d loss %.4f' % (step, loss_))) |
def get_optimizer_scheduler(args, model, t_total):
no_decay = ['bias', 'LayerNorm.weight']
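    # Exclude biases and LayerNorm weights from weight decay, as is standard for transformer fine-tuning.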
optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=t_total)
if (args.model_name_or_path and os.path.isfile(os.path.join(args.model_name_or_path, 'optimizer.pt')) and os.path.isfile(os.path.join(args.model_name_or_path, 'scheduler.pt'))):
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'optimizer.pt')))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'scheduler.pt')))
return (optimizer, scheduler) |
class BasePartitioner(metaclass=abc.ABCMeta):
def __init__(self, num_partitions: Optional[int]=None, model_parallel_submesh: Optional[HardwareMesh]=None, params_on_devices: bool=True, backend: Optional[str]=None):
if ((not num_partitions) and (not model_parallel_submesh)):
raise ValueError('At least one of `num_partitions` or `model_parallel_submesh` must be set.')
if ((model_parallel_submesh is not None) and (len(model_parallel_submesh) != 4)):
logging.error('`model_parallel_submesh` must be either None or a 4-tuple. Got `model_parallel_submesh`=%s. A ValueError will be raised beginning March 1, 2022.', model_parallel_submesh)
if (bool(num_partitions) and bool(model_parallel_submesh)):
logging.error('At most one of `num_partitions` or `model_parallel_submesh` can be set. Got `num_partitions=%s` and `model_parallel_submesh`=%s. A ValueError will be raised beginning March 21, 2022.', num_partitions, model_parallel_submesh)
self._num_partitions = num_partitions
self._model_parallel_submesh = model_parallel_submesh
self._params_on_devices = params_on_devices
self._data_axis = 'data'
self._backend = backend
    @property
    def mesh(self) -> Mesh:
raise NotImplementedError
    @property
    def data_partition_spec(self) -> PartitionSpec:
return PartitionSpec(self._data_axis)
def get_data_layout(self, batch_size: Optional[int]=None, host_index: Optional[int]=None) -> DataLayout:
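        # Derive the per-host batch size and shard assignment from the data axis of the device mesh.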
if (host_index is not None):
raise NotImplementedError('Explicit host_index is not yet implemented.')
if (self._data_axis is None):
return DataLayout(batch_size=batch_size, shard_id=0, num_shards=1, is_first_host_in_replica_set=(jax.process_index() == 0))
mesh_size = self._local_chunker.global_mesh.shape[self._data_axis]
batch_size = (batch_size or mesh_size)
if (batch_size % mesh_size):
raise ValueError(f'Batch size ({batch_size}) must be divisible by corresponding mesh size ({mesh_size}).')
num_shards = self._local_chunker.num_chunks[self._data_axis]
if (batch_size % num_shards):
raise ValueError(f'Batch size ({batch_size}) must be divisible by number of replicas ({num_shards}).')
replica_id = self._local_chunker.get_local_chunk_info((batch_size,), [self._data_axis]).replica_id
return DataLayout(batch_size=int(batch_size), shard_id=int(self._local_chunker.chunk_ids[self._data_axis]), num_shards=int(num_shards), is_first_host_in_replica_set=(replica_id == 0))
def get_local_chunk_info(self, global_shape: Tuple[(int, ...)], mesh_axes: Sequence[Optional[str]]) -> LocalChunkInfo:
return self._local_chunker.get_local_chunk_info(global_shape, mesh_axes)
    @property
    def params_on_devices(self):
return self._params_on_devices
def move_params_to_devices(self, train_state: TrainState, train_state_axes: TrainState) -> TrainState:
p_id_fn = self.partition(_id_fn, in_axis_resources=(train_state_axes, None), out_axis_resources=(train_state_axes, None), donate_argnums=(0,))
if (jax.config.jax_array and (jax.process_count() > 1)):
train_state = multihost_utils.host_local_array_to_global_array(train_state, self.mesh, train_state_axes)
(train_state, _) = p_id_fn(train_state, jnp.ones((), dtype=jnp.uint32))
return train_state
    @property
    def _local_chunker(self):
raise NotImplementedError
def get_logical_axes(self, train_state: TrainState) -> TrainState:
return train_state.restore_state(jax.tree_map((lambda x: None), train_state.state_dict()))
def get_mesh_axes(self, train_state: TrainState) -> TrainState:
raise NotImplementedError
def partition(self, fn: Callable, in_axis_resources, out_axis_resources, static_argnums: Union[(int, Sequence[int])]=(), donate_argnums: Union[(int, Sequence[int])]=()) -> PartitionedCallable:
raise NotImplementedError
def compile(self, partitioned_fn: PartitionedCallable, *args) -> CompiledPartitionedCallable:
raise NotImplementedError |
def conll09_srl_eval_tf(predictions, targets, predicate_predictions, words, mask, predicate_targets, reverse_maps, gold_srl_eval_file, pred_srl_eval_file, pos_predictions, pos_targets, parse_head_targets, parse_head_predictions, parse_label_targets, parse_label_predictions):
with tf.name_scope('conll_srl_eval'):
correct_count = create_metric_variable('correct_count', shape=[], dtype=tf.int64)
excess_count = create_metric_variable('excess_count', shape=[], dtype=tf.int64)
missed_count = create_metric_variable('missed_count', shape=[], dtype=tf.int64)
str_predictions = nn_utils.int_to_str_lookup_table(predictions, reverse_maps['srl'])
str_words = nn_utils.int_to_str_lookup_table(words, reverse_maps['word'])
str_srl_targets = nn_utils.int_to_str_lookup_table(targets, reverse_maps['srl'])
str_parse_label_targets = nn_utils.int_to_str_lookup_table(parse_label_targets, reverse_maps['parse_label'])
str_parse_label_predictions = nn_utils.int_to_str_lookup_table(parse_label_predictions, reverse_maps['parse_label'])
str_pos_predictions = nn_utils.int_to_str_lookup_table(pos_predictions, reverse_maps['gold_pos'])
str_pos_targets = nn_utils.int_to_str_lookup_table(pos_targets, reverse_maps['gold_pos'])
str_predicate_predictions = nn_utils.int_to_str_lookup_table(predicate_predictions, reverse_maps['predicate'])
str_predicate_targets = nn_utils.int_to_str_lookup_table(predicate_targets, reverse_maps['predicate'])
py_eval_inputs = [str_predictions, str_predicate_predictions, str_words, mask, str_srl_targets, str_predicate_targets, str_parse_label_predictions, parse_head_predictions, str_parse_label_targets, parse_head_targets, str_pos_targets, str_pos_predictions, pred_srl_eval_file, gold_srl_eval_file]
out_types = [tf.int64, tf.int64, tf.int64]
(correct, excess, missed) = tf.py_func(evaluation_fns_np.conll09_srl_eval, py_eval_inputs, out_types, stateful=False)
update_correct_op = tf.assign_add(correct_count, correct)
update_excess_op = tf.assign_add(excess_count, excess)
update_missed_op = tf.assign_add(missed_count, missed)
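        # Precision, recall and F1 are computed from the running correct/excess/missed counts.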
precision_update_op = (update_correct_op / (update_correct_op + update_excess_op))
recall_update_op = (update_correct_op / (update_correct_op + update_missed_op))
f1_update_op = (((2 * precision_update_op) * recall_update_op) / (precision_update_op + recall_update_op))
precision = (correct_count / (correct_count + excess_count))
recall = (correct_count / (correct_count + missed_count))
f1 = (((2 * precision) * recall) / (precision + recall))
return (f1, f1_update_op) |
def test_check_feature_names_error():
X = np.random.randn(10, 3)
feature_names = ['a', 'b', 'c', 'a']
msg = 'feature_names should not contain duplicates.'
with pytest.raises(ValueError, match=msg):
_check_feature_names(X, feature_names) |
def test_compute_fractal_dimension_convergence():
fractal_dimension_test('convergence.png', 1.83) |
def get_text_prompts(label):
return [f'a photo of {label}.', f'a photo of the small {label}.', f'a low resolution photo of a {label}.', f'a photo of many {label}.'] |
class InfinityType(object):
def __repr__(self):
return 'Infinity'
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return (not isinstance(other, self.__class__))
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __neg__(self):
return NegativeInfinity |
class TestEvaluationMetrics():
def setup_method(self):
self.ground = [1, 2, 3]
self.prediction_a = [1, 2, 3]
self.prediction_b = [2, 2, 2]
self.prediction_c = [3, 2, 1]
def test_r_squared(self):
assert (sk.r_squared(self.ground, self.prediction_a) == 1)
assert (sk.r_squared(self.ground, self.prediction_b) == 0)
assert (sk.r_squared(self.ground, self.prediction_c) == (- 3))
    def test_mse(self):
        # Expected values assume the standard mean-squared-error definition.
        assert (sk.mse(self.ground, self.prediction_a) == 0)
        assert math.isclose(sk.mse(self.ground, self.prediction_b), (2.0 / 3.0))
        assert math.isclose(sk.mse(self.ground, self.prediction_c), (8.0 / 3.0))
    def test_rmse(self):
        assert (math.sqrt(sk.mse(self.ground, self.prediction_a)) == 0)
        assert math.isclose(math.sqrt(sk.mse(self.ground, self.prediction_b)), math.sqrt(2.0 / 3.0))
        assert math.isclose(math.sqrt(sk.mse(self.ground, self.prediction_c)), math.sqrt(8.0 / 3.0))
    def test_nrmse(self):
        assert ((math.sqrt(sk.mse(self.ground, self.prediction_a)) / np.sum(self.ground)) == 0)
        assert math.isclose((math.sqrt(sk.mse(self.ground, self.prediction_b)) / np.sum(self.ground)), (math.sqrt(2.0 / 3.0) / 6.0))
        assert math.isclose((math.sqrt(sk.mse(self.ground, self.prediction_c)) / np.sum(self.ground)), (math.sqrt(8.0 / 3.0) / 6.0))
def test_max_error(self):
assert (sk.max_error(self.ground, self.prediction_a) == 0)
assert (sk.max_error(self.ground, self.prediction_b) == 1)
assert (sk.max_error(self.ground, self.prediction_c) == 2)
def test_information_gain(self):
assert math.isclose(sk.information_gain(self.ground, self.prediction_a), 0.0)
assert math.isclose(sk.information_gain(self.ground, self.prediction_b), 0.)
assert math.isclose(sk.information_gain(self.ground, self.prediction_c), 0.)
    def test_pearson_correlation(self):
        # r is 1 for identical sequences, NaN against a constant sequence, and -1 for a reversed one.
        assert math.isclose(sk.pearson_correlation(self.ground, self.prediction_a)[0], 1.0)
        assert math.isnan(sk.pearson_correlation(self.ground, self.prediction_b)[0])
        assert math.isclose(sk.pearson_correlation(self.ground, self.prediction_c)[0], (- 1.0))
    def test_spearman_correlation(self):
        assert math.isclose(sk.spearman_correlation(self.ground, self.prediction_a)[0], 1.0)
        assert math.isnan(sk.spearman_correlation(self.ground, self.prediction_b)[0])
        assert math.isclose(sk.spearman_correlation(self.ground, self.prediction_c)[0], (- 1.0))
def test_kl_divergence(self):
assert math.isclose(sk.kullback_leibler_divergence(self.ground, self.prediction_a), 0)
assert math.isclose(sk.kullback_leibler_divergence(self.ground, self.prediction_b), 0.)
assert math.isclose(sk.kullback_leibler_divergence(self.ground, self.prediction_c), 0.)
def test_common_part_of_commuters(self):
assert math.isclose(sk.common_part_of_commuters(self.ground, self.prediction_a), 1)
assert math.isclose(sk.common_part_of_commuters(self.ground, self.prediction_b), 0.)
assert math.isclose(sk.common_part_of_commuters(self.ground, self.prediction_c), 0.)
def test_common_part_of_commuters_distance(self):
assert math.isclose(sk.common_part_of_commuters_distance(self.ground, self.prediction_a), 0.)
assert math.isclose(sk.common_part_of_commuters_distance(self.ground, self.prediction_b), 0.)
assert math.isclose(sk.common_part_of_commuters_distance(self.ground, self.prediction_c), 0.) |
def resnet18(pretrained_path=None):
model = ResNet(BasicBlock, [2, 2, 2, 2])
if (pretrained_path is not None):
model.load_state_dict(torch.load(pretrained_path))
print('Loaded pre-trained weights')
return model |
def omegaconf_to_dict(omegaconf, name):
return {((name + '_') + k): v for (k, v) in omegaconf.to_dict()} |
def to_local_command(params, python_command='python', script=osp.join(config.PROJECT_PATH, 'scripts/run_experiment.py'), use_gpu=False):
command = ((python_command + ' ') + script)
if (use_gpu and (not config.USE_TF)):
command = ("THEANO_FLAGS='device=gpu,dnn.enabled=auto' " + command)
for (k, v) in config.ENV.items():
command = (('%s=%s ' % (k, v)) + command)
for (k, v) in params.items():
if isinstance(v, dict):
for (nk, nv) in v.items():
if (str(nk) == '_name'):
command += (' --%s %s' % (k, _to_param_val(nv)))
else:
command += (' --%s_%s %s' % (k, nk, _to_param_val(nv)))
else:
command += (' --%s %s' % (k, _to_param_val(v)))
return command |
def main():
args = parse_args()
assert (args.out or args.eval or args.format_only or args.show or args.show_dir), 'Please specify at least one operation (save/eval/format/show the results / save the results) with the argument "--out", "--eval", "--format-only", "--show" or "--show-dir"'
if (args.eval and args.format_only):
raise ValueError('--eval and --format_only cannot be both specified')
if ((args.out is not None) and (not args.out.endswith(('.pkl', '.pickle')))):
raise ValueError('The output file must be a pkl file.')
cfg = mmcv.Config.fromfile(args.config)
if (args.cfg_options is not None):
cfg.merge_from_dict(args.cfg_options)
setup_multi_processes(cfg)
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
if args.aug_test:
cfg.data.test.pipeline[1].img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
cfg.data.test.pipeline[1].flip = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
if (args.gpu_id is not None):
cfg.gpu_ids = [args.gpu_id]
if (args.launcher == 'none'):
cfg.gpu_ids = [args.gpu_id]
distributed = False
if (len(cfg.gpu_ids) > 1):
warnings.warn(f'The gpu-ids is reset from {cfg.gpu_ids} to {cfg.gpu_ids[0:1]} to avoid potential error in non-distribute testing time.')
cfg.gpu_ids = cfg.gpu_ids[0:1]
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
(rank, _) = get_dist_info()
if ((args.work_dir is not None) and (rank == 0)):
mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
if args.aug_test:
json_file = osp.join(args.work_dir, f'eval_multi_scale_{timestamp}.json')
else:
json_file = osp.join(args.work_dir, f'eval_single_scale_{timestamp}.json')
elif (rank == 0):
work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
mmcv.mkdir_or_exist(osp.abspath(work_dir))
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
if args.aug_test:
json_file = osp.join(work_dir, f'eval_multi_scale_{timestamp}.json')
else:
json_file = osp.join(work_dir, f'eval_single_scale_{timestamp}.json')
dataset = build_dataset(cfg.data.test)
loader_cfg = dict(num_gpus=len(cfg.gpu_ids), dist=distributed, shuffle=False)
loader_cfg.update({k: v for (k, v) in cfg.data.items() if (k not in ['train', 'val', 'test', 'train_dataloader', 'val_dataloader', 'test_dataloader'])})
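    # The test loader defaults to one sample per GPU without shuffling (overridable via cfg test_dataloader).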
test_loader_cfg = {**loader_cfg, 'samples_per_gpu': 1, 'shuffle': False, **cfg.data.get('test_dataloader', {})}
data_loader = build_dataloader(dataset, **test_loader_cfg)
cfg.model.train_cfg = None
model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
fp16_cfg = cfg.get('fp16', None)
if (fp16_cfg is not None):
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if ('CLASSES' in checkpoint.get('meta', {})):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
print('"CLASSES" not found in meta, use dataset.CLASSES instead')
model.CLASSES = dataset.CLASSES
if ('PALETTE' in checkpoint.get('meta', {})):
model.PALETTE = checkpoint['meta']['PALETTE']
else:
print('"PALETTE" not found in meta, use dataset.PALETTE instead')
model.PALETTE = dataset.PALETTE
torch.cuda.empty_cache()
eval_kwargs = ({} if (args.eval_options is None) else args.eval_options)
efficient_test = eval_kwargs.get('efficient_test', False)
if efficient_test:
warnings.warn('``efficient_test=True`` does not have effect in tools/test.py, the evaluation and format results are CPU memory efficient by default')
eval_on_format_results = ((args.eval is not None) and ('cityscapes' in args.eval))
if eval_on_format_results:
assert (len(args.eval) == 1), 'eval on format results is not applicable for metrics other than cityscapes'
if (args.format_only or eval_on_format_results):
if ('imgfile_prefix' in eval_kwargs):
tmpdir = eval_kwargs['imgfile_prefix']
else:
tmpdir = '.format_cityscapes'
eval_kwargs.setdefault('imgfile_prefix', tmpdir)
mmcv.mkdir_or_exist(tmpdir)
else:
tmpdir = None
if (not distributed):
warnings.warn('SyncBN is only supported with DDP. To be compatible with DP, we convert SyncBN to BN. Please use dist_train.sh which can avoid this error.')
if (not torch.cuda.is_available()):
assert (digit_version(mmcv.__version__) >= digit_version('1.4.4')), 'Please use MMCV >= 1.4.4 for CPU training!'
model = revert_sync_batchnorm(model)
model = MMDataParallel(model, device_ids=cfg.gpu_ids)
results = single_gpu_test(model, data_loader, args.show, args.show_dir, False, args.opacity, pre_eval=((args.eval is not None) and (not eval_on_format_results)), format_only=(args.format_only or eval_on_format_results), format_args=eval_kwargs)
else:
model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
results = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect, False, pre_eval=((args.eval is not None) and (not eval_on_format_results)), format_only=(args.format_only or eval_on_format_results), format_args=eval_kwargs)
(rank, _) = get_dist_info()
if (rank == 0):
if args.out:
warnings.warn('The behavior of ``args.out`` has been changed since MMSeg v0.16, the pickled outputs could be seg map as type of np.array, pre-eval results or file paths for ``dataset.format_results()``.')
            print(f'\nwriting results to {args.out}')
mmcv.dump(results, args.out)
if args.eval:
eval_kwargs.update(metric=args.eval)
metric = dataset.evaluate(results, **eval_kwargs)
metric_dict = dict(config=args.config, metric=metric)
mmcv.dump(metric_dict, json_file, indent=4)
if ((tmpdir is not None) and eval_on_format_results):
shutil.rmtree(tmpdir) |
def kernel3(a, mat):
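    # Build a Taichi matrix type matching the input's shape for use in the kernel signature.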
mat_type = ti.types.matrix(mat.n, mat.m, ti.i32)
    @ti.kernel
    def kernel(u: ti.i32, v: mat_type) -> mat_type:
return (u * v)
return kernel(a, mat) |
def write_plotting_data(arch_str, data_obj):
plotting_dir = os.path.join('..', 'plotting_data', arch_str)
if (not os.path.isdir(plotting_dir)):
os.makedirs(plotting_dir)
write_loc = os.path.join(plotting_dir, 'data.json')
json_data = json.dumps(data_obj, indent=4)
print('Writing')
with open(write_loc, 'w') as f:
f.write(json_data)
print('Written') |
class BasicTransformerBlock(nn.Module):
def __init__(self, dim, n_heads, d_head, dropout=0.0, context_dim=None, gated_ff=True, checkpoint=False):
super().__init__()
self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout)
self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, heads=n_heads, dim_head=d_head, dropout=dropout)
self.norm1 = nn.LayerNorm(dim)
self.norm2 = nn.LayerNorm(dim)
self.norm3 = nn.LayerNorm(dim)
self.checkpoint = checkpoint
def forward(self, x, context=None):
return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
def _forward(self, x, context=None):
x = (self.attn1(self.norm1(x)) + x)
x = (self.attn2(self.norm2(x), context=context) + x)
x = (self.ff(self.norm3(x)) + x)
return x |
class Normalize(object):
def __init__(self, mean, std, inplace=False):
self.mean = mean
self.std = std
self.inplace = inplace
def __call__(self, img, mask):
return (F.normalize(img, self.mean, self.std, self.inplace), mask) |
def test_date_time():
numpy_array = np.array(['2020-07-27T10:41:11', '2019-01-01', '2020-01-01'], 'datetime64[s]')
array = ak.highlevel.Array(numpy_array)
assert (str(array.type) == '3 * datetime64[s]')
assert (array.to_list() == [np.datetime64('2020-07-27T10:41:11'), np.datetime64('2019-01-01T00:00:00'), np.datetime64('2020-01-01T00:00:00')])
array = array.layout
assert (to_list(array) == [np.datetime64('2020-07-27T10:41:11'), np.datetime64('2019-01-01T00:00:00'), np.datetime64('2020-01-01T00:00:00')])
assert (ak.max(array, highlevel=False) == numpy_array[0])
assert (ak.min(array, highlevel=False) == numpy_array[1]) |
def obj_centened_camera_pos(dist, azimuth_deg, elevation_deg):
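    # Convert spherical coordinates (distance, azimuth and elevation in degrees) to a Cartesian camera position.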
phi = ((float(elevation_deg) / 180) * math.pi)
theta = ((float(azimuth_deg) / 180) * math.pi)
x = ((dist * math.cos(theta)) * math.cos(phi))
y = ((dist * math.sin(theta)) * math.cos(phi))
z = (dist * math.sin(phi))
return (x, y, z) |
class SynchronizedSeedDataset(SynchronizedDataset):
def __getitem__(self, idx):
self.sync_once()
return torch.initial_seed() |
def main():
parser = argparse.ArgumentParser()
register_args(parser)
args = parser.parse_args()
if (os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and (not args.overwrite_output_dir)):
raise ValueError('Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.'.format(args.output_dir))
if (args.server_ip and args.server_port):
import ptvsd
print('Waiting for debugger attach')
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
if ((args.local_rank == (- 1)) or args.no_cuda):
device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
args.n_gpu = (0 if args.no_cuda else torch.cuda.device_count())
else:
torch.cuda.set_device(args.local_rank)
device = torch.device('cuda', args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
logging.basicConfig(stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=(logging.INFO if (args.local_rank in [(- 1), 0]) else logging.WARN))
logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s', args.local_rank, device, args.n_gpu, bool((args.local_rank != (- 1))))
set_seed(args)
if (args.local_rank not in [(- 1), 0]):
torch.distributed.barrier()
args.model_type = args.model_type.lower()
(config, tokenizer, model) = load_untrained_model(args)
if (args.local_rank == 0):
torch.distributed.barrier()
model.to(args.device)
logger.info('Training/evaluation parameters %s', args)
if args.do_train:
train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False)
(global_step, tr_loss) = train(args, train_dataset, model, tokenizer)
logger.info(' global_step = %s, average loss = %s', global_step, tr_loss)
if (args.do_train and ((args.local_rank == (- 1)) or (torch.distributed.get_rank() == 0))):
logger.info('Saving model checkpoint to %s', args.output_dir)
model_to_save = (model.module if hasattr(model, 'module') else model)
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
model = get_model_class(args).from_pretrained(args.output_dir)
tokenizer = AutoTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
model.to(args.device)
results = {}
if (args.do_eval and (args.local_rank in [(- 1), 0])):
if args.do_train:
logger.info('Loading checkpoints saved during training for evaluation')
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list((os.path.dirname(c) for c in sorted(glob.glob(((args.output_dir + '/**/') + WEIGHTS_NAME), recursive=True))))
else:
logger.info('Loading checkpoint %s for evaluation', args.model_name_or_path)
checkpoints = [args.model_name_or_path]
logger.info('Evaluate the following checkpoints: %s', checkpoints)
for checkpoint in checkpoints:
global_step = (checkpoint.split('-')[(- 1)] if (len(checkpoints) > 1) else '')
model = get_model_class(args).from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, output_prediction=True)
result = dict((((k + ('_{}'.format(global_step) if global_step else '')), v) for (k, v) in result.items()))
results.update(result)
logger.info('Results: {}'.format(results))
return results |
def collate_dicts(batch: List[Batch]) -> Batch:
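    # Gather per-field values across the batch, then stack tensor-valued fields into batched tensors.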
X_batch: Dict[(str, Any)] = defaultdict(list)
Y_batch: Dict[(str, Any)] = defaultdict(list)
for (x_dict, y_dict) in batch:
for (field_name, value) in x_dict.items():
X_batch[field_name].append(value)
for (label_name, value) in y_dict.items():
Y_batch[label_name].append(value)
for (field_name, values) in X_batch.items():
if isinstance(values[0], Tensor):
X_batch[field_name] = list_to_tensor(values)
for (label_name, values) in Y_batch.items():
Y_batch[label_name] = list_to_tensor(values)
return (dict(X_batch), dict(Y_batch)) |
class DataGenerator(Dataset):
def __init__(self, img_dir, split_file, transform):
self.img_name_list = []
self.img_label_list = []
self.transform = transform
with open(split_file, 'r') as split_name:
img_and_label_list = split_name.readlines()
for index in img_and_label_list:
img_path = os.path.join(img_dir, index.split()[0])
img_label = int(index.split()[1])
self.img_name_list.append(img_path)
self.img_label_list.append(img_label)
def __getitem__(self, index):
img_name = self.img_name_list[index]
image_data = Image.open(img_name).convert('RGB')
image_data = self.transform(image_data)
image_label = self.img_label_list[index]
return (image_data, image_label, img_name)
def __len__(self):
return len(self.img_name_list) |