code stringlengths 101 5.91M |
|---|
class Evaluation():
    """Accumulates per-class true-positive / total counts during few-shot
    segmentation validation and derives per-class IoU plus the mean IoU of
    the classes belonging to the currently evaluated fold/group.

    NOTE(review): relies on module-level `np` (numpy) and a `measure`
    helper (returning tp/tn/fp/fn) defined elsewhere in this file.
    """

    def __init__(self, args):
        # Dataset selects the class count (COCO: 80, PASCAL VOC: 20).
        if args.dataset == 'coco':
            self.num_classes = 80
        if args.dataset == 'voc':
            self.num_classes = 20
        # BUG FIX: num_folds was only assigned inside the 'voc' branch, so
        # get_val_id_list() raised AttributeError for COCO. It is a constant
        # of the 4-fold evaluation protocol, so set it unconditionally.
        self.num_folds = 4
        # BUG FIX: use integer division; `/` produced a float (e.g. 5.0).
        self.group_class_num = self.num_classes // self.num_folds
        self.batch_size = args.batch_size
        self.disp_interval = args.disp_interval
        # Counters are reset when the step counter reaches `clear_num`.
        self.clear_num = 200
        self.group = args.group
        self.group_mean_iou = [0] * self.num_folds
        self.setup()

    def get_val_id_list(self):
        """Return the class ids of validation fold `self.group`
        (every num_folds-th class, offset by the group index)."""
        num = self.num_classes // self.num_folds
        return [self.group + self.num_folds * v for v in range(num)]

    def setup(self):
        """(Re)initialize the per-class tp / total / IoU accumulators."""
        self.tp_list = [0] * self.num_classes
        self.total_list = [0] * self.num_classes
        self.iou_list = [0] * self.num_classes

    def update_class_index(self):
        """Select which class ids belong to the evaluated group."""
        if self.num_classes == 80:
            # COCO: interleaved fold assignment.
            self.class_indexes = self.get_val_id_list()
        if self.num_classes == 20:
            # VOC: contiguous blocks of 5 classes per group.
            self.class_indexes = range(self.group * 5, (self.group + 1) * 5)

    def update_evl(self, idx, query_mask, pred, count):
        """Accumulate tp/total for one batch and refresh the group mean IoU.

        idx: per-sample class ids (tensor-like; .item() per element).
        query_mask / pred: ground-truth and predicted masks per sample.
        count: running step counter; accumulators reset at `clear_num`.
        """
        self.update_class_index()
        if count == self.clear_num:
            self.setup()
        for i in range(self.batch_size):
            cls_id = idx[i].item()  # renamed from `id` (shadowed builtin)
            tp, total = self.test_in_train(query_mask[i], pred[i])
            self.tp_list[cls_id] += tp
            self.total_list[cls_id] += total
        # max(..., 1) guards against division by zero for unseen classes.
        self.iou_list = [self.tp_list[ic] / float(max(self.total_list[ic], 1))
                         for ic in range(self.num_classes)]
        self.group_mean_iou[self.group] = np.mean(np.take(self.iou_list, self.class_indexes))

    def test_in_train(self, query_label, pred):
        """Compute (tp, tp + fp + fn) for one prediction/label mask pair."""
        pred = pred.data.cpu().numpy().astype(np.int32)
        query_label = query_label.cpu().numpy().astype(np.int32)
        tp, tn, fp, fn = measure(query_label, pred)
        total = tp + fp + fn
        return (tp, total)
def run_tcl_exp(args, config):
    """Run the TCL baseline over every (n_layers, n_obs_per_seg) setting.

    For each configuration, synthetic data is generated once and TCL is
    trained `args.nSims` times with different seeds; mean correlation
    coefficients (with and without the final ICA step) are collected.
    Returns a dict with the data dimensions and both result tables.
    """
    # Training-step schedule indexed by network depth.
    step_schedule = {1: [5000, 5000], 2: [10000, 10000], 3: [10000, 10000],
                     4: [10000, 10000], 5: [10000, 10000]}
    data_dim = config.data_dim
    n_segments = config.n_segments
    n_layers = config.n_layers
    n_obs_per_seg = config.n_obs_per_seg
    data_seed = config.data_seed
    num_comp = data_dim
    # results[l][n] collects one MCC per simulation seed.
    results = {l: {n: [] for n in n_obs_per_seg} for l in n_layers}
    results_no_ica = {l: {n: [] for n in n_obs_per_seg} for l in n_layers}
    for l in n_layers:
        for n in n_obs_per_seg:
            x, y, s = generate_synthetic_data(data_dim, n_segments, n, l,
                                              seed=data_seed,
                                              simulationMethod=args.dataset,
                                              one_hot_labels=False)
            for seed in range(args.nSims):
                print('Running exp with L={} and n={}; seed={}'.format(l, n, seed))
                ckpt_folder = os.path.join(args.checkpoints, args.dataset,
                                           str(l), str(n), str(seed))
                # Hidden layout: (l-1) layers of width 2*num_comp, then num_comp.
                hidden_nodes = [num_comp * 2] * (l - 1) + [num_comp]
                res_TCL = TCL_wrapper(sensor=x.T, label=y, random_seed=seed,
                                      list_hidden_nodes=hidden_nodes,
                                      max_steps=step_schedule[l][0] * 2,
                                      max_steps_init=step_schedule[l][1],
                                      ckpt_dir=ckpt_folder, test=args.test)
                # MCC is measured against the squared sources.
                mcc_no_ica = mean_corr_coef(res_TCL[0].T, s ** 2)
                mcc_ica = mean_corr_coef(res_TCL[1].T, s ** 2)
                print('TCL mcc (no ICA): {}\t mcc: {}'.format(mcc_no_ica, mcc_ica))
                results[l][n].append(mcc_ica)
                results_no_ica[l][n].append(mcc_no_ica)
    return {'data_dim': data_dim, 'data_segments': n_segments,
            'CorrelationCoef': results, 'CorrelationCoef_no_ica': results_no_ica}
def get_coord_values(field_line):
    """Extract (radius [solar radii], longitude, latitude) from a field line.

    The coordinates are first passed through check_field_line_alignment();
    the radius is normalized by the solar radius constant.
    """
    coords = check_field_line_alignment(field_line.coords)
    radius_rsun = coords.radius.value / aconst.R_sun.value
    return (radius_rsun, coords.lon.value, coords.lat.value)
def ReScaleSize_STARE(image, re_size=512):
    """Pad a PIL image to a centered square (zero fill) and resize it to
    (re_size, re_size)."""
    width, height = image.size
    side = max(width, height)
    pad_w = side - width
    pad_h = side - height
    # (left, top, right, bottom) — odd remainders go to the right/bottom.
    borders = (pad_w // 2, pad_h // 2, pad_w - pad_w // 2, pad_h - pad_h // 2)
    squared = ImageOps.expand(image, borders, fill=0)
    return squared.resize((re_size, re_size))
def quantize_model_(model, size_tracker, layers_to_quantize, block_sizes_config, n_centroids_config, step=0, n_iter=15, eps=1e-06, max_tentatives=100, verbose=True):
    """Quantize (in place) the layers selected by layers_to_quantize[step]
    with product quantization, replacing each supported module
    (nn.Linear / nn.Embedding / nn.Conv2d) by its PQ counterpart.

    Returns the list of layer names that were quantized.

    NOTE(review): PQ/PQLinear/PQEmbedding/PQConv2d, get_layers, get_param,
    attrsetter and torch.distributed (`dist`) are provided by the
    surrounding module.
    """
    quantized_layers = get_layers(model, layers_to_quantize[step])
    for layer in quantized_layers:
        # Only rank 0 logs when running under torch.distributed.
        is_master_process = ((not dist.is_initialized()) or (dist.is_initialized() and (dist.get_rank() == 0)))
        verbose = (verbose and is_master_process)
        module = attrgetter(layer)(model)
        block_size = get_param(module, layer, block_sizes_config)
        n_centroids = get_param(module, layer, n_centroids_config)
        if verbose:
            logging.info(f'Quantizing layer {layer} with block size {block_size} and {n_centroids} centroids')
        weight = module.weight.data.clone()
        is_bias = ('bias' in [x[0] for x in module.named_parameters()])
        bias = (module.bias.data.clone() if is_bias else None)
        quantizer = PQ(weight, block_size, n_centroids=n_centroids, n_iter=n_iter, eps=eps, max_tentatives=max_tentatives, verbose=verbose)
        # Learn the codebook, then broadcast rank 0's result so every worker
        # ends up with identical centroids/assignments.
        quantizer.encode()
        centroids = quantizer.centroids.contiguous()
        assignments = quantizer.assignments.contiguous()
        if dist.is_initialized():
            dist.broadcast(centroids, 0)
            dist.broadcast(assignments, 0)
        # Build the quantized replacement matching the module type.
        if isinstance(module, nn.Linear):
            (out_features, in_features) = map((lambda k: module.__dict__[k]), ['out_features', 'in_features'])
            quantized_module = PQLinear(centroids, assignments, bias, in_features, out_features)
        elif isinstance(module, nn.Embedding):
            (num_embeddings, embedding_dim) = map((lambda k: module.__dict__[k]), ['num_embeddings', 'embedding_dim'])
            quantized_module = PQEmbedding(centroids, assignments, num_embeddings, embedding_dim)
        elif isinstance(module, nn.Conv2d):
            (out_channels, in_channels, kernel_size) = map((lambda k: module.__dict__[k]), ['out_channels', 'in_channels', 'kernel_size'])
            (stride, padding, dilation, groups, padding_mode) = map((lambda k: module.__dict__[k]), ['stride', 'padding', 'dilation', 'groups', 'padding_mode'])
            quantized_module = PQConv2d(centroids, assignments, bias, in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, padding_mode=padding_mode)
        else:
            raise ValueError(f'Module {module} not yet supported for quantization')
        # Swap in the quantized module and account for the size reduction.
        attrsetter(layer)(model, quantized_module)
        size_tracker.update(weight, block_size, n_centroids)
    return quantized_layers
def estimate_latent_channels(extractor, train_loader):
device = next(extractor.parameters()).device
feats = []
i_samples = 0
for (i, normal_img) in enumerate(train_loader):
with torch.no_grad():
feat = extractor(normal_img.to(device))
(b, c) = feat.shape[:2]
feat = feat.permute(0, 2, 3, 1).reshape((- 1), c)
feats.append(feat)
del feat
i_samples += b
if (i_samples > 20):
break
feats = torch.cat(feats, axis=0)
mean = torch.mean(feats, dim=0)
feats -= mean
n_samples = feats.shape[0]
s = torch.linalg.svdvals(feats.cpu())
explained_variance = ((s ** 2) / (n_samples - 1))
total_variance = explained_variance.sum()
explained_variance_ratio = (explained_variance / total_variance)
cumulative_explained_var_ratio = list(torch.cumsum(explained_variance_ratio, 0))
latent_channels = len([i for i in cumulative_explained_var_ratio if (i <= 0.9)])
del feats
return latent_channels |
class ToIterableDataset(data.IterableDataset):
    """Wrap a map-style dataset plus a sampler into an IterableDataset.

    When `shard_sampler` is True, each dataloader worker iterates only its
    own shard of the sampler (in chunks of `shard_chunk_size`); otherwise
    every worker iterates the full sampler.
    """

    def __init__(self, dataset: data.Dataset, sampler: Sampler, shard_sampler: bool=True, shard_chunk_size: int=1):
        assert not isinstance(dataset, data.IterableDataset), dataset
        assert isinstance(sampler, Sampler), sampler
        self.dataset = dataset
        self.sampler = sampler
        self.shard_sampler = shard_sampler
        self.shard_chunk_size = shard_chunk_size

    def __iter__(self):
        if self.shard_sampler:
            index_iter = _shard_iterator_dataloader_worker(self.sampler, self.shard_chunk_size)
        else:
            index_iter = self.sampler
        for index in index_iter:
            yield self.dataset[index]

    def __len__(self):
        # Length of the underlying sampler, not of one worker's shard.
        return len(self.sampler)
_model
def regnetx_016(pretrained=False, **kwargs):
    """Build the 'regnetx_016' model variant via _create_regnet.

    Extra kwargs are forwarded to the model constructor. NOTE(review): the
    bare `_model` token on the preceding line looks like a truncated
    `@register_model` decorator — confirm against the original file.
    """
    return _create_regnet('regnetx_016', pretrained, **kwargs)
class ControlNet(ExamplesTestsAccelerate):
    """End-to-end CLI tests for examples/controlnet/train_controlnet.py,
    exercising the --checkpoints_total_limit pruning behaviour."""

    def test_controlnet_checkpointing_checkpoints_total_limit(self):
        """Train 6 steps, checkpoint every 2, limit 2: only the two newest
        checkpoints (steps 4 and 6) should remain on disk."""
        with tempfile.TemporaryDirectory() as tmpdir:
            test_args = f'''
                examples/controlnet/train_controlnet.py
                --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
                --dataset_name=hf-internal-testing/fill10
                --output_dir={tmpdir}
                --resolution=64
                --train_batch_size=1
                --gradient_accumulation_steps=1
                --max_train_steps=6
                --checkpoints_total_limit=2
                --checkpointing_steps=2
                --controlnet_model_name_or_path=hf-internal-testing/tiny-controlnet
                '''.split()
            run_command((self._launch_args + test_args))
            self.assertEqual({x for x in os.listdir(tmpdir) if ('checkpoint' in x)}, {'checkpoint-4', 'checkpoint-6'})

    def test_controlnet_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
        """First train without a limit (checkpoints 2..8 all kept), then
        resume from checkpoint-8 with --checkpoints_total_limit=3 and verify
        that older checkpoints get pruned down to the newest three."""
        with tempfile.TemporaryDirectory() as tmpdir:
            test_args = f'''
                examples/controlnet/train_controlnet.py
                --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
                --dataset_name=hf-internal-testing/fill10
                --output_dir={tmpdir}
                --resolution=64
                --train_batch_size=1
                --gradient_accumulation_steps=1
                --controlnet_model_name_or_path=hf-internal-testing/tiny-controlnet
                --max_train_steps=9
                --checkpointing_steps=2
                '''.split()
            run_command((self._launch_args + test_args))
            self.assertEqual({x for x in os.listdir(tmpdir) if ('checkpoint' in x)}, {'checkpoint-2', 'checkpoint-4', 'checkpoint-6', 'checkpoint-8'})
            resume_run_args = f'''
                examples/controlnet/train_controlnet.py
                --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
                --dataset_name=hf-internal-testing/fill10
                --output_dir={tmpdir}
                --resolution=64
                --train_batch_size=1
                --gradient_accumulation_steps=1
                --controlnet_model_name_or_path=hf-internal-testing/tiny-controlnet
                --max_train_steps=11
                --checkpointing_steps=2
                --resume_from_checkpoint=checkpoint-8
                --checkpoints_total_limit=3
                '''.split()
            run_command((self._launch_args + resume_run_args))
            self.assertEqual({x for x in os.listdir(tmpdir) if ('checkpoint' in x)}, {'checkpoint-8', 'checkpoint-10', 'checkpoint-12'})
@pytest.fixture(autouse=True)
def _requests_prevent_post(monkeypatch: MonkeyPatch, thrower: Callable, logging_side_effect: Callable) -> MagicMock:
    """Autouse fixture replacing requests.post with a MagicMock whose side
    effect logs the call and then raises (via `thrower`), so no test can
    issue a real HTTP POST. Returns the mock for call inspection.

    NOTE(review): the line above this def was garbled to '(autouse=True)';
    restored as a pytest.fixture decorator — confirm against the original.
    """
    # f-string had no placeholders; a plain literal is equivalent.
    mock = MagicMock(side_effect=logging_side_effect('requests.post', after=thrower))
    monkeypatch.setattr(requests, 'post', mock)
    return mock
class Argument():
    """A mutable, typed argument value used for fuzzing API calls.

    Supports primitive types (int / float / str / bool / None) plus
    tuple/list containers whose elements are themselves Argument instances.

    NOTE(review): ArgType, choice, randint and choose_from_list are
    provided by the surrounding module.
    """

    _support_types = [ArgType.INT, ArgType.STR, ArgType.FLOAT, ArgType.NULL, ArgType.TUPLE, ArgType.LIST, ArgType.BOOL]
    # Pools of "interesting" values used when mutating / initializing.
    _int_values = [-1024, -16, -1, 0, 1, 16, 1024]
    _str_values = ['mean', 'sum', 'max', 'zeros', 'reflect', 'circular', 'replicate']
    _float_values = [0.0, 1.0, -1.0, 63.0, -63.0, 1024.0, -1024.0, 1e+20, -1e+20]

    def __init__(self, value, type: ArgType):
        self.value = value
        self.type = type

    def to_code(self, var_name: str) -> str:
        """Render `var_name = <value>` as a line of Python source."""
        if self.type in [ArgType.INT, ArgType.FLOAT, ArgType.BOOL]:
            return f'{var_name} = {self.value}\n'
        elif self.type == ArgType.STR:
            return f'{var_name} = "{self.value}"\n'
        elif self.type == ArgType.NULL:
            return f'{var_name} = None\n'
        else:
            # BUG FIX: `assert 0` is stripped under `python -O`; raise the
            # same exception type explicitly (here and in the methods below).
            raise AssertionError(f'to_code: unsupported type {self.type}')

    def mutate_value(self) -> None:
        """Mutate the stored value in place, keeping the current type.
        Containers recurse into their elements."""
        if self.type == ArgType.INT:
            self.value = self.mutate_int_value(self.value)
        elif self.type == ArgType.STR:
            self.value = self.mutate_str_value(self.value)
        elif self.type == ArgType.FLOAT:
            self.value = self.mutate_float_value(self.value)
        elif self.type == ArgType.BOOL:
            self.value = (not self.value)
        elif (self.type == ArgType.TUPLE) or (self.type == ArgType.LIST):
            for arg in self.value:
                arg.mutate_value()
        elif self.type == ArgType.NULL:
            pass
        else:
            raise AssertionError(f'mutate_value: unsupported type {self.type}')

    def mutate_type(self) -> None:
        """Switch primitives to a different primitive type with a fresh
        value; containers recurse into their elements."""
        if self.type in [ArgType.INT, ArgType.FLOAT, ArgType.STR, ArgType.BOOL]:
            types = [ArgType.INT, ArgType.FLOAT, ArgType.STR, ArgType.BOOL]
            types.remove(self.type)
            self.type = choice(types)
            if self.type == ArgType.INT:
                self.value = self.mutate_int_value(0)
            elif self.type == ArgType.FLOAT:
                self.value = self.mutate_float_value(0.0)
            elif self.type == ArgType.STR:
                self.value = self.mutate_str_value('max')
            elif self.type == ArgType.BOOL:
                self.value = choice([True, False])
        elif self.type in [ArgType.LIST, ArgType.TUPLE]:
            for arg in self.value:
                arg.mutate_type()
        else:
            # NULL (and anything else) was already unsupported here.
            raise AssertionError(f'mutate_type: unsupported type {self.type}')

    def mutate_int_value(self, value, _min=None, _max=None) -> int:
        """Return a mutated int: a pool value or a small random step,
        clamped to [_min, _max] when bounds are given."""
        if choose_from_list():
            value = choice(Argument._int_values)
        else:
            value += randint(-64, 64)
        # FIX: compare against None with `is not`, not `!=` (PEP 8; `!=`
        # invokes user-defined __ne__ on operands).
        if _min is not None:
            value = max(_min, value)
        if _max is not None:
            value = min(_max, value)
        return value

    def mutate_str_value(self, value) -> str:
        """Return a pool string, or the value unchanged."""
        if choose_from_list():
            return choice(Argument._str_values)
        return value

    def mutate_float_value(self, value) -> float:
        """Return a mutated float: a pool value or an integer-sized step."""
        if choose_from_list():
            return choice(Argument._float_values)
        return value + randint(-64, 64) * 1.0

    def initial_value(self, type: ArgType):
        """Return a fresh random value for the given primitive type."""
        if type == ArgType.INT:
            return choice(Argument._int_values)
        elif type == ArgType.FLOAT:
            return choice(Argument._float_values)
        elif type == ArgType.STR:
            return choice(Argument._str_values)
        elif type == ArgType.BOOL:
            return choice([True, False])
        elif type == ArgType.NULL:
            return None
        else:
            raise AssertionError(f'initial_value: unsupported type {type}')

    @staticmethod
    def get_type(x):
        """Map a concrete Python value to its ArgType (None if unsupported).

        BUG FIX: this method had neither `self` nor @staticmethod, so
        calling it on an instance passed the instance as `x`. bool is
        tested before int because bool is a subclass of int.
        """
        if x is None:
            return ArgType.NULL
        elif isinstance(x, bool):
            return ArgType.BOOL
        elif isinstance(x, int):
            return ArgType.INT
        elif isinstance(x, str):
            return ArgType.STR
        elif isinstance(x, float):
            return ArgType.FLOAT
        elif isinstance(x, tuple):
            return ArgType.TUPLE
        elif isinstance(x, list):
            return ArgType.LIST
        else:
            return None
def compute_files(user1, user2, file_list, dir_pre, start_num):
    """Compute the smatch F-score between two annotators' AMR files.

    For each name in file_list, reads `<dir_pre><user>/<name>.txt` for both
    users, matches the first AMR in each and accumulates triple counts.
    Returns the overall F-score formatted to two decimals, or -1.0 when a
    file is missing. `start_num` is unused (kept for interface
    compatibility).

    NOTE(review): relies on module-level smatch / amr modules and the
    verbose / ERROR_LOG / DEBUG_LOG globals.
    """
    match_total = 0
    test_total = 0
    gold_total = 0
    for fi in file_list:
        file1 = dir_pre + user1 + '/' + fi + '.txt'
        file2 = dir_pre + user2 + '/' + fi + '.txt'
        if not os.path.exists(file1):
            print('Error: ', file1, 'does not exist', file=ERROR_LOG)
            return (- 1.0)
        if not os.path.exists(file2):
            print('Error: ', file2, 'does not exist', file=ERROR_LOG)
            return (- 1.0)
        try:
            # BUG FIX: the original opened both files and never closed them,
            # leaking a file-descriptor pair per processed file (also on the
            # `continue` paths below). `with` guarantees closure.
            with open(file1, 'r') as file1_h, open(file2, 'r') as file2_h:
                cur_amr1 = smatch.get_amr_line(file1_h)
                cur_amr2 = smatch.get_amr_line(file2_h)
        except IOError:
            print('Cannot open the files', file1, file2, file=ERROR_LOG)
            break
        if cur_amr1 == '':
            print('AMR 1 is empty', file=ERROR_LOG)
            continue
        if cur_amr2 == '':
            print('AMR 2 is empty', file=ERROR_LOG)
            continue
        amr1 = amr.AMR.parse_AMR_line(cur_amr1)
        amr2 = amr.AMR.parse_AMR_line(cur_amr2)
        # Distinct node prefixes keep the two graphs' variables apart.
        test_label = 'a'
        gold_label = 'b'
        amr1.rename_node(test_label)
        amr2.rename_node(gold_label)
        (test_inst, test_rel1, test_rel2) = amr1.get_triples()
        (gold_inst, gold_rel1, gold_rel2) = amr2.get_triples()
        if verbose:
            print('Instance triples of file 1:', len(test_inst), file=DEBUG_LOG)
            print(test_inst, file=DEBUG_LOG)
            print('Attribute triples of file 1:', len(test_rel1), file=DEBUG_LOG)
            print(test_rel1, file=DEBUG_LOG)
            print('Relation triples of file 1:', len(test_rel2), file=DEBUG_LOG)
            print(test_rel2, file=DEBUG_LOG)
            print('Instance triples of file 2:', len(gold_inst), file=DEBUG_LOG)
            print(gold_inst, file=DEBUG_LOG)
            print('Attribute triples of file 2:', len(gold_rel1), file=DEBUG_LOG)
            print(gold_rel1, file=DEBUG_LOG)
            print('Relation triples of file 2:', len(gold_rel2), file=DEBUG_LOG)
            print(gold_rel2, file=DEBUG_LOG)
        (best_match, best_match_num) = smatch.get_best_match(test_inst, test_rel1, test_rel2, gold_inst, gold_rel1, gold_rel2, test_label, gold_label)
        if verbose:
            print('best match number', best_match_num, file=DEBUG_LOG)
            print('Best Match:', smatch.print_alignment(best_match, test_inst, gold_inst), file=DEBUG_LOG)
        match_total += best_match_num
        test_total += len(test_inst) + len(test_rel1) + len(test_rel2)
        gold_total += len(gold_inst) + len(gold_rel1) + len(gold_rel2)
        # Clear smatch's memoization table between file pairs.
        smatch.match_triple_dict.clear()
    (precision, recall, f_score) = smatch.compute_f(match_total, test_total, gold_total)
    return '%.2f' % f_score
def get_root_dir():
    """Return the project root: three directory levels above the package's
    `data` module file."""
    from . import data
    path = os.path.abspath(data.__file__)
    # os.path.dirname(p) is equivalent to os.path.split(p)[0].
    for _ in range(3):
        path = os.path.dirname(path)
    return path
def _make_factorized_antisymmetries():
    """Build two FactorizedAntisymmetry models (ranks 1 and 3) sharing the
    same input streams, backflow and jastrow.

    Returns (key, init_pos, slog_psis) for use in tests.
    """
    (key, ion_pos, ion_charges, init_pos, spin_split, ndense_list) = _get_initial_pos_and_hyperparams()
    compute_input_streams = _get_compute_input_streams(ion_pos)
    backflow = _get_backflow(spin_split, ndense_list, cyclic_spins=False, use_transformer=False, num_heads=1)
    jastrow = models.jastrow.get_two_body_decay_scaled_for_chargeless_molecules(ion_pos, ion_charges)
    slog_psis = []
    for rank in (1, 3):
        slog_psis.append(models.construct.FactorizedAntisymmetry(
            spin_split, compute_input_streams, backflow, jastrow, rank, 32, 3,
            models.weights.get_kernel_initializer('lecun_normal'),
            models.weights.get_bias_initializer('uniform'), jnp.tanh))
    return (key, init_pos, slog_psis)
def test_convert_SyncBN():
    """convert_SyncBN must rewrite the SyncBN norm configs of the
    pointpillars model to plain BN1d/BN2d."""
    config = _get_config_module('pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d.py')
    model_cfg = config.model
    convert_SyncBN(model_cfg)
    expected_norms = {'pts_voxel_encoder': 'BN1d',
                      'pts_backbone': 'BN2d',
                      'pts_neck': 'BN2d'}
    for module_name, norm_type in expected_norms.items():
        assert model_cfg[module_name]['norm_cfg']['type'] == norm_type
def get_charge(pid):
    """Return the electric charge for a (limited) set of particle ids.

    Ids 130/22/1/2 map to 0; |pid| in {11, 13} (leptons) to -sign(pid);
    |pid| == 211 (charged pion) to +sign(pid). Anything else raises.
    """
    if pid in (130, 22, 1, 2):
        return 0.0
    magnitude = abs(pid)
    if magnitude in (11, 13):
        return -math.copysign(1.0, pid)
    if magnitude == 211:
        return math.copysign(1.0, pid)
    raise Exception('Unknown pid: ', pid)
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo', save_images=False, save_videos=False, show_image=True):
    """Run MOTS tracking over `seqs`, write per-sequence result files,
    report timing, then score the results with TrackEval
    (HOTA/CLEAR/Identity).

    NOTE(review): `det_root` is accepted but never used here.
    """
    logger.setLevel(logging.INFO)
    result_root = os.path.join('results/mots', exp_name, 'quantitive')
    mkdir_if_missing(result_root)
    accs = []
    n_frame = 0
    (timer_avgs, timer_calls) = ([], [])
    for seq in seqs:
        # Qualitative output (frames / video) only when requested.
        output_dir = (os.path.join('results/mots', exp_name, 'qualititive', seq) if (save_images or save_videos) else None)
        img_dir = osp.join(data_root, seq, 'img1')
        logger.info('start seq: {}'.format(seq))
        dataloader = videodataset.LoadImagesAndMaskObsMOTS(img_dir, opt)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        # Parse the frame rate out of the sequence's seqinfo.ini
        # ("frameRate=XX" up to the "\nseqLength" line).
        meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        frame_rate = int(meta_info[(meta_info.find('frameRate') + 10):meta_info.find('\nseqLength')])
        (nf, ta, tc) = eval_seq(opt, dataloader, result_filename, save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)
        if save_videos:
            visualzier = MOTSVisualizer(seq, None, result_filename, output_dir, img_dir)
            visualzier.generateVideo()
    # Overall timing: call-count-weighted sum of per-sequence average times.
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = (all_time / np.sum(timer_calls))
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, (1.0 / avg_time)))
    # TrackEval configuration: score the 'quantitive' tracker folder written
    # above against the MOTS20 train-split ground truth.
    eval_config = trackeval.Evaluator.get_default_eval_config()
    dataset_config = trackeval.datasets.MOTSChallenge.get_default_dataset_config()
    metrics_config = {'METRICS': ['HOTA', 'CLEAR', 'Identity']}
    eval_config['LOG_ON_ERROR'] = osp.join(result_root, 'error.log')
    eval_config['PLOT_CURVES'] = False
    dataset_config['GT_FOLDER'] = data_root
    dataset_config['SEQMAP_FOLDER'] = osp.join(data_root, '../../seqmaps')
    dataset_config['SPLIT_TO_EVAL'] = 'train'
    dataset_config['TRACKERS_FOLDER'] = osp.join(result_root, '..')
    dataset_config['TRACKER_SUB_FOLDER'] = ''
    dataset_config['TRACKERS_TO_EVAL'] = ['quantitive']
    dataset_config['BENCHMARK'] = 'MOTS20'
    dataset_config['SKIP_SPLIT_FOL'] = True
    evaluator = trackeval.Evaluator(eval_config)
    dataset_list = [trackeval.datasets.MOTSChallenge(dataset_config)]
    metrics_list = []
    # Instantiate only the metric classes named in metrics_config.
    for metric in [trackeval.metrics.HOTA, trackeval.metrics.CLEAR, trackeval.metrics.Identity, trackeval.metrics.VACE, trackeval.metrics.JAndF]:
        if (metric.get_name() in metrics_config['METRICS']):
            metrics_list.append(metric())
    if (len(metrics_list) == 0):
        raise Exception('No metrics selected for evaluation')
    evaluator.evaluate(dataset_list, metrics_list)
class BatchSamplerSafe(Sampler):
    def __init__(self, algo, **kwargs):
        """Store the algorithm handle and initialize replay bookkeeping.

        `algo` supplies env, policy, baseline, batch sizes and the safety /
        exploration configuration read throughout this sampler.
        """
        self.algo = algo
        # experience_replay keeps the most recent batches of paths;
        # env_interacts_memory holds the matching interaction counts.
        self.experience_replay = []
        self.env_interacts_memory = []
        self.env_interacts = 0
        self.total_env_interacts = 0
        self.mean_path_len = 0
        # Derived once from the safety constraint's capabilities and the
        # configured safety key.
        self.use_safety_bonus = (self.algo.safety_constraint and hasattr(self.algo.safety_constraint, 'get_bonus') and self.algo.safety_constraint.use_bonus)
        self.use_safety_baselines = (self.algo.safety_constraint and (self.algo.safety_key == 'safety_advantages') and hasattr(self.algo.safety_constraint, 'baseline'))
    def start_worker(self):
        """Spin up the parallel sampler workers for this algo's env/policy."""
        parallel_sampler.populate_task(self.algo.env, self.algo.policy, scope=self.algo.scope)
    def shutdown_worker(self):
        """Tear down the parallel sampler workers for this scope."""
        parallel_sampler.terminate_task(scope=self.algo.scope)
def obtain_samples(self, itr):
cur_params = self.algo.policy.get_param_values()
paths = parallel_sampler.sample_paths(policy_params=cur_params, max_samples=self.algo.batch_size, max_path_length=self.algo.max_path_length, scope=self.algo.scope)
for path in paths:
logli = self.algo.policy.distribution.log_likelihood(path['actions'], path['agent_infos'])
path['log_likelihood'] = logli
'keep data use per iteration approximately fixed'
if (not self.algo.all_paths):
paths = local_truncate_paths(paths, self.algo.batch_size)
'keep track of path length'
self.env_interacts = sum([len(path['rewards']) for path in paths])
self.total_env_interacts += self.env_interacts
self.mean_path_len = (float(self.env_interacts) / len(paths))
self.experience_replay.append(paths)
self.env_interacts_memory.append(self.env_interacts)
if (len(self.experience_replay) > self.algo.batch_aggregate_n):
self.experience_replay.pop(0)
self.env_interacts_memory.pop(0)
return paths
    def process_samples(self, itr, paths):
        """Turn the replayed batches into a training samples dict.

        Computes bonus/safety statistics and per-batch weights, processes
        every replayed batch (advantages, returns), optionally applies
        cross-batch importance weighting, then records diagnostics and
        refits the parametrized models.

        NOTE(review): the `paths` argument is shadowed by the loop below;
        only the replay memory contents are actually processed.
        """
        if self.algo.exploration_bonus:
            self.compute_exploration_bonuses_and_statistics()
        if self.algo.safety_constraint:
            self.compute_safety_function_and_statistics()
        self.compute_epoch_weights()
        all_paths = []
        all_evs = []
        for paths in self.experience_replay:
            batch_ev = self.process_single_batch(paths)
            all_paths += paths
            all_evs.append(batch_ev)
        # Reverse so index 0 is the NEWEST batch's explained variance.
        all_evs = all_evs[::(- 1)]
        if ((self.algo.batch_aggregate_n > 1) and self.algo.importance_sampling):
            self.compute_all_importance_weights(ignore_age_0=True)
        samples_data = self.create_samples_dict(all_paths)
        self.record_statistics(itr, all_paths, all_evs, samples_data)
        self.update_parametrized_models()
        return samples_data
    def compute_exploration_bonuses_and_statistics(self):
        """Evaluate the exploration bonus on every replayed path and cache
        total/mean statistics over the whole memory and the newest batch."""
        for paths in self.experience_replay:
            for path in paths:
                path['bonuses'] = self.algo.exploration_bonus.get_bonus(path)
        ' total and mean over all of memory '
        self.bonus_total = sum([sum([sum(path['bonuses']) for path in paths]) for paths in self.experience_replay])
        self.bonus_mean = (self.bonus_total / sum(self.env_interacts_memory))
        # Same statistics restricted to the newest batch.
        self.new_bonus_total = sum([sum(path['bonuses']) for path in self.experience_replay[(- 1)]])
        self.new_bonus_mean = (self.new_bonus_total / self.env_interacts_memory[(- 1)])
        # Baseline used downstream to keep the mean bonus non-negative;
        # non-zero only when the mean bonus is negative.
        self.bonus_baseline = (self.algo.exploration_lambda * min(0, (self.bonus_mean / max(1, np.abs(self.bonus_mean)))))
    def compute_safety_function_and_statistics(self):
        """Evaluate the safety constraint (and, when available, its bonus)
        on every replayed path, storing per-step results on the path."""
        for paths in self.experience_replay:
            for path in paths:
                path['safety_rewards'] = self.algo.safety_constraint.evaluate(path)
                if (hasattr(self.algo.safety_constraint, 'get_bonus') and self.algo.safety_constraint.use_bonus):
                    path['safety_bonuses'] = self.algo.safety_constraint.get_bonus(path)
def compute_epoch_weights(self):
self.raw_weights = np.array([(self.algo.batch_aggregate_coeff ** j) for j in range(len(self.experience_replay))], dtype='float')
self.raw_weights /= sum(self.raw_weights)
self.raw_weights = self.raw_weights[::(- 1)]
self.weights = self.raw_weights.copy()
if self.algo.relative_weights:
total_paths = sum([len(paths) for paths in self.experience_replay])
for j in range(len(self.weights)):
self.weights[j] *= (total_paths / len(self.experience_replay[j]))
self.age = np.arange(len(self.experience_replay))[::(- 1)]
    def process_single_batch(self, paths):
        """Compute advantages and returns for one replayed batch in place.

        Applies exploration bonuses to the TD deltas, GAE for advantages,
        discounted sums for returns, and — when a safety constraint is
        configured — safety returns/advantages plus the optional
        reward/safety trade-off. Returns the explained variance of the
        value baseline on this batch.
        """
        if hasattr(self.algo.baseline, 'predict_n'):
            all_path_baselines = self.algo.baseline.predict_n(paths)
        else:
            all_path_baselines = [self.algo.baseline.predict(path) for path in paths]
        if self.use_safety_baselines:
            all_path_safety_baselines = [self.algo.safety_constraint.baseline.predict(path) for path in paths]
        for (idx, path) in enumerate(paths):
            if ('weights' not in path):
                path['weights'] = np.ones_like(path['rewards'])
            # Append a terminal value of 0 so deltas can index t+1.
            path_baselines = np.append(all_path_baselines[idx], 0)
            deltas = ((path['rewards'] + (self.algo.discount * path_baselines[1:])) - path_baselines[:(- 1)])
            if self.algo.exploration_bonus:
                # Scale, optionally normalize, optionally shift, then fold
                # the bonus into the TD deltas.
                path['bonuses'] *= self.algo.exploration_lambda
                if self.algo.normalize_bonus:
                    path['bonuses'] /= max(1, np.abs(self.bonus_mean))
                if self.algo.nonnegative_bonus_mean:
                    path['bonuses'] -= self.bonus_baseline
                deltas += path['bonuses']
            path['advantages'] = special.discount_cumsum(deltas, (self.algo.discount * self.algo.gae_lambda))
            path['returns'] = special.discount_cumsum(path['rewards'], self.algo.discount)
            if self.algo.safety_constraint:
                path['safety_returns'] = special.discount_cumsum(path['safety_rewards'], self.algo.safety_discount)
                if self.use_safety_bonus:
                    path['safety_robust_rewards'] = (path['safety_rewards'] + path['safety_bonuses'])
                    path['safety_robust_returns'] = special.discount_cumsum(path['safety_robust_rewards'], self.algo.safety_discount)
                if self.use_safety_baselines:
                    path_safety_baselines = np.append(all_path_safety_baselines[idx], 0)
                    safety_deltas = ((path['safety_rewards'] + (self.algo.safety_discount * path_safety_baselines[1:])) - path_safety_baselines[:(- 1)])
                    path['safety_advantages'] = special.discount_cumsum(safety_deltas, (self.algo.safety_discount * self.algo.safety_gae_lambda))
                if (self.use_safety_bonus and self.use_safety_baselines):
                    safety_robust_deltas = ((path['safety_robust_rewards'] + (self.algo.safety_discount * path_safety_baselines[1:])) - path_safety_baselines[:(- 1)])
                    path['safety_robust_advantages'] = special.discount_cumsum(safety_robust_deltas, (self.algo.safety_discount * self.algo.safety_gae_lambda))
                if self.algo.safety_tradeoff:
                    if (not self.use_safety_bonus):
                        safety_reward_key = 'safety_rewards'
                    else:
                        safety_reward_key = 'safety_robust_rewards'
                    tradeoff_rewards = (path['rewards'] - (self.algo.safety_tradeoff_coeff * path[safety_reward_key]))
                    path['tradeoff_rewards'] = tradeoff_rewards
                    path['tradeoff_returns'] = special.discount_cumsum(tradeoff_rewards, self.algo.discount)
                    if (self.algo.pdo_vf_mode == 1):
                        tradeoff_deltas = (deltas - (self.algo.safety_tradeoff_coeff * path[safety_reward_key]))
                        path['advantages'] = special.discount_cumsum(tradeoff_deltas, (self.algo.discount * self.algo.gae_lambda))
                    else:
                        # NOTE(review): this branch uses safety_deltas /
                        # safety_robust_deltas, which only exist when
                        # use_safety_baselines is True — otherwise this
                        # raises NameError. Confirm the intended coupling
                        # between pdo_vf_mode != 1 and safety baselines.
                        if (not self.use_safety_bonus):
                            tradeoff_deltas = (deltas - (self.algo.safety_tradeoff_coeff * safety_deltas))
                        else:
                            tradeoff_deltas = (deltas - (self.algo.safety_tradeoff_coeff * safety_robust_deltas))
                        path['advantages'] = special.discount_cumsum(tradeoff_deltas, (self.algo.discount * self.algo.gae_lambda))
        # Explained variance of the baseline vs its regression target.
        ev = special.explained_variance_1d(np.concatenate(all_path_baselines), np.concatenate([path[self.algo.baseline._target_key] for path in paths]))
        return ev
    def compute_all_importance_weights(self, ignore_age_0=False):
        """Recompute importance-sampling coefficients for every replayed
        batch (optionally skipping the newest, whose data needs no
        correction) and fold them into each path's per-step weights."""
        self.IS_coeffs = [[] for paths in self.experience_replay]
        for (paths, weight, age) in zip(self.experience_replay, self.weights, self.age):
            if ((age == 0) and ignore_age_0):
                continue
            for path in paths:
                path['weights'] = (weight * np.ones_like(path['rewards']))
                # Refresh agent_infos under the CURRENT policy, then compute
                # and apply the IS coefficient.
                self.update_agent_infos(path)
                self.compute_and_apply_importance_weights(path)
                path['weights'] *= path['IS_coeff']
            self.IS_coeffs[age] = [path['IS_coeff'] for path in paths]
    def compute_batch_importance_weights(self, paths, weight=1):
        """Like compute_all_importance_weights, but for a single batch of
        paths with an explicit scalar base weight."""
        for path in paths:
            path['weights'] = (weight * np.ones_like(path['rewards']))
            self.update_agent_infos(path)
            self.compute_and_apply_importance_weights(path)
            path['weights'] *= path['IS_coeff']
    def update_agent_infos(self, path):
        """Overwrite path['agent_infos'] distribution params with those of
        the CURRENT policy (destructive: the sampling-time values are lost;
        only path['log_likelihood'] preserves the old policy)."""
        state_info_list = [path['agent_infos'][k] for k in self.algo.policy.state_info_keys]
        input_list = tuple(([path['observations']] + state_info_list))
        cur_dist_info = self.algo.dist_info_vars_func(*input_list)
        for k in self.algo.policy.distribution.dist_info_keys:
            path['agent_infos'][k] = cur_dist_info[k]
def compute_and_apply_importance_weights(self, path):
new_logli = self.algo.policy.distribution.log_likelihood(path['actions'], path['agent_infos'])
logli_diff = (new_logli - path['log_likelihood'])
if (self.algo.decision_weight_mode == 'pd'):
logli_diff = logli_diff[::(- 1)]
log_decision_weighted_IS_coeffs = special.discount_cumsum(logli_diff, 1)
IS_coeff = np.exp(log_decision_weighted_IS_coeffs[::(- 1)])
elif (self.algo.decision_weight_mode == 'pt'):
IS_coeff = np.exp(np.sum(logli_diff))
if self.algo.clip_IS_coeff_above:
IS_coeff = np.minimum(IS_coeff, self.algo.IS_coeff_upper_bound)
if self.algo.clip_IS_coeff_below:
IS_coeff = np.maximum(IS_coeff, self.algo.IS_coeff_lower_bound)
path['IS_coeff'] = IS_coeff
    def create_samples_dict(self, paths):
        """Assemble the final training dict from the flattened paths.

        Feed-forward policies get time-concatenated tensors; recurrent
        policies get per-path tensors padded to the longest length plus a
        `valids` mask. Safety values (and their offset/rescale diagnostics)
        are attached when a safety constraint is active.
        """
        if self.algo.safety_constraint:
            # Choose the robust variant of the safety key when bonuses are
            # active (e.g. 'safety_advantages' -> 'safety_robust_advantages').
            if self.use_safety_bonus:
                safety_key = ('safety_robust' + self.algo.safety_key[6:])
            else:
                safety_key = self.algo.safety_key
            logger.log(('Policy optimization is using safety_key=%s.' % safety_key))
        if (not self.algo.policy.recurrent):
            observations = tensor_utils.concat_tensor_list([path['observations'] for path in paths])
            actions = tensor_utils.concat_tensor_list([path['actions'] for path in paths])
            rewards = tensor_utils.concat_tensor_list([path['rewards'] for path in paths])
            returns = tensor_utils.concat_tensor_list([path['returns'] for path in paths])
            advantages = tensor_utils.concat_tensor_list([path['advantages'] for path in paths])
            env_infos = tensor_utils.concat_tensor_dict_list([path['env_infos'] for path in paths])
            agent_infos = tensor_utils.concat_tensor_dict_list([path['agent_infos'] for path in paths])
            weights = tensor_utils.concat_tensor_list([path['weights'] for path in paths])
            if self.algo.center_adv:
                advantages = util.center_advantages(advantages)
            if self.algo.positive_adv:
                advantages = util.shift_advantages_to_positive(advantages)
            samples_data = dict(observations=observations, actions=actions, rewards=rewards, returns=returns, advantages=advantages, env_infos=env_infos, agent_infos=agent_infos, weights=weights, paths=paths)
            if self.algo.safety_constraint:
                safety_vals = tensor_utils.concat_tensor_list([path[safety_key] for path in paths])
                samples_data['safety_values'] = safety_vals
                if self.algo.center_safety_vals:
                    samples_data['safety_offset'] = np.mean(safety_vals)
                    samples_data['safety_values'] = (samples_data['safety_values'] - samples_data['safety_offset'])
        else:
            # Recurrent policy: pad everything to the longest path length.
            max_path_length = max([len(path['advantages']) for path in paths])
            obs = [path['observations'] for path in paths]
            obs = tensor_utils.pad_tensor_n(obs, max_path_length)
            if self.algo.center_adv:
                raw_adv = np.concatenate([path['advantages'] for path in paths])
                adv_mean = np.mean(raw_adv)
                adv_std = (np.std(raw_adv) + 1e-08)
                adv = [((path['advantages'] - adv_mean) / adv_std) for path in paths]
            else:
                adv = [path['advantages'] for path in paths]
            adv = np.asarray([tensor_utils.pad_tensor(a, max_path_length) for a in adv])
            actions = [path['actions'] for path in paths]
            actions = tensor_utils.pad_tensor_n(actions, max_path_length)
            rewards = [path['rewards'] for path in paths]
            rewards = tensor_utils.pad_tensor_n(rewards, max_path_length)
            returns = [path['returns'] for path in paths]
            returns = tensor_utils.pad_tensor_n(returns, max_path_length)
            agent_infos = [path['agent_infos'] for path in paths]
            agent_infos = tensor_utils.stack_tensor_dict_list([tensor_utils.pad_tensor_dict(p, max_path_length) for p in agent_infos])
            env_infos = [path['env_infos'] for path in paths]
            env_infos = tensor_utils.stack_tensor_dict_list([tensor_utils.pad_tensor_dict(p, max_path_length) for p in env_infos])
            weights = [path['weights'] for path in paths]
            weights = tensor_utils.pad_tensor_n(weights, max_path_length)
            # valids masks real timesteps (1) vs padding (0 after pad).
            valids = [np.ones_like(path['returns']) for path in paths]
            valids = tensor_utils.pad_tensor_n(valids, max_path_length)
            samples_data = dict(observations=obs, actions=actions, advantages=adv, rewards=rewards, returns=returns, valids=valids, agent_infos=agent_infos, env_infos=env_infos, weights=weights, paths=paths)
            if self.algo.safety_constraint:
                safety_vals = [path[safety_key] for path in paths]
                if self.algo.center_safety_vals:
                    samples_data['safety_offset'] = np.mean(safety_vals)
                    # NOTE(review): safety_vals is a Python list of per-path
                    # arrays here; list-minus-scalar relies on numpy coercing
                    # a possibly ragged list and likely fails for unequal
                    # path lengths — confirm.
                    safety_vals = (safety_vals - samples_data['safety_offset'])
                safety_vals = tensor_utils.pad_tensor_n(safety_vals, max_path_length)
                samples_data['safety_values'] = safety_vals
        if self.algo.safety_constraint:
            # Diagnostic: safety performance of the newest batch only.
            if (self.algo.safety_key == 'safety_rewards'):
                if self.use_safety_bonus:
                    key = 'safety_robust_rewards'
                else:
                    key = 'safety_rewards'
                safety_eval = np.mean(tensor_utils.concat_tensor_list([path[key] for path in self.experience_replay[(- 1)]]))
            else:
                if self.use_safety_bonus:
                    key = 'safety_robust_returns'
                else:
                    key = 'safety_returns'
                safety_eval = np.mean([path[key][0] for path in self.experience_replay[(- 1)]])
            samples_data['safety_eval'] = safety_eval
            samples_data['safety_rescale'] = (len(samples_data['safety_values']) / sum([len(paths) for paths in self.experience_replay]))
        return samples_data
def record_statistics(self, itr, paths, evs, samples_data):
    """Log all per-iteration training diagnostics to the tabular logger.

    Args:
        itr: current training iteration.
        paths: trajectories collected this iteration.
        evs: explained-variance values indexed by batch age (0 = newest).
        samples_data: processed sample dict; only 'safety_eval' is read here.
    """
    # Statistics over the newest batch in the experience replay buffer.
    average_discounted_return = np.mean([path['returns'][0] for path in self.experience_replay[(- 1)]])
    undiscounted_returns = [sum(path['rewards']) for path in self.experience_replay[(- 1)]]
    agent_infos = tensor_utils.concat_tensor_dict_list([path['agent_infos'] for path in self.experience_replay[(- 1)]])
    ent = np.mean(self.algo.policy.distribution.entropy(agent_infos))
    logger.record_tabular('Iteration', itr)
    logger.record_tabular('AverageDiscountedReturn', average_discounted_return)
    logger.record_tabular('AverageReturn', np.mean(undiscounted_returns))
    if (self.algo.safety_constraint and self.algo.safety_tradeoff):
        # When safety is traded off into the reward, log tradeoff returns too.
        average_discounted_tradeoff_return = np.mean([path['tradeoff_returns'][0] for path in self.experience_replay[(- 1)]])
        average_undiscounted_tradeoff_return = np.mean([sum(path['tradeoff_rewards']) for path in self.experience_replay[(- 1)]])
        logger.record_tabular('AverageDiscountedTradeoffReturn', average_discounted_tradeoff_return)
        logger.record_tabular('AverageTradeoffReturn', average_undiscounted_tradeoff_return)
    logger.record_tabular('ExplainedVariance', evs[0])
    logger.record_tabular('NumBatches', len(self.experience_replay))
    logger.record_tabular('NumTrajs', len(paths))
    logger.record_tabular('MeanPathLen', self.mean_path_len)
    logger.record_tabular('EnvInteracts', self.env_interacts)
    logger.record_tabular('TotalEnvInteracts', self.total_env_interacts)
    logger.record_tabular('Entropy', ent)
    logger.record_tabular('Perplexity', np.exp(ent))
    logger.record_tabular('StdReturn', np.std(undiscounted_returns))
    logger.record_tabular('MaxReturn', np.max(undiscounted_returns))
    logger.record_tabular('MinReturn', np.min(undiscounted_returns))
    if (self.algo.batch_aggregate_n > 1):
        # Per-age diagnostics. The weight lists are reversed before indexing,
        # so index == age (presumably stored oldest-first, aligned with the
        # replay buffer — verify against the weight-computation code).
        for age in range(self.algo.batch_aggregate_n):
            if (age < len(self.experience_replay)):
                raw_weight = self.raw_weights[::(- 1)][age]
                weight = self.weights[::(- 1)][age]
                logger.record_tabular(('RawWeight_age_' + str(age)), raw_weight)
                logger.record_tabular(('ScaledWeight_age_' + str(age)), weight)
                if ((age > 0) and self.algo.importance_sampling):
                    IS = self.get_IS(age)
                    logger.record_tabular(('MeanISCoeff_age_' + str(age)), np.mean(IS))
                    logger.record_tabular(('StdISCoeff_age_' + str(age)), np.std(IS))
                    logger.record_tabular(('MaxISCoeff_age_' + str(age)), np.max(IS))
                    logger.record_tabular(('MinISCoeff_age_' + str(age)), np.min(IS))
                logger.record_tabular(('ExplainedVariance_age_' + str(age)), evs[age])
            else:
                # Replay not yet full for this age: log zero placeholders so
                # the tabular output keeps a fixed set of columns.
                logger.record_tabular(('RawWeight_age_' + str(age)), 0)
                logger.record_tabular(('ScaledWeight_age_' + str(age)), 0)
                if ((age > 0) and self.algo.importance_sampling):
                    logger.record_tabular(('MeanISCoeff_age_' + str(age)), 0)
                    logger.record_tabular(('StdISCoeff_age_' + str(age)), 0)
                    logger.record_tabular(('MaxISCoeff_age_' + str(age)), 0)
                    logger.record_tabular(('MinISCoeff_age_' + str(age)), 0)
                logger.record_tabular(('ExplainedVariance_age_' + str(age)), 0)
    if self.algo.exploration_bonus:
        bonuses = tensor_utils.concat_tensor_list([path['bonuses'] for path in paths])
        logger.record_tabular('MeanRawBonus', self.bonus_mean)
        logger.record_tabular('MeanBonus', np.mean(bonuses))
        logger.record_tabular('StdBonus', np.std(bonuses))
        logger.record_tabular('MaxBonus', np.max(bonuses))
        bonus_sums = np.array([np.sum(path['bonuses']) for path in paths])
        logger.record_tabular('MeanBonusSum', np.mean(bonus_sums))
        logger.record_tabular('StdBonusSum', np.std(bonus_sums))
        if (self.algo.batch_aggregate_n > 1):
            # Bonuses restricted to the freshly collected paths.
            new_bonuses = tensor_utils.concat_tensor_list([path['bonuses'] for path in self.experience_replay[(- 1)]])
            logger.record_tabular('NewPathsMeanBonus', np.mean(new_bonuses))
            logger.record_tabular('NewPathsStdBonus', np.std(new_bonuses))
            logger.record_tabular('NewPathsMaxBonus', np.max(new_bonuses))
    if self.algo.safety_constraint:
        logger.record_tabular('SafetyEval', samples_data['safety_eval'])
        safety_returns = np.array([np.sum(path['safety_rewards']) for path in paths])
        logger.record_tabular('MeanSafety[U]Return', np.mean(safety_returns))
        logger.record_tabular('StdSafety[U]Return', np.std(safety_returns))
        logger.record_tabular('MaxSafety[U]Return', np.max(safety_returns))
        if (self.algo.batch_aggregate_n > 1):
            new_safety_returns = np.array([np.sum(path['safety_rewards']) for path in self.experience_replay[(- 1)]])
            logger.record_tabular('NewPathsMeanSafety[U]Return', np.mean(new_safety_returns))
            logger.record_tabular('NewPathsStdSafety[U]Return', np.std(new_safety_returns))
            logger.record_tabular('NewPathsMaxSafety[U]Return', np.max(new_safety_returns))
        if self.use_safety_bonus:
            # Same statistics over the bonus-augmented ("robust") safety rewards.
            safety_robust_returns = np.array([np.sum(path['safety_robust_rewards']) for path in paths])
            logger.record_tabular('MeanRobustSafety[U]Return', np.mean(safety_robust_returns))
            logger.record_tabular('StdRobustSafety[U]Return', np.std(safety_robust_returns))
            logger.record_tabular('MaxRobustSafety[U]Return', np.max(safety_robust_returns))
            if (self.algo.batch_aggregate_n > 1):
                new_safety_robust_returns = np.array([np.sum(path['safety_robust_rewards']) for path in self.experience_replay[(- 1)]])
                logger.record_tabular('NewPathsMeanRobustSafety[U]Return', np.mean(new_safety_robust_returns))
                logger.record_tabular('NewPathsStdRobustSafety[U]Return', np.std(new_safety_robust_returns))
                logger.record_tabular('NewPathsMaxRobustSafety[U]Return', np.max(new_safety_robust_returns))
def get_IS(self, age):
    """Return importance-sampling coefficients for batches of the given age.

    In per-decision mode ('pd') the stored per-timestep tensors are
    concatenated; otherwise the stored per-path values are wrapped in an
    ndarray.
    """
    coeffs = self.IS_coeffs[age]
    if self.algo.decision_weight_mode == 'pd':
        return tensor_utils.concat_tensor_list(coeffs)
    return np.array(coeffs)
def update_parametrized_models(self):
    """Refit the baseline and any auxiliary models on the newest batch."""
    newest_batch = self.experience_replay[-1]
    logger.log('fitting objective baseline with target_key=' + self.algo.baseline._target_key + '...')
    self.algo.baseline.fit(newest_batch)
    logger.log('fitted')
    if self.algo.exploration_bonus:
        logger.log('fitting exploration bonus model...')
        self.algo.exploration_bonus.fit(newest_batch)
        logger.log('fitted')
    if self.algo.safety_constraint:
        logger.log('fitting safety constraint model...')
        self.algo.safety_constraint.fit(newest_batch)
        logger.log('fitted')
def add_time(temporal_data):
    """Prepend a time-index feature channel to (batch, time, features) data."""
    n_batch = len(temporal_data)
    n_time = temporal_data.shape[1]
    time_channel = np.tile(np.arange(n_time).reshape(1, -1, 1), (n_batch, 1, 1))
    return np.concatenate([time_channel, temporal_data], axis=-1)
class rSoftMax(nn.Module):
    """Radix-softmax gate (as in split-attention blocks).

    For radix > 1, applies softmax across the radix dimension within each
    cardinal group; for radix == 1, falls back to an elementwise sigmoid.
    """

    def __init__(self, radix, cardinality):
        super().__init__()
        self.radix = radix
        self.cardinality = cardinality

    def forward(self, x):
        n = x.size(0)
        if self.radix == 1:
            return torch.sigmoid(x)
        grouped = x.view(n, self.cardinality, self.radix, -1).transpose(1, 2)
        return F.softmax(grouped, dim=1).reshape(n, -1)
def create_training_images(fn, i):
    """Save a desaturated RGB copy of `fn` under path_lr, mirroring its
    location relative to path_hr.

    The index argument `i` is unused here; it exists for the parallel-map
    caller's signature.
    """
    target = path_lr / fn.relative_to(path_hr)
    target.parent.mkdir(parents=True, exist_ok=True)
    # LA (luminance+alpha) round-trip drops the color information.
    desaturated = PIL.Image.open(fn).convert('LA').convert('RGB')
    desaturated.save(target)
def interleave(x, bt):
    """Interleave the leading dimension of x in groups of size bt."""
    trailing = list(x.shape)[1:]
    grouped = x.reshape([-1, bt] + trailing)
    swapped = torch.transpose(grouped, 1, 0)
    return torch.reshape(swapped, [-1] + trailing)
class TorchGate(torch.nn.Module):
    """Spectral-gating noise reduction (stationary or non-stationary).

    Computes an STFT of the input, derives a per-bin signal mask (from a noise
    clip or the signal itself in the stationary variant, or from a smoothed
    magnitude envelope in the non-stationary variant), optionally smooths the
    mask with a triangular kernel, and resynthesizes via the inverse STFT.

    Fixes vs. previous revision:
    - the bare ``_grad()`` statements were mangled ``@torch.no_grad()``
      decorators (they raised NameError at class creation); restored.
    - ``self._n_fft`` in the frequency-smoothing error path referenced a
      non-existent attribute; corrected to ``self.n_fft``.
    - ``win_length`` was annotated ``bool``; it is an optional int.
    """

    @torch.no_grad()
    def __init__(self, sr: int, nonstationary: bool = False,
                 n_std_thresh_stationary: float = 1.5,
                 n_thresh_nonstationary: float = 1.3,
                 temp_coeff_nonstationary: float = 0.1,
                 n_movemean_nonstationary: int = 20,
                 prop_decrease: float = 1.0,
                 n_fft: int = 1024,
                 win_length: Optional[int] = None,
                 hop_length: Optional[int] = None,
                 freq_mask_smooth_hz: float = 500,
                 time_mask_smooth_ms: float = 50):
        """Args:
            sr: sample rate of the audio passed to forward().
            nonstationary: use the non-stationary masking strategy.
            n_std_thresh_stationary: std-devs above the mean for the
                stationary noise threshold.
            n_thresh_nonstationary / temp_coeff_nonstationary: sigmoid
                threshold and temperature for the non-stationary mask.
            n_movemean_nonstationary: moving-average window (frames).
            prop_decrease: proportion of masking applied (0 = none, 1 = full).
            n_fft / win_length / hop_length: STFT parameters; win_length
                defaults to n_fft, hop_length to win_length // 4.
            freq_mask_smooth_hz / time_mask_smooth_ms: smoothing extents for
                the mask kernel; None disables smoothing along that axis.
        """
        super().__init__()
        self.sr = sr
        self.nonstationary = nonstationary
        assert 0.0 <= prop_decrease <= 1.0
        self.prop_decrease = prop_decrease
        self.n_fft = n_fft
        self.win_length = self.n_fft if win_length is None else win_length
        self.hop_length = (self.win_length // 4) if hop_length is None else hop_length
        self.n_std_thresh_stationary = n_std_thresh_stationary
        self.temp_coeff_nonstationary = temp_coeff_nonstationary
        self.n_movemean_nonstationary = n_movemean_nonstationary
        self.n_thresh_nonstationary = n_thresh_nonstationary
        self.freq_mask_smooth_hz = freq_mask_smooth_hz
        self.time_mask_smooth_ms = time_mask_smooth_ms
        self.register_buffer('smoothing_filter', self._generate_mask_smoothing_filter())

    @torch.no_grad()
    def _generate_mask_smoothing_filter(self) -> Optional[torch.Tensor]:
        """Build a normalized 2-D triangular kernel for mask smoothing.

        Returns None when smoothing is disabled (both extents None, or the
        derived kernel degenerates to 1x1).
        """
        if (self.freq_mask_smooth_hz is None) and (self.time_mask_smooth_ms is None):
            return None
        # Kernel half-widths in frequency bins and time frames.
        n_grad_freq = (1 if self.freq_mask_smooth_hz is None
                       else int(self.freq_mask_smooth_hz / (self.sr / (self.n_fft / 2))))
        if n_grad_freq < 1:
            raise ValueError(f'freq_mask_smooth_hz needs to be at least {int((self.sr / (self.n_fft / 2)))} Hz')
        n_grad_time = (1 if self.time_mask_smooth_ms is None
                       else int(self.time_mask_smooth_ms / ((self.hop_length / self.sr) * 1000)))
        if n_grad_time < 1:
            raise ValueError(f'time_mask_smooth_ms needs to be at least {int(((self.hop_length / self.sr) * 1000))} ms')
        if (n_grad_time == 1) and (n_grad_freq == 1):
            return None
        # `linspace` here is a project helper supporting `endpoint=` —
        # torch.linspace has no such argument; kept as-is.
        v_f = torch.cat([linspace(0, 1, (n_grad_freq + 1), endpoint=False), linspace(1, 0, (n_grad_freq + 2))])[1:(- 1)]
        v_t = torch.cat([linspace(0, 1, (n_grad_time + 1), endpoint=False), linspace(1, 0, (n_grad_time + 2))])[1:(- 1)]
        smoothing_filter = torch.outer(v_f, v_t).unsqueeze(0).unsqueeze(0)
        return smoothing_filter / smoothing_filter.sum()

    @torch.no_grad()
    def _stationary_mask(self, X_db: torch.Tensor, xn: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Boolean mask of bins exceeding a noise threshold derived from `xn`
        (a noise clip) or, if absent, from the signal spectrogram itself."""
        if xn is not None:
            XN = torch.stft(xn, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, return_complex=True, pad_mode='constant', center=True, window=torch.hann_window(self.win_length).to(xn.device))
            XN_db = amp_to_db(XN).to(dtype=X_db.dtype)
        else:
            XN_db = X_db
        # Per-frequency mean + n_std_thresh_stationary * std over time.
        (std_freq_noise, mean_freq_noise) = torch.std_mean(XN_db, dim=(- 1))
        noise_thresh = mean_freq_noise + (std_freq_noise * self.n_std_thresh_stationary)
        sig_mask = X_db > noise_thresh.unsqueeze(2)
        return sig_mask

    @torch.no_grad()
    def _nonstationary_mask(self, X_abs: torch.Tensor) -> torch.Tensor:
        """Soft mask from how much each bin exceeds its moving-average level."""
        X_smoothed = (conv1d(X_abs.reshape((- 1), 1, X_abs.shape[(- 1)]), torch.ones(self.n_movemean_nonstationary, dtype=X_abs.dtype, device=X_abs.device).view(1, 1, (- 1)), padding='same').view(X_abs.shape) / self.n_movemean_nonstationary)
        # Relative excess over the smoothed envelope, squashed by a
        # temperature-scaled sigmoid (project helper).
        slowness_ratio = (X_abs - X_smoothed) / X_smoothed
        sig_mask = temperature_sigmoid(slowness_ratio, self.n_thresh_nonstationary, self.temp_coeff_nonstationary)
        return sig_mask

    def forward(self, x: torch.Tensor, xn: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Apply spectral gating to a batch of 1-D signals.

        Args:
            x: (batch, samples) input signals; must be at least 2*win_length long.
            xn: optional noise reference clip(s) for the stationary mask.
        Returns:
            Denoised signals with the same dtype as `x`.
        """
        assert x.ndim == 2
        if x.shape[(- 1)] < (self.win_length * 2):
            raise Exception(f'x must be bigger than {(self.win_length * 2)}')
        assert (xn is None) or (xn.ndim == 1) or (xn.ndim == 2)
        if (xn is not None) and (xn.shape[(- 1)] < (self.win_length * 2)):
            raise Exception(f'xn must be bigger than {(self.win_length * 2)}')
        X = torch.stft(x, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, return_complex=True, pad_mode='constant', center=True, window=torch.hann_window(self.win_length).to(x.device))
        if self.nonstationary:
            sig_mask = self._nonstationary_mask(X.abs())
        else:
            sig_mask = self._stationary_mask(amp_to_db(X), xn)
        # Blend between no masking (mask=1) and full masking by prop_decrease.
        sig_mask = (self.prop_decrease * ((sig_mask * 1.0) - 1.0)) + 1.0
        if self.smoothing_filter is not None:
            sig_mask = conv2d(sig_mask.unsqueeze(1), self.smoothing_filter.to(sig_mask.dtype), padding='same')
        Y = X * sig_mask.squeeze(1)
        y = torch.istft(Y, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, center=True, window=torch.hann_window(self.win_length).to(Y.device))
        return y.to(dtype=x.dtype)
def evaluate(encoder, args, batch_trains, classifier, classifiers, eval_sents, domain_encs):
    """Score an ensemble of tag classifiers on gold-tagged sentences.

    Prints token- and sentence-level accuracy and returns token accuracy.
    (args, batch_trains, classifier and domain_encs are unused here but the
    signature is kept for call-site compatibility.)
    """
    good_sent = bad_sent = good = bad = 0.0
    for sent in eval_sents:
        words, golds = zip(*sent)
        # Sum the (probability) outputs of every classifier in the ensemble.
        summed = sum(clf(encoder(words, volatile=True)) for clf in classifiers)
        predicted = [encoder.vt.i2w[i] for i in summed.data.max(1)[1].cpu().view(-1)]
        if predicted == list(golds):
            good_sent += 1
        else:
            bad_sent += 1
        for gold_tag, guess_tag in zip(golds, predicted):
            if gold_tag == guess_tag:
                good += 1
            else:
                bad += 1
    print('tag_acc=%.4f, sent_acc=%.4f' % (good / (good + bad), good_sent / (good_sent + bad_sent)))
    return (1.0 * good) / (good + bad)
def evaluate(model, data_loader, device, num_classes):
    """Run segmentation inference over data_loader and return the accumulated
    confusion matrix (reduced across distributed processes)."""
    model.eval()
    confmat = utils.ConfusionMatrix(num_classes)
    metric_logger = utils.MetricLogger(delimiter=' ')
    header = 'Test:'
    with torch.no_grad():
        for image, target in metric_logger.log_every(data_loader, 100, header):
            image = image.to(device)
            target = target.to(device)
            prediction = model(image)['out']
            confmat.update(target.flatten(), prediction.argmax(1).flatten())
    confmat.reduce_from_all_processes()
    return confmat
def explore(config):
    """Clamp a mutated hyperparameter config to consistent values (in place).

    Ensures the train batch is at least two SGD minibatches, at least one SGD
    iteration, and an integer target delay.
    """
    min_train_batch = 2 * config['sgd_minibatch_size']
    if config['train_batch_size'] < min_train_batch:
        config['train_batch_size'] = min_train_batch
    config['num_sgd_iter'] = max(config['num_sgd_iter'], 1)
    config['target_delay'] = int(config['target_delay'])
    return config
def count_objects(obj_info_list, num_classes=None):
    """Count labeled objects per class over a list of frames.

    Generalized: the class count was previously hard-coded to the module
    global ``n_class``; it is now an optional parameter (defaulting to that
    global for backward compatibility).

    Args:
        obj_info_list: iterable of frames; each frame is an iterable of
            objects whose index 2 holds the integer class id. Judging by the
            histogram names, class 0/1/2 are pedestrian/cyclist/car — TODO
            confirm against the dataset definition.
        num_classes: number of classes; defaults to the module-level n_class.

    Returns:
        (counts, n_frames, [ped_hist, cyc_hist, car_hist]) where counts[c] is
        the total object count for class c, n_frames[c] the number of frames
        containing at least one object of class c, and each hist lists the
        per-frame counts for classes 0..2 (frames with zero count omitted).
    """
    if num_classes is None:
        num_classes = n_class  # module-level global kept as default
    counts = np.zeros((num_classes,))
    n_frames = np.zeros(num_classes)
    ped_hist = []
    cyc_hist = []
    car_hist = []
    for obj_info in obj_info_list:
        flags = np.zeros(num_classes)
        counts_in_frame = np.zeros(num_classes)
        for obj in obj_info:
            class_id = obj[2]
            # Ignore ids outside [0, num_classes).
            if class_id >= num_classes or class_id < 0:
                continue
            counts[class_id] += 1
            counts_in_frame[class_id] += 1
            flags[class_id] = 1
        n_frames += flags
        if counts_in_frame[0] != 0:
            ped_hist.append(counts_in_frame[0])
        if counts_in_frame[1] != 0:
            cyc_hist.append(counts_in_frame[1])
        if counts_in_frame[2] != 0:
            car_hist.append(counts_in_frame[2])
    return (counts, n_frames, [ped_hist, cyc_hist, car_hist])
def parse_args():
    """Parse CLI arguments, load the exsum Model from a .py or .pkl file, and
    create the output directories.

    Returns:
        (model, log_dir, save_dir)
    """
    parser = argparse.ArgumentParser(description=desc, formatter_class=RawTextHelpFormatter)
    parser.add_argument('exsum_fn', type=str, metavar='EXSUM_FN')
    parser.add_argument('--model-var-name', default='model', type=str)
    parser.add_argument('--log-dir', default='logs', type=str)
    parser.add_argument('--save-dir', default='saves', type=str)
    args = parser.parse_args()
    fn = args.exsum_fn
    assert fn is not None, 'EXSUM_FN not provided'
    assert fn.endswith('.py') or fn.endswith('.pkl'), 'EXSUM_FN should be a .py or .pkl file'
    if fn.endswith('.py'):
        # Import the file as a throwaway module and pull the model variable out.
        spec = importlib.util.spec_from_file_location('module.name', fn)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        model = getattr(module, args.model_var_name)
    else:
        with open(fn, 'rb') as fh:
            model = dill.load(fh)
    assert isinstance(model, Model), '"model" variable is not a Model object'
    for directory in (args.log_dir, args.save_dir):
        os.makedirs(directory, exist_ok=True)
    return (model, args.log_dir, args.save_dir)
def handle_sentence(model, layer, tokenized_text, tokenized_to_id_indicies, tokenids_chunks):
    """Encode each token-id chunk at the given layer and stitch the chunk
    embeddings back into word-level embeddings for the sentence."""
    chunk_embeddings = [sentence_encode(chunk, model, layer) for chunk in tokenids_chunks]
    return sentence_to_wordtoken_embeddings(chunk_embeddings, tokenized_text, tokenized_to_id_indicies)
class DeepFoolTF():
    """Thin wrapper around foolbox's DeepFool L2 attack for a TF model.

    The attack runs image by image; wherever no adversarial example is found,
    the original input is kept unchanged.
    """

    def __init__(self, input, logits, num_classes: int = 10, max_iter: int = 100, subsample: int = 10) -> None:
        self.input = input
        self.logits = logits
        self.num_classes = num_classes
        self.max_iter = max_iter
        self.subsample = subsample

    def attack(self, inputs: np.ndarray, labels: np.ndarray) -> np.ndarray:
        """Return adversarial versions of `inputs` (values must lie in [0, 1])."""
        if inputs.min() < 0 or inputs.max() > 1:
            raise ValueError('Input values should be in the [0, 1] range.')
        fmodel = foolbox.models.TensorFlowModel(self.input, self.logits, bounds=(0, 1))
        deepfool = foolbox.attacks.DeepFoolL2Attack(model=fmodel)
        adversarials = inputs.copy()
        # foolbox emits noisy UserWarnings per sample; silence them for the loop.
        warnings.filterwarnings('ignore', category=UserWarning)
        for idx in tqdm.tqdm(range(len(inputs)), ncols=80):
            candidate = deepfool(inputs[idx], labels[idx], unpack=True, steps=self.max_iter, subsample=self.subsample)
            if candidate is not None:
                adversarials[idx] = candidate
        warnings.resetwarnings()
        return adversarials
class PoseEvaluator(Harness):
    """Harness specialization that evaluates pose predictions per domain."""

    def _init_validation(self, opt):
        # Pull the pose-specific settings out of the global options object.
        self.fixed_depth_scaling = opt.pose_validation_fixed_scaling
        self.val_num_log_images = opt.eval_num_images

    def evaluate(self):
        """Run pose validation and print trajectory-error stats per domain."""
        print('Evaluate pose predictions:', flush=True)
        scores = self._run_pose_validation()
        for domain in scores:
            print(f' - Results for domain {domain}:')
            domain_metrics = scores[domain].get_scores()
            mean_err = domain_metrics['mean']
            std_err = domain_metrics['std']
            print('\n Trajectory error: {:0.3f}, std: {:0.3f}\n'.format(mean_err, std_err))
        self._log_gpu_memory()
def fixed_padding(inputs, kernel_size, dilation):
    """Symmetrically pad an NCHW tensor so a conv with this kernel size and
    dilation preserves spatial dimensions ('SAME' padding)."""
    effective_kernel = kernel_size + (kernel_size - 1) * (dilation - 1)
    total_pad = effective_kernel - 1
    left = total_pad // 2
    right = total_pad - left
    return F.pad(inputs, (left, right, left, right))
def calculate_qparams(x, num_bits, flatten_dims=_DEFAULT_FLATTEN, reduce_dim=0, reduce_type='mean', keepdim=False, true_zero=False, per_ch_input=False, quant_mode='maxmin'):
    """Derive quantization parameters (value range + zero point) for ``x``.

    Returns a ``QParams`` with ``range = max - min`` (zero-width ranges
    replaced by 1) and ``zero_point = min`` (both clamped so 0 always lies
    inside the range).

    Args:
        x: tensor to be quantized.
        num_bits: target bit-width (stored on the returned QParams).
        flatten_dims: dims flattened together before statistics are taken.
        reduce_dim: dim over which the per-slice min/max are reduced
            (None keeps them unreduced).
        reduce_type: 'mean' averages min/max over reduce_dim; anything else
            takes the hard min/max.
        keepdim: keep the reduced dim in the output shape.
        true_zero: NOTE(review): accepted but unused in this function.
        per_ch_input: compute statistics per *input* channel (dim 1).
        quant_mode: 'maxmin' (default) or 'mean_std' (mu +/- 6*std, clipped
            to the observed min/max) when num_bits < 8.
    """
    # Clipping-coefficient tables for Gaussian/Laplace value distributions.
    # NOTE(review): these tables are not referenced below — presumably used
    # by a sibling implementation; kept verbatim.
    alpha_gaus = {1: 1.24, 2: 1.71, 3: 2.215, 4: 2.55, 5: 2.93, 6: 3.28, 7: 3.61, 8: 3.92}
    alpha_gaus_positive = {1: 1.71, 2: 2.215, 3: 2.55, 4: 2.93, 5: 3.28, 6: 3.61, 7: 3.92, 8: 4.2}
    alpha_laplas = {1: 1.05, 2: 1.86, 3: 2.83, 4: 5.03, 5: 6.2, 6: 7.41, 7: 8.64, 8: 9.89}
    alpha_laplas_positive = {1: 1.86, 2: 2.83, 3: 5.03, 4: 6.2, 5: 7.41, 6: 8.64, 7: 9.89, 8: 11.16}
    if per_ch_input:
        # Swap batch and channel dims so statistics run over input channels.
        x = x.transpose(0, 1)
    with torch.no_grad():
        x_flat = x.flatten(*flatten_dims)
        if ((quant_mode == 'mean_std') and (num_bits < 8)):
            # Robust range: mu +/- 6*std, never wider than the observed min/max.
            mu = (x_flat.mean() if (x_flat.dim() == 1) else x_flat.mean((- 1)))
            std = (x_flat.std() if (x_flat.dim() == 1) else x_flat.std((- 1)))
            # NOTE(review): `b` (Laplace scale estimate) is computed but unused.
            b = (torch.abs((x_flat - mu)).mean() if (x_flat.dim() == 1) else torch.mean(torch.abs((x_flat - mu.unsqueeze(1))), (- 1)))
            minv = (x_flat.min() if (x_flat.dim() == 1) else x_flat.min((- 1))[0])
            maxv = (x_flat.max() if (x_flat.dim() == 1) else x_flat.max((- 1))[0])
            min_values = _deflatten_as(torch.max((mu - (6 * std)), minv), x)
            max_values = _deflatten_as(torch.min((mu + (6 * std)), maxv), x)
        elif (x_flat.dim() == 1):
            # Fully flattened: a single global min/max.
            min_values = _deflatten_as(x_flat.min(), x)
            max_values = _deflatten_as(x_flat.max(), x)
        else:
            # Per-slice min/max along the last (flattened) dim.
            min_values = _deflatten_as(x_flat.min((- 1))[0], x)
            max_values = _deflatten_as(x_flat.max((- 1))[0], x)
        if (reduce_dim is not None):
            if (reduce_type == 'mean'):
                min_values = min_values.mean(reduce_dim, keepdim=keepdim)
                max_values = max_values.mean(reduce_dim, keepdim=keepdim)
            else:
                min_values = min_values.min(reduce_dim, keepdim=keepdim)[0]
                max_values = max_values.max(reduce_dim, keepdim=keepdim)[0]
        # Force the quantization range to contain zero.
        min_values[(min_values > 0)] = 0
        max_values[(max_values < 0)] = 0
        range_values = (max_values - min_values)
        # Guard against zero-width ranges (division by zero downstream).
        range_values[(range_values == 0)] = 1
        return QParams(range=range_values, zero_point=min_values, num_bits=num_bits)
def print_progress(prefix, start_time, num_docs, num_fixed_text, num_non_english_docs, chars_non_english_docs, num_small_docs, chars_small_docs):
    """Print one ' | '-separated progress line for the document-cleaning job."""
    fields = [
        prefix,
        'elapsed time: {:.2f}'.format(time.time() - start_time),
        'documents: {}'.format(num_docs),
        'fixed text: {}'.format(num_fixed_text),
        'non-english: {}'.format(num_non_english_docs),
        'non-english chars: {}'.format(chars_non_english_docs),
        'small docs: {}'.format(num_small_docs),
        'small docs chars: {}'.format(chars_small_docs),
    ]
    print(' | '.join(fields), flush=True)
# NOTE(review): extraction artifact — this registration call was almost
# certainly a decorator in the original source; as a bare statement its
# return value was discarded and the class was never registered. Restored
# as a decorator, keeping the callable name that survived extraction —
# verify the original decorator name against the upstream repo.
@_data_params('mnist2usps')
class Mnist2UspsParams(DatasetParams):
    """Dataset hyper-parameters for the MNIST -> USPS adaptation task."""

    num_channels = 3      # input channels expected by the model
    image_size = 16       # images resized to 16x16
    mean = 0.5            # per-channel normalization mean
    std = 0.5             # per-channel normalization std
    num_cls = 10          # digit classes 0-9
    target_transform = None
def negative_pearson(y_true, y_predicted, sample_weight=None):
    """Return the negated Pearson correlation between targets and predictions.

    DataFrame inputs are flattened to 1-D arrays first. `sample_weight` is
    accepted for scorer-API compatibility but ignored.
    """
    if isinstance(y_true, pd.DataFrame):
        y_true = np.array(y_true).ravel()
    if isinstance(y_predicted, pd.DataFrame):
        y_predicted = np.array(y_predicted).ravel()
    correlation = np.corrcoef(y_true, y_predicted)[0, 1]
    return -correlation
def _check_parta2_roi_extractor(config, roi_extractor):
assert (config['type'] == roi_extractor.__class__.__name__)
assert (config.roi_layer.out_size == roi_extractor.roi_layer.out_size)
assert (config.roi_layer.max_pts_per_voxel == roi_extractor.roi_layer.max_pts_per_voxel) |
def create_mat():
    """Build an LMDB detection dataset from the IIIT5K training split.

    Reads traindata.mat, keeps the (image path, label) pairs whose image file
    can actually be opened, and writes the result with
    createDataset_detection.
    """
    lmdb_output_path = '../../dataset/LMDB/iiit5k_train'
    if not os.path.exists(lmdb_output_path):
        os.mkdir(lmdb_output_path)
    root = '../../dataset/IIIT5K'
    train_gt = loadmat(os.path.join(root, 'traindata.mat'))
    entries = train_gt['traindata'][0]
    image_paths = []
    image_labels = []
    for i in tqdm(range(len(entries))):
        rel_path = entries[i][0][0]
        gt_text = entries[i][1][0]
        full_path = os.path.join(root, rel_path)
        try:
            # Open the image to verify it is readable before keeping the pair.
            Image.open(full_path)
            image_labels.append(gt_text)
            image_paths.append(full_path)
        except OSError:
            # Unreadable/missing images are skipped on purpose (best effort).
            pass
    print('there are all %d images' % len(image_paths))
    createDataset_detection(lmdb_output_path, image_paths, image_labels)
def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer):
    """Convert one GAP-style coreference example into BERT ``InputFeatures``.

    Builds token ids plus three one-hot position masks (P/A/B) marking the
    token positions derived from the pronoun and the two candidate-name
    character offsets.

    Args:
        ex_index: index of the example (the first few get logged).
        example: example with ``text_a``, ``char_offsets`` (P, A, B) and ``label``.
        label_list: possible labels, mapped to integer ids by position.
        max_seq_length: fixed output length; tokens are truncated/padded to it.
        tokenizer: BERT tokenizer.
    """
    P_mask = ([0] * max_seq_length)
    A_mask = ([0] * max_seq_length)
    B_mask = ([0] * max_seq_length)
    if isinstance(example, PaddingInputExample):
        # Dummy example used to pad out a batch: all-zero features.
        return InputFeatures(input_ids=([0] * max_seq_length), input_mask=([0] * max_seq_length), P_mask=P_mask, A_mask=A_mask, B_mask=B_mask, segment_ids=([0] * max_seq_length), label_id=0, is_real_example=False)
    label_map = {}
    for (i, label) in enumerate(label_list):
        label_map[label] = i
    # Character offsets of the pronoun (P) and the two candidates (A, B);
    # sort them so the text splits into four ordered segments, remembering
    # which entity (0=P, 1=A, 2=B) each boundary belongs to.
    P_offset = example.char_offsets[0]
    A_offset = example.char_offsets[1]
    B_offset = example.char_offsets[2]
    char_off = sorted([[P_offset, 0], [A_offset, 1], [B_offset, 2]], key=(lambda x: x[0]))
    text_segments = [example.text_a[:char_off[0][0]], example.text_a[char_off[0][0]:char_off[1][0]], example.text_a[char_off[1][0]:char_off[2][0]], example.text_a[char_off[2][0]:]]
    if FLAGS.convert_data:
        # Optional augmentation: rewrite masculine pronouns to feminine forms
        # (text is lowercased as a side effect of the first substitution).
        for (i, segment) in enumerate(text_segments):
            seg = re.sub('(?<![a-zA-Z])he(?![a-zA-Z])', 'she', segment.lower())
            seg = re.sub('(?<![a-zA-Z])his(?![a-zA-Z])', 'her', seg)
            seg = re.sub('(?<![a-zA-Z])hers(?![a-zA-Z])', 'her', seg)
            seg = re.sub('(?<![a-zA-Z])him(?![a-zA-Z])', 'her', seg)
            seg = re.sub('(?<![a-zA-Z])himself(?![a-zA-Z])', 'herself', seg)
            text_segments[i] = seg
    token_segments = []
    tokens_in_segment = []
    for segment in text_segments:
        token_segment = tokenizer.tokenize(segment)
        token_segments.append(token_segment)
        tokens_in_segment.append(len(token_segment))
    # Truncate until everything fits in max_seq_length - 2. The outer
    # segments (0 and 3) are doubled in the argmax so they are trimmed first,
    # and always at the end farthest from the entity markers; middle segments
    # lose tokens from their center.
    while (np.sum(tokens_in_segment) > (max_seq_length - 2)):
        index = np.argmax([(tokens_in_segment[0] * 2), tokens_in_segment[1], tokens_in_segment[2], (tokens_in_segment[3] * 2)])
        if (index == 0):
            token_segments[index] = token_segments[index][1:]
        elif (index == 3):
            token_segments[index] = token_segments[index][:(- 1)]
        else:
            middle = (tokens_in_segment[index] // 2)
            token_segments[index] = (token_segments[index][:middle] + token_segments[index][(middle + 1):])
        tokens_in_segment[index] -= 1
    tokens = []
    segment_ids = ([0] * max_seq_length)  # single-sentence input: all zeros
    tokens.append('[CLS]')
    for segment in token_segments:
        temp = ''  # NOTE(review): unused leftover variable, kept as-is
        for token in segment:
            tokens.append(token)
        tokens.append('[SEP]')
    # Convert segment lengths into token positions for the P/A/B masks.
    # NOTE(review): the running offset counts segment tokens but not the
    # interior [SEP] tokens appended above, so later offsets land before the
    # corresponding [SEP] boundaries — verify this matches the intended
    # mask positions.
    offset = 1
    for (i, row) in enumerate(char_off):
        offset += tokens_in_segment[i]
        row[0] = offset
    # Restore entity order (P, A, B) before writing the one-hot masks.
    token_off = sorted(char_off, key=(lambda x: x[1]))
    P_mask[token_off[0][0]] = 1
    A_mask[token_off[1][0]] = 1
    B_mask[token_off[2][0]] = 1
    assert (len(tokens) < (max_seq_length + 1))
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_mask = ([1] * len(input_ids))
    # Zero-pad ids and mask up to the fixed sequence length.
    while (len(input_ids) < max_seq_length):
        input_ids.append(0)
        input_mask.append(0)
    assert (len(input_ids) == max_seq_length)
    assert (len(input_mask) == max_seq_length)
    assert (len(segment_ids) == max_seq_length)
    label_id = label_map[example.label]
    if (ex_index < 3):
        # Log the first few examples with their P/A/B mask bits inlined.
        tf.logging.info(('tokens: %s' % ' '.join([(((str(P_mask[i]) + str(A_mask[i])) + str(B_mask[i])) + tokenization.printable_text(tokens[i])) for i in range(len(tokens))])))
    feature = InputFeatures(input_ids=input_ids, input_mask=input_mask, P_mask=P_mask, A_mask=A_mask, B_mask=B_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True)
    return feature
def compute_dice_at_nfpr(preds: np.ndarray, targets: np.ndarray, max_fpr: float = 0.05) -> float:
    """Binarize `preds` at the largest ROC threshold whose false-positive rate
    stays at or below `max_fpr`, then return the Dice score vs `targets`."""
    preds = np.array(preds)
    targets = np.array(targets)
    fpr, _, thresholds = roc_curve(targets.reshape(-1), preds.reshape(-1))
    idx = max(0, fpr.searchsorted(max_fpr, 'right') - 1)
    binarized = np.where(preds > thresholds[idx], 1, 0)
    return compute_dice(binarized, targets)
def save_samples(samples, output_dir='copy_task', prefix='train', ext='src', reverse=False):
    """Write samples to <output_dir>/<prefix>.<ext>, one per line, optionally
    reversing each sample first."""
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    out_path = os.path.join(output_dir, prefix + '.' + ext)
    with open(out_path, mode='w', encoding='utf-8') as f:
        for sample in samples:
            ordered = sample[::-1] if reverse else sample
            f.write(sample_to_str(ordered) + '\n')
# NOTE(review): this was a bare `_SAMPLERS.register_module()` statement —
# `register_module()` with no module argument returns a decorator, so the
# returned decorator was discarded and the class was never registered.
# Restored as a decorator (keeping the registry name that survived
# extraction; verify against the upstream registry, e.g. DATA_SAMPLERS).
@_SAMPLERS.register_module()
class ClassAwareSampler(Sampler):
    """Distributed sampler that draws images class-by-class.

    Cycles over the (non-empty) categories, and for each visited category
    draws `num_sample_class` image indices, so rare classes are sampled as
    often as frequent ones. Indices are then sharded across ranks.
    """

    def __init__(self, dataset: BaseDataset, seed: Optional[int] = None, num_sample_class: int = 1) -> None:
        rank, world_size = get_dist_info()
        self.rank = rank
        self.world_size = world_size
        self.dataset = dataset
        self.epoch = 0
        if seed is None:
            # Shared random seed so all ranks generate the same index stream.
            seed = sync_random_seed()
        self.seed = seed
        assert (num_sample_class > 0) and isinstance(num_sample_class, int)
        self.num_sample_class = num_sample_class
        # category id -> list of image indices containing that category
        self.cat_dict = self.get_cat2imgs()
        self.num_samples = int(math.ceil((len(self.dataset) * 1.0) / world_size))
        self.total_size = self.num_samples * self.world_size
        # Categories with no images are excluded from the sampling cycle.
        self.num_cat_imgs = [len(x) for x in self.cat_dict.values()]
        self.valid_cat_inds = [i for (i, length) in enumerate(self.num_cat_imgs) if (length != 0)]
        self.num_classes = len(self.valid_cat_inds)

    def get_cat2imgs(self) -> Dict[int, list]:
        """Map each category index to the image indices that contain it.

        Raises:
            ValueError: if the dataset metainfo does not declare `classes`.
        """
        classes = self.dataset.metainfo.get('classes', None)
        if classes is None:
            raise ValueError('dataset metainfo must contain `classes`')
        cat2imgs = {i: [] for i in range(len(classes))}
        for i in range(len(self.dataset)):
            cat_ids = set(self.dataset.get_cat_ids(i))
            for cat in cat_ids:
                cat2imgs[cat].append(i)
        return cat2imgs

    def __iter__(self) -> Iterator[int]:
        # Deterministic per-epoch stream shared across ranks.
        g = torch.Generator()
        g.manual_seed(self.epoch + self.seed)
        label_iter_list = RandomCycleIter(self.valid_cat_inds, generator=g)
        data_iter_dict = dict()
        for i in self.valid_cat_inds:
            data_iter_dict[i] = RandomCycleIter(self.cat_dict[i], generator=g)

        def gen_cat_img_inds(cls_list, data_dict, num_sample_cls):
            """Visit every class once, drawing num_sample_cls images per class."""
            id_indices = []
            for _ in range(len(cls_list)):
                cls_idx = next(cls_list)
                for _ in range(num_sample_cls):
                    sample_id = next(data_dict[cls_idx])
                    id_indices.append(sample_id)
            return id_indices

        # Number of full class-cycles needed to cover total_size indices.
        num_bins = int(math.ceil(((self.total_size * 1.0) / self.num_classes) / self.num_sample_class))
        indices = []
        for i in range(num_bins):
            indices += gen_cat_img_inds(label_iter_list, data_iter_dict, self.num_sample_class)
        # Trim or wrap around so every rank gets exactly num_samples indices.
        if len(indices) >= self.total_size:
            indices = indices[:self.total_size]
        else:
            indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size
        offset = self.num_samples * self.rank
        indices = indices[offset:(offset + self.num_samples)]
        assert len(indices) == self.num_samples
        return iter(indices)

    def __len__(self) -> int:
        return self.num_samples

    def set_epoch(self, epoch: int) -> None:
        """Record the epoch so each epoch reshuffles deterministically."""
        self.epoch = epoch
class VGG_vanilla(nn.Module):
    """VGG19 feature extractor with a single 200-way linear classifier head."""

    def __init__(self):
        super(VGG_vanilla, self).__init__()
        self.vgg19_f = vgg19_features(pretrained=True, include_classifier=True, final_maxpool=True, final_relu=True)
        self.addons = nn.Linear(512 * 7 * 7, 200)

    def forward(self, x):
        features = self.vgg19_f(x)
        # The unpack implicitly asserts a 4-D (N, C, H, W) feature map.
        _, C, H, W = features.shape
        flat = features.view(features.size(0), -1)
        return self.addons(flat)
class ResNet(nn.Module):
    """ResNet backbone with GSoP (global second-order pooling) heads.

    GSoP_mode == 1: standard global-average-pooling classifier ("GSoP-Net1").
    Otherwise: channel reduction followed by matrix-power-normalized
    covariance pooling ("GSoP-Net2").
    """

    def __init__(self, block, layers, att_position, att_dim, GSoP_mode, num_classes=1000):
        self.inplanes = 64
        self.GSoP_mode = GSoP_mode
        super(ResNet, self).__init__()
        # Standard ResNet stem: 7x7 stride-2 conv, BN, ReLU, 3x3 max-pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # att_position[k] configures, per block of stage k, the attention insertion.
        self.layer1 = self._make_layer(block, 64, layers[0], att_position=att_position[0], att_dim=att_dim)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, att_position=att_position[1], att_dim=att_dim)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, att_position=att_position[2], att_dim=att_dim)
        # NOTE(review): stride=1 (not the usual 2) keeps a larger final feature
        # map — presumably for the second-order pooling head; confirm against
        # the GSoP reference configuration.
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, att_position=att_position[3], att_dim=att_dim)
        if (GSoP_mode == 1):
            self.avgpool = nn.AvgPool2d(14, stride=1)
            self.fc = nn.Linear((512 * block.expansion), num_classes)
            print('GSoP-Net1 generating...')
        else:
            # Reduce to isqrt_dim channels, then classify over the upper
            # triangle of the isqrt_dim x isqrt_dim covariance matrix.
            self.isqrt_dim = 256
            self.layer_reduce = nn.Conv2d((512 * block.expansion), self.isqrt_dim, kernel_size=1, stride=1, padding=0, bias=False)
            self.layer_reduce_bn = nn.BatchNorm2d(self.isqrt_dim)
            self.layer_reduce_relu = nn.ReLU(inplace=True)
            self.fc = nn.Linear(int(((self.isqrt_dim * (self.isqrt_dim + 1)) / 2)), num_classes)
            print('GSoP-Net2 generating...')
        # Kaiming init for convs, constant init for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1, att_position=[1], att_dim=128):
        """Build one residual stage of `blocks` blocks.

        NOTE(review): mutable default `att_position=[1]` — only read here,
        but fragile as a default argument.
        """
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # Projection shortcut when spatial size or channel count changes.
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, att_position[0], att_dim=att_dim))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, attention=att_position[i], att_dim=att_dim))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        if (self.GSoP_mode == 1):
            x = self.avgpool(x)
        else:
            # Second-order pooling: reduce channels, covariance pooling,
            # matrix square-root normalization, upper-triangle vectorization.
            x = self.layer_reduce(x)
            x = self.layer_reduce_bn(x)
            x = self.layer_reduce_relu(x)
            x = MPNCOV.CovpoolLayer(x)
            x = MPNCOV.SqrtmLayer(x, 3)
            x = MPNCOV.TriuvecLayer(x)
        x = x.view(x.size(0), (- 1))
        x = self.fc(x)
        return x
def SAL(num_blocks, **kwargs):
    """Build a flow configuration of alternating sinh-arcsinh and affine blocks.

    Parameters are either drawn at random or set to identity-like defaults,
    depending on the `init_random` flag resolved from the kwargs.
    """
    set_res, addf0, init_random, constraint = common_config(kwargs)
    input_dependent, input_dim, input_dependent_config = set_input_dependent_config(kwargs)
    block_array = []
    for _ in range(num_blocks):
        if init_random:
            a_aff, b_aff = numpy.random.randn(2)
            a_sal, b_sal = numpy.random.randn(2)
        else:
            a_aff, b_aff = 1.0, 0.0
            a_sal, b_sal = 0.0, 1.0
        affine_cfg = {'init_a': a_aff, 'init_b': b_aff, 'set_restrictions': set_res}
        sal_cfg = {'init_a': a_sal, 'init_b': b_sal, 'add_init_f0': addf0, 'set_restrictions': set_res, 'input_dependent': input_dependent, 'input_dim': input_dim, 'input_dependent_config': input_dependent_config}
        # sinh-arcsinh first, then the affine layer, per block.
        block_array.append(('sinh_arcsinh', sal_cfg))
        block_array.append(('affine', affine_cfg))
    return block_array
class MeshgridTest(tf.test.TestCase):
    """Tests ops.meshgrid against numpy's meshgrid semantics."""

    def test_meshgrid_numpy_comparison(self):
        """1-D inputs must reproduce np.meshgrid exactly."""
        x = np.arange(4)
        y = np.arange(6)
        exp_xgrid, exp_ygrid = np.meshgrid(x, y)
        xgrid, ygrid = ops.meshgrid(x, y)
        with self.test_session() as sess:
            xgrid_out, ygrid_out = sess.run([xgrid, ygrid])
        self.assertAllEqual(xgrid_out, exp_xgrid)
        self.assertAllEqual(ygrid_out, exp_ygrid)

    def test_meshgrid_multidimensional(self):
        """Multidimensional inputs: output shape is y.shape + x.shape and
        individual elements broadcast to the right positions."""
        np.random.seed(18)
        x = np.random.rand(4, 1, 2).astype(np.float32)
        y = np.random.rand(2, 3).astype(np.float32)
        xgrid, ygrid = ops.meshgrid(x, y)
        expected_shape = list(y.shape) + list(x.shape)
        self.assertEqual(xgrid.get_shape().as_list(), expected_shape)
        self.assertEqual(ygrid.get_shape().as_list(), expected_shape)
        with self.test_session() as sess:
            xgrid_out, ygrid_out = sess.run([xgrid, ygrid])
        self.assertEqual(xgrid_out.shape, tuple(expected_shape))
        self.assertEqual(ygrid_out.shape, tuple(expected_shape))
        # Spot-check a few (x-index, y-index) element positions.
        for xind, yind in [((3, 0, 0), (1, 2)), ((2, 0, 1), (0, 0)), ((0, 0, 0), (1, 1))]:
            self.assertEqual(xgrid_out[yind + xind], x[xind])
            self.assertEqual(ygrid_out[yind + xind], y[yind])
class DistributionalDuelingHeadModel(torch.nn.Module):
    """Dueling head over a categorical distribution of n_atoms per action.

    Combines a value stream (one distribution) with a mean-centered advantage
    stream (one distribution per action).
    """

    def __init__(self, input_size, hidden_sizes, output_size, n_atoms, grad_scale=(2 ** ((- 1) / 2))):
        super().__init__()
        if isinstance(hidden_sizes, int):
            hidden_sizes = [hidden_sizes]
        self.advantage_hidden = MlpModel(input_size, hidden_sizes)
        self.advantage_out = torch.nn.Linear(hidden_sizes[-1], output_size * n_atoms, bias=False)
        # Shared bias across actions (one scalar per atom).
        self.advantage_bias = torch.nn.Parameter(torch.zeros(n_atoms))
        self.value = MlpModel(input_size, hidden_sizes, output_size=n_atoms)
        self._grad_scale = grad_scale
        self._output_size = output_size
        self._n_atoms = n_atoms

    def forward(self, input):
        scaled = scale_grad(input, self._grad_scale)
        adv = self.advantage(scaled)
        val = self.value(scaled).view(-1, 1, self._n_atoms)
        centered_adv = adv - adv.mean(dim=1, keepdim=True)
        return val + centered_adv

    def advantage(self, input):
        """Per-action advantage logits, shaped (batch, actions, atoms)."""
        hidden = self.advantage_hidden(input)
        flat = self.advantage_out(hidden)
        return flat.view(-1, self._output_size, self._n_atoms) + self.advantage_bias
class BaseFunction():
    """Abstract interface for model functions; subclasses implement the hooks."""

    def __init__(self):
        super().__init__()

    def forward(self, batch):
        """Run the model on a batch. No-op here; override in subclasses."""
        pass

    def loss(self, batch, loss_function):
        """Compute the training loss for a batch. Override in subclasses."""
        pass

    def evaluate(self, batch, metrics):
        """Evaluate `metrics` on a batch. Override in subclasses."""
        pass

    def predict(self, batch):
        """Produce predictions for a batch. Override in subclasses."""
        pass
def test_no_preprocessing_steps_does_not_change_data(mock_data):
    """Identity (None) preprocessing steps must leave every view untouched."""
    brain_data, behavior_data, _ = mock_data
    views = [brain_data, behavior_data]
    mvp = MultiViewPreprocessing([None, None])
    mvp.fit(views)
    transformed_views = mvp.transform(views)
    assert all(np.array_equal(before, after) for before, after in zip(views, transformed_views))
class MocoLoss(nn.Module):
    """Feature-similarity loss backed by a frozen MoCo-pretrained ResNet-50.

    forward(y_hat, y, x) returns a tuple of:
      * mean (1 - cos similarity) between generated and target features,
      * mean similarity improvement of the output over the input,
      * per-sample similarity logs.
    """

    def __init__(self):
        super(MocoLoss, self).__init__()
        print('Loading MOCO model from path: {}'.format(model_paths['moco']))
        self.model = self.__load_model()
        self.model.cuda()
        self.model.eval()

    @staticmethod
    def __load_model():
        # BUG FIX: this was declared without `self` but called as
        # self.__load_model(), which raised TypeError (unexpected positional
        # argument). Declaring it a staticmethod makes the call valid.
        import torchvision.models as models
        model = models.__dict__['resnet50']()
        # Freeze the backbone; only the fc head would remain trainable
        # (and it is discarded below anyway).
        for name, param in model.named_parameters():
            if name not in ['fc.weight', 'fc.bias']:
                param.requires_grad = False
        checkpoint = torch.load(model_paths['moco'], map_location='cpu')
        state_dict = checkpoint['state_dict']
        # Strip the MoCo query-encoder prefix so keys match plain resnet50;
        # every original key is removed afterwards.
        for k in list(state_dict.keys()):
            if k.startswith('module.encoder_q') and (not k.startswith('module.encoder_q.fc')):
                state_dict[k[len('module.encoder_q.'):]] = state_dict[k]
            del state_dict[k]
        msg = model.load_state_dict(state_dict, strict=False)
        assert set(msg.missing_keys) == {'fc.weight', 'fc.bias'}
        # Keep the convolutional trunk up to (and including) global pooling.
        model = nn.Sequential(*list(model.children())[:(- 1)]).cuda()
        return model

    def extract_feats(self, x):
        """Return L2-normalized backbone features for a batch of images."""
        x = F.interpolate(x, size=224)
        x_feats = self.model(x)
        x_feats = nn.functional.normalize(x_feats, dim=1)
        # NOTE(review): squeeze() collapses ALL singleton dims, so a batch of
        # size 1 would lose its batch dimension — confirm callers never pass n=1.
        x_feats = x_feats.squeeze()
        return x_feats

    def forward(self, y_hat, y, x):
        n_samples = x.shape[0]
        x_feats = self.extract_feats(x)
        y_feats = self.extract_feats(y)
        y_hat_feats = self.extract_feats(y_hat)
        y_feats = y_feats.detach()  # target features are not optimized
        loss = 0
        sim_improvement = 0
        sim_logs = []
        count = 0
        for i in range(n_samples):
            diff_target = y_hat_feats[i].dot(y_feats[i])
            diff_input = y_hat_feats[i].dot(x_feats[i])
            diff_views = y_feats[i].dot(x_feats[i])
            sim_logs.append({'diff_target': float(diff_target), 'diff_input': float(diff_input), 'diff_views': float(diff_views)})
            # Cosine-style objective: push generated features toward the target's.
            loss += (1 - diff_target)
            sim_diff = (float(diff_target) - float(diff_views))
            sim_improvement += sim_diff
            count += 1
        return ((loss / count), (sim_improvement / count), sim_logs)
class AutoMatching(nn.Module):
    """DARTS-style searchable matching network over four resolution levels.

    A trellis of search cells spans resolution levels 3/6/12/24; `alphas`
    weight the candidate operations inside each cell and `betas` weight the
    connections between resolution levels. forward() fuses the matching maps
    decoded from every level into a single map.
    """

    def __init__(self, num_layers, filter_multiplier=8, block_multiplier=2, step=3, cell=cell_level_search.Cell):
        super(AutoMatching, self).__init__()
        self.cells = nn.ModuleList()
        self._num_layers = num_layers
        self._step = step
        self._block_multiplier = block_multiplier
        self._filter_multiplier = filter_multiplier
        self._initialize_alphas_betas()
        f_initial = int(self._filter_multiplier)
        self._num_end = (f_initial * self._block_multiplier)
        print('Matching Net block_multiplier:{0}'.format(block_multiplier))
        print('Matching Net filter_multiplier:{0}'.format(filter_multiplier))
        print('Matching Net f_initial:{0}'.format(f_initial))
        self.stem0 = ConvBR((self._num_end * 2), self._num_end, 3, stride=1, padding=1)
        # Layers 0..3 progressively open the coarser branches; from layer 4
        # onward the trellis keeps its full shape of four cells per layer.
        for i in range(self._num_layers):
            if (i == 0):
                cell1 = cell(self._step, self._block_multiplier, (- 1), None, f_initial, None, self._filter_multiplier)
                cell2 = cell(self._step, self._block_multiplier, (- 1), f_initial, None, None, (self._filter_multiplier * 2))
                self.cells += [cell1]
                self.cells += [cell2]
            elif (i == 1):
                cell1 = cell(self._step, self._block_multiplier, f_initial, None, self._filter_multiplier, (self._filter_multiplier * 2), self._filter_multiplier)
                cell2 = cell(self._step, self._block_multiplier, (- 1), self._filter_multiplier, (self._filter_multiplier * 2), None, (self._filter_multiplier * 2))
                cell3 = cell(self._step, self._block_multiplier, (- 1), (self._filter_multiplier * 2), None, None, (self._filter_multiplier * 4))
                self.cells += [cell1]
                self.cells += [cell2]
                self.cells += [cell3]
            elif (i == 2):
                cell1 = cell(self._step, self._block_multiplier, self._filter_multiplier, None, self._filter_multiplier, (self._filter_multiplier * 2), self._filter_multiplier)
                cell2 = cell(self._step, self._block_multiplier, (self._filter_multiplier * 2), self._filter_multiplier, (self._filter_multiplier * 2), (self._filter_multiplier * 4), (self._filter_multiplier * 2))
                cell3 = cell(self._step, self._block_multiplier, (- 1), (self._filter_multiplier * 2), (self._filter_multiplier * 4), None, (self._filter_multiplier * 4))
                cell4 = cell(self._step, self._block_multiplier, (- 1), (self._filter_multiplier * 4), None, None, (self._filter_multiplier * 8))
                self.cells += [cell1]
                self.cells += [cell2]
                self.cells += [cell3]
                self.cells += [cell4]
            elif (i == 3):
                cell1 = cell(self._step, self._block_multiplier, self._filter_multiplier, None, self._filter_multiplier, (self._filter_multiplier * 2), self._filter_multiplier)
                cell2 = cell(self._step, self._block_multiplier, (self._filter_multiplier * 2), self._filter_multiplier, (self._filter_multiplier * 2), (self._filter_multiplier * 4), (self._filter_multiplier * 2))
                cell3 = cell(self._step, self._block_multiplier, (self._filter_multiplier * 4), (self._filter_multiplier * 2), (self._filter_multiplier * 4), (self._filter_multiplier * 8), (self._filter_multiplier * 4))
                cell4 = cell(self._step, self._block_multiplier, (- 1), (self._filter_multiplier * 4), (self._filter_multiplier * 8), None, (self._filter_multiplier * 8))
                self.cells += [cell1]
                self.cells += [cell2]
                self.cells += [cell3]
                self.cells += [cell4]
            else:
                cell1 = cell(self._step, self._block_multiplier, self._filter_multiplier, None, self._filter_multiplier, (self._filter_multiplier * 2), self._filter_multiplier)
                cell2 = cell(self._step, self._block_multiplier, (self._filter_multiplier * 2), self._filter_multiplier, (self._filter_multiplier * 2), (self._filter_multiplier * 4), (self._filter_multiplier * 2))
                cell3 = cell(self._step, self._block_multiplier, (self._filter_multiplier * 4), (self._filter_multiplier * 2), (self._filter_multiplier * 4), (self._filter_multiplier * 8), (self._filter_multiplier * 4))
                cell4 = cell(self._step, self._block_multiplier, (self._filter_multiplier * 8), (self._filter_multiplier * 4), (self._filter_multiplier * 8), None, (self._filter_multiplier * 8))
                self.cells += [cell1]
                self.cells += [cell2]
                self.cells += [cell3]
                self.cells += [cell4]
        # Per-level decoders used to fuse the four resolutions at the end.
        self.last_3 = ConvBR(self._num_end, 1, 3, 1, 1, bn=False, relu=False)
        self.last_6 = ConvBR((self._num_end * 2), self._num_end, 1, 1, 0)
        self.last_12 = ConvBR((self._num_end * 4), (self._num_end * 2), 1, 1, 0)
        self.last_24 = ConvBR((self._num_end * 8), (self._num_end * 4), 1, 1, 0)

    def forward(self, x):
        self.level_3 = []
        self.level_6 = []
        self.level_12 = []
        self.level_24 = []
        stem = self.stem0(x)
        self.level_3.append(stem)
        count = 0
        # Placeholder buffer; each used entry is overwritten below. Unassigned
        # entries (e.g. [layer][0][0]) keep random values but are never read.
        normalized_betas = torch.randn(self._num_layers, 4, 3).cuda()
        if (torch.cuda.device_count() > 1):
            img_device = torch.device('cuda', x.get_device())
            normalized_alphas = F.softmax(self.alphas.to(device=img_device), dim=(- 1))
            for layer in range(len(self.betas)):
                if (layer == 0):
                    normalized_betas[layer][0][1:] = (F.softmax(self.betas[layer][0][1:].to(device=img_device), dim=(- 1)) * (2 / 3))
                elif (layer == 1):
                    normalized_betas[layer][0][1:] = (F.softmax(self.betas[layer][0][1:].to(device=img_device), dim=(- 1)) * (2 / 3))
                    normalized_betas[layer][1] = F.softmax(self.betas[layer][1].to(device=img_device), dim=(- 1))
                elif (layer == 2):
                    normalized_betas[layer][0][1:] = (F.softmax(self.betas[layer][0][1:].to(device=img_device), dim=(- 1)) * (2 / 3))
                    normalized_betas[layer][1] = F.softmax(self.betas[layer][1].to(device=img_device), dim=(- 1))
                    normalized_betas[layer][2] = F.softmax(self.betas[layer][2].to(device=img_device), dim=(- 1))
                else:
                    normalized_betas[layer][0][1:] = (F.softmax(self.betas[layer][0][1:].to(device=img_device), dim=(- 1)) * (2 / 3))
                    normalized_betas[layer][1] = F.softmax(self.betas[layer][1].to(device=img_device), dim=(- 1))
                    normalized_betas[layer][2] = F.softmax(self.betas[layer][2].to(device=img_device), dim=(- 1))
                    # BUG FIX: was self.betas[layer][3][:1] — a softmax over a
                    # single element is the constant 1.0, inconsistent with the
                    # single-GPU path below, which normalizes both entries.
                    normalized_betas[layer][3][:2] = (F.softmax(self.betas[layer][3][:2].to(device=img_device), dim=(- 1)) * (2 / 3))
        else:
            normalized_alphas = F.softmax(self.alphas, dim=(- 1))
            for layer in range(len(self.betas)):
                if (layer == 0):
                    normalized_betas[layer][0][1:] = (F.softmax(self.betas[layer][0][1:], dim=(- 1)) * (2 / 3))
                elif (layer == 1):
                    normalized_betas[layer][0][1:] = (F.softmax(self.betas[layer][0][1:], dim=(- 1)) * (2 / 3))
                    normalized_betas[layer][1] = F.softmax(self.betas[layer][1], dim=(- 1))
                elif (layer == 2):
                    normalized_betas[layer][0][1:] = (F.softmax(self.betas[layer][0][1:], dim=(- 1)) * (2 / 3))
                    normalized_betas[layer][1] = F.softmax(self.betas[layer][1], dim=(- 1))
                    normalized_betas[layer][2] = F.softmax(self.betas[layer][2], dim=(- 1))
                else:
                    normalized_betas[layer][0][1:] = (F.softmax(self.betas[layer][0][1:], dim=(- 1)) * (2 / 3))
                    normalized_betas[layer][1] = F.softmax(self.betas[layer][1], dim=(- 1))
                    normalized_betas[layer][2] = F.softmax(self.betas[layer][2], dim=(- 1))
                    normalized_betas[layer][3][:2] = (F.softmax(self.betas[layer][3][:2], dim=(- 1)) * (2 / 3))
        # Walk the trellis layer by layer, combining cell outputs weighted by
        # the normalized inter-level betas.
        for layer in range(self._num_layers):
            if (layer == 0):
                (level3_new,) = self.cells[count](None, None, self.level_3[(- 1)], None, normalized_alphas)
                count += 1
                (level6_new,) = self.cells[count](None, self.level_3[(- 1)], None, None, normalized_alphas)
                count += 1
                level3_new = (normalized_betas[layer][0][1] * level3_new)
                level6_new = (normalized_betas[layer][0][2] * level6_new)
                self.level_3.append(level3_new)
                self.level_6.append(level6_new)
            elif (layer == 1):
                (level3_new_1, level3_new_2) = self.cells[count](self.level_3[(- 2)], None, self.level_3[(- 1)], self.level_6[(- 1)], normalized_alphas)
                count += 1
                level3_new = ((normalized_betas[layer][0][1] * level3_new_1) + (normalized_betas[layer][1][0] * level3_new_2))
                (level6_new_1, level6_new_2) = self.cells[count](None, self.level_3[(- 1)], self.level_6[(- 1)], None, normalized_alphas)
                count += 1
                level6_new = ((normalized_betas[layer][0][2] * level6_new_1) + (normalized_betas[layer][1][2] * level6_new_2))
                (level12_new,) = self.cells[count](None, self.level_6[(- 1)], None, None, normalized_alphas)
                level12_new = (normalized_betas[layer][1][2] * level12_new)
                count += 1
                self.level_3.append(level3_new)
                self.level_6.append(level6_new)
                self.level_12.append(level12_new)
            elif (layer == 2):
                (level3_new_1, level3_new_2) = self.cells[count](self.level_3[(- 2)], None, self.level_3[(- 1)], self.level_6[(- 1)], normalized_alphas)
                count += 1
                level3_new = ((normalized_betas[layer][0][1] * level3_new_1) + (normalized_betas[layer][1][0] * level3_new_2))
                (level6_new_1, level6_new_2, level6_new_3) = self.cells[count](self.level_6[(- 2)], self.level_3[(- 1)], self.level_6[(- 1)], self.level_12[(- 1)], normalized_alphas)
                count += 1
                level6_new = (((normalized_betas[layer][0][2] * level6_new_1) + (normalized_betas[layer][1][1] * level6_new_2)) + (normalized_betas[layer][2][0] * level6_new_3))
                (level12_new_1, level12_new_2) = self.cells[count](None, self.level_6[(- 1)], self.level_12[(- 1)], None, normalized_alphas)
                count += 1
                level12_new = ((normalized_betas[layer][1][2] * level12_new_1) + (normalized_betas[layer][2][1] * level12_new_2))
                (level24_new,) = self.cells[count](None, self.level_12[(- 1)], None, None, normalized_alphas)
                level24_new = (normalized_betas[layer][2][2] * level24_new)
                count += 1
                self.level_3.append(level3_new)
                self.level_6.append(level6_new)
                self.level_12.append(level12_new)
                self.level_24.append(level24_new)
            elif (layer == 3):
                (level3_new_1, level3_new_2) = self.cells[count](self.level_3[(- 2)], None, self.level_3[(- 1)], self.level_6[(- 1)], normalized_alphas)
                count += 1
                level3_new = ((normalized_betas[layer][0][1] * level3_new_1) + (normalized_betas[layer][1][0] * level3_new_2))
                (level6_new_1, level6_new_2, level6_new_3) = self.cells[count](self.level_6[(- 2)], self.level_3[(- 1)], self.level_6[(- 1)], self.level_12[(- 1)], normalized_alphas)
                count += 1
                level6_new = (((normalized_betas[layer][0][2] * level6_new_1) + (normalized_betas[layer][1][1] * level6_new_2)) + (normalized_betas[layer][2][0] * level6_new_3))
                (level12_new_1, level12_new_2, level12_new_3) = self.cells[count](self.level_12[(- 2)], self.level_6[(- 1)], self.level_12[(- 1)], self.level_24[(- 1)], normalized_alphas)
                count += 1
                level12_new = (((normalized_betas[layer][1][2] * level12_new_1) + (normalized_betas[layer][2][1] * level12_new_2)) + (normalized_betas[layer][3][0] * level12_new_3))
                (level24_new_1, level24_new_2) = self.cells[count](None, self.level_12[(- 1)], self.level_24[(- 1)], None, normalized_alphas)
                count += 1
                level24_new = ((normalized_betas[layer][2][2] * level24_new_1) + (normalized_betas[layer][3][1] * level24_new_2))
                self.level_3.append(level3_new)
                self.level_6.append(level6_new)
                self.level_12.append(level12_new)
                self.level_24.append(level24_new)
            else:
                (level3_new_1, level3_new_2) = self.cells[count](self.level_3[(- 2)], None, self.level_3[(- 1)], self.level_6[(- 1)], normalized_alphas)
                count += 1
                level3_new = ((normalized_betas[layer][0][1] * level3_new_1) + (normalized_betas[layer][1][0] * level3_new_2))
                (level6_new_1, level6_new_2, level6_new_3) = self.cells[count](self.level_6[(- 2)], self.level_3[(- 1)], self.level_6[(- 1)], self.level_12[(- 1)], normalized_alphas)
                count += 1
                level6_new = (((normalized_betas[layer][0][2] * level6_new_1) + (normalized_betas[layer][1][1] * level6_new_2)) + (normalized_betas[layer][2][0] * level6_new_3))
                (level12_new_1, level12_new_2, level12_new_3) = self.cells[count](self.level_12[(- 2)], self.level_6[(- 1)], self.level_12[(- 1)], self.level_24[(- 1)], normalized_alphas)
                count += 1
                level12_new = (((normalized_betas[layer][1][2] * level12_new_1) + (normalized_betas[layer][2][1] * level12_new_2)) + (normalized_betas[layer][3][0] * level12_new_3))
                (level24_new_1, level24_new_2) = self.cells[count](self.level_24[(- 2)], self.level_12[(- 1)], self.level_24[(- 1)], None, normalized_alphas)
                count += 1
                level24_new = ((normalized_betas[layer][2][2] * level24_new_1) + (normalized_betas[layer][3][1] * level24_new_2))
                self.level_3.append(level3_new)
                self.level_6.append(level6_new)
                self.level_12.append(level12_new)
                self.level_24.append(level24_new)
            # Keep only the last two states per level to bound memory.
            self.level_3 = self.level_3[(- 2):]
            self.level_6 = self.level_6[(- 2):]
            self.level_12 = self.level_12[(- 2):]
            self.level_24 = self.level_24[(- 2):]
        (d, h, w) = (stem.size()[2], stem.size()[3], stem.size()[4])
        # Decode each level back to full resolution and sum the matching maps.
        upsample_6 = nn.Upsample(size=stem.size()[2:], mode='trilinear', align_corners=True)
        upsample_12 = nn.Upsample(size=[(d // 2), (h // 2), (w // 2)], mode='trilinear', align_corners=True)
        upsample_24 = nn.Upsample(size=[(d // 4), (h // 4), (w // 4)], mode='trilinear', align_corners=True)
        result_3 = self.last_3(self.level_3[(- 1)])
        result_6 = self.last_3(upsample_6(self.last_6(self.level_6[(- 1)])))
        result_12 = self.last_3(upsample_6(self.last_6(upsample_12(self.last_12(self.level_12[(- 1)])))))
        result_24 = self.last_3(upsample_6(self.last_6(upsample_12(self.last_12(self.last_24(self.level_24[(- 1)]))))))
        sum_matching_map = (((result_3 + result_6) + result_12) + result_24)
        return sum_matching_map

    def _initialize_alphas_betas(self):
        # One alpha row per intra-cell edge; one beta triple per (layer, level).
        k = sum((1 for i in range(self._step) for n in range((2 + i))))
        num_ops = len(PRIMITIVES)
        alphas = (0.001 * torch.randn(k, num_ops)).clone().detach().requires_grad_(True)
        betas = (0.001 * torch.randn(self._num_layers, 4, 3)).clone().detach().requires_grad_(True)
        self._arch_parameters = [alphas, betas]
        self._arch_param_names = ['alphas', 'betas']
        [self.register_parameter(name, torch.nn.Parameter(param)) for (name, param) in zip(self._arch_param_names, self._arch_parameters)]

    def arch_parameters(self):
        """Architecture parameters (alphas/betas) only."""
        return [param for (name, param) in self.named_parameters() if (name in self._arch_param_names)]

    def weight_parameters(self):
        """All network weights excluding architecture parameters."""
        return [param for (name, param) in self.named_parameters() if (name not in self._arch_param_names)]

    def genotype(self):
        # NOTE(review): `self.alphas_cell` is never defined in this class
        # (only `alphas` is registered) — this likely should be self.alphas;
        # confirm before relying on genotype().
        decoder = Decoder(self.alphas_cell, self._block_multiplier, self._step)
        return decoder.genotype_decode()
def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index):
bin_labels = labels.new_zeros(target_shape)
valid_mask = ((labels >= 0) & (labels != ignore_index))
inds = torch.nonzero(valid_mask, as_tuple=True)
if (inds[0].numel() > 0):
if (labels.dim() == 3):
bin_labels[(inds[0], labels[valid_mask], inds[1], inds[2])] = 1
else:
bin_labels[(inds[0], labels[valid_mask])] = 1
valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float()
if (label_weights is None):
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.unsqueeze(1).expand(target_shape)
bin_label_weights *= valid_mask
return (bin_labels, bin_label_weights) |
def func(inp, net=None, target=None):
    """Run `inp` through `net`, print and return the NLL loss against `target`."""
    logits = net(inp)
    loss = torch.nn.functional.nll_loss(logits, target=torch.LongTensor([target]))
    print(f'Loss: {loss.item()}')
    return loss
(scope='module')
def regression_data():
    """Return (X, y) taken from the 'full' split of the synthetic regression set."""
    full_split = synthetic_regression()['full']
    return (full_split['X'], full_split['y'])
class FlaxElectraForCausalLM(metaclass=DummyObject):
    """Placeholder that raises unless the `flax` backend is installed."""

    # Backends required for the real implementation.
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
def plot_collision(collision_report: CollisionReport, sim_log: SimLog):
    """Draw a collision snapshot on the current matplotlib figure.

    Plots the impact point and (scaled) impact normal, each involved
    player's footprint, their linear velocities before/after the impact
    (arrows plus angular-rate text), the velocity of the impact point,
    and the named impact locations on each footprint.
    """
    fig = plt.gca().get_figure()
    # Player states interpolated at the instant of the collision.
    log_entries: Mapping[(PlayerName, LogEntry)] = sim_log.at_interp(collision_report.at_time)
    imp_point = collision_report.impact_point.coords[0]
    # Normal scaled down purely for display.
    n = (0.2 * collision_report.impact_normal)
    plt.plot(*imp_point, 'o', zorder=_Zorders.IMPACT_POINT)
    n_color = 'r'
    plt.arrow(imp_point[0], imp_point[1], n[0], n[1], ec=n_color, fc=n_color, alpha=0.9, zorder=_Zorders.IMPACT_NORMAL)
    name = 'Dark2'
    cmap: ListedColormap = colormaps[name]
    colors = list(cmap.colors)
    # Shared colors for the before/after velocity arrows and legend.
    (col_before, col_after) = ('darkorange', 'seagreen')
    for (i, (player, p_report)) in enumerate(collision_report.players.items()):
        p_color = colors[i]
        footprint = p_report.footprint
        plt.plot(*footprint.exterior.xy, color=p_color)
        # Prefer the logged pose; fall back to the footprint centroid when the
        # player has no entry at this timestamp.
        try:
            (xc, yc) = (log_entries[player].state.x, log_entries[player].state.y)
        except KeyError:
            (xc, yc) = footprint.centroid.coords[0]
        plt.text(xc, yc, f'{player}', horizontalalignment='center', verticalalignment='center', zorder=_Zorders.PLAYER_NAME)
        # Velocities scaled down for display; index [0] is the linear part.
        vel_scale = 0.3
        vel = (vel_scale * p_report.velocity[0])
        vel_after = (vel_scale * p_report.velocity_after[0])
        arr_width = 0.01
        head_width = (arr_width * 5)
        plt.arrow(xc, yc, vel[0], vel[1], width=arr_width, head_width=head_width, ec=col_before, fc=col_before, alpha=0.8, zorder=_Zorders.VEL_BEFORE)
        plt.arrow(xc, yc, vel_after[0], vel_after[1], width=arr_width, head_width=head_width, ec=col_after, fc=col_after, alpha=0.8, zorder=_Zorders.VEL_AFTER)
        # Curved debug arrow plus angular-rate annotations (velocity[1] is
        # presumably the angular component — confirm against CollisionReport).
        arrow_shift = 0.1
        arrow_patch = FancyArrowPatch(((xc - arrow_shift), yc), ((xc + arrow_shift), yc), connectionstyle=f'arc3,rad={arrow_shift}', color='k', zorder=_Zorders.DEBUG)
        fig.patches.extend([arrow_patch])
        plt.text(xc, (yc + (4 * arrow_shift)), f'{p_report.velocity[1]:.3f}', horizontalalignment='center', verticalalignment='center', zorder=_Zorders.VEL_BEFORE, color=col_before)
        plt.text(xc, (yc + (2 * arrow_shift)), f'{p_report.velocity_after[1]:.3f}', horizontalalignment='center', verticalalignment='center', zorder=_Zorders.VEL_AFTER, color=col_after)
        # Velocity of the impact point itself, from rigid-body kinematics.
        ap = (np.array(imp_point) - np.array([xc, yc]))
        omega = p_report.velocity[1]
        vel_atP = (vel_scale * velocity_of_P_given_A(((1 / vel_scale) * vel), omega, ap))
        plt.arrow(imp_point[0], imp_point[1], vel_atP[0], vel_atP[1], width=arr_width, head_width=head_width, ec=p_color, fc=p_color, alpha=0.8, zorder=_Zorders.DEBUG)
        # Shade and label each named impact location on the footprint.
        for loc in p_report.locations:
            (loc_str, loc_shape) = loc
            plt.fill(*loc_shape.exterior.xy, fc='cyan', ec='darkblue', alpha=0.4, zorder=_Zorders.IMPACT_LOCATION)
            (xc, yc) = loc_shape.centroid.coords[0]
            plt.text(xc, yc, f'{loc_str}', horizontalalignment='center', verticalalignment='center', zorder=_Zorders.IMPACT_LOCATION_NAME)
    before_patch = mpatches.Patch(color=col_before, label='before')
    after_patch = mpatches.Patch(color=col_after, label='after')
    plt.legend(handles=[before_patch, after_patch])
    fig.set_layout_engine('tight')
    plt.axis('equal')
    plt.draw()
    return
def main(args, debug=True):
    """Build the visualization app and serve it, optionally on all interfaces."""
    app = build_app(args.dataset, args.pred_dir, args.video_dir, args.frame_dir, args.flow_dir, args.nms)
    run_kwargs = {'debug': debug, 'port': args.port}
    if args.public:
        # Bind to every interface so the server is reachable from other hosts.
        run_kwargs['host'] = '0.0.0.0'
    app.run(**run_kwargs)
def format_step(step):
    """Render a step descriptor as a human-readable string.

    `step` is either a preformatted string (returned unchanged) or a
    sequence of up to three counters: epoch, training iteration,
    validation iteration. Extra elements beyond three are ignored.
    """
    if isinstance(step, str):
        return step
    templates = ['Training Epoch: {} ', 'Training Iteration: {} ', 'Validation Iteration: {} ']
    return ''.join(fmt.format(value) for fmt, value in zip(templates, step))
_module()
class QualityFocalLoss(nn.Module):
    """Quality Focal Loss (QFL); only the sigmoid formulation is supported."""

    def __init__(self, use_sigmoid=True, beta=2.0, reduction='mean', loss_weight=1.0, activated=False):
        super(QualityFocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'
        self.use_sigmoid = use_sigmoid
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.activated = activated

    def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None):
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override if reduction_override else self.reduction
        if not self.use_sigmoid:
            raise NotImplementedError
        # Pick the variant matching whether `pred` is already sigmoid-activated.
        loss_fn = quality_focal_loss_with_prob if self.activated else quality_focal_loss
        loss_cls = self.loss_weight * loss_fn(pred, target, weight, beta=self.beta, reduction=reduction, avg_factor=avg_factor)
        return loss_cls
class Pose1DTemporalEncoder(nn.Module):
    """Temporal 1-D convolutional encoder over pose sequences.

    Input and output are (batch, time, channels); convolutions run over
    the time axis internally.
    """

    def __init__(self, input_channels, output_channels):
        super(Pose1DTemporalEncoder, self).__init__()
        self._input_channels = input_channels
        self._output_channels = output_channels
        self.init_model()

    def init_model(self):
        # Channel progression of the stack; each transition is a Conv1d
        # (kernel 3, padding 1) followed by BatchNorm1d + ReLU.
        widths = [self._input_channels, 32, 32, 64, 64, 128, 128, self._output_channels]
        layers = []
        for c_in, c_out in zip(widths[:-1], widths[1:]):
            layers.append(nn.Conv1d(in_channels=c_in, out_channels=c_out, kernel_size=3, padding=1))
            layers.append(nn.BatchNorm1d(c_out))
            layers.append(nn.ReLU(True))
        # Final projection without normalization or activation.
        layers.append(nn.Conv1d(in_channels=self._output_channels, out_channels=self._output_channels, kernel_size=3, padding=1))
        self._model = nn.Sequential(*layers)

    def forward(self, x):
        # (B, T, C) -> (B, C, T) for Conv1d, then back.
        x = torch.transpose(x, 1, 2)
        x = self._model(x)
        return torch.transpose(x, 1, 2)
def parse_args():
    """Command-line options for finetuning a 3D CNN from TCG pretrained weights."""
    p = argparse.ArgumentParser(description='Finetune 3D CNN from TCG pretrained weights')
    # Model / data selection.
    p.add_argument('--cl', type=int, default=16, help='clip length')
    p.add_argument('--model', type=str, default='r3d', help='c3d/r3d/r21d')
    p.add_argument('--dataset', type=str, default='sthv2', help='ucf101/hmdb51/K400/sthv2')
    p.add_argument('--gpu', type=int, default=0, help='GPU id')
    # Optimization.
    p.add_argument('--lr', type=float, default=0.001, help='learning rate')
    p.add_argument('--ft_lr', type=float, default=0.001, help='finetune learning rate')
    p.add_argument('--momentum', type=float, default=0.9, help='momentum')
    p.add_argument('--wd', type=float, default=0.0005, help='weight decay')
    # Logging / checkpoints.
    p.add_argument('--log', type=str, default='log', help='log directory')
    p.add_argument('--ckpt', type=str, default='log/Frequency_3090/K400_TCG_r3d_cl16_it8_tl3_/best_acc_model_216.pt', help='checkpoint path')
    p.add_argument('--desp', type=str, help='additional description')
    # Training schedule.
    p.add_argument('--epochs', type=int, default=200, help='number of total epochs to run')
    p.add_argument('--start-epoch', type=int, default=1, help='manual epoch number (useful on restarts)')
    p.add_argument('--bs', type=int, default=32, help='mini-batch size')
    p.add_argument('--workers', type=int, default=4, help='number of data loading workers')
    p.add_argument('--pf', type=int, default=10, help='print frequency every batch')
    p.add_argument('--seed', type=int, default=632, help='seed for initializing training.')
    return p.parse_args()
class VATGenerator(object):
    """Virtual Adversarial Training perturbation generator.

    Given an input batch, iteratively estimates the adversarial direction
    (via the KL divergence between predictions on clean and perturbed
    inputs) and returns the adversarial image and the perturbation.
    """

    def __init__(self, net: nn.Module, xi=1e-06, eplision=10, ip=1) -> None:
        super(VATGenerator, self).__init__()
        self.xi = xi  # finite-difference step for the power-iteration probe
        self.eps = eplision  # perturbation magnitude
        self.ip = ip  # number of power iterations
        self.net = net

    @staticmethod
    def _l2_normalize(d: Tensor) -> Tensor:
        # BUG FIX: was declared without `self` yet called as
        # self._l2_normalize(...), raising TypeError; now a staticmethod.
        # Normalizes each sample of `d` to unit L2 norm (in place).
        d_reshaped = d.view(d.shape[0], (- 1), *(1 for _ in range((d.dim() - 2))))
        d /= torch.norm(d_reshaped, dim=1, keepdim=True)
        assert torch.allclose(d.view(d.shape[0], (- 1)).norm(dim=1), torch.ones(d.shape[0]).to(d.device), rtol=0.001)
        return d

    @staticmethod
    def kl_div_with_logit(q_logit: Tensor, p_logit: Tensor):
        # BUG FIX: same missing-`self` defect as _l2_normalize; staticmethod.
        # Per-sample KL(q || p) computed from raw logits.
        assert (not q_logit.requires_grad), f'q_logit should be no differentiable, like y.'
        assert p_logit.requires_grad
        q = F.softmax(q_logit, dim=1)
        logq = F.log_softmax(q_logit, dim=1)
        logp = F.log_softmax(p_logit, dim=1)
        qlogq = (q * logq).sum(dim=1)
        qlogp = (q * logp).sum(dim=1)
        return (qlogq - qlogp)

    def __call__(self, img: Tensor, loss_name='kl') -> Tuple[(Tensor, Tensor)]:
        """Return (adversarial image, adversarial perturbation), both detached."""
        with torch.no_grad():
            pred = self.net(img)
        # Start from a random unit direction.
        d = torch.Tensor(img.size()).normal_()
        d = self._l2_normalize(d).to(img.device)
        self.net.zero_grad()
        with _disable_tracking_bn_stats(self.net):
            for _ in range(self.ip):
                # Probe the loss surface along d and take the gradient direction.
                d = (self.xi * self._l2_normalize(d))
                d.requires_grad = True
                y_hat = self.net((img + d))
                delta_kl = self.kl_div_with_logit(pred.detach(), y_hat)
                delta_kl.mean().backward()
                d = d.grad.data.clone()
                self.net.zero_grad()
            d = self._l2_normalize(d)
            # NOTE(review): `eplision` defaults to a float, which has no
            # .view(); this line only works if callers pass eps as a tensor —
            # confirm the intended type upstream.
            r_adv = ((0.25 * self.eps.view((- 1), 1)) * d)
            img_adv = (img + r_adv.detach())
        return (img_adv.detach(), r_adv.detach())
def preprocess_lm_data(data_dir):
    """Binarize the train/valid/test LM splits found in `data_dir`, in place."""
    parser = options.get_preprocessing_parser()
    argv = [
        '--only-source',
        '--trainpref', os.path.join(data_dir, 'train.out'),
        '--validpref', os.path.join(data_dir, 'valid.out'),
        '--testpref', os.path.join(data_dir, 'test.out'),
        '--destdir', data_dir,
    ]
    preprocess.main(parser.parse_args(argv))
def combine_json(dir_list, output_path, shuffle=False):
    """Concatenate several JSON files (each containing a list) into one file.

    Args:
        dir_list: paths of the input JSON files.
        output_path: destination path for the combined JSON list.
        shuffle: when True, randomly shuffle the combined samples first.
    """
    data = []
    for file_path in dir_list:
        with open(file_path, 'r') as infile:
            cur_data = json.load(infile)
        print(file_path, len(cur_data), 'samples')
        # extend() instead of repeated `data + cur_data`, which re-copied the
        # accumulated list on every file (quadratic in total size).
        data.extend(cur_data)
    if shuffle:
        print('shuffle the data')
        random.shuffle(data)
    with open(output_path, 'w') as outfile:
        json.dump(data, outfile, indent=1)
    print('Successfully concatenated {:d} JSON files into {:s} with {:d} qa pairs.'.format(len(dir_list), output_path, len(data)))
def check_depths():
    """Profile GFLOPs and parameter counts for each slimmable ResNet variant."""
    from nets.profile_func import profile_slimmable_models
    print(f'profile model GFLOPs (forward complexity) and size (#param)')
    for builder in [resnet18, resnet34, resnet50]:
        net = builder(track_running_stats=False, bn_type='bn')
        net.eval()
        print(f'''
model {builder.__name__} on {('training' if net.training else 'eval')} mode''')
        profile_slimmable_models(net, net.slimmable_ratios)
def model_slim_mha(model, dataloader=None):
    """Permanently prune attention heads from `model`'s self-attention modules."""
    from .pattern_analyzer import SelfMHASearcher
    from .weight_slim import MHACompression
    logger.warning('You are using model slim methods, some attention heads will be removed permanently.')
    searcher = SelfMHASearcher(model, dataloader)
    layer_names, _ = searcher.search(split_qkv_ffn=False)
    mha_modules = searcher.from_layer_name_to_object(searcher.obtain_mha_module(layer_names))
    for module in mha_modules:
        # Each compression object is applied immediately after construction.
        MHACompression(module)()
    return model
class DriftingFiniteArmedBernoulliBandit(FiniteArmedBernoulliBandit):
    """Bernoulli bandit whose arm probabilities drift back toward the prior.

    On every step the Beta parameters decay toward (a0, b0) at rate gamma
    before absorbing the new observation, and all arm probabilities are
    resampled from the updated Beta posteriors.
    """

    def __init__(self, n_arm, a0=1.0, b0=1.0, gamma=0.01):
        self.n_arm = n_arm
        self.a0 = a0
        self.b0 = b0
        self.prior_success = np.array([a0] * n_arm)
        self.prior_failure = np.array([b0] * n_arm)
        self.gamma = gamma
        self.probs = np.array([np.random.beta(a0, b0) for _ in range(n_arm)])

    def set_prior(self, prior_success, prior_failure):
        """Override the current Beta parameters for every arm."""
        self.prior_success = np.array(prior_success)
        self.prior_failure = np.array(prior_failure)

    def get_optimal_reward(self):
        """Best achievable expected reward at the current step."""
        return np.max(self.probs)

    def advance(self, action, reward):
        """Decay toward the prior, absorb the observation, resample all arms."""
        keep = 1 - self.gamma
        self.prior_success = self.prior_success * keep + self.a0 * self.gamma
        self.prior_failure = self.prior_failure * keep + self.b0 * self.gamma
        self.prior_success[action] += reward
        self.prior_failure[action] += 1 - reward
        self.probs = np.array([np.random.beta(s, f) for s, f in zip(self.prior_success, self.prior_failure)])
def build_save_dataset(corpus_type, fields, opt):
    """Build the `corpus_type` ('train'/'valid') dataset and save it to disk.

    Returns the path of the saved .pt file.
    """
    assert corpus_type in ['train', 'valid']
    corpus = opt.train_dir if corpus_type == 'train' else opt.valid_dir
    dataset = inputters.build_dataset(fields, data_path=corpus, data_type=opt.data_type, seq_length=opt.seq_length, seq_length_trunc=opt.seq_length_trunc, dynamic_dict=opt.dynamic_dict)
    # presumably fields are re-attached at load time; cleared here to keep the
    # serialized file small — TODO confirm against the loading code.
    dataset.fields = []
    pt_file = '{:s}.{:s}.pt'.format(opt.save_data, corpus_type)
    logger.info(' * saving %s dataset to %s.' % (corpus_type, pt_file))
    torch.save(dataset, pt_file)
    return pt_file
def iresnet100(pretrained=False, progress=True, **kwargs):
    """Construct IResNet-100 (IBasicBlock stack with depths [3, 13, 30, 3])."""
    layer_depths = [3, 13, 30, 3]
    return _iresnet('iresnet100', IBasicBlock, layer_depths, pretrained, progress, **kwargs)
def get_model_files(model_type: str, frameworks: Optional[List[str]]=None) -> Dict[(str, Union[(Path, List[Path])])]:
    """Collect the source, doc, and test files associated with a model type.

    Returns a dict with keys 'doc_file', 'model_files', 'module_name' and
    'test_files'; test files that do not exist on disk are dropped.
    """
    module_name = model_type_to_module_name(model_type)
    model_module = TRANSFORMERS_PATH / 'models' / module_name
    model_files = filter_framework_files(list(model_module.glob('*.py')), frameworks=frameworks)
    doc_file = REPO_PATH / 'docs' / 'source' / 'en' / 'model_doc' / f'{model_type}.mdx'
    test_prefixes = [
        'test_modeling_',
        'test_modeling_tf_',
        'test_modeling_flax_',
        'test_tokenization_',
        'test_image_processing_',
        'test_feature_extraction_',
        'test_processor_',
    ]
    test_names = filter_framework_files([f'{prefix}{module_name}.py' for prefix in test_prefixes], frameworks=frameworks)
    test_dir = REPO_PATH / 'tests' / 'models' / module_name
    test_files = [test_dir / name for name in test_names]
    test_files = [f for f in test_files if f.exists()]
    return {'doc_file': doc_file, 'model_files': model_files, 'module_name': module_name, 'test_files': test_files}
def main():
    """CLI entry point: run the DIM (diverse-input) attack against a source
    classifier on CIFAR/MNIST, report the fool rate, and optionally evaluate
    black-box transfer to other pretrained classifiers.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default='cifar')
    parser.add_argument('--start', type=int, default=0)
    parser.add_argument('--end', type=int, default=100)
    parser.add_argument('--n_iter', type=int, default=1000)
    parser.add_argument('--transfer', action='store_true')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--sweep', action='store_true')
    parser.add_argument('--wandb', action='store_true', default=False, help='Use wandb for logging')
    parser.add_argument('--ensemble_adv_trained', action='store_true')
    parser.add_argument('--batch_size', type=int, default=256, metavar='S')
    parser.add_argument('--test_batch_size', type=int, default=32, metavar='S')
    parser.add_argument('--train_set', default='test', choices=['train_and_test', 'test', 'train'], help='add the test set in the training set')
    parser.add_argument('--modelIn', type=str, default='../pretrained_classifiers/cifar/res18/model_0.pt')
    parser.add_argument('--robust_model_path', type=str, default='../madry_challenge_models/mnist/adv_trained/mnist_lenet5_advtrained.pt')
    parser.add_argument('--dir_test_models', type=str, default='../', help='The path to the directory containing the classifier models for evaluation.')
    parser.add_argument('--max_test_model', type=int, default=2, help='The maximum number of pretrained classifiers to use for testing.')
    parser.add_argument('--train_on_madry', default=False, action='store_true', help='Train using Madry tf grad')
    parser.add_argument('--train_on_list', default=False, action='store_true', help='train on a list of classifiers')
    parser.add_argument('--attack_ball', type=str, default='Linf', choices=['L2', 'Linf'])
    parser.add_argument('--source_arch', default='res18', help='The architecture we want to attack on CIFAR.')
    parser.add_argument('--target_arch', default=None, help='The architecture we want to blackbox transfer to on CIFAR.')
    parser.add_argument('--momentum', type=float, default=0.0, metavar='M', help='Randomly apply input Transformation')
    parser.add_argument('--transform_prob', type=float, default=0.5, metavar='M', help='Randomly apply input Transformation')
    parser.add_argument('--resize_factor', type=float, default=1.1, metavar='M', help='Resize Factor for Random Resizing')
    parser.add_argument('--epsilon', type=float, default=0.1, metavar='M', help='Epsilon for Delta (default: 0.1)')
    parser.add_argument('--train_with_critic_path', type=str, default=None, help='Train generator with saved critic model')
    parser.add_argument('--model', help='path to model')
    parser.add_argument('--adv_models', nargs='*', help='path to adv model(s)')
    parser.add_argument('--type', type=int, default=0, help='Model type (default: 0)')
    parser.add_argument('--namestr', type=str, default='NoBox', help='additional info in output filename to describe experiments')
    args = parser.parse_args()
    args.dev = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    (train_loader, test_loader) = create_loaders(args, root='../data')
    # wandb API key is read from a local settings file when present.
    if os.path.isfile('../settings.json'):
        with open('../settings.json') as f:
            data = json.load(f)
        args.wandb_apikey = data.get('wandbapikey')
    if args.wandb:
        os.environ['WANDB_API_KEY'] = args.wandb_apikey
        wandb.init(project='NoBox-sweeps', name='AutoAttack-{}'.format(args.dataset))
    adv_models = None
    # NOTE(review): `model` is only assigned inside the cifar/mnist branches
    # below — any other --dataset value raises NameError at model.to(); confirm
    # whether other datasets were ever intended.
    if (args.dataset == 'cifar'):
        (args.nc, args.h, args.w) = (3, 32, 32)
        (model, l_test_classif_paths) = load_all_classifiers(args, load_archs=[args.source_arch])
        model_type = args.source_arch
        if (args.target_arch is not None):
            # Only the paths of the target architecture are kept; the target
            # network itself is freed immediately.
            (model_target, l_test_classif_paths) = load_all_classifiers(args, load_archs=[args.target_arch])
            model_type = args.target_arch
            del model_target
            torch.cuda.empty_cache()
        if args.ensemble_adv_trained:
            adv_model_names = args.adv_models
            l_test_classif_paths = []
            adv_models = ([None] * len(adv_model_names))
            for i in range(len(adv_model_names)):
                adv_path = os.path.join(args.dir_test_models, 'pretrained_classifiers', args.dataset, 'ensemble_adv_trained', (adv_model_names[i] + '.pt'))
                (init_func, _) = ARCHITECTURES[adv_model_names[i]]
                temp_model = init_func().to(args.dev)
                adv_models[i] = nn.DataParallel(temp_model)
                adv_models[i].load_state_dict(torch.load(adv_path))
                l_test_classif_paths.append([adv_path])
            model_type = 'Ensemble Adversarial'
    elif (args.dataset == 'mnist'):
        if (args.source_arch == 'natural'):
            (model, l_test_classif_paths) = load_all_classifiers(args, load_archs=['natural'])
            model_type = 'natural'
        elif ((args.source_arch == 'ens_adv') or args.ensemble_adv_trained):
            adv_model_names = args.adv_models
            adv_models = ([None] * len(adv_model_names))
            for i in range(len(adv_model_names)):
                # NOTE(review): `type` shadows the builtin here.
                type = get_model_type(adv_model_names[i])
                adv_models[i] = load_model(args, adv_model_names[i], type=type).to(args.dev)
            path = os.path.join(args.dir_test_models, 'pretrained_classifiers', args.dataset, 'ensemble_adv_trained', args.model)
            (model, l_test_classif_paths) = load_all_classifiers(args, load_archs=['natural'])
            l_test_classif_paths = [path]
            model_type = 'Ensemble Adversarial'
    model.to(args.dev)
    model.eval()
    print(('Testing on %d Test Classifiers with Source Model %s' % (len(l_test_classif_paths), args.source_arch)))
    # Materialize the whole test set on the device once (avoids re-iterating
    # the loader in the MNIST branch below).
    l = [x.unsqueeze(0) for (x, y) in test_loader.dataset]
    x_test = torch.cat(l, 0).to(args.dev)
    l = [y for (x, y) in test_loader.dataset]
    y_test = torch.Tensor(l).long().to(args.dev)
    device_count = torch.cuda.device_count()
    if (device_count > 1):
        print(('CUDA Device Count is %d, Error might happen. Use export CUDA_VISIBLE_DEVICES=0' % device_count))
    attacker = DIM(args, model, attack_ball=args.attack_ball, eps=args.epsilon, n_iter=args.n_iter, decay_factor=args.momentum)
    advcorrect = 0
    # Craft adversarial examples with gradients disabled on the source model.
    with ctx_noparamgrad_and_eval(model):
        adv_complete_list = []
        if (args.dataset == 'cifar'):
            # Perturb batch-by-batch until args.batch_size samples are covered.
            for (batch_idx, (x_batch, y_batch)) in enumerate(test_loader):
                if (((batch_idx + 1) * args.test_batch_size) > args.batch_size):
                    break
                (x_batch, y_batch) = (x_batch.to(args.dev), y_batch.to(args.dev))
                adv_complete_list.append(attacker.perturb(x_batch, y_batch))
            adv_complete = torch.cat(adv_complete_list)
        else:
            adv_complete = attacker.perturb(x_test[:args.batch_size], y_test[:args.batch_size])
    # Fool rate on the source (white-box) model itself.
    output = model(adv_complete)
    pred = output.max(1, keepdim=True)[1]
    advcorrect += pred.eq(y_test[:args.batch_size].view_as(pred)).sum().item()
    fool_rate = (1 - (advcorrect / float(args.batch_size)))
    print(('Test set base model fool rate: %f' % fool_rate))
    if args.transfer:
        # Evaluate black-box transfer of the crafted examples; the source
        # model is freed first to make room for the test classifiers.
        adv_img_list = []
        y_orig = y_test[:args.batch_size]
        for i in range(0, len(adv_complete)):
            adv_img_list.append([adv_complete[i].unsqueeze(0), y_orig[i]])
        del model
        torch.cuda.empty_cache()
        baseline_transfer(args, attacker, 'DI-Attack', model_type, adv_img_list, l_test_classif_paths, adv_models) |
class PathConv(torch.autograd.Function):
    """Custom autograd op: path convolution dispatched to a compiled
    CUDA or CPU extension depending on where the features live.
    """
    # NOTE(review): modern PyTorch requires forward/backward to be
    # @staticmethod on autograd.Function subclasses; this legacy style
    # presumably targets an older torch — confirm before upgrading.
    def forward(ctx, path_indices, features):
        if features.is_cuda:
            output = gckn_fast_cuda.path_conv_forward(path_indices, features)
        else:
            output = gckn_fast_cpu.path_conv_forward(path_indices, features)
        # Keep the index tensor for backward; only the feature *shape* is
        # needed there, so the features themselves are not saved.
        ctx.save_for_backward(path_indices)
        ctx.size = features.size()
        return output
    def backward(ctx, grad_output):
        # The extension accumulates into a zero-initialized gradient buffer.
        grad_input = grad_output.new_zeros(ctx.size)
        # NOTE(review): ctx.saved_variables is deprecated in favour of
        # ctx.saved_tensors in current torch.
        if grad_output.is_cuda:
            gckn_fast_cuda.path_conv_backward(grad_input, grad_output.contiguous(), *ctx.saved_variables)
        else:
            gckn_fast_cpu.path_conv_backward(grad_input, grad_output.contiguous(), *ctx.saved_variables)
        # No gradient w.r.t. the integer path indices.
        return (None, grad_input) |
def try_or_nothing(func):
try:
return func()
except Exception as e:
print(type(Exception))
print(e) |
class L1(Loss):
def __init__(self):
self.loss = nn.L1Loss()
def __call__(self, logits, targets, **kwargs):
return self.loss(logits, targets) |
def setup_logger(log_filename: Pathlike, log_level: str='info', use_console: bool=True) -> None:
now = datetime.now()
date_time = now.strftime('%Y-%m-%d-%H-%M-%S')
log_filename = '{}-{}'.format(log_filename, date_time)
os.makedirs(os.path.dirname(log_filename), exist_ok=True)
if (dist.is_available() and dist.is_initialized()):
world_size = dist.get_world_size()
rank = dist.get_rank()
formatter = f'%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] ({rank}/{world_size}) %(message)s'
else:
formatter = '%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s'
level = logging.ERROR
if (log_level == 'debug'):
level = logging.DEBUG
elif (log_level == 'info'):
level = logging.INFO
elif (log_level == 'warning'):
level = logging.WARNING
logging.basicConfig(filename=log_filename, format=formatter, level=level, filemode='w')
if use_console:
console = logging.StreamHandler()
console.setLevel(level)
console.setFormatter(logging.Formatter(formatter))
logging.getLogger('').addHandler(console) |
def cca_decomp(A, B):
assert (A.shape[0] < A.shape[1])
assert (B.shape[0] < B.shape[1])
(evals_a, evecs_a) = np.linalg.eigh((A A.T))
evals_a = ((evals_a + np.abs(evals_a)) / 2)
inv_a = np.array([((1 / np.sqrt(x)) if (x > 0) else 0) for x in evals_a])
(evals_b, evecs_b) = np.linalg.eigh((B B.T))
evals_b = ((evals_b + np.abs(evals_b)) / 2)
inv_b = np.array([((1 / np.sqrt(x)) if (x > 0) else 0) for x in evals_b])
cov_ab = (A B.T)
temp = ((((evecs_a np.diag(inv_a)) evecs_a.T) cov_ab) ((evecs_b np.diag(inv_b)) evecs_b.T))
try:
(u, s, vh) = np.linalg.svd(temp)
except:
(u, s, vh) = np.linalg.svd((temp * 100))
s = (s / 100)
transformed_a = ((u.T ((evecs_a np.diag(inv_a)) evecs_a.T)) A).T
transformed_b = ((vh ((evecs_b np.diag(inv_b)) evecs_b.T)) B).T
return (u, s, vh, transformed_a, transformed_b) |
class SubMGroup3d(SparseGroup):
def __init__(self, in_channels, kernel_size, stride=1, padding=0, dilation=1, indice_key=None):
super(SubMGroup3d, self).__init__(3, in_channels, kernel_size, stride, padding, dilation, True, indice_key=indice_key) |
# Auto-generated dummy: stands in for the real class when torch is not
# installed and raises a helpful ImportError on instantiation.
class TrOCRForCausalLM(metaclass=DummyObject):
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch']) |
def get_config_overrides(config_class, processors):
    """Derive config overrides (vocab sizes and special-token ids) that make
    ``config_class`` consistent with the tokenizer found in ``processors``.

    Returns an empty dict when no tokenizer is present.
    """
    config_overrides = {}
    tokenizer = None
    # Prefer a fast tokenizer; fall back to the last slow one seen.
    for processor in processors:
        if isinstance(processor, PreTrainedTokenizerFast):
            tokenizer = processor
            break
        elif isinstance(processor, PreTrainedTokenizer):
            tokenizer = processor
    if (tokenizer is None):
        return config_overrides
    vocab_size = len(tokenizer)
    config_overrides['vocab_size'] = vocab_size
    model_tester_kwargs = {'vocab_size': vocab_size}
    # Composite (vision+text) configs take the vocab size nested under
    # text_kwargs rather than at the top level.
    if (config_class.__name__ in ['AlignConfig', 'AltCLIPConfig', 'ChineseCLIPConfig', 'CLIPSegConfig', 'ClapConfig', 'CLIPConfig', 'GroupViTConfig', 'OwlViTConfig', 'XCLIPConfig', 'FlavaConfig', 'BlipConfig', 'Blip2Config']):
        del model_tester_kwargs['vocab_size']
        model_tester_kwargs['text_kwargs'] = {'vocab_size': vocab_size}
    elif (config_class.__name__ == 'FSMTConfig'):
        # FSMT keeps separate source/target vocabularies.
        del model_tester_kwargs['vocab_size']
        model_tester_kwargs['src_vocab_size'] = tokenizer.src_vocab_size
        model_tester_kwargs['tgt_vocab_size'] = tokenizer.tgt_vocab_size
    _tiny_config = get_tiny_config(config_class, **model_tester_kwargs)
    if hasattr(_tiny_config, 'text_config'):
        _tiny_config = _tiny_config.text_config
    # Re-map every `*_token_id` attribute to the id the tokenizer actually uses.
    for attr in dir(_tiny_config):
        if attr.endswith('_token_id'):
            token_id = getattr(_tiny_config, attr)
            if (token_id is not None):
                token_id = get_token_id_from_tokenizer(attr, tokenizer, original_token_id=token_id)
                config_overrides[attr] = token_id
    if (config_class.__name__ == 'FSMTConfig'):
        config_overrides['src_vocab_size'] = tokenizer.src_vocab_size
        config_overrides['tgt_vocab_size'] = tokenizer.tgt_vocab_size
        # FSMT also nests a decoder config keyed to the target vocabulary.
        config_overrides['decoder'] = configuration_fsmt.DecoderConfig(vocab_size=tokenizer.tgt_vocab_size, bos_token_id=config_overrides['eos_token_id'])
    return config_overrides |
def main(params: Params):
rng = np.random.RandomState(1001)
torch.manual_seed(rng.choice())
(molchef_wae, latent_dim, stop_symbol_idx) = load_in_mchef(params.weights_to_use, cuda_details=params.cuda_details, path_molecule_details=params.path_mol_details)
seq_to_smi_list = mt.MapSeqsToReactants()
trsfm = symbol_sequence_data.TrsfmSeqStrToArray(symbol_sequence_data.StopSymbolDetails(True, stop_symbol_idx), shuffle_seq_flag=True, rng=rng)
reaction_bags_dataset = symbol_sequence_data.SymbolSequenceDataset(params.path_react_bags_train, trsfm)
print('starting from training examples.')
zs_to_start_from_train_data = []
indices_to_use = (list(range(10)) + rng.permutation(len(reaction_bags_dataset))[:(params.num_molecules_to_optimize - 10)].tolist())
for i in tqdm.tqdm(indices_to_use, desc='creating initial starting locations'):
sequence_batch_first = reaction_bags_dataset[i][0]
sequence_batch_first = torch.from_numpy(sequence_batch_first).view(1, (- 1))
lengths = torch.tensor([sequence_batch_first.shape[1]])
packed_seq = rnn.pack_padded_sequence(sequence_batch_first, lengths, batch_first=True)
packed_seq = packed_seq.to(params.cuda_details.device_str)
z_sample = molchef_wae._run_through_to_z(packed_seq)
zs_to_start_from_train_data.append(z_sample)
results = collections.defaultdict(list)
searches = [('random_search', LocalSearchRunner(True, molchef_wae.prop_predictor_, molchef_wae, seq_to_smi_list, params)), ('prop_opt', LocalSearchRunner(False, molchef_wae.prop_predictor_, molchef_wae, seq_to_smi_list, params))]
for (search_name, searcher) in searches:
print(f'Doing {search_name}')
init_points = zs_to_start_from_train_data
for initial_z in tqdm.tqdm(init_points):
results[search_name].append(searcher.optimize_z(initial_z, params.num_distinct_molecule_steps, params.epsilon))
with open('local_search_results.pick', 'wb') as fo:
pickle.dump(results, fo)
all_reactant_bags = set()
for results_for_search_type in results.values():
for individual_run_results in results_for_search_type:
reactant_strs = individual_run_results[2]
all_reactant_bags.update(reactant_strs)
tokenized_sampled_reactants = [mt.tokenization(smi_str) for smi_str in all_reactant_bags if len(smi_str)]
with open('opt.tokenized-reactant.txt', 'w') as fo:
fo.writelines('\n'.join(tokenized_sampled_reactants)) |
def xdensenet40_2_k36_bc_cifar10(num_classes=10, **kwargs):
return get_xdensenet_cifar(num_classes=num_classes, blocks=40, growth_rate=36, bottleneck=True, model_name='xdensenet40_2_k36_bc_cifar10', **kwargs) |
def AddFinalLayer(config_lines, input, output_dim, ng_affine_options=' param-stddev=0 bias-stddev=0 ', max_change_per_component=1.5, label_delay=None, use_presoftmax_prior_scale=False, prior_scale_file=None, include_log_softmax=True, add_final_sigmoid=False, name_affix=None, objective_type='linear'):
    """Append the final affine (+ optional prior-scale / softmax / sigmoid)
    layers and the output node to a Kaldi nnet3 config.

    Mutates ``config_lines`` in place ('components' and 'component-nodes').
    """
    components = config_lines['components']
    component_nodes = config_lines['component-nodes']
    # Distinct prefix so multiple final layers can coexist in one config.
    if (name_affix is not None):
        final_node_prefix = ('Final-' + str(name_affix))
    else:
        final_node_prefix = 'Final'
    prev_layer_output = AddAffineLayer(config_lines, final_node_prefix, input, output_dim, ng_affine_options, max_change_per_component)
    if include_log_softmax:
        if use_presoftmax_prior_scale:
            # Fixed per-class scaling (from the prior file) applied just
            # before the softmax.
            components.append('component name={0}-fixed-scale type=FixedScaleComponent scales={1}'.format(final_node_prefix, prior_scale_file))
            component_nodes.append('component-node name={0}-fixed-scale component={0}-fixed-scale input={1}'.format(final_node_prefix, prev_layer_output['descriptor']))
            prev_layer_output['descriptor'] = '{0}-fixed-scale'.format(final_node_prefix)
        prev_layer_output = AddSoftmaxLayer(config_lines, final_node_prefix, prev_layer_output)
    elif add_final_sigmoid:
        # Sigmoid output (e.g. for per-dimension objectives); mutually
        # exclusive with the log-softmax path above.
        prev_layer_output = AddSigmoidLayer(config_lines, final_node_prefix, prev_layer_output)
    AddOutputLayer(config_lines, prev_layer_output, label_delay, suffix=name_affix, objective_type=objective_type) |
def create_segmentation_file(img_subdir, anns_subdir, output_subdir, dataset_descriptor, metadata_input, object_input, relationship_input, attribute_synsets_input, attribute_dict_file_dir, attribute_dict_file_path, output_json_path, num_workers=20):
    """Build per-image pseudo-segmentations from Visual-Genome objects and
    relationships and dump them to ``output_json_path``.

    Leaf objects get their bounding box as the segmentation; parent objects
    get the union of their children's segmentations. Work is spread over
    ``num_workers`` daemon threads fed from a queue.
    NOTE(review): several parameters (img_subdir, anns_subdir, output_subdir,
    dataset_descriptor, metadata_input, attribute_synsets_input,
    attribute_dict_file_dir) are unused here — presumably kept for a shared
    caller signature; confirm.
    """
    obj_data = json.load(open(object_input))
    rel_data = json.load(open(relationship_input))
    attr_data = json.load(open(attribute_dict_file_path))
    # The object and relationship files must describe the same image list,
    # in the same order (checked per-index inside the worker too).
    assert (len(obj_data) == len(rel_data)), f'Expected #objects = #relations, got: {len(obj_data)} objects, {len(rel_data)} relations'
    n_images = len(obj_data)
    segm_map = {}
    idxs = list(range(n_images))
    lock = Lock()
    q = Queue()
    for (i, idx) in enumerate(idxs):
        q.put((i, idx))
    def worker():
        # Each thread pulls (progress counter, image index) pairs until the
        # queue drains; results are merged into segm_map under the lock.
        while True:
            (i, idx) = q.get()
            if ((i % 100) == 0):
                print(('[Segmentation-Generation] processed %i images...' % i))
            image_id = obj_data[idx]['image_id']
            assert (image_id == rel_data[idx]['image_id']), f'Expected ordered objects and relationships, mismatch at index {idx}'
            segm_map_entry = {}
            # children_map encodes the part-of hierarchy derived from the
            # image's relationships; id_list is ordered so children precede
            # their parents.
            (children_map, parent_map, bbox_map, id_list, _) = preprocessRelations(obj_data[idx], rel_data[idx])
            for id in id_list:
                image_id = bbox_map[id]['image_id']
                name = bbox_map[id]['names'][0]
                category_id = attr_data['label_to_idx'][name.replace('_', '').lower()]
                if (len(bbox_map[id]['names']) != 1):
                    print(f'Object {id} has multiple names: {bbox_map[id]}')
                segm_map_entry[id] = {'segmentation': [], 'object_id': id, 'category_id': category_id, 'image_id': bbox_map[id]['image_id'], 'names': bbox_map[id]['names']}
                if (len(children_map[id]) == 0):
                    # Leaf object: its own bounding box is the segmentation.
                    bboxSegm = to_segmentation(bbox_map[id])
                    segm_map_entry[id]['segmentation'] = [bboxSegm]
                else:
                    # Parent object: union of the children's segmentations.
                    for child_id in children_map[id]:
                        if (child_id not in segm_map_entry):
                            print(f'Object {child_id} not in segmentation map')
                            continue
                        segm_map_entry[id]['segmentation'] = (segm_map_entry[id]['segmentation'] + segm_map_entry[child_id]['segmentation'])
            obj_ids = []
            empty_index_entry = {}
            for id in segm_map_entry:
                obj_ids.append(segm_map_entry[id])
            obj_ids.sort(key=(lambda x: x['object_id']))
            empty_index_entry['empty_index'] = obj_ids
            # Only the shared-map insert needs the lock.
            lock.acquire()
            segm_map[image_id] = empty_index_entry
            lock.release()
            q.task_done()
    for i in range(num_workers):
        t = Thread(target=worker)
        t.daemon = True
        t.start()
    # Block until every queued image has been processed.
    q.join()
    with open(output_json_path, 'w') as obj_out_file:
        json.dump(segm_map, obj_out_file)
    print('constructed segmentations') |
def render_mesh(mesh, mesh_center):
scene = mi.load_dict({'type': 'scene', 'integrator': {'type': 'path'}, 'light': {'type': 'constant', 'radiance': {'type': 'rgb', 'value': 1.0}}, 'sensor': {'type': 'perspective', 'focal_length': '50mm', 'to_world': mi.ScalarTransform4f.look_at(origin=[0, 0, 5], target=mesh_center, up=[0, 1, 0]), 'thefilm': {'type': 'hdrfilm', 'width': 1024, 'height': 768}, 'thesampler': {'type': 'multijitter', 'sample_count': 64}}, 'themesh': mesh})
img = mi.render(scene, spp=256)
return img |
# Auto-generated dummy: stands in for the real class when torch is not
# installed and raises a helpful ImportError on instantiation.
class LxmertXLayer(metaclass=DummyObject):
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch']) |
def create_local_var(primary, val, scope, validate_shape, shape, dtype):
    """Create a local (slot-like) variable mirroring ``primary``.

    Handles both KvVariable primaries (embedding-style key/value variables)
    and ordinary TF variables; partitioners are temporarily disabled so the
    local variable is created unpartitioned.
    """
    # When `val` is an initializer callable, the shape must be given
    # explicitly; a concrete tensor value carries its own shape.
    shape = (shape if callable(val) else None)
    if isinstance(primary, kv_variable_ops.KvVariable):
        if (shape is not None):
            # For kv variables only the embedding dimension (2nd axis) matters.
            shape = tensor_shape.as_shape([shape.as_list()[1]])
        current_partitioner = get_kv_variable_scope_store().current_scope.partitioner
        get_kv_variable_scope_store().current_scope.set_partitioner(None)
        local_var = get_kv_variable(scope, embedding_dim=shape, initializer=val, key_dtype=primary.key_dtype, value_dtype=primary.dtype, collections=[ops.GraphKeys.LOCAL_VARIABLES], trainable=False)
        # Restore the caller's partitioner before returning.
        get_kv_variable_scope_store().current_scope.set_partitioner(current_partitioner)
        return local_var
    current_partitioner = tf_variable_scope.get_variable_scope().partitioner
    tf_variable_scope.get_variable_scope().set_partitioner(None)
    local_var = tf_variable_scope.get_variable(scope, initializer=val, trainable=False, use_resource=resource_variable_ops.is_resource_variable(primary), shape=shape, dtype=dtype, collections=[ops.GraphKeys.LOCAL_VARIABLES], validate_shape=validate_shape)
    tf_variable_scope.get_variable_scope().set_partitioner(current_partitioner)
    # If the primary is a slice of a partitioned variable, propagate the
    # slice info so checkpointing maps the local var to the right slice.
    if (isinstance(primary, variables.Variable) and primary._save_slice_info):
        real_var_name = local_var.name[len((primary.op.name + '/')):(- 2)]
        slice_info = primary._save_slice_info
        local_var._set_save_slice_info(variables.Variable.SaveSliceInfo(((slice_info.full_name + '/') + real_var_name), slice_info.full_shape[:], slice_info.var_offset[:], slice_info.var_shape[:]))
    return local_var |
class MNIST(Dataset):
def __str__(self):
return 'MNIST Dataset'
def __init__(self, target_size, dataset_path='./datasets/mnist', train_transforms=None, test_transforms=None):
self.mean = (0.1307,)
self.std = (0.3081,)
self.num_classes = 10
super(MNIST, self).__init__(target_size=target_size, dataset_path=dataset_path, mean=self.mean, std=self.std, train_transforms=train_transforms, test_transforms=test_transforms)
print('Preparing {} and storing data in {}'.format(str(self), dataset_path))
self.train_dataset = datasets.MNIST(self.dataset_path, train=True, download=True, transform=transforms.Compose(self.train_transforms))
self.val_dataset = datasets.MNIST(self.dataset_path, train=False, download=True, transform=transforms.Compose(self.test_transforms))
self.test_dataset = datasets.MNIST(self.dataset_path, train=False, download=True, transform=transforms.Compose(self.test_transforms)) |
class Logger(object):
def __init__(self, logdir='./log'):
self.writer = SummaryWriter(logdir)
def scalar_summary(self, tag, value, step):
self.writer.add_scalar(tag, value, step)
def scalars_summary(self, tag, dictionary, step):
self.writer.add_scalars(tag, dictionary, step)
def text_summary(self, tag, value, step):
self.writer.add_text(tag, value, step)
def audio_summary(self, tag, value, step, sr):
writer.add_audio(tag, value, step, sample_rate=sr) |
class _3DUNET_TF_SUT():
    """MLPerf loadgen System-Under-Test wrapping a frozen TF 3D-UNet graph
    for the BraTS benchmark.
    """
    def __init__(self, model_path, preprocessed_data_dir, performance_count):
        # Load the frozen GraphDef and import it into a fresh graph; tensor
        # names below get the default 'import/' prefix.
        print('Loading TF model...')
        graph_def = graph_pb2.GraphDef()
        print(model_path)
        with open(model_path, 'rb') as f:
            graph_def.ParseFromString(f.read())
        with tf.Graph().as_default() as g:
            tf.compat.v1.import_graph_def(graph_def)
        self.sess = tf.compat.v1.Session(graph=g)
        self.input = g.get_tensor_by_name('import/input:0')
        self.output = g.get_tensor_by_name('import/output:0')
        print('Constructing SUT...')
        self.sut = lg.ConstructSUT(self.issue_queries, self.flush_queries, self.process_latencies)
        # Query Sample Library serving preprocessed BraTS volumes.
        self.qsl = get_brats_QSL(preprocessed_data_dir, performance_count)
        print('Finished constructing SUT.')
    def issue_queries(self, query_samples):
        # Loadgen callback: run each queried sample through the session and
        # report the (float16) result buffer back, one response at a time.
        for i in range(len(query_samples)):
            data = self.qsl.get_features(query_samples[i].index)
            print('Processing sample id {:d} with shape = {:}'.format(query_samples[i].index, data.shape))
            # Add the batch axis expected by the graph input.
            output = self.sess.run(self.output, feed_dict={self.input: data[(np.newaxis, ...)]})[0].astype(np.float16)
            # Loadgen takes a raw (pointer, length) view of the result bytes.
            response_array = array.array('B', output.tobytes())
            bi = response_array.buffer_info()
            response = lg.QuerySampleResponse(query_samples[i].id, bi[0], bi[1])
            lg.QuerySamplesComplete([response])
    def flush_queries(self):
        # Nothing buffered; required by the loadgen SUT interface.
        pass
    def process_latencies(self, latencies_ns):
        # Latency reporting not used; required by the loadgen SUT interface.
        pass |
def _create_rdd_x_y(x, y, input_names, output_names, sc):
from tensorflow.python.keras.engine import training_utils
x = training_utils.standardize_input_data(x, input_names, check_batch_axis=False, exception_prefix='input')
y = training_utils.standardize_input_data(y, output_names, shapes=None, check_batch_axis=False, exception_prefix='target')
num_samples = x[0].shape[0]
num_inputs = len(x)
num_targets = len(y)
input_data = []
for i in range(num_samples):
inputs = []
for j in range(num_inputs):
inputs.append(x[j][i])
targets = []
for j in range(num_targets):
if (y[j][i].ndim == 0):
targets.append(np.expand_dims(y[j][i], axis=1))
else:
targets.append(y[j][i])
input_data.append((inputs, targets))
x_meta = dict([(input_names[i], (input_data[0][0][i].dtype, input_data[0][0][i].shape)) for i in range(len(input_names))])
y_meta = dict([(output_names[i], (input_data[0][1][i].dtype, input_data[0][1][i].shape)) for i in range(len(input_names))])
rdd = sc.parallelize(input_data)
return (rdd, x_meta, y_meta) |
def test_scrape2(snapshot):
    # Snapshot test: the CSV dump of a one-day EIA scrape must stay stable.
    assert (eia_api_v2.scrape('2020-07-10', '2020-07-11').to_csv() == snapshot(name='Output of scrape for July 10th 2020')) |
def clean_up_SPICE(file):
for ext in ['.asc', '.masterlog', '.net', '_run.net', '_run.op.raw', '_run.raw', '1.log']:
os.system(f'rm {file}{ext}') |
def parse_response(response, current_name, user_name, names, action_delim):
response = response.split('REMINDER:')[0].strip()
response = response.split('RULES:')[0].strip()
match = re.search('(NEXT:\\s*([^\\n]+))', response)
name = user_name
if (not match):
logger.warning(f"Didn't generate NEXT target: {response}")
if (current_name == user_name):
name = random.choice(list((set(names) - set([user_name]))))
else:
response = response.replace(match.group(1), '').strip()
name = match.group(2).strip().replace('"', '')
if response.startswith(f'{current_name}:'):
response = response[(len(current_name) + 1):].lstrip()
response = response.replace('', '"').replace('', '"')
other_names = (set(names) - set([current_name]))
if (current_name != user_name):
other_names.add(user_name)
other_names_re = (('\n(' + '|'.join([str(re.escape(name)) for name in other_names])) + '):')
response = re.split(other_names_re, response)[0]
if action_delim:
action = re.escape(action_delim)
response = re.sub(f'({action})([\.,\s-]*){action}\s*"', '\\1 "', response)
response = re.sub(f'"([\W]{(0, 2)})"({action})', '"\\1\\2', response)
response = re.sub(f'({action})"([\W]{(0, 2)})"', '\\1\\2"', response)
response = re.sub(f'{action}\(|\){action}', action_delim, response)
response = re.sub(f'[,\.]{action}', action_delim, response)
response = re.sub(f'({action})[,\.](\s)', '\\1\\2', response)
response = re.sub(' +', ' ', response)
if (name not in names):
matches = get_close_matches(name, list((set(names) | set([user_name]))))
if (not matches):
name = random.choice(names)
else:
name = matches[0]
if (name not in (list(names) + [user_name])):
if current_name.startswith(user_name):
name = random.choice(names)
else:
name = user_name
return (response, name) |
def map_to_generation_datapoint(patch: AvgPatch) -> GenerationDatapoint:
n_unfinshed = utils.binary_bool(patch.is_unfinished)
return GenerationDatapoint(gen_time=patch.total_gen_time, n_total=1, n_unique=utils.binary_bool((not patch.is_duplicate)), n_unfinished=n_unfinshed, n_pruned=utils.binary_bool((patch.is_pruned and (not patch.is_unfinished)))) |
def device_analysis_options(output_dir):
options = model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS.copy()
options['select'] = ['device', 'float_ops', 'micros']
options['order_by'] = 'name'
options['account_type_regexes'] = ['.*']
if output_dir:
options['dump_to_file'] = os.path.join(output_dir, 'device.txt')
return ('scope', options) |
class Learner(object):
    """Coordinates a population of ES-style policies trained by ray workers,
    tracking per-agent rewards, embeddings, Adam state and pairwise
    embedding distances used for diversity-aware agent selection.
    """
    def __init__(self, params):
        params['zeros'] = False
        # One independently-seeded policy per agent.
        self.agents = {i: get_policy(params, (params['seed'] + (1000 * i))) for i in range(params['num_agents'])}
        self.timesteps = 0
        self.w_reward = 1
        self.w_size = 0
        self.dists = 0
        # Per-agent Adam moments [m, v].
        self.adam_params = {i: [0, 0] for i in range(params['num_agents'])}
        self.buffer = []
        self.states = []
        self.embeddings = {i: [] for i in range(params['num_agents'])}
        # -9999 acts as the "no score yet" sentinel throughout.
        self.best = {i: (- 9999) for i in range(params['num_agents'])}
        self.reward = {i: [(- 9999)] for i in range(params['num_agents'])}
        self.min_dist = 0
        self.num_workers = params['num_workers']
        self.init_workers(params)
    def init_workers(self, params):
        # The noise table lives in the ray object store and is shared
        # (read-only) by every worker.
        deltas_id = create_shared_noise.remote()
        self.deltas = SharedNoiseTable(ray.get(deltas_id), seed=(params['seed'] + 3))
        self.workers = [Worker.remote((params['seed'] + (7 * i)), env_name=params['env_name'], policy=params['policy'], h_dim=params['h_dim'], layers=params['layers'], deltas=deltas_id, rollout_length=params['steps'], delta_std=params['sigma'], num_evals=params['num_evals'], ob_filter=params['ob_filter']) for i in range(params['num_workers'])]
    def get_agent(self):
        # Check out the currently-selected agent's state into working copies.
        self.policy = deepcopy(self.agents[self.agent])
        self.embedding = self.embeddings[self.agent].copy()
        self.m = self.adam_params[self.agent][0]
        self.v = self.adam_params[self.agent][1]
    def update_agent(self):
        # Write the working copies back into the selected agent's slot.
        self.agents[self.agent] = deepcopy(self.policy)
        self.embeddings[self.agent] = self.embedding.copy()
        self.adam_params[self.agent] = [self.m, self.v]
    # NOTE(review): mutable default `data=[]` — harmless here since `data`
    # is only read, but worth replacing with None at some point.
    def update_embeddings(self, params, data=[]):
        for j in range(params['num_agents']):
            if (params['embedding'] == 'a_s'):
                self.embeddings[j] = [embed(params, [], self.agents[j], self.selected)]
            else:
                self.embeddings[j] = [embed(params, s, self.agents[j], self.selected) for s in data[j][1]]
    def calc_pairwise_dists(self, params):
        # Euclidean distances between the first embedding of every agent pair.
        dists = np.zeros([params['num_agents'], params['num_agents']])
        min_dist = 999
        for i in range(params['num_agents']):
            for j in range(params['num_agents']):
                dists[i][j] = np.linalg.norm((self.embeddings[i][0] - self.embeddings[j][0]))
                if ((i != j) & (dists[i][j] < min_dist)):
                    min_dist = dists[i][j]
        self.dists = np.mean(dists)
        self.min_dist = min_dist
        # Per-agent mean distance, normalized into a probability vector.
        self.dist_vec = np.mean(dists, axis=1)
        self.dist_vec /= np.sum(self.dist_vec)
    def select_agent(self):
        # Once every agent has a real reward, sample proportionally to the
        # average of reward rank and diversity (distance) rank; before that,
        # sample on diversity alone.
        if (min([x[(- 1)] for x in list(self.reward.values())]) > (- 9999)):
            reward_vec = rankdata([max(x[(- 5):]) for x in list(self.reward.values())])
            reward_vec /= np.sum(reward_vec)
            dist_vec = rankdata(self.dist_vec)
            dist_vec /= np.sum(dist_vec)
            vec = ((dist_vec + reward_vec) / 2)
            self.agent = np.argmax(np.random.multinomial(1, vec))
        else:
            self.agent = np.argmax(np.random.multinomial(1, self.dist_vec)) |
def get_ort_model_output(feat, onnx_io='tmp.onnx'):
onnx_model = onnx.load(onnx_io)
onnx.checker.check_model(onnx_model)
session_options = ort.SessionOptions()
if osp.exists(ort_custom_op_path):
session_options.register_custom_ops_library(ort_custom_op_path)
sess = ort.InferenceSession(onnx_io, session_options)
if isinstance(feat, torch.Tensor):
onnx_outputs = sess.run(None, {sess.get_inputs()[0].name: feat.numpy()})
else:
onnx_outputs = sess.run(None, {sess.get_inputs()[i].name: feat[i].numpy() for i in range(len(feat))})
return onnx_outputs |
def test_try_parse_percentage_column_known() -> None:
    # 'CS' is declared a percentage column, so the string '50' parses to 0.5.
    assert (postprocessing.try_parse('50', 'CS', known_percentages=['CS']) == 0.5) |
def conv1x1(in_planes: int, out_planes: int, stride: int=1, bias: bool=False) -> nn.Conv2d:
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=bias) |
def double_double_predict_correct(vrblvl=0):
    """Invoke PHCpack job 862 (predict-correct step in double double
    precision) through the shared-library interface.

    Returns the C function's integer return value (0 presumably means
    success — confirm against the PHCpack job table).
    """
    if (vrblvl > 0):
        print('in double_double_predict_correct ...')
    phc = get_phcfun()
    # ctypes argument slots for the generic phc() dispatcher; only the job
    # number (862) selects the operation — a=1, b=verbose level, c unused.
    apar = pointer(c_int32(1))
    bvrb = pointer(c_int32(vrblvl))
    ccc = pointer(c_double(0.0))
    vrb = c_int32(vrblvl)
    if (vrblvl > 0):
        print('-> double_double_predict_correct calls phc', end='')
    retval = phc(862, apar, bvrb, ccc, vrb)
    if (vrblvl > 0):
        print(', return value :', retval)
    return retval |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.