code stringlengths 101 5.91M |
|---|
class NetworksTest(tf.test.TestCase):
    """Smoke-tests every network registered in nets_factory.

    Each network is built in a fresh graph and its logits are checked for the
    expected batch and class dimensions. The registry is split across two test
    methods (first/second half) so neither exceeds the per-test time budget.
    """

    def _check_networks(self, net_names):
        """Build each named network and verify logits shape and end_points type.

        Shared implementation for the two public test methods below (the two
        original methods were byte-for-byte duplicates except for the slice).
        """
        batch_size = 5
        num_classes = 1000
        for net in net_names:
            with tf.Graph().as_default() as g, self.test_session(g):
                net_fn = nets_factory.get_network_fn(net, num_classes)
                # Networks advertise a preferred input size; fall back to 224.
                image_size = getattr(net_fn, 'default_image_size', 224)
                inputs = tf.random_uniform((batch_size, image_size, image_size, 3))
                logits, end_points = net_fn(inputs)
                self.assertTrue(isinstance(logits, tf.Tensor))
                self.assertTrue(isinstance(end_points, dict))
                self.assertEqual(logits.get_shape().as_list()[0], batch_size)
                self.assertEqual(logits.get_shape().as_list()[-1], num_classes)

    def testGetNetworkFnFirstHalf(self):
        self._check_networks(list(nets_factory.networks_map.keys())[:10])

    def testGetNetworkFnSecondHalf(self):
        self._check_networks(list(nets_factory.networks_map.keys())[10:])
def coreset(run_dir: str = './run',
            datasets_dir: str = './data',
            dataset: str = 'amazon_review_polarity',
            validation: int = 0,
            shuffle: bool = True,
            arch: str = 'vdcnn9-maxpool',
            optimizer: str = 'sgd',
            epochs: Tuple[int, ...] = (3, 3, 3, 3, 3),
            learning_rates: Tuple[float, ...] = (0.01, 0.005, 0.0025, 0.00125, 0.000625),
            momentum: float = 0.9,
            weight_decay: float = 0.0001,
            batch_size: int = 128,
            eval_batch_size: int = 128,
            proxy_arch: str = 'preact20',
            proxy_optimizer: str = 'sgd',
            proxy_epochs: Tuple[int, ...] = (3, 3, 3, 3, 3),
            proxy_learning_rates: Tuple[float, ...] = (0.01, 0.005, 0.0025, 0.00125, 0.000625),
            proxy_momentum: float = 0.9,
            proxy_weight_decay: float = 0.0001,
            proxy_batch_size: int = 128,
            proxy_eval_batch_size: int = 128,
            subset: int = 1800000,
            selection_method: str = 'least_confidence',
            precomputed_selection: Optional[str] = None,
            train_target: bool = True,
            cuda: bool = True,
            device_ids: Optional[Tuple[int, ...]] = None,
            num_workers: int = 0,
            eval_num_workers: int = 0,
            seed: Optional[int] = None,
            checkpoint: str = 'best',
            track_test_acc: bool = True):
    """Run a core-set selection experiment.

    Phase 1 trains a cheap proxy model and uses it to select `subset` training
    examples via `selection_method` (unless `precomputed_selection` points to a
    previous run's indices). Phase 2 optionally trains the target `arch` on the
    selected subset. All artifacts (config, indices, CSV stats) are written
    under `run_dir`.

    device_ids: GPUs to use; None means all visible GPUs. BUG FIX: the old
    default, `tuple(range(cuda.device_count()))`, was evaluated once at import
    time and referenced the name `cuda` — which this signature shadows with a
    bool — so it is now resolved lazily inside the function.
    """
    seed = utils.set_random_seed(seed)
    # NOTE: locals() here contains exactly the parameters (plus the resolved
    # seed); device_ids may still be None in the saved config, meaning "all".
    config = utils.capture_config(**locals())
    run_dir = utils.create_run_dir(run_dir, timestamp=config['timestamp'])
    utils.save_config(config, run_dir)
    if device_ids is None:
        import torch  # local import so it does not pollute locals() above
        device_ids = tuple(range(torch.cuda.device_count()))
    (use_cuda, device, device_ids, num_workers) = utils.config_run_env(cuda=cuda, device_ids=device_ids, num_workers=num_workers)
    train_dataset = create_dataset(dataset, datasets_dir, train=True)
    validate_splits(train_dataset, validation, subset)
    test_dataset = None
    if track_test_acc:
        test_dataset = create_dataset(dataset, datasets_dir, train=False)
    num_classes = train_dataset.classes
    if precomputed_selection is None:
        # Phase 1: train the proxy model and use it to score/select examples.
        proxy_run_dir = os.path.join(run_dir, 'proxy')
        os.makedirs(proxy_run_dir, exist_ok=True)
        (train_indices, dev_indices) = utils.split_indices(train_dataset, validation, proxy_run_dir, shuffle=shuffle)
        (train_loader, dev_loader, test_loader) = create_loaders(train_dataset, batch_size=proxy_batch_size, eval_batch_size=proxy_eval_batch_size, test_dataset=test_dataset, use_cuda=use_cuda, num_workers=num_workers, eval_num_workers=eval_num_workers, indices=(train_indices, dev_indices))
        (model, _proxy_optimizer) = create_model_and_optimizer(arch=proxy_arch, num_classes=num_classes, optimizer=proxy_optimizer, learning_rate=proxy_learning_rates[0], momentum=proxy_momentum, weight_decay=proxy_weight_decay, run_dir=proxy_run_dir)
        criterion = nn.CrossEntropyLoss()
        model = model.to(device)
        if use_cuda:
            model = nn.DataParallel(model, device_ids=device_ids)
        criterion = criterion.to(device)
        batch_callback: Optional[Callable] = None
        if selection_method == 'forgetting_events':
            # Track per-example forgetting statistics during proxy training.
            forgetting_meter = ForgettingEventsMeter(train_dataset)
            batch_callback = forgetting_meter.callback
        (model, proxy_accuracies, proxy_times) = run_training(model=model, optimizer=_proxy_optimizer, criterion=criterion, device=device, train_loader=train_loader, epochs=proxy_epochs, learning_rates=proxy_learning_rates, dev_loader=dev_loader, test_loader=test_loader, run_dir=proxy_run_dir, checkpoint=checkpoint, batch_callback=batch_callback)
        proxy_stats: Dict[str, Any] = OrderedDict()
        proxy_stats['nexamples'] = len(train_indices)
        proxy_stats['train_accuracy'] = proxy_accuracies.train
        proxy_stats['dev_accuracy'] = proxy_accuracies.dev
        proxy_stats['test_accuracy'] = proxy_accuracies.test
        proxy_stats['train_time'] = proxy_times.train
        proxy_stats['dev_time'] = proxy_times.dev
        proxy_stats['test_time'] = proxy_times.test
        utils.save_result(proxy_stats, os.path.join(run_dir, 'proxy.csv'))
        current = np.array([], dtype=np.int64)
        if selection_method == 'kcenters':
            # k-centers needs a non-empty seed set; start from 1000 random points
            # and shrink the remaining budget accordingly.
            assert subset > 1000
            current = np.random.permutation(train_indices)[:1000]
            subset = subset - len(current)
        nevents = None
        if selection_method == 'forgetting_events':
            nevents = forgetting_meter.nevents
            # Examples never answered correctly count as infinitely forgotten.
            nevents[~forgetting_meter.was_correct] = np.inf
        (target_train_indices, stats) = select(model, train_dataset, current=current, pool=train_indices, budget=subset, method=selection_method, batch_size=proxy_eval_batch_size, device=device, device_ids=device_ids, num_workers=num_workers, use_cuda=use_cuda, nevents=nevents)
        utils.save_index(target_train_indices, run_dir, 'selected.index')
        utils.save_index(dev_indices, run_dir, 'dev.index')
        utils.save_result(stats, os.path.join(run_dir, 'selection.csv'))
    else:
        # Reuse the index files produced by a previous run.
        assert train_target, 'Must train target if selection is precomputed'
        target_train_indices = np.loadtxt(os.path.join(precomputed_selection, 'selected.index'), dtype=np.int64)
        dev_indices = np.loadtxt(os.path.join(precomputed_selection, 'dev.index'), dtype=np.int64)
    if train_target:
        # Phase 2: train the target model on the selected subset.
        loaders = create_loaders(train_dataset, batch_size=batch_size, eval_batch_size=eval_batch_size, test_dataset=test_dataset, use_cuda=use_cuda, num_workers=num_workers, eval_num_workers=eval_num_workers, indices=(target_train_indices, dev_indices))
        (target_train_loader, target_dev_loader, target_test_loader) = loaders
        target_run_dir = os.path.join(run_dir, 'target')
        os.makedirs(target_run_dir, exist_ok=True)
        utils.save_index(target_train_indices, target_run_dir, 'train.index')
        utils.save_index(dev_indices, target_run_dir, 'dev.index')
        (model, _target_optimizer) = create_model_and_optimizer(arch=arch, num_classes=num_classes, optimizer=optimizer, learning_rate=learning_rates[0], momentum=momentum, weight_decay=weight_decay, run_dir=target_run_dir)
        criterion = nn.CrossEntropyLoss()
        model = model.to(device)
        if use_cuda:
            model = nn.DataParallel(model, device_ids=device_ids)
        criterion = criterion.to(device)
        (model, target_accuracies, target_times) = run_training(model=model, optimizer=_target_optimizer, criterion=criterion, device=device, train_loader=target_train_loader, epochs=epochs, learning_rates=learning_rates, dev_loader=target_dev_loader, test_loader=target_test_loader, run_dir=target_run_dir, checkpoint=checkpoint)
        target_stats: Dict[str, Any] = OrderedDict()
        target_stats['nexamples'] = len(target_train_indices)
        target_stats['train_accuracy'] = target_accuracies.train
        target_stats['dev_accuracy'] = target_accuracies.dev
        target_stats['test_accuracy'] = target_accuracies.test
        target_stats['train_time'] = target_times.train
        target_stats['dev_time'] = target_times.dev
        target_stats['test_time'] = target_times.test
        utils.save_result(target_stats, os.path.join(run_dir, 'target.csv'))
def instantiate_factored_mapping(pairs):
    """Enumerate all combined mappings for a sequence of (preimage, image) pairs.

    For each pair, every permutation of the image yields one candidate partial
    mapping (preimage element -> permuted image element); the cartesian product
    combines one candidate per pair into full mappings.
    """
    part_mappings = []
    for preimg, img in pairs:
        candidates = [list(zip(preimg, permuted))
                      for permuted in itertools.permutations(img)]
        part_mappings.append(candidates)
    return tools.cartesian_product(part_mappings)
def eval_one_epoch(cfg, model, dataloader, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None, logger_iter_interval=50):
    """Run one evaluation epoch over `dataloader` and return the metric dict.

    Predictions are collected batch by batch (merged across ranks when
    `dist_test`), pickled to `result_dir/result.pkl`, then scored through
    `dataset.evaluation`. Only rank 0 computes and returns metrics; other
    ranks return {}.

    NOTE(review): `result_dir` defaults to None but `.mkdir` is called on it
    unconditionally — callers apparently always pass a pathlib.Path; confirm.
    """
    result_dir.mkdir(parents=True, exist_ok=True)
    final_output_dir = ((result_dir / 'final_result') / 'data')
    if save_to_file:
        final_output_dir.mkdir(parents=True, exist_ok=True)
    dataset = dataloader.dataset
    logger.info((' EPOCH %s EVALUATION ' % epoch_id))
    if dist_test:
        # Wrap the model in DDP on this rank's GPU if the caller did not.
        if (not isinstance(model, torch.nn.parallel.DistributedDataParallel)):
            num_gpus = torch.cuda.device_count()
            local_rank = (cfg.LOCAL_RANK % num_gpus)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], broadcast_buffers=False)
    model.eval()
    if (cfg.LOCAL_RANK == 0):
        # Progress bar exists only on rank 0; the logging branch below is
        # guarded by the same rank check before touching it.
        progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True)
    start_time = time.time()
    pred_dicts = []
    for (i, batch_dict) in enumerate(dataloader):
        with torch.no_grad():
            batch_pred_dicts = model(batch_dict)
            final_pred_dicts = dataset.generate_prediction_dicts(batch_pred_dicts, output_path=(final_output_dir if save_to_file else None))
        pred_dicts += final_pred_dicts
        disp_dict = {}
        # Periodic progress logging: first batch, every Nth batch, last batch.
        if ((cfg.LOCAL_RANK == 0) and (((i % logger_iter_interval) == 0) or (i == 0) or ((i + 1) == len(dataloader)))):
            past_time = progress_bar.format_dict['elapsed']
            # max(i, 1.0) avoids division by zero on the first batch.
            second_each_iter = (past_time / max(i, 1.0))
            remaining_time = (second_each_iter * (len(dataloader) - i))
            disp_str = ', '.join([f'{key}={val:.3f}' for (key, val) in disp_dict.items() if (key != 'lr')])
            batch_size = batch_dict.get('batch_size', None)
            logger.info(f'eval: epoch={epoch_id}, batch_iter={i}/{len(dataloader)}, batch_size={batch_size}, iter_cost={second_each_iter:.2f}s, time_cost: {progress_bar.format_interval(past_time)}/{progress_bar.format_interval(remaining_time)}, {disp_str}')
    if (cfg.LOCAL_RANK == 0):
        progress_bar.close()
    if dist_test:
        # Gather per-rank prediction lists into one deduplicated list.
        logger.info(f'Total number of samples before merging from multiple GPUs: {len(pred_dicts)}')
        pred_dicts = common_utils.merge_results_dist(pred_dicts, len(dataset), tmpdir=(result_dir / 'tmpdir'))
        logger.info(f'Total number of samples after merging from multiple GPUs (removing duplicate): {len(pred_dicts)}')
    logger.info((' Performance of EPOCH %s ' % epoch_id))
    sec_per_example = ((time.time() - start_time) / len(dataloader.dataset))
    logger.info(('Generate label finished(sec_per_example: %.4f second).' % sec_per_example))
    if (cfg.LOCAL_RANK != 0):
        # Non-zero ranks only contribute predictions; metrics come from rank 0.
        return {}
    ret_dict = {}
    with open((result_dir / 'result.pkl'), 'wb') as f:
        pickle.dump(pred_dicts, f)
    (result_str, result_dict) = dataset.evaluation(pred_dicts, output_path=final_output_dir)
    logger.info(result_str)
    ret_dict.update(result_dict)
    logger.info(('Result is save to %s' % result_dir))
    logger.info('Evaluation done.')
    return ret_dict
def waymo_data_prep(root_path, info_prefix, version, out_dir, workers, max_sweeps=5):
    """Convert the Waymo Open Dataset to KITTI format and build info/GT files.

    Each split in `root_path/waymo_format` is converted into
    `out_dir/kitti_format`, then info pickles and the ground-truth database
    are generated from the converted training data.
    """
    from tools.data_converter import waymo_converter as waymo
    splits = ['training', 'validation', 'testing']
    for i, split in enumerate(splits):
        load_dir = osp.join(root_path, 'waymo_format', split)
        # Validation frames are merged into the KITTI-format 'training'
        # directory (KITTI has no separate validation split).
        if split == 'validation':
            save_dir = osp.join(out_dir, 'kitti_format', 'training')
        else:
            save_dir = osp.join(out_dir, 'kitti_format', split)
        # BUG FIX: the split is named 'testing', not 'test'. The old comparison
        # (split == 'test') was always False, so the testing split was converted
        # with test_mode disabled (i.e. expecting annotations it doesn't have).
        converter = waymo.Waymo2KITTI(load_dir, save_dir, prefix=str(i), workers=workers, test_mode=(split == 'testing'))
        converter.convert()
    out_dir = osp.join(out_dir, 'kitti_format')
    kitti.create_waymo_info_file(out_dir, info_prefix, max_sweeps=max_sweeps)
    create_groundtruth_database('WaymoDataset', out_dir, info_prefix, f'{out_dir}/{info_prefix}_infos_train.pkl', relative_path=False, with_mask=False)
class COCO_json(object):
    """Assembles a COCO-format annotation dictionary from per-set metadata JSONs.

    Call create_full_coco_json() to populate self.json with the standard COCO
    sections (info, images, annotations, categories, licenses).
    """

    def __init__(self, images_dir, save_dir, categories_dict, sets, images_names, meta_dir):
        self.images_dir = images_dir            # directory containing the image files
        self.save_dir = save_dir                # destination for the generated JSON
        self.categories_dict = categories_dict  # category name -> category id
        self.sets = sets                        # dataset splits (e.g. ['train', 'val'])
        self.images_names = images_names        # image file names to index
        self.meta_dir = meta_dir                # root holding '<meta_dir>/json/<set>_pairs_<cat>.json'

    def create_info(self, year=2019, version=1.0, desc='', contr='', url='', datetime=''):
        """Return the COCO 'info' header dict."""
        return {'year': year, 'version': version, 'description': desc, 'contributor': contr, 'url': url, 'date_created': datetime}

    def create_license(self, idx=0, name='', url=''):
        """Return a single COCO license entry."""
        return {'id': idx, 'name': name, 'url': url}

    def create_image_info(self, image_id, width, height, file_name, license=0, flickr_url='', coco_url='', data_captured=''):
        """Return one COCO 'images' entry (id/width/height coerced to int)."""
        return {'id': int(image_id), 'file_name': file_name, 'width': int(width), 'height': int(height), 'license': license, 'flickr_url': flickr_url, 'coco_url': coco_url, 'date_captured': data_captured}

    def create_images_info_all(self):
        """Populate self.images with one entry per name in self.images_names."""
        self.images = []
        for image_name in self.images_names:
            width, height = get_images_size(os.path.join(self.images_dir, image_name))
            img_id = get_image_id(image_name)
            self.images.append(self.create_image_info(image_id=img_id, width=width, height=height, file_name=image_name))

    def create_annotations(self, anno_id, image_id, category_id, bbox, segmentation='', area='', iscrowd=0):
        """Return one COCO 'annotations' entry."""
        return {'id': int(anno_id), 'image_id': int(image_id), 'category_id': int(category_id), 'segmentation': segmentation, 'area': area, 'bbox': bbox, 'iscrowd': int(iscrowd)}

    def create_categories(self, category_id, category_name, supercategory='fashion'):
        """Return one COCO 'categories' entry."""
        return {'id': category_id, 'name': category_name, 'supercategory': supercategory}

    def create_annotations_all(self, bbox_transform_func=None):
        """Populate self.annotations and self.categories from the metadata JSONs.

        bbox_transform_func: optional callable applied as f(bbox=raw_bbox) to
        convert each stored bbox before it is written out.
        """
        if not isinstance(self.sets, list):
            self.sets = list(self.sets)
        anno_id = 0
        self.annotations = []
        self.categories = []
        for sett in self.sets:
            for category in self.categories_dict:
                filepath = os.path.join(self.meta_dir, 'json', f'{sett}_pairs_{category}.json')
                # BUG FIX: the file handle was previously leaked
                # (json.load(open(filepath))); use a context manager.
                with open(filepath) as fh:
                    json_file = json.load(fh)
                # BUG FIX: category_id was only assigned while the categories
                # list was still being filled (i.e. during the first set), so
                # annotations from later sets reused a stale id from the last
                # category of the previous set. Resolve it unconditionally.
                category_id = self.categories_dict[category]
                if len(self.categories) != len(self.categories_dict):
                    self.categories.append(self.create_categories(category_id=category_id, category_name=category, supercategory='fashion'))
                for anno in json_file:
                    image_id = anno['photo']
                    if bbox_transform_func is not None:
                        bbox = bbox_transform_func(bbox=anno['bbox'])
                    else:
                        # (removed the duplicated `bbox = bbox = ...` assignment)
                        bbox = anno['bbox']
                    self.annotations.append(self.create_annotations(anno_id=anno_id, image_id=image_id, category_id=category_id, bbox=bbox, segmentation='', area='', iscrowd=0))
                    anno_id += 1

    def create_full_coco_json(self, bbox_transform_func=None):
        """Build every COCO section and assemble them into self.json."""
        self.info = self.create_info()
        # NOTE(review): COCO normally expects 'licenses' to be a LIST of
        # license dicts; a single dict is kept here to preserve the existing
        # output format — confirm downstream consumers before changing.
        self.licenses = self.create_license()
        self.create_images_info_all()
        self.create_annotations_all(bbox_transform_func=bbox_transform_func)
        self.json = {'info': self.info, 'images': self.images, 'annotations': self.annotations, 'categories': self.categories, 'licenses': self.licenses}
class SqueezeViewRemove(pm.SingleStateTransformation):
    """Removes a View access node that merely squeezes its source array.

    Pattern: in_array (data array) -> out_array (View). When the view is a
    pure squeeze of the array (same subset, matching strides on the kept
    dimensions, single consumer), the view node is removed and its outgoing
    memlets are rewritten against the underlying array.
    """
    in_array = pm.PatternNode(nodes.AccessNode)   # the underlying data array
    out_array = pm.PatternNode(nodes.AccessNode)  # the View to be removed

    # NOTE(review): pattern-matching transformations usually declare this as a
    # @classmethod; the decorator may have been lost in extraction — confirm.
    def expressions(cls):
        return [sdutil.node_path_graph(cls.in_array, cls.out_array)]

    def can_be_applied(self, state: SDFGState, expr_index: int, sdfg: SDFG, permissive: bool=False):
        """Check that removing the view cannot change program semantics."""
        in_array = self.in_array
        out_array = self.out_array
        in_desc = in_array.desc(sdfg)
        out_desc = out_array.desc(sdfg)
        # The view must feed exactly one downstream edge.
        if (state.out_degree(out_array) != 1):
            return False
        if (not isinstance(out_desc, data.View)):
            return False
        vedge = state.out_edges(out_array)[0]
        # The outgoing memlet must be expressed in terms of the view itself.
        if (vedge.data.data != out_array.data):
            return False
        view_subset = copy.deepcopy(vedge.data.subset)
        aedge = state.edges_between(in_array, out_array)[0]
        if (aedge.data.data != in_array.data):
            return False
        array_subset = copy.deepcopy(aedge.data.subset)
        # squeeze() mutates view_subset in place and returns the indices of
        # the dimensions that were KEPT.
        vsqdims = view_subset.squeeze()
        # Library nodes may rely on the exact (unsqueezed) shape of their
        # inputs, so only touch them in permissive mode.
        if ((not permissive) and isinstance(vedge.dst, nodes.LibraryNode)):
            return False
        # After squeezing, both subsets must describe the same elements.
        if (array_subset != view_subset):
            return False
        # Strides of the array must match the view's strides on kept dims.
        astrides = tuple(in_desc.strides)
        vstrides = tuple((s for (i, s) in enumerate(out_desc.strides) if (i in vsqdims)))
        if (astrides != vstrides):
            return False
        return True

    def apply(self, state: SDFGState, sdfg: SDFG):
        """Reroute the view's outgoing edge to the underlying array and delete
        the view node (and its data descriptor, when no longer referenced)."""
        in_array = self.in_array
        out_array = self.out_array
        out_desc = out_array.desc(sdfg)
        vedge = state.out_edges(out_array)[0]
        view_subset = copy.deepcopy(vedge.data.subset)
        aedge = state.edges_between(in_array, out_array)[0]
        array_subset = copy.deepcopy(aedge.data.subset)
        vsqdims = view_subset.squeeze()
        # Rewrite every memlet in the subtree to refer to the underlying
        # array, with the squeezed-out dimensions removed from its subset.
        for e in state.memlet_tree(vedge):
            e.data.data = in_array.data
            e.data.subset.squeeze(vsqdims)
        state.remove_edge(vedge)
        state.add_edge(in_array, vedge.src_conn, vedge.dst, vedge.dst_conn, vedge.data)
        state.remove_node(out_array)
        try:
            sdfg.remove_data(out_array.data)
        except ValueError:
            # Descriptor is still referenced elsewhere (e.g. another state).
            pass
def write_file(lines, path):
    """Write each entry of `lines` to `path`, one entry per line."""
    print('Writing:', path)
    with open(path, 'w') as out:
        out.writelines(line + '\n' for line in lines)
class BenchmarkResult():
    """One benchmark outcome for a (metric, method) pair.

    Holds either a scalar `value` or a curve (`curve_x`/`curve_y`); when the
    scalar is absent it is derived as the area under the zero-shifted curve.
    """

    def __init__(self, metric, method, value=None, curve_x=None, curve_y=None, curve_y_std=None, value_sign=None):
        self.metric = metric
        self.method = method
        self.value = value
        self.curve_x = curve_x
        self.curve_y = curve_y
        self.curve_y_std = curve_y_std
        self.value_sign = value_sign
        # Fall back to the module-wide default sign for this metric, if any.
        if self.value_sign is None and self.metric in sign_defaults:
            self.value_sign = sign_defaults[self.metric]
        if self.value is None:
            # Shift the curve so it starts at zero, then integrate.
            shifted = np.array(curve_y) - curve_y[0]
            self.value = sklearn.metrics.auc(curve_x, shifted)

    def full_name(self):
        """Return '<method> <metric>'."""
        return f'{self.method} {self.metric}'
def plot_line(vis: visdom.Visdom, window_name: str, env: Optional[str]=None, line_label: Optional[str]=None, x: Optional[np.ndarray]=None, y: Optional[np.ndarray]=None, x_label: Optional[str]=None, y_label: Optional[str]=None, width: int=576, height: int=416, draw_marker: bool=False) -> str:
    """Append (x, y) to a visdom line plot and refresh its axis ranges.

    Returns the (possibly newly created) window name.
    """
    empty_call = (not vis.win_exists(window_name))
    # NOTE(review): this early-return fires when the window does NOT yet exist
    # but data WAS provided — which looks inverted (one would expect to create
    # the window in that case). Confirm the intended semantics with callers.
    if (empty_call and ((x is not None) or (y is not None))):
        return window_name
    if (x is None):
        x = np.ones(1)
        empty_call = (empty_call & True)  # no-op; kept as in the original
    if (y is None):
        y = np.full(1, np.nan)
        empty_call = (empty_call & True)  # no-op; kept as in the original
    if (x.shape != y.shape):
        # Shapes disagree: fall back to unit x-values matching y's shape.
        x = np.ones_like(y)
    opts = {'showlegend': True, 'markers': draw_marker, 'markersize': 5}
    if empty_call:
        # First draw of this window: set title and canvas size once.
        opts['title'] = window_name
        opts['width'] = width
        opts['height'] = height
    window_name = vis.line(X=x, Y=y, win=window_name, env=env, update='append', name=line_label, opts=opts)
    # Re-derive axis ranges: x spans [0, 1.05 * max], y comes from the helper.
    (xtickmin, xtickmax) = (0.0, (np.max(x) * 1.05))
    (ytickmin, ytickmax) = calc_ytick_range(vis, window_name, env)
    opts = {'showlegend': True, 'xtickmin': xtickmin, 'xtickmax': xtickmax, 'ytickmin': ytickmin, 'ytickmax': ytickmax, 'xlabel': x_label, 'ylabel': y_label}
    window_name = vis.update_window_opts(win=window_name, opts=opts, env=env)
    return window_name
class TestSolveLyapunov(object):
    """Tests for scipy's continuous/discrete Lyapunov equation solvers.

    Each entry in `cases` is an (a, q) pair; the same pairs are checked
    against both the continuous and the discrete solvers.
    """
    # Case list: 2x2 real, complex/real mixes, a 5x5 integer pair, an 11x11
    # complex companion-style matrix with identity RHS, and two np.matrix
    # inputs (one with an np.matrix RHS, one converted to ndarray).
    cases = [
        (np.array([[1, 2], [3, 4]]),
         np.array([[9, 10], [11, 12]])),
        (np.array([[(1.0 + 1j), 2.0], [(3.0 - 4j), 5.0]]),
         np.array([[(2.0 - 2j), (2.0 + 2j)], [((- 1.0) - 1j), 2.0]])),
        (np.array([[1.0, 2.0], [3.0, 5.0]]),
         np.array([[(2.0 - 2j), (2.0 + 2j)], [((- 1.0) - 1j), 2.0]])),
        (np.array([[(1.0 + 1j), 2.0], [(3.0 - 4j), 5.0]]),
         np.array([[2.0, 2.0], [(- 1.0), 2.0]])),
        (np.array([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3], [1, 5, 2, 0, 7], [5, 3, 3, 1, 5]]),
         np.array([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3], [0, 2, 0, 1, 0], [1, 0, 3, 0, 4]])),
        (np.array([[(0.1 + 0j), (0.091 + 0j), (0.082 + 0j), (0.073 + 0j), (0.064 + 0j), (0.055 + 0j), (0.046 + 0j), (0.037 + 0j), (0.028 + 0j), (0.019 + 0j), (0.01 + 0j)],
                   [(1.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)],
                   [(0.0 + 0j), (1.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)],
                   [(0.0 + 0j), (0.0 + 0j), (1.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)],
                   [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (1.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)],
                   [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (1.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)],
                   [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (1.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)],
                   [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (1.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)],
                   [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (1.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j)],
                   [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (1.0 + 0j), (0.0 + 0j), (0.0 + 0j)],
                   [(0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (0.0 + 0j), (1.0 + 0j), (0.0 + 0j)]]),
         np.eye(11)),
        (matrix([[0, 1], [((- 1) / 2), (- 1)]]),
         (matrix([0, 3]).T * matrix([0, 3]).T.T)),
        (matrix([[0, 1], [((- 1) / 2), (- 1)]]),
         np.array((matrix([0, 3]).T * matrix([0, 3]).T.T)))]

    def test_continuous_squareness_and_shape(self):
        """Non-square or shape-mismatched inputs must raise ValueError."""
        nsq = np.ones((3, 2))
        sq = np.eye(3)
        assert_raises(ValueError, solve_continuous_lyapunov, nsq, sq)
        assert_raises(ValueError, solve_continuous_lyapunov, sq, nsq)
        assert_raises(ValueError, solve_continuous_lyapunov, sq, np.eye(2))

    def check_continuous_case(self, a, q):
        """Verify a x + x a^H == q for the continuous solver's solution."""
        x = solve_continuous_lyapunov(a, q)
        assert_array_almost_equal((np.dot(a, x) + np.dot(x, a.conj().transpose())), q)

    def check_discrete_case(self, a, q, method=None):
        """Verify a x a^H - x == -q for the discrete solver's solution."""
        x = solve_discrete_lyapunov(a, q, method=method)
        assert_array_almost_equal((np.dot(np.dot(a, x), a.conj().transpose()) - x), ((- 1.0) * q))

    def test_cases(self):
        # Exercise every case with the continuous solver and with both
        # discrete methods ('direct' and 'bilinear') plus the default.
        for case in self.cases:
            self.check_continuous_case(case[0], case[1])
            self.check_discrete_case(case[0], case[1])
            self.check_discrete_case(case[0], case[1], method='direct')
            self.check_discrete_case(case[0], case[1], method='bilinear')
def score_2afc_dataset(data_loader, func, name=''):
    """Score a two-alternative-forced-choice dataset.

    `func(ref, img)` produces per-pair distances; a sample scores 1 when the
    smaller distance agrees with the human judgement `gts`, 0.5 on ties.
    Returns (mean score, dict of raw d0s/d1s/gts/scores arrays).
    """
    d0_list, d1_list, judge_list = [], [], []
    for sample in tqdm(data_loader.load_data(), desc=name):
        d0_list += func(sample['ref'], sample['p0']).data.cpu().numpy().flatten().tolist()
        d1_list += func(sample['ref'], sample['p1']).data.cpu().numpy().flatten().tolist()
        judge_list += sample['judge'].cpu().numpy().flatten().tolist()
    d0s = np.array(d0_list)
    d1s = np.array(d1_list)
    gts = np.array(judge_list)
    # Agreement with the judge scores 1, disagreement 0, exact ties 0.5.
    scores = (d0s < d1s) * (1.0 - gts) + (d1s < d0s) * gts + (d1s == d0s) * 0.5
    return (np.mean(scores), dict(d0s=d0s, d1s=d1s, gts=gts, scores=scores))
class ChamferFunction(torch.autograd.Function):
    """Autograd bridge to the compiled `chamfer` extension (Chamfer distance).

    NOTE(review): torch.autograd.Function conventionally declares forward and
    backward as @staticmethod; the decorators appear to have been stripped in
    extraction — confirm against the original file.
    """
    def forward(ctx, xyz1, xyz2):
        # dist1/dist2: nearest-neighbor distances in each direction;
        # idx1/idx2: the matched indices, saved for the backward pass.
        (dist1, dist2, idx1, idx2) = chamfer.forward(xyz1, xyz2)
        ctx.save_for_backward(xyz1, xyz2, idx1, idx2)
        return (dist1, dist2)
    def backward(ctx, grad_dist1, grad_dist2):
        # Delegate the gradient computation to the extension, reusing the
        # correspondences established in forward.
        (xyz1, xyz2, idx1, idx2) = ctx.saved_tensors
        (grad_xyz1, grad_xyz2) = chamfer.backward(xyz1, xyz2, idx1, idx2, grad_dist1, grad_dist2)
        return (grad_xyz1, grad_xyz2)
def build_debug_graph(inputs):
    """Build the graph on float32 placeholders shaped like `inputs`.

    The leading dimension is taken from `inputs['features']`; the second
    (batch) dimension is left as None so any batch size can be fed.
    """
    nr_iters = inputs['features'].shape[0]
    feature_shape = [dim.value for dim in inputs['features'].shape[2:]]
    groups_shape = [dim.value for dim in inputs['groups'].shape[2:]]
    with tf.name_scope('debug'):
        X_debug = tf.placeholder(tf.float32, shape=[nr_iters, None] + feature_shape)
        G_debug = tf.placeholder(tf.float32, shape=[nr_iters, None] + groups_shape)
    return build_graph(X_debug, G_debug)
def roman2romantrain(roman):
    """Encode a roman-numeral label for training.

    Returns ([index], is_rest): 'rest' maps to ([0], 1); a numeric string
    maps to its zero-based index with is_rest == 0.
    """
    if roman == 'rest':
        return [0], 1
    index = int(roman) - 1
    return [index], 0
# NOTE(review): this line reads as a bare call but is almost certainly a
# stripped decorator (`@_utils.test(...)`) gating the test on the sparse
# extension and excluding the Metal backend — confirm against the original.
_utils.test(require=ti.extension.sparse, exclude=ti.metal)
def test_no_activate():
    """Exercise ti.no_activate on a dynamic SNode: increments at odd indices
    leave even indices at their initial value 1."""
    x = ti.field(ti.f32)
    n = 1024
    # Dynamic SNode along axis i with capacity n, allocated in 32-cell chunks.
    d = ti.root.dynamic(ti.i, n, chunk_size=32)
    d.place(x)
    # NOTE(review): initialize/func are presumably @ti.kernel functions in the
    # original; the decorators appear to have been stripped in extraction.
    def initialize():
        for i in range(n):
            x[i] = 1
    def func():
        # Declare that this kernel must not activate new cells of d.
        ti.no_activate(d)
        for i in range((n // 2)):
            x[((i * 2) + 1)] += 1
    initialize()
    func()
    for i in range(n):
        # Odd indices were incremented once (-> 2); even indices stay 1.
        assert (x[i] == ((i % 2) + 1))
def get_triton_activation_kernel(activation: Optional[Activation]):
    """Return the Triton kernel matching `activation`, or None when unset.

    Raises KeyError for an Activation member with no registered kernel
    (same behavior as the original direct dict indexing).
    """
    if not activation:
        return None
    kernels = {
        Activation.ReLU: relu,
        Activation.LeakyReLU: leaky_relu,
        Activation.GeLU: gelu,
        Activation.GeLUApprox: gelu_approx,
        Activation.SquaredReLU: squared_relu,
    }
    return kernels[activation]
# NOTE(review): the decorator line was garbled to a bare `(frozen=True)` in the
# source (a syntax error). The `attrib()` fields imply the attrs package, whose
# classic class decorator is `attrs` (alias of `attr.s`); restored below —
# confirm the exact name against this file's imports.
@attrs(frozen=True)
class ScannerTypeInfo():
    """Immutable record describing a scanner type mapping.

    Fields (names suggest, but SOURCE does not show, their exact use — confirm):
    a scanner-side type identifier, its C++ name, and serialize/deserialize
    handlers for values of that type.
    """
    type = attrib()         # scanner-side type identifier
    cpp_name = attrib()     # presumably the corresponding C++ type name
    serialize = attrib()    # presumably the serializer for this type
    deserialize = attrib()  # presumably the deserializer for this type
def test_nonzero_offset_fromarrow_NumpyArray_5():
    """Round-trip a negatively-offset Arrow slice through awkward's handler
    and compare with pyarrow's own to_pylist on the same slice."""
    data = np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.1])
    content = ak.contents.NumpyArray(data)
    sliced = content.to_arrow()[(- 2):10]
    roundtripped = to_list(ak._connect.pyarrow.handle_arrow(sliced))
    assert roundtripped == pyarrow.Array.to_pylist(sliced)
# NOTE(review): these lines were garbled in the source to bare `.parametrize(...)`
# statements (a syntax error); restored as pytest.mark.parametrize, the
# convention for stacked parameter grids — confirm against the original file.
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('axis', [None, 0, 1, 2, 3, (0, 2), (1, 2, 3)])
@pytest.mark.parametrize('keepdims', [False, True])
@pytest.mark.parametrize('inshape', [(2, 3, 4, 5), (2, 1, 4, 5)])
@pytest.mark.parametrize('op, ctx, func_name', list_ctx_and_func_name(['sum', 'mean', 'max', 'min', 'prod']))
def test_reduction_double_backward(op, seed, inshape, axis, keepdims, ctx, func_name):
    """Double-backward (gradient-of-gradient) check for reduction functions
    across every axis/keepdims combination."""
    from nbla_test_utils import backward_function_tester
    func = getattr(F, op)
    # (removed an unused `ref_func = getattr(np, op)` lookup — never used)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    backward_function_tester(rng, func, inputs, func_args=[axis], func_kwargs=dict(keepdims=keepdims), ctx=ctx, atol_accum=0.08)
def SuzukiGraph():
    """Construct the Suzuki graph from the orbit of the pair (1, 2) under the
    sporadic Suzuki group, relabelled to consecutive vertex labels."""
    from sage.groups.perm_gps.permgroup_named import SuzukiSporadicGroup
    graph = Graph()
    edge_orbit = SuzukiSporadicGroup().orbit((1, 2), 'OnSets')
    graph.add_edges(edge_orbit)
    graph.relabel()
    graph.name('Suzuki graph')
    return graph
def test_horizon_180_365_days(tmp_path: pathlib.Path):
    """Labeler test with a (180-day, 365-day) time horizon watching code 2.

    Each tuple pairs an event with its expected label; 'out of range' marks
    an event whose horizon extends past the available data.
    """
    time_horizon = TimeHorizon(datetime.timedelta(days=180), datetime.timedelta(days=365))
    labeler = DummyLabeler([2], time_horizon)
    events_with_labels: EventsWithLabels = [(event((2000, 1, 3), 2, None), True), (event((2000, 10, 5), 2, None), False), (event((2002, 1, 5), 2, None), True), (event((2002, 3, 1), 1, None), True), (event((2002, 4, 5), 3, None), True), (event((2002, 4, 12), 1, None), True), (event((2002, 12, 5), 2, None), False), (event((2002, 12, 10), 1, None), False), (event((2004, 1, 10), 2, None), False), (event((2008, 1, 10), 2, None), 'out of range')]
    run_test_for_labeler(labeler, events_with_labels, help_text='test_horizon_180_365_days')
def norm_layer1d(norm, num_channels):
    """Return a 1-D normalization layer for `num_channels` channels.

    norm: one of 'batch', 'instance' or 'layer'; anything else raises
    ValueError with the same message as before.
    """
    factories = {
        'batch': lambda c: nn.BatchNorm1d(c),
        'instance': lambda c: nn.InstanceNorm1d(c, affine=True),
        'layer': lambda c: nn.LayerNorm(c),
    }
    if norm not in factories:
        raise ValueError('%s not recognized.' % norm)
    return factories[norm](num_channels)
def teniter(variable: T.Tensor, include_ordinary=True, include_saved=False):
    """Yield each distinct tensor reachable from `variable` exactly once.

    Tensors are deduplicated by id(); each sighting is classified as
    'ordinary' (reached outside a saved-tensors context) and/or 'saved'
    (the `saved` flag passed by `traverse`). The include_* flags pick which
    classes are yielded (both flags False yields nothing).
    """
    def dedup(state, parent, ten, saved):
        # Merge this sighting into the per-id record (ten, ordinary, saved):
        # once a tensor has been seen as ordinary/saved it stays so.
        if (tensor := state.get(id(ten))):
            ordinary = ((not saved) or tensor[1])
            saved = (saved or tensor[2])
            state[id(ten)] = (ten, ordinary, saved)
        else:
            state[id(ten)] = (ten, (not saved), saved)
    state = {}
    traverse(variable, partial(dedup, state))
    def predicate(value):
        # Select records matching the caller's include_* flags.
        (_, ordinary, saved) = value
        if (include_ordinary and include_saved):
            return (ordinary or saved)
        elif include_ordinary:
            return ordinary
        elif include_saved:
            return saved
        else:
            return False
    for (ten, _, _) in filter(predicate, state.values()):
        (yield ten)
class MixedPercisionActivationSearch4Bit(MixedPercisionActivationBaseTest):
    """Mixed-precision activation search test under a 4-bit-oriented KPI budget.

    Runs the base-class search flow and checks that the chosen per-layer
    bit-width configuration matches `expected_config`.
    """
    def __init__(self, unit_test):
        super().__init__(unit_test)
        # Expected per-layer candidate indices produced by the search.
        self.expected_config = [1, 4, 1, 1]
    def get_kpi(self):
        # Resource budget handed to the mixed-precision search.
        return KPI(192, 1536)
    def compare(self, quantized_models, float_model, input_x=None, quantization_info=None):
        # Only the selected mixed-precision configuration is verified here.
        self.verify_config(quantization_info.mixed_precision_cfg, self.expected_config)
def print_model(model):
    """Print the model's structure followed by its total parameter count."""
    print(model)
    total = sum(functools.reduce(operator.mul, w.size(), 1)
                for w in model.parameters())
    print(total)
_properties
class ONNXOp(nd.LibraryNode):
implementations = {}
default_implementation = None
default_backward_implementation = None
schema = Property(dtype=ONNXSchema, desc="The operator's ONNX OpSchema", allow_none=True)
backward_implementation = Property(dtype=str, allow_none=True, desc='Which implementation this library node will expand into in the backward pass.')
def iter_outputs_in_onnx_order(self, state: SDFGState) -> List[MultiConnectorEdge]:
return self._iter_params_in_onnx_order(state, inputs=False)
def iter_inputs_in_onnx_order(self, state: SDFGState) -> List[MultiConnectorEdge]:
return self._iter_params_in_onnx_order(state, inputs=True)
def _iter_params_in_onnx_order(self, state: SDFGState, inputs: bool=False) -> List[MultiConnectorEdge]:
parameters = list((self.schema.inputs if inputs else self.schema.outputs))
if (len(parameters) == 0):
return []
if (parameters[(- 1)].param_type == ONNXParameterType.Variadic):
name = parameters[(- 1)].name
parameters = itertools.chain([param.name for param in parameters[:(- 1)]], (((name + '__') + str(i)) for i in itertools.count()))
else:
parameters = [param.name for param in parameters]
edges = (state.in_edges(self) if inputs else state.out_edges(self))
parameters = list(itertools.islice(parameters, len(edges)))
conn_to_edge = {(edge.dst_conn if inputs else edge.src_conn): edge for edge in edges}
return [conn_to_edge[name] for name in parameters]
def iter_edges(self, state: SDFGState, ignore_unknown=False) -> Iterator[Tuple[(MultiConnectorEdge, bool)]]:
in_edges: List[MultiConnectorEdge] = state.in_edges(self)
out_edges: List[MultiConnectorEdge] = state.out_edges(self)
def get_idx(parameters, name):
if ('__' in name):
(name, number) = parse_variadic_param(name)
else:
number = 0
matched = [i for (i, param) in enumerate(parameters) if (param.name == name)]
if (len(matched) != 1):
if ignore_unknown:
return None
raise ValueError("Found {} connectors with name '{}', expected to find exactly one".format(len(matched), name))
parameter_idx = matched[0]
parameter_idx += number
return parameter_idx
if ignore_unknown:
in_edges = [e for e in in_edges if (get_idx(self.schema.inputs, e.dst_conn) is not None)]
out_edges = [e for e in out_edges if (get_idx(self.schema.outputs, e.src_conn) is not None)]
sorted_in = sorted(in_edges, key=(lambda edge: get_idx(self.schema.inputs, edge.dst_conn)))
sorted_out = sorted(out_edges, key=(lambda edge: get_idx(self.schema.outputs, edge.src_conn)))
return itertools.chain(zip(sorted_in, itertools.repeat(True)), zip(sorted_out, itertools.repeat(False)))
    def validate(self, sdfg: SDFG, state: SDFGState):
        """Validate this ONNX op node against its schema.

        Checks, in order: every edge has a named connector that exists on the
        node; all required (Single) inputs/outputs are connected; no unknown
        connectors are passed; variadic connectors are well-formed and their
        indices are unique and contiguous from 0; edge dtypes satisfy the
        schema's type constraints; and all required attributes are set.
        Raises ValueError or TypeError on the first violation found.
        """
        in_edges = state.in_edges(self)
        out_edges = state.out_edges(self)
        # Every edge must name a connector; None connectors are rejected up front.
        all_connectors = {edge.dst_conn for edge in in_edges}.union((edge.src_conn for edge in out_edges))
        if (None in all_connectors):
            raise ValueError('Edges to ONNX Ops must not have connector None')
        # Each edge's connector must actually exist on this node.
        for (edge, is_input) in self.iter_edges(state):
            if is_input:
                conn_name = edge.dst_conn
                if (conn_name not in self.in_connectors):
                    raise ValueError("Memlet {} leading to nonexistent input connector '{}'".format(edge.data, conn_name))
            else:
                conn_name = edge.src_conn
                if (conn_name not in self.out_connectors):
                    raise ValueError("Memlet {} leading to nonexistent output connector '{}'".format(edge.data, conn_name))
        # Required (Single) inputs must all be connected. Connectors containing
        # '__' are variadic instances and are handled separately below.
        required_inputs = {inp.name for inp in self.schema.inputs if (inp.param_type == ONNXParameterType.Single)}
        passed_inputs = {inp.dst_conn for inp in in_edges if ('__' not in inp.dst_conn)}
        known_inputs = {inp.name for inp in self.schema.inputs}
        missing_inputs = required_inputs.difference(passed_inputs)
        if (len(missing_inputs) > 0):
            raise ValueError(get_missing_arguments_message(self.schema.name, missing_inputs, 'input'))
        # Same check for required outputs.
        required_outputs = {outp.name for outp in self.schema.outputs if (outp.param_type == ONNXParameterType.Single)}
        passed_outputs = {outp.src_conn for outp in out_edges if ('__' not in outp.src_conn)}
        known_outputs = {outp.name for outp in self.schema.outputs}
        missing_outputs = required_outputs.difference(passed_outputs)
        if (len(missing_outputs) > 0):
            raise ValueError(get_missing_arguments_message(self.schema.name, missing_outputs, 'output'))
        # Reject connectors that match no schema parameter at all.
        unknown_inputs = passed_inputs.difference(known_inputs)
        if (len(unknown_inputs) > 0):
            raise TypeError("Got an unexpected argument '{}'".format(list(unknown_inputs)[0]))
        unknown_outputs = passed_outputs.difference(known_outputs)
        if (len(unknown_outputs) > 0):
            raise TypeError("Got an unexpected argument '{}'".format(list(unknown_outputs)[0]))
        # Variadic inputs ("name__i"): the base name must be a variadic schema
        # parameter, indices must be unique, and must form 0..k-1 with no gaps.
        variadic_inputs = {inp.name for inp in self.schema.inputs if (inp.param_type == ONNXParameterType.Variadic)}
        passed_variadic_inputs = {edge.dst_conn for edge in in_edges if ('__' in edge.dst_conn)}
        seen_variadic_numbers = set()
        for param in passed_variadic_inputs:
            (name, number) = parse_variadic_param(param)
            if (name not in variadic_inputs):
                raise ValueError("Got an unexpected variadic argument '{}'".format(param))
            if (number in seen_variadic_numbers):
                raise ValueError('Got two variadic inputs with index {}, expected at most one'.format(number))
            seen_variadic_numbers.add(number)
        for i in range(len(seen_variadic_numbers)):
            if (i not in seen_variadic_numbers):
                raise ValueError('Since {} variadic inputs were passed, expected variadic parameter with number {}'.format(len(seen_variadic_numbers), i))
        # Same variadic checks for outputs.
        variadic_outputs = {outp.name for outp in self.schema.outputs if (outp.param_type == ONNXParameterType.Variadic)}
        passed_variadic_outputs = {edge.src_conn for edge in out_edges if ('__' in edge.src_conn)}
        seen_variadic_numbers = set()
        for param in passed_variadic_outputs:
            (name, number) = parse_variadic_param(param)
            if (name not in variadic_outputs):
                raise ValueError("Got an unexpected variadic argument '{}'".format(param))
            if (number in seen_variadic_numbers):
                raise ValueError('Got two variadic outputs with index {}, expected at most one'.format(number))
            seen_variadic_numbers.add(number)
        for i in range(len(seen_variadic_numbers)):
            if (i not in seen_variadic_numbers):
                raise ValueError('Since {} variadic outputs were passed, expected variadic parameter with number {}'.format(len(seen_variadic_numbers), i))
        # Type-constraint solving: each type_str (e.g. "T") must resolve to a
        # single dtype across all parameters that share it; non-homogeneous
        # variadic parameters are exempt.
        assigned_params = {}
        for (edge, is_input) in self.iter_edges(state):
            conn_name = (edge.dst_conn if is_input else edge.src_conn)
            if ('__' in conn_name):
                (parsed_name, number) = parse_variadic_param(conn_name)
            else:
                parsed_name = conn_name
            matching = [inp for inp in (self.schema.inputs if is_input else self.schema.outputs) if (inp.name == parsed_name)]
            if (len(matching) != 1):
                raise ValueError("Expected to find one {} parameter in schema with name '{}', but found {}".format(('input' if is_input else 'output'), parsed_name, len(matching)))
            matched = matching[0]
            # A variadic-looking connector must map to a variadic parameter, and
            # vice versa.
            if (('__' in conn_name) and (matched.param_type != ONNXParameterType.Variadic)):
                raise ValueError("Got variadic argument '{}' for non-variadic parameter '{}'. Ensure that non-variadic args do not contain '__'".format(conn_name, matched.name))
            if (('__' not in conn_name) and (matched.param_type == ONNXParameterType.Variadic)):
                raise ValueError("Expected variadic argument for variadic parameter '{}', got '{}'. Use '{}__i' as the connector name, where i is the desired index of the variadic parameter.".format(matched.name, conn_name, conn_name))
            edge_data = edge.data.data
            edge_dtype = sdfg.arrays[edge_data].dtype
            if ((matched.param_type == ONNXParameterType.Variadic) and (not matched.homogeneous)):
                # Non-homogeneous variadic params may mix dtypes freely.
                pass
            elif ((matched.type_str in assigned_params) and ((assigned_params[matched.type_str] != edge_dtype) and (assigned_params[matched.type_str] != edge_dtype.base_type))):
                raise ValueError("Could not solve type constraints; excepted type '{expected}' for {param_type} '{conn_name}', got type '{actual}'".format(expected=assigned_params[matched.type_str], param_type=('input' if is_input else 'output'), conn_name=matched.name, actual=edge_dtype))
            # The dtype (or its base type, for vector/pointer types) must be one
            # of the types the constraint allows.
            cons = self.schema.type_constraints[matched.type_str]
            if ((edge_dtype not in cons.types) and (edge_dtype.base_type not in cons.types)):
                raise ValueError("Expected type in '{possible}' for {param_type} '{conn_name}', got type '{actual}'".format(possible=cons.types, param_type=('input' if is_input else 'output'), conn_name=matched.name, actual=edge_dtype))
            assigned_params[matched.type_str] = edge_dtype.base_type
        # Required attributes must be set on the node.
        # NOTE(review): this reads `dace_schema.attributes`, not
        # `self.schema.attributes` — presumably a module/closure-level schema
        # object; confirm it refers to the same schema as `self.schema`.
        required_attrs = {name for (name, attr) in dace_schema.attributes.items() if attr.required}
        for attr in required_attrs:
            if (getattr(self, attr) is None):
                raise ValueError("Expected value for required attribute '{}', got None".format(attr))
class InteractionBlock(torch.nn.Module):
    """SchNet-style interaction block: a continuous-filter convolution whose
    filters are generated from radial (Gaussian) edge features, followed by a
    nonlinearity and a linear layer.

    :param hidden_channels: node feature dimensionality.
    :param num_gaussians: number of radial basis features per edge.
    :param num_filters: width of the filter-generating network.
    :param cutoff: interaction cutoff radius passed to CFConv.
    """

    def __init__(self, hidden_channels, num_gaussians, num_filters, cutoff):
        super(InteractionBlock, self).__init__()
        # Filter-generating network: maps RBF edge features to conv filters.
        self.mlp = Sequential(Linear(num_gaussians, num_filters), ShiftedSoftplus(), Linear(num_filters, num_filters))
        self.conv = CFConv(hidden_channels, hidden_channels, num_filters, self.mlp, cutoff)
        self.act = ShiftedSoftplus()
        self.lin = Linear(hidden_channels, hidden_channels)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize all weights (Xavier uniform) and zero all biases."""
        torch.nn.init.xavier_uniform_(self.mlp[0].weight)
        self.mlp[0].bias.data.fill_(0)
        torch.nn.init.xavier_uniform_(self.mlp[2].weight)
        # BUG FIX: this previously re-zeroed self.mlp[0].bias, leaving
        # self.mlp[2].bias with its default (non-zero) initialization.
        self.mlp[2].bias.data.fill_(0)
        self.conv.reset_parameters()
        torch.nn.init.xavier_uniform_(self.lin.weight)
        self.lin.bias.data.fill_(0)

    def forward(self, x, edge_index, edge_weight, edge_attr):
        """Apply conv -> shifted-softplus -> linear to node features x."""
        x = self.conv(x, edge_index, edge_weight, edge_attr)
        x = self.act(x)
        x = self.lin(x)
        return x
def register_Ns3WeibullRandomVariable_methods(root_module, cls):
    """Register the ns3::WeibullRandomVariable API on the pybindgen class.

    Exposes the static GetTypeId, the default constructor, the const
    parameter accessors, and both parameterized and virtual no-argument
    forms of GetValue/GetInteger.
    """
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # Const accessors for the three distribution parameters.
    for accessor in ('GetScale', 'GetShape', 'GetBound'):
        cls.add_method(accessor, 'double', [], is_const=True)
    # Parameterized draws taking explicit (scale, shape, bound).
    cls.add_method('GetValue', 'double',
                   [param('double', n) for n in ('scale', 'shape', 'bound')])
    cls.add_method('GetInteger', 'uint32_t',
                   [param('uint32_t', n) for n in ('scale', 'shape', 'bound')])
    # Virtual no-argument draws using the attribute-configured parameters.
    cls.add_method('GetValue', 'double', [], is_virtual=True)
    cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True)
    return
class Writer(abc.ABC):
    """Base class for data writers usable as context managers.

    All operations default to no-ops so subclasses only override what they
    need. Entering the context opens the writer; exiting (or garbage
    collection) closes it.
    """

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def __del__(self):
        # Best-effort cleanup if the writer was never closed explicitly.
        self.close()

    def close(self):
        """Release underlying resources; default is a no-op."""
        pass

    def open(self):
        """Prepare the writer for use; default is a no-op."""
        pass

    def reserve(self, entry: str, shape: tuple, dtype=None):
        """Pre-allocate storage for *entry* with the given shape/dtype; no-op."""
        pass

    def fill(self, entry: str, data, index: expr.IndexExpression=None):
        """Write *data* into a previously reserved *entry* at *index*; no-op."""
        pass

    def write(self, entry: str, data, dtype=None):
        """Write *entry* in a single shot; default is a no-op."""
        pass
class DiracConv2d(nn.Conv2d, DiracConv):
    """2-D convolution with Dirac-parameterized weights; stride is fixed to 1.

    The effective kernel used in forward() is produced by
    ``transform_weight()`` from the DiracConv mixin.
    """

    def __init__(self, in_channels, out_channels, kernel_size, padding=0, dilation=1, bias=True):
        super().__init__(in_channels, out_channels, kernel_size, stride=1,
                         padding=padding, dilation=dilation, bias=bias)
        self.init_params(out_channels)

    def forward(self, input):
        # Combine the Dirac (identity) component with the learned weights
        # before running the standard convolution.
        effective_weight = self.transform_weight()
        return F.conv2d(input, effective_weight, self.bias, self.stride,
                        self.padding, self.dilation)
class ContinuousQFunctionMixin():
    """Mixin adding value prediction for continuous-action Q-functions."""

    def inner_predict_value(self: _ContinuousQFunctionProtocol, x: TorchObservation, action: torch.Tensor) -> torch.Tensor:
        """Return the mean expected Q-value for (x, action), flattened to 1-D."""
        expected_q = self._q_func_forwarder.compute_expected_q(x, action, reduction='mean')
        return expected_q.reshape(-1)
def countless_generalized(data, factor):
    """N-dimensional COUNTLESS downsampling: each output voxel is the mode of
    the corresponding window of shape *factor*, computed without counting
    (via pairwise-equality and multiply/or tricks).

    *data* is shifted by +1 in place during computation (so 0 can serve as
    the "no match" sentinel) and restored before returning.
    """
    assert len(data.shape) == len(factor)

    window_size = reduce(lambda a, b: a * b, factor)
    majority = int(math.ceil(float(window_size) / 2))

    # Shift so zero becomes the sentinel value; undone at the end.
    data += 1

    # One strided view per within-window offset; sections[i][v] is the i-th
    # element of output voxel v's window.
    sections = [
        data[tuple(np.s_[o::f] for (o, f) in zip(offset, factor))]
        for offset in np.ndindex(factor)
    ]

    def pick(elements):
        # elements[0] where all given sections agree at a voxel, else 0.
        pairwise_eq = ((elements[i] == elements[i + 1]) for i in range(len(elements) - 1))
        all_equal = reduce(lambda p, q: p & q, pairwise_eq)
        return elements[0] * all_equal

    def logical_or(x, y):
        # Prefer x; fall back to y wherever x is the 0 sentinel.
        return x + ((x == 0) * y)

    # Start with agreements among `majority` sections, then progressively
    # weaker agreements down to pairs (excluding the last section, which is
    # used as the final fallback).
    result = reduce(logical_or, (pick(combo) for combo in combinations(sections, majority)))
    for count in range(majority - 1, 2, -1):
        partial = reduce(logical_or, (pick(combo) for combo in combinations(sections, count)))
        result = logical_or(result, partial)
    partial = reduce(logical_or, (pick(combo) for combo in combinations(sections[:-1], 2)))
    result = logical_or(result, partial)
    result = logical_or(result, sections[-1]) - 1

    data -= 1  # restore caller's array
    return result
class Normalize(rf.Module):
    """Normalization over given axes with optional learned scale and bias.

    :param param_dims: dims of the learned scale/bias parameters.
    :param epsilon: numerical stabilizer passed to :func:`normalize`.
    :param scale: whether to learn a multiplicative scale (initialized to 1).
    :param bias: whether to learn an additive bias.
    """

    def __init__(self, *, param_dims: Union[(Dim, Sequence[Dim])], epsilon: float=1e-06, scale: bool=True, bias: bool=True):
        super(Normalize, self).__init__()
        self.epsilon = epsilon
        if isinstance(param_dims, Dim):
            param_dims = [param_dims]
        self.scale = None
        if scale:
            self.scale = rf.Parameter(dims=param_dims)
            # Start as the identity transform.
            self.scale.initial = 1.0
        self.bias = rf.Parameter(dims=param_dims) if bias else None

    def __call__(self, a: Tensor, *, axis: Union[(Dim, Sequence[Dim])]):
        """Normalize *a* over *axis*, then apply scale/bias if configured."""
        out = normalize(a, axis=axis, epsilon=self.epsilon)
        if self.scale is not None:
            out = self.scale * out
        if self.bias is not None:
            out = out + self.bias
        return out
class Tester(Base):
    """Evaluation driver: builds the test dataloader, restores a checkpoint
    (renaming legacy decoder keys), and dispatches evaluation/dumping to the
    dataset's own methods."""

    def __init__(self, ckpt_path):
        # Checkpoint path is stored before Base.__init__ so it is available
        # once logging is set up.
        self.ckpt_path = ckpt_path
        super(Tester, self).__init__(log_name='test_logs.txt')

    def _make_batch_generator(self, test_set, annot_subset, capture, camera, seq_name):
        """Create the test DataLoader and remember dataset metadata."""
        self.logger.info((('Creating ' + test_set) + ' dataset...'))
        testset_loader = Dataset(transforms.ToTensor(), test_set, annot_subset, capture, camera, seq_name)
        # Evaluation order must be stable, hence shuffle=False.
        batch_generator = DataLoader(dataset=testset_loader, batch_size=(cfg.num_gpus * cfg.test_batch_size), shuffle=False, num_workers=cfg.num_thread, pin_memory=True)
        self.joint_num = testset_loader.joint_num
        self.batch_generator = batch_generator
        self.testset = testset_loader

    def _make_model(self):
        """Build the network, migrate legacy checkpoint keys, and load weights."""
        model_path = self.ckpt_path
        assert os.path.exists(model_path), ('Cannot find model at ' + model_path)
        self.logger.info('Load checkpoint from {}'.format(model_path))
        self.logger.info('Creating graph...')
        model = get_model('test', self.joint_num)
        model = model.cuda()
        model = DataParallel(model)
        ckpt = torch.load(model_path)
        # Older checkpoints stored decoder weights directly under
        # 'decoder_net.'; current models expect 'decoder_net.resnet_decoder.'.
        # NOTE(review): the hard-coded slice index 19 is presumably the length
        # of the common key prefix up to the insertion point — confirm against
        # the actual checkpoint key layout.
        resnet_dec_keys = []
        resnet_new_keys = []
        for k in ckpt['network'].keys():
            if (('decoder_net' in k) and ('resnet_decoder' not in k)):
                new_k = ((k[:19] + 'resnet_decoder.') + k[19:])
                resnet_new_keys.append(new_k)
                resnet_dec_keys.append(k)
        for (i, k) in enumerate(resnet_dec_keys):
            ckpt['network'][resnet_new_keys[i]] = ckpt['network'][resnet_dec_keys[i]]
            del ckpt['network'][k]
        model.load_state_dict(ckpt['network'], strict=True)
        model.eval()
        self.model = model

    def _evaluate(self, preds, gt, ckpt_path, annot_subset):
        """Dispatch to the dataset-specific evaluation routine.

        NOTE(review): the ho3d/h2o3d evaluators are called with
        (preds, ckpt_path, gt) — a different argument order than the
        InterHand2.6M evaluator above; confirm this matches their signatures.
        """
        if (cfg.dataset == 'InterHand2.6M'):
            if (cfg.predict_2p5d and (cfg.predict_type == 'vectors')):
                self.testset.evaluate_2p5d(preds, gt, ckpt_path, annot_subset)
            else:
                self.testset.evaluate(preds, gt, ckpt_path, annot_subset)
        elif (cfg.dataset == 'ho3d'):
            self.testset.evaluate(preds, ckpt_path, gt)
        elif (cfg.dataset == 'h2o3d'):
            self.testset.evaluate(preds, ckpt_path, gt)

    def _dump_results(self, preds, dump_dir):
        """Write raw predictions via the dataset implementation."""
        self.testset.dump_results(preds, dump_dir)
class NodeConfig():
    """Configuration of one simulated node: resources, networking, the app it
    runs, and helpers to emit the guest-side run script and config tarball."""

    def __init__(self) -> None:
        self.sim = 'qemu'           # simulator backend: 'qemu' or 'gem5'
        self.ip = '10.0.0.1'
        self.prefix = 24            # network prefix length
        self.cores = 1
        self.threads = 1
        self.memory = 512           # MB
        self.disk_image = 'base'
        self.mtu = 1500
        self.nockp = 0              # non-zero disables gem5 checkpointing
        self.app: tp.Optional[AppConfig] = None
        self.kcmd_append = ''       # extra kernel command-line arguments

    def config_str(self) -> str:
        """Assemble the guest run script from node and app command lists."""
        if (self.sim == 'gem5'):
            checkpoint_cmds = [] if self.nockp else ['m5 checkpoint']
            exit_cmds = ['m5 exit']
        else:
            checkpoint_cmds = ['echo ready to checkpoint']
            exit_cmds = ['poweroff -f']
        commands = (
            self.prepare_pre_cp()
            + self.app.prepare_pre_cp()
            + checkpoint_cmds
            + self.prepare_post_cp()
            + self.app.prepare_post_cp()
            + self.run_cmds()
            + self.cleanup_cmds()
            + exit_cmds
        )
        return '\n'.join(commands)

    def make_tar(self, path: str) -> None:
        """Pack the run script and all app config files into a tar at *path*."""
        with tarfile.open(path, 'w:') as tar:
            self._add_member(tar, 'guest/run.sh', self.strfile(self.config_str()))
            for (n, f) in self.config_files().items():
                self._add_member(tar, 'guest/' + n, f)

    def _add_member(self, tar, name: str, fobj: tp.IO) -> None:
        """Add one executable file object to *tar* and close it."""
        info = tarfile.TarInfo(name)
        info.mode = 511  # 0o777: guest scripts must be executable
        # Measure the stream length, then rewind before handing it to tarfile.
        fobj.seek(0, io.SEEK_END)
        info.size = fobj.tell()
        fobj.seek(0, io.SEEK_SET)
        tar.addfile(tarinfo=info, fileobj=fobj)
        fobj.close()

    def prepare_pre_cp(self) -> tp.List[str]:
        """Shell commands run before the checkpoint (environment setup)."""
        return ['set -x', 'export HOME=/root', 'export LANG=en_US', ('export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:' + '/usr/bin:/sbin:/bin:/usr/games:/usr/local/games"')]

    def prepare_post_cp(self) -> tp.List[str]:
        """Shell commands run after the checkpoint; none by default."""
        return []

    def run_cmds(self) -> tp.List[str]:
        """The application's main commands."""
        return self.app.run_cmds(self)

    def cleanup_cmds(self) -> tp.List[str]:
        """Teardown commands; none by default."""
        return []

    def config_files(self) -> tp.Dict[(str, tp.IO)]:
        """Extra files to ship to the guest, delegated to the app config."""
        return self.app.config_files()

    def strfile(self, s: str) -> io.BytesIO:
        """Wrap a string as a UTF-8 binary stream for tar packing."""
        return io.BytesIO(bytes(s, encoding='UTF-8'))
class PrefetchLoader(object):
    """Wraps a DataLoader and overlaps host-to-GPU copies with compute using a
    dedicated CUDA stream, optionally normalizing visual inputs on the fly.

    :param loader: the underlying (CPU) DataLoader.
    :param img_normalize: optional callable applied to float visual tensors.
    """

    def __init__(self, loader, img_normalize=None):
        self.loader = loader
        self.stream = torch.cuda.Stream()  # side stream for async copies
        self.img_normalize = img_normalize

    def _prep_visual(self, batch, key):
        # Cast to float and, only when a normalizer is configured, apply it.
        batch[key] = batch[key].float()
        if self.img_normalize is not None:
            batch[key] = self.img_normalize(batch[key])

    def __iter__(self):
        loader_it = iter(self.loader)
        self.preload(loader_it)
        batch = self.next(loader_it)
        while batch is not None:
            # Multi-task loaders yield (task, batch) tuples.
            is_tuple = isinstance(batch, tuple)
            if is_tuple:
                (task, batch) = batch
            self._prep_visual(batch, 'visual_inputs')
            # BUG FIX: the crop/context branches previously called
            # self.img_normalize unconditionally and crashed when it was None;
            # they now use the same None guard as 'visual_inputs'.
            if 'crop_visual_inputs' in batch:
                self._prep_visual(batch, 'crop_visual_inputs')
            if 'context_visual_inputs' in batch:
                self._prep_visual(batch, 'context_visual_inputs')
            if is_tuple:
                yield (task, batch)
            else:
                yield batch
            batch = self.next(loader_it)

    def __len__(self):
        return len(self.loader)

    def preload(self, it):
        """Fetch the next batch and start copying it to GPU on the side stream."""
        try:
            self.batch = next(it)
        except StopIteration:
            self.batch = None
            return
        with torch.cuda.stream(self.stream):
            self.batch = move_to_cuda(self.batch)

    def next(self, it):
        """Return the prefetched batch and kick off the next prefetch."""
        # Make the main stream wait until the async copy has finished.
        torch.cuda.current_stream().wait_stream(self.stream)
        batch = self.batch
        if batch is not None:
            # Keep the tensors alive on the copy stream until consumed.
            record_cuda_stream(batch)
        self.preload(it)
        return batch

    def __getattr__(self, name):
        # Delegate unknown attributes (dataset, sampler, ...) to the wrapped loader.
        method = self.loader.__getattribute__(name)
        return method
def test_indexedoption():
    """Iterating an awkward Array with None entries skips them correctly."""

    def locate(array):
        # Return the first non-None record with x == 3, else None.
        for record in array:
            if record is None:
                continue
            if record.x == 3:
                return record
        return None

    array = ak.highlevel.Array([{'x': 1}, {'x': 2}, None, {'x': 3}])
    assert ak.operations.to_list(locate(array)) == {'x': 3}
def create_function_nnp(inputs, outputs, func_name, func_args, func_kwargs):
    """Build a single-function NNP protobuf for testing a converter.

    Creates a one-network NNablaProtoBuf containing only *func_name*, wires up
    its input/output variables (marking parameter inputs and embedding their
    data), translates the Python-side positional/keyword arguments into the
    function's protobuf parameter message, and adds an 'inference' executor.

    Returns (nnp, input_data, output_data), where the data lists hold the
    flattened buffer contents. Returns None when *func_name* is None.

    NOTE(review): argument translation uses eval()/exec() on names taken from
    nnabla's own category info, not external input — acceptable here, but keep
    it confined to test utilities.
    """
    if (func_name is None):
        return
    # Look up the function's schema (inputs/arguments metadata) by name.
    for (category_name, category) in nnabla.utils.converter.get_category_info().items():
        if (func_name in category):
            function = category[func_name]
    nnp = nnabla_pb2.NNablaProtoBuf()
    net = nnp.network.add()
    net.name = 'network1'
    net.batch_size = 1
    func = net.function.add()
    func.name = func_name
    func.type = func_name
    # --- Inputs: create a variable per input; parameters embed their data. ---
    func_inputs = []
    data_names = []
    parameter_names = []
    input_data = []
    for (n, i) in enumerate(inputs):
        if (i is not None):
            if (len(list(function['inputs'].items())) == 1):
                (input_name, input_info) = list(function['inputs'].items())[0]
                # A single variadic input expands to name0, name1, ...
                if (('variadic' in input_info) and input_info['variadic']):
                    input_name += str(n)
            else:
                (input_name, input_info) = list(function['inputs'].items())[n]
            func_inputs.append(input_name)
            var = net.variable.add()
            var.name = input_name
            if (('parameter' in input_info) and input_info['parameter']):
                # Parameter input: record data in nnp.parameter.
                parameter_names.append(input_name)
                var.type = 'Parameter'
                shape = list(i.d.shape)[:]
                if (func.name == 'BatchNormalization'):
                    shape = ([1] + shape)
                var.shape.dim.extend(shape)
                param = nnp.parameter.add()
                param.variable_name = input_name
                param.shape.dim.extend(shape)
                param.data.extend(i.d.flatten())
            else:
                # Buffer input: data is fed at execution time.
                input_data.append(i.d.flatten())
                data_names.append(input_name)
                var.type = 'Buffer'
                shape = list(i.d.shape)[:]
                # Most buffers get a leading batch dim of 1; these cases keep
                # the raw shape (running stats, PReLU slope, layout-sensitive ops).
                if ((input_name == 'rmean') or (input_name == 't')):
                    pass
                elif ((func.name == 'PReLU') and (input_name == 'x1')):
                    pass
                elif (func.name == 'Transpose'):
                    pass
                elif (func.name == 'Concatenate'):
                    pass
                else:
                    shape = ([1] + shape)
                var.shape.dim.extend(shape)
    func.input.extend(func_inputs)
    # --- Outputs: one Buffer variable per output, batch dim marked as -1. ---
    func_outputs = []
    output_data = []
    for (n, o) in enumerate(outputs):
        output_name = 'y{}'.format(n)
        func_outputs.append(output_name)
        var = net.variable.add()
        var.name = output_name
        var.type = 'Buffer'
        shape = list(o.d.shape)[:]
        shape = ([(- 1)] + shape)
        var.shape.dim.extend(shape)
        output_data.append(o.d.flatten())
    func.output.extend(func_outputs)
    # --- Arguments: map func_args/func_kwargs onto the protobuf param message. ---
    if ('arguments' in function):
        for (n, (arg_name, arg)) in enumerate(function['arguments'].items()):
            # e.g. func.affine_param for snake_name 'affine'.
            param = eval('func.{}_param'.format(function['snake_name']))
            if ((not func_args) and (not func_kwargs)):
                continue
            if (func.name == 'Interpolate'):
                del func_args[0]
            # Pick the n-th positional arg, else fall back to known kwargs.
            if (n < len(func_args)):
                a = func_args[n]
            elif ((func.name == 'Concatenate') or (func.name == 'Stack')):
                a = func_kwargs['axis']
            else:
                a = func_kwargs.get('keepdims')
                if (a is None):
                    # Derive default 'axes' for reductions / other ops from the
                    # first variable's rank.
                    f = ['Sum', 'Mean', 'Max', 'Min', 'Prod']
                    if ('axes' in arg_name):
                        if (func.name in f):
                            a = net.variable[0].shape.dim[:(- 1)]
                            a = [(x - 1) for x in a]
                        else:
                            a = (len(net.variable[0].shape.dim) - 2)
            if (a is not None):
                # Shift axis arguments by +1 to account for the added batch dim
                # (Concatenate/Transpose keep their raw shapes, so no shift).
                if ('axis' == arg_name):
                    if (func.name == 'Concatenate'):
                        pass
                    else:
                        a += 1
                if ('axes' in arg_name):
                    if (func.name == 'Transpose'):
                        pass
                    else:
                        if (isinstance(a, tuple) or isinstance(a, list)):
                            a = list(a)
                        else:
                            a = [a]
                        a = [(x + 1) for x in a]
                # Write the value into the protobuf field, by value kind.
                if (isinstance(a, tuple) or isinstance(a, list)):
                    if (arg['type'] == 'Shape'):
                        exec('param.{}.dim.extend(list(a))'.format(arg_name))
                    else:
                        exec('param.{}.extend(a)'.format(arg_name))
                elif isinstance(a, numpy.ndarray):
                    a = a.flatten()
                    if (arg['type'] == 'Shape'):
                        if (function['snake_name'] == 'broadcast'):
                            exec('param.{}.dim.extend([1] + list(a))'.format(arg_name))
                        else:
                            exec('param.{}.dim.extend(list(a))'.format(arg_name))
                    else:
                        exec('param.{}.extend(a)'.format(arg_name))
                elif isinstance(a, type):
                    # dtype-like argument: store its integer code.
                    exec('param.{} = {}'.format(arg_name, dtypes.np_dtpye_to_int[a]))
                elif ('repeated' in arg['type']):
                    exec('param.{}.extend([a])'.format(arg_name))
                elif (arg['type'] == 'string'):
                    exec('param.{} = "{}"'.format(arg_name, a))
                else:
                    # base_axis also shifts by the added batch dim.
                    if (arg_name == 'base_axis'):
                        a = (a + 1)
                    exec('param.{} = {}'.format(arg_name, a))
    # --- Executor: declare data/output/parameter variable bindings. ---
    exe = nnp.executor.add()
    exe.name = 'inference'
    exe.network_name = 'network1'
    for d in data_names:
        dat = exe.data_variable.add()
        dat.variable_name = d
        dat.data_name = d
    for o in func_outputs:
        out = exe.output_variable.add()
        out.variable_name = o
        out.data_name = o
    for p in parameter_names:
        par = exe.parameter_variable.add()
        par.variable_name = p
    return (nnp, input_data, output_data)
def print_flags(flags, flags_def):
    """Log the resolved hyperparameter configuration, one "key: value" per entry."""
    formatted = ['{}: {}'.format(key, val) for (key, val) in get_user_flags(flags, flags_def).items()]
    logging.info('Running training with hyperparameters: \n{}'.format(pprint.pformat(formatted)))
class BidirectionalSourceEncoder(SourceEncoder):
    """Encodes a sequence with two unidirectional encoders (forward and
    backward), each using half of *hidden_dim*.

    :param input_dim: dimensionality of the input embeddings.
    :param hidden_dim: total hidden size across both directions; must be even.
    :param rnn_cell_factory: callable (input_dim, hidden_dim) -> RNN cell.
    """

    def __init__(self, input_dim, hidden_dim, rnn_cell_factory):
        super(BidirectionalSourceEncoder, self).__init__()
        if (hidden_dim % 2) != 0:
            raise ValueError('hidden_dim must be even for BidirectionalSourceEncoder.')
        self._hidden_dim = hidden_dim
        # BUG FIX: use integer division — in Python 3 `hidden_dim / 2` yields a
        # float, which is not a valid hidden size for the RNN cell factory.
        build_encoder = (lambda: SimpleSourceEncoder(rnn_cell_factory(input_dim, hidden_dim // 2)))
        self.forward_encoder = build_encoder()
        self.backward_encoder = build_encoder()

    def hidden_dim(self):
        """Return the total (both directions combined) hidden dimensionality."""
        return self._hidden_dim

    def forward(self, input_embeds_list):
        """Run both directions; backward states are re-reversed so both lists
        align position-by-position with the input sequence."""
        reverse = (lambda seq: list(reversed(seq)))
        forward_states = self.forward_encoder(input_embeds_list)
        backward_states = reverse(self.backward_encoder(reverse(input_embeds_list)))
        return BidirectionalEncoderOutput(forward_states, backward_states)
def _write_separated_file(buf, edge_dic, weight_dic, separator, prefix=''):
dummy_prefix = object()
prefix = (prefix or dummy_prefix)
for (key, edge_val) in edge_dic.items():
for (j, value) in enumerate(edge_val):
elements = [prefix, str(key), str(value), (str(weight_dic[key][j]) + '\n')]
string = separator.join((x for x in elements if (x != dummy_prefix)))
buf.write(string) |
class ModuleMap(dict):
    """A dict keyed strictly by AST Module_Node objects that lazily creates an
    empty mapping for any module seen for the first time (defaultdict-like,
    but with key-type enforcement)."""

    def __getitem__(self, k):
        assert isinstance(k, ast_internal_classes.Module_Node)
        if k not in self:
            # Auto-vivify: first access to a module yields a fresh dict.
            self[k] = {}
        return super().__getitem__(k)

    def get(self, k):
        """Unlike dict.get, takes no default and never returns None for a
        missing key: it delegates to __getitem__, which creates the entry."""
        return self[k]

    def __setitem__(self, k, v) -> None:
        assert isinstance(k, ast_internal_classes.Module_Node)
        return super().__setitem__(k, v)
def simple_attentional_rnn(rnn_input, attention_state_list, initial_state=None):
    """Run a single-head attention decoder over *rnn_input*, attending to the
    given list of sentence-embedding states.

    Returns the (outputs, final_state) pair from seq2seq.attention_decoder.
    """
    attn_states = reshape_list2tensor(attention_state_list, len(attention_state_list), FLAGS.sentembed_size)
    cell = get_lstm_cell()
    compute_dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    rnn_outputs, rnn_state = seq2seq.attention_decoder(
        rnn_input, initial_state, attn_states, cell,
        output_size=None, num_heads=1, loop_function=None,
        dtype=compute_dtype, scope=None, initial_state_attention=False)
    return rnn_outputs, rnn_state
def get_model_visualization_name(model_name):
    """Map an internal model identifier to a human-readable display name.

    Keyword rules are checked in priority order; returns None when no rule
    matches.
    """
    rules = [
        (('bayesian', 'BBB'), 'BBB RNN'),
        (('variational',), 'Variational RNN'),
        (('vanilla', 'baseline'), 'Baseline RNN'),
        (('forest',), 'Random Forest'),
    ]
    for keywords, label in rules:
        if any(kw in model_name for kw in keywords):
            return label
    return None
class ResBlockGenerator(nn.Module):
    """Generator residual block: BN-ReLU-Upsample-Conv-BN-ReLU-Conv plus a
    skip connection (identity, or 2x upsample when stride != 1).

    NOTE(review): the skip path never changes the channel count, so callers
    must use in_channels == out_channels for the residual add to be valid —
    confirm at the call sites.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(ResBlockGenerator, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1)
        # nn.init.xavier_uniform is deprecated; xavier_uniform_ is the
        # in-place replacement with identical behavior.
        nn.init.xavier_uniform_(self.conv1.weight.data, 1.0)
        nn.init.xavier_uniform_(self.conv2.weight.data, 1.0)
        self.model = nn.Sequential(nn.BatchNorm2d(in_channels), nn.ReLU(), nn.Upsample(scale_factor=2), self.conv1, nn.BatchNorm2d(out_channels), nn.ReLU(), self.conv2)
        # Skip path: identity by default, spatial upsample when striding.
        self.bypass = nn.Sequential()
        if stride != 1:
            self.bypass = nn.Upsample(scale_factor=2)

    def forward(self, x):
        return self.model(x) + self.bypass(x)
def safe_rm(path_to_rm):
    """Remove a file, or a directory's contents plus the directory itself, but
    only delete files whose basename is whitelisted in REMOVABLE_PATHS.

    Silently returns when the path does not exist. Asserts that each removal
    actually took effect.
    """
    if not os.path.exists(path_to_rm):
        return
    removing_dir = os.path.isdir(path_to_rm)
    if removing_dir:
        targets = [f'{path_to_rm}/{fname}' for fname in os.listdir(path_to_rm)]
    else:
        targets = [path_to_rm]
    for target in targets:
        # Only plain files with whitelisted names are ever deleted.
        if not os.path.isfile(target):
            continue
        if os.path.basename(target) not in REMOVABLE_PATHS:
            continue
        os.remove(target)
        assert (not os.path.exists(target)), f'Error removing: {target}'
    if removing_dir and os.path.isdir(path_to_rm):
        # Fails (OSError) if non-whitelisted files remain — intentional guard.
        os.rmdir(path_to_rm)
        assert (not os.path.exists(path_to_rm)), f'Error removing: {path_to_rm}'
class Classify(nn.Module):
    """Classification head: 7x7 average pool, flatten, linear projection.

    :param channels_prev: channel count of the incoming feature map.
    :param num_classes: size of the output logit vector.
    """

    def __init__(self, channels_prev: int, num_classes: int):
        super().__init__()
        self.pool = nn.AvgPool2d(7)
        self.flat = nn.Flatten()
        self.fc = nn.Linear(channels_prev, num_classes)

    def forward(self, states: Tuple[(Tensor, Tensor)]) -> Tensor:
        # Only the first element of the state pair carries the features here.
        features, _ = states
        pooled = self.pool(features)
        return self.fc(self.flat(pooled))
def perturb(x):
    """Randomly perturb an integer value.

    Each branch draws a *fresh* uniform random number, so the probabilities
    compose multiplicatively: each of the four perturbations (+1, -1, +2, -2)
    is selected with probability exactly 1/6 (e.g. P(-1) = 5/6 * 1/5 = 1/6),
    and x is returned unchanged with probability 1/3.
    """
    if (random.random() < (1.0 / 6)):
        return (x + 1)
    elif (random.random() < (1.0 / 5)):
        return (x - 1)
    elif (random.random() < (1.0 / 4)):
        return (x + 2)
    elif (random.random() < (1.0 / 3)):
        return (x - 2)
    return x
def lagrange_inversion(a):
    """Compute the series coefficients of the compositional inverse of
    f(x) = sum a[i] x^i via the Lagrange inversion formula, using the
    module-level sympy symbol `x` and mpmath context `mp`.

    Assumes f is invertible as a formal power series — presumably a[0] == 0
    and a[1] != 0; TODO confirm the caller contract.

    NOTE(review): in Python 3 the final `map` returns a lazy iterator, not a
    list — confirm callers only consume it once.
    """
    n = len(a)
    f = sum(((a[i] * (x ** i)) for i in range(len(a))))
    # h = x / f truncated to order n (the generating kernel of the formula).
    h = (x / f).series(x, 0, n).removeO()
    # hpower[k] holds h**k in expanded form.
    hpower = [(h ** 0)]
    for k in range(n):
        hpower.append((hpower[(- 1)] * h).expand())
    b = [mp.mpf(0)]
    for k in range(1, n):
        # Lagrange inversion: b[k] = [x^(k-1)] h^k / k.
        b.append((hpower[k].coeff(x, (k - 1)) / k))
    b = map((lambda x: mp.mpf(x)), b)
    return b
def collate_metrics(output_data_batch, name='depth'):
    """Average metric tensors across a batch of step outputs.

    Accepts either one batch (a list of dicts) or a list of batches; only
    keys starting with *name* are kept. Returns one OrderedDict of averaged
    tensors per batch.
    """
    # Normalize the single-batch case to a list of batches.
    if isinstance(output_data_batch[0], dict):
        output_data_batch = [output_data_batch]
    collated = []
    for batch in output_data_batch:
        averaged = OrderedDict(
            (key, torch.stack([output[key] for output in batch], 0).mean(0))
            for key in batch[0]
            if key.startswith(name)
        )
        collated.append(averaged)
    return collated
def train(model, data, params):
    """Train *model* on ATIS-style data with early stopping on validation
    string accuracy; returns the path of the last saved checkpoint.

    Training runs either at utterance level or interaction level (forced
    batch size 1) depending on params.interaction_level. The learning rate
    either follows ReduceLROnPlateau (params.scheduler) or is decayed
    manually whenever validation loss fails to improve.

    NOTE(review): if string accuracy never exceeds 0.0, `last_save_file` is
    never assigned and the final return raises NameError — confirm whether
    the first epoch is guaranteed to save.
    """
    log = Logger(os.path.join(params.logdir, params.logfile), 'w')
    num_train_original = atis_data.num_utterances(data.train_data)
    log.put(('Original number of training utterances:\t' + str(num_train_original)))
    # Default: utterance-level functions; swapped below for interaction level.
    eval_fn = evaluate_utterance_sample
    trainbatch_fn = data.get_utterance_batches
    trainsample_fn = data.get_random_utterances
    validsample_fn = data.get_all_utterances
    batch_size = params.batch_size
    if params.interaction_level:
        # Interactions are processed one at a time.
        batch_size = 1
        eval_fn = evaluate_interaction_sample
        trainbatch_fn = data.get_interaction_batches
        trainsample_fn = data.get_random_interactions
        validsample_fn = data.get_all_interactions
    # Examples longer than this SQL-output limit are dropped from training.
    maximum_output_length = params.train_maximum_sql_length
    train_batches = trainbatch_fn(batch_size, max_output_length=maximum_output_length, randomize=(not params.deterministic))
    if (params.num_train >= 0):
        train_batches = train_batches[:params.num_train]
    training_sample = trainsample_fn(params.train_evaluation_size, max_output_length=maximum_output_length)
    valid_examples = validsample_fn(data.valid_data, max_output_length=maximum_output_length)
    num_train_examples = sum([len(batch) for batch in train_batches])
    num_steps_per_epoch = len(train_batches)
    log.put(('Actual number of used training examples:\t' + str(num_train_examples)))
    log.put((('(Shortened by output limit of ' + str(maximum_output_length)) + ')'))
    log.put(('Number of steps per epoch:\t' + str(num_steps_per_epoch)))
    log.put(('Batch size:\t' + str(batch_size)))
    print((((('Kept ' + str(num_train_examples)) + '/') + str(num_train_original)) + ' examples'))
    print((((('Batch size of ' + str(batch_size)) + ' gives ') + str(num_steps_per_epoch)) + ' steps per epoch'))
    # Early-stopping state.
    epochs = 0
    patience = params.initial_patience
    learning_rate_coefficient = 1.0
    previous_epoch_loss = float('inf')
    maximum_validation_accuracy = 0.0
    maximum_string_accuracy = 0.0
    countdown = int(patience)
    if params.scheduler:
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(model.trainer, mode='min')
    keep_training = True
    while keep_training:
        log.put(('Epoch:\t' + str(epochs)))
        model.set_dropout(params.dropout_amount)
        if (not params.scheduler):
            # Manual LR schedule: coefficient decays on validation plateaus.
            model.set_learning_rate((learning_rate_coefficient * params.initial_learning_rate))
        if params.interaction_level:
            epoch_loss = train_epoch_with_interactions(train_batches, params, model, randomize=(not params.deterministic))
        else:
            epoch_loss = train_epoch_with_utterances(train_batches, model, randomize=(not params.deterministic))
        log.put(('train epoch loss:\t' + str(epoch_loss)))
        # Evaluate with dropout off and gold-forced decoding.
        model.set_dropout(0.0)
        train_eval_results = eval_fn(training_sample, model, params.train_maximum_sql_length, name=os.path.join(params.logdir, 'train-eval'), write_results=True, gold_forcing=True, metrics=TRAIN_EVAL_METRICS)[0]
        for (name, value) in train_eval_results.items():
            log.put(((('train final gold-passing ' + name.name) + ':\t') + ('%.2f' % value)))
        valid_eval_results = eval_fn(valid_examples, model, params.eval_maximum_sql_length, name=os.path.join(params.logdir, 'valid-eval'), write_results=True, gold_forcing=True, metrics=VALID_EVAL_METRICS)[0]
        for (name, value) in valid_eval_results.items():
            log.put(((('valid gold-passing ' + name.name) + ':\t') + ('%.2f' % value)))
        valid_loss = valid_eval_results[Metrics.LOSS]
        valid_token_accuracy = valid_eval_results[Metrics.TOKEN_ACCURACY]
        string_accuracy = valid_eval_results[Metrics.STRING_ACCURACY]
        if params.scheduler:
            scheduler.step(valid_loss)
        if (valid_loss > previous_epoch_loss):
            # Validation loss worsened: decay the manual LR coefficient.
            learning_rate_coefficient *= params.learning_rate_ratio
            log.put(('learning rate coefficient:\t' + str(learning_rate_coefficient)))
        previous_epoch_loss = valid_loss
        saved = False
        if ((not saved) and (string_accuracy > maximum_string_accuracy)):
            # New best string accuracy: extend patience and checkpoint.
            maximum_string_accuracy = string_accuracy
            patience = (patience * params.patience_ratio)
            countdown = int(patience)
            last_save_file = os.path.join(params.logdir, ('save_' + str(epochs)))
            model.save(last_save_file)
            log.put(('maximum string accuracy:\t' + str(maximum_string_accuracy)))
            log.put(('patience:\t' + str(patience)))
            log.put(('save file:\t' + str(last_save_file)))
        if (countdown <= 0):
            keep_training = False
        countdown -= 1
        log.put(('countdown:\t' + str(countdown)))
        log.put('')
        epochs += 1
    log.put('Finished training!')
    log.close()
    return last_save_file
def print_estimates(estimates_df, truth_df, sample_time_col, truth_query_fn):
    """Print estimated vs. true runtimes for each model configuration row.

    When no user sample ratio was recorded (0.0), derive one from the cache
    blocking scheme (L2-sized user blocks shrunk until they fit in memory).
    """
    # (num_users, num_items) per dataset family, matched by substring;
    # the fallback covers the remaining (Yahoo-sized) dataset.
    dataset_dims = {'Netflix': (480189, 17770), 'KDD': (1000990, 624961)}
    default_dims = (1823179, 136736)
    for (_, row) in estimates_df.iterrows():
        model, K = row['model'], row['K']
        num_users, num_items = next(
            (dims for (key, dims) in dataset_dims.items() if key in model),
            default_dims)
        num_latent_factors = row['num_latent_factors']
        user_sample_ratio = row['user_sample_ratio']
        if user_sample_ratio == 0.0:
            # Users per block sized so a block of factor vectors fits in L2.
            users_per_block = (4 * L2_CACHE_SIZE) / (8 * num_latent_factors)
            # Halve until the block's score matrix fits in main memory.
            while (users_per_block * num_items * 8) > MAX_MEM_SIZE:
                users_per_block /= 2
            user_sample_ratio = users_per_block / num_users
        estimate_rt = row[sample_time_col] * num_users
        true_rt = truth_df.query(truth_query_fn(row))['comp_time'].min()
        print(model, K, user_sample_ratio, estimate_rt, true_rt)
def non_sphere_GB(location, orientation):
    """Gibbs-Boltzmann weight exp(-U/kT) for a boomerang configuration.

    U sums, over all blobs, gravitational potential (WEIGHT * height) and a
    screened wall repulsion. Returns 0.0 (infinite energy) if any blob sits
    below the wall at height A.
    """
    r_vectors = get_boomerang_r_vectors(location, orientation)
    U = 0.0
    for k, r_vec in enumerate(r_vectors):
        height = r_vec[2]
        if height < A:
            # Blob overlaps the wall: configuration has zero weight.
            return 0.0
        U += WEIGHT[k] * height
        # Debye-screened repulsion from the wall, diverging at contact.
        U += (REPULSION_STRENGTH * np.exp(-1.0 * (height - A) / DEBYE_LENGTH)) / (height - A)
    return np.exp(-1.0 * U / KT)
class LineEnd(_PositionToken):
    """Matches at the end of a line within the parse string, or at the very
    end of the string (consuming the newline when present)."""

    def __init__(self):
        super(LineEnd, self).__init__()
        # Newline must not be skipped as whitespace, or we could never match it.
        self.setWhitespaceChars(ParserElement.DEFAULT_WHITE_CHARS.replace('\n', ''))
        self.errmsg = 'Expected end of line'

    def parseImpl(self, instring, loc, doActions=True):
        if loc == len(instring):
            # End of input counts as end of line; nothing to return.
            return (loc + 1), []
        if loc > len(instring):
            raise ParseException(instring, loc, self.errmsg, self)
        if instring[loc] == '\n':
            return (loc + 1), '\n'
        raise ParseException(instring, loc, self.errmsg, self)
def model_setup(model):
    """Attach a checkpoint manager to *model* and restore its last checkpoint
    from config.resume_from; returns the model.

    Asserts that the resume directory exists and that a checkpoint was
    actually loaded (last_step > 0).
    """
    resume_dir = config.resume_from
    assert (len(resume_dir) > 0)
    assert os.path.isdir(resume_dir)
    model.checkpoint_manager = CheckpointManager(model, [])
    model.output_dir = resume_dir
    model.last_step = model.checkpoint_manager.load_last_checkpoint()
    assert (model.last_step > 0)
    return model
class PiecewiseLinearChannel(Channel):
    """Channel composed of several linear regions; posterior estimates from
    the regions are merged with softmax weights over their log-partitions."""

    def __init__(self, name, regions):
        self.repr_init()
        self.name = name
        self.regions = [LinearRegion(**region) for region in regions]
        self.n_regions = len(regions)

    def sample(self, Z):
        """Sample X as the sum of per-region samples (regions partition Z)."""
        return sum(region.sample(Z) for region in self.regions)

    def math(self):
        return '$\\textrm{' + self.name + '}$'

    def second_moment(self, tau_z):
        """Mixture second moment: region moments weighted by region probability."""
        moments = [region.second_moment(tau_z) for region in self.regions]
        weights = [region.proba_tau(tau_z) for region in self.regions]
        return sum(w * m for (w, m) in zip(weights, moments))

    def merge_estimates(self, rs, vs, As):
        """Merge per-region means/variances with softmax(As) weights; the Dr
        term adds the between-region spread to the mixture variance."""
        ps = softmax(As, axis=0)
        r = sum(p * mean for (p, mean) in zip(ps, rs))
        Dr = sum(
            (ps[i] * ps[j]) * ((rs[i] - rs[j]) ** 2)
            for i in range(self.n_regions)
            for j in range(i + 1, self.n_regions)
        )
        v = sum(p * var for (p, var) in zip(ps, vs)) + Dr
        return (r, v.mean())

    def _merged_posterior(self, direction, az, bz, ax, bx):
        # Collect per-region means, variances and log-partitions, then merge.
        mean_attr = direction + '_mean'
        var_attr = direction + '_variance'
        rs = [getattr(region, mean_attr)(az, bz, ax, bx) for region in self.regions]
        vs = [getattr(region, var_attr)(az, bz, ax, bx) for region in self.regions]
        As = [region.log_partitions(az, bz, ax, bx) for region in self.regions]
        return self.merge_estimates(rs, vs, As)

    def compute_forward_posterior(self, az, bz, ax, bx):
        return self._merged_posterior('forward', az, bz, ax, bx)

    def compute_backward_posterior(self, az, bz, ax, bx):
        return self._merged_posterior('backward', az, bz, ax, bx)

    def compute_log_partition(self, az, bz, ax, bx):
        As = [region.log_partitions(az, bz, ax, bx) for region in self.regions]
        return logsumexp(As, axis=0).sum()

    def beliefs_measure(self, az, ax, tau_z, f):
        return sum(region.beliefs_measure(az, ax, tau_z, f) for region in self.regions)

    def measure(self, f, zmin, zmax):
        assert zmin < zmax
        return sum(region.measure(f, zmin, zmax) for region in self.regions)
class TestSameColToken(DistanceTokenTester):
    """SameColToken should return DOM elements sharing a column with the anchor."""

    def test_execute(self, env, fields, dom, dom_elem):
        # Geometries whose [left, left+width] range should count as same-column.
        included = [(101, 5), (99, 5), (100, 0), (105, 0), (99, 1), (80, 40), (101, 1)]
        for (left, width) in included:
            new_dom = copy.deepcopy(dom)
            new_dom['children'][1]['left'] = left
            new_dom['children'][1]['width'] = width
            new_state = MiniWoBState('utt', None, new_dom)
            same_col = SameColToken(MockReturnElementSet(new_state.dom.children[0]))
            dom_elem = env.buttons[0]
            env.set_last(dom_elem)
            env.observe(new_state)
            result = same_col.execute(env)
            assert isinstance(result, same_col.return_type)
            assert isinstance(result, ElementSet)
            # Exactly the modified child (ref 2) should match.
            assert (len(result) == 1)
            assert (set([result[0].ref]) == set([2]))
        # Geometries that should NOT count as same-column: expect no matches.
        excluded = [(99, 0), (98, 1), (79, 20), (106, 0), (106, 100)]
        for (left, width) in excluded:
            new_dom = copy.deepcopy(dom)
            new_dom['children'][1]['left'] = left
            new_dom['children'][1]['width'] = width
            new_state = MiniWoBState('utt', None, new_dom)
            same_col = SameColToken(MockReturnElementSet(new_state.dom.children[0]))
            dom_elem = env.buttons[0]
            env.set_last(dom_elem)
            env.observe(new_state)
            result = same_col.execute(env)
            assert isinstance(result, same_col.return_type)
            assert isinstance(result, ElementSet)
            assert (len(result) == 0)
        # Two children placed in the anchor's column: both should be returned.
        new_dom = copy.deepcopy(dom)
        new_dom['children'][1]['left'] = 103
        new_dom['children'][1]['width'] = 10
        new_dom['children'][2]['left'] = 99
        new_dom['children'][2]['width'] = 10
        new_state = MiniWoBState('utt', None, new_dom)
        same_col = SameColToken(MockReturnElementSet(new_state.dom.children[0]))
        dom_elem = env.buttons[0]
        env.set_last(dom_elem)
        env.observe(new_state)
        result = same_col.execute(env)
        assert isinstance(result, same_col.return_type)
        assert isinstance(result, ElementSet)
        assert (len(result) == 2)
def combine_examples(corpus_ex):
    """Collapse consecutive examples that share a sentence number.

    Consecutive entries with equal ``sent_num`` are merged into one example
    whose ``targetframedict`` is the union of the group's dicts (later entries
    win on key clashes); the first example object of each group is kept and
    mutated. Reports the reduction on stderr and returns the merged list.
    """
    combined_ex = [corpus_ex[0]]
    for ex in corpus_ex[1:]:
        last = combined_ex[-1]
        if ex.sent_num == last.sent_num:
            # Merge into the running example for this sentence.
            merged = last.targetframedict.copy()
            merged.update(ex.targetframedict)
            last.targetframedict = merged
        else:
            combined_ex.append(ex)
    sys.stderr.write('Combined {} instances in data into {} instances.\n'.format(
        len(corpus_ex), len(combined_ex)))
    return combined_ex
def calculate_theta_fwhm_cdr_s1(ekev, qnC):
    """Angular divergence (FWHM, radians) of the SASE1 photon beam.

    Empirical CDR fit: the coefficient shrinks with the square root of the
    bunch charge *qnC* (nC) and the whole expression falls off as the photon
    energy *ekev* (keV) to the 0.85 power.
    """
    coeff = 17.2 - 6.4 * np.sqrt(qnC)
    return coeff * 1e-06 / ekev ** 0.85
def default_loader(path: str) -> Any:
    """Load the image at *path* with whichever backend torchvision is using.

    Dispatches to the accimage loader when that backend is active, otherwise
    falls back to PIL.
    """
    from torchvision import get_image_backend

    loader = accimage_loader if get_image_backend() == 'accimage' else pil_loader
    return loader(path)
class EigenSparseMatrixPrinter():
    """GDB pretty-printer for Eigen::SparseMatrix (compressed or uncompressed)."""

    def __init__(self, val):
        # Look through references to reach the underlying matrix object.
        type = val.type
        if (type.code == gdb.TYPE_CODE_REF):
            type = type.target()
        self.type = type.unqualified().strip_typedefs()
        tag = self.type.tag
        # Extract the template parameter list from the type tag, e.g.
        # "Eigen::SparseMatrix<double, 1, int>" -> "double,1,int".
        regex = re.compile('\\<.*\\>')
        m = regex.findall(tag)[0][1:(- 1)]
        template_params = m.split(',')
        template_params = [x.replace(' ', '') for x in template_params]
        # Second template argument (when present) carries the storage options;
        # bit 0 is the RowMajor flag.
        self.options = 0
        if (len(template_params) > 1):
            self.options = template_params[1]
        self.rowMajor = (int(self.options) & 1)
        self.innerType = self.type.template_argument(0)
        self.val = val
        # m_data holds the packed (values, indices) storage.
        self.data = self.val['m_data']
        self.data = self.data.cast(self.innerType.pointer())

    class _iterator(_MatrixEntryIterator):
        """Visits every (row, col) cell, looking stored values up in the CSR/CSC arrays."""

        def __init__(self, rows, cols, val, rowMajor):
            super(EigenSparseMatrixPrinter._iterator, self).__init__(rows, cols, rowMajor)
            self.val = val

        def __next__(self):
            (row, col) = super(EigenSparseMatrixPrinter._iterator, self).__next__()
            # Map (row, col) onto the outer/inner axes of the storage order.
            outer = (row if self.rowMajor else col)
            inner = (col if self.rowMajor else row)
            start = self.val['m_outerIndex'][outer]
            # Uncompressed matrices track per-outer counts in m_innerNonZeros;
            # compressed ones delimit each slice by the next outer index.
            end = ((start + self.val['m_innerNonZeros'][outer]) if self.val['m_innerNonZeros'] else self.val['m_outerIndex'][(outer + 1)])
            data = self.val['m_data']
            if (start >= end):
                item = 0
            elif ((end > start) and (inner == data['m_indices'][(end - 1)])):
                # Fast path: the last stored entry of this slice.
                item = data['m_values'][(end - 1)]
            else:
                # Binary-search the slice (minus its last entry, checked above).
                indices = [data['m_indices'][x] for x in range(int(start), (int(end) - 1))]
                idx = (int(start) + bisect_left(indices, inner))
                if ((idx < end) and (data['m_indices'][idx] == inner)):
                    item = data['m_values'][idx]
                else:
                    item = 0  # structural zero
            return (('[%d,%d]' % (row, col)), item)

    def children(self):
        # No storage -> nothing to enumerate.
        if self.data:
            return self._iterator(self.rows(), self.cols(), self.val, self.rowMajor)
        return iter([])

    def rows(self):
        # The outer dimension is rows for row-major storage, columns otherwise.
        return (self.val['m_outerSize'] if self.rowMajor else self.val['m_innerSize'])

    def cols(self):
        return (self.val['m_innerSize'] if self.rowMajor else self.val['m_outerSize'])

    def to_string(self):
        if self.data:
            status = ('not compressed' if self.val['m_innerNonZeros'] else 'compressed')
        else:
            status = 'empty'
        dimensions = ('%d x %d' % (self.rows(), self.cols()))
        layout = ('row' if self.rowMajor else 'column')
        return ('Eigen::SparseMatrix<%s>, %s, %s major, %s' % (self.innerType, dimensions, layout, status))
class DatasetFolder(data.Dataset):
    """LMDB-backed image dataset whose keys are listed in a text file.

    With ``patch_dataset=True`` the root holds several LMDB shards (one per
    subdirectory) and sample index ranges are mapped onto shards; otherwise a
    single LMDB environment is opened directly at *root*.
    """

    def __init__(self, root, list_path, transform=None, target_transform=None, patch_dataset=False):
        self.root = root
        self.patch_dataset = patch_dataset
        if patch_dataset:
            # One read-only transaction per shard directory under root.
            self.txn = []
            for path in os.listdir(root):
                lmdb_path = os.path.join(root, path)
                if os.path.isdir(lmdb_path):
                    env = lmdb.open(lmdb_path, readonly=True, lock=False, readahead=False, meminit=False)
                    self.txn.append(env.begin(write=False))
        else:
            self.env = lmdb.open(root, readonly=True, lock=False, readahead=False, meminit=False)
            self.txn = self.env.begin(write=False)
        self.list_path = list_path
        self.samples = [image_name.strip() for image_name in open(list_path)]
        if len(self.samples) == 0:
            raise RuntimeError(('Found 0 files in subfolders of: ' + root) + '\n')
        self.transform = transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        """Return the (transformed image, label) pair for the index-th key."""
        img_name = self.samples[index]
        if self.patch_dataset:
            # Samples are assumed to be split evenly across 10 shards, with
            # the remainder landing in the last one — TODO confirm layout.
            txn_index = index // (len(self.samples) // 10)
            if txn_index == 10:
                txn_index = 9
            txn = self.txn[txn_index]
        else:
            txn = self.txn
        datum = Datum()
        data_bin = txn.get(img_name.encode('ascii'))
        if data_bin is None:
            raise RuntimeError(f'Key {img_name} not found')
        datum.ParseFromString(data_bin)
        # Stored images follow the OpenCV BGR convention; PIL expects RGB.
        sample = Image.fromarray(cv2.cvtColor(datum.image, cv2.COLOR_BGR2RGB))
        # BUG FIX: np.int was removed in NumPy 1.24; it was only ever an alias
        # for the builtin int, so use int directly.
        target = int(datum.label)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (sample, target)

    def __len__(self):
        return len(self.samples)

    def __repr__(self):
        fmt_str = ('Dataset ' + self.__class__.__name__) + '\n'
        fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += ' Root Location: {}\n'.format(self.root)
        tmp = ' Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + (' ' * len(tmp))))
        tmp = ' Target Transforms (if any): '
        fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + (' ' * len(tmp))))
        return fmt_str
def main():
    """Randomly subsample a data file, keeping the header line plus roughly
    frac-to-take of the remaining non-blank rows, and write the result."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--filename', type=str, required=True, help='Local filename of data')
    parser.add_argument('--frac-to-take', type=float, required=True, help='How much of the data to store in the new filename')
    parser.add_argument('--new-filename', type=str, required=True, help='New filename for the data')
    parser.add_argument('--data-dir', required=False, default='/homes/gws/sofias6/data/', help='Base data dir')
    args = parser.parse_args()

    # Read the header separately; collect only non-blank data rows.
    with open(args.data_dir + args.filename, 'r') as old_f:
        first_line = old_f.readline()
        instances = [line for line in old_f if line.strip() != '']
    shuffle(instances)

    took = 0
    out_path = args.data_dir + args.new_filename
    with open(out_path, 'w') as f:
        f.write(first_line)
        for instance in instances:
            # Independent coin flip per row.
            if random() < args.frac_to_take:
                f.write(instance)
                took += 1
    print('Wrote ' + str(took) + ' / ' + str(len(instances)) + ' instances to file ' + str(out_path))
@implements_to_string  # NOTE(review): decorator reconstructed — source was mangled to bare `_to_string`; confirm against upstream jinja2
class StrictUndefined(Undefined):
    """An undefined that forbids nearly every operation.

    Iteration, stringification, length, truthiness, equality and hashing all
    raise via Undefined._fail_with_undefined_error instead of silently
    behaving like an empty value.
    """
    __slots__ = ()
    __iter__ = __str__ = __len__ = __nonzero__ = __eq__ = __ne__ = __bool__ = __hash__ = Undefined._fail_with_undefined_error
class EnumerateDataset(Dataset):
    """Dataset wrapper that yields ``(index, item)`` pairs."""

    def __init__(self, dataset):
        # Underlying dataset providing the actual samples.
        self.dataset = dataset

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        item = self.dataset[idx]
        return (idx, item)
def conv_3_3_hook(module, input, output):
    """Forward hook that stashes the VGG conv3_3 activation in a module-level
    global for later retrieval. Always returns None so the module output is
    left unchanged."""
    global vgg_conv3_3
    vgg_conv3_3 = output
    return None
class TestFairseqEncoderModelBase(TestBaseFairseqModelBase):
    """Shared harness for testing FairseqEncoderModel subclasses.

    Running the base class itself is skipped; concrete subclasses set up a
    model and a dummy input and inherit the forward/probability checks.
    """

    # BUG FIX: unittest invokes setUpClass on the class, so it must be a
    # classmethod; without the decorator it is called with no arguments and
    # raises a TypeError.
    @classmethod
    def setUpClass(cls):
        if cls is TestFairseqEncoderModelBase:
            raise unittest.SkipTest('Skipping test case in base')
        super().setUpClass()

    def setUpModel(self, model_cls, extra_args_setters=None):
        """Build *model_cls* from its default parser args, optionally tweaked
        by the callables in *extra_args_setters*."""
        self.assertTrue(
            issubclass(model_cls, FairseqEncoderModel),
            msg='This class is only used for testing FairseqEncoderModel')
        (task, parser) = get_dummy_task_and_parser()
        model_cls.add_args(parser)
        args = parser.parse_args([])
        if extra_args_setters is not None:
            for args_setter in extra_args_setters:
                args_setter(args)
        model = model_cls.build_model(args, task)
        self.model = model

    def setUpInput(self, input=None):
        """Install a dummy batch; encoders take no prev_output_tokens."""
        self.forward_input = get_dummy_input() if input is None else input
        self.forward_input.pop('prev_output_tokens', None)

    def setUp(self):
        super().setUp()

    def test_forward(self):
        """Forward pass must produce a well-formed encoder output."""
        if self.forward_input and self.model:
            bsz = self.forward_input['src_tokens'].size(0)
            forward_output = self.model.forward(**self.forward_input)
            (succ, msg) = check_encoder_output(forward_output, batch_size=bsz)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output

    def test_get_normalized_probs(self):
        """Normalized probabilities must be batch-first tensors for both
        log_probs=True and log_probs=False."""
        if self.model and self.forward_input:
            forward_output = self.model.forward(**self.forward_input)
            logprob = self.model.get_normalized_probs(forward_output, log_probs=True)
            prob = self.model.get_normalized_probs(forward_output, log_probs=False)
            self.assertTrue(hasattr(logprob, 'batch_first'))
            self.assertTrue(hasattr(prob, 'batch_first'))
            self.assertTrue(torch.is_tensor(logprob))
            self.assertTrue(torch.is_tensor(prob))
def _c_string_literal(s):
s = s.replace('\\', '\\\\')
s = s.replace('"', '\\"')
s = s.replace('\n', '\\n')
return '"{}"'.format(s) |
def test_save_setup_anndata(adata, save_path):
    """After manager setup with batch/labels/protein fields, the AnnData
    object must still be writable to .h5ad."""
    generic_setup_adata_manager(adata, batch_key='batch', labels_key='labels', protein_expression_obsm_key='protein_expression', protein_names_uns_key='protein_names')
    adata.write(os.path.join(save_path, 'test.h5ad'))
class AverageMeter(object):
    """Tracks the latest value and a running (weighted) average of a metric.

    Attributes:
        val: most recent value passed to update().
        avg: running weighted mean of all values so far.
        sum: weighted sum of values.
        count: total weight (number of samples).
    """

    def __init__(self):
        # reset() establishes all four attributes; the previous version also
        # re-assigned the identical values here, which was redundant.
        self.reset()

    def reset(self):
        """Zero all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record value *val* observed *n* times and refresh the average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
@pytest.mark.usefixtures('spark')  # NOTE(review): decorator reconstructed — source was mangled to bare `.usefixtures('spark')`; confirm mark vs fixture upstream
def interactions_timestamp_spark_dataset(spark):
    """Build a small timestamped user/item interaction log as a Spark DataFrame.

    Returns the DataFrame plus the column names and cardinalities that
    downstream dataset-construction tests expect.
    """
    events = spark.createDataFrame(pd.DataFrame({
        'user_id': [0, 0, 1, 1, 1, 2],
        'item_id': [0, 1, 0, 2, 3, 1],
        'timestamp': [0, 1, 2, 3, 4, 5],
    }))
    return {
        'interactions': events,
        'user_col': 'user_id',
        'item_col': 'item_id',
        'timestamp_col': 'timestamp',
        'users_cardinality': 3,
        'items_cardinality': 4,
    }
class STGClassificationModel(MLPModel, ModelIOKeysMixin):
    """MLP classifier with stochastic-gate (STG) feature selection.

    A FeatureSelector gates the input features; during training the loss is
    cross-entropy plus lam * mean(gate regularizer), pushing unneeded gates
    closed.
    """

    def __init__(self, input_dim, nr_classes, hidden_dims, device, batch_norm=None, dropout=None, activation='relu', sigma=1.0, lam=0.1):
        super().__init__(input_dim, nr_classes, hidden_dims, batch_norm=batch_norm, dropout=dropout, activation=activation)
        self.FeatureSelector = FeatureSelector(input_dim, sigma, device)
        # NOTE(review): nn.Softmax() without dim relies on deprecated implicit
        # dimension selection; dim=1 is presumably intended — confirm.
        self.softmax = nn.Softmax()
        self.loss = nn.CrossEntropyLoss()
        # Aliases into the selector so forward() can reach its parameters.
        self.reg = self.FeatureSelector.regularizer
        self.lam = lam
        self.mu = self.FeatureSelector.mu
        self.sigma = self.FeatureSelector.sigma

    def forward(self, feed_dict):
        """Training: return (total loss, {}, {}); eval: prob/pred/logits dict."""
        x = self.FeatureSelector(self._get_input(feed_dict))
        logits = super().forward(x)
        if self.training:
            loss = self.loss(logits, self._get_label(feed_dict))
            # Regularizer evaluated at the shifted/scaled gate means.
            reg = torch.mean(self.reg(((self.mu + 0.5) / self.sigma)))
            total_loss = (loss + (self.lam * reg))
            return (total_loss, dict(), dict())
        else:
            return self._compose_output(logits)

    def _compose_output(self, logits):
        # Class probabilities plus the argmax prediction.
        value = self.softmax(logits)
        (_, pred) = value.max(dim=1)
        return dict(prob=value, pred=pred, logits=logits)
@unittest.skipIf((not workspace.C.has_mkldnn), 'Skipping as we do not have mkldnn.')  # NOTE(review): decorator reconstructed from mangled source
class TestMKLBasic(test_util.TestCase):
    """Benchmarks comparing CPU vs MKL-DNN execution of identical nets.

    Each test feeds the same blobs to both devices, asserts numerical
    agreement, then benchmarks both paths.
    """

    def testFCSpeed(self):
        """FC layer: CPU/MKL output parity, then runtime comparison."""
        X = np.random.rand(1, 256, 6, 6).astype(np.float32) - 0.5
        W = np.random.rand(4096, 9216).astype(np.float32) - 0.5
        b = np.random.rand(4096).astype(np.float32) - 0.5
        mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
        workspace.FeedBlob('X', X)
        workspace.FeedBlob('W', W)
        workspace.FeedBlob('b', b)
        workspace.FeedBlob('X_mkl', X, device_option=mkl_do)
        workspace.FeedBlob('W_mkl', W, device_option=mkl_do)
        workspace.FeedBlob('b_mkl', b, device_option=mkl_do)
        net = core.Net('test')
        net.FC(['X', 'W', 'b'], 'Y')
        net.FC(['X_mkl', 'W_mkl', 'b_mkl'], 'Y_mkl', device_option=mkl_do)
        workspace.CreateNet(net)
        workspace.RunNet(net)
        # Outputs must agree before timing means anything.
        np.testing.assert_allclose(workspace.FetchBlob('Y'), workspace.FetchBlob('Y_mkl'), atol=0.01, rtol=0.01)
        runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
        print('FC CPU runtime {}, MKL runtime {}.'.format(runtime[1], runtime[2]))

    def testConvReluMaxPoolFcSpeed(self):
        """Conv->Relu->MaxPool->FC chain: CPU/MKL parity and runtime."""
        X = np.random.rand(1, 256, 13, 13).astype(np.float32) - 0.5
        W = np.random.rand(256, 256, 3, 3).astype(np.float32) - 0.5
        b = np.random.rand(256).astype(np.float32) - 0.5
        w_fc = np.random.rand(4096, 9216).astype(np.float32) - 0.5
        b_fc = np.random.rand(4096).astype(np.float32) - 0.5
        mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
        workspace.FeedBlob('X', X)
        workspace.FeedBlob('W', W)
        workspace.FeedBlob('b', b)
        workspace.FeedBlob('w_fc', w_fc)
        workspace.FeedBlob('b_fc', b_fc)
        workspace.FeedBlob('X_mkl', X, device_option=mkl_do)
        workspace.FeedBlob('W_mkl', W, device_option=mkl_do)
        workspace.FeedBlob('b_mkl', b, device_option=mkl_do)
        workspace.FeedBlob('w_fc_mkl', w_fc, device_option=mkl_do)
        workspace.FeedBlob('b_fc_mkl', b_fc, device_option=mkl_do)
        net = core.Net('test')
        net.Conv(['X', 'W', 'b'], 'C', pad=1, stride=1, kernel=3)
        net.Relu('C', 'R')
        net.MaxPool('R', 'P', stride=2, kernel=3)
        net.FC(['P', 'w_fc', 'b_fc'], 'Y')
        net.Conv(['X_mkl', 'W_mkl', 'b_mkl'], 'C_mkl', pad=1, stride=1, kernel=3, device_option=mkl_do)
        net.Relu('C_mkl', 'R_mkl', device_option=mkl_do)
        net.MaxPool('R_mkl', 'P_mkl', stride=2, kernel=3, device_option=mkl_do)
        net.FC(['P_mkl', 'w_fc_mkl', 'b_fc_mkl'], 'Y_mkl', device_option=mkl_do)
        workspace.CreateNet(net)
        workspace.RunNet(net)
        np.testing.assert_allclose(workspace.FetchBlob('Y'), workspace.FetchBlob('Y_mkl'), atol=0.01, rtol=0.01)
        runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
        # Consistency fix: report the benchmark like testFCSpeed does (the
        # runtime was previously computed but never reported).
        print('Conv-Relu-MaxPool-FC CPU runtime {}, MKL runtime {}.'.format(runtime[1], runtime[2]))
def _BroadcastComputedParamsSingleHost(devices, model, use_nccl=False):
    """Broadcast every computed (non-gradient) param across the host's devices."""
    if len(devices) == 1:
        # A single device already holds every param; nothing to sync.
        return
    for name in model._computed_param_names:
        _Broadcast(devices, model, model.net, name, use_nccl)
def f(questions, start):
    """Run the QA model over '[SEP]'-joined question/context strings.

    For each input, returns the flattened start logits (or end logits when
    *start* is falsy) as a detached numpy array.
    """
    outs = []
    for q in questions:
        question, context = q.split('[SEP]')
        encoding = pmodel.tokenizer(question, context)
        # Batch of one: each tokenizer field becomes a (1, seq_len) tensor.
        batch = {key: torch.tensor(encoding[key]).reshape(1, -1) for key in encoding}
        out = pmodel.model.forward(**batch)
        logits = out.start_logits if start else out.end_logits
        outs.append(logits.reshape(-1).detach().numpy())
    return outs
class LinearScheduler():
    """TF variable that decays linearly from initial_value to 0 over final_step."""

    def __init__(self, initial_value, final_step, name):
        self.final_step = final_step
        self.initial_value = initial_value
        # The scheduled value lives in the graph so consumers can read it.
        self.variable = tf.Variable(initial_value, name=name)
        self.decayed_ph = tf.placeholder(tf.float32)
        self.decay_op = self.variable.assign(self.decayed_ph)

    def decay(self, step):
        """Assign the variable its linearly-decayed value for *step* (floored at 0)."""
        fraction = 1.0 - float(step) / self.final_step
        if fraction < 0.0:
            fraction = 0.0
        tf.get_default_session().run(
            self.decay_op, feed_dict={self.decayed_ph: fraction * self.initial_value})

    def get_variable(self):
        return self.variable
class ClassAwareSampler(Sampler):
    """Sampler that draws a class first, then instances within that class.

    Rare classes are oversampled: an epoch has (largest class size) * (number
    of classes) samples, drawn num_samples_cls at a time per visited class.
    """

    def __init__(self, data_source, num_samples_cls=4):
        n_cls = data_source.num_classes
        self.class_iter = RandomCycleIter(range(n_cls))
        # Bucket sample indices by their class label.
        per_class_indices = [[] for _ in range(n_cls)]
        for idx, label in enumerate(data_source.labels):
            per_class_indices[label].append(idx)
        self.data_iter_list = [RandomCycleIter(bucket) for bucket in per_class_indices]
        self.num_samples = max(len(bucket) for bucket in per_class_indices) * len(per_class_indices)
        self.num_samples_cls = num_samples_cls

    def __iter__(self):
        return class_aware_sample_generator(
            self.class_iter, self.data_iter_list, self.num_samples, self.num_samples_cls)

    def __len__(self):
        return self.num_samples
def test_siblings_get_binary_examples_2d_1(digraph, features_2d, labels):
    """SiblingsPolicy for node '1': all 2-D rows returned, the first two
    labeled positive and the rest negative, with no sample weights."""
    policy = SiblingsPolicy(digraph, features_2d, labels)
    ground_truth_x = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]
    ground_truth_y = [1, 1, 0, 0, 0, 0, 0, 0]
    (x, y, weights) = policy.get_binary_examples('1')
    assert_array_equal(ground_truth_x, x)
    assert_array_equal(ground_truth_y, y)
    assert (weights is None)
def convert_code_to_features(code, tokenizer, label, args):
    """Tokenize *code* into a fixed-length InputFeatures example.

    Whitespace is normalized, the token stream is truncated to leave room for
    the CLS/SEP specials, and the id sequence is right-padded with the pad
    token up to args.block_size.
    """
    normalized = ' '.join(code.split())
    body_tokens = tokenizer.tokenize(normalized)[: args.block_size - 2]
    source_tokens = [tokenizer.cls_token] + body_tokens + [tokenizer.sep_token]
    source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
    pad_length = args.block_size - len(source_ids)
    source_ids = source_ids + [tokenizer.pad_token_id] * pad_length
    return InputFeatures(source_tokens, source_ids, 0, label)
def get_config() -> ml_collections.ConfigDict:
    """Hyperparameters for ShapeNet point-cloud part segmentation training."""
    config = ml_collections.ConfigDict()
    # Data.
    config.object_category = 'Airplane'
    config.in_memory = True
    config.batch_size = 32
    config.num_points = 1024
    config.val_split = 0.2
    # Optimization: step-decay the LR by decay_factor every drop_every epochs.
    config.initial_lr = 0.001
    config.drop_every = 20
    config.decay_factor = 0.5
    config.epochs = 100
    config.use_mp = True
    config.use_tpus = False
    # BUG FIX: this line was an unterminated string literal (the URL was lost
    # when the source was mangled) — a syntax error. Placeholder restored;
    # TODO: fill in the real metadata URL before use.
    config.metadata_url = ''
    config.artifact_location = 'gs://pointnet-segmentation'
    config.samples_per_shard = 512
    # Random jitter range applied as augmentation.
    config.jitter_minval = -0.005
    config.jitter_maxval = 0.005
    return config
class SeparableConv2d_same(nn.Module):
    """Depthwise-separable conv with batch norm after each stage.

    The input is explicitly padded (fixed_padding) before the depthwise conv,
    giving 'same'-style spatial behavior even with dilation.
    """

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, bias=False, padding=0):
        super(SeparableConv2d_same, self).__init__()
        # groups=inplanes -> one filter per input channel (depthwise).
        self.depthwise = nn.Conv2d(inplanes, inplanes, kernel_size, stride, padding, dilation, groups=inplanes, bias=bias)
        self.depthwise_bn = nn.BatchNorm2d(inplanes)
        # 1x1 conv mixes channels (pointwise).
        self.pointwise = nn.Conv2d(inplanes, planes, 1, 1, 0, 1, 1, bias=bias)
        self.pointwise_bn = nn.BatchNorm2d(planes)

    def forward(self, x):
        # Pad for the effective (dilated) kernel before the depthwise stage.
        x = fixed_padding(x, self.depthwise.kernel_size[0], rate=self.depthwise.dilation[0])
        x = self.depthwise(x)
        x = self.depthwise_bn(x)
        x = self.pointwise(x)
        x = self.pointwise_bn(x)
        return x
def move_and_detach(ts, device):
    """Recursively detach every tensor inside *ts* and move it to *device*.

    Non-tensor leaves pass through unchanged; nested_map preserves the
    container structure.
    """
    def transfer(leaf):
        if isinstance(leaf, torch.Tensor):
            return leaf.detach().to(device)
        return leaf

    return nested_map(transfer, ts)
class TIntIntHI(object):
    """SWIG-generated proxy for a Snap int-to-int hash-table iterator.

    Every method forwards to the _snap C extension; iteration state lives in
    the wrapped C++ object.
    """
    # Ownership flag of the underlying C++ object (True -> Python frees it).
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args):
        _snap.TIntIntHI_swiginit(self, _snap.new_TIntIntHI(*args))

    def __eq__(self, HashKeyDatI):
        return _snap.TIntIntHI___eq__(self, HashKeyDatI)

    def __lt__(self, HashKeyDatI):
        return _snap.TIntIntHI___lt__(self, HashKeyDatI)

    def __ref__(self):
        return _snap.TIntIntHI___ref__(self)

    def __call__(self):
        return _snap.TIntIntHI___call__(self)

    def __deref__(self):
        return _snap.TIntIntHI___deref__(self)

    def Next(self):
        # Advance the iterator to the next (key, value) pair.
        return _snap.TIntIntHI_Next(self)

    def IsEmpty(self):
        return _snap.TIntIntHI_IsEmpty(self)

    def IsEnd(self):
        return _snap.TIntIntHI_IsEnd(self)

    def GetKey(self):
        # Key of the current pair.
        return _snap.TIntIntHI_GetKey(self)

    def GetDat(self, *args):
        # Value of the current pair.
        return _snap.TIntIntHI_GetDat(self, *args)
    __swig_destroy__ = _snap.delete_TIntIntHI
@pytest.mark.parametrize('parameter, message', (('userId', 'No such parameter in `GET /users/{user_id}`: `userId`. Did you mean `user_id`?'), ('what?', 'No such parameter in `GET /users/{user_id}`: `what?`.')))
@pytest.mark.operations('create_user', 'get_user', 'update_user')
def test_misspelled_parameter(schema_url, parameter, message):
    """A link targeting a nonexistent parameter must raise a helpful error,
    including a did-you-mean hint when a close match exists.

    NOTE(review): both decorators were mangled in this revision (bare
    `.parametrize(...)` / `.operations(...)` lines); they are reconstructed
    here as pytest marks — confirm against the upstream test suite.
    """
    schema = schemathesis.from_uri(schema_url)
    add_link(schema, '#/paths/~1users~1{user_id}/get', parameters={f'header.{parameter}': '$response.body#/id'})
    case = schema['/users/{user_id}']['GET'].make_case()
    link = schema['/users/']['POST'].links['201']['#/paths/~1users~1{user_id}/get']
    with pytest.raises(ValueError, match=re.escape(message)):
        link.set_data(case, elapsed=1.0, context=expressions.ExpressionContext(case=case, response=None))
def register_Ns3RlcTag_methods(root_module, cls):
    """Register Python bindings for ns3::RlcTag (pybindgen-generated style).

    Adds the copy/default/timestamp constructors, the standard ns3::Tag
    virtual interface (serialization, type ids, printing), and the sender
    timestamp accessors.
    """
    # Constructors: copy, default, and from a sender timestamp.
    cls.add_constructor([param('ns3::RlcTag const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Time', 'senderTimestamp')])
    # ns3::Tag virtual interface.
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSenderTimestamp', 'ns3::Time', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    # Timestamp setter paired with the getter above.
    cls.add_method('SetSenderTimestamp', 'void', [param('ns3::Time', 'senderTimestamp')])
    return
def test_adjoint_conjugate():
    """Scaling a LinearOperator by 1j: its adjoint must match the conjugate
    transpose of the equally-scaled dense matrix."""
    dense = np.array([[1j]])
    scaled_op = 1j * interface.aslinearoperator(dense)
    scaled_dense = 1j * dense
    vec = np.array([1])
    # Forward products agree ...
    assert_equal(scaled_op.dot(vec), scaled_dense.dot(vec))
    # ... and the adjoint equals the conjugate transpose.
    assert_equal(scaled_op.H.dot(vec), scaled_dense.T.conj().dot(vec))
def test_static_cls():
    """static_cls must resolve to the class itself from both the class and an
    instance, and assignment through either must reach the setter."""
    instance = m.TestProperties()

    assert m.TestProperties.static_cls is m.TestProperties
    assert instance.static_cls is m.TestProperties

    def check_self(self):
        # The static property hands the class, not the instance, as `self`.
        assert self is m.TestProperties

    # Both write paths should invoke the setter with check_self.
    m.TestProperties.static_cls = check_self
    instance.static_cls = check_self
def main():
    """Train a probing model on the configured task/language and save results."""
    args = get_args()
    trainloader, devloader, testloader, n_classes, n_words = get_data_loaders(
        args.data_path, args.task, args.language, args.representation,
        args.pca_size, args.batch_size)
    print('Language: %s Train size: %d Dev size: %d Test size: %d' % (
        args.language, len(trainloader.dataset), len(devloader.dataset),
        len(testloader.dataset)))
    print(args)
    model = get_model(n_classes, n_words, args)
    train(trainloader, devloader, model, args.eval_batches, args.wait_iterations)
    train_results, dev_results, test_results = eval_all(
        model, trainloader, devloader, testloader)
    save_checkpoints(model, train_results, dev_results, test_results, args.save_path)
class CategoricalColumnTransformer(BaseColumnTransformer):
    """Abstract base for transformers over categorical columns."""

    def num_classes(self):
        """Number of distinct categories; subclasses must implement."""
        raise NotImplementedError()
def save_kernels(arch):
    """Bundle the demo's Taichi kernels and fields into an AOT module and save
    it under $TAICHI_AOT_FOLDER_PATH (which must be set)."""
    module = ti.aot.Module()
    # All kernels are non-templated, hence the empty template_args.
    for kernel in (fill_img, block1_deactivate_all, activate, paint, check_img_value):
        module.add_kernel(kernel, template_args={})
    module.add_field('x', x)
    module.add_field('img', img)
    assert 'TAICHI_AOT_FOLDER_PATH' in os.environ.keys()
    out_dir = str(os.environ['TAICHI_AOT_FOLDER_PATH'])
    module.save(out_dir)
class GeneralMulAttConvLayer(MessagePassing):
    """Graph conv layer with multiplicative (dot-product) attention.

    Messages are linearly transformed node features; per-edge attention is a
    scaled elementwise product between source and target heads, softmaxed
    over each target's incoming edges, and optionally combined with
    symmetric-normalized edge weights. The normalization result can be cached
    across forward passes.
    """

    def __init__(self, in_channels, out_channels, improved=False, cached=False, bias=True, **kwargs):
        super(GeneralMulAttConvLayer, self).__init__(aggr=cfg.gnn.agg, **kwargs)
        self.heads = cfg.gnn.att_heads
        # Round channel counts down to a multiple of the head count.
        self.in_channels = int((in_channels // self.heads) * self.heads)
        self.out_channels = int((out_channels // self.heads) * self.heads)
        self.improved = improved
        self.cached = cached
        self.normalize = cfg.gnn.normalize_adj
        self.negative_slope = 0.2
        self.head_channels = out_channels // self.heads
        self.scaling = self.head_channels ** (-0.5)
        self.linear_msg = nn.Linear(in_channels, out_channels, bias=False)
        self.bias_att = Parameter(torch.Tensor(out_channels))
        # Attention logits are divided by sqrt(out_channels).
        self.scaler = torch.sqrt(torch.tensor(out_channels, dtype=torch.float))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        zeros(self.bias)
        zeros(self.bias_att)
        self.cached_result = None
        self.cached_num_edges = None

    # BUG FIX: `norm` takes no `self` yet is invoked as `self.norm(...)` with
    # five positional arguments; without @staticmethod the bound call would
    # pass `self` as `edge_index` and raise a TypeError.
    @staticmethod
    def norm(edge_index, num_nodes, edge_weight=None, improved=False, dtype=None):
        """Symmetric D^-1/2 A D^-1/2 normalization with self-loops added."""
        if edge_weight is None:
            edge_weight = torch.ones((edge_index.size(1),), dtype=dtype, device=edge_index.device)
        # 'improved' GCN uses weight-2 self-loops.
        fill_value = 1 if (not improved) else 2
        (edge_index, edge_weight) = add_remaining_self_loops(edge_index, edge_weight, fill_value, num_nodes)
        (row, col) = edge_index
        deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
        deg_inv_sqrt = deg.pow(-0.5)
        # Isolated nodes get zero instead of inf.
        deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
        return (edge_index, (deg_inv_sqrt[row] * edge_weight) * deg_inv_sqrt[col])

    def forward(self, x, edge_index, edge_weight=None):
        if self.cached and (self.cached_result is not None):
            if edge_index.size(1) != self.cached_num_edges:
                raise RuntimeError('Cached {} number of edges, but found {}. Please disable the caching behavior of this layer by removing the `cached=True` argument in its constructor.'.format(self.cached_num_edges, edge_index.size(1)))
        if (not self.cached) or (self.cached_result is None):
            self.cached_num_edges = edge_index.size(1)
            if self.normalize:
                (edge_index, norm) = self.norm(edge_index, x.size(self.node_dim), edge_weight, self.improved, x.dtype)
            else:
                norm = edge_weight
            self.cached_result = (edge_index, norm)
        (edge_index, norm) = self.cached_result
        x = self.linear_msg(x)
        return self.propagate(edge_index, x=x, norm=norm)

    def message(self, edge_index_i, x_i, x_j, norm, size_i):
        """Per-edge message: x_j weighted by scaled dot-product attention
        (and by the normalization coefficient when present)."""
        x_i = x_i.view(-1, self.heads, self.head_channels)
        x_j = x_j.view(-1, self.heads, self.head_channels)
        alpha = ((x_i * x_j) + self.bias_att).sum(dim=-1) / self.scaler
        # Softmax over each target node's incoming edges.
        alpha = softmax(alpha, edge_index_i, num_nodes=size_i)
        alpha = alpha.view(-1, self.heads, 1)
        return ((norm.view(-1, 1) * x_j) * alpha) if (norm is not None) else (x_j * alpha)

    def update(self, aggr_out):
        # Merge heads back into a flat channel dimension and add the bias.
        aggr_out = aggr_out.view(-1, self.out_channels)
        if self.bias is not None:
            aggr_out = aggr_out + self.bias
        return aggr_out

    def __repr__(self):
        return '{}({}, {}, {})'.format(self.__class__.__name__, self.in_channels, self.out_channels, self.heads)
def send_encrypted(channel, message):
    """DES-encrypt *message*, send it over *channel*, and return the ciphertext.

    SECURITY(review): DES is cryptographically broken, and the key here is a
    hard-coded empty string (PyCryptodome's DES requires an 8-byte key, so
    this likely raises at runtime). Replace with an authenticated modern
    cipher (e.g. AES-GCM) and a real key source before use.
    """
    cipher = DES.new('')
    encrypted_message = cipher.encrypt(message)
    channel.send(encrypted_message)
    return encrypted_message
def add_activation_summary(var):
    """Attach TensorBoard summaries for an activation tensor *var*.

    Records the activation histogram plus the fraction of zero entries
    (sparsity). BUG FIX: tf.histogram_summary/tf.scalar_summary were removed
    in TensorFlow 1.0; the tf.summary.* equivalents (available in the TF1 API
    this file already uses, e.g. tf.placeholder) are used instead.
    """
    tf.summary.histogram(var.op.name + '/activation', var)
    tf.summary.scalar(var.op.name + '/sparsity', tf.nn.zero_fraction(var))
class TerminalDef(Serialize):
    """Definition of a grammar terminal: a named pattern with a priority."""

    __serialize_fields__ = ('name', 'pattern', 'priority')
    __serialize_namespace__ = (PatternStr, PatternRE)

    def __init__(self, name, pattern, priority=1):
        # Guard against raw strings/regexes sneaking in un-wrapped.
        assert isinstance(pattern, Pattern), pattern
        self.name = name
        self.pattern = pattern
        self.priority = priority

    def __repr__(self):
        return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern)
def load_dict(filename_):
    """Unpickle and return the object stored in *filename_*.

    NOTE(review): pickle.load can execute arbitrary code — only call this on
    trusted files.
    """
    with open(filename_, 'rb') as handle:
        return pickle.load(handle)
class BLEUScorer(object):
    """Corpus-level BLEU (up to 4-grams) with brevity penalty, computed
    directly from n-gram counts (Papineni et al., 2002)."""

    def __init__(self):
        pass

    def score(self, hypothesis, corpus, n=1):
        """Return BLEU over aligned (hypotheses, references) pairs.

        With n == 1 only the first hypothesis of each pair is scored. The
        first hypothesis/reference of each pair gets GO_/EOS_ sentinels
        before counting.
        """
        # count: total hypothesis n-gram counts per order;
        # clip_count: counts clipped by per-reference maxima (precision numerators).
        count = [0, 0, 0, 0]
        clip_count = [0, 0, 0, 0]
        r = 0
        c = 0
        weights = [0.25, 0.25, 0.25, 0.25]
        for (hyps, refs) in zip(hypothesis, corpus):
            # Accept either a nested list of hypotheses or a flat list of strings.
            if (type(hyps[0]) is list):
                hyps = [hyp.split() for hyp in hyps[0]]
            else:
                hyps = [hyp.split() for hyp in hyps]
            refs = [ref.split() for ref in refs]
            # Sentence-boundary sentinels on the first entry only.
            refs[0] = (([u'GO_'] + refs[0]) + [u'EOS_'])
            hyps[0] = (([u'GO_'] + hyps[0]) + [u'EOS_'])
            for (idx, hyp) in enumerate(hyps):
                for i in range(4):
                    hypcnts = Counter(ngrams(hyp, (i + 1)))
                    cnt = sum(hypcnts.values())
                    count[i] += cnt
                    # Clip each hypothesis n-gram by its max count in any reference.
                    max_counts = {}
                    for ref in refs:
                        refcnts = Counter(ngrams(ref, (i + 1)))
                        for ng in hypcnts:
                            max_counts[ng] = max(max_counts.get(ng, 0), refcnts[ng])
                    clipcnt = dict(((ng, min(count, max_counts[ng])) for (ng, count) in hypcnts.items()))
                    clip_count[i] += sum(clipcnt.values())
                # Effective reference length: the reference closest in length
                # to this hypothesis.
                bestmatch = [1000, 1000]
                for ref in refs:
                    if (bestmatch[0] == 0):
                        break
                    diff = abs((len(ref) - len(hyp)))
                    if (diff < bestmatch[0]):
                        bestmatch[0] = diff
                        bestmatch[1] = len(ref)
                r += bestmatch[1]
                c += len(hyp)
                if (n == 1):
                    break
        # Smoothing constant keeps log() defined for zero counts.
        p0 = 1e-07
        # Brevity penalty and smoothed modified precisions.
        bp = (1 if (c > r) else math.exp((1 - (float(r) / float(c)))))
        p_ns = [((float(clip_count[i]) / float((count[i] + p0))) + p0) for i in range(4)]
        s = math.fsum(((w * math.log(p_n)) for (w, p_n) in zip(weights, p_ns) if p_n))
        bleu = (bp * math.exp(s))
        return bleu
(derivate=True, coderize=True)
_loss
def custom_gaussian_focal_loss(pred, gaussian_target, pos_inds=None, alpha: float=(- 1), beta: float=4, gamma: float=2, sigmoid_clamp: float=0.0001, ignore_high_fp: float=(- 1.0)):
pred = torch.clamp(pred.sigmoid_(), min=sigmoid_clamp, max=(1 - sigmoid_clamp))
neg_weights = torch.pow((1 - gaussian_target), beta)
pos_pred = pred[pos_inds]
pos_loss = (torch.log(pos_pred) * torch.pow((1 - pos_pred), gamma))
neg_loss = ((torch.log((1 - pred)) * torch.pow(pred, gamma)) * neg_weights)
if (ignore_high_fp > 0):
not_high_fp = (pred < ignore_high_fp).float()
neg_loss = (not_high_fp * neg_loss)
pos_loss = (- pos_loss.sum())
neg_loss = (- neg_loss.sum())
if (alpha >= 0):
pos_loss = (alpha * pos_loss)
neg_loss = ((1 - alpha) * neg_loss)
return (pos_loss + neg_loss) |
def test_err(capfd):
    """raw_err bypasses Python-level stderr redirection; captured_err honors it."""
    msg = 'Something that should not show up in log'

    # Raw C-level write: reaches the process stderr, never the redirect target.
    stream = StringIO()
    with redirect_stderr(stream):
        m.raw_err(msg)
    stdout, stderr = capfd.readouterr()
    assert stdout == ''
    assert stderr == msg
    assert stream.getvalue() == ''

    # Captured write: lands only in the redirected Python stream.
    stream = StringIO()
    with redirect_stderr(stream):
        m.captured_err(msg)
    stdout, stderr = capfd.readouterr()
    assert stdout == ''
    assert stderr == ''
    assert stream.getvalue() == msg
def global_tempdir_manager():
    """Generator that installs a fresh ExitStack as the process-wide tempdir
    manager and restores the previous one on exit.

    NOTE(review): the `(yield)` body strongly suggests this is meant to be
    wrapped with @contextlib.contextmanager at the definition site — confirm
    whether the decorator was lost from this revision.
    """
    global _tempdir_manager
    with ExitStack() as stack:
        previous = _tempdir_manager
        _tempdir_manager = stack
        try:
            yield
        finally:
            _tempdir_manager = previous
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.