code stringlengths 101 5.91M |
|---|
def _shuffle_and_restrict(examples: List[InputExample], num_examples: int, seed: int=42) -> List[InputExample]:
    """Deterministically shuffle ``examples`` in place and keep the first
    ``num_examples`` of them.

    When ``num_examples`` is zero, negative, or >= ``len(examples)``, the
    list is returned untouched (no shuffle, no truncation).
    """
    if 0 < num_examples < len(examples):
        rng = random.Random(seed)
        rng.shuffle(examples)
        return examples[:num_examples]
    return examples
class DetectionEvaluator(object):
    """Mention-detection scorer.

    ``golden`` maps sentence id -> list of gold mentions whose first three
    fields are ``(begin_token, end_token, type)`` with ``end`` exclusive.
    ``pred`` maps sentence id -> list of ``(_, _, type, token_id)`` tuples.
    A prediction is correct when its token falls inside a gold span of the
    same type; each gold mention is credited at most once, and duplicate
    predictions on an already-credited mention are not counted as errors.
    """

    def __init__(self):
        pass

    def eval(self, pred, golden):
        """Return ``(precision, recall, F1)`` of ``pred`` against ``golden``."""
        total_mentions = 0.0
        pred_error = 0.0
        pred_correct = 0.0
        for sent_id, mentions in golden.items():
            total_mentions += len(mentions)
            if sent_id not in pred:
                continue
            matched = [False] * len(mentions)
            for _, _, tag, token_id in pred[sent_id]:
                found = False
                for pos, mention in enumerate(mentions):
                    # Token must land in the span and carry the same type.
                    if mention[0] <= token_id < mention[1] and tag == mention[2]:
                        found = True
                        if not matched[pos]:
                            matched[pos] = True
                            break
                        # Already credited: keep scanning so an overlapping,
                        # still-unmatched gold span can be credited instead.
                if not found:
                    pred_error += 1
            pred_correct += sum(matched)
        if pred_correct == 0:
            return (0.0, 0.0, 0.0)
        precision = pred_correct / (pred_correct + pred_error)
        recall = pred_correct / total_mentions
        f1 = (2 * (precision * recall)) / (precision + recall)
        return (precision, recall, f1)
def stop_afl(cargs):
    """Stop any running afl-fuzz instance(s) under ``cargs.afl_fuzzing_dir``.

    Handles both layouts: a single-instance dir containing ``fuzzer_stats``
    directly, and a parent dir whose subdirectories each hold their own
    ``fuzzer_stats``.

    Returns:
        bool: True if at least one running instance was signalled,
        False on bad arguments or when no running instance was found.
    """
    if not cargs.afl_fuzzing_dir:
        print('[*] Must set --afl-fuzzing-dir')
        return False
    if not is_dir(cargs.afl_fuzzing_dir):
        print("[*] Doesn't look like AFL fuzzing directory '%s' exists." % cargs.afl_fuzzing_dir)
        return False

    pid_regex = r'fuzzer_pid\s+\:\s+(\d+)'

    def _kill_from_stats(stats_file):
        # Returns True when a live afl-fuzz PID was found and SIGTERM'd.
        afl_pid = get_running_pid(stats_file, pid_regex)
        if afl_pid:
            print('[+] Stopping running afl-fuzz instance, PID: %d' % afl_pid)
            os.kill(afl_pid, signal.SIGTERM)
            return True
        return False

    # Single-instance layout: fuzzer_stats sits directly in the fuzzing dir.
    stats_file = os.path.join(cargs.afl_fuzzing_dir, 'fuzzer_stats')
    if os.path.exists(stats_file):
        if _kill_from_stats(stats_file):
            return True
        print('[-] No running afl-fuzz instance')
        return False

    # Multi-instance layout: each subdirectory has its own fuzzer_stats.
    found = False
    for entry in os.listdir(cargs.afl_fuzzing_dir):
        stats_file = os.path.join(cargs.afl_fuzzing_dir, entry, 'fuzzer_stats')
        if os.path.exists(stats_file) and _kill_from_stats(stats_file):
            found = True
    if not found:
        print('[-] No running afl-fuzz instance')
        return False
    return True
def Convolutional_Block(inputs, shortcut, num_filters, name, is_training):
    """Two stacked conv1d (kernel 3, stride 1, SAME) + ReLU layers, with an
    optional residual connection.

    NOTE(review): ``is_training`` is accepted but never used in this block.
    Variables come from ``he_normal`` / ``regularizer`` defined at module
    scope.
    """
    with tf.variable_scope(f'conv_block_{num_filters}_{name}'):
        for layer_idx in range(2):
            with tf.variable_scope(f'conv1d_{layer_idx}'):
                in_channels = inputs.get_shape()[2]
                W = tf.get_variable(name='W', shape=[3, in_channels, num_filters],
                                    initializer=he_normal, regularizer=regularizer)
                conv = tf.nn.conv1d(inputs, W, stride=1, padding='SAME')
                inputs = tf.nn.relu(conv)
    if shortcut is not None:
        return inputs + shortcut
    return inputs
class EisensteinSubmodule_g1_Q(EisensteinSubmodule_gH_Q):
    # Specialization of the Gamma_H Eisenstein submodule; presumably for
    # Gamma1(N) over Q (judging by the name) — behavior inherited otherwise.
    def _parameters_character(self):
        # The "character" parameter used when enumerating Eisenstein series
        # parameters is the level of this space.
        return self.level()
class MultiAgentActionSpace(list):
    """A joint action space for multiple agents, behaving as a plain list of
    per-agent ``gym`` spaces (one entry per agent)."""

    def __init__(self, ma_space):
        for agent_space in ma_space:
            # Raise instead of `assert`: validation must survive `python -O`.
            if not isinstance(agent_space, gym.spaces.space.Space):
                raise TypeError(
                    'Every element of ma_space must be a gym.spaces.space.Space, '
                    'got %r' % (agent_space,))
        super(MultiAgentActionSpace, self).__init__(ma_space)

    def sample(self):
        """Return one joint action: a list with one sample per agent space."""
        return [agent_space.sample() for agent_space in self]
class DebertaForMaskedLM(metaclass=DummyObject):
    # Auto-generated placeholder used when the torch backend is missing:
    # any attempt to construct it raises via requires_backends.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def update_learning_rate(optimizer, new_lr, param_group=None):
    """Set ``new_lr`` on the selected optimizer param groups.

    Args:
        optimizer: object exposing ``param_groups`` (list of dicts with 'lr').
        new_lr: learning rate to install.
        param_group: iterable of group indices to touch; all groups if None.

    Groups already at ``new_lr`` are left untouched; changed groups get the
    previous rate stored under 'prev_lr' and the change is logged.
    """
    target_groups = range(len(optimizer.param_groups)) if param_group is None else param_group
    for idx in target_groups:
        group = optimizer.param_groups[idx]
        old_lr = group['lr']
        if old_lr == new_lr:
            continue
        group['lr'] = new_lr
        group['prev_lr'] = old_lr
        logger.info('Changing lr from %.2g to %.2g' % (old_lr, new_lr))
def main():
    """Entry point: build the GDWCT model and run the requested phase."""
    args = parse_args()
    if args is None:
        exit()
    session_config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=session_config) as sess:
        gan = GDWCT(sess, args)
        gan.build_model()
        show_all_variables()
        if args.phase == 'train':
            gan.train()
            print(' [*] Training finished!')
        if args.phase == 'test':
            gan.style_guide_test()
            print(' [*] Guide test finished!')
def export_2d_annotation(root_path, info_path, version):
    """Export 2D bbox annotations in COCO format from Lyft info files.

    Args:
        root_path (str): Root directory of the raw Lyft data.
        info_path (str): Path to the info file; output is written alongside
            it as ``<info_path minus last 4 chars>.coco.json``.
        version (str): Dataset version subdirectory name.
    """
    # Fix: the original called the undefined name `warning.warn`; the stdlib
    # module is `warnings`, and the category belongs in the second argument.
    import warnings
    warnings.warn('2D annotations are not used on the Lyft dataset. The function export_2d_annotation will be deprecated.', DeprecationWarning)
    camera_types = ['CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_FRONT_LEFT', 'CAM_BACK', 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT']
    lyft_infos = mmcv.load(info_path)['infos']
    lyft = Lyft(data_path=osp.join(root_path, version), json_path=osp.join(root_path, version, version), verbose=True)
    # COCO category records, one per Lyft category, id = index in the list.
    cat2Ids = [dict(id=lyft_categories.index(cat_name), name=cat_name) for cat_name in lyft_categories]
    coco_ann_id = 0
    coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids)
    for info in mmcv.track_iter_progress(lyft_infos):
        for cam in camera_types:
            cam_info = info['cams'][cam]
            coco_infos = get_2d_boxes(lyft, cam_info['sample_data_token'], visibilities=['', '1', '2', '3', '4'])
            (height, width, _) = mmcv.imread(cam_info['data_path']).shape
            coco_2d_dict['images'].append(dict(file_name=cam_info['data_path'], id=cam_info['sample_data_token'], width=width, height=height))
            for coco_info in coco_infos:
                if coco_info is None:
                    continue
                coco_info['segmentation'] = []
                coco_info['id'] = coco_ann_id
                coco_2d_dict['annotations'].append(coco_info)
                coco_ann_id += 1
    mmcv.dump(coco_2d_dict, f'{info_path[:(- 4)]}.coco.json')
def write_metric(node_name, task_name, metric_name, metric, round_number):
    """Log a scalar metric under the tag ``node/task/metric`` at the given round."""
    # Called for its side effect only — presumably it initializes the
    # module-level `writer` used below. TODO(review): confirm get_writer()
    # actually sets the global `writer`; its return value is discarded here.
    get_writer()
    writer.add_scalar(f'{node_name}/{task_name}/{metric_name}', metric, round_number)
def CuspForms(group=1, weight=2, base_ring=None, use_cache=True, prec=defaults.DEFAULT_PRECISION):
    """Return the cuspidal submodule of the space of modular forms for the
    given group, weight, and base ring (thin wrapper over ModularForms)."""
    return ModularForms(group, weight, base_ring, use_cache=use_cache, prec=prec).cuspidal_submodule()
class LogCaptureHandler(logging.Handler):
    """``logging.Handler`` that appends every emitted record to the record
    list shared with a capture object."""

    def __init__(self, log_capture):
        logging.Handler.__init__(self)
        # Share the capture object's list so callers see records as they land.
        self.records = log_capture.records

    def emit(self, record):
        """Store the raw LogRecord (no formatting applied)."""
        self.records.append(record)
class StateOKDataset(PretrainDataset):
    """Pretraining dataset over "state+ok" image pairs with optional language.

    Elements are read either from the local filesystem (``stream=False``) or
    from GCS buckets (``stream=True``). Each element provides two state
    frames and (unless ``no_lang``) tokenized language from a per-split
    language index; a random ``lang_dropout`` fraction of elements has its
    language zeroed past the first token.
    """

    def __init__(self, epoch: int, index_path: Path, img_transform: Compose, lang_dropout: Optional[float]=None, stream: bool=False, prefix: Optional[Path]=None, no_lang: bool=False, is_val: bool=False, do_retry: bool=True, n_retries: int=3) -> None:
        super().__init__()
        self.index_path = index_path
        self.stream = stream
        self.is_val = is_val
        self.val_loaded = False
        self.epoch = epoch
        self.transform = img_transform
        self.elements = None
        self.prefix = prefix
        self.no_lang = no_lang
        # Treat "no dropout given" and "dropout of 0" identically.
        self.lang_dropout = 0.0 if (lang_dropout is None or lang_dropout == 0) else lang_dropout
        self.dropout_indices = set()
        # Rank-dependent offset into the pool of bucket clients.
        self.r = N_CORES * get_rank()
        self.do_retry = do_retry
        self.n_retries = n_retries
        if not self.no_lang:
            language_path = 'val-language-index.json' if self.is_val else 'train-language-index.json'
            if not self.stream:
                with open(self.index_path / language_path, 'r') as f:
                    self.language_index = json.load(f)
            else:
                blob = BUCKETS[self.r].blob(str(self.prefix / 'index' / language_path))
                self.language_index = json.loads(blob.download_as_string())
        self.set_epoch(self.epoch)

    def _sample_dropout_indices(self) -> None:
        # Pick the random subset of elements whose language will be zeroed.
        n_drop = int(self.lang_dropout * len(self.elements))
        self.dropout_indices = set(np.random.choice(len(self.elements), n_drop, replace=False))

    def set_epoch(self, epoch: int) -> None:
        """(Re)load the element index for ``epoch``; validation indices are
        loaded once and cached (``val_loaded``)."""
        if not self.stream:
            if self.is_val and not self.val_loaded:
                with open(self.index_path / 'state+ok' / 'validation-batches.json', 'r') as f:
                    self.elements = json.load(f)
                self._sample_dropout_indices()
                self.val_loaded = True
            elif not self.is_val:
                # BUG FIX: was 'state + ok' (with spaces), inconsistent with
                # every other reference to the 'state+ok' directory.
                with open(self.index_path / 'state+ok' / f'train-epoch={epoch}-batches.json', 'r') as f:
                    self.elements = json.load(f)
                self._sample_dropout_indices()
        elif self.is_val and not self.val_loaded:
            blob = BUCKETS[self.r].blob(str(self.prefix / 'index' / 'state+ok' / 'validation-batches.json'))
            self.elements = json.loads(blob.download_as_string())
            for element in self.elements:
                # Keep only the trailing "<dir>/<file>" of each state path.
                element['states'] = ['/'.join(x.split('/')[(- 2):]) for x in element['states']]
            self._sample_dropout_indices()
            self.val_loaded = True
        elif not self.is_val:
            blob = BUCKETS[self.r].blob(str(self.prefix / 'index' / 'state+ok' / f'train-epoch={epoch}-batches.json'))
            self.elements = json.loads(blob.download_as_string())
            for element in self.elements:
                element['states'] = ['/'.join(x.split('/')[(- 2):]) for x in element['states']]
            self._sample_dropout_indices()

    def __getitem__(self, index: int) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]:
        """Return ``(imgs, lang, lang_mask)`` — or just ``imgs`` when
        ``no_lang`` is set. ``imgs`` stacks the element's two state frames."""
        vid = self.elements[index]['vid']
        if not self.no_lang:
            lang = torch.tensor(self.language_index[vid]['input_ids'], dtype=torch.int64)
            lang_mask = torch.tensor(self.language_index[vid]['attention_mask'], dtype=torch.int64)
            if index in self.dropout_indices:
                # Language dropout: zero everything past the first token.
                lang[1:] *= 0
                lang_mask[1:] *= 0
        if not self.stream:
            imgs = self.elements[index]['states']
            imgs = torch.stack([self.transform(read_image(s)) for s in imgs])
            if not self.no_lang:
                return (imgs, lang, lang_mask)
            else:
                return imgs
        # Streaming path: download each frame from the bucket, retrying on
        # transient transport / decode failures.
        worker_info = get_worker_info()
        r = (self.r + worker_info.id) if worker_info is not None else self.r
        frame_paths, current_frame = list(self.elements[index]['states']), None
        for _attempt in range(self.n_retries):
            try:
                imgs = []
                for _current_idx, current_frame in enumerate(frame_paths):
                    split = 'val' if self.is_val else 'train'
                    blob = BUCKETS[r].blob(str(self.prefix / split / current_frame))
                    fobj = BytesIO()
                    blob.download_to_file(fobj)
                    fobj.seek(0)
                    img_tensor = pil_to_tensor(Image.open(fobj))
                    imgs.append(self.transform(img_tensor))
                assert len(imgs) == 2, 'Something went awry with try/except in StateOK Dataset...'
                imgs = torch.stack(imgs)
                if not self.no_lang:
                    return (imgs, lang, lang_mask)
                else:
                    return imgs
            except (NotFound, TransportError, UnidentifiedImageError, OSError) as e:
                print(f'=>> BROKEN FILE :: {current_frame}')
                if not self.do_retry:
                    raise e
                else:
                    continue
        raise ValueError(f"Failed to fix states `{self.elements[index]['states']}` w/ {self.n_retries} retries...")

    def __len__(self) -> int:
        return len(self.elements)
def test_floordiv():
    """Floor division through a proxy matches the plain value and is
    recorded in the usage trace with an int argument type."""
    plain = 7
    wrapped = tt.ObjectProxy(plain)
    assert (plain // 2) == (wrapped // 2)
    trace = tt.UsageTraceNode.from_proxy(wrapped)
    assert int in trace.children['__floordiv__'].arg_types[0]
def training_loop(run_dir='.', training_set_kwargs={}, validation_set_kwargs={}, data_loader_kwargs={}, G_kwargs={}, D_kwargs={}, G_opt_kwargs={}, D_opt_kwargs={}, augment_kwargs=None, loss_kwargs={}, metrics=[], random_seed=0, num_gpus=1, rank=0, batch_size=4, batch_gpu=4, ema_kimg=10, ema_rampup=0.05, G_reg_interval=None, D_reg_interval=16, augment_p=0, ada_target=None, ada_interval=4, ada_kimg=500, total_kimg=25000, kimg_per_tick=4, image_snapshot_ticks=50, network_snapshot_ticks=50, resume_pkl=None, resume_kimg=0, cudnn_benchmark=True, abort_fn=None, progress_fn=None):
    """GAN training loop (StyleGAN-style) for layout generation.

    Builds the training/validation sets, constructs G/D (plus an EMA copy of
    G), optionally resumes from a pickle, then trains until ``total_kimg``
    thousand images have been shown, emitting image/network snapshots,
    metrics, and stats every tick.

    NOTE(review): the ``{}``/``[]`` defaults are mutable; none of them is
    mutated in this function (each is copied or splatted), but callers
    should still pass fresh dicts.
    """
    # --- Initialize: timing, device, seeds, and backend flags. ---
    start_time = time.time()
    device = torch.device('cuda', rank)
    np.random.seed(((random_seed * num_gpus) + rank))
    torch.manual_seed(((random_seed * num_gpus) + rank))
    torch.backends.cudnn.benchmark = cudnn_benchmark
    # TF32 disabled: matmuls/convs run in full fp32.
    torch.backends.cuda.matmul.allow_tf32 = False
    torch.backends.cudnn.allow_tf32 = False
    conv2d_gradfix.enabled = True
    grid_sample_gradfix.enabled = True
    # --- Load training and validation sets. ---
    if (rank == 0):
        print('Loading training set...')
    training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs)
    training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed)
    training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=(batch_size // num_gpus), **data_loader_kwargs))
    validation_set = dnnlib.util.construct_class_by_name(**validation_set_kwargs)
    if (rank == 0):
        print()
        print('Num training images: ', len(training_set))
        print('Num validation images: ', len(validation_set))
        print('Bbox patch shape:', training_set.patch_shape)
        print('Label shape:', training_set.label_shape)
        print()
    # --- Construct networks (G, D, and an EMA copy of G). ---
    if (rank == 0):
        print('Constructing networks...')
    common_kwargs = dict(num_bbox_labels=training_set.num_bbox_labels, img_channels=training_set.num_channels, img_height=training_set.height, img_width=training_set.width, background_size=training_set.background_size_for_training, c_dim=training_set.label_dim)
    G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device)
    D = dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train().requires_grad_(False).to(device)
    G_ema = copy.deepcopy(G).eval()
    # Warm-start all three networks from pretrained UP-DETR weights
    # (strict=False: only matching keys are loaded).
    G.load_state_dict(torch.load('pretrained/up-detr-pre-training-60ep-imagenet.pth')['model'], strict=False)
    D.load_state_dict(torch.load('pretrained/up-detr-pre-training-60ep-imagenet.pth')['model'], strict=False)
    G_ema.load_state_dict(torch.load('pretrained/up-detr-pre-training-60ep-imagenet.pth')['model'], strict=False)
    # --- Resume from an earlier snapshot pickle, if given. ---
    if ((resume_pkl is not None) and (rank == 0)):
        print(f'Resuming from "{resume_pkl}"')
        with dnnlib.util.open_url(resume_pkl) as f:
            resume_data = legacy.load_network_pkl(f)
        for (name, module) in [('G', G), ('D', D), ('G_ema', G_ema)]:
            misc.copy_params_and_buffers(resume_data[name], module, require_all=False)
    # --- Print network summary tables on one fixed batch. ---
    if (rank == 0):
        (W_page_fixed_train, H_page_fixed_train, bbox_real_fixed_train, bbox_class_fixed_train, bbox_text_fixed_train, bbox_patch_fixed_train, bbox_patch_orig_fixed_train, bbox_patch_mask_fixed_train, padding_mask_fixed_train, background_fixed_train, background_orig_fixed_train, z_fixed_train, c_fixed_train) = setup_snapshot(training_set, batch_size, batch_gpu, G.z_dim, device)
        bbox_real_temp = bbox_real_fixed_train[0].clone().detach().to(device)
        bbox_class_temp = bbox_class_fixed_train[0].clone().detach().to(device)
        bbox_text_temp = list(bbox_text_fixed_train[0])
        bbox_patch_temp = bbox_patch_fixed_train[0].clone().detach().to(device)
        padding_mask_temp = padding_mask_fixed_train[0].clone().detach().to(device)
        background_temp = background_fixed_train[0].clone().detach().to(device)
        z_temp = torch.empty(z_fixed_train[0].shape, dtype=z_fixed_train[0].dtype, device=device)
        c_temp = torch.empty(c_fixed_train[0].shape, dtype=c_fixed_train[0].dtype, device=device)
        (bbox_fake_temp, _, _, _, _) = misc.print_module_summary(G, [z_temp, bbox_class_temp, bbox_real_temp, bbox_text_temp, bbox_patch_temp, padding_mask_temp, background_temp, c_temp, True])
        misc.print_module_summary(D, [bbox_fake_temp, bbox_class_temp, bbox_text_temp, bbox_patch_temp, padding_mask_temp, background_temp, c_temp, True])
    # --- Set up augmentation (ADA) if requested. ---
    if (rank == 0):
        print('Setting up augmentation...')
    augment_pipe = None
    ada_stats = None
    if ((augment_kwargs is not None) and ((augment_p > 0) or (ada_target is not None))):
        augment_pipe = dnnlib.util.construct_class_by_name(**augment_kwargs).train().requires_grad_(False).to(device)
        augment_pipe.p.copy_(torch.as_tensor(augment_p))
        if (ada_target is not None):
            ada_stats = training_stats.Collector(regex='Loss/signs/real')
    # --- Distribute parameters/buffers from rank 0 to all GPUs. ---
    if (rank == 0):
        print(f'Distributing across {num_gpus} GPUs...')
    for module in [G, D, G_ema, augment_pipe]:
        if ((module is not None) and (num_gpus > 1)):
            for param in misc.params_and_buffers(module):
                torch.distributed.broadcast(param, src=0)
    # --- Set up training phases (lazy regularization splits main/reg). ---
    if (rank == 0):
        print('Setting up training phases...')
    loss = dnnlib.util.construct_class_by_name(device=device, G=G, D=D, augment_pipe=augment_pipe, **loss_kwargs)
    phases = []
    for (name, module, opt_kwargs, reg_interval) in [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]:
        if (reg_interval is None):
            opt = dnnlib.util.construct_class_by_name(params=module.parameters(), **opt_kwargs)
            phases += [dnnlib.EasyDict(name=(name + 'both'), module=module, opt=opt, interval=1)]
        else:
            # Lazy regularization: rescale lr/betas so the main phase keeps
            # the same effective step size.
            mb_ratio = (reg_interval / (reg_interval + 1))
            opt_kwargs = dnnlib.EasyDict(opt_kwargs)
            opt_kwargs.lr = (opt_kwargs.lr * mb_ratio)
            opt_kwargs.betas = [(beta ** mb_ratio) for beta in opt_kwargs.betas]
            opt = dnnlib.util.construct_class_by_name(module.parameters(), **opt_kwargs)
            phases += [dnnlib.EasyDict(name=(name + 'main'), module=module, opt=opt, interval=1)]
            phases += [dnnlib.EasyDict(name=(name + 'reg'), module=module, opt=opt, interval=reg_interval)]
    for phase in phases:
        phase.start_event = None
        phase.end_event = None
        if (rank == 0):
            phase.start_event = torch.cuda.Event(enable_timing=True)
            phase.end_event = torch.cuda.Event(enable_timing=True)
    # --- Export real-sample images for visual reference. ---
    grid_size = None
    grid_z = None
    grid_c = None
    if (rank == 0):
        print('Exporting sample images...')
        save_image(torch.cat(bbox_real_fixed_train), torch.cat(bbox_class_fixed_train), (~ torch.cat(padding_mask_fixed_train)), training_set.colors, os.path.join(run_dir, 'train_layouts_real.png'), W_page_fixed_train, H_page_fixed_train)
        save_real_image(torch.cat(bbox_real_fixed_train), torch.cat(bbox_real_fixed_train), torch.cat(bbox_patch_orig_fixed_train), (~ torch.cat(padding_mask_fixed_train)), os.path.join(run_dir, 'train_images_real.png'), W_page_fixed_train, H_page_fixed_train)
        save_real_image_with_background(torch.cat(bbox_real_fixed_train), torch.cat(bbox_real_fixed_train), torch.cat(bbox_patch_orig_fixed_train), (~ torch.cat(padding_mask_fixed_train)), torch.cat(background_orig_fixed_train), os.path.join(run_dir, 'train_images_with_background_real.png'), W_page_fixed_train, H_page_fixed_train)
        (W_page_fixed_val, H_page_fixed_val, bbox_real_fixed_val, bbox_class_fixed_val, bbox_text_fixed_val, bbox_patch_fixed_val, bbox_patch_orig_fixed_val, bbox_patch_mask_fixed_val, padding_mask_fixed_val, background_fixed_val, background_orig_fixed_val, z_fixed_val, c_fixed_val) = setup_snapshot(validation_set, batch_size, batch_gpu, G.z_dim, device)
        save_image(torch.cat(bbox_real_fixed_val), torch.cat(bbox_class_fixed_val), (~ torch.cat(padding_mask_fixed_val)), training_set.colors, os.path.join(run_dir, 'val_layouts_real.png'), W_page_fixed_val, H_page_fixed_val)
        save_real_image(torch.cat(bbox_real_fixed_val), torch.cat(bbox_real_fixed_val), torch.cat(bbox_patch_orig_fixed_val), (~ torch.cat(padding_mask_fixed_val)), os.path.join(run_dir, 'val_images_real.png'), W_page_fixed_val, H_page_fixed_val)
        save_real_image_with_background(torch.cat(bbox_real_fixed_val), torch.cat(bbox_real_fixed_val), torch.cat(bbox_patch_orig_fixed_val), (~ torch.cat(padding_mask_fixed_val)), torch.cat(background_orig_fixed_val), os.path.join(run_dir, 'val_images_with_background_real.png'), W_page_fixed_val, H_page_fixed_val)
    # --- Initialize logging (stats collector, jsonl, tensorboard). ---
    if (rank == 0):
        print('Initializing logs...')
    stats_collector = training_stats.Collector(regex='.*')
    stats_metrics = dict()
    stats_jsonl = None
    stats_tfevents = None
    if (rank == 0):
        stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt')
        try:
            import torch.utils.tensorboard as tensorboard
            stats_tfevents = tensorboard.SummaryWriter(run_dir)
        except ImportError as err:
            print('Skipping tfevents export:', err)
    # --- Main training loop. ---
    if (rank == 0):
        print(f'Training for {total_kimg} kimg...')
        print()
    cur_nimg = (resume_kimg * 1000)
    cur_tick = 0
    tick_start_nimg = cur_nimg
    tick_start_time = time.time()
    maintenance_time = (tick_start_time - start_time)
    batch_idx = 0
    if (progress_fn is not None):
        progress_fn(0, total_kimg)
    while True:
        # Fetch one batch and split it into per-GPU chunks of batch_gpu.
        with torch.autograd.profiler.record_function('data_fetch'):
            (phase_samples, phase_real_c) = next(training_set_iterator)
            phase_bbox_real = phase_samples['bboxes'].to(device).to(torch.float32).split(batch_gpu)
            phase_bbox_class = phase_samples['labels'].to(device).to(torch.int64).split(batch_gpu)
            phase_samples_text = list(map(list, zip(*phase_samples['texts'])))
            phase_bbox_text = split_list(phase_samples_text, batch_gpu)
            phase_bbox_patch = phase_samples['patches'].to(device).to(torch.float32).split(batch_gpu)
            phase_mask = phase_samples['mask'].to(device).to(torch.bool)
            phase_padding_mask = (~ phase_mask).split(batch_gpu)
            phase_background = phase_samples['background'].to(device).to(torch.float32).split(batch_gpu)
            phase_real_c = phase_real_c.to(device).split(batch_gpu)
            all_gen_z = torch.randn([(len(phases) * batch_size), phase_bbox_class[0].shape[1], G.z_dim], dtype=torch.float32, device=device)
            all_gen_z = [phase_gen_z.split(batch_gpu) for phase_gen_z in all_gen_z.split(batch_size)]
            all_gen_c = [training_set.get_label(np.random.randint(len(training_set))) for _ in range((len(phases) * batch_size))]
            all_gen_c = torch.from_numpy(np.stack(all_gen_c)).pin_memory().to(device)
            all_gen_c = [phase_gen_c.split(batch_gpu) for phase_gen_c in all_gen_c.split(batch_size)]
        # Execute each training phase (Gmain/Greg/Dmain/Dreg or G/D "both").
        for (phase, phase_gen_z, phase_gen_c) in zip(phases, all_gen_z, all_gen_c):
            if ((batch_idx % phase.interval) != 0):
                continue
            if (phase.start_event is not None):
                phase.start_event.record(torch.cuda.current_stream(device))
            phase.opt.zero_grad(set_to_none=True)
            phase.module.requires_grad_(True)
            # Text encoder stays frozen throughout training.
            phase.module.text_encoder.requires_grad_(False)
            for (bbox_real, bbox_class, bbox_text, bbox_patch, padding_mask, background, real_c, gen_z, gen_c) in zip(phase_bbox_real, phase_bbox_class, phase_bbox_text, phase_bbox_patch, phase_padding_mask, phase_background, phase_real_c, phase_gen_z, phase_gen_c):
                loss.accumulate_gradients(phase=phase.name, bbox_real=bbox_real, bbox_class=bbox_class, bbox_text=bbox_text, bbox_patch=bbox_patch, padding_mask=padding_mask, background=background, real_c=real_c, gen_z=gen_z, gen_c=gen_c, gain=phase.interval, cur_nimg=cur_nimg)
            phase.module.requires_grad_(False)
            # All-reduce gradients across GPUs, scrub NaN/Inf, then step.
            with torch.autograd.profiler.record_function((phase.name + '_opt')):
                params = [param for param in phase.module.parameters() if (param.grad is not None)]
                if (len(params) > 0):
                    flat = torch.cat([param.grad.flatten() for param in params])
                    if (num_gpus > 1):
                        torch.distributed.all_reduce(flat)
                        flat /= num_gpus
                    misc.nan_to_num(flat, nan=0, posinf=100000.0, neginf=(- 100000.0), out=flat)
                    grads = flat.split([param.numel() for param in params])
                    for (param, grad) in zip(params, grads):
                        param.grad = grad.reshape(param.shape)
                phase.opt.step()
            if (phase.end_event is not None):
                phase.end_event.record(torch.cuda.current_stream(device))
        # Update G_ema as an exponential moving average of G.
        with torch.autograd.profiler.record_function('Gema'):
            ema_nimg = (ema_kimg * 1000)
            if (ema_rampup is not None):
                ema_nimg = min(ema_nimg, (cur_nimg * ema_rampup))
            ema_beta = (0.5 ** (batch_size / max(ema_nimg, 1e-08)))
            for (p_ema, p) in zip(G_ema.parameters(), G.parameters()):
                p_ema.copy_(p.lerp(p_ema, ema_beta))
            for (b_ema, b) in zip(G_ema.buffers(), G.buffers()):
                b_ema.copy_(b)
        # Update state and the ADA augmentation probability.
        cur_nimg += batch_size
        batch_idx += 1
        if ((ada_stats is not None) and ((batch_idx % ada_interval) == 0)):
            ada_stats.update()
            adjust = ((np.sign((ada_stats['Loss/signs/real'] - ada_target)) * (batch_size * ada_interval)) / (ada_kimg * 1000))
            augment_pipe.p.copy_((augment_pipe.p + adjust).max(misc.constant(0, device=device)))
        # Perform maintenance only once per tick (or when done).
        done = (cur_nimg >= (total_kimg * 1000))
        if ((not done) and (cur_tick != 0) and (cur_nimg < (tick_start_nimg + (kimg_per_tick * 1000)))):
            continue
        # Print per-tick status line.
        tick_end_time = time.time()
        fields = []
        fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
        fields += [f"kimg {training_stats.report0('Progress/kimg', (cur_nimg / 1000.0)):<8.1f}"]
        fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', (tick_end_time - start_time))):<12s}"]
        fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', (tick_end_time - tick_start_time)):<7.1f}"]
        fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (((tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg)) * 1000.0)):<7.2f}"]
        fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"]
        fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', (psutil.Process(os.getpid()).memory_info().rss / (2 ** 30))):<6.2f}"]
        fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', (torch.cuda.max_memory_allocated(device) / (2 ** 30))):<6.2f}"]
        fields += [f"reserved {training_stats.report0('Resources/peak_gpu_mem_reserved_gb', (torch.cuda.max_memory_reserved(device) / (2 ** 30))):<6.2f}"]
        torch.cuda.reset_peak_memory_stats()
        fields += [f"augment {training_stats.report0('Progress/augment', (float(augment_pipe.p.cpu()) if (augment_pipe is not None) else 0)):.3f}"]
        training_stats.report0('Timing/total_hours', ((tick_end_time - start_time) / (60 * 60)))
        training_stats.report0('Timing/total_days', ((tick_end_time - start_time) / ((24 * 60) * 60)))
        if (rank == 0):
            print(' '.join(fields))
        # Check the external abort callback.
        if ((not done) and (abort_fn is not None) and abort_fn()):
            done = True
            if (rank == 0):
                print()
                print('Aborting...')
        # Save image snapshots (fake layouts/images for train and val).
        if ((rank == 0) and (image_snapshot_ticks is not None) and (done or ((cur_tick % image_snapshot_ticks) == 0))):
            bbox_fake_fixed_train = []
            for (z_fixed_temp, bbox_class_fixed_temp, bbox_real_fixed_temp, bbox_text_fixed_temp, bbox_patch_fixed_temp, padding_mask_fixed_temp, background_fixed_temp, c_fixed_temp) in zip(z_fixed_train, bbox_class_fixed_train, bbox_real_fixed_train, bbox_text_fixed_train, bbox_patch_fixed_train, padding_mask_fixed_train, background_fixed_train, c_fixed_train):
                bbox_fake_fixed_temp = G_ema(z_fixed_temp, bbox_class_fixed_temp, bbox_real_fixed_temp, bbox_text_fixed_temp, bbox_patch_fixed_temp, padding_mask_fixed_temp, background_fixed_temp, c_fixed_temp)
                bbox_fake_fixed_train.append(bbox_fake_fixed_temp.clone())
            save_image(torch.cat(bbox_fake_fixed_train), torch.cat(bbox_class_fixed_train), (~ torch.cat(padding_mask_fixed_train)), training_set.colors, os.path.join(run_dir, f'train_layouts_fake_{(cur_nimg // 1000):06d}.png'), W_page_fixed_train, H_page_fixed_train)
            save_real_image(torch.cat(bbox_fake_fixed_train), torch.cat(bbox_real_fixed_train), torch.cat(bbox_patch_orig_fixed_train), (~ torch.cat(padding_mask_fixed_train)), os.path.join(run_dir, f'train_images_fake_{(cur_nimg // 1000):06d}.png'), W_page_fixed_train, H_page_fixed_train)
            save_real_image_with_background(torch.cat(bbox_fake_fixed_train), torch.cat(bbox_real_fixed_train), torch.cat(bbox_patch_orig_fixed_train), (~ torch.cat(padding_mask_fixed_train)), torch.cat(background_orig_fixed_train), os.path.join(run_dir, f'train_images_with_background_fake_{(cur_nimg // 1000):06d}.png'), W_page_fixed_train, H_page_fixed_train)
            bbox_fake_fixed_val = []
            for (z_fixed_temp, bbox_class_fixed_temp, bbox_real_fixed_temp, bbox_text_fixed_temp, bbox_patch_fixed_temp, padding_mask_fixed_temp, background_fixed_temp, c_fixed_temp) in zip(z_fixed_val, bbox_class_fixed_val, bbox_real_fixed_val, bbox_text_fixed_val, bbox_patch_fixed_val, padding_mask_fixed_val, background_fixed_val, c_fixed_val):
                bbox_fake_fixed_temp = G_ema(z_fixed_temp, bbox_class_fixed_temp, bbox_real_fixed_temp, bbox_text_fixed_temp, bbox_patch_fixed_temp, padding_mask_fixed_temp, background_fixed_temp, c_fixed_temp)
                bbox_fake_fixed_val.append(bbox_fake_fixed_temp.clone())
            save_image(torch.cat(bbox_fake_fixed_val), torch.cat(bbox_class_fixed_val), (~ torch.cat(padding_mask_fixed_val)), training_set.colors, os.path.join(run_dir, f'val_layouts_fake_{(cur_nimg // 1000):06d}.png'), W_page_fixed_val, H_page_fixed_val)
            save_real_image(torch.cat(bbox_fake_fixed_val), torch.cat(bbox_real_fixed_val), torch.cat(bbox_patch_orig_fixed_val), (~ torch.cat(padding_mask_fixed_val)), os.path.join(run_dir, f'val_images_fake_{(cur_nimg // 1000):06d}.png'), W_page_fixed_val, H_page_fixed_val)
            save_real_image_with_background(torch.cat(bbox_fake_fixed_val), torch.cat(bbox_real_fixed_val), torch.cat(bbox_patch_orig_fixed_val), (~ torch.cat(padding_mask_fixed_val)), torch.cat(background_orig_fixed_val), os.path.join(run_dir, f'val_images_with_background_fake_{(cur_nimg // 1000):06d}.png'), W_page_fixed_val, H_page_fixed_val)
        # Save network snapshot pickle (after a cross-GPU consistency check).
        snapshot_pkl = None
        snapshot_data = None
        if ((network_snapshot_ticks is not None) and (done or ((cur_tick % network_snapshot_ticks) == 0))):
            snapshot_data = dict(G=G, D=D, G_ema=G_ema, augment_pipe=augment_pipe, training_set_kwargs=dict(training_set_kwargs))
            for (key, value) in snapshot_data.items():
                if isinstance(value, torch.nn.Module):
                    value = copy.deepcopy(value).eval().requires_grad_(False)
                    if (num_gpus > 1):
                        misc.check_ddp_consistency(value, ignore_regex='.*\\.[^.]+_(avg|ema)')
                        for param in misc.params_and_buffers(value):
                            torch.distributed.broadcast(param, src=0)
                    snapshot_data[key] = value.cpu()
                del value
            snapshot_pkl = os.path.join(run_dir, f'network-snapshot-{(cur_nimg // 1000):06d}.pkl')
            if (rank == 0):
                with open(snapshot_pkl, 'wb') as f:
                    pickle.dump(snapshot_data, f)
        # Evaluate metrics on the freshly snapshotted G_ema.
        if ((snapshot_data is not None) and (len(metrics) > 0)):
            if (rank == 0):
                print('Evaluating metrics...')
            for metric in metrics:
                if ('_train' in metric):
                    result_dict = metric_main.calc_metric(metric=metric, run_dir=run_dir, G=snapshot_data['G_ema'], dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device)
                elif ('_val' in metric):
                    result_dict = metric_main.calc_metric(metric=metric, run_dir=run_dir, G=snapshot_data['G_ema'], dataset_kwargs=validation_set_kwargs, num_gpus=num_gpus, rank=rank, device=device)
                if (rank == 0):
                    metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl)
                stats_metrics.update(result_dict.results)
        del snapshot_data
        # Collect per-phase timing statistics.
        for phase in phases:
            value = []
            if ((phase.start_event is not None) and (phase.end_event is not None)):
                phase.end_event.synchronize()
                value = phase.start_event.elapsed_time(phase.end_event)
            training_stats.report0(('Timing/' + phase.name), value)
        stats_collector.update()
        stats_dict = stats_collector.as_dict()
        # Update jsonl and tensorboard logs.
        timestamp = time.time()
        if (stats_jsonl is not None):
            fields = dict(stats_dict, timestamp=timestamp)
            stats_jsonl.write((json.dumps(fields) + '\n'))
            stats_jsonl.flush()
        if (stats_tfevents is not None):
            global_step = int((cur_nimg / 1000.0))
            walltime = (timestamp - start_time)
            for (name, value) in stats_dict.items():
                stats_tfevents.add_scalar(name, value.mean, global_step=global_step, walltime=walltime)
            for (name, value) in stats_metrics.items():
                stats_tfevents.add_scalar(f'Metrics/{name}', value, global_step=global_step, walltime=walltime)
            stats_tfevents.flush()
        if (progress_fn is not None):
            progress_fn((cur_nimg // 1000), total_kimg)
        # Advance to the next tick.
        cur_tick += 1
        tick_start_nimg = cur_nimg
        tick_start_time = time.time()
        maintenance_time = (tick_start_time - tick_end_time)
        if done:
            break
    # --- Done. ---
    if (rank == 0):
        print()
        print('Exiting...')
@torch.jit.script
def script_fork_wait_throw(invalue):
    # Fix: the decorator line was garbled to ".script"; restored the full
    # `@torch.jit.script` decorator implied by the _fork/_wait usage below.
    # Forks an async task running `script_raise_func`, then waits on it so
    # the raised exception propagates to the caller.
    fut = torch.jit._fork(script_raise_func, invalue)
    value = torch.jit._wait(fut)
    return value
def _AnalyzeOperators(model):
    """Sanity-check that every GPU-placed op only touches blobs inside its
    own device namescope; raises Exception on a violation.

    Cross-device ops (NCCL/Copy/Concat, the 'dpm' Sum, GLOO Allreduce) are
    exempt, as are ops not placed on a GPU device type.
    """
    for op in model.Proto().op:
        if 'NCCL' in op.type or 'Copy' in op.type or 'Concat' in op.type:
            continue
        if op.type == 'Sum' and op.name == 'dpm':
            continue
        if 'Allreduce' in op.type and 'GLOO' in op.engine:
            continue
        op_dev = op.device_option
        op_gpu = op_dev.device_id
        if not core.IsGPUDeviceType(op_dev.device_type):
            continue
        namescope = '{}_{}/'.format(model._device_prefix, op_gpu)
        device_marker = '{}_'.format(model._device_prefix)
        for blob in list(op.input) + list(op.output):
            # A blob carrying the device prefix must live under this op's scope.
            if blob.startswith(device_marker) and not blob.startswith(namescope):
                raise Exception('Blob {} of op {}, should have namescope {}. Op: {}'.format(blob, op.type, '{}_{}/'.format(model._device_prefix, op_gpu), str(op)))
class ArgMaxPredictionProcessorConfig(BatchProcessorConfigType):
    # Output-field names used when emitting argmax predictions.
    id_key: str = 'id'  # key under which the example id is written
    result_key: str = 'answer'  # key under which the predicted answer is written
class TateTermMonoid(Monoid_class, UniqueRepresentation):
    """Monoid of terms of a Tate algebra.

    Mirrors the defining data of the parent Tate algebra ``A`` (base ring,
    variable names, log radii, term order) and builds its elements as
    :class:`TateAlgebraTerm` instances.
    """
    Element = TateAlgebraTerm
    def __init__(self, A):
        """Initialize from the Tate algebra ``A``, copying its parameters."""
        names = A.variable_names()
        Monoid_class.__init__(self, names)
        self._base = A.base_ring()
        self._field = A._field
        self._names = names
        self._latex_names = A._latex_names
        self._ngens = len(names)
        self._log_radii = ETuple(A.log_radii())
        self._order = A.term_order()
        self._sortkey = self._order.sortkey
        self._integral = A._integral
        self._parent_algebra = A
    def _repr_(self):
        """Return a string representation, listing variables and their valuations."""
        if (self._ngens == 0):
            return ('Monoid of terms over %s' % self._base)
        vars = ', '.join((('%s (val >= %s)' % (var, (- r))) for (var, r) in zip(self._names, self._log_radii)))
        return ('Monoid of terms in %s over %s' % (vars, self._base))
    def _latex_(self):
        """Return a LaTeX representation of this monoid."""
        return ('\\verb"Terms"(%s)' % self._parent_algebra._latex_())
    def _coerce_map_from_(self, R):
        """Return True if ``R`` coerces in (via the base ring, or via another
        term monoid whose algebra of series coerces); None otherwise."""
        base = self._base
        if base.has_coerce_map_from(R):
            return True
        if isinstance(R, TateTermMonoid):
            return self._parent_algebra.has_coerce_map_from(R.algebra_of_series())
    def prime(self):
        """Return the prime of the base ring."""
        return self._base.prime()
    def algebra_of_series(self):
        """Return the Tate algebra this term monoid was built from."""
        return self._parent_algebra
    def base_ring(self):
        """Return the base ring of this monoid."""
        return self._base
    def variable_names(self):
        """Return the tuple of variable names."""
        return self._names
    def log_radii(self):
        """Return the log radii of convergence as a plain tuple."""
        return tuple(self._log_radii)
    def term_order(self):
        """Return the term order used to compare terms."""
        return self._order
    def ngens(self):
        """Return the number of generators (variables)."""
        return self._ngens
    def gens(self):
        """Return the generators, converted from the parent algebra's."""
        return tuple([self(g) for g in self._parent_algebra.gens()])
    def gen(self, n=0):
        """Return the ``n``-th generator."""
        return self(self._parent_algebra.gen(n))
    def some_elements(self):
        """Return a few sample elements: the uniformizer image, the
        generators, and their product."""
        elts = ([self(self._field.uniformizer())] + list(self.gens()))
        elts.append(prod(elts))
        return elts
def stats_viz_dt(stats: Dict[(str, Any)]) -> Dict[(str, Dict[(str, str)])]:
    """Format raw statistics into a single 'Overview' section for display.

    Each entry of ``stats`` is rendered through ``_format_values``.
    """
    overview = {}
    for stat_name, stat_value in stats.items():
        overview[stat_name] = _format_values(stat_name, stat_value)
    return {'Overview': overview}
def flatten(inputs, scope=None):
    """Flatten ``inputs`` to shape [batch_size, k], keeping the first dimension.

    Args:
        inputs: a tensor with at least 2 dimensions and statically known
            non-batch dimensions.
        scope: optional op scope name (defaults to 'Flatten').

    Returns:
        A 2-D tensor where all dimensions after the first are collapsed.

    Raises:
        ValueError: if ``inputs`` has fewer than 2 dimensions.
    """
    if (len(inputs.get_shape()) < 2):
        # Fixed garbled message: was "Inputs must be have a least 2 dimensions".
        raise ValueError('Inputs must have at least 2 dimensions')
    dims = inputs.get_shape()[1:]
    k = dims.num_elements()
    with tf.op_scope([inputs], scope, 'Flatten'):
        return tf.reshape(inputs, [(- 1), k])
class stringtype(pointer):
    """Type descriptor for a string, modeled as a pointer to int8 characters."""
    def __init__(self):
        # A string is represented as a pointer to 8-bit elements.
        super().__init__(int8)
    def __call__(self, *args, **kwargs):
        # Calling the type object converts a value to a Python str.
        return str(*args, **kwargs)
    def to_json(self):
        # JSON-serializable description of this type.
        return {'type': 'string'}
    def from_json(json_obj, context=None):
        # NOTE(review): declared without ``self`` or @staticmethod — looks
        # intended as a static factory (stringtype.from_json(...)); calling it
        # on an instance would misbind ``json_obj``. Confirm against callers.
        return stringtype()
def register_Ns3MmWaveMacSchedSapUserSchedConfigIndParameters_methods(root_module, cls):
    """Register constructors and instance attributes for the
    ns3::MmWaveMacSchedSapUser::SchedConfigIndParameters binding."""
    # Default constructor and copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::MmWaveMacSchedSapUser::SchedConfigIndParameters const &', 'arg0')])
    # All public data members, exposed as mutable instance attributes.
    member_specs = (
        ('m_dlSfAllocInfo', 'ns3::SfAllocInfo'),
        ('m_schedInfoMap', 'std::map< unsigned short, ns3::SchedInfo >'),
        ('m_sfAllocInfo', 'ns3::SfAllocInfo'),
        ('m_sfnSf', 'ns3::SfnSf'),
        ('m_ulSfAllocInfo', 'ns3::SfAllocInfo'),
    )
    for member_name, cpp_type in member_specs:
        cls.add_instance_attribute(member_name, cpp_type, is_const=False)
    return
def experiment_file(directory, name, ext=''):
    """Return the path to an experiment file under BASE_EXPERIMENTS.

    ``ext`` is appended verbatim (include the leading dot if desired).
    """
    base_path = os.path.join(BASE_EXPERIMENTS, directory, name)
    return base_path + ext
def load_annotations(annotations_json: List[Dict], image_descriptions: Dict[(str, ImageDescription)], category_no_for_id: Callable[([str], int)], split: str) -> Dict[(str, List[Annotation])]:
    """Group raw COCO-style annotation dicts into Annotation objects per image.

    Bounding boxes are normalized by the image's original width/height.
    Annotations whose category is unknown to ``category_no_for_id`` are
    silently skipped.

    Raises:
        ValueError: if an annotation references an image id that has no
            entry in ``image_descriptions``.
    """
    grouped = defaultdict(list)
    annotation_count = sum(len(chunk) for chunk in annotations_json)
    for ann in tqdm(chain(*annotations_json), f'Loading {split} annotations', total=annotation_count):
        image_id = str(ann['image_id'])
        if image_id not in image_descriptions:
            raise ValueError(f'image_id [{image_id}] has no image description.')
        category_id = ann['category_id']
        try:
            category_no = category_no_for_id(str(category_id))
        except KeyError:
            # Unknown category: drop this annotation.
            continue
        width, height = image_descriptions[image_id].original_size
        x, y, w, h = ann['bbox']
        # Normalize to [0, 1] coordinates relative to the original image size.
        bbox = (x / width, y / height, w / width, h / height)
        grouped[image_id].append(Annotation(id=ann['id'], area=bbox[2] * bbox[3], is_group_of=ann['iscrowd'], image_id=ann['image_id'], bbox=bbox, category_id=str(category_id), category_no=category_no))
    return dict(grouped)
class UtilsTest(unittest.TestCase):
    """Tests for config utility helpers."""
    def test_merge_config(self):
        """merge_config applies both top-level and nested overrides onto BarConfig."""
        config_updates = {'a': 2, 'foo_config': {'a': 0.75}}
        bar_config = merge_config(BarConfig(), config_updates)
        # Top-level field and nested sub-config field are both updated.
        self.assertEqual(bar_config.a, 2)
        self.assertEqual(bar_config.foo_config.a, 0.75)
class Messages(object):
    """User-facing status messages shown by the chat interface.

    These are runtime strings; do not alter the wording without checking
    wherever they are displayed or matched elsewhere in the project.
    """
    ChatExpired = 'You ran out of time!'
    PartnerConnectionTimeout = "Your partner's connection has timed out! Waiting for a new chat..."
    ConnectionTimeout = 'Your connection has timed out. Please reenter this website using the original URL provided to you to start a new chat.'
    YouLeftRoom = 'You skipped the chat. '
    PartnerLeftRoom = 'Your partner has left the chat!'
    WaitingTimeExpired = 'Sorry, no other users appear to be active at the moment. Please come back later!'
    ChatCompleted = "Great, you've completed the chat!"
    # An incomplete chat is reported with the same text as a connection timeout.
    ChatIncomplete = ConnectionTimeout
    HITCompletionWarning = 'Please note that you will only get credit for this HIT if you made a good attempt to complete the chat.'
    Waiting = 'Waiting for a new chat...'
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode, cls_token_at_end=False, cls_token='[CLS]', cls_token_segment_id=1, sep_token='[SEP]', sep_token_extra=False, pad_on_left=False, pad_token=0, pad_token_segment_id=0, sequence_a_segment_id=0, sequence_b_segment_id=1, mask_padding_with_zero=True):
    """Convert InputExamples into padded InputFeatures for a BERT-style model.

    Tokenizes text_a (and text_b if present), inserts special tokens,
    builds segment ids and an attention mask, and pads/truncates to
    ``max_seq_length``. ``cls_token_at_end``/``pad_on_left`` accommodate
    XLNet-style conventions; ``sep_token_extra`` accommodates RoBERTa.

    Raises:
        KeyError: if ``output_mode`` is neither 'classification' nor 'regression'.
    """
    label_map = {label: i for (i, label) in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        if ((ex_index % 10000) == 0):
            logger.info(('Writing example %d of %d' % (ex_index, len(examples))))
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Reserve room for [CLS], two [SEP]s (three with sep_token_extra).
            special_tokens_count = (4 if sep_token_extra else 3)
            _truncate_seq_pair(tokens_a, tokens_b, (max_seq_length - special_tokens_count))
        else:
            # Single sequence: [CLS] + tokens_a + [SEP] (+ extra [SEP]).
            special_tokens_count = (3 if sep_token_extra else 2)
            if (len(tokens_a) > (max_seq_length - special_tokens_count)):
                tokens_a = tokens_a[:(max_seq_length - special_tokens_count)]
        tokens = (tokens_a + [sep_token])
        if sep_token_extra:
            # RoBERTa uses an extra separator between sentence pairs.
            tokens += [sep_token]
        segment_ids = ([sequence_a_segment_id] * len(tokens))
        if tokens_b:
            tokens += (tokens_b + [sep_token])
            segment_ids += ([sequence_b_segment_id] * (len(tokens_b) + 1))
        if cls_token_at_end:
            # XLNet convention: [CLS] goes at the end of the sequence.
            tokens = (tokens + [cls_token])
            segment_ids = (segment_ids + [cls_token_segment_id])
        else:
            tokens = ([cls_token] + tokens)
            segment_ids = ([cls_token_segment_id] + segment_ids)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # Mask is 1 for real tokens when mask_padding_with_zero (the default).
        input_mask = ([(1 if mask_padding_with_zero else 0)] * len(input_ids))
        padding_length = (max_seq_length - len(input_ids))
        if pad_on_left:
            input_ids = (([pad_token] * padding_length) + input_ids)
            input_mask = (([(0 if mask_padding_with_zero else 1)] * padding_length) + input_mask)
            segment_ids = (([pad_token_segment_id] * padding_length) + segment_ids)
        else:
            input_ids = (input_ids + ([pad_token] * padding_length))
            input_mask = (input_mask + ([(0 if mask_padding_with_zero else 1)] * padding_length))
            segment_ids = (segment_ids + ([pad_token_segment_id] * padding_length))
        assert (len(input_ids) == max_seq_length)
        assert (len(input_mask) == max_seq_length)
        assert (len(segment_ids) == max_seq_length)
        if (output_mode == 'classification'):
            label_id = label_map[example.label]
        elif (output_mode == 'regression'):
            label_id = float(example.label)
        else:
            raise KeyError(output_mode)
        features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id))
    return features
def _read_string_data(f):
    """Read a length-prefixed string record from ``f``.

    The length appears to be stored twice in this record format (the first
    long is re-read before the payload) — presumably an IDL-SAVE-style
    string record; confirm against the format spec.
    """
    length = _read_long(f)
    if (length > 0):
        # Length is specified a second time immediately before the data.
        length = _read_long(f)
        string_data = _read_bytes(f, length)
        # Records are aligned to 32-bit boundaries.
        _align_32(f)
    else:
        # NOTE(review): empty case returns str '' while the non-empty case
        # returns bytes from _read_bytes — callers may rely on this; verify.
        string_data = ''
    return string_data
def clean_br_cpf(df: Union[(pd.DataFrame, dd.DataFrame)], column: str, output_format: str='standard', inplace: bool=False, errors: str='coerce', progress: bool=True) -> pd.DataFrame:
    """Clean Brazilian CPF numbers in ``df[column]``.

    Parameters
    ----------
    df : pandas or dask DataFrame containing the CPF column.
    column : name of the column to clean.
    output_format : 'standard' (formatted) or 'compact' (digits only).
    inplace : if True, replace the original column instead of adding one.
    errors : error-handling mode forwarded to ``_format``.
    progress : whether to show a dask progress bar.

    Returns
    -------
    pd.DataFrame with a ``{column}_clean`` column added (or replacing the
    original when ``inplace``).

    Raises
    ------
    ValueError if ``output_format`` is not 'compact' or 'standard'.
    """
    if (output_format not in {'compact', 'standard'}):
        raise ValueError(f'output_format {output_format} is invalid. It needs to be "compact" or "standard".')
    # Work on a dask frame regardless of the input type.
    df = to_dask(df)
    # _format returns a tuple per value; only element 0 (the cleaned value) is kept.
    df['clean_code_tup'] = df[column].map_partitions((lambda srs: [_format(x, output_format, errors) for x in srs]), meta=object)
    df = df.assign(_temp_=df['clean_code_tup'].map(itemgetter(0)))
    df = df.rename(columns={'_temp_': f'{column}_clean'})
    df = df.drop(columns=['clean_code_tup'])
    if inplace:
        # Overwrite the original column with the cleaned values, then keep a
        # single column named '{column}_clean'.
        # NOTE(review): the original column ends up renamed to '{column}_clean'
        # rather than keeping its own name — confirm this is the intended
        # inplace semantics.
        df[column] = df[f'{column}_clean']
        df = df.drop(columns=f'{column}_clean')
        df = df.rename(columns={column: f'{column}_clean'})
    # Materialize the dask graph, optionally with a progress bar.
    with ProgressBar(minimum=1, disable=(not progress)):
        df = df.compute()
    return df
def global_meters_all_avg(args, *meters):
    """All-reduce each scalar meter across workers and return per-worker averages.

    Each meter is summed over the ``args.world_size`` processes via
    ``dist.all_reduce`` and divided by the world size. A single meter is
    returned as a scalar; multiple meters as a list.
    """
    averaged = []
    for meter in meters:
        value = torch.tensor(meter, device=args.device, dtype=torch.float32)
        # Default all_reduce op is SUM; divide to obtain the mean.
        dist.all_reduce(value)
        averaged.append((value / args.world_size).item())
    if len(averaged) == 1:
        return averaged[0]
    return averaged
def add_effect(tmp_effect, result):
    """Normalize ``tmp_effect`` into flat pddl.Effect objects appended to ``result``.

    Conjunctive effects are flattened recursively; universal and conditional
    wrappers are unwrapped into (parameters, condition, literal) triples.
    If the exact opposite effect (same parameters/condition, negated literal)
    is already in ``result``, the add-after-delete semantics below apply.
    """
    if isinstance(tmp_effect, pddl.ConjunctiveEffect):
        # Recurse into each conjunct and stop: conjunctions carry no payload.
        for effect in tmp_effect.effects:
            add_effect(effect, result)
        return
    else:
        parameters = []
        condition = pddl.Truth()
        if isinstance(tmp_effect, pddl.UniversalEffect):
            parameters = tmp_effect.parameters
            if isinstance(tmp_effect.effect, pddl.ConditionalEffect):
                # forall + when: take both the condition and the inner literal.
                condition = tmp_effect.effect.condition
                assert isinstance(tmp_effect.effect.effect, pddl.SimpleEffect)
                effect = tmp_effect.effect.effect.effect
            else:
                assert isinstance(tmp_effect.effect, pddl.SimpleEffect)
                effect = tmp_effect.effect.effect
        elif isinstance(tmp_effect, pddl.ConditionalEffect):
            condition = tmp_effect.condition
            assert isinstance(tmp_effect.effect, pddl.SimpleEffect)
            effect = tmp_effect.effect.effect
        else:
            assert isinstance(tmp_effect, pddl.SimpleEffect)
            effect = tmp_effect.effect
        # After unwrapping, the payload must be a single literal.
        assert isinstance(effect, pddl.Literal)
        condition = condition.simplified()
        new_effect = pddl.Effect(parameters, condition, effect)
        # The "contradiction" is the same effect with the literal negated.
        contradiction = pddl.Effect(parameters, condition, effect.negate())
        if (contradiction not in result):
            result.append(new_effect)
        elif isinstance(contradiction.literal, pddl.NegatedAtom):
            # An add effect wins over an existing delete effect of the same
            # atom: drop the delete and keep the add.
            result.remove(contradiction)
            result.append(new_effect)
def copy_to_local_models(global_model: ProbabilisticModelType, num_local_models: int, key: Tag=OBJECTIVE) -> Mapping[(Tag, ProbabilisticModelType)]:
    """Create ``num_local_models`` independent deep copies of ``global_model``,
    keyed by ``LocalizedTag(key, i)`` for i in [0, num_local_models)."""
    local_models = {}
    for index in range(num_local_models):
        local_models[LocalizedTag(key, index)] = copy.deepcopy(global_model)
    return local_models
def process_da_ddt(paths, short_name):
    """Convert the Danish DDT treebank (conllu) into BIO NER shards plus JSON.

    Looks first for the packaged ``ddt.zip`` under NERBASE/da_ddt; otherwise
    falls back to the unpacked .conllu files in the same directory.

    Args:
        paths: dict with 'NER_DATA_DIR' (output) and 'NERBASE' (input) roots.
        short_name: must be 'da_ddt'.

    Raises:
        FileNotFoundError: if neither the zip nor an unpacked .conllu exists.
    """
    assert (short_name == 'da_ddt')
    language = 'da'
    IN_FILES = ('ddt.train.conllu', 'ddt.dev.conllu', 'ddt.test.conllu')
    base_output_path = paths['NER_DATA_DIR']
    OUT_FILES = [os.path.join(base_output_path, ('%s.%s.bio' % (short_name, shard))) for shard in SHARDS]
    zip_file = os.path.join(paths['NERBASE'], 'da_ddt', 'ddt.zip')
    if os.path.exists(zip_file):
        # Read the conllu members directly out of the zip archive.
        for (in_filename, out_filename, shard) in zip(IN_FILES, OUT_FILES, SHARDS):
            conll_to_iob.process_conll(in_filename, out_filename, zip_file)
    else:
        # Fall back to loose .conllu files next to where the zip would be.
        for (in_filename, out_filename, shard) in zip(IN_FILES, OUT_FILES, SHARDS):
            in_filename = os.path.join(paths['NERBASE'], 'da_ddt', in_filename)
            if (not os.path.exists(in_filename)):
                # Fixed message typo: was "could not file %s file in %s".
                raise FileNotFoundError(('Could not find zip in expected location %s and could not find %s file in %s' % (zip_file, shard, in_filename)))
            conll_to_iob.process_conll(in_filename, out_filename)
    convert_bio_to_json(base_output_path, base_output_path, short_name)
def reshape_from_matrix(output_tensor, orig_shape_list):
    """Undo reshape_to_matrix: restore the leading dims of ``orig_shape_list``
    while keeping the matrix's final (width) dimension."""
    # Already rank 2: nothing to restore.
    if len(orig_shape_list) == 2:
        return output_tensor
    matrix_shape = get_shape_list(output_tensor)
    leading_dims = orig_shape_list[:-1]
    width = matrix_shape[-1]
    return tf.reshape(output_tensor, leading_dims + [width])
class Partition1(nn.Module):
LAYER_SCOPES = ['BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[6]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[7]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[8]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[9]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[10]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[key]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Linear[value]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Softmax[softmax]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfAttention[self]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/Dropout[dropout]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertAttention[attention]/BertSelfOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertIntermediate[intermediate]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Linear[dense]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/Dropout[dropout]', 
'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[11]/BertOutput[output]/LayerNorm[LayerNorm]', 'BertForQuestionAnswering/BertModel[bert]/BertEncoder[encoder]/BertLayer[12]/BertAttention[attention]/BertSelfAttention[self]/Linear[query]']
TENSORS = []
def __init__(self, layers, tensors, device='cuda:1'):
super().__init__()
for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
self.add_module(f'l_{idx}', layers[layer_scope])
b = p = 0
for tensor_scope in self.TENSORS:
tensor = tensors[tensor_scope]
if isinstance(tensor, nn.Parameter):
self.register_parameter(f'p_{p}', tensor)
p += 1
else:
self.register_buffer(f'b_{b}', tensor)
b += 1
self.device = torch.device(device)
self.input_structure = [1, 1]
self.lookup = {'l_0': 'bert.encoder.6.attention.self.query', 'l_1': 'bert.encoder.6.attention.self.key', 'l_2': 'bert.encoder.6.attention.self.value', 'l_3': 'bert.encoder.6.attention.self.softmax', 'l_4': 'bert.encoder.6.attention.self.dropout', 'l_5': 'bert.encoder.6.attention.output.dense', 'l_6': 'bert.encoder.6.attention.output.dropout', 'l_7': 'bert.encoder.6.attention.output.LayerNorm', 'l_8': 'bert.encoder.6.intermediate.dense', 'l_9': 'bert.encoder.6.output.dense', 'l_10': 'bert.encoder.6.output.dropout', 'l_11': 'bert.encoder.6.output.LayerNorm', 'l_12': 'bert.encoder.7.attention.self.query', 'l_13': 'bert.encoder.7.attention.self.key', 'l_14': 'bert.encoder.7.attention.self.value', 'l_15': 'bert.encoder.7.attention.self.softmax', 'l_16': 'bert.encoder.7.attention.self.dropout', 'l_17': 'bert.encoder.7.attention.output.dense', 'l_18': 'bert.encoder.7.attention.output.dropout', 'l_19': 'bert.encoder.7.attention.output.LayerNorm', 'l_20': 'bert.encoder.7.intermediate.dense', 'l_21': 'bert.encoder.7.output.dense', 'l_22': 'bert.encoder.7.output.dropout', 'l_23': 'bert.encoder.7.output.LayerNorm', 'l_24': 'bert.encoder.8.attention.self.query', 'l_25': 'bert.encoder.8.attention.self.key', 'l_26': 'bert.encoder.8.attention.self.value', 'l_27': 'bert.encoder.8.attention.self.softmax', 'l_28': 'bert.encoder.8.attention.self.dropout', 'l_29': 'bert.encoder.8.attention.output.dense', 'l_30': 'bert.encoder.8.attention.output.dropout', 'l_31': 'bert.encoder.8.attention.output.LayerNorm', 'l_32': 'bert.encoder.8.intermediate.dense', 'l_33': 'bert.encoder.8.output.dense', 'l_34': 'bert.encoder.8.output.dropout', 'l_35': 'bert.encoder.8.output.LayerNorm', 'l_36': 'bert.encoder.9.attention.self.query', 'l_37': 'bert.encoder.9.attention.self.key', 'l_38': 'bert.encoder.9.attention.self.value', 'l_39': 'bert.encoder.9.attention.self.softmax', 'l_40': 'bert.encoder.9.attention.self.dropout', 'l_41': 'bert.encoder.9.attention.output.dense', 'l_42': 
'bert.encoder.9.attention.output.dropout', 'l_43': 'bert.encoder.9.attention.output.LayerNorm', 'l_44': 'bert.encoder.9.intermediate.dense', 'l_45': 'bert.encoder.9.output.dense', 'l_46': 'bert.encoder.9.output.dropout', 'l_47': 'bert.encoder.9.output.LayerNorm', 'l_48': 'bert.encoder.10.attention.self.query', 'l_49': 'bert.encoder.10.attention.self.key', 'l_50': 'bert.encoder.10.attention.self.value', 'l_51': 'bert.encoder.10.attention.self.softmax', 'l_52': 'bert.encoder.10.attention.self.dropout', 'l_53': 'bert.encoder.10.attention.output.dense', 'l_54': 'bert.encoder.10.attention.output.dropout', 'l_55': 'bert.encoder.10.attention.output.LayerNorm', 'l_56': 'bert.encoder.10.intermediate.dense', 'l_57': 'bert.encoder.10.output.dense', 'l_58': 'bert.encoder.10.output.dropout', 'l_59': 'bert.encoder.10.output.LayerNorm', 'l_60': 'bert.encoder.11.attention.self.query', 'l_61': 'bert.encoder.11.attention.self.key', 'l_62': 'bert.encoder.11.attention.self.value', 'l_63': 'bert.encoder.11.attention.self.softmax', 'l_64': 'bert.encoder.11.attention.self.dropout', 'l_65': 'bert.encoder.11.attention.output.dense', 'l_66': 'bert.encoder.11.attention.output.dropout', 'l_67': 'bert.encoder.11.attention.output.LayerNorm', 'l_68': 'bert.encoder.11.intermediate.dense', 'l_69': 'bert.encoder.11.output.dense', 'l_70': 'bert.encoder.11.output.dropout', 'l_71': 'bert.encoder.11.output.LayerNorm', 'l_72': 'bert.encoder.12.attention.self.query'}
self.to(self.device)
def forward(self, *args):
(attention_mask, x0) = unflatten(args, self.input_structure)
t_0 = self.l_0(x0)
t_1 = self.l_1(x0)
t_2 = self.l_2(x0)
t_3 = t_0.size()
t_4 = t_1.size()
t_5 = t_2.size()
t_3 = t_3[slice(None, (- 1), None)]
t_3 = (t_3 + (16, 64))
t_6 = t_3[0]
t_7 = t_3[1]
t_8 = t_3[2]
t_3 = t_3[3]
t_3 = t_0.view(t_6, t_7, t_8, t_3)
t_3 = t_3.permute(0, 2, 1, 3)
t_4 = t_4[slice(None, (- 1), None)]
t_4 = (t_4 + (16, 64))
t_8 = t_4[0]
t_7 = t_4[1]
t_6 = t_4[2]
t_4 = t_4[3]
t_4 = t_1.view(t_8, t_7, t_6, t_4)
t_4 = t_4.permute(0, 2, 1, 3)
t_5 = t_5[slice(None, (- 1), None)]
t_5 = (t_5 + (16, 64))
t_6 = t_5[0]
t_7 = t_5[1]
t_8 = t_5[2]
t_5 = t_5[3]
t_5 = t_2.view(t_6, t_7, t_8, t_5)
t_5 = t_5.permute(0, 2, 1, 3)
t_4 = t_4.transpose((- 1), (- 2))
t_4 = torch.matmul(t_3, t_4)
t_3 = math.sqrt(64)
t_3 = (t_4 / t_3)
t_3 = (t_3 + attention_mask)
t_3 = self.l_3(t_3)
t_3 = self.l_4(t_3)
t_5 = torch.matmul(t_3, t_5)
t_5 = t_5.permute(0, 2, 1, 3)
t_5 = t_5.contiguous()
t_3 = t_5.size()
t_3 = t_3[slice(None, (- 2), None)]
t_3 = (t_3 + (1024,))
t_4 = t_3[0]
t_8 = t_3[1]
t_3 = t_3[2]
t_3 = t_5.view(t_4, t_8, t_3)
t_3 = self.l_5(t_3)
t_3 = self.l_6(t_3)
t_3 = (t_3 + x0)
t_3 = self.l_7(t_3)
t_8 = self.l_8(t_3)
t_8 = torch.nn.functional.gelu(t_8)
t_8 = self.l_9(t_8)
t_8 = self.l_10(t_8)
t_3 = (t_8 + t_3)
t_3 = self.l_11(t_3)
t_8 = self.l_12(t_3)
t_4 = self.l_13(t_3)
t_5 = self.l_14(t_3)
t_7 = t_8.size()
t_6 = t_4.size()
t_2 = t_5.size()
t_7 = t_7[slice(None, (- 1), None)]
t_7 = (t_7 + (16, 64))
t_1 = t_7[0]
t_0 = t_7[1]
t_9 = t_7[2]
t_7 = t_7[3]
t_7 = t_8.view(t_1, t_0, t_9, t_7)
t_7 = t_7.permute(0, 2, 1, 3)
t_6 = t_6[slice(None, (- 1), None)]
t_6 = (t_6 + (16, 64))
t_9 = t_6[0]
t_0 = t_6[1]
t_1 = t_6[2]
t_6 = t_6[3]
t_6 = t_4.view(t_9, t_0, t_1, t_6)
t_6 = t_6.permute(0, 2, 1, 3)
t_2 = t_2[slice(None, (- 1), None)]
t_2 = (t_2 + (16, 64))
t_1 = t_2[0]
t_0 = t_2[1]
t_9 = t_2[2]
t_2 = t_2[3]
t_2 = t_5.view(t_1, t_0, t_9, t_2)
t_2 = t_2.permute(0, 2, 1, 3)
t_6 = t_6.transpose((- 1), (- 2))
t_6 = torch.matmul(t_7, t_6)
t_7 = math.sqrt(64)
t_7 = (t_6 / t_7)
t_7 = (t_7 + attention_mask)
t_7 = self.l_15(t_7)
t_7 = self.l_16(t_7)
t_2 = torch.matmul(t_7, t_2)
t_2 = t_2.permute(0, 2, 1, 3)
t_2 = t_2.contiguous()
t_7 = t_2.size()
t_7 = t_7[slice(None, (- 2), None)]
t_7 = (t_7 + (1024,))
t_6 = t_7[0]
t_9 = t_7[1]
t_7 = t_7[2]
t_7 = t_2.view(t_6, t_9, t_7)
t_7 = self.l_17(t_7)
t_7 = self.l_18(t_7)
t_3 = (t_7 + t_3)
t_3 = self.l_19(t_3)
t_7 = self.l_20(t_3)
t_7 = torch.nn.functional.gelu(t_7)
t_7 = self.l_21(t_7)
t_7 = self.l_22(t_7)
t_3 = (t_7 + t_3)
t_3 = self.l_23(t_3)
t_7 = self.l_24(t_3)
t_9 = self.l_25(t_3)
t_6 = self.l_26(t_3)
t_2 = t_7.size()
t_0 = t_9.size()
t_1 = t_6.size()
t_2 = t_2[slice(None, (- 1), None)]
t_2 = (t_2 + (16, 64))
t_5 = t_2[0]
t_4 = t_2[1]
t_8 = t_2[2]
t_2 = t_2[3]
t_2 = t_7.view(t_5, t_4, t_8, t_2)
t_2 = t_2.permute(0, 2, 1, 3)
t_0 = t_0[slice(None, (- 1), None)]
t_0 = (t_0 + (16, 64))
t_8 = t_0[0]
t_4 = t_0[1]
t_5 = t_0[2]
t_0 = t_0[3]
t_0 = t_9.view(t_8, t_4, t_5, t_0)
t_0 = t_0.permute(0, 2, 1, 3)
t_1 = t_1[slice(None, (- 1), None)]
t_1 = (t_1 + (16, 64))
t_5 = t_1[0]
t_4 = t_1[1]
t_8 = t_1[2]
t_1 = t_1[3]
t_1 = t_6.view(t_5, t_4, t_8, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_0 = t_0.transpose((- 1), (- 2))
t_0 = torch.matmul(t_2, t_0)
t_2 = math.sqrt(64)
t_2 = (t_0 / t_2)
t_2 = (t_2 + attention_mask)
t_2 = self.l_27(t_2)
t_2 = self.l_28(t_2)
t_1 = torch.matmul(t_2, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_1 = t_1.contiguous()
t_2 = t_1.size()
t_2 = t_2[slice(None, (- 2), None)]
t_2 = (t_2 + (1024,))
t_0 = t_2[0]
t_8 = t_2[1]
t_2 = t_2[2]
t_2 = t_1.view(t_0, t_8, t_2)
t_2 = self.l_29(t_2)
t_2 = self.l_30(t_2)
t_3 = (t_2 + t_3)
t_3 = self.l_31(t_3)
t_2 = self.l_32(t_3)
t_2 = torch.nn.functional.gelu(t_2)
t_2 = self.l_33(t_2)
t_2 = self.l_34(t_2)
t_3 = (t_2 + t_3)
t_3 = self.l_35(t_3)
t_2 = self.l_36(t_3)
t_8 = self.l_37(t_3)
t_0 = self.l_38(t_3)
t_1 = t_2.size()
t_4 = t_8.size()
t_5 = t_0.size()
t_1 = t_1[slice(None, (- 1), None)]
t_1 = (t_1 + (16, 64))
t_6 = t_1[0]
t_9 = t_1[1]
t_7 = t_1[2]
t_1 = t_1[3]
t_1 = t_2.view(t_6, t_9, t_7, t_1)
t_1 = t_1.permute(0, 2, 1, 3)
t_4 = t_4[slice(None, (- 1), None)]
t_4 = (t_4 + (16, 64))
t_7 = t_4[0]
t_9 = t_4[1]
t_6 = t_4[2]
t_4 = t_4[3]
t_4 = t_8.view(t_7, t_9, t_6, t_4)
t_4 = t_4.permute(0, 2, 1, 3)
t_5 = t_5[slice(None, (- 1), None)]
t_5 = (t_5 + (16, 64))
t_6 = t_5[0]
t_9 = t_5[1]
t_7 = t_5[2]
t_5 = t_5[3]
t_5 = t_0.view(t_6, t_9, t_7, t_5)
t_5 = t_5.permute(0, 2, 1, 3)
t_4 = t_4.transpose((- 1), (- 2))
t_4 = torch.matmul(t_1, t_4)
t_1 = math.sqrt(64)
t_1 = (t_4 / t_1)
t_1 = (t_1 + attention_mask)
t_1 = self.l_39(t_1)
t_1 = self.l_40(t_1)
t_5 = torch.matmul(t_1, t_5)
t_5 = t_5.permute(0, 2, 1, 3)
t_5 = t_5.contiguous()
t_1 = t_5.size()
t_1 = t_1[slice(None, (- 2), None)]
t_1 = (t_1 + (1024,))
t_4 = t_1[0]
t_7 = t_1[1]
t_1 = t_1[2]
t_1 = t_5.view(t_4, t_7, t_1)
t_1 = self.l_41(t_1)
t_1 = self.l_42(t_1)
t_3 = (t_1 + t_3)
t_3 = self.l_43(t_3)
t_1 = self.l_44(t_3)
t_1 = torch.nn.functional.gelu(t_1)
t_1 = self.l_45(t_1)
t_1 = self.l_46(t_1)
t_3 = (t_1 + t_3)
t_3 = self.l_47(t_3)
t_1 = self.l_48(t_3)
t_7 = self.l_49(t_3)
t_4 = self.l_50(t_3)
t_5 = t_1.size()
t_9 = t_7.size()
t_6 = t_4.size()
t_5 = t_5[slice(None, (- 1), None)]
t_5 = (t_5 + (16, 64))
t_0 = t_5[0]
t_8 = t_5[1]
t_2 = t_5[2]
t_5 = t_5[3]
t_5 = t_1.view(t_0, t_8, t_2, t_5)
t_5 = t_5.permute(0, 2, 1, 3)
t_9 = t_9[slice(None, (- 1), None)]
t_9 = (t_9 + (16, 64))
t_2 = t_9[0]
t_8 = t_9[1]
t_0 = t_9[2]
t_9 = t_9[3]
t_9 = t_7.view(t_2, t_8, t_0, t_9)
t_9 = t_9.permute(0, 2, 1, 3)
t_6 = t_6[slice(None, (- 1), None)]
t_6 = (t_6 + (16, 64))
t_0 = t_6[0]
t_8 = t_6[1]
t_2 = t_6[2]
t_6 = t_6[3]
t_6 = t_4.view(t_0, t_8, t_2, t_6)
t_6 = t_6.permute(0, 2, 1, 3)
t_9 = t_9.transpose((- 1), (- 2))
t_9 = torch.matmul(t_5, t_9)
t_5 = math.sqrt(64)
t_5 = (t_9 / t_5)
t_5 = (t_5 + attention_mask)
t_5 = self.l_51(t_5)
t_5 = self.l_52(t_5)
t_6 = torch.matmul(t_5, t_6)
t_6 = t_6.permute(0, 2, 1, 3)
t_6 = t_6.contiguous()
t_5 = t_6.size()
t_5 = t_5[slice(None, (- 2), None)]
t_5 = (t_5 + (1024,))
t_9 = t_5[0]
t_2 = t_5[1]
t_5 = t_5[2]
t_5 = t_6.view(t_9, t_2, t_5)
t_5 = self.l_53(t_5)
t_5 = self.l_54(t_5)
t_3 = (t_5 + t_3)
t_3 = self.l_55(t_3)
t_5 = self.l_56(t_3)
t_5 = torch.nn.functional.gelu(t_5)
t_5 = self.l_57(t_5)
t_5 = self.l_58(t_5)
t_3 = (t_5 + t_3)
t_3 = self.l_59(t_3)
t_5 = self.l_60(t_3)
t_2 = self.l_61(t_3)
t_9 = self.l_62(t_3)
t_6 = t_5.size()
t_8 = t_2.size()
t_0 = t_9.size()
t_6 = t_6[slice(None, (- 1), None)]
t_6 = (t_6 + (16, 64))
t_4 = t_6[0]
t_7 = t_6[1]
t_1 = t_6[2]
t_6 = t_6[3]
t_6 = t_5.view(t_4, t_7, t_1, t_6)
t_6 = t_6.permute(0, 2, 1, 3)
t_8 = t_8[slice(None, (- 1), None)]
t_8 = (t_8 + (16, 64))
t_1 = t_8[0]
t_7 = t_8[1]
t_4 = t_8[2]
t_8 = t_8[3]
t_8 = t_2.view(t_1, t_7, t_4, t_8)
t_8 = t_8.permute(0, 2, 1, 3)
t_0 = t_0[slice(None, (- 1), None)]
t_0 = (t_0 + (16, 64))
t_4 = t_0[0]
t_7 = t_0[1]
t_1 = t_0[2]
t_0 = t_0[3]
t_0 = t_9.view(t_4, t_7, t_1, t_0)
t_0 = t_0.permute(0, 2, 1, 3)
t_8 = t_8.transpose((- 1), (- 2))
t_8 = torch.matmul(t_6, t_8)
t_6 = math.sqrt(64)
t_6 = (t_8 / t_6)
t_6 = (t_6 + attention_mask)
t_6 = self.l_63(t_6)
t_6 = self.l_64(t_6)
t_0 = torch.matmul(t_6, t_0)
t_0 = t_0.permute(0, 2, 1, 3)
t_0 = t_0.contiguous()
t_6 = t_0.size()
t_6 = t_6[slice(None, (- 2), None)]
t_6 = (t_6 + (1024,))
t_8 = t_6[0]
t_1 = t_6[1]
t_6 = t_6[2]
t_6 = t_0.view(t_8, t_1, t_6)
t_6 = self.l_65(t_6)
t_6 = self.l_66(t_6)
t_3 = (t_6 + t_3)
t_3 = self.l_67(t_3)
t_6 = self.l_68(t_3)
t_6 = torch.nn.functional.gelu(t_6)
t_6 = self.l_69(t_6)
t_6 = self.l_70(t_6)
t_3 = (t_6 + t_3)
t_3 = self.l_71(t_3)
t_6 = self.l_72(t_3)
return list(flatten((t_3, t_6)))
def state_dict(self, *args, **kwargs):
    # Delegate to the file-level `state_dict` helper defined outside this class.
    return state_dict(self, *args, **kwargs)
def load_state_dict(self, *args, **kwargs):
    # Delegate to the file-level `load_state_dict` helper defined outside this class.
    return load_state_dict(self, *args, **kwargs)
def named_parameters(self, *args, **kwargs):
    # Delegate to the file-level `named_parameters` helper defined outside this class.
    return named_parameters(self, *args, **kwargs)
def named_buffers(self, *args, **kwargs):
    # Delegate to the file-level `named_buffers` helper defined outside this class.
    return named_buffers(self, *args, **kwargs)
def cpu(self):
    # Delegate to the file-level `cpu` helper defined outside this class.
    return cpu(self)
def cuda(self, device=None):
    # Delegate to the file-level `cuda` helper defined outside this class.
    # `device=None` is forwarded unchanged (helper chooses the default device).
    return cuda(self, device=device)
def to(self, *args, **kwargs):
    # Delegate to the file-level `to` helper defined outside this class.
    return to(self, *args, **kwargs)
def get_logger(name: Optional[str]=None, level: str='INFO', rank_zero_only: bool=True, **kwargs) -> logging.Logger:
    """Build a logger that emits through a Rich console handler on rank zero.

    When ``rank_zero_only`` is set, every rank other than 0 is raised to
    CRITICAL so only rank 0 produces output. Duplicate handlers that compare
    equal are collapsed down to a single one before returning.
    """
    logger = logging.getLogger(name)
    from l2hmc.utils.rich import get_console, is_interactive
    if rank_zero_only:
        # Silence all non-zero ranks; rank zero keeps the requested level.
        logger.setLevel(level if RANK == 0 else 'CRITICAL')
    if RANK == 0:
        console = get_console(markup=True, redirect=(WORLD_SIZE > 1), **kwargs)
        if console.is_jupyter:
            console.is_jupyter = False
        # Markup / link paths only for single-process, non-interactive runs.
        use_markup = (WORLD_SIZE == 1) and not is_interactive()
        handler = RichHandler(omit_repeated_times=False, level=level, console=console, show_time=True, show_level=True, show_path=True, markup=use_markup, enable_link_path=use_markup)
        logger.addHandler(handler)
        logger.setLevel(level)
    # If every attached handler compares equal to the first, keep just one.
    handlers = logger.handlers
    if len(handlers) > 1 and all(h == handlers[0] for h in handlers):
        logger.handlers = [handlers[0]]
    return logger
class _Constraint(ABC):
def __init__(self):
self.hidden = False
def is_satisfied_by(self, val):
def __str__(self): |
def getHistogramsWithMask(img, mask):
    """Return concatenated, mask-weighted H/S/V and LBP histograms of an RGB image.

    Each histogram has NBINS bins and is density-normalized; `mask` supplies
    the per-pixel weights. Output is a single 1-D vector of length 4*NBINS.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    # One weighted histogram per HSV channel.
    hists = [np.histogram(channel, bins=NBINS, density=True, weights=mask)[0]
             for channel in cv2.split(hsv)]
    # Uniform local-binary-pattern texture histogram on the grayscale image.
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    lbp = ft.local_binary_pattern(gray, 24, 3, 'uniform')
    hists.append(np.histogram(lbp, bins=NBINS, density=True, weights=mask)[0])
    return np.concatenate(hists)
def unfreeze_by_patterns(module, patterns):
    """Partition `patterns` into module patterns ('module:'-prefixed, prefix
    stripped) and plain parameter patterns.

    NOTE(review): as visible here, both lists are built but never used and the
    function returns None — `module` is untouched. The code that applies these
    patterns appears to be missing; confirm against the original source.
    """
    unfreeze_params = []
    unfreeze_modules = []
    for pattern in patterns:
        if pattern.startswith('module:'):
            # Strip the 'module:' prefix (7 characters).
            unfreeze_modules.append(pattern[7:])
        else:
            unfreeze_params.append(pattern)
def to_lean_description_aux(expr: Expression, local_vars: Optional[Dict[int, str]]=None, context: Optional[LeanDescContext]=None) -> Tuple[str, int]:
    """Recursively render a Cairo `expr` as a Lean expression string.

    Returns a pair ``(lean_string, next_div_var_number)`` where the second
    element tracks the numbering of auxiliary division variables ("ddiv")
    introduced along the way.

    BUG FIX: the original signature used a mutable default (`local_vars={}`),
    an anti-pattern; replaced with a None sentinel. Behavior is unchanged:
    callers that omit the argument still get an empty mapping.
    """
    if local_vars is None:
        local_vars = {}
    div_var_startnum = (context.div_var_startnum if (context is not None) else 0)
    if ((len(local_vars) == 0) and (context is not None)):
        # Fall back to the context's local-variable mapping when none is given.
        local_vars = context.local_vars
    if isinstance(expr, ExprConst):
        # Negative constants are parenthesized.
        return ((str(expr.val) if (0 <= expr.val) else f'({str(expr.val)})'), div_var_startnum)
    if isinstance(expr, ExprNeg):
        (result, new_div_var_startnum) = to_lean_description_aux(expr.val, local_vars, context)
        return (f'(-{result})', new_div_var_startnum)
    if isinstance(expr, ExprCast):
        # Casts are transparent in the Lean rendering.
        return to_lean_description_aux(expr.expr, local_vars, context)
    if isinstance(expr, ExprIdentifier):
        name_sub = (context.name_sub if (context is not None) else {})
        return (name_with_sub(expr.name, name_sub), div_var_startnum)
    if isinstance(expr, ExprOperator):
        (op1, new_div_var_startnum) = to_lean_description_aux(expr.a, local_vars, context)
        simplifier = (context.simplifier if (context is not None) else None)
        div_var_basename = (context.div_var_basename if (context is not None) else '_')
        # A "ddiv" is a division that cannot be simplified to a constant inverse;
        # it introduces a fresh auxiliary variable.
        is_ddiv = ((expr.op == '/') and (get_const_div_inv(expr, simplifier) is None))
        if is_ddiv:
            div_var_name = f'{div_var_basename}{new_div_var_startnum}'
            new_div_var_startnum += 1
            if (context is not None):
                # Keep the context's counter in sync for the recursive call below.
                context.div_var_startnum = new_div_var_startnum
        (op2, new_div_var_startnum) = to_lean_description_aux(expr.b, local_vars, context)
        if is_ddiv:
            return (f'ddiv {op1} {op2} {div_var_name}', new_div_var_startnum)
        if (expr.op == '/'):
            div_simp = get_const_div_inv(expr, simplifier)
            if ((div_simp is not None) and div_simp[1]):
                return (f'({op1} : Z) / ({op2} : Z)', new_div_var_startnum)
            return (f'{op1} / ({op2} : Z)', new_div_var_startnum)
        return (f'{op1} {expr.op} {op2}', new_div_var_startnum)
    if isinstance(expr, ExprPow):
        (op1, new_div_var_startnum) = to_lean_description_aux(expr.a, local_vars, context)
        if (context is not None):
            context.div_var_startnum = new_div_var_startnum
        (op2, new_div_var_startnum) = to_lean_description_aux(expr.b, local_vars, context)
        return (f'{op1} ^ {op2}', new_div_var_startnum)
    if isinstance(expr, ExprParentheses):
        (result, new_div_var_startnum) = to_lean_description_aux(expr.val, local_vars, context)
        return (f'({result})', new_div_var_startnum)
    if isinstance(expr, ExprFuncCall):
        # Only constructor-style calls matching the expected struct type are allowed.
        assert ((context is None) or (context.cairo_type is None) or (isinstance(context.cairo_type, TypeStruct) and (expr.rvalue.func_ident.name == str(context.cairo_type.scope[(- 1):])))), 'Function call name does not match type.'
        return to_obj_constructor(args=expr.rvalue.arguments, local_vars=local_vars, context=context)
    if isinstance(expr, ExprTuple):
        return to_obj_constructor(args=expr.members, local_vars=local_vars, context=context)
    if isinstance(expr, ExprDeref):
        reg_and_offset = get_reg_offset(expr.addr)
        if (reg_and_offset is not None):
            (reg, offset) = reg_and_offset
            # Known fp-relative local variables render by name.
            if ((reg == Register.FP) and (offset in local_vars)):
                return (local_vars[offset], div_var_startnum)
            return ('mem (.{}{})'.format(('fp' if (reg == Register.FP) else 'ap'), ('' if (offset == 0) else (f' + {offset}' if (0 < offset) else f' - {(- offset)}'))), div_var_startnum)
        (result, new_div_var_startnum) = to_lean_description_aux(expr.addr, local_vars, context)
        return (f'mem ({result})', new_div_var_startnum)
    if isinstance(expr, ExprReg):
        return (('.fp' if (expr.reg == Register.FP) else '.ap'), div_var_startnum)
    if isinstance(expr, ExprAddressOf):
        if isinstance(expr.expr, ExprSubscript):
            (inner, _, new_div_var_startnum) = to_lean_subscript_inner_desc_and_cast(expr=expr.expr, local_vars=local_vars, context=context)
            return (inner, new_div_var_startnum)
        (sub_expr, new_div_var_startnum) = to_lean_description_aux(expr.expr, local_vars, context)
        # Taking an address undoes a 'mem' wrapper produced by the recursion.
        if sub_expr.startswith('mem ('):
            addr_prefix = 'mem ('
            addr_suffix = ')'
        elif sub_expr.startswith('mem '):
            addr_prefix = 'mem '
            addr_suffix = ''
        else:
            raise Exception('Cannot determine address expression.')
        if addr_suffix:
            addr_expr = sub_expr[len(addr_prefix):(- len(addr_suffix))]
        else:
            addr_expr = sub_expr[len(addr_prefix):]
        return (addr_expr, new_div_var_startnum)
    if isinstance(expr, ExprSubscript):
        (inner, cast, new_div_var_startnum) = to_lean_subscript_inner_desc_and_cast(expr=expr, local_vars=local_vars, context=context)
        lean_expr = (f'{cast} (mem ({inner}))' if cast else f'mem ({inner})')
        return (lean_expr, new_div_var_startnum)
    if isinstance(expr, ExprDot):
        (base_expr, new_div_var_startnum) = to_lean_description_aux(expr.expr, local_vars, context)
        return (f'({base_expr}).{expr.member.name}', new_div_var_startnum)
    if isinstance(expr, ExprHint):
        if (context is not None):
            # Hint expressions must match one of the context's known hint variables.
            for hint_var in context.hint_vars:
                if (hint_var.expr == expr):
                    return ((LEAN_HINT_VAR_PREFIX + hint_var.identifier.identifier.name), div_var_startnum)
            raise Exception('Failed to resolve hint variable.')
    raise Exception('Unsupported expression type.')
def find_trigger_distribution(model, data, num_triggers, threshold):
    """Grow a trigger pool until at least `num_triggers` triggers pass `threshold`.

    Returns the list of successful triggers from the pool.
    """
    def generate_random_trigger():
        # NOTE(review): defined but never called anywhere in this function —
        # possibly dead code or a leftover from an earlier revision; confirm
        # before removing.
        pattern = (np.ones((3, 3, 3)) * 0.5)
        return Trigger(model.name, pattern, target=0, type_=0)
    pool = TriggerPool()
    # Seed the pool with one trigger found directly from the data.
    pool.add(find_trigger(model, data))
    # Alternate testing and expanding (by 5) until enough triggers succeed.
    # NOTE(review): no iteration cap — this loop relies on pool.expand/test
    # eventually producing enough successful triggers.
    while (len(pool.success_triggers(threshold)) < num_triggers):
        pool.test(model, data)
        pool.expand(5)
    print(('Found %d triggers in %d, threshold %.1f.' % (len(pool.success_triggers(threshold)), len(pool.triggers), float(threshold))))
    return pool.success_triggers(threshold)
@app.route('/ngsi-ld/v1/entityOperations/upsert', methods=['POST'])
def upsertNotification():
    """Handle an NGSI-LD upsert notification: record the first entity's id.

    BUG FIX: the original first line was a bare tuple containing a keyword
    argument (a SyntaxError) — clearly a mangled Flask route decorator. It is
    reconstructed as `@app.route(...)`; confirm the Flask app object is
    actually named `app` in this file.
    """
    print(dir(request))
    entities = request.get_json()
    print(entities)
    entity = entities[0]
    print(entity['id'])
    # NOTE(review): despite its name, `entityIdDict` is used as a list here.
    entityIdDict.append(entity['id'])
    return 'Done'
def validate_cr_cpf(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate Brazilian CPF numbers.

    Accepts a single string, a (pandas/dask) Series, or a (pandas/dask)
    DataFrame. For a DataFrame, `column` selects the column to validate;
    when empty, every cell is validated element-wise.
    """
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(cpf.is_valid)
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if (column != ''):
            return df[column].apply(cpf.is_valid)
        return df.applymap(cpf.is_valid)
    # Scalar input: validate the single value directly.
    return cpf.is_valid(df)
def test_should_explain_output(convolutional_model, random_data, mocker):
    """IntegratedGradients.explain returns a grid matching the images' H/W."""
    # Make grid_display a pass-through so the raw explanation shape is observable.
    mocker.patch('tf_explain.core.integrated_gradients.grid_display', side_effect=lambda x: x)
    images, labels = random_data
    output = IntegratedGradients().explain((images, labels), convolutional_model, 0)
    # Channel axis is collapsed by the explainer.
    assert output.shape == images.shape[:-1]
class NpzFormat(Format):
    """Imageio plugin for numpy's compressed ``.npz`` archives.

    Each array stored in the archive is exposed as one "image". Writing
    collects appended arrays and saves them with ``np.savez_compressed``
    when the writer closes. Meta data is not supported in either direction.
    """

    def _can_read(self, request):
        # Readable whenever the request's file extension matches this format.
        return (request.extension in self.extensions)

    def _can_write(self, request):
        return (request.extension in self.extensions)

    class Reader(Format.Reader):
        def _open(self):
            self._npz = np.load(self.request.get_file())
            assert isinstance(self._npz, np.lib.npyio.NpzFile)
            # Sort member names by their trailing '_<n>' suffix so arrays come
            # back in append order. NOTE(review): this is a lexicographic sort,
            # so '_10' sorts before '_2'; kept as-is to preserve behavior.
            sorter = (lambda x: x.split('_')[(- 1)])
            self._names = sorted(self._npz.files, key=sorter)

        def _close(self):
            self._npz.close()

        def _get_length(self):
            return len(self._names)

        def _get_data(self, index):
            if ((index < 0) or (index >= len(self._names))):
                # BUG FIX: the message previously said "nzp" instead of "npz".
                raise IndexError('Index out of range while reading from npz')
            im = self._npz[self._names[index]]
            # No per-array meta data available.
            return (im, {})

        def _get_meta_data(self, index):
            raise RuntimeError('The npz format does not support meta data.')

    class Writer(Format.Writer):
        def _open(self):
            # Arrays are buffered here and written out on close.
            self._images = []

        def _close(self):
            np.savez_compressed(self.request.get_file(), *self._images)

        def _append_data(self, im, meta):
            # `meta` is accepted but ignored: npz has nowhere to store it.
            self._images.append(im)

        def set_meta_data(self, meta):
            raise RuntimeError('The npz format does not support meta data.')
class TestFPS(unittest.TestCase):
    """Behavioral tests for the FPS (farthest-point sampling) selector."""

    def setUp(self):
        self.X, _ = get_dataset(return_X_y=True)
        # Known selection order for this dataset.
        self.idx = [0, 6, 1, 2, 4, 9, 3]

    def test_restart(self):
        """Growing n_to_select with warm_start reproduces the known order."""
        selector = FPS(n_to_select=1, initialize=self.idx[0])
        selector.fit(self.X)
        for n in range(2, len(self.idx)):
            selector.n_to_select = n
            selector.fit(self.X, warm_start=True)
            self.assertEqual(selector.selected_idx_[n - 1], self.idx[n - 1])

    def test_initialize(self):
        """Scalar/'random' initializations fit; a list seeds the first picks;
        an invalid value raises ValueError with a specific message."""
        for init in (self.idx[0], 'random'):
            with self.subTest(initialize=init):
                selector = FPS(n_to_select=1, initialize=init)
                selector.fit(self.X)
        init = self.idx[:4]
        with self.subTest(initialize=init):
            selector = FPS(n_to_select=(len(self.idx) - 1), initialize=init)
            selector.fit(self.X)
            for pos in range(4):
                self.assertEqual(selector.selected_idx_[pos], self.idx[pos])
        with self.assertRaises(ValueError) as caught:
            selector = FPS(n_to_select=1, initialize='bad')
            selector.fit(self.X)
        self.assertEqual(str(caught.exception), 'Invalid value of the initialize parameter')

    def test_get_distances(self):
        """Selection distances strictly decrease; unfitted selector raises."""
        selector = FPS(n_to_select=7)
        selector.fit(self.X)
        d = selector.get_select_distance()
        self.assertTrue(all(d[1:-1] - d[2:] > 0))
        with self.assertRaises(NotFittedError):
            selector = FPS(n_to_select=7)
            _ = selector.get_select_distance()
def _partial_powers(one_hot_encoded_row, Aadj_T, num_powers):
    """Stack the first `num_powers` products of a one-hot row with Aadj_T.

    The sparse one-hot row is densified to shape (1, N), repeatedly multiplied
    through the adjacency, and the intermediate results are stacked and
    squeezed to a (num_powers, N) tensor.
    """
    current = tf.reshape(tf.sparse.to_dense(one_hot_encoded_row), shape=(1, Aadj_T.shape[1]))
    powers = []
    for _ in range(num_powers):
        # row @ A expressed via two transposes around A @ row^T.
        current = K.transpose(K.dot(Aadj_T, K.transpose(current)))
        powers.append(current)
    return K.squeeze(tf.stack(powers, axis=1), axis=0)
def Conv1d(*args, **kwargs):
    """Create an ``nn.Conv1d`` whose weights are Kaiming-normal initialized."""
    conv = nn.Conv1d(*args, **kwargs)
    nn.init.kaiming_normal_(conv.weight)
    return conv
class BottleneckWithFixedBatchNorm(Bottleneck):
    """Bottleneck block whose normalization layers are FrozenBatchNorm2d.

    Pure forwarding constructor: all arguments are passed through unchanged,
    with `norm_func` pinned to FrozenBatchNorm2d.
    """

    def __init__(self, in_channels, bottleneck_channels, out_channels, num_groups=1, stride_in_1x1=True, stride=1, dilation=1, dcn_config=None):
        super(BottleneckWithFixedBatchNorm, self).__init__(in_channels=in_channels, bottleneck_channels=bottleneck_channels, out_channels=out_channels, num_groups=num_groups, stride_in_1x1=stride_in_1x1, stride=stride, dilation=dilation, norm_func=FrozenBatchNorm2d, dcn_config=dcn_config)
def save_to_HDF5(settings, df):
    """Serialize the preprocessed light-curve dataframe `df` into the HDF5
    file at ``settings.hdf5_file_name``.

    Expects `df` indexed by 'SNID' with rows grouped per supernova. Writes,
    per supernova: misc features, class targets, per-window night/filter
    counts, normalization statistics, and the ragged per-SN feature arrays.
    """
    # --- assemble the training-feature and misc-feature column lists ---
    list_training_features = [f'FLUXCAL_{f}' for f in settings.list_filters]
    list_training_features += [f'FLUXCALERR_{f}' for f in settings.list_filters]
    list_training_features += ['delta_time', 'HOSTGAL_PHOTOZ', 'HOSTGAL_PHOTOZ_ERR', 'HOSTGAL_SPECZ', 'HOSTGAL_SPECZ_ERR']
    if settings.additional_train_var:
        list_training_features += list(settings.additional_train_var)
    list_misc_features = ['PEAKMJD', settings.sntype_var, 'mB', 'c', 'x1', 'SIM_REDSHIFT_CMB', 'SIM_PEAKMAG_z', 'SIM_PEAKMAG_g', 'SIM_PEAKMAG_r', 'SIM_PEAKMAG_i']
    if (settings.photo_window_var not in list_misc_features):
        # BUG FIX: the original used `+=` with a string, which extends the
        # list character-by-character; the column name must be appended as a
        # single element (it is later used whole, e.g. in cols_to_drop).
        list_misc_features.append(settings.photo_window_var)
    # Keep only columns actually present in df.
    list_misc_features = [k for k in list_misc_features if (k in df.keys())]
    assert (df.index.name == 'SNID'), 'Must set SNID as index'
    # --- find the (start, end) row span of each supernova ---
    ID = df.index.values
    idx_change = (np.where((ID[1:] != ID[:(- 1)]))[0] + 1)
    idx_change = np.hstack(([0], idx_change, [len(df)]))
    list_start_end = [(s, e) for (s, e) in zip(idx_change[:(- 1)], idx_change[1:])]
    if (not settings.data_testing):
        # Training mode: keep only light curves with at least 3 measurements.
        list_start_end = list(filter((lambda x: ((x[1] - x[0]) >= 3)), list_start_end))
    np.random.shuffle(list_start_end)
    with h5py.File(settings.hdf5_file_name, 'w') as hf:
        n_samples = len(list_start_end)
        # Order-preserving dedup of the class labels.
        used = set()
        unique_classes = [x for x in settings.sntypes.values() if ((x not in used) and (used.add(x) or True))]
        list_classes = list(set([2, len(unique_classes)]))
        list_names = ['target', 'dataset_photometry', 'dataset_saltfit']
        start_idxs = [i[0] for i in list_start_end]
        shuffled_ID = ID[start_idxs]
        hf.create_dataset('SNID', data=shuffled_ID, dtype=h5py.special_dtype(vlen=str))
        df_SNID = pd.DataFrame(shuffled_ID, columns=['SNID'])
        logging_utils.print_green('Saving misc features')
        for feat in list_misc_features:
            # The SN-type column is integer-typed; everything else is float.
            if (feat == settings.sntype_var):
                dtype = np.dtype('int32')
            else:
                dtype = np.dtype('float32')
            hf.create_dataset(feat, data=df[feat].values[start_idxs], dtype=dtype)
            df.drop(columns=feat, inplace=True)
        logging_utils.print_green('Saving class')
        for c_ in list_classes:
            for name in list_names:
                field_name = f'{name}_{c_}classes'
                hf.create_dataset(field_name, data=df[field_name].values[start_idxs], dtype=np.dtype('int8'))
                df.drop(columns=field_name, inplace=True)
        # Cumulative time per supernova, then flatten the index for merges.
        df['time'] = df[['delta_time']].groupby(df.index).cumsum()
        df = df.reset_index()
        logging_utils.print_green('Saving unique nights')
        for (offset, suffix) in zip(OFFSETS, OFFSETS_STR):
            new_column = f'PEAKMJD{suffix}_unique_nights'
            df_nights = df[(df['time'] < (df['PEAKMJDNORM'] + offset))][['PEAKMJDNORM', 'SNID']].groupby('SNID').count().astype(np.uint8).rename(columns={'PEAKMJDNORM': new_column}).reset_index()
            hf.create_dataset(new_column, data=df_SNID.merge(df_nights, on='SNID', how='left')[new_column].values, dtype=np.dtype('uint8'))
        logging_utils.print_green('Saving filter occurences')
        for flt in settings.list_filters:
            df[f'has_{flt}'] = df.FLT.str.contains(flt).astype(np.uint8)
            for (offset, suffix) in zip(OFFSETS, OFFSETS_STR):
                new_column = f'PEAKMJD{suffix}_num_{flt}'
                df_flt = df[(df['time'] < (df['PEAKMJDNORM'] + offset))][[f'has_{flt}', 'SNID']].groupby('SNID').sum().astype(np.uint8).rename(columns={f'has_{flt}': new_column}).reset_index()
                hf.create_dataset(new_column, data=df_SNID.merge(df_flt, on='SNID', how='left')[new_column].values, dtype=np.dtype('uint8'))
            df.drop(columns=f'has_{flt}', inplace=True)
        hf.create_dataset('PEAKMJDNORM', data=df['PEAKMJDNORM'].values[start_idxs], dtype=np.dtype('float32'))
        # Drop bookkeeping columns that must not end up in the feature matrix.
        cols_to_drop = [k for k in ['time', 'SNID', 'PEAKMJDNORM', settings.photo_window_var] if (k in df.keys())]
        df.drop(columns=list(set(cols_to_drop)), inplace=True)
        logging_utils.print_green('Compute normalizations')
        gnorm = hf.create_group('normalizations')
        for feat in settings.training_features_to_normalize:
            log_standardized = log_standardization(df[feat].values)
            gnorm.create_dataset(f'{feat}/min', data=log_standardized.arr_min)
            gnorm.create_dataset(f'{feat}/mean', data=log_standardized.arr_mean)
            gnorm.create_dataset(f'{feat}/std', data=log_standardized.arr_std)
        logging_utils.print_green('Compute global normalizations')
        gnorm = hf.create_group('normalizations_global')
        flux_features = [f'FLUXCAL_{f}' for f in settings.list_filters]
        flux_log_standardized = log_standardization(df[flux_features].values)
        gnorm.create_dataset('FLUXCAL/min', data=flux_log_standardized.arr_min)
        gnorm.create_dataset('FLUXCAL/mean', data=flux_log_standardized.arr_mean)
        gnorm.create_dataset('FLUXCAL/std', data=flux_log_standardized.arr_std)
        fluxerr_features = [f'FLUXCALERR_{f}' for f in settings.list_filters]
        fluxerr_log_standardized = log_standardization(df[fluxerr_features].values)
        gnorm.create_dataset('FLUXCALERR/min', data=fluxerr_log_standardized.arr_min)
        gnorm.create_dataset('FLUXCALERR/mean', data=fluxerr_log_standardized.arr_mean)
        gnorm.create_dataset('FLUXCALERR/std', data=fluxerr_log_standardized.arr_std)
        logging_utils.print_green('Save non-data features to HDF5')
        # Ragged per-SN rows are stored as variable-length float32 vectors.
        data_type = h5py.special_dtype(vlen=np.dtype('float32'))
        hf.create_dataset('data', (n_samples,), dtype=data_type)
        list_to_fill = [k for k in list_training_features if (k not in df.columns.values.tolist())]
        # Only HOST* columns may legitimately be missing; anything else is fatal.
        if (len([k for k in list_to_fill if ('HOST' not in k)]) > 0):
            logging_utils.print_red('missing information in input')
            raise AttributeError
        for key in list_to_fill:
            df[key] = np.zeros(len(df))
        logging_utils.print_green('Fit onehot on FLT')
        assert (sorted(df.columns.values.tolist()) == sorted((list_training_features + ['FLT'])))
        # Prepend all known filter combinations so the one-hot columns are
        # stable, then slice them back off.
        tmp = pd.concat([pd.Series(settings.list_filters_combination), df['FLT']])
        tmp_onehot = pd.get_dummies(tmp)
        FLT_onehot = tmp_onehot[len(settings.list_filters_combination):]
        df = pd.concat([df[list_training_features], FLT_onehot], axis=1)
        list_training_features = df.columns.values.tolist()
        hf.create_dataset('features', (len(list_training_features),), dtype=h5py.special_dtype(vlen=str))
        hf['features'][:] = list_training_features
        logging_utils.print_green('Saved features:', ','.join(list_training_features))
        logging_utils.print_green('Save data features to HDF5')
        arr_feat = df[list_training_features].values
        hf['data'].attrs['n_features'] = len(list_training_features)
        for (idx, idx_pair) in enumerate(tqdm(list_start_end, desc='Filling hdf5', ncols=100)):
            # Flatten each SN's (rows, features) block into one ragged vector.
            arr = arr_feat[idx_pair[0]:idx_pair[1]]
            hf['data'][idx] = np.ravel(arr)
        try:
            hf['data_types_training'] = np.asarray(settings.data_types_training).astype(np.dtype('S100'))
        except Exception:
            # Fallback: store the repr when the value is not array-convertible.
            hf['data_types_training'] = f'{settings.data_types_training}'
def bpe_tokenizer(sentence):
    """Whitespace-tokenize a sentence for BPE-style processing."""
    tokens = sentence.strip().split()
    # NOTE(review): `w.endswith('')` is always True, so the '</w>' suffix is
    # never appended; and `replace('', '')` below is a no-op. The BPE marker
    # strings (e.g. a subword separator) were likely lost when this source was
    # extracted — as written, the function is equivalent to a plain split.
    # Confirm against the original before fixing.
    tokens = [((w + '</w>') if (not w.endswith('')) else w) for w in tokens]
    tokens = [w.replace('', '') for w in tokens]
    return tokens
def _set_file(path):
    """Attach a fresh UTF-8 file handler at `path` to the module logger,
    backing up any file already present there.
    """
    global _FILE_HANDLER
    if osp.isfile(path):
        # Preserve the previous log by renaming it with a timestamp suffix.
        backup_name = ((path + '.') + _get_time_str())
        shutil.move(path, backup_name)
        _logger.info("Existing log file '{}' backuped to '{}'".format(path, backup_name))
    handler = logging.FileHandler(filename=path, encoding='utf-8', mode='w')
    handler.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
    _FILE_HANDLER = handler
    _logger.addHandler(handler)
    # Record how the process was invoked at the top of the new log.
    _logger.info(('Argv: ' + ' '.join(sys.argv)))
def deterministic_index_select(x, dim, indices):
    """Select `indices` along dimension `dim` of tensor `x`.

    Implemented by moving `dim` to the front, advanced-indexing it, and
    moving it back — equivalent in value to ``x.index_select(dim, indices)``.
    """
    fronted = torch.transpose(x, 0, dim)
    picked = fronted[indices]
    return picked.transpose(dim, 0)
class _Sampler(nn.Module):
    """Reparameterization sampler: returns mu + eps * std with eps ~ N(0, 1)."""

    def __init__(self):
        super(_Sampler, self).__init__()

    def forward(self, input):
        # `input` is a pair: input[0] = mu, input[1] = log-variance.
        mu = input[0]
        logvar = input[1]
        # std = exp(0.5 * logvar); exp_() mutates the intermediate in place.
        std = logvar.mul(0.5).exp_()
        # NOTE(review): relies on a module-level `opt.cuda` flag for device
        # choice and on legacy APIs (torch.cuda.FloatTensor, autograd.Variable)
        # from pre-0.4 PyTorch — kept byte-identical.
        if opt.cuda:
            eps = torch.cuda.FloatTensor(std.size()).normal_()
        else:
            eps = torch.FloatTensor(std.size()).normal_()
        eps = Variable(eps)
        # add_() mutates eps*std in place; the result is the sampled latent.
        return eps.mul(std).add_(mu)
def test_rnns(experim_creator, control_creator, check_grad=True, verbose=False, seqLength=100, numLayers=1, inputSize=512, hiddenSize=512, miniBatch=64, device='cuda', seed=17):
    """Compare an experimental RNN implementation against a reference one:
    same inputs/params, same forward outputs, same gradients after backward.
    """
    creator_kwargs = dict(seqLength=seqLength, numLayers=numLayers, inputSize=inputSize, hiddenSize=hiddenSize, miniBatch=miniBatch, device=device, seed=seed)
    print('Setting up...')
    reference = control_creator(**creator_kwargs)
    candidate = experim_creator(**creator_kwargs)
    # Both implementations must start from identical inputs and parameters.
    assertEqual(candidate.inputs, reference.inputs)
    assertEqual(candidate.params, reference.params)
    print('Checking outputs...')
    reference_outputs = reference.forward(*reference.inputs)
    candidate_outputs = candidate.forward(*candidate.inputs)
    assertEqual(candidate_outputs, reference_outputs)
    print('Checking grads...')
    assert (reference.backward_setup is not None)
    assert (candidate.backward_setup is not None)
    assert (reference.backward is not None)
    assert (candidate.backward is not None)
    ref_backward_inputs = reference.backward_setup(reference_outputs, seed)
    cand_backward_inputs = candidate.backward_setup(candidate_outputs, seed)
    reference.backward(*ref_backward_inputs)
    candidate.backward(*cand_backward_inputs)
    assertEqual([p.grad for p in candidate.params], [p.grad for p in reference.params])
    if verbose:
        print(candidate.forward.graph_for(*candidate.inputs))
    print('')
def _triangulate(g, comb_emb):
    """Add edges to the connected plane graph `g` until every face has 3 edges.

    `comb_emb` is the combinatorial embedding (cyclic neighbor order per
    vertex) and is updated in place as edges are inserted. Returns the list
    of edges that were added.

    Raises NotImplementedError for disconnected graphs and ValueError for
    graphs with fewer than 3 vertices.
    """
    if (not g.is_connected()):
        raise NotImplementedError('_triangulate() only knows how to handle connected graphs')
    if (g.order() < 3):
        raise ValueError("a Graph with less than 3 vertices doesn't have any triangulation")
    faces = g.faces(comb_emb)
    edges_added = []
    # NOTE: `faces` is extended while iterating (larger faces push a reduced
    # face back onto the list), so the loop also processes newly created faces.
    for face in faces:
        new_face = []
        if (len(face) < 3):
            raise RuntimeError(('Triangulate method created face %s with < 3 edges.' % face))
        if (len(face) == 3):
            continue
        elif (len(face) == 4):
            # Quadrilateral: add one diagonal. Pick (w, u) unless that pair is
            # already adjacent (or coincident), in which case rotate the face.
            (u, v, w, x) = (e[0] for e in face)
            if ((w == u) or g.has_edge(w, u)):
                (u, v, w, x) = (v, w, x, u)
            new_face = (w, u)
            # Splice the new edge into the rotation order at both endpoints.
            comb_emb[w].insert(comb_emb[w].index(x), u)
            comb_emb[u].insert(comb_emb[u].index(v), w)
            g.add_edge(new_face)
            edges_added.append(new_face)
        else:
            # General face: walk the boundary adding "short-cut" edges between
            # every other vertex, collecting the reduced face in `new_face`.
            N = len(face)
            i = 0
            while (i < (N - 1)):
                new_edge = (face[(i + 1)][1], face[i][0])
                # Skip candidates that already exist or would be self-loops.
                if (g.has_edge(new_edge) or (new_edge[0] == new_edge[1])):
                    new_face.append(face[i])
                    if (i == (N - 2)):
                        break
                    i += 1
                    continue
                g.add_edge(new_edge)
                edges_added.append(new_edge)
                # Update the rotation order around both endpoints of the new edge.
                comb_emb[new_edge[0]].insert(comb_emb[new_edge[0]].index((face + new_face)[(i + 2)][1]), new_edge[1])
                comb_emb[new_edge[1]].insert(comb_emb[new_edge[1]].index(face[i][1]), new_edge[0])
                new_face.append((new_edge[1], new_edge[0]))
                i += 2
            if (i != N):
                new_face.append(face[(- 1)])
            # The reduced face may still be large; re-queue it for processing.
            faces.append(new_face)
    return edges_added
class MonolingualDataset(FairseqDataset):
    """Dataset wrapper for monolingual language-modeling data.

    Wraps an underlying token dataset and, depending on `targets`
    ('self' / 'future' / 'past'), assembles the matching target sequences
    for each source sample. Optionally prepends BOS and appends EOS.
    """

    def __init__(self, dataset, sizes, src_vocab, tgt_vocab=None, add_eos_for_other_targets=False, shuffle=False, targets=None, add_bos_token=False):
        self.dataset = dataset
        self.sizes = np.array(sizes)
        self.vocab = src_vocab
        # Target vocabulary defaults to the source vocabulary.
        self.tgt_vocab = (tgt_vocab or src_vocab)
        self.add_eos_for_other_targets = add_eos_for_other_targets
        self.shuffle = shuffle
        self.add_bos_token = add_bos_token
        assert ((targets is None) or all(((t in {'self', 'future', 'past'}) for t in targets))), "targets must be none or one of 'self', 'future', 'past'"
        if ((targets is not None) and (len(targets) == 0)):
            # An empty target list behaves like no targets at all.
            targets = None
        self.targets = targets

    def __getitem__(self, index):
        """Return a dict with 'id', 'source' and (possibly None) 'target'."""
        if (self.targets is not None):
            # Underlying dataset yields (source, future_target, past_target).
            (source, future_target, past_target) = self.dataset[index]
            (source, target) = self._make_source_target(source, future_target, past_target)
        else:
            source = self.dataset[index]
            target = None
        (source, target) = self._maybe_add_bos(source, target)
        return {'id': index, 'source': source, 'target': target}

    def __len__(self):
        return len(self.dataset)

    def _make_source_target(self, source, future_target, past_target):
        """Assemble the target tensor(s) requested by `self.targets`.

        Returns (source, target) where target is a single tensor when one
        target type was requested, or a list of tensors otherwise.
        """
        if (self.targets is not None):
            target = []
            if (self.add_eos_for_other_targets and (('self' in self.targets) or ('past' in self.targets)) and (source[(- 1)] != self.vocab.eos())):
                # Append EOS to the source and shift the other targets to match.
                source = torch.cat([source, source.new([self.vocab.eos()])])
                if ('future' in self.targets):
                    future_target = torch.cat([future_target, future_target.new([self.vocab.pad()])])
                if ('past' in self.targets):
                    past_target = torch.cat([past_target.new([self.vocab.pad()]), past_target[1:], source[((- 2), None)]])
            for t in self.targets:
                if (t == 'self'):
                    target.append(source)
                elif (t == 'future'):
                    target.append(future_target)
                elif (t == 'past'):
                    target.append(past_target)
                else:
                    raise Exception(('invalid target ' + t))
            if (len(target) == 1):
                target = target[0]
        else:
            target = future_target
        return (source, self._filter_vocab(target))

    def _maybe_add_bos(self, source, target):
        # Prepend BOS to both source and target when configured.
        if self.add_bos_token:
            source = torch.cat([source.new([self.vocab.bos()]), source])
            if (target is not None):
                target = torch.cat([target.new([self.tgt_vocab.bos()]), target])
        return (source, target)

    def _filter_vocab(self, target):
        """Map target ids outside the target vocabulary to UNK."""
        if (len(self.tgt_vocab) != len(self.vocab)):
            def _filter(target):
                mask = target.ge(len(self.tgt_vocab))
                if mask.any():
                    target[mask] = self.tgt_vocab.unk()
                return target
            if isinstance(target, list):
                return [_filter(t) for t in target]
            return _filter(target)
        return target

    def collater(self, samples):
        """Merge a list of samples into a mini-batch (delegates to `collate`)."""
        return collate(samples, self.vocab.pad(), self.vocab.eos())

    def num_tokens(self, index):
        # Token count of the example; used for batching by size.
        return self.sizes[index]

    def size(self, index):
        return self.sizes[index]

    def ordered_indices(self):
        """Indices ordered for batching: (optionally shuffled, then) by size."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        order.append(self.sizes)
        # lexsort: last key (sizes) is primary, earlier keys break ties.
        return np.lexsort(order)

    def supports_prefetch(self):
        return getattr(self.dataset, 'supports_prefetch', False)

    def prefetch(self, indices):
        self.dataset.prefetch(indices)
def batch_norm_for_conv3d(inputs, is_training, bn_decay, scope, is_dist=False):
    """Batch-normalize a 3D-conv activation over axes [0, 1, 2, 3].

    Dispatches to the distributed or single-device batch-norm template
    depending on `is_dist`.
    """
    template = batch_norm_dist_template if is_dist else batch_norm_template
    return template(inputs, is_training, scope, [0, 1, 2, 3], bn_decay)
@register_criterion('cross_entropy_acc')
class CrossEntropyWithAccCriterion(FairseqCriterion):
    """Cross-entropy criterion that also tracks token-level accuracy.

    NOTE(review): the registration line was mangled in the extracted source
    (a bare `_criterion('cross_entropy_acc')` call); reconstructed here as
    fairseq's `@register_criterion` decorator — confirm against the file's
    imports.
    """

    def __init__(self, task, sentence_avg):
        super().__init__(task)
        # When True, normalize the loss per sentence instead of per token.
        self.sentence_avg = sentence_avg

    def compute_loss(self, model, net_output, target, reduction, log_probs):
        """Return (flattened lprobs, NLL loss) for `net_output` vs `target`."""
        target = target.view(-1)
        lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
        if (not hasattr(lprobs, 'batch_first')):
            logging.warning('ERROR: we need to know whether batch first for the net output; you need to set batch_first attribute for the return value of model.get_normalized_probs. Now, we assume this is true, but in the future, we will raise exception instead. ')
        # Default to batch-first when the attribute is absent (see warning).
        batch_first = getattr(lprobs, 'batch_first', True)
        if (not batch_first):
            lprobs = lprobs.transpose(0, 1)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        loss = F.nll_loss(lprobs, target, ignore_index=self.padding_idx, reduction=reduction)
        return (lprobs, loss)

    def get_logging_output(self, sample, target, lprobs, loss):
        """Compute sample_size and the per-batch logging dict (incl. accuracy)."""
        target = target.view(-1)
        # Only count non-padding positions toward accuracy.
        mask = (target != self.padding_idx)
        correct = torch.sum((lprobs.argmax(1).masked_select(mask) == target.masked_select(mask)))
        total = torch.sum(mask)
        sample_size = (sample['target'].size(0) if self.sentence_avg else sample['ntokens'])
        logging_output = {'loss': utils.item(loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample['target'].size(0), 'sample_size': sample_size, 'correct': utils.item(correct.data), 'total': utils.item(total.data), 'nframes': torch.sum(sample['net_input']['src_lengths']).item()}
        return (sample_size, logging_output)

    def forward(self, model, sample, reduction='sum', log_probs=True):
        """Compute the loss for `sample`; returns (loss, sample_size, logging)."""
        net_output = model(**sample['net_input'])
        target = model.get_targets(sample, net_output)
        (lprobs, loss) = self.compute_loss(model, net_output, target, reduction, log_probs)
        (sample_size, logging_output) = self.get_logging_output(sample, target, lprobs, loss)
        return (loss, sample_size, logging_output)

    # BUG FIX: declared without `self` but undecorated — calling it on an
    # instance would mis-bind the first argument; mark it @staticmethod.
    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from parallel workers into one dict."""
        correct_sum = sum((log.get('correct', 0) for log in logging_outputs))
        total_sum = sum((log.get('total', 0) for log in logging_outputs))
        loss_sum = sum((log.get('loss', 0) for log in logging_outputs))
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        nsentences = sum((log.get('nsentences', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        nframes = sum((log.get('nframes', 0) for log in logging_outputs))
        # Loss is reported in base-2 (divide by ln 2); accuracy in percent.
        agg_output = {'loss': (((loss_sum / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'ntokens': ntokens, 'nsentences': nsentences, 'nframes': nframes, 'sample_size': sample_size, 'acc': (((correct_sum * 100.0) / total_sum) if (total_sum > 0) else 0.0), 'correct': correct_sum, 'total': total_sum}
        if (sample_size != ntokens):
            agg_output['nll_loss'] = ((loss_sum / ntokens) / math.log(2))
        return agg_output
@pytest.mark.parametrize('dataset_class', [Sinusoid, Harmonic, SinusoidAndLine])
def test_toy_sample(dataset_class):
    """A single task sample is a pair of 1-D numpy arrays of shape (1,).

    BUG FIX: the original first line was a bare `.parametrize(...)` — a
    SyntaxError left by a mangled decorator; reconstructed as
    `@pytest.mark.parametrize`.
    """
    dataset = dataset_class(10, num_tasks=1000, noise_std=None)
    task = dataset[0]
    (input, target) = task[0]
    assert isinstance(input, np.ndarray)
    assert isinstance(target, np.ndarray)
    assert (input.shape == (1,))
    assert (target.shape == (1,))
def dedent(text, reindent=0):
from textwrap import dedent
text = dedent(text)
if (reindent > 0):
indent = (' ' * reindent)
text = '\n'.join([(indent + x) for x in text.split('\n')])
return text |
@add_start_docstrings(VISION_TEXT_DUAL_ENCODER_START_DOCSTRING)
class FlaxVisionTextDualEncoderModel(FlaxPreTrainedModel):
    # NOTE(review): the decorator line was mangled to a bare
    # `_start_docstrings(...)` call in the extracted source; reconstructed as
    # transformers' `@add_start_docstrings` — confirm against the file's imports.
    config_class = VisionTextDualEncoderConfig
    module_class = FlaxVisionTextDualEncoderModule
def __init__(self, config: VisionTextDualEncoderConfig, input_shape: Optional[Tuple]=None, seed: int=0, dtype: jnp.dtype=jnp.float32, _do_init: bool=True, **kwargs):
    """Construct the dual-encoder model; `_do_init` must be True (lazy init
    is not supported for this model)."""
    if (not _do_init):
        raise ValueError('`FlaxVisionTextDualEncoderModel` cannot be created without initializing, `_do_init` must be `True`.')
    if (input_shape is None):
        # Default: (text input_ids shape, NHWC pixel_values shape).
        input_shape = ((1, 1), (1, config.vision_config.image_size, config.vision_config.image_size, 3))
    module = self.module_class(config=config, dtype=dtype, **kwargs)
    super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)
def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict:
    """Initialize model parameters with dummy inputs of `input_shape`.

    When `params` is given, any keys listed in `self._missing_keys` are
    filled in from the freshly initialized parameters.
    """
    # Dummy text inputs: zero ids, broadcast position ids, all-ones masks.
    input_ids = jnp.zeros(input_shape[0], dtype='i4')
    position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[(- 1)]), input_shape[0])
    token_type_ids = jnp.ones_like(input_ids)
    attention_mask = jnp.ones_like(input_ids)
    # Dummy vision input: random normal pixels.
    pixel_values = jax.random.normal(rng, input_shape[1])
    (params_rng, dropout_rng) = jax.random.split(rng)
    rngs = {'params': params_rng, 'dropout': dropout_rng}
    random_params = self.module.init(rngs, input_ids, pixel_values, attention_mask, position_ids, token_type_ids)['params']
    if (params is not None):
        # Splice randomly initialized values into the provided params for
        # every key recorded as missing, then re-freeze.
        random_params = flatten_dict(unfreeze(random_params))
        params = flatten_dict(unfreeze(params))
        for missing_key in self._missing_keys:
            params[missing_key] = random_params[missing_key]
        self._missing_keys = set()
        return freeze(unflatten_dict(params))
    else:
        return random_params
def __call__(self, input_ids, pixel_values, attention_mask=None, position_ids=None, token_type_ids=None, params: dict=None, dropout_rng: jax.random.PRNGKey=None, train: bool=False, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None):
    """Run the dual encoder on text (`input_ids`) and images (`pixel_values`).

    Missing masks/ids are filled with defaults; output flags fall back to
    the model config when not given.
    """
    output_attentions = (output_attentions if (output_attentions is not None) else self.config.output_attentions)
    output_hidden_states = (output_hidden_states if (output_hidden_states is not None) else self.config.output_hidden_states)
    return_dict = (return_dict if (return_dict is not None) else self.config.return_dict)
    # Convert pixel_values from NCHW to NHWC for the flax vision tower.
    pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))
    if (position_ids is None):
        position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[(- 1)]), input_ids.shape)
    if (token_type_ids is None):
        token_type_ids = jnp.zeros_like(input_ids)
    if (attention_mask is None):
        attention_mask = jnp.ones_like(input_ids)
    # Only thread a dropout RNG through when one is supplied.
    rngs = {}
    if (dropout_rng is not None):
        rngs['dropout'] = dropout_rng
    return self.module.apply({'params': (params or self.params)}, jnp.array(input_ids, dtype='i4'), jnp.array(pixel_values, dtype=jnp.float32), jnp.array(attention_mask, dtype='i4'), jnp.array(position_ids, dtype='i4'), jnp.array(token_type_ids, dtype='i4'), (not train), output_attentions, output_hidden_states, return_dict, rngs=rngs)
def get_text_features(self, input_ids, attention_mask=None, position_ids=None, token_type_ids=None, params: dict=None, dropout_rng: jax.random.PRNGKey=None, train=False):
    """Return the projected pooled text embeddings for `input_ids`."""
    if position_ids is None:
        position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
    if token_type_ids is None:
        token_type_ids = jnp.zeros_like(input_ids)
    if attention_mask is None:
        attention_mask = jnp.ones_like(input_ids)
    rngs = {'dropout': dropout_rng} if dropout_rng is not None else {}

    def _text_features(module, input_ids, attention_mask, position_ids, token_type_ids, deterministic):
        # Run only the text tower and project its pooled output (index 1).
        outputs = module.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, deterministic=deterministic)
        return module.text_projection(outputs[1])

    return self.module.apply(
        {'params': params or self.params},
        jnp.array(input_ids, dtype='i4'),
        jnp.array(attention_mask, dtype='i4'),
        jnp.array(position_ids, dtype='i4'),
        jnp.array(token_type_ids, dtype='i4'),
        not train,
        method=_text_features,
        rngs=rngs,
    )
def get_image_features(self, pixel_values, params: dict=None, dropout_rng: jax.random.PRNGKey=None, train=False):
    """Return the projected pooled image embeddings for `pixel_values`."""
    rngs = {'dropout': dropout_rng} if dropout_rng is not None else {}

    def _image_features(module, pixel_values, deterministic):
        # Run only the vision tower and project its pooled output (index 1).
        outputs = module.vision_model(pixel_values=pixel_values, deterministic=deterministic)
        return module.visual_projection(outputs[1])

    return self.module.apply(
        {'params': params or self.params},
        jnp.array(pixel_values, dtype=jnp.float32),
        not train,
        method=_image_features,
        rngs=rngs,
    )
@classmethod
def from_vision_text_pretrained(cls, vision_model_name_or_path: str=None, text_model_name_or_path: str=None, *model_args, **kwargs) -> FlaxPreTrainedModel:
    """Build a dual encoder from separately pretrained vision and text models.

    Keyword arguments prefixed with ``vision_`` / ``text_`` are routed to the
    respective sub-model loader (with the prefix stripped); everything else is
    forwarded to the dual-encoder config and constructor.

    Fix: the method takes ``cls`` and calls ``cls(...)``, so it must be a
    classmethod — the decorator was missing, which made a call on the class
    bind the first positional argument to ``cls``.

    Raises:
        ValueError: when neither a preloaded model nor a name/path is given
            for one of the two towers.
    """
    # Split out and strip the tower-specific kwargs.
    kwargs_vision = {argument[len('vision_'):]: value for argument, value in kwargs.items() if argument.startswith('vision_')}
    kwargs_text = {argument[len('text_'):]: value for argument, value in kwargs.items() if argument.startswith('text_')}
    for key in kwargs_vision.keys():
        del kwargs['vision_' + key]
    for key in kwargs_text.keys():
        del kwargs['text_' + key]
    vision_model = kwargs_vision.pop('model', None)
    if vision_model is None:
        if vision_model_name_or_path is None:
            raise ValueError('If `vision_model` is not defined as an argument, a `vision_model_name_or_path` has to be defined')
        if 'config' not in kwargs_vision:
            vision_config = AutoConfig.from_pretrained(vision_model_name_or_path)
            if vision_config.model_type == 'clip':
                # CLIP checkpoints carry a combined config; use only the vision half.
                kwargs_vision['config'] = vision_config.vision_config
                vision_model = FlaxCLIPVisionModel.from_pretrained(vision_model_name_or_path, *model_args, **kwargs_vision)
            else:
                kwargs_vision['config'] = vision_config
                vision_model = FlaxAutoModel.from_pretrained(vision_model_name_or_path, *model_args, **kwargs_vision)
    text_model = kwargs_text.pop('model', None)
    if text_model is None:
        if text_model_name_or_path is None:
            raise ValueError('If `text_model` is not defined as an argument, a `text_model_name_or_path` has to be defined')
        if 'config' not in kwargs_text:
            text_config = AutoConfig.from_pretrained(text_model_name_or_path)
            kwargs_text['config'] = text_config
        text_model = FlaxAutoModel.from_pretrained(text_model_name_or_path, *model_args, **kwargs_text)
    dtype = kwargs.pop('dtype', jnp.float32)
    config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_model.config, text_model.config, **kwargs)
    model = cls(config, *model_args, dtype=dtype, **kwargs)
    # Graft the pretrained towers into the freshly initialised dual encoder;
    # the projection layers and logit scale remain randomly initialised.
    model.params['vision_model'] = vision_model.params
    model.params['text_model'] = text_model.params
    logger.warning("The projection layer and logit scale weights `[('visual_projection', 'kernel'), ('text_projection', 'kernel'), ('logit_scale',)]` are newly initialized. You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.")
    return model
def _load_state_dict(model: nn.Module, model_url: str, progress: bool) -> None:
    """Download a DenseNet checkpoint and load it, renaming legacy keys.

    Older checkpoints store nested layer weights as e.g.
    ``denselayer1.norm.1.weight``; the current module layout expects
    ``denselayer1.norm1.weight``, so the extra dot is removed here.
    """
    pattern = re.compile(r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
    state_dict = load_state_dict_from_url(model_url, progress=progress)
    # Iterate over a snapshot of the keys since the dict is mutated in place.
    for old_key in list(state_dict):
        match = pattern.match(old_key)
        if match is None:
            continue
        new_key = match.group(1) + match.group(2)
        state_dict[new_key] = state_dict.pop(old_key)
    model.load_state_dict(state_dict)
_utils.in_tempdir  # NOTE(review): likely a stripped '@_utils.in_tempdir' decorator — confirm against upstream
def test_dory_make_bgzf(location):
    """Compress the dory subset FASTA to BGZF and expect a zero exit status."""
    copy_dory_subset()
    print('** running make_bgzf')
    args = ['dory-subset.fa', '-o', 'reads.bgz']
    assert (make_bgzf.main(args) == 0)
def _read_and_preprocess_human_eval(target_path: str, num_train_instances: int, num_val_instances: int, num_test_instances: int) -> List[CodeInstance]:
    """Load HumanEval problems and wrap them as CodeInstances with split tags.

    The first `num_train_instances` problems become TRAIN, the next
    `num_val_instances` become VALID, and everything after that is TEST.
    """
    problems = _read_human_eval(target_path)
    val_cutoff = num_train_instances + num_val_instances
    instances = []
    for idx, task_id in enumerate(problems):
        if idx < num_train_instances:
            split = TRAIN_SPLIT
        elif idx < val_cutoff:
            split = VALID_SPLIT
        else:
            split = TEST_SPLIT
        problem = problems[task_id]
        reference = CodeReference(output=Output(text=problem['canonical_solution']), test_cases=problem, tags=[CORRECT_TAG])
        instances.append(CodeInstance(input=Input(text=problem['prompt']), references=[reference], split=split))
    return instances
class RansomwareClientServer(Server):
    """Emulator server node running the ransomware PoC client, optionally
    with BYOB botnet tooling installed at build time."""
    # Whether BYOB botnet support should be installed alongside the client.
    __is_botnet_enabled: bool = False

    def supportBotnet(self, is_botnet_enabled: bool) -> RansomwareClientServer:
        """Enable/disable botnet support; returns self for fluent chaining."""
        self.__is_botnet_enabled = is_botnet_enabled
        return self

    def install(self, node: Node):
        """Install the client on `node`: launch the /bof server and, when
        botnet support is enabled, install the BYOB toolchain."""
        node.appendStartCommand('rm -f /root/.bashrc && cd /bof && ./server &')
        if self.__is_botnet_enabled:
            node.addSoftware('git cmake python3-dev gcc g++ make python3-pip')
            # NOTE(review): this format string contains no '{}' placeholder, so
            # BYOB_VERSION is never substituted and `curl >` has no source URL —
            # the download URL appears to have been lost; confirm upstream.
            node.addBuildCommand('curl > /tmp/byob-requirements.txt'.format(BYOB_VERSION))
            node.addBuildCommand('pip3 install -r /tmp/byob-requirements.txt')
            node.addSoftware('systemctl')
            node.addBuildCommand('pip3 uninstall pycryptodome Crypto -y && pip3 install pycryptodome Crypto')
            node.addBuildCommand('pip3 install pysocks numpy typing_extensions')
        # Seed file that the ransomware client targets for encryption.
        node.setFile('/tmp/tmp/hello.txt', 'Hello\nThis is the target file.')

    def print(self, indent: int) -> str:
        """Render a one-line description of this component, indented by `indent`."""
        out = (' ' * indent)
        out += 'Ransomware client object.\n'
        return out
class ToTensor():
    """Convert a PIL image to a channels-first torch tensor without scaling.

    Unlike torchvision's ToTensor, pixel values keep their original 0-255
    range and are only cast to `dtype`.
    """

    def __init__(self, dtype=torch.float32):
        # Target dtype of the returned tensor.
        self.dtype = dtype

    def __call__(self, pil_img):
        arr = np.array(pil_img, dtype=np.uint8)
        # Give grayscale images an explicit trailing channel axis.
        if arr.ndim < 3:
            arr = np.expand_dims(arr, axis=-1)
        # HWC -> CHW to match torch's layout convention.
        chw = np.rollaxis(arr, 2)
        return torch.from_numpy(chw).to(dtype=self.dtype)
def delexicaliseReferenceNumber(sent, metadata):
    """Replace booked-slot values (e.g. reference numbers) in `sent` with
    placeholders such as ``[train_reference]``.

    Matching is whole-token: the sentence is padded with spaces so only
    space-delimited occurrences are replaced. The bare value plus its
    ``#``- and ``ref#``-prefixed forms are all handled.
    """
    domains = ['restaurant', 'hotel', 'attraction', 'train', 'taxi', 'hospital']
    if metadata:
        for domain in domains:
            booked = metadata[domain]['book']['booked']
            if not booked:
                continue
            first_booking = booked[0]
            for slot in first_booking:
                # NOTE: both branches of the original slot == 'reference' check
                # built the identical placeholder, so a single form is used.
                placeholder = '[' + domain + '_' + slot + ']'
                for prefix in ('', '#', 'ref#'):
                    token = normalize(prefix + first_booking[slot])
                    sent = (' ' + sent + ' ').replace(' ' + token + ' ', ' ' + placeholder + ' ')
    return sent
def get_credentials(path: str) -> Dict[str, str]:
    """Parse a credentials file of ``name: "value"`` lines into a dict.

    Spaces and newlines are stripped from each line; only lines containing
    exactly one ``:`` are kept, and the value is taken from between the
    first pair of double quotes.
    """
    credentials: Dict[str, str] = {}
    with open(path, 'r') as handle:
        for raw_line in handle:
            parts = raw_line.replace(' ', '').replace('\n', '').split(':')
            if len(parts) != 2:
                continue
            name, quoted = parts
            credentials[name] = quoted.split('"')[1]
    return credentials
def prepare_query(filters):
    """Build a MongoDB query dict from the UI filter settings.

    An explicit ``doc_id_list`` overrides every other filter; otherwise the
    query combines the outlet whitelist, a non-empty-body requirement, and
    the caller-supplied date/other filter clauses.
    """
    default_outlets = ['Journal De Montreal', 'La Presse', 'Le Devoir', 'Le Droit', 'Radio Canada', 'TVA News']
    outlets = filters['outlets'].split(',') if filters['outlets'] else default_outlets
    if filters['doc_id_list']:
        ids = [ObjectId(token.strip()) for token in filters['doc_id_list'].split(',')]
        return {'_id': {'$in': ids}}
    clauses = [{'outlet': {'$in': outlets}}, {'body': {'$ne': ''}}]
    clauses += filters['date_filters']
    clauses += filters['other_filters']
    return {'$and': clauses}
def main():
    """Compute and save the magnitude spectrogram of one MUSDB test track."""
    target_name = 'Al James - Schoolboy Facination'
    db = musdb.DB(download=True)
    track = [t for t in db.tracks if t.name == target_name][0]
    # Channels-first float32 audio, with a leading batch axis for the models.
    audio = torch.tensor(track.audio.T, dtype=torch.float32)
    stft = model.STFT(n_fft=4096, n_hop=1024)
    spec = model.Spectrogram(power=1, mono=False)
    magnitude = spec(stft(audio[None, ...]))
    torch.save(magnitude, 'Al James - Schoolboy Facination.spectrogram.pt')
def test_dice_loss():
    """All-negative logits against an all-zero target give ~zero dice loss."""
    # -1000 logits make sigmoid(pred) effectively 0, matching the zero target.
    pred = torch.full((1, 3, 3), -1000.0)
    target = torch.zeros((1, 3, 3))
    mask = torch.ones((1, 3, 3))
    dice = losses.PANLoss().dice_loss_with_logits(pred, target, mask)
    assert np.allclose(dice.item(), 0)
def make_attrgetter(environment, attribute, postprocess=None, default=None):
    """Return a callable resolving a (possibly dotted) attribute path on an item.

    Each path part is looked up via ``environment.getitem``; when ``default``
    is truthy, undefined intermediate results are replaced by it, and the
    final value is passed through ``postprocess`` if one is given.
    """
    parts = _prepare_attribute_parts(attribute)

    def attrgetter(item):
        for part in parts:
            item = environment.getitem(item, part)
            # Substitute the fallback as soon as a lookup comes back undefined.
            if default and isinstance(item, Undefined):
                item = default
        if postprocess is not None:
            item = postprocess(item)
        return item

    return attrgetter
def get_prior_grad_EP_scalar(prior, ax, bx):
    """Numerically differentiate the prior's scalar log-partition at (ax, bx)
    and collect the EP moment quantities into one dict."""
    def log_partition_in_bx(b):
        return prior.scalar_log_partition(ax, b)

    def log_partition_in_ax(a):
        return prior.scalar_log_partition(a, bx)

    grad_bx_A1 = numerical_1st_derivative(bx, log_partition_in_bx, EPSILON)
    grad_bx_A2 = numerical_2nd_derivative(bx, log_partition_in_bx, EPSILON)
    grad_ax_A = numerical_1st_derivative(ax, log_partition_in_ax, EPSILON)
    rx = prior.scalar_forward_mean(ax, bx)
    vx = prior.scalar_forward_variance(ax, bx)
    qx = rx ** 2      # squared mean
    tx = qx + vx      # second moment
    return {
        'grad_bx_A1': grad_bx_A1,
        'grad_bx_A2': grad_bx_A2,
        'grad_ax_A': grad_ax_A,
        'rx': rx,
        'vx': vx,
        'tx': tx,
        'qx': qx,
    }
class VGroupByClause(object):
    """Mutable container for the parsed pieces of a GROUP BY clause.

    Every attribute starts as None and is filled in later: the grouped
    fields, per-field aggregation ops, DISTINCT flags, and arithmetic ops.
    """

    def __init__(self):
        for attr in ('fields', 'field_aggregation_ops', 'field_distincts', 'field_arithmetic_ops'):
            setattr(self, attr, None)
.parametrize('csr_container', CSR_CONTAINERS)  # NOTE(review): leading '@pytest.mark' appears to have been stripped — restore the decorator
def test_linearsvc_iris(csr_container):
    """LinearSVC fitted on sparse iris data should match the dense fit."""
    iris_data_sp = csr_container(iris.data)
    sp_clf = svm.LinearSVC(dual='auto', random_state=0).fit(iris_data_sp, iris.target)
    clf = svm.LinearSVC(dual='auto', random_state=0).fit(iris.data, iris.target)
    assert (clf.fit_intercept == sp_clf.fit_intercept)
    # Coefficients agree only loosely (decimal=1) between sparse/dense solvers.
    assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
    assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
    assert_allclose(clf.predict(iris.data), sp_clf.predict(iris_data_sp))
    pred = np.argmax(sp_clf.decision_function(iris_data_sp), axis=1)
    assert_allclose(pred, clf.predict(iris.data))
    # Predictions must survive sparsifying the fitted coefficient matrices.
    clf.sparsify()
    assert_array_equal(pred, clf.predict(iris_data_sp))
    sp_clf.sparsify()
    assert_array_equal(pred, sp_clf.predict(iris_data_sp))
def get_args():
    """Define and parse the command-line arguments for the extraction script."""
    parser = argparse.ArgumentParser()
    # Required positional paths.
    parser.add_argument('data_path')
    parser.add_argument('from_dir')
    parser.add_argument('to_dir')
    # Document filtering / sampling knobs.
    parser.add_argument('--min_num_chars', type=int, default=500)
    parser.add_argument('--max_num_chars', type=int, default=2000)
    parser.add_argument('--sample_ratio', type=float, default=1.0)
    parser.add_argument('--docs_per_file', type=int, default=1000)
    parser.add_argument('--seed', type=int, default=29)
    parser.add_argument('--concat', action='store_true', default=False)
    return parser.parse_args()
def get_inpatient_admission_discharge_times(patient: Patient, ontology: extension_datasets.Ontology) -> List[Tuple[(datetime.datetime, datetime.datetime)]]:
    """Return validated (admission_start, discharge_end) pairs for `patient`.

    Raises:
        RuntimeError: if an admission event lacks an end time or its start
            comes after its end.
    """
    times: List[Tuple[(datetime.datetime, datetime.datetime)]] = []
    for e in get_inpatient_admission_events(patient, ontology):
        if e.end is None:
            raise RuntimeError(f'Event {e} cannot have `None` as its `end` attribute.')
        if e.start > e.end:
            raise RuntimeError(f'Event {e} cannot have `start` after `end`.')
        times.append((e.start, e.end))
    return times
class Generator():
    """DCGAN-style generator: dense projection followed by four transposed
    convolutions (TF1 graph mode; variables live under scope 'g')."""

    def __init__(self, depths=[1024, 512, 256, 128], s_size=4):
        # Output depth 3 (RGB) is appended to the configurable channel stack.
        self.depths = (depths + [3])
        # Spatial size of the first feature map (s_size x s_size).
        self.s_size = s_size
        # Flips to True after the first call so later calls reuse variables.
        self.reuse = False

    def __call__(self, inputs, training=False):
        """Build (or reuse) the generator graph; returns tanh-squashed images."""
        inputs = tf.convert_to_tensor(inputs)
        with tf.variable_scope('g', reuse=self.reuse):
            with tf.variable_scope('reshape'):
                # Project the latent vector to an s_size x s_size feature map stack.
                outputs = tf.layers.dense(inputs, ((self.depths[0] * self.s_size) * self.s_size))
                outputs = tf.reshape(outputs, [(- 1), self.s_size, self.s_size, self.depths[0]])
                outputs = tf.nn.relu(tf.layers.batch_normalization(outputs, training=training), name='outputs')
            with tf.variable_scope('deconv1'):
                # Each deconv doubles the spatial resolution (stride 2, 5x5 kernels).
                outputs = tf.layers.conv2d_transpose(outputs, self.depths[1], [5, 5], strides=(2, 2), padding='SAME')
                outputs = tf.nn.relu(tf.layers.batch_normalization(outputs, training=training), name='outputs')
            with tf.variable_scope('deconv2'):
                outputs = tf.layers.conv2d_transpose(outputs, self.depths[2], [5, 5], strides=(2, 2), padding='SAME')
                outputs = tf.nn.relu(tf.layers.batch_normalization(outputs, training=training), name='outputs')
            with tf.variable_scope('deconv3'):
                outputs = tf.layers.conv2d_transpose(outputs, self.depths[3], [5, 5], strides=(2, 2), padding='SAME')
                outputs = tf.nn.relu(tf.layers.batch_normalization(outputs, training=training), name='outputs')
            with tf.variable_scope('deconv4'):
                # Last deconv has no BN/ReLU; tanh below squashes to [-1, 1].
                outputs = tf.layers.conv2d_transpose(outputs, self.depths[4], [5, 5], strides=(2, 2), padding='SAME')
            with tf.variable_scope('tanh'):
                outputs = tf.tanh(outputs, name='outputs')
        self.reuse = True
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='g')
        return outputs
_method('Intracomm', 'Isend')  # NOTE(review): likely a stripped '@...' replacement-registration decorator — confirm upstream
def _intracomm_isend(pv: 'ProgramVisitor', sdfg: SDFG, state: SDFGState, icomm: 'Intracomm', buffer: str, dst: Union[(str, sp.Expr, Number)], tag: Union[(str, sp.Expr, Number)]):
    """Lower an mpi4py ``Intracomm.Isend`` call to the DaCe ``_isend`` node.

    Only ``MPI.COMM_WORLD`` is supported. A transient opaque ``MPI_Request``
    array is allocated and returned so the caller can later wait on it.
    """
    from mpi4py import MPI
    (icomm_name, icomm_obj) = icomm
    if (icomm_obj != MPI.COMM_WORLD):
        raise ValueError('Only the mpi4py.MPI.COMM_WORLD Intracomm is supported in DaCe Python programs.')
    # Fresh transient request buffer; find_new_name avoids collisions on repeated calls.
    (req, _) = sdfg.add_array('isend_req', [1], dace.dtypes.opaque('MPI_Request'), transient=True, find_new_name=True)
    _isend(pv, sdfg, state, buffer, dst, tag, req)
    return req
def createExecutableFile(data):
    """Write `data` to ./test.bin, mark it executable, and run it.

    WARNING: this executes arbitrary bytes from `data`; only ever call it
    with trusted input (it exists for exploit/CTF test harnesses).
    """
    with open('test.bin', 'wb') as f:
        f.write(data)
    os.chmod('test.bin', 0o755)  # 493 decimal == 0o755 (rwxr-xr-x)
    # Bug fix: a bare 'test.bin' is resolved via $PATH on POSIX, so the
    # freshly written file was never executed; run it relative to the cwd.
    os.system('./test.bin')
def test_flatten_labels_2():
    """Two label columns should be joined row-wise with the separator."""
    labels = pd.DataFrame({
        'Product': ['Debt collection', 'Checking or savings account'],
        'Sub-product': ['I do not know', 'Checking account'],
    })
    separator = ','
    flattened = flatten_labels(labels, separator)
    expected = pd.Series([
        'Debt collection,I do not know',
        'Checking or savings account,Checking account',
    ])
    assert_series_equal(expected, flattened)
_level_function()  # NOTE(review): likely a stripped '@high_level_function()'-style decorator — confirm upstream
def from_regular(array, axis=1, *, highlevel=True, behavior=None, attrs=None):
    """Convert a regular-length dimension of `array` into a variable-length one.

    The bare ``yield`` hands the arguments to the library's dispatch
    mechanism before `_impl` performs the actual conversion.
    """
    (yield (array,))
    return _impl(array, axis, highlevel, behavior, attrs)
def save_object(obj, filename, protocol=2):
    """Pickle `obj` to `filename`.

    Args:
        obj: any picklable object.
        filename: destination path (overwritten if it already exists).
        protocol: pickle protocol number; defaults to 2 for backward
            compatibility with files this code previously wrote.
    """
    with open(filename, 'wb') as output:
        pickle.dump(obj, output, protocol)
def run(verbose=0, model_version=None, coref_models=[], data_dir=None, exp_dir=None, do_preprocess_train=False, do_preprocess_eval=False, force=False, **kwargs):
    """End-to-end driver: optionally preprocess, then train/evaluate the model.

    NOTE(review): `coref_models=[]` is a mutable default argument; it is only
    read here, but replacing it with None would be safer — confirm callers.
    NOTE(review): `res` is unbound when neither `do_train` nor `do_eval` is
    set in kwargs, so the final `return res` would raise NameError — confirm
    whether that combination is ever invoked.
    """
    # Remaining CLI options travel as attribute-style kwargs.
    args = AttrDict(kwargs)
    exp_dir = Path(exp_dir)
    logging.getLogger('steppy').setLevel(logging.INFO)
    if (verbose == 0):
        logging.getLogger('steppy').setLevel(logging.WARNING)
    if (do_preprocess_train or do_preprocess_eval):
        # `force` wipes previously cached pipeline artefacts before rebuilding.
        if (do_preprocess_train and force):
            shutil.rmtree((exp_dir / 'data_pipeline'), ignore_errors=True)
        if (do_preprocess_eval and force):
            shutil.rmtree(((exp_dir / 'data_pipeline') / 'test'), ignore_errors=True)
        # Only the 'grep' model variant needs live coreference models.
        if (model_version == 'grep'):
            coref_models_ = init_coref_models(coref_models)
        else:
            coref_models_ = []
    else:
        # Without preprocessing, keep per-name placeholders only.
        coref_models_ = {name: None for name in coref_models}
    # 'grep' implies both mention annotation and pretrained proref features.
    annotate_coref_mentions = pretrained_proref = (model_version == 'grep')
    (X_trn, X_val, X_tst, X_neither, X_inference) = init_data(data_dir, exp_dir, persist=True, sanitize_labels=args.sanitize_labels, annotate_coref_mentions=annotate_coref_mentions, pretrained_proref=pretrained_proref, coref_models=coref_models_, test_path=args.test_path, verbose=verbose)
    if (args.do_train or args.do_eval):
        # Pad the training pool depending on available GPUs (batch divisibility).
        n_gpu = torch.cuda.device_count()
        n_samples = 0
        if (n_gpu == 4):
            n_samples = 3
        if (n_gpu == 8):
            n_samples = 8
        if args.do_kaggle:
            # Kaggle mode: ensemble of language models over multiple seeds/folds.
            res = Model().ensembled_lms(fit_fold, pd.concat([X_trn, X_val, X_tst, X_neither, X_neither.head(n_samples)]).reset_index(drop=True), None, X_tst=X_inference, seeds=args.seeds, n_folds=args.n_folds, lms=args.lms, exp_dir=exp_dir, sub_sample_path=args.sub_sample_path, verbose=verbose, parameters={'do_train': args.do_train, 'do_eval': args.do_eval, 'max_seq_length': args.max_seq_length, 'train_batch_size': args.train_batch_size, 'eval_batch_size': args.eval_batch_size, 'learning_rate': args.learning_rate, 'num_train_epochs': args.num_train_epochs, 'patience': args.patience, 'model_version': model_version, 'n_coref_models': len(coref_models)})
        else:
            # Single train/evaluate run with the first seed and language model.
            if args.test_path:
                X_tst = X_inference
            res = Model().train_evaluate(fit_fold, X_trn, X_val, X_tst=X_tst, seed=args.seeds[0], lm=args.lms[0], exp_dir=exp_dir, sub_sample_path=args.sub_sample_path, test_path=args.test_path, verbose=verbose, parameters={'do_train': args.do_train, 'do_eval': args.do_eval, 'max_seq_length': args.max_seq_length, 'train_batch_size': args.train_batch_size, 'eval_batch_size': args.eval_batch_size, 'learning_rate': args.learning_rate, 'num_train_epochs': args.num_train_epochs, 'patience': args.patience, 'model_version': model_version, 'n_coref_models': len(coref_models)})
    return res
def _set_opset_version(opset_version):
    """Set the module-global ONNX export opset after validating support.

    Raises:
        ValueError: if `opset_version` is neither the default, a stable
            opset, nor the master opset.
    """
    global _export_onnx_opset_version
    supported = [_default_onnx_opset_version] + _onnx_stable_opsets + [_onnx_master_opset]
    if opset_version not in supported:
        raise ValueError('Unsupported ONNX opset version: ' + str(opset_version))
    _export_onnx_opset_version = opset_version
def test_inheritance_modifier():
    """Bar (the subclass) sees two modifiers; Foo (the base) sees one."""
    cluster = generate_test_cluster('tests.fixtures.cluster.inheritance')
    from tests.fixtures.cluster.inheritance import Bar
    from tests.fixtures.cluster.inheritance import Foo
    convert = cluster.type_system.convert_type_hint
    assert len(cluster.get_modifiers_for(convert(Bar))) == 2
    assert len(cluster.get_modifiers_for(convert(Foo))) == 1
class TrainOptions(BaseOptions):
    """Training-time command-line options, layered on the shared BaseOptions."""

    def initialize(self):
        """Register training-specific arguments on top of the base set."""
        BaseOptions.initialize(self)
        add = self.parser.add_argument
        # Display / logging frequencies.
        add('--display_freq', type=int, default=100, help='frequency of showing training results on screen')
        add('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        add('--save_latest_freq', type=int, default=1000, help='frequency of saving the latest results')
        add('--save_epoch_freq', type=int, default=10, help='frequency of saving checkpoints at the end of epochs')
        add('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
        add('--debug', action='store_true', help='only do one epoch and displays at each iteration')
        # Checkpoint resumption.
        add('--continue_train', action='store_true', help='continue training: load the latest model')
        add('--load_pretrain', type=str, default='./checkpoints/label2face_512p', help='load the pretrained model from the specified location')
        add('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        add('--phase', type=str, default='train', help='train, val, test, etc')
        # Optimisation schedule.
        add('--niter', type=int, default=100, help='# of iter at starting learning rate')
        add('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')
        add('--beta1', type=float, default=0.5, help='momentum term of adam')
        add('--lr', type=float, default=5e-05, help='initial learning rate for adam')
        # Discriminator architecture.
        add('--num_D', type=int, default=2, help='number of discriminators to use')
        add('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
        add('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        # Loss configuration.
        add('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss')
        add('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss')
        add('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss')
        add('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN')
        add('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images')
        self.isTrain = True
def prep_image_for_return(image):
    """Convert a [-1, 1] NCHW image tensor batch to a PIL image (first item)."""
    # Undo the [-1, 1] normalisation and clamp into the displayable range.
    image = (image / 2 + 0.5).clamp(0, 1)
    # NCHW -> NHWC on the CPU for numpy interop.
    batch = image.cpu().permute(0, 2, 3, 1).numpy()
    pixels = (batch[0] * 255).round().astype('uint8')
    return Image.fromarray(pixels)
def test_warning_valid_index_empty() -> None:
    """An empty validation sequence must trigger the empty-sequence warning."""
    empty_index = [[]]
    with pytest.warns(UserWarning, match='.*At least one sequence is empty*'):
        find_lambda_control_star(r_hat, empty_index, lambdas)
def test_UnknownType():
    """UnknownType has fixed str/repr forms and rejects any parameters."""
    assert str(ak.types.unknowntype.UnknownType()) == 'unknown'
    assert repr(ak.types.unknowntype.UnknownType()) == 'UnknownType()'
    # Neither user parameters nor the categorical marker are allowed.
    for params in ({'x': 123}, {'__categorical__': True}):
        with pytest.raises(TypeError):
            ak.types.unknowntype.UnknownType(parameters=params)
class TNEANetAStrI(object):
    """SWIG proxy for SNAP's TNEANet string-attribute iterator.

    Auto-generated wrapper: every method delegates to the `_snap` C++ module.
    """
    # Ownership flag managed by the SWIG runtime.
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    __repr__ = _swig_repr

    def __init__(self, *args):
        """Initialise the underlying C++ iterator object."""
        _snap.TNEANetAStrI_swiginit(self, _snap.new_TNEANetAStrI(*args))

    def Next(self):
        """Advance to the next attribute record (delegates to C++)."""
        return _snap.TNEANetAStrI_Next(self)

    def __lt__(self, I):
        return _snap.TNEANetAStrI___lt__(self, I)

    def __eq__(self, I):
        return _snap.TNEANetAStrI___eq__(self, I)

    def GetDat(self):
        """Return the current attribute value (delegates to C++)."""
        return _snap.TNEANetAStrI_GetDat(self)

    def IsDeleted(self):
        """Return whether the current record is marked deleted (delegates to C++)."""
        return _snap.TNEANetAStrI_IsDeleted(self)

    # C++ destructor hook used by the SWIG runtime.
    __swig_destroy__ = _snap.delete_TNEANetAStrI
class ResNeXt(nn.Module):
    """Three-stage ResNeXt backbone producing L2-normalised feature embeddings."""

    def __init__(self, num_blocks, cardinality, bottleneck_width, feature_dim=128):
        super(ResNeXt, self).__init__()
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_planes = 64
        # Stem: 1x1 conv + BN (small-image style, no pooling).
        self.conv1 = nn.Conv2d(3, 64, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # _make_layer mutates in_planes/bottleneck_width, so order matters here.
        self.layer1 = self._make_layer(num_blocks[0], 1)
        self.layer2 = self._make_layer(num_blocks[1], 2)
        self.layer3 = self._make_layer(num_blocks[2], 2)
        # Projection head; uses the constructor's (unmutated) bottleneck_width.
        self.reshape = torch.nn.Sequential(
            nn.Linear(cardinality * bottleneck_width * 8, 512, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Linear(512, feature_dim, bias=True),
        )

    def _make_layer(self, num_blocks, stride):
        """Build one stage: the first block may downsample, the rest keep stride 1."""
        layers = []
        for block_stride in [stride] + [1] * (num_blocks - 1):
            layers.append(Block(self.in_planes, self.cardinality, self.bottleneck_width, block_stride))
            self.in_planes = Block.expansion * self.cardinality * self.bottleneck_width
        # Double the bottleneck width for the next stage.
        self.bottleneck_width *= 2
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3):
            out = stage(out)
        out = F.avg_pool2d(out, 8)
        out = self.reshape(out.view(out.size(0), -1))
        return F.normalize(out)
def build_resnet_fpn_backbone(cfg):
    """Assemble a ResNet trunk with an FPN head into one sequential backbone."""
    body = resnet.ResNet(cfg)
    stage2_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
    out_channels = cfg.MODEL.RESNETS.BACKBONE_OUT_CHANNELS
    # Channel counts double across the four ResNet stages fed to the FPN.
    in_channels_list = [stage2_channels * m for m in (1, 2, 4, 8)]
    fpn = fpn_module.FPN(
        in_channels_list=in_channels_list,
        out_channels=out_channels,
        conv_block=conv_with_kaiming_uniform(cfg.MODEL.FPN.USE_GN, cfg.MODEL.FPN.USE_RELU),
    )
    model = nn.Sequential(OrderedDict([('body', body), ('fpn', fpn)]))
    model.out_channels = out_channels
    model.is_3d = False
    return model
class AdamWeightDecay(tf.keras.optimizers.Adam):
    """Adam with decoupled weight decay (AdamW-style).

    Weight decay is applied directly to the variables (scaled by the learning
    rate) before the regular Adam step, instead of being folded into the
    gradients.

    Args:
        learning_rate: float or LR schedule, passed through to Adam.
        beta_1, beta_2, epsilon, amsgrad: standard Adam hyper-parameters.
        weight_decay_rate: decoupled decay coefficient; 0 disables decay.
        include_in_weight_decay: regexes of parameter names to always decay.
        exclude_from_weight_decay: regexes of parameter names to never decay
            (include patterns take precedence).
        name: optimizer name.
    """

    def __init__(self, learning_rate: Union[(float, tf.keras.optimizers.schedules.LearningRateSchedule)]=0.001, beta_1: float=0.9, beta_2: float=0.999, epsilon: float=1e-07, amsgrad: bool=False, weight_decay_rate: float=0.0, include_in_weight_decay: Optional[List[str]]=None, exclude_from_weight_decay: Optional[List[str]]=None, name: str='AdamWeightDecay', **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Recreate an optimizer from its config, registering the WarmUp schedule.

        Fix: this method uses ``cls`` and is invoked on the class, so it must
        be a classmethod — the decorator was missing.
        """
        custom_objects = {'WarmUp': WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        # Cache the decay rate as a constant per (device, dtype) slot.
        apply_state[(var_device, var_dtype)]['weight_decay_rate'] = tf.constant(self.weight_decay_rate, name='adam_weight_decay_rate')

    def _decay_weights_op(self, var, learning_rate, apply_state):
        """Return the op applying decoupled weight decay to `var` (or a no-op)."""
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(((learning_rate * var) * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate']), use_locking=self._use_locking)
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        (grads, tvars) = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Return (learning-rate tensor, kwargs carrying the apply_state cache)."""
        if (apply_state is None):
            return (self._decayed_lr_t[var_dtype], {})
        apply_state = (apply_state or {})
        coefficients = apply_state.get((var_device, var_dtype))
        if (coefficients is None):
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return (coefficients['lr_t'], dict(apply_state=apply_state))

    def _resource_apply_dense(self, grad, var, apply_state=None):
        (lr_t, kwargs) = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        # Decay runs before (and is ordered ahead of) the Adam update.
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        (lr_t, kwargs) = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({'weight_decay_rate': self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether `param_name` should be decayed; includes beat excludes."""
        if (self.weight_decay_rate == 0):
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if (re.search(r, param_name) is not None):
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if (re.search(r, param_name) is not None):
                    return False
        return True
def register_Ns3Icmpv6OptionLinkLayerAddress_methods(root_module, cls):
    """Register constructors/methods of ns3::Icmpv6OptionLinkLayerAddress on
    its pybindgen wrapper (auto-generated binding scaffolding)."""
    # Constructors: copy, source-flag, source-flag + address, default.
    cls.add_constructor([param('ns3::Icmpv6OptionLinkLayerAddress const &', 'arg0')])
    cls.add_constructor([param('bool', 'source')])
    cls.add_constructor([param('bool', 'source'), param('ns3::Address', 'addr')])
    cls.add_constructor([])
    # Methods mirroring the C++ class interface.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetAddress', 'ns3::Address', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'addr')])
    return
def scale_enum(anchor, scales):
    """Enumerate anchors obtained by scaling `anchor`'s width/height by each
    factor in `scales`, keeping the centre fixed."""
    w, h, x_ctr, y_ctr = whctrs(anchor)
    return mkanchors(w * scales, h * scales, x_ctr, y_ctr)
class TTable(object):
thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
__repr__ = _swig_repr
def SetMP(Value):
return _snap.TTable_SetMP(Value)
SetMP = staticmethod(SetMP)
def GetMP():
return _snap.TTable_GetMP()
GetMP = staticmethod(GetMP)
def NormalizeColName(ColName):
return _snap.TTable_NormalizeColName(ColName)
NormalizeColName = staticmethod(NormalizeColName)
def NormalizeColNameV(Cols):
return _snap.TTable_NormalizeColNameV(Cols)
NormalizeColNameV = staticmethod(NormalizeColNameV)
def AddIntCol(self, ColName):
return _snap.TTable_AddIntCol(self, ColName)
def AddFltCol(self, ColName):
return _snap.TTable_AddFltCol(self, ColName)
def AddStrCol(self, ColName):
return _snap.TTable_AddStrCol(self, ColName)
def GroupByIntColMP(self, GroupBy, Grouping, UsePhysicalIds=True):
return _snap.TTable_GroupByIntColMP(self, GroupBy, Grouping, UsePhysicalIds)
def __init__(self, *args):
_snap.TTable_swiginit(self, _snap.new_TTable(*args))
def New(*args):
return _snap.TTable_New(*args)
New = staticmethod(New)
def LoadSS(*args):
return _snap.TTable_LoadSS(*args)
LoadSS = staticmethod(LoadSS)
def SaveSS(self, OutFNm):
return _snap.TTable_SaveSS(self, OutFNm)
def SaveBin(self, OutFNm):
return _snap.TTable_SaveBin(self, OutFNm)
def Load(SIn, Context):
return _snap.TTable_Load(SIn, Context)
Load = staticmethod(Load)
def Save(self, SOut):
return _snap.TTable_Save(self, SOut)
def Dump(self, *args):
return _snap.TTable_Dump(self, *args)
def TableFromHashMap(*args):
return _snap.TTable_TableFromHashMap(*args)
    # NOTE(review): SWIG-generated proxy section. Every method below is a
    # thin wrapper that delegates directly to the matching native
    # _snap.TTable_* entry point; all validation and semantics live in the
    # wrapped C++ TTable implementation. Do not hand-edit the delegation
    # lines — they are regenerated by SWIG.
    # Old-style SWIG staticmethod pattern: the plain function defined
    # earlier in the class body is rebound here via staticmethod().
    TableFromHashMap = staticmethod(TableFromHashMap)
    # -- Row insertion and context management --
    def AddRow(self, Row):
        return _snap.TTable_AddRow(self, Row)
    def GetContext(self):
        return _snap.TTable_GetContext(self)
    def ChangeContext(self, Context):
        return _snap.TTable_ChangeContext(self, Context)
    # -- Cell accessors addressed by column name --
    def GetColIdx(self, ColName):
        return _snap.TTable_GetColIdx(self, ColName)
    def GetIntVal(self, ColName, RowIdx):
        return _snap.TTable_GetIntVal(self, ColName, RowIdx)
    def GetFltVal(self, ColName, RowIdx):
        return _snap.TTable_GetFltVal(self, ColName, RowIdx)
    def GetStrVal(self, ColName, RowIdx):
        return _snap.TTable_GetStrVal(self, ColName, RowIdx)
    # -- String-cell accessors by physical column index or name --
    def GetStrMapById(self, ColIdx, RowIdx):
        return _snap.TTable_GetStrMapById(self, ColIdx, RowIdx)
    def GetStrMapByName(self, ColName, RowIdx):
        return _snap.TTable_GetStrMapByName(self, ColName, RowIdx)
    def GetStrValById(self, ColIdx, RowIdx):
        return _snap.TTable_GetStrValById(self, ColIdx, RowIdx)
    def GetStrValByName(self, ColName, RowIdx):
        return _snap.TTable_GetStrValByName(self, ColName, RowIdx)
    # -- Row lookup by cell value --
    def GetIntRowIdxByVal(self, ColName, Val):
        return _snap.TTable_GetIntRowIdxByVal(self, ColName, Val)
    def GetStrRowIdxByMap(self, ColName, Map):
        return _snap.TTable_GetStrRowIdxByMap(self, ColName, Map)
    def GetFltRowIdxByVal(self, ColName, Val):
        return _snap.TTable_GetFltRowIdxByVal(self, ColName, Val)
    # -- Request secondary indexes (presumably to speed up the
    # value-based lookups above — semantics are in the native layer) --
    def RequestIndexInt(self, ColName):
        return _snap.TTable_RequestIndexInt(self, ColName)
    def RequestIndexFlt(self, ColName):
        return _snap.TTable_RequestIndexFlt(self, ColName)
    def RequestIndexStrMap(self, ColName):
        return _snap.TTable_RequestIndexStrMap(self, ColName)
    def GetStr(self, KeyId):
        return _snap.TTable_GetStr(self, KeyId)
    def GetIntValAtRowIdx(self, ColIdx, RowIdx):
        return _snap.TTable_GetIntValAtRowIdx(self, ColIdx, RowIdx)
    def GetFltValAtRowIdx(self, ColIdx, RowIdx):
        return _snap.TTable_GetFltValAtRowIdx(self, ColIdx, RowIdx)
    # -- Schema inspection and table-to-graph(-sequence) conversion --
    def GetSchema(self, *args):
        return _snap.TTable_GetSchema(self, *args)
    def ToGraphSequence(self, *args):
        return _snap.TTable_ToGraphSequence(self, *args)
    def ToVarGraphSequence(self, SplitAttr, AggrPolicy, SplitIntervals):
        return _snap.TTable_ToVarGraphSequence(self, SplitAttr, AggrPolicy, SplitIntervals)
    def ToGraphPerGroup(self, GroupAttr, AggrPolicy):
        return _snap.TTable_ToGraphPerGroup(self, GroupAttr, AggrPolicy)
    # Iterator variants: use with NextGraphIterator()/IsLastGraphOfSequence()
    # below to step through the generated graphs one at a time.
    def ToGraphSequenceIterator(self, *args):
        return _snap.TTable_ToGraphSequenceIterator(self, *args)
    def ToVarGraphSequenceIterator(self, SplitAttr, AggrPolicy, SplitIntervals):
        return _snap.TTable_ToVarGraphSequenceIterator(self, SplitAttr, AggrPolicy, SplitIntervals)
    def ToGraphPerGroupIterator(self, GroupAttr, AggrPolicy):
        return _snap.TTable_ToGraphPerGroupIterator(self, GroupAttr, AggrPolicy)
    def NextGraphIterator(self):
        return _snap.TTable_NextGraphIterator(self)
    def IsLastGraphOfSequence(self):
        return _snap.TTable_IsLastGraphOfSequence(self)
    # -- Configuration of source/destination columns and the node/edge
    # attribute columns used when converting this table to a graph --
    def GetSrcCol(self):
        return _snap.TTable_GetSrcCol(self)
    def SetSrcCol(self, Src):
        return _snap.TTable_SetSrcCol(self, Src)
    def GetDstCol(self):
        return _snap.TTable_GetDstCol(self)
    def SetDstCol(self, Dst):
        return _snap.TTable_SetDstCol(self, Dst)
    def AddEdgeAttr(self, *args):
        return _snap.TTable_AddEdgeAttr(self, *args)
    def AddSrcNodeAttr(self, *args):
        return _snap.TTable_AddSrcNodeAttr(self, *args)
    def AddDstNodeAttr(self, *args):
        return _snap.TTable_AddDstNodeAttr(self, *args)
    def AddNodeAttr(self, *args):
        return _snap.TTable_AddNodeAttr(self, *args)
    def SetCommonNodeAttrs(self, SrcAttr, DstAttr, CommonAttrName):
        return _snap.TTable_SetCommonNodeAttrs(self, SrcAttr, DstAttr, CommonAttrName)
    # -- Getters for the configured attribute-column name vectors,
    # grouped by (src node / dst node / edge) x (int / flt / str) --
    def GetSrcNodeIntAttrV(self):
        return _snap.TTable_GetSrcNodeIntAttrV(self)
    def GetDstNodeIntAttrV(self):
        return _snap.TTable_GetDstNodeIntAttrV(self)
    def GetEdgeIntAttrV(self):
        return _snap.TTable_GetEdgeIntAttrV(self)
    def GetSrcNodeFltAttrV(self):
        return _snap.TTable_GetSrcNodeFltAttrV(self)
    def GetDstNodeFltAttrV(self):
        return _snap.TTable_GetDstNodeFltAttrV(self)
    def GetEdgeFltAttrV(self):
        return _snap.TTable_GetEdgeFltAttrV(self)
    def GetSrcNodeStrAttrV(self):
        return _snap.TTable_GetSrcNodeStrAttrV(self)
    def GetDstNodeStrAttrV(self):
        return _snap.TTable_GetDstNodeStrAttrV(self)
    def GetEdgeStrAttrV(self):
        return _snap.TTable_GetEdgeStrAttrV(self)
    # -- Static factory methods: build a TTable from a network's nodes or
    # edges. Note the old-style SWIG staticmethod pattern: each function is
    # defined without `self` and immediately rebound via staticmethod(), so
    # the def/rebind pairs must stay adjacent.
    def GetNodeTable(Network, Context):
        return _snap.TTable_GetNodeTable(Network, Context)
    GetNodeTable = staticmethod(GetNodeTable)
    def GetEdgeTable(Network, Context):
        return _snap.TTable_GetEdgeTable(Network, Context)
    GetEdgeTable = staticmethod(GetEdgeTable)
    def GetEdgeTablePN(Network, Context):
        return _snap.TTable_GetEdgeTablePN(Network, Context)
    GetEdgeTablePN = staticmethod(GetEdgeTablePN)
    def GetFltNodePropertyTable(Network, Property, NodeAttrName, NodeAttrType, PropertyAttrName, Context):
        return _snap.TTable_GetFltNodePropertyTable(Network, Property, NodeAttrName, NodeAttrType, PropertyAttrName, Context)
    GetFltNodePropertyTable = staticmethod(GetFltNodePropertyTable)
    # -- Table metadata and row iteration --
    def GetColType(self, ColName):
        return _snap.TTable_GetColType(self, ColName)
    def GetNumRows(self):
        return _snap.TTable_GetNumRows(self)
    # Valid-row count is tracked separately from the physical row count
    # (see Defrag() below, which compacts the physical storage).
    def GetNumValidRows(self):
        return _snap.TTable_GetNumValidRows(self)
    def GetRowIdMap(self):
        return _snap.TTable_GetRowIdMap(self)
    # Begin/end row iterators; the *WR variants are presumably the
    # "with-removal" (mutable) iterators — confirm against the C++ header.
    def BegRI(self):
        return _snap.TTable_BegRI(self)
    def EndRI(self):
        return _snap.TTable_EndRI(self)
    def BegRIWR(self):
        return _snap.TTable_BegRIWR(self)
    def EndRIWR(self):
        return _snap.TTable_EndRIWR(self)
    def GetPartitionRanges(self, Partitions, NumPartitions):
        return _snap.TTable_GetPartitionRanges(self, Partitions, NumPartitions)
    # -- Relational operators: rename / dedup / selection / labeling --
    def Rename(self, Column, NewLabel):
        return _snap.TTable_Rename(self, Column, NewLabel)
    def Unique(self, *args):
        return _snap.TTable_Unique(self, *args)
    def Select(self, *args):
        return _snap.TTable_Select(self, *args)
    def Classify(self, Predicate, LabelName, PositiveLabel=1, NegativeLabel=0):
        return _snap.TTable_Classify(self, Predicate, LabelName, PositiveLabel, NegativeLabel)
    # "Atomic" variants operate on a single column-vs-column or
    # column-vs-constant comparison rather than a compound predicate.
    def SelectAtomic(self, *args):
        return _snap.TTable_SelectAtomic(self, *args)
    def ClassifyAtomic(self, Col1, Col2, Cmp, LabelName, PositiveLabel=1, NegativeLabel=0):
        return _snap.TTable_ClassifyAtomic(self, Col1, Col2, Cmp, LabelName, PositiveLabel, NegativeLabel)
    def SelectAtomicConst(self, Col, Val, Cmp, SelectedRows, SelectedTable, Remove=True, Table=True):
        return _snap.TTable_SelectAtomicConst(self, Col, Val, Cmp, SelectedRows, SelectedTable, Remove, Table)
    def SelectAtomicIntConst(self, *args):
        return _snap.TTable_SelectAtomicIntConst(self, *args)
    def SelectAtomicStrConst(self, *args):
        return _snap.TTable_SelectAtomicStrConst(self, *args)
    def SelectAtomicFltConst(self, *args):
        return _snap.TTable_SelectAtomicFltConst(self, *args)
    # -- Grouping, ordering and aggregation --
    def Group(self, GroupBy, GroupColName, Ordered=True, UsePhysicalIds=True):
        return _snap.TTable_Group(self, GroupBy, GroupColName, Ordered, UsePhysicalIds)
    def Count(self, CountColName, Col):
        return _snap.TTable_Count(self, CountColName, Col)
    def Order(self, *args):
        return _snap.TTable_Order(self, *args)
    def Aggregate(self, GroupByAttrs, AggOp, ValAttr, ResAttr, Ordered=True):
        return _snap.TTable_Aggregate(self, GroupByAttrs, AggOp, ValAttr, ResAttr, Ordered)
    def AggregateCols(self, AggrAttrs, AggOp, ResAttr):
        return _snap.TTable_AggregateCols(self, AggrAttrs, AggOp, ResAttr)
    def SpliceByGroup(self, GroupByAttrs, Ordered=True):
        return _snap.TTable_SpliceByGroup(self, GroupByAttrs, Ordered)
    # -- Joins (equi-, threshold, self- and similarity joins) --
    def Join(self, *args):
        return _snap.TTable_Join(self, *args)
    def ThresholdJoin(self, KeyCol1, JoinCol1, Table, KeyCol2, JoinCol2, Threshold, PerJoinKey=False):
        return _snap.TTable_ThresholdJoin(self, KeyCol1, JoinCol1, Table, KeyCol2, JoinCol2, Threshold, PerJoinKey)
    def SelfJoin(self, Col):
        return _snap.TTable_SelfJoin(self, Col)
    def SelfSimJoin(self, Cols, DistanceColName, SimType, Threshold):
        return _snap.TTable_SelfSimJoin(self, Cols, DistanceColName, SimType, Threshold)
    def SelfSimJoinPerGroup(self, *args):
        return _snap.TTable_SelfSimJoinPerGroup(self, *args)
    def SimJoin(self, Cols1, Table, Cols2, DistanceColName, SimType, Threshold):
        return _snap.TTable_SimJoin(self, Cols1, Table, Cols2, DistanceColName, SimType, Threshold)
    def SelectFirstNRows(self, N):
        return _snap.TTable_SelectFirstNRows(self, N)
    def Defrag(self):
        return _snap.TTable_Defrag(self)
    # -- Bulk column store/read and cross-table updates --
    def StoreIntCol(self, ColName, ColVals):
        return _snap.TTable_StoreIntCol(self, ColName, ColVals)
    def StoreFltCol(self, ColName, ColVals):
        return _snap.TTable_StoreFltCol(self, ColName, ColVals)
    def StoreStrCol(self, ColName, ColVals):
        return _snap.TTable_StoreStrCol(self, ColName, ColVals)
    def UpdateFltFromTable(self, KeyAttr, UpdateAttr, Table, FKeyAttr, ReadAttr, DefaultFltVal=0.0):
        return _snap.TTable_UpdateFltFromTable(self, KeyAttr, UpdateAttr, Table, FKeyAttr, ReadAttr, DefaultFltVal)
    # MP suffix: multiprocessing/parallel variant of the call above.
    def UpdateFltFromTableMP(self, KeyAttr, UpdateAttr, Table, FKeyAttr, ReadAttr, DefaultFltVal=0.0):
        return _snap.TTable_UpdateFltFromTableMP(self, KeyAttr, UpdateAttr, Table, FKeyAttr, ReadAttr, DefaultFltVal)
    def SetFltColToConstMP(self, UpdateColIdx, DefaultFltVal):
        return _snap.TTable_SetFltColToConstMP(self, UpdateColIdx, DefaultFltVal)
    # -- Set operations between tables --
    def Union(self, *args):
        return _snap.TTable_Union(self, *args)
    def UnionAll(self, *args):
        return _snap.TTable_UnionAll(self, *args)
    def UnionAllInPlace(self, *args):
        return _snap.TTable_UnionAllInPlace(self, *args)
    def Intersection(self, *args):
        return _snap.TTable_Intersection(self, *args)
    def Minus(self, *args):
        return _snap.TTable_Minus(self, *args)
    def Project(self, ProjectCols):
        return _snap.TTable_Project(self, ProjectCols)
    def ProjectInPlace(self, ProjectCols):
        return _snap.TTable_ProjectInPlace(self, ProjectCols)
    # -- Column-wise arithmetic and string concatenation --
    def ColMin(self, *args):
        return _snap.TTable_ColMin(self, *args)
    def ColMax(self, *args):
        return _snap.TTable_ColMax(self, *args)
    def ColGenericOp(self, *args):
        return _snap.TTable_ColGenericOp(self, *args)
    def ColGenericOpMP(self, *args):
        return _snap.TTable_ColGenericOpMP(self, *args)
    def ColAdd(self, *args):
        return _snap.TTable_ColAdd(self, *args)
    def ColSub(self, *args):
        return _snap.TTable_ColSub(self, *args)
    def ColMul(self, *args):
        return _snap.TTable_ColMul(self, *args)
    def ColDiv(self, *args):
        return _snap.TTable_ColDiv(self, *args)
    def ColMod(self, *args):
        return _snap.TTable_ColMod(self, *args)
    def ColConcat(self, *args):
        return _snap.TTable_ColConcat(self, *args)
    def ColConcatConst(self, *args):
        return _snap.TTable_ColConcatConst(self, *args)
    # Read whole columns into the caller-supplied Result vector.
    def ReadIntCol(self, ColName, Result):
        return _snap.TTable_ReadIntCol(self, ColName, Result)
    def ReadFltCol(self, ColName, Result):
        return _snap.TTable_ReadFltCol(self, ColName, Result)
    def ReadStrCol(self, ColName, Result):
        return _snap.TTable_ReadStrCol(self, ColName, Result)
    def InitIds(self):
        return _snap.TTable_InitIds(self)
    def IsNextK(self, *args):
        return _snap.TTable_IsNextK(self, *args)
    # -- Static graph-algorithm helpers over graph sequences (old-style
    # SWIG staticmethod pattern: def followed by staticmethod() rebind).
    # Defaults (C=0.85, Eps=1e-4, MaxIter=100 / MaxIter=20) are forwarded
    # verbatim to the native implementation.
    def GetMapPageRank(GraphSeq, Context, C=0.85, Eps=0.0001, MaxIter=100):
        return _snap.TTable_GetMapPageRank(GraphSeq, Context, C, Eps, MaxIter)
    GetMapPageRank = staticmethod(GetMapPageRank)
    def GetMapHitsIterator(GraphSeq, Context, MaxIter=20):
        return _snap.TTable_GetMapHitsIterator(GraphSeq, Context, MaxIter)
    GetMapHitsIterator = staticmethod(GetMapHitsIterator)
    # -- Size/memory diagnostics for the table and its shared context --
    def PrintSize(self):
        return _snap.TTable_PrintSize(self)
    def PrintContextSize(self):
        return _snap.TTable_PrintContextSize(self)
    def GetMemUsedKB(self):
        return _snap.TTable_GetMemUsedKB(self)
    def GetContextMemUsedKB(self):
        return _snap.TTable_GetContextMemUsedKB(self)
    # SWIG hook: native destructor invoked when the proxy is garbage-collected.
    __swig_destroy__ = _snap.delete_TTable
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.