code stringlengths 101 5.91M |
|---|
# NOTE(review): this bare call looks like a truncated decorator (e.g.
# `@run_spec_function('cub200')`) -- confirm against the original file.
_spec_function('cub200')
def get_cub200_spec(run_human_eval: bool=False) -> RunSpec:
    """Build the RunSpec for the CUB-200 image-generation benchmark.

    Args:
        run_human_eval: when True, additionally attach the human-critique
            metric specs (aesthetics + subject) evaluated on 10 examples.

    Returns:
        A RunSpec wiring the CUB200 scenario, a single-output image
        generation adapter, and the HEIM metric specs.
    """
    scenario_spec = ScenarioSpec(class_name='helm.benchmark.scenarios.image_generation.cub200_scenario.CUB200Scenario', args={})
    adapter_spec = get_image_generation_adapter_spec(num_outputs=1)
    metric_specs: List[MetricSpec] = (get_heim_reference_required_metric_specs() + get_core_heim_metric_specs())
    if run_human_eval:
        metric_specs += get_heim_critique_metric_specs(include_aesthetics=True, include_subject=True, num_examples=10)
    return RunSpec(name='cub200', scenario_spec=scenario_spec, adapter_spec=adapter_spec, metric_specs=metric_specs, groups=['cub200'])
class GeneratorHubInterface(nn.Module):
    """Hub-style wrapper around a translation task and a model ensemble.

    Exposes a string-in/string-out API (``translate`` / ``sample`` /
    ``score``) on top of the task's dictionaries, tokenizer and BPE.
    """

    def __init__(self, args, task, models):
        super().__init__()
        self.args = args
        self.task = task
        self.models = nn.ModuleList(models)
        self.src_dict = task.source_dictionary
        self.tgt_dict = task.target_dictionary
        # Optimize the ensemble for generation; attention weights are only
        # needed when alignments will be printed.
        for model in self.models:
            model.make_generation_fast_(
                beamable_mm_beam_size=(None if getattr(args, 'no_beamable_mm', False) else getattr(args, 'beam', 5)),
                need_attn=getattr(args, 'print_alignment', False),
            )
        self.align_dict = utils.load_align_dict(getattr(args, 'replace_unk', None))
        self.tokenizer = encoders.build_tokenizer(args)
        self.bpe = encoders.build_bpe(args)
        self.max_positions = utils.resolve_max_positions(
            self.task.max_positions(), *[model.max_positions() for model in models]
        )
        # Dummy buffer that follows the module across .to()/.cuda() calls so
        # `self.device` always reports the module's current device.
        self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float))

    @property
    def device(self):
        # Fix: `generate()` reads `self.device` as an attribute; without the
        # @property decorator it would pass a bound method to `Tensor.to`.
        return self._float_tensor.device

    def translate(self, sentences: List[str], beam: int=5, verbose: bool=False, **kwargs) -> List[str]:
        """Translate sentences with beam search (alias for ``sample``)."""
        return self.sample(sentences, beam, verbose, **kwargs)

    def sample(self, sentences: List[str], beam: int=1, verbose: bool=False, **kwargs) -> List[str]:
        """Generate the best hypothesis for each input sentence.

        A single string is accepted as well, in which case a single string
        is returned.
        """
        if isinstance(sentences, str):
            return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0]
        tokenized_sentences = [self.encode(sentence) for sentence in sentences]
        batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs)
        return [self.decode(hypos[0]['tokens']) for hypos in batched_hypos]

    def score(self, sentences: List[str], **kwargs):
        """Score reference sentences, returning the top hypothesis dicts."""
        if isinstance(sentences, str):
            return self.score([sentences], **kwargs)[0]
        tokenized_sentences = [self.encode(sentence) for sentence in sentences]
        return [hypos[0] for hypos in self.generate(tokenized_sentences, score_reference=True, **kwargs)]

    def generate(self, tokenized_sentences: List[torch.LongTensor], beam: int=5, verbose: bool=False, skip_invalid_size_inputs=False, **kwargs) -> List[List[Dict[(str, torch.Tensor)]]]:
        """Run inference on pre-binarized sentences.

        Returns, per input sentence, the list of hypothesis dicts produced
        by the task's generator, re-sorted back into the input order.
        """
        if (torch.is_tensor(tokenized_sentences) and (tokenized_sentences.dim() == 1)):
            return self.generate(tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs)[0]
        # Build a generator from a copy of args overridden by kwargs.
        gen_args = copy.copy(self.args)
        gen_args.beam = beam
        for (k, v) in kwargs.items():
            setattr(gen_args, k, v)
        generator = self.task.build_generator(gen_args)
        results = []
        for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
            batch = utils.apply_to_sample((lambda t: t.to(self.device)), batch)
            translations = self.task.inference_step(generator, self.models, batch)
            for (id, hypos) in zip(batch['id'].tolist(), translations):
                results.append((id, hypos))
        # Restore the original input order (batching may reorder by length).
        outputs = [hypos for (_, hypos) in sorted(results, key=(lambda x: x[0]))]
        if verbose:

            def getarg(name, default):
                return getattr(gen_args, name, getattr(self.args, name, default))

            for (source_tokens, target_hypotheses) in zip(tokenized_sentences, outputs):
                src_str_with_unk = self.string(source_tokens)
                logger.info('S\t{}'.format(src_str_with_unk))
                for hypo in target_hypotheses:
                    hypo_str = self.decode(hypo['tokens'])
                    logger.info('H\t{}\t{}'.format(hypo['score'], hypo_str))
                    logger.info('P\t{}'.format(' '.join(map((lambda x: '{:.4f}'.format(x)), hypo['positional_scores'].tolist()))))
                    if ((hypo['alignment'] is not None) and getarg('print_alignment', False)):
                        logger.info('A\t{}'.format(' '.join(map((lambda x: str(utils.item(x))), hypo['alignment'].int().cpu()))))
        return outputs

    def encode(self, sentence: str) -> torch.LongTensor:
        """tokenize -> apply BPE -> binarize with the source dictionary."""
        sentence = self.tokenize(sentence)
        sentence = self.apply_bpe(sentence)
        return self.binarize(sentence)

    def decode(self, tokens: torch.LongTensor) -> str:
        """de-binarize -> remove BPE -> detokenize."""
        sentence = self.string(tokens)
        sentence = self.remove_bpe(sentence)
        return self.detokenize(sentence)

    def tokenize(self, sentence: str) -> str:
        if (self.tokenizer is not None):
            sentence = self.tokenizer.encode(sentence)
        return sentence

    def detokenize(self, sentence: str) -> str:
        if (self.tokenizer is not None):
            sentence = self.tokenizer.decode(sentence)
        return sentence

    def apply_bpe(self, sentence: str) -> str:
        if (self.bpe is not None):
            sentence = self.bpe.encode(sentence)
        return sentence

    def remove_bpe(self, sentence: str) -> str:
        if (self.bpe is not None):
            sentence = self.bpe.decode(sentence)
        return sentence

    def binarize(self, sentence: str) -> torch.LongTensor:
        return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()

    def string(self, tokens: torch.LongTensor) -> str:
        return self.tgt_dict.string(tokens)

    def _build_batches(self, tokens: List[List[int]], skip_invalid_size_inputs: bool) -> Iterator[Dict[(str, Any)]]:
        """Yield batches over the given token tensors for inference."""
        lengths = torch.LongTensor([t.numel() for t in tokens])
        batch_iterator = self.task.get_batch_iterator(
            dataset=self.task.build_dataset_for_inference(tokens, lengths),
            max_tokens=self.args.max_tokens,
            max_sentences=self.args.max_sentences,
            max_positions=self.max_positions,
            ignore_invalid_inputs=skip_invalid_size_inputs,
        ).next_epoch_itr(shuffle=False)
        return batch_iterator
def main():
    """Collect goal-reaching trajectories in an Ant/Swimmer maze and save
    them as an HDF5 dataset.

    A pretrained goal-reaching policy is loaded and wrapped by the
    environment's waypoint navigation policy; rollouts are stepped for
    ``--num_samples`` transitions, optionally rendering a video per episode.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--noisy', action='store_true', help='Noisy actions')
    parser.add_argument('--maze', type=str, default='u-maze', help='Maze type. small or default')
    parser.add_argument('--num_samples', type=int, default=int(1000000.0), help='Num samples to collect')
    parser.add_argument('--env', type=str, default='Ant', help='Environment type')
    parser.add_argument('--policy_file', type=str, default='policy_file', help='file_name')
    parser.add_argument('--max_episode_steps', default=1000, type=int)
    parser.add_argument('--video', action='store_true')
    parser.add_argument('--multi_start', action='store_true')
    parser.add_argument('--multigoal', action='store_true')
    args = parser.parse_args()

    # Resolve the maze layout.
    if (args.maze == 'u-maze'):
        maze = maze_env.U_MAZE
    elif (args.maze == 'big-maze'):
        maze = maze_env.BIG_MAZE
    elif (args.maze == 'hardest-maze'):
        maze = maze_env.HARDEST_MAZE
    else:
        raise NotImplementedError

    # Build the environment.
    if (args.env == 'Ant'):
        env = NormalizedBoxEnv(ant.AntMazeEnv(maze_map=maze, maze_size_scaling=4.0, non_zero_reset=args.multi_start))
    elif (args.env == 'Swimmer'):
        # Fix: keyword was misspelled `mmaze_map`, which raised a TypeError.
        env = NormalizedBoxEnv(swimmer.SwimmerMazeEnv(maze_map=maze, maze_size_scaling=4.0, non_zero_reset=args.multi_start))
    else:
        # Fix: previously fell through with `env` undefined (NameError later).
        raise NotImplementedError
    env.set_target_goal()
    s = env.reset()
    print(s.shape)
    act = env.action_space.sample()
    done = False
    (policy, train_env) = load_policy(args.policy_file)

    def _goal_reaching_policy_fn(obs, goal):
        """Map (obs, goal) to (action, absolute waypoint) via the policy."""
        (goal_x, goal_y) = goal
        # Drops the first two and last two entries of the observation
        # (presumably the x,y position and goal dims -- TODO confirm).
        obs_new = obs[2:(- 2)]
        goal_tuple = np.array([goal_x, goal_y])
        # Normalize the goal direction and scale it to a fixed magnitude.
        goal_tuple = ((goal_tuple / np.linalg.norm(goal_tuple)) * 10.0)
        new_obs = np.concatenate([obs_new, goal_tuple], (- 1))
        return (policy.get_action(new_obs)[0], ((goal_tuple[0] + obs[0]), (goal_tuple[1] + obs[1])))

    data = reset_data()
    data_collection_policy = env.create_navigation_policy(_goal_reaching_policy_fn)
    if args.video:
        frames = []
    ts = 0
    num_episodes = 0
    for _ in range(args.num_samples):
        (act, waypoint_goal) = data_collection_policy(s)
        if args.noisy:
            # Exploration noise, clipped back to the normalized action range.
            act = (act + (np.random.randn(*act.shape) * 0.2))
            act = np.clip(act, (- 1.0), 1.0)
        (ns, r, done, info) = env.step(act)
        # Force episode termination at the step limit.
        if (ts >= args.max_episode_steps):
            done = True
        append_data(data, s[:(- 2)], act, r, env.target_goal, done, env.physics.data)
        if ((len(data['observations']) % 10000) == 0):
            print(len(data['observations']))
        ts += 1
        if done:
            # Episode boundary: reset env, resample goal, flush any video.
            done = False
            ts = 0
            s = env.reset()
            env.set_target_goal()
            if args.video:
                frames = np.array(frames)
                save_video('./videos/', (args.env + '_navigation'), frames, num_episodes)
                num_episodes += 1
                frames = []
        else:
            s = ns
        if args.video:
            curr_frame = env.physics.render(width=500, height=500, depth=False)
            frames.append(curr_frame)

    if args.noisy:
        fname = (args.env + ('_maze_%s_noisy_multistart_%s_multigoal_%s.hdf5' % (args.maze, str(args.multi_start), str(args.multigoal))))
    else:
        # Fix: added the missing '_' separator for consistency with the
        # noisy filename above ('Antmaze_...' -> 'Ant_maze_...').
        fname = (args.env + ('_maze_%s_multistart_%s_multigoal_%s.hdf5' % (args.maze, str(args.multi_start), str(args.multigoal))))
    # Fix: use a context manager so the HDF5 file is flushed and closed.
    with h5py.File(fname, 'w') as dataset_file:
        npify(data)
        for k in data:
            dataset_file.create_dataset(k, data=data[k], compression='gzip')
def register_Ns3OlsrHelper_methods(root_module, cls):
    """Register the ns3::OlsrHelper constructors and methods on *cls*."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::OlsrHelper const &', 'arg0')])
    # (name, return type, parameters, extra flags) -- registered in order.
    method_table = (
        ('Copy', 'ns3::OlsrHelper *', [], {'is_const': True, 'is_virtual': True}),
        ('ExcludeInterface', 'void', [param('ns3::Ptr< ns3::Node >', 'node'), param('uint32_t', 'interface')], {}),
        ('Create', 'ns3::Ptr< ns3::Ipv4RoutingProtocol >', [param('ns3::Ptr< ns3::Node >', 'node')], {'is_const': True, 'is_virtual': True}),
        ('Set', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')], {}),
        ('AssignStreams', 'int64_t', [param('ns3::NodeContainer', 'c'), param('int64_t', 'stream')], {}),
    )
    for method_name, return_type, parameters, flags in method_table:
        cls.add_method(method_name, return_type, parameters, **flags)
def main():
    """Train the LatentModel on MNIST.

    Runs 200 epochs with Adam and a per-step learning-rate adjustment,
    logging loss/KL to TensorBoard and saving a checkpoint per epoch.
    """
    # The dataset is expected to exist under ./data already (download=False).
    train_dataset = torchvision.datasets.MNIST('./data', train=True, download=False)
    epochs = 200
    model = LatentModel(128).cuda()
    model.train()
    optim = t.optim.Adam(model.parameters(), lr=0.0001)
    writer = SummaryWriter()
    global_step = 0
    for epoch in range(epochs):
        # A fresh DataLoader each epoch with a custom collate_fn.
        dloader = DataLoader(train_dataset, batch_size=16, collate_fn=collate_fn, shuffle=True, num_workers=16)
        pbar = tqdm(dloader)
        for (i, data) in enumerate(pbar):
            global_step += 1
            # Per-step learning-rate schedule.
            adjust_learning_rate(optim, global_step)
            (context_x, context_y, target_x, target_y) = data
            context_x = context_x.cuda()
            context_y = context_y.cuda()
            target_x = target_x.cuda()
            target_y = target_y.cuda()
            (y_pred, kl, loss) = model(context_x, context_y, target_x, target_y)
            optim.zero_grad()
            loss.backward()
            optim.step()
            writer.add_scalars('training_loss', {'loss': loss, 'kl': kl.mean()}, global_step)
        # Checkpoint after every epoch (./checkpoint must already exist).
        t.save({'model': model.state_dict(), 'optimizer': optim.state_dict()}, os.path.join('./checkpoint', ('checkpoint_%d.pth.tar' % (epoch + 1))))
def main(args):
    """Train a CLIPSep-style audio-visual source-separation model.

    Selects a model variant from ``args.image_model`` / ``args.fusion``,
    builds the train/validation loaders, then alternates ``args.valid_steps``
    training steps with a validation pass until ``args.steps`` is reached,
    saving model/optimizer/scheduler checkpoints after each validation.
    """
    # Seed all RNGs for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    # Prepare the output directory layout.
    (args.out_dir / 'checkpoints').mkdir(exist_ok=True)
    (args.out_dir / 'samples').mkdir(exist_ok=True)
    ((args.out_dir / 'samples') / 'text').mkdir(exist_ok=True)
    ((args.out_dir / 'samples') / 'image').mkdir(exist_ok=True)
    device = torch.device('cuda')
    logging.info(f'Creating the model...')
    # Model selection: label/text-only variants first, then PIT / CLIP-PIT,
    # otherwise a CLIPSep variant chosen by the fusion strategy.
    if (args.image_model == 'label_only'):
        label_map = utils.load_json(args.label_map_filename)
        model = clipsep.LabelSepV2(args.n_mix, args.n_labels, label_map, args.layers, args.channels, use_log_freq=args.log_freq, use_weighted_loss=args.weighted_loss, use_binary_mask=args.binary_mask)
    elif (args.image_model == 'bert'):
        label_map = utils.load_json(args.label_map_filename)
        model = clipsep.BERTSep(args.n_mix, label_map, args.layers, args.channels, use_log_freq=args.log_freq, use_weighted_loss=args.weighted_loss, use_binary_mask=args.binary_mask, bert_embeddings=args.bert_embeddings)
    elif (args.image_model == 'pit'):
        model = clipsep.PITSep(args.n_mix, args.layers, args.channels, use_log_freq=args.log_freq, use_weighted_loss=args.weighted_loss, use_binary_mask=args.binary_mask)
    elif (args.image_model == 'clipsepnit'):
        model = clipsep.CLIPPITSepV4(args.n_mix, args.layers, args.channels, use_log_freq=args.log_freq, use_weighted_loss=args.weighted_loss, use_binary_mask=args.binary_mask, reg_coef=args.reg_coef, reg2_coef=args.reg2_coef, reg2_epsilon=args.reg2_epsilon)
    elif (args.fusion == 'late'):
        model = clipsep.CLIPSep(args.n_mix, args.layers, args.channels, use_log_freq=args.log_freq, use_weighted_loss=args.weighted_loss, use_binary_mask=args.binary_mask)
    elif (args.fusion == 'early'):
        model = clipsep.CLIPSepV2(args.n_mix, args.layers, args.channels, use_log_freq=args.log_freq, use_weighted_loss=args.weighted_loss, use_binary_mask=args.binary_mask)
    elif (args.fusion == 'middle'):
        model = clipsep.CLIPSepV3(args.n_mix, args.layers, args.channels, use_log_freq=args.log_freq, use_weighted_loss=args.weighted_loss, use_binary_mask=args.binary_mask)
    model = torch.nn.DataParallel(model, device_ids=range(args.gpus))
    model.to(device)
    logging.info(f'Total number of parameters: {count_parameters(model)}')
    # Optionally warm-start from saved weights.
    if (args.weights is not None):
        model.load_state_dict(torch.load(args.weights, map_location=device))
        logging.info(f'Loaded the model weights from: {args.weights}')
    # Frozen query encoder: CLIP for 'clip*' models, dilated ResNet for SOP.
    if ('clip' in args.image_model):
        (clip_net, _) = clip.load('ViT-B/32', device)
        # Swap in a forward that accepts image=/text= keyword inputs.
        clip_net.old_forward = clip_net.forward
        clip_net.forward = types.MethodType(new_clip_forward, clip_net)
        clip_net = torch.nn.DataParallel(clip_net, device_ids=range(args.gpus))
        clip_net.to(device)
        clip_net.eval()
    elif (args.image_model == 'sop'):
        res_net = clipsep.ResnetDilated(torchvision.models.resnet18(weights='DEFAULT'))
        res_net = torch.nn.DataParallel(res_net, device_ids=range(args.gpus))
        res_net.to(device)
        res_net.eval()
    logging.info('Creating the data loaders...')
    train_dataset = dataset.MixDataset(args.train_list, 'train', n_mix=args.n_mix, audio_len=args.audio_len, audio_rate=args.audio_rate, n_fft=args.n_fft, hop_len=args.hop_len, win_len=args.win_len, n_frames=args.frames, stride_frames=args.stride_frames, img_size=args.img_size, fps=args.fps, preprocess_func=dataset.transform(), max_sample=None, return_waveform=False, repeat=args.repeat, frame_margin=args.frame_margin, audio_only=args.audio_only)
    if (args.repeat is None):
        logging.info(f'Training set size: {len(train_dataset)}')
    else:
        logging.info(f'Training set size: {(len(train_dataset) // args.repeat)}')
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, drop_last=True)
    val_dataset = dataset.MixDataset(args.val_list, 'valid', n_mix=args.n_mix, audio_len=args.audio_len, audio_rate=args.audio_rate, n_fft=args.n_fft, hop_len=args.hop_len, win_len=args.win_len, n_frames=args.frames, stride_frames=args.stride_frames, img_size=args.img_size, fps=args.fps, preprocess_func=dataset.transform(), max_sample=args.n_validation, return_waveform=False, audio_only=args.audio_only)
    logging.info(f'Validation set size: {len(val_dataset)}')
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, drop_last=False)
    optimizer = torch.optim.Adam(model.parameters(), args.lr)
    # Warmup + decay schedule (see get_lr_multiplier).
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=(lambda step: get_lr_multiplier(step, args.lr_warmup_steps, args.lr_decay_steps, args.lr_decay_multiplier)))
    loss_history = []
    if (args.audio_only or ('clip' in args.image_model)):
        loss_header = 'step,train_loss,val_loss'
    else:
        loss_header = 'step,train_loss,val_loss_text,val_loss_img'
    if ('clipsepnit' in args.image_model):
        act_history = []
        act_header = 'step,mean_act,mean_pit_act'
    step = 0
    min_val_loss = float('inf')
    train_iterator = iter(train_loader)
    while (step < args.steps):
        # After the warmup, enlarge the batch so `drop_closest` samples can
        # be dropped inside the model.
        if ((args.drop_closest is not None) and (args.drop_closest > 0) and (step > args.drop_closest_steps)):
            train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=(args.batch_size + args.drop_closest), shuffle=True, num_workers=args.workers, drop_last=True)
            train_iterator = iter(train_loader)
        logging.info('Training...')
        model.train()
        recent_losses = []
        pbar = tqdm.tqdm(range(args.valid_steps), ncols=120)
        for _ in pbar:
            # Endless iteration over the training set.
            try:
                batch = next(train_iterator)
            except StopIteration:
                train_iterator = iter(train_loader)
                batch = next(train_iterator)
            # Compute the (frozen) query embeddings without gradients.
            with torch.no_grad():
                img_emb = []
                for n in range(args.n_mix):
                    if ('clip' in args.image_model):
                        # Hybrid training alternates image/text queries.
                        if (args.train_mode == 'hybrid'):
                            if ((step % 2) == 0):
                                train_mode = 'image'
                            else:
                                train_mode = 'text'
                        else:
                            train_mode = args.train_mode
                        if (train_mode == 'image'):
                            frames = batch['frames'][n]
                            (B, T, C, H, W) = frames.size()
                            out = clip_net(image=frames.view((B * T), C, H, W)).type(frames.dtype)
                            C = out.size(1)
                            # Average per-frame embeddings over time.
                            img_emb.append(out.view(B, T, C).mean(1))
                        elif (train_mode == 'text'):
                            B = batch['mag_mix'].size(0)
                            text_inputs = []
                            for b in range(B):
                                prompt = get_text_prompt(batch['infos'][n][3][b])
                                text_inputs.append(clip.tokenize(prompt))
                            text_inputs = torch.cat(text_inputs)
                            img_emb.append(clip_net(text=text_inputs).type(batch['mag_mix'].dtype))
                    elif (args.image_model == 'sop'):
                        frames = batch['frames'][n]
                        (B, T, C, H, W) = frames.size()
                        out = res_net(frames.view((B * T), C, H, W))
                        C = out.size(1)
                        img_emb.append(out.view(B, T, C).mean(1))
            optimizer.zero_grad()
            if (step > args.drop_closest_steps):
                (loss, out) = model.forward(batch, img_emb, drop_closest=args.drop_closest)
            elif ((args.image_model == 'clipsepnit') and (step < args.pit_warmup_steps)):
                # PIT warmup: disable the PIT loss term early in training.
                (loss, out) = model.forward(batch, img_emb, pit_loss=False)
            else:
                (loss, out) = model.forward(batch, img_emb)
            # Mean over DataParallel replicas.
            loss = loss.mean()
            if ('clipsepnit' in args.image_model):
                mean_act = out['mean_act']
                mean_pit_act = out['mean_pit_act']
            loss.backward()
            if (args.grad_norm_clip is not None):
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_norm_clip)
            optimizer.step()
            scheduler.step()
            # Running mean over the last 100 steps for display.
            recent_losses.append(float(loss))
            if (len(recent_losses) > 100):
                del recent_losses[0]
            train_loss = np.mean(recent_losses)
            if ('clipsepnit' in args.image_model):
                pbar.set_postfix(loss=f'{train_loss:.4f}', mean_act=f'{mean_act:.4f}', mean_pit_act=f'{mean_pit_act:.4f}')
            else:
                pbar.set_postfix(loss=f'{train_loss:.4f}')
            step += 1
        logging.info('Validating...')
        model.eval()
        val_losses = {}
        # Which query modalities to validate with.
        if (args.audio_only or (args.image_model in ('label_only', 'bert', 'pit'))):
            val_modes = ['text']
        elif (args.image_model == 'sop'):
            val_modes = ['image']
        else:
            val_modes = ['text', 'image']
        for mode in val_modes:
            with torch.no_grad():
                total_loss = 0
                count = 0
                if ('clipsepnit' in args.image_model):
                    total_mean_act = 0
                    total_mean_pit_act = 0
                pbar = tqdm.tqdm(val_loader, ncols=120)
                for (i, batch) in enumerate(pbar):
                    img_emb = []
                    for n in range(args.n_mix):
                        if ('clip' in args.image_model):
                            if (mode == 'image'):
                                frames = batch['frames'][n]
                                (B, T, C, H, W) = frames.size()
                                reshaped = frames.view((B * T), C, H, W)
                                out = clip_net(image=reshaped).type(frames.dtype)
                                C = out.size(1)
                                img_emb.append(out.view(B, T, C).mean(1))
                            elif (mode == 'text'):
                                B = batch['mag_mix'].size(0)
                                text_inputs = []
                                for b in range(B):
                                    prompt = get_text_prompt(batch['infos'][n][3][b])
                                    text_inputs.append(clip.tokenize(prompt))
                                text_inputs = torch.cat(text_inputs)
                                img_emb.append(clip_net(text=text_inputs).type(batch['mag_mix'].dtype))
                        elif (args.image_model == 'sop'):
                            frames = batch['frames'][n]
                            (B, T, C, H, W) = frames.size()
                            out = res_net(frames.view((B * T), C, H, W))
                            C = out.size(1)
                            img_emb.append(out.view(B, T, C).mean(1))
                    (loss, out) = model.forward(batch, img_emb)
                    pbar.set_postfix(loss=f'{loss:.4f}')
                    # Weight per-batch means by batch size for a dataset mean.
                    B = batch['mag_mix'].size(0)
                    total_loss += (B * float(loss))
                    count += B
                    if ('clipsepnit' in args.image_model):
                        total_mean_act += (B * float(out['mean_act']))
                        total_mean_pit_act += (B * float(out['mean_pit_act']))
                val_loss = (total_loss / count)
                val_losses[mode] = val_loss
                logging.info(f'Validation loss ({mode} query) at step {step}: {val_loss:.4f}')
                if ('clipsepnit' in args.image_model):
                    mean_act = (total_mean_act / count)
                    mean_pit_act = (total_mean_pit_act / count)
                    logging.info(f'Activations: mean_act={mean_act:.4f}, mean_pit_act={mean_pit_act:.4f}')
        # Append to and persist the loss (and activation) histories.
        if (args.audio_only or (args.image_model in ('label_only', 'bert', 'pit'))):
            loss_history.append((step, train_loss, val_losses['text']))
        elif (args.image_model == 'sop'):
            loss_history.append((step, train_loss, val_losses['image']))
        else:
            loss_history.append((step, train_loss, val_losses['text'], val_losses['image']))
        utils.save_csv((args.out_dir / 'loss.csv'), loss_history, fmt='%f', header=loss_header)
        if ('clipsepnit' in args.image_model):
            act_history.append((step, mean_act, mean_pit_act))
            utils.save_csv((args.out_dir / 'act.csv'), act_history, fmt='%f', header=act_header)
        # Checkpoint model / optimizer / scheduler at this validation point.
        checkpoint_filename = ((args.out_dir / 'checkpoints') / f'model_{step}.pt')
        torch.save(model.state_dict(), checkpoint_filename)
        logging.info(f'Saved the model to: {checkpoint_filename}')
        # The validation mode used for "best model" selection.
        if (args.image_model in ('label_only', 'bert', 'pit')):
            val_mode = 'text'
        elif (args.train_mode == 'hybrid'):
            val_mode = 'image'
        else:
            val_mode = args.train_mode
        if (val_losses[val_mode] < min_val_loss):
            min_val_loss = val_losses[val_mode]
            shutil.copyfile(checkpoint_filename, ((args.out_dir / 'checkpoints') / 'best_model.pt'))
            logging.info(f'Minimum validation loss achieved: {min_val_loss:.4f}')
        optimizer_filename = ((args.out_dir / 'checkpoints') / f'optimizer_{step}.pt')
        torch.save(optimizer.state_dict(), optimizer_filename)
        logging.info(f'Saved the optimizer state to: {optimizer_filename}')
        scheduler_filename = ((args.out_dir / 'checkpoints') / f'scheduler_{step}.pt')
        torch.save(scheduler.state_dict(), scheduler_filename)
        logging.info(f'Saved the scheduler state to: {scheduler_filename}')
def compress_init_box(input_box, tol=1e-09):
    """Split an axis-aligned input box into fixed and free dimensions.

    Dimensions whose interval is numerically a single point (width < *tol*)
    are folded into a constant bias at their midpoint; the remaining
    dimensions are kept as intervals, and a basis matrix maps reduced points
    back into the full input space: ``basis @ reduced + bias``.

    Returns:
        (basis, bias, reduced_box) where ``basis`` has shape
        (n_dims, n_free), ``bias`` has shape (n_dims,), and ``reduced_box``
        lists the (lb, ub) pairs of the free dimensions.
    """
    n_dims = len(input_box)
    dtype = type(input_box[0][0])
    assert (dtype in [float, np.float64, np.float32]), f'input_box dtype should be float32/64, got {dtype}'
    bias = np.zeros(n_dims, dtype=dtype)
    basis_rows = []
    reduced_box = []
    for dim, (lo, hi) in enumerate(input_box):
        if abs(hi - lo) < tol:
            # Degenerate interval: pin this coordinate at its midpoint.
            bias[dim] = (lo + hi) / 2.0
        else:
            reduced_box.append((lo, hi))
            one_hot = [1 if d == dim else 0 for d in range(n_dims)]
            basis_rows.append(one_hot)
    basis = np.array(basis_rows, dtype=dtype).transpose()
    return (basis, bias, reduced_box)
class TdbCmdBackend(cmd.Cmd):
    """Interactive bmodel debugger backend built on :class:`cmd.Cmd`.

    Loads a bmodel (plus optional input and reference data), decodes it back
    into an atomic command list, and steps through the commands on a runner
    while registered plugins observe loading and stepping.
    """

    def __init__(self, bmodel_file: str=None, final_mlir_fn: str=None, tensor_loc_file: str=None, input_data_fn: str=None, reference_data_fn: List[str]=None, extra_plugins: List[str]=None, extra_check: List[str]=None, completekey='tab', stdin=None, stdout=None, ddr_size=(2 ** 32)):
        super().__init__(completekey, stdin, stdout)
        self.bmodel_file = bmodel_file
        self.final_mlir_fn = final_mlir_fn
        self.tensor_loc_file = tensor_loc_file
        self.input_data_fn = input_data_fn
        # Fix: `extra_plugins`/`extra_check` previously used mutable default
        # arguments ([]), which are shared across all instances.
        if extra_plugins is None:
            extra_plugins = []
        if extra_check is None:
            extra_check = []
        # Accept a single filename or a list of reference files.
        if (reference_data_fn is None):
            reference_data_fn = []
        elif isinstance(reference_data_fn, str):
            reference_data_fn = [reference_data_fn]
        self.reference_data_fns = reference_data_fn
        self.ddr_size = ddr_size
        self.status = TdbStatus.UNINIT
        # Expose this instance globally so plugins can reach the debugger.
        builtins.tdb = self
        from .static_check import Checker
        from . import plugins as _
        self.checker = Checker(self)
        self.displays = Displays.get_instance()
        self.static_mode = False
        self.enable_message = True
        self.cmditer: List[Union[(BaseTpuCmd, CpuCmd, DynIrCmd)]]
        # Register the built-in plugins; data-check only when references exist.
        self.plugins = PluginCompact(self)
        self.add_plugin('breakpoint')
        self.add_plugin('display')
        self.add_plugin('print')
        self.add_plugin('info')
        self.add_plugin('final-mlir')
        self.add_plugin('reload')
        if (len(reference_data_fn) > 0):
            self.add_plugin('data-check')
        for extra_plugin in extra_plugins:
            self.add_plugin(extra_plugin)
        self.add_plugin('static-check')
        self.extra_check = extra_check
        self.message(f'Load plugins: {self.plugins}')
        self.message(f'Type `s` to initialize bmodel, type `r` to run full commands, or `help` to see other commands.')

    # NOTE(review): this bare call looks like a truncated decorator
    # (e.g. `@add_callback('load')` applied to `_reset`) -- confirm upstream.
    _callback('load')
    def _reset(self):
        """(Re)load the bmodel and input data and reset debugger state."""
        self._load_bmodel()
        self._load_data()
        self.status = TdbStatus.IDLE
        self.static_mode = False
        self._build_index()
        if (self.get_plugin('progress') is None):
            self.message('You are in quiet mode, add `-v/--verbose` argument to open prograss bar,\nor use `info progress` to show progress information.')

    def _load_bmodel(self):
        """Load and decode the bmodel, create the runner, init device memory."""
        bmodel_file = self.bmodel_file
        if (bmodel_file is None):
            raise Exception('Nothing to debug.')
        bmodel = BModel(bmodel_file)
        self.message(f'Load {bmodel_file}')
        context = bmodel.context
        self.bmodel = bmodel
        self.message(f'Load {context.device.name} backend')
        self.atomic_mlir = BModel2MLIR(bmodel)
        self.cmditer = self.atomic_mlir.create_cmdlist()
        self.cmd_point = 0
        self.message(f'Build {self.final_mlir_fn} index')
        self.message(f'Decode bmodel back into atomic dialect')
        self.message(f'static_mode = {self.static_mode}')
        self.runner = context.get_runner(self.ddr_size)
        self.context = context
        self.memory = context.memory
        self.decoder = context.decoder
        self.message(f'initialize memory')
        self.memory.clear_memory()
        # Preload the coefficient (weight) data into device DDR.
        coeff = self.atomic_mlir.functions[0].regions[0].data
        if coeff:
            address = coeff.address
            # Some chips remap coefficient addresses.
            if (isinstance(self.context, BM1688Context) or isinstance(self.context, SG2260Context)):
                address = self.context.fix_addr(address)
            addr_offset_ddr = (address - self.context.memmap[MType.G][0])
            if self.runner.using_cmodel:
                self.LMEM = self.runner.LMEM
                self.SMEM = self.runner.SMEM
                self.DDR = self.runner.DDR
                self.DDR[addr_offset_ddr:(addr_offset_ddr + len(coeff.data))] = memoryview(coeff.data)
            else:
                self.memory.set_data_to_address(coeff.address, np.frombuffer(coeff.data, dtype=np.uint8))

    def _load_data(self):
        """Feed input data (.dat or .npz) into the model's input memrefs."""
        file = self.input_data_fn
        if ((file is None) or (not os.path.isfile(file))):
            self.error(f'input data file `{file}` is invalid')
            return
        if file.endswith('.dat'):
            # Raw binary: slice per input argument in signature order.
            inputs = np.fromfile(file, dtype=np.uint8)
            _offset = 0
            for arg in self.atomic_mlir.functions[0].signature[0]:
                mem = arg.memref
                size = int((np.prod(mem.shape) * mem.itemsize))
                self.memory.set_data(mem, inputs[_offset:(_offset + size)].view(mem.np_dtype))
                _offset += size
        elif file.endswith('.npz'):
            inputs = np.load(file)
            self.set_inputs_dict(inputs)

    def _build_index(self):
        """Build a DataFrame index of all commands, keyed by cmd_index."""
        # Row 0 is a sentinel for "before the first command".
        cmd_records = [{'executed_id': 0, 'subnet_id': None, 'core_id': None, 'cmd_id': None, 'cmd_id_dep': None, 'cmd_type': None, 'cmd_index': (None, None, None, None), 'op_name': None, 'is_sys': None}]
        for (executed_id, op) in enumerate(self.cmditer, start=1):
            record = {'executed_id': executed_id, 'subnet_id': op.subnet_id, 'core_id': op.core_id, 'cmd_id': op.cmd_id, 'cmd_id_dep': None, 'cmd_type': op.cmd_type, 'cmd_index': op.tuple_key, 'op_name': op.name, 'is_sys': False}
            if isinstance(op, BaseTpuCmd):
                record['cmd_id_dep'] = op.cmd_id_dep
                record['is_sys'] = self.context.is_sys(op)
            cmd_records.append(record)
        index_df = pd.DataFrame.from_records(cmd_records)
        index_df['executed_id'] = index_df['executed_id'].astype(int)
        self.index_df = index_df.set_index('cmd_index', drop=False)

    def add_plugin(self, plugin_name: str):
        """Register a plugin; command plugins get do_/complete_/help_ hooks."""
        plugin = self.plugins.add_plugin(plugin_name)
        if isinstance(plugin, TdbPluginCmd):
            assert (not hasattr(self, f'do_{plugin_name}')), plugin_name
            func_names = getattr(plugin, 'func_names', [plugin_name])
            for func_name in func_names:
                setattr(self, f'do_{func_name}', plugin.onecmd)
                setattr(self, f'complete_{func_name}', plugin.complete_plugin)
                setattr(self, f'help_{func_name}', partial(plugin.do_help, ''))

    def get_plugin(self, name: Union[(str, Type['TdbPlugin'])]) -> 'TdbPlugin':
        """Look up a plugin by its name or by its class."""
        if isinstance(name, str):
            return self.plugins[name]
        else:
            return self.plugins[name.name]

    def set_inputs(self, *inputs):
        """Set all model inputs positionally."""
        args = self.atomic_mlir.functions[0].signature
        assert (len(inputs) == len(args))
        for (id, input) in enumerate(inputs):
            self.set_input(id, input)

    def set_input(self, id, input):
        """Write one input array into the id-th input memref."""
        args = self.atomic_mlir.functions[0].signature[0]
        mem = args[id].memref
        self.memory.set_data(mem, input)

    def get_names(self):
        # cmd.Cmd hook: include dynamically-added do_*/help_* attributes.
        return dir(self)

    def get_op_context(self, pre=2, next=2, cmd_point=None):
        """Return the commands around `cmd_point` (pre before, next after)."""
        if (cmd_point is None):
            cmd_point = self.cmd_point
        pre = max(0, (cmd_point - pre))
        return self.cmditer[pre:((cmd_point + next) + 1)]

    def get_cmd(self):
        """Current command; raises StopIteration if uninitialized or done."""
        if (self.status == TdbStatus.UNINIT):
            raise StopIteration()
        if (self.cmd_point >= len(self.cmditer)):
            raise StopIteration()
        return self.cmditer[self.cmd_point]

    def get_precmd(self):
        """Previous command; raises StopIteration at the beginning."""
        if (self.cmd_point == 0):
            raise StopIteration()
        return self.cmditer[(self.cmd_point - 1)]

    def get_nextop(self):
        """Return the current command and advance the command pointer."""
        op = self.get_cmd()
        self.cmd_point += 1
        return op

    # NOTE(review): likely a truncated decorator (e.g. `@add_callback('step')`).
    _callback('step')
    def step(self):
        """Execute the current command on the runner and advance the pointer.

        A ValueError from the runner is reported and converted into a
        BreakpointStop so the interactive loop halts.
        """
        cmd = self.get_cmd()
        try:
            if (not self.static_mode):
                cmd_type = cmd.cmd_type
                # NOTE(review): nesting reconstructed from flattened source;
                # cpu/dyn_ir are treated as alternatives to static commands.
                if cmd_type.is_static():
                    # System commands are bookkeeping only; nothing to run.
                    if (not self.context.is_sys(cmd)):
                        if (cmd_type == CMDType.tiu):
                            self.runner.tiu_compute(cmd)
                        elif (cmd_type == CMDType.dma):
                            self.runner.dma_compute(cmd)
                elif (cmd_type == CMDType.cpu):
                    self.runner.cpu_compute(cmd)
                elif (cmd_type == CMDType.dyn_ir):
                    self.runner.dynamic_compute(cmd)
                else:
                    self.error('skip unknown CMDType')
        except ValueError as e:
            self.error(e)
            raise BreakpointStop()
        self.cmd_point += 1

    def set_inputs_dict(self, inputs):
        """Set inputs from a dict/npz keyed by argument name (with lowering)."""
        args = self.atomic_mlir.functions[0].signature[0]
        from utils.lowering import lowering
        for (id, arg) in enumerate(args):
            input = lowering(inputs[arg.name], pdtype=arg.dtype.name, pshape=arg.shape[0], pzero_point=arg.zero_point, pscale=arg.scale)
            self.set_input(id, input)

    def message(self, msg):
        """Print an informational message unless messages are disabled."""
        if self.enable_message:
            pprint(msg, file=self.stdout)

    def error(self, msg):
        """Print an error message; exceptions get a rich traceback."""
        if self.enable_message:
            if isinstance(msg, Exception):
                get_console().print_exception(show_locals=True)
            else:
                pprint('***', msg, file=self.stdout)

    def _complete_expression(self, text, line, begidx, endidx):
        """Complete a dotted Python expression against the caller's namespace."""
        ns = {**sys._getframe().f_globals, **sys._getframe().f_locals}
        if ('.' in text):
            dotted = text.split('.')
            try:
                obj = ns[dotted[0]]
                for part in dotted[1:(- 1)]:
                    obj = getattr(obj, part)
            except (KeyError, AttributeError):
                return []
            prefix = ('.'.join(dotted[:(- 1)]) + '.')
            return [(prefix + n) for n in dir(obj) if n.startswith(dotted[(- 1)])]
        else:
            return [n for n in ns.keys() if n.startswith(text)]

    def cmdloop(self, intro=None):
        """cmd.Cmd.cmdloop with KeyboardInterrupt treated as an empty line."""
        self.preloop()
        if (self.use_rawinput and self.completekey):
            try:
                import readline
                self.old_completer = readline.get_completer()
                readline.set_completer(self.complete)
                readline.parse_and_bind((self.completekey + ': complete'))
            except ImportError:
                pass
        try:
            if (intro is not None):
                self.intro = intro
            if self.intro:
                self.stdout.write((str(self.intro) + '\n'))
            stop = None
            while (not stop):
                if self.cmdqueue:
                    line = self.cmdqueue.pop(0)
                elif self.use_rawinput:
                    try:
                        line = input(self.prompt)
                    except KeyboardInterrupt:
                        # Ctrl-C cancels the current line instead of exiting.
                        line = ''
                        print(file=self.stdout)
                        continue
                    except EOFError:
                        line = 'EOF'
                else:
                    self.stdout.write(self.prompt)
                    self.stdout.flush()
                    line = self.stdin.readline()
                    if (not len(line)):
                        line = 'EOF'
                    else:
                        line = line.rstrip('\r\n')
                line = self.precmd(line)
                stop = self.onecmd(line)
                stop = self.postcmd(stop, line)
            self.postloop()
        finally:
            if (self.use_rawinput and self.completekey):
                try:
                    import readline
                    readline.set_completer(self.old_completer)
                except ImportError:
                    pass
class UpstreamExpert(nn.Module):
    """s3prl-style upstream wrapper around a SERAB embedding model.

    Loads the model from a checkpoint / model name and exposes timestamp
    embeddings computed over padded batches of waveforms.
    """

    def __init__(self, ckpt: str=None, model_name: str=None, window_secs: float=1, hop_secs: float=0.05, model_config: str=None):
        super().__init__()
        self.model = serab.load_model(ckpt, model_name)
        # SERAB expects window/hop durations in milliseconds.
        self.frame_duration = window_secs * 1000
        self.hop_size = hop_secs * 1000
        self.model_config = model_config

    def get_downsample_rates(self, key: str=None) -> int:
        """Number of audio samples between consecutive output frames."""
        hop_in_seconds = self.hop_size / 1000
        return int(hop_in_seconds * SAMPLE_RATE)

    def forward(self, wavs: List[Tensor]) -> Dict[(str, Union[(Tensor, List[Tensor])])]:
        """Pad the waveforms into a batch and return frame embeddings."""
        batch = pad_sequence(wavs, batch_first=True)
        embeddings, _timestamps = serab.get_timestamp_embeddings(batch, self.model, self.frame_duration, self.hop_size)
        return {'hidden_states': [embeddings]}
_tokenizers
class CpmTokenizationTest(XLNetModelTest):
    """Tokenization tests for the CPM tokenizer (reuses the XLNet test base)."""

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        # Skip all pipeline tests for this model/tokenizer combination.
        return True

    def test_pre_tokenization(self):
        """Check the tokenize / convert_tokens_to_ids / decode round-trip."""
        tokenizer = CpmTokenizer.from_pretrained('TsinghuaAI/CPM-Generate')
        # NOTE(review): the literals below look like they lost non-ASCII
        # characters in this copy -- confirm against the upstream test file.
        text = 'Hugging Face,'
        normalized_text = 'Hugging Face,<unk>'
        bpe_tokens = 'Hu gg ing F ace , '.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        # Append an explicit <unk> and compare the full id sequence.
        input_tokens = (tokens + [tokenizer.unk_token])
        input_bpe_tokens = [13789, 13283, 1421, 8, 10, 1164, 13608, 16528, 63, 8, 9, 440, 108, 440, 121, 90, 8, 12, 0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
        # Decoding the ids should reproduce the normalized text.
        reconstructed_text = tokenizer.decode(input_bpe_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
class vanilla_transformer_block(nn.Module):
    """Post-LN transformer encoder block that also accumulates a halting score
    from the (row-summed, softmaxed) self-attention weights.
    """

    def __init__(self, dim, head, FFNdim) -> None:
        super().__init__()
        self.mha = MultiheadAttention(embed_dim=dim, num_heads=head)
        self.FFN = FeedForwardNetwork(dim, FFNdim)
        self.ln1 = nn.LayerNorm(dim, eps=1e-05)
        self.ln2 = nn.LayerNorm(dim, eps=1e-05)
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, haltingscore):
        # MultiheadAttention expects (seq, batch, dim); permute in and out.
        x = x.permute([1, 0, 2])
        skip = x
        attn_out, attn = self.mha(key=x, value=x, query=x)
        x = self.ln1(skip + attn_out)
        skip = x
        ffn_out = self.FFN(x)
        x = self.ln2(skip + ffn_out)
        x = x.permute([1, 0, 2])
        # Collapse attention rows and renormalize into a distribution.
        attn = self.softmax(torch.sum(attn, dim=1))
        # Deliberately in-place: the caller's tensor accumulates the score.
        haltingscore += attn
        return (x, haltingscore, attn)
.parametrize('workers', (1, 2))
def test_explicit_headers(testdir, unique_hook, empty_open_api_3_schema, cli, openapi3_base_url, hypothesis_max_examples, workers, snapshot_cli):
header_name = 'X-Session-ID'
empty_open_api_3_schema['paths'] = {'/success': {'get': {'parameters': [{'name': name, 'in': location, 'required': True, 'schema': {'type': 'string'}} for (name, location) in ((header_name, 'header'), ('key', 'query'))], 'responses': {'200': {'description': 'OK'}}}}}
assert (run(testdir, cli, unique_hook, empty_open_api_3_schema, openapi3_base_url, hypothesis_max_examples, f'-H {header_name}: fixed', f'--workers={workers}') == snapshot_cli) |
class AstVector(Z3PPObject):
    """Vector of Z3 AST nodes wrapping a native ``Z3_ast_vector`` handle.

    Reference counting on the underlying handle is managed manually: the
    handle is inc-ref'd once in __init__ and dec-ref'd in __del__, so
    statement order here is significant.
    """
    def __init__(self, v=None, ctx=None):
        self.vector = None
        if (v is None):
            # Create a fresh, empty vector in the given (or default) context.
            self.ctx = _get_ctx(ctx)
            self.vector = Z3_mk_ast_vector(self.ctx.ref())
        else:
            # Adopt an existing native handle; its context must be supplied.
            self.vector = v
            assert (ctx is not None)
            self.ctx = ctx
        Z3_ast_vector_inc_ref(self.ctx.ref(), self.vector)
    def __del__(self):
        # Guard against partially-constructed objects / already-released contexts.
        if ((self.vector is not None) and (self.ctx.ref() is not None)):
            Z3_ast_vector_dec_ref(self.ctx.ref(), self.vector)
    def __len__(self):
        return int(Z3_ast_vector_size(self.ctx.ref(), self.vector))
    def __getitem__(self, i):
        """Integer or slice access; negative integer indices count from the end."""
        if isinstance(i, int):
            if (i < 0):
                i += self.__len__()
            if (i >= self.__len__()):
                raise IndexError
            return _to_ast_ref(Z3_ast_vector_get(self.ctx.ref(), self.vector, i), self.ctx)
        elif isinstance(i, slice):
            result = []
            for ii in range(*i.indices(self.__len__())):
                result.append(_to_ast_ref(Z3_ast_vector_get(self.ctx.ref(), self.vector, ii), self.ctx))
            return result
    def __setitem__(self, i, v):
        # NOTE(review): unlike __getitem__, negative indices are not normalised here.
        if (i >= self.__len__()):
            raise IndexError
        Z3_ast_vector_set(self.ctx.ref(), self.vector, i, v.as_ast())
    def push(self, v):
        """Append an AST to the end of the vector."""
        Z3_ast_vector_push(self.ctx.ref(), self.vector, v.as_ast())
    def resize(self, sz):
        Z3_ast_vector_resize(self.ctx.ref(), self.vector, sz)
    def __contains__(self, item):
        # Membership uses structural AST equality (eq), not Python identity.
        for elem in self:
            if elem.eq(item):
                return True
        return False
    def translate(self, other_ctx):
        """Copy this vector into another Z3 context."""
        return AstVector(Z3_ast_vector_translate(self.ctx.ref(), self.vector, other_ctx.ref()), ctx=other_ctx)
    def __copy__(self):
        return self.translate(self.ctx)
    def __deepcopy__(self, memo={}):
        # memo is unused; translating within the same context performs the copy.
        return self.translate(self.ctx)
    def __repr__(self):
        return obj_to_string(self)
    def sexpr(self):
        """Native string rendering of the vector (SMT-LIB style)."""
        return Z3_ast_vector_to_string(self.ctx.ref(), self.vector)
class CSFI2(nn.Module):
    """Cross-scale feature interaction between two resolution levels.

    Level-1 features are bicubically upsampled into level 2, level-2 features
    strided-convolved down into level 1, and each level is merged with the
    other's projection via a 3x3 conv + ReLU.
    """
    def __init__(self, mid_channels):
        super().__init__()
        self.conv1to2 = _conv1x1_layer(mid_channels, mid_channels)
        self.conv2to1 = _conv3x3_layer(mid_channels, mid_channels, stride=2)
        self.conv_merge1 = _conv3x3_layer(mid_channels * 2, mid_channels)
        self.conv_merge2 = _conv3x3_layer(mid_channels * 2, mid_channels)

    def forward(self, x1, x2):
        # Exchange information across scales before merging.
        up = F.relu(self.conv1to2(F.interpolate(x1, scale_factor=2, mode='bicubic', align_corners=False)))
        down = F.relu(self.conv2to1(x2))
        merged1 = F.relu(self.conv_merge1(torch.cat((x1, down), dim=1)))
        merged2 = F.relu(self.conv_merge2(torch.cat((x2, up), dim=1)))
        return (merged1, merged2)
class VoxLingua(HearScene):
    """HEAR benchmark scene for VoxLingua k-fold language classification.

    NOTE(review): the bare ``_cfg(...)`` calls preceding each method appear to
    be decorator lines whose leading ``@`` was lost in extraction — confirm
    against the upstream repository.
    """
    # Stage configuration: k-fold corpus, sampler batch size, multiclass metrics.
    _cfg(**HearScene.setup.default_except(corpus=dict(CLS=field(hear_scene_kfolds, '\nThe corpus class. You can add the **kwargs right below this CLS key', str), dataset_root=field('???', 'The root path of the corpus', str), test_fold=field('???', 'The testing fold id. Options: [0, 1, 2, 3, 4]'), num_folds=NUM_FOLDS), train_sampler=dict(batch_size=32), task=dict(prediction_type='multiclass', scores=['top1_acc', 'd_prime', 'aucroc', 'mAP'])))
    def setup(cls, **cfg):
        super().setup(**cfg)
    # Training selects checkpoints on top-1 accuracy (higher is better).
    _cfg(**HearScene.train.default_except(trainer=dict(valid_metric='top1_acc', valid_higher_better=True)))
    def train(cls, **cfg):
        super().train(**cfg)
    _cfg(**HearScene.inference.default_cfg)
    def inference(cls, **cfg):
        super().inference(**cfg)
    # Full pipeline: setup -> train -> inference within a single workspace.
    _cfg(**HearScene.run.default_except(stages=['setup', 'train', 'inference'], start_stage='setup', final_stage='inference', setup=setup.default_cfg.deselect('workspace', 'resume'), train=train.default_cfg.deselect('workspace', 'resume'), inference=inference.default_cfg.deselect('workspace', 'resume')))
    def run(cls, **cfg):
        super().run(**cfg)
    # Cross validation fans `run` out over per-fold sub-workspaces.
    _cfg(num_fold=field(NUM_FOLDS, 'The number of folds to run cross validation', int), **run.default_except(workspace=field('???', "The root workspace for all folds.\nEach fold will use a 'fold_{id}' sub-workspace under this root workspace"), setup=dict(corpus=dict(test_fold=field('TBD', "This will be auto-set by 'run_cross_validation'")))))
    def cross_validation(cls, **cfg):
        super().cross_validation(**cfg)
class _PatchAnalysis(object):
    """Bundles a patch, the points it contains, and the RANSAC line fitted to them."""

    def __init__(self, patchinfo: PatchInfo, points_in_patch: List[Point], line: LineModel):
        self.patchinfo = patchinfo
        self.points = points_in_patch
        self.ransacline: LineModel = line
        # fix: removed a stray trailing `pass` statement left over in __init__

    def __str__(self):
        # Summarise fit quality: inlier count vs. total points in the patch.
        return ('Line=%s Points in line=%d Total points in patch=%d PatchInfo=%s' % (self.ransacline, len(self.ransacline.points), len(self.points), self.patchinfo))
class FoilGainExpandCriterion(SplitCriterion):
    """FOIL-gain style split criterion for rule expansion.

    Merit weights the gain in log2-probability of the target class
    (``class_idx``) by the class' weight in the chosen branch.
    """
    def __init__(self, min_branch_frac_option=0.01):
        super().__init__()
        # Minimum fraction of total weight a branch must carry to be considered.
        self.min_branch_frac_option = min_branch_frac_option
        self.best_idx = 0
        self.class_idx = 0

    def get_merit_of_split(self, pre_split_dist, post_split_dist):
        """Merit of a candidate binary split, or -inf when a branch is too small."""
        if (self.num_subsets_greater_than_frac(post_split_dist, self.min_branch_frac_option) < 2):
            return (- np.inf)
        entropy1 = self.compute_entropy(post_split_dist[0])
        entropy2 = self.compute_entropy(post_split_dist[1])
        # best_idx tracks the branch with the higher log-probability of the target class.
        if (entropy1 >= entropy2):
            self.best_idx = 0
        else:
            self.best_idx = 1
        entropy = min(entropy1, entropy2)
        gain = (entropy - self.compute_entropy(pre_split_dist))
        try:
            return (post_split_dist[self.best_idx][self.class_idx] * gain)
        except KeyError:
            # Target class absent from the chosen branch: no merit.
            return 0

    def get_range_of_merit(self, pre_split_dist):
        """Upper bound on merit magnitude: log2 of the number of classes."""
        num_classes = len(pre_split_dist)
        return np.log2(num_classes)

    def compute_entropy(self, dist):
        """log2 of the target class' probability in `dist` (0 if the class is absent)."""
        try:
            return np.log2((dist[self.class_idx] / sum(dist.values())))
        except KeyError:
            return 0

    @staticmethod
    def num_subsets_greater_than_frac(distributions, min_frac):
        """Count branches whose share of the total weight exceeds `min_frac`.

        Fix: declared as a @staticmethod — the original definition lacked
        ``self``, so invoking it as ``self.num_subsets_greater_than_frac(...)``
        raised a TypeError (three arguments passed to a two-parameter function).
        """
        total_weight = 0.0
        dist_sums = ([0.0] * len(distributions))
        for i in range(len(dist_sums)):
            dist_sums[i] = sum(distributions[i].values())
            total_weight += dist_sums[i]
        num_greater = 0
        for d in dist_sums:
            if ((d / total_weight) > min_frac):
                num_greater += 1
        return num_greater
class DDPG():
    """Deep Deterministic Policy Gradient agent.

    Holds actor/critic networks plus Polyak-averaged target copies; ``update``
    performs one critic step, one actor step, then soft target updates.
    """
    def __init__(self, state_shape, action_shape, max_action=1, discount=0.99, tau=0.005, batch_size=256, device='cpu', seed=0, logger=None):
        # Seed both numpy and torch for reproducibility.
        np.random.seed(seed)
        torch.manual_seed(seed)
        self.actor = DeterministicPolicy(state_shape=state_shape, action_shape=action_shape, hidden_units=[256, 256], hidden_activation=nn.ReLU(inplace=True)).to(device)
        self.actor_target = deepcopy(self.actor)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=0.0003)
        self.critic = Critic(state_shape, action_shape).to(device)
        self.critic_target = deepcopy(self.critic)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=0.0003)
        self.logger = logger
        self.expl_noise = 0.1  # std of Gaussian exploration noise, scaled by max_action
        self.action_shape = action_shape
        self.dtype = torch.float
        self.discount = discount  # gamma
        self.tau = tau  # Polyak averaging coefficient for the target networks
        self.batch_size = batch_size
        self.max_action = max_action
        self.device = device
    def exploit(self, state):
        """Deterministic (noise-free) action for evaluation."""
        state = torch.FloatTensor(state.reshape(1, (- 1))).to(self.device)
        return self.actor(state).cpu().data.numpy().flatten()
    def explore(self, state):
        """Action with additive Gaussian noise, clipped to the valid action range."""
        state = torch.tensor(state, dtype=self.dtype, device=self.device).unsqueeze_(0)
        with torch.no_grad():
            noise = ((torch.randn(self.action_shape) * self.max_action) * self.expl_noise).to(self.device)
            action = (self.actor(state) + noise)
        a = action.cpu().numpy()[0]
        return np.clip(a, (- self.max_action), self.max_action)
    def update(self, batch):
        """One DDPG gradient step on a (state, action, reward, done, next_state) batch.

        Order matters: critic update, then actor update, then soft target updates.
        """
        (state, action, reward, done, next_state) = batch
        # Bootstrapped TD target from the target networks (detached from the graph).
        target_Q = self.critic_target(next_state, self.actor_target(next_state))
        target_Q = (reward + (((1.0 - done) * self.discount) * target_Q.detach()))
        current_Q = self.critic(state, action)
        critic_loss = F.mse_loss(current_Q, target_Q)
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()
        # Deterministic policy gradient: maximise Q under the current critic.
        actor_loss = (- self.critic(state, self.actor(state)).mean())
        self.actor_optimizer.zero_grad()
        actor_loss.backward()
        self.actor_optimizer.step()
        # Polyak (soft) update of both target networks, in place.
        for (param, target_param) in zip(self.critic.parameters(), self.critic_target.parameters()):
            target_param.data.copy_(((self.tau * param.data) + ((1 - self.tau) * target_param.data)))
        for (param, target_param) in zip(self.actor.parameters(), self.actor_target.parameters()):
            target_param.data.copy_(((self.tau * param.data) + ((1 - self.tau) * target_param.data)))
    def get_policy_state_dict(self) -> OrderedDict:
        """State dict of the current (online) actor, e.g. for checkpointing."""
        return self.actor.state_dict()
def est_rank(layer):
    """Estimate a decomposition rank for `layer` via EVBMF on two unfoldings,
    rounded up to the next multiple of 16."""
    weights = layer.weight.data
    unfold0 = tl.base.unfold(weights, 0)
    unfold1 = tl.base.unfold(weights, 1)
    rank0 = EVBMF(unfold0).shape[0]
    rank1 = EVBMF(unfold1).shape[0]
    # Keep the larger of the two estimates and round up to a multiple of 16.
    return int(np.ceil(max(rank0, rank1) / 16) * 16)
def convert_conv2convsamepadding_model(module, process_group=None, channel_last=False):
    """Recursively replace every convolution in `module` with Conv2dSamePadding,
    copying weights (and bias when present); non-conv modules pass through."""
    result = module
    if isinstance(module, torch.nn.modules.conv._ConvNd):
        has_bias = isinstance(module.bias, torch.Tensor)
        result = Conv2dSamePadding(module.in_channels, module.out_channels, module.kernel_size, module.stride, module.dilation, module.groups, bias=has_bias)
        result.weight.data = module.weight.data.clone().detach()
        if has_bias:
            result.bias.data = module.bias.data.clone().detach()
    # Recurse into children; add_module re-registers each converted child.
    for name, child in module.named_children():
        result.add_module(name, convert_conv2convsamepadding_model(child, process_group=process_group, channel_last=channel_last))
    del module
    return result
class Sinc2_autograd(torch.autograd.Function):
    """Custom autograd Function computing sinc2(theta) with an analytic backward.

    NOTE(review): ``forward``/``backward`` lack @staticmethod, which current
    PyTorch requires for autograd.Function subclasses — confirm the torch
    version this code targets.
    """
    def forward(ctx, theta):
        # Stash theta for use in the backward pass.
        ctx.save_for_backward(theta)
        return sinc2(theta)
    def backward(ctx, grad_output):
        (theta,) = ctx.saved_tensors
        grad_theta = None
        if ctx.needs_input_grad[0]:
            # Chain rule: multiply by the derivative sinc2_dt(theta),
            # moved onto grad_output's device/dtype.
            grad_theta = (grad_output * sinc2_dt(theta).to(grad_output))
        return grad_theta
def dense_layer(inputs, output_units, bias=True, activation=None, batch_norm=None, dropout=None, scope='dense-layer', reuse=False):
    """Fully connected layer with optional bias, batch norm, activation and
    dropout (applied in that order), built inside its own variable scope."""
    with tf.variable_scope(scope, reuse=reuse):
        weights = tf.get_variable(name='weights', initializer=tf.contrib.layers.variance_scaling_initializer(), shape=[shape(inputs, -1), output_units])
        out = tf.matmul(inputs, weights)
        if bias:
            biases = tf.get_variable(name='biases', initializer=tf.constant_initializer(), shape=[output_units])
            out = out + biases
        if batch_norm is not None:
            # `batch_norm` doubles as the is-training flag.
            out = tf.layers.batch_normalization(out, training=batch_norm, reuse=reuse)
        if activation:
            out = activation(out)
        if dropout is not None:
            out = tf.nn.dropout(out, dropout)
        return out
class TestCorpora(unittest.TestCase):
    """CoNLL corpus-reading tests backed by the bundled resources directory."""

    def setUp(self):
        directory = (os.path.dirname(os.path.realpath(__file__)) + '/resources/')
        self.input_data = open((directory + 'input.conll'), 'r')
        # Fix: the file handle was never closed, leaking it across test runs.
        self.addCleanup(self.input_data.close)

    def test_conll_reader(self):
        corpus = Corpus.from_file('test', self.input_data)
        self.assertEqual(5, len(corpus.documents))
def direct_kark_sort(s):
    """Suffix array of `s` via Karkkainen-Sanders (DC3); returns the first
    n entries of the rank array."""
    alphabet = [None] + sorted(set(s))
    rank_of = {c: i for i, c in enumerate(alphabet)}
    n = len(s)
    # DC3 requires three sentinel zeros appended to the translated input.
    encoded = array('i', [rank_of[c] for c in s] + [0] * 3)
    suffix_arr = array('i', [0] * (n + 3))
    kark_sort(encoded, suffix_arr, n, len(alphabet))
    return suffix_arr[:n]
def import_object(model_dir, model_path, axis_forward='-Z', axis_up='Y'):
    """Import an OBJ model into the Blender scene, join multi-part meshes,
    randomly rotate it about Z, rescale its bounding box to span 2 units,
    and drop it onto the ground plane (z = 0).

    Returns (obj, (bb_min, bb_max)) with the bounding box after rescaling.
    """
    # Deselect everything so the importer's selection is unambiguous.
    for o in bpy.data.objects:
        o.select_set(False)
    path = osp.join(model_dir, model_path)
    bpy.ops.import_scene.obj(filepath=path, axis_forward=axis_forward, axis_up=axis_up)
    selected_objs = bpy.context.selected_objects
    if (len(selected_objs) > 1):
        # Join all imported parts into a single object.
        ctx = bpy.context.copy()
        ctx['active_object'] = selected_objs[0]
        ctx['selected_editable_objects'] = selected_objs
        bpy.ops.object.join(ctx)
    obj = selected_objs[0]
    obj.select_set(state=True)
    obj.rotation_euler[2] = np.random.uniform(0, (2 * np.pi))
    verts = np.array([vert.co for vert in obj.data.vertices])
    verts_max = np.max(verts, axis=0)
    verts_min = np.min(verts, axis=0)
    # Fix: the world-space corners need the '@' matrix-vector product
    # (the operator was missing, which is a syntax error).
    bb_max = (obj.matrix_world @ Vector(verts_max))
    bb_min = (obj.matrix_world @ Vector(verts_min))
    scale = np.max(np.abs((bb_max - bb_min)))
    scale_factor = (2.0 / scale)
    obj.scale = (np.ones(3) * scale_factor)
    bb_min *= scale_factor
    bb_max *= scale_factor
    obj.location[2] -= bb_min[2]
    # NOTE(review): bb_min[2] is zeroed before the subtraction below, making
    # it a no-op — if bb_max was meant to shift with the object, the order of
    # the next two statements looks inverted; confirm intent before changing.
    bb_min[2] = 0
    bb_max[2] -= bb_min[2]
    print_info(obj.location, bb_min, bb_max)
    return (obj, (bb_min, bb_max))
def loop_train(model, optimizer, train_noisy_speech, train_clean_speech):
    """Single training step: forward pass, SDR-family loss, optimizer update.

    Reads the module-level `loss_function` to choose 'SDR' or 'wSDR'.
    Returns the scalar training loss.
    """
    with tf.GradientTape() as tape:
        train_predict_speech = model(train_noisy_speech)
        if (loss_function == 'SDR'):
            train_loss = modified_SDR_loss(train_predict_speech, train_clean_speech)
        elif (loss_function == 'wSDR'):
            train_loss = weighted_SDR_loss(train_noisy_speech, train_predict_speech, train_clean_speech)
        else:
            # Fix: an unknown loss_function previously fell through and raised
            # an opaque UnboundLocalError on `train_loss` below.
            raise ValueError(f'Unsupported loss_function: {loss_function!r}')
    gradients = tape.gradient(train_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return train_loss
def test_check_input2():
    """A Trainer built with a wrong/missing model object must raise TypeError."""
    expected_message = ('Please check you are using the right model object,' + ' or the right order of the attributes!')
    with pytest.raises(TypeError, match=expected_message):
        bad_trainer = Trainer(dataHandler, None, losses, validation_metrics, save_to_path, params)
        bad_trainer.train()
class GenDictWithBasering():
    """Chained generator-name lookup through a parent ring and its base rings.

    Stores a list of gens_dict()s, outermost ring first; __getitem__ searches
    them in order, so outer variable names shadow base-ring ones.
    """
    def __init__(self, parent, start):
        P = self._P = parent
        if isinstance(start, list):
            # Already a prepared list of dictionaries (used by __next__).
            self._D = start
            return
        self._D = [start]
        # Walk down the tower of base rings, collecting each gens_dict.
        while (hasattr(P, 'base_ring') and (P.base_ring() is not P)):
            P = P.base_ring()
            D = P.gens_dict()
            if isinstance(D, GenDictWithBasering):
                # The base ring already chains further down; splice it in and stop.
                self._D.extend(D._D)
                break
            else:
                self._D.append(D)
    def __next__(self):
        """Drop the outermost ring's dictionary, moving down to the base ring."""
        if (len(self._D) <= 1):
            raise ValueError(('no next term for %s available' % self))
        return GenDictWithBasering(self._P.base_ring(), self._D[1:])
    next = __next__
    def __repr__(self):
        return ('GenDict of ' + repr(self._P))
    def __getitem__(self, k):
        # First dictionary that knows the name wins (outer shadows inner).
        for D in self._D:
            try:
                return D[k]
            except KeyError:
                pass
        raise KeyError(('%s is not a variable name of %s or its iterated base rings' % (k, self._P)))
class TestGetWindow():
def test_boxcar(self):
w = windows.get_window('boxcar', 12)
assert_array_equal(w, np.ones_like(w))
w = windows.get_window(('boxcar',), 16)
assert_array_equal(w, np.ones_like(w))
def test_cheb_odd(self):
with suppress_warnings() as sup:
sup.filter(UserWarning, 'This window is not suitable')
w = windows.get_window(('chebwin', (- 40)), 53, fftbins=False)
assert_array_almost_equal(w, cheb_odd_true, decimal=4)
def test_cheb_even(self):
with suppress_warnings() as sup:
sup.filter(UserWarning, 'This window is not suitable')
w = windows.get_window(('chebwin', 40), 54, fftbins=False)
assert_array_almost_equal(w, cheb_even_true, decimal=4)
def test_dpss(self):
win1 = windows.get_window(('dpss', 3), 64, fftbins=False)
win2 = windows.dpss(64, 3)
assert_array_almost_equal(win1, win2, decimal=4)
def test_kaiser_float(self):
win1 = windows.get_window(7.2, 64)
win2 = windows.kaiser(64, 7.2, False)
assert_allclose(win1, win2)
def test_invalid_inputs(self):
assert_raises(ValueError, windows.get_window, set('hann'), 8)
assert_raises(ValueError, windows.get_window, 'broken', 4)
def test_array_as_window(self):
osfactor = 128
sig = np.arange(128)
win = windows.get_window(('kaiser', 8.0), (osfactor // 2))
with assert_raises(ValueError, match='must have the same length'):
resample(sig, (len(sig) * osfactor), window=win)
def test_general_cosine(self):
assert_allclose(get_window(('general_cosine', [0.5, 0.3, 0.2]), 4), [0.4, 0.3, 1, 0.3])
assert_allclose(get_window(('general_cosine', [0.5, 0.3, 0.2]), 4, fftbins=False), [0.4, 0.55, 0.55, 0.4])
def test_general_hamming(self):
assert_allclose(get_window(('general_hamming', 0.7), 5), [0.4, 0.6072949, 0.9427051, 0.9427051, 0.6072949])
assert_allclose(get_window(('general_hamming', 0.7), 5, fftbins=False), [0.4, 0.7, 1.0, 0.7, 0.4])
def test_lanczos(self):
assert_allclose(get_window('lanczos', 6), [0.0, 0., 0., 1.0, 0., 0.], atol=1e-09)
assert_allclose(get_window('lanczos', 6, fftbins=False), [0.0, 0., 0., 0., 0., 0.0], atol=1e-09)
assert_allclose(get_window('lanczos', 6), get_window('sinc', 6)) |
_builder('laion2B_multi')
class Laion2BMultiBuilder(BaseDatasetBuilder):
    """Builder for the multilingual LAION-2B webdataset (train split only)."""
    train_dataset_cls = LaionDataset
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/laion/defaults_2B_multi.yaml'}

    def _download_ann(self):
        # Annotations ship with the webdataset; nothing to download.
        pass

    def _download_vis(self):
        # Images ship with the webdataset; nothing to download.
        pass

    def build(self):
        """Instantiate the train split directly from the configured storage."""
        self.build_processors()
        storage = self.config.build_info.storage
        split = 'train'
        dataset = self.train_dataset_cls(vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], location=storage)
        return {split: dataset.inner_dataset}
class PairedDataset():
    """Zips two datasets item-wise: indexing returns the concatenated tuples.

    Length follows the first dataset; non-tuple items are wrapped into
    1-tuples before concatenation.
    """
    def __init__(self, dataset1, dataset2):
        self.dataset1 = dataset1
        self.dataset2 = dataset2

    def __len__(self):
        return len(self.dataset1)

    @staticmethod
    def _as_tuple(item):
        # Normalise a dataset item to a tuple so '+' concatenates.
        return item if isinstance(item, tuple) else (item,)

    def __getitem__(self, k):
        return self._as_tuple(self.dataset1[k]) + self._as_tuple(self.dataset2[k])
class FreqEncoder():
    """NeRF-style positional (frequency) encoder.

    Builds a list of embedding functions — optionally the identity, plus each
    periodic function applied at geometrically or linearly spaced frequencies —
    and concatenates their outputs along the last dimension.
    """
    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.create_embedding_fn()

    def create_embedding_fn(self):
        cfg = self.kwargs
        fns = []
        input_dims = cfg['input_dims']
        total_dim = 0
        if cfg['include_input']:
            fns.append(lambda x: x)
            total_dim += input_dims
        max_freq = cfg['max_freq_log2']
        n_freqs = cfg['num_freqs']
        if cfg['log_sampling']:
            freq_bands = 2.0 ** torch.linspace(0.0, max_freq, n_freqs)
        else:
            freq_bands = torch.linspace(2.0 ** 0.0, 2.0 ** max_freq, n_freqs)
        for freq in freq_bands:
            for p_fn in cfg['periodic_fns']:
                # Bind p_fn/freq as defaults to avoid the late-binding closure pitfall.
                fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq))
                total_dim += input_dims
        self.embed_fns = fns
        self.out_dim = total_dim

    def embed(self, inputs):
        """Concatenate every embedding function's output along the last dim."""
        return torch.cat([fn(inputs) for fn in self.embed_fns], -1)
(Output('topic-data', 'data'), [Input('date-dropdown', 'value')])
def get_topic_data(value):
    """Fetch the topic document whose _id matches the selected date."""
    with MongoClient(**MONGO_ARGS) as connection:
        collection = connection[READ_DB][READ_COL]
        cursor = collection.find({'_id': value})
        # Materialise the cursor and return the (single) matching document.
        return list(cursor)[0]
class TestBool(object):
    """Boolean-dtype ufunc behaviour: forbidden ops, truth tables, reductions."""
    def test_exceptions(self):
        ones = np.ones(1, dtype=np.bool_)
        # Arithmetic-style ufuncs are undefined on booleans.
        assert_raises(TypeError, np.negative, ones)
        assert_raises(TypeError, np.positive, ones)
        assert_raises(TypeError, np.subtract, ones, ones)

    def test_truth_table_logical(self):
        lhs_vals = [0, 0, 3, 2]
        rhs_vals = [0, 4, 0, 2]
        # Every float/int typecode plus bool itself.
        typecodes = np.typecodes['AllFloat'] + np.typecodes['AllInteger'] + '?'
        for dtype in map(np.dtype, typecodes):
            lhs = np.asarray(lhs_vals, dtype=dtype)
            rhs = np.asarray(rhs_vals, dtype=dtype)
            for func in (np.logical_or, np.maximum):
                assert_equal(func(lhs, rhs).astype(bool), [False, True, True, True])
            for func in (np.logical_and, np.minimum):
                assert_equal(func(lhs, rhs).astype(bool), [False, False, False, True])
            for func in (np.logical_xor, np.not_equal):
                assert_equal(func(lhs, rhs).astype(bool), [False, True, True, False])

    def test_truth_table_bitwise(self):
        lhs = [False, False, True, True]
        rhs = [False, True, False, True]
        assert_equal(np.bitwise_or(lhs, rhs), [False, True, True, True])
        assert_equal(np.bitwise_and(lhs, rhs), [False, False, False, True])
        assert_equal(np.bitwise_xor(lhs, rhs), [False, True, True, False])

    def test_reduce(self):
        cases = [np.array([0, 0, 0, 0], bool), np.array([1, 0, 1, 1], bool), np.array([1, 1, 1, 1], bool), np.array([], bool)]
        for arr in cases:
            # Reductions must agree with the Python builtins / the parity rule.
            assert_equal(np.logical_and.reduce(arr), all(arr))
            assert_equal(np.logical_or.reduce(arr), any(arr))
            assert_equal(np.logical_xor.reduce(arr), (arr.sum() % 2) == 1)
def get_transforms(split, size):
    """Build torchvision train/eval transforms for a given crop size.

    `size` must be one of 224, 384 or 448. The 'train' split uses random crop
    plus horizontal flip; every other split uses a deterministic center crop.
    """
    normalize = tv.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    # (resize, crop) per supported input size.
    dims = {448: (512, 448), 224: (256, 224), 384: (438, 384)}
    if size not in dims:
        # Fix: an unsupported size previously fell through to an opaque
        # UnboundLocalError on `resize_dim`.
        raise ValueError(f'Unsupported size: {size!r}; expected one of {sorted(dims)}')
    resize_dim, crop_dim = dims[size]
    if (split == 'train'):
        transform = tv.transforms.Compose([tv.transforms.Resize(resize_dim), tv.transforms.RandomCrop(crop_dim), tv.transforms.RandomHorizontalFlip(0.5), tv.transforms.ToTensor(), normalize])
    else:
        transform = tv.transforms.Compose([tv.transforms.Resize(resize_dim), tv.transforms.CenterCrop(crop_dim), tv.transforms.ToTensor(), normalize])
    return transform
class FlopCountAnalysis(JitModelAnalysis):
    """Flop counter: a JitModelAnalysis pre-loaded with the default op handles."""
    def __init__(self, model: nn.Module, inputs: Union[(Tensor, Tuple[(Tensor, ...)])]) -> None:
        super().__init__(model=model, inputs=inputs)
        # Register the built-in per-operator flop counters.
        self.set_op_handle(**_DEFAULT_SUPPORTED_OPS)
    # Inherit the parent's constructor documentation verbatim.
    __init__.__doc__ = JitModelAnalysis.__init__.__doc__
def get_installed_distributions(local_only=True, skip=stdlib_pkgs, include_editables=True, editables_only=False, user_only=False):
    """Return installed distributions from the working set, filtered by
    locality, editability and user-site membership.

    `skip` is a collection of project keys (default: stdlib packages) that
    are always excluded.
    """
    def _accept_all(d):
        return True

    local_test = dist_is_local if local_only else _accept_all
    if include_editables:
        editable_test = _accept_all
    else:
        def editable_test(d):
            return not dist_is_editable(d)
    editables_only_test = dist_is_editable if editables_only else _accept_all
    user_test = dist_in_usersite if user_only else _accept_all
    return [d for d in pkg_resources.working_set if (local_test(d) and (d.key not in skip) and editable_test(d) and editables_only_test(d) and user_test(d))]
def repr_lincomb(terms, is_latex=False, scalar_mult='*', strip_one=False, repr_monomial=None, latex_scalar_mult=None):
    """Render a linear combination of (monomial, coefficient) pairs as text.

    Zero coefficients are skipped, unit coefficients hidden, and signs are
    folded into the ' + ' / ' - ' separators. In LaTeX mode a LaTeX-specific
    scalar-multiplication symbol may be used. Returns '0' if nothing was
    emitted.
    """
    if is_latex:
        # In LaTeX mode, '*' degrades to a plain space unless overridden.
        if (latex_scalar_mult is not None):
            scalar_mult = latex_scalar_mult
        elif (scalar_mult == '*'):
            scalar_mult = ' '
    if (repr_monomial is None):
        if is_latex:
            def repr_monomial(monomial):
                return (monomial._latex_() if hasattr(monomial, '_latex_') else str(monomial))
        else:
            repr_monomial = str
    s = ''
    first = True
    if (scalar_mult is None):
        scalar_mult = ('' if is_latex else '*')
    for (monomial, c) in terms:
        if (c != 0):
            # First pass: detect the sign from the plain representation...
            coeff = coeff_repr(c)
            negative = False
            if (len(coeff) and (coeff[0] == '-')):
                negative = True
            try:
                if (c < 0):
                    negative = True
            except (NotImplementedError, TypeError):
                # Unorderable coefficient types: rely on the string test above.
                pass
            # ...then re-render with the sign factored out into the separator.
            if negative:
                coeff = coeff_repr((- c), is_latex)
            else:
                coeff = coeff_repr(c, is_latex)
            if (coeff == '1'):
                coeff = ''
            if (coeff != '0'):
                if negative:
                    if first:
                        sign = '-'
                    else:
                        sign = ' - '
                elif first:
                    sign = ''
                else:
                    sign = ' + '
                b = repr_monomial(monomial)
                if len(b):
                    if (coeff != ''):
                        if ((b == '1') and strip_one):
                            # Optionally suppress an explicit '1' monomial.
                            b = ''
                        else:
                            b = (scalar_mult + b)
                s += ('%s%s%s' % (sign, coeff, b))
                first = False
    if first:
        # No term was emitted: the whole combination is zero.
        return '0'
    else:
        return s
(scope='function')
def estimators():
return numba_interface.Estimators(j_estimator=np.array([0.0, 0.0], dtype=np.float64), nu_bar_estimator=np.array([0.0, 0.0], dtype=np.float64), j_blue_estimator=np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=np.float64), Edotlu_estimator=np.array([[0.0, 0.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float64), photo_ion_estimator=np.empty((0, 0), dtype=np.float64), stim_recomb_estimator=np.empty((0, 0), dtype=np.float64), bf_heating_estimator=np.empty((0, 0), dtype=np.float64), stim_recomb_cooling_estimator=np.empty((0, 0), dtype=np.float64), photo_ion_estimator_statistics=np.empty((0, 0), dtype=np.int64)) |
def main():
    """Entry point: fine-tune and/or evaluate a sequence-classification model
    on a GLUE task, driven by HfArgumentParser dataclasses.

    Runs train / eval / predict according to the TrainingArguments flags and
    writes per-task result files into the output directory.
    """
    # Arguments can come from a single JSON file or from the command line.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Refuse to clobber an existing non-empty output directory when training.
    if (os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
    # Only rank -1/0 logs at INFO; other distributed ranks are quieter.
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=(logging.INFO if (training_args.local_rank in [(- 1), 0]) else logging.WARN))
    logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool((training_args.local_rank != (- 1))), training_args.fp16)
    logger.info('Training/evaluation parameters %s', training_args)
    set_seed(training_args.seed)
    # Resolve label count / output mode from the GLUE task registry.
    try:
        num_labels = glue_tasks_num_labels[data_args.task_name]
        output_mode = glue_output_modes[data_args.task_name]
    except KeyError:
        raise ValueError(('Task not found: %s' % data_args.task_name))
    # Load config, tokenizer and model (model may come from a TF checkpoint).
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir)
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir)
    model = AutoModelForSequenceClassification.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir)
    # Datasets are built lazily, only for the phases actually requested.
    train_dataset = (CustomGlueDataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None)
    eval_dataset = (CustomGlueDataset(data_args, tokenizer=tokenizer, mode='dev', cache_dir=model_args.cache_dir) if training_args.do_eval else None)
    test_dataset = (CustomGlueDataset(data_args, tokenizer=tokenizer, mode='test', cache_dir=model_args.cache_dir) if training_args.do_predict else None)
    def build_compute_metrics_fn(task_name: str) -> Callable[([EvalPrediction], Dict)]:
        # Classification tasks take the argmax; regression tasks squeeze logits.
        def compute_metrics_fn(p: EvalPrediction):
            if (output_mode == 'classification'):
                preds = np.argmax(p.predictions, axis=1)
            elif (output_mode == 'regression'):
                preds = np.squeeze(p.predictions)
            return glue_compute_metrics(task_name, preds, p.label_ids)
        return compute_metrics_fn
    glue_utils.freeze_BERT_parameters(model)
    trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=build_compute_metrics_fn(data_args.task_name))
    if training_args.do_train:
        trainer.train(model_path=(model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None))
        trainer.save_model()
        # Only the master process persists the tokenizer.
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
    eval_results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        eval_datasets = [eval_dataset]
        # MNLI variants also evaluate on the mismatched dev set.
        if (data_args.task_name == 'mnli'):
            mnli_mm_data_args = dataclasses.replace(data_args, task_name='mnli-mm')
            eval_datasets.append(CustomGlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode='dev', cache_dir=model_args.cache_dir))
        if (data_args.task_name == 'mnli-2'):
            mnli_mm_data_args = dataclasses.replace(data_args, task_name='mnli-2-mm')
            eval_datasets.append(CustomGlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode='dev', cache_dir=model_args.cache_dir))
        for eval_dataset in eval_datasets:
            trainer.compute_metrics = build_compute_metrics_fn(eval_dataset.args.task_name)
            eval_result = trainer.evaluate(eval_dataset=eval_dataset)
            output_eval_file = os.path.join(training_args.output_dir, f'eval_results_{eval_dataset.args.task_name}.txt')
            if trainer.is_world_master():
                with open(output_eval_file, 'w') as writer:
                    logger.info('***** Eval results {} *****'.format(eval_dataset.args.task_name))
                    for (key, value) in eval_result.items():
                        logger.info(' %s = %s', key, value)
                        writer.write(('%s = %s\n' % (key, value)))
            eval_results.update(eval_result)
    if training_args.do_predict:
        logging.info('*** Test ***')
        test_datasets = [test_dataset]
        # Same MNLI mismatched handling for the test split.
        if (data_args.task_name == 'mnli'):
            mnli_mm_data_args = dataclasses.replace(data_args, task_name='mnli-mm')
            test_datasets.append(CustomGlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode='test', cache_dir=model_args.cache_dir))
        if (data_args.task_name == 'mnli-2'):
            mnli_mm_data_args = dataclasses.replace(data_args, task_name='mnli-2-mm')
            test_datasets.append(CustomGlueDataset(mnli_mm_data_args, tokenizer=tokenizer, mode='test', cache_dir=model_args.cache_dir))
        for test_dataset in test_datasets:
            predictions = trainer.predict(test_dataset=test_dataset).predictions
            if (output_mode == 'classification'):
                predictions = np.argmax(predictions, axis=1)
            output_test_file = os.path.join(training_args.output_dir, f'test_results_{test_dataset.args.task_name}.txt')
            if trainer.is_world_master():
                with open(output_test_file, 'w') as writer:
                    logger.info('***** Test results {} *****'.format(test_dataset.args.task_name))
                    writer.write('index\tprediction\n')
                    for (index, item) in enumerate(predictions):
                        if (output_mode == 'regression'):
                            writer.write(('%d\t%3.3f\n' % (index, item)))
                        else:
                            # Map the class index back to its label string.
                            item = test_dataset.get_labels()[item]
                            writer.write(('%d\t%s\n' % (index, item)))
    return eval_results
class _SplitOffSimpleInequalities(_TransformHrepresentation):
    def _transform_(self):
        """Split 'simple' inequalities off the H-representation.

        Inequalities of the form x_i - x_j + b >= 0 (coefficients +1/-1,
        constant <= 0) are collected as edges of a DiGraph; a potential
        function computed along a topological order turns the chains into a
        unimodular substitution T, and the remaining inequalities are
        transformed by T. Sets ``self.inequalities``, ``self.factor`` and
        ``self.rules``.
        """
        inequalities = self.inequalities
        B = self.B
        import logging
        logger = logging.getLogger(__name__)
        from itertools import takewhile
        from .representation import repr_pretty
        from sage.graphs.digraph import DiGraph
        from sage.matrix.constructor import matrix
        from sage.modules.free_module_element import vector
        from sage.rings.integer_ring import ZZ
        inequalities_filtered = []
        chain_links = {}
        # Classify each inequality: all-nonnegative ones are trivially satisfied;
        # x_i - x_j chains become graph edges; everything else is kept as-is.
        for coeffs in inequalities:
            dim = len(coeffs)
            if all(((c >= 0) for c in coeffs)):
                logger.debug('skipping %s (all coefficients >= 0)', repr_pretty(coeffs, 0))
                continue
            constant = coeffs[0]
            # Index sets of +1, -1 and |c| >= 2 coefficients (constant excluded).
            ones = tuple(((i + 1) for (i, c) in enumerate(coeffs[1:]) if (c == 1)))
            mones = tuple(((i + 1) for (i, c) in enumerate(coeffs[1:]) if (c == (- 1))))
            absgetwo = tuple(((i + 1) for (i, c) in enumerate(coeffs[1:]) if (abs(c) >= 2)))
            if ((len(ones) == 1) and (not mones) and (not absgetwo)):
                # Single variable x_i + b >= 0: only binding when b < 0.
                if (constant < 0):
                    inequalities_filtered.append(coeffs)
            elif ((len(ones) == 1) and (len(mones) == 1) and (not absgetwo) and (constant <= 0)):
                # Chain link x_{ones[0]} - x_{mones[0]} + constant >= 0.
                logger.debug('handling %s', repr_pretty(coeffs, 0))
                chain_links[(mones[0], ones[0])] = constant
            else:
                inequalities_filtered.append(coeffs)
        G = DiGraph(chain_links, format='list_of_edges')
        potential = {}
        paths = {}
        D = {}
        inequalities_extra = []
        # T starts from the identity; D holds its sparse entries.
        for i in range(dim):
            D[(i, i)] = 1
        # Propagate potentials along a topological order of the chain graph.
        for v in G.topological_sort():
            NP = iter(sorted(((n, (potential[n] + chain_links[(n, v)])) for n in G.neighbor_in_iterator(v)), key=(lambda k: (k[1], k[0]))))
            (n, p) = next(NP, (None, 0))
            potential[v] = p
            D[(0, v)] = (- p)
            # Record the chosen predecessor path and mark its members in T.
            paths[v] = (paths.get(n, ()) + (v,))
            for u in paths[v]:
                D[(u, v)] = 1
            # Remaining in-neighbours yield extra inequalities relating the
            # chosen path to the alternatives (shared prefix stripped).
            for (n, p) in NP:
                ell = len(tuple(takewhile((lambda u: (u[0] == u[1])), zip(paths[n], paths[v]))))
                coeffs = (dim * [0])
                for u in paths[v][ell:]:
                    coeffs[u] = 1
                for u in paths[n][ell:]:
                    coeffs[u] = (- 1)
                coeffs[0] = (p - potential[v])
                inequalities_extra.append(tuple(coeffs))
        T = matrix(ZZ, dim, dim, D)
        # Apply the substitution to the kept inequalities and add the extras.
        self.inequalities = (list((tuple((T * vector(ieq))) for ieq in inequalities_filtered)) + inequalities_extra)
        # Substitution rules mapping each generator through T into the ring B.
        rules_pre = ((y, B({tuple(row[1:]): 1})) for (y, row) in zip(((1,) + B.gens()), T.rows()))
        self.factor = next(rules_pre)[1]
        self.rules = dict(rules_pre)
def let_data_to_variable(variable, data, ctx=None, data_name=None, variable_name=None):
    """Copy a host ndarray into an NNabla variable and disable its gradient.

    Float data is cast into the variable's buffer in the data's own dtype;
    other dtypes go through ``variable.d``. A shape mismatch is logged before
    re-raising. If `ctx` is given, the buffer is cast to that context, falling
    back to CpuArray once on failure.
    """
    try:
        if (data.dtype <= np.float64):
            variable.data.cast(data.dtype)[...] = data
        else:
            variable.d = data
    except Exception:  # fix: was a bare except, which also swallowed SystemExit/KeyboardInterrupt
        if (variable.shape != data.shape):
            logger.critical('Shape does not match between data{} and variable{} ({} != {}).'.format((((' "' + data_name) + '"') if data_name else ''), (((' "' + variable_name) + '"') if variable_name else ''), data.shape, variable.shape))
        raise
    variable.need_grad = False
    if ctx:
        try:
            variable.data.cast(variable.data.dtype, ctx)
        except Exception:  # fix: narrowed from a bare except
            if (ctx.array_class != 'CpuArray'):
                # Retry once with the CPU array class before giving up.
                ctx.array_class = 'CpuArray'
                variable.data.cast(variable.data.dtype, ctx)
            else:
                raise
def emulate_int8_histogram(w, scale=None, zero_point=None):
    """Fake-quantize ``w`` to int8 using histogram-observer qparams.

    When ``scale`` is not supplied, a fresh ``HistogramObserver`` is run
    over ``w`` to derive ``(scale, zero_point)``; both are moved to CUDA
    and matched to ``w``'s dtype. Returns the quantized tensor together
    with the qparams actually used.
    """
    if scale is None:
        observer = torch.quantization.observer.HistogramObserver()
        observer(w.float())  # populate the histogram; output unused
        scale, zero_point = observer.calculate_qparams()
        scale = scale.cuda().type_as(w)
        zero_point = zero_point.cuda().type_as(w)
    return quantize(w, scale, zero_point), scale, zero_point
class NormalizationData(object):
    """Compute, persist, and read per-feature normalization statistics
    (mean, mean of squares, variance) over HDF5 dataset bundles.

    The creation helpers are stateless and are therefore declared as
    ``@staticmethod``; instances only read back previously written
    statistics from a normalization file.
    """

    # HDF5 group / dataset names used inside the normalization file.
    GROUP_INPUTS = 'inputs'
    GROUP_OUTPUTS = 'outputs'
    DATASET_MEAN = 'mean'
    DATASET_MEAN_OF_SQUARES = 'meanOfSquares'
    DATASET_VARIANCE = 'variance'
    DATASET_TOTAL_FRAMES = 'totalNumberOfFrames'
    # Axis layout of stored datasets: (time, feature).
    DATASET_TIME_DIMENSION_INDEX = 0
    DATASET_FEATURE_DIMENSION_INDEX = 1
    # Tolerance for detecting loss of precision while accumulating sums.
    SUMMATION_PRECISION = 1e-05

    @staticmethod
    def createNormalizationFile(bundleFilePath, outputFilePath, dtype=np.float64, flag_includeOutputs=True):
        """Calculate statistics for a bundle and write them to ``outputFilePath``.

        Output statistics are included unless ``flag_includeOutputs`` is False.
        """
        NormalizationData._calculateNormalizationData(bundleFilePath, outputFilePath, NormalizationData.GROUP_INPUTS, dtype=dtype)
        if flag_includeOutputs:
            NormalizationData._calculateNormalizationData(bundleFilePath, outputFilePath, NormalizationData.GROUP_OUTPUTS, dtype=dtype)

    @staticmethod
    def _calculateNormalizationData(bundleFilePath, outputFilePath, groupName, dtype=np.float64):
        """Accumulate sums over every dataset file in the bundle, then write
        the derived mean / mean-of-squares / variance for ``groupName``."""
        accumulatedSum = None
        accumulatedSumOfSqr = None
        # BUG FIX: was ``long()`` (Python 2 only); a plain int interoperates
        # with the np.int64 counts returned by _accumulateSums.
        totalFrames = 0
        bundle = BundleFile(bundleFilePath)
        for filePath in bundle.datasetFilePaths:
            with h5py.File(filePath, mode='r') as datasetFile:
                (intermSum, intermSumOfSqr, intermTotalFrames) = NormalizationData._accumulateSums(datasetFile, groupName, dtype=dtype)
                accumulatedSum = NormalizationData._updateTotalSum(accumulatedSum, intermSum)
                accumulatedSumOfSqr = NormalizationData._updateTotalSum(accumulatedSumOfSqr, intermSumOfSqr)
                totalFrames += intermTotalFrames
        (mean, meanOfSquares, variance) = NormalizationData._calculateMeans(accumulatedSum, accumulatedSumOfSqr, totalFrames)
        with h5py.File(outputFilePath, mode='a') as out:
            NormalizationData._writeData(out, groupName, mean, meanOfSquares, variance, totalFrames, dtype=dtype)

    @staticmethod
    def _accumulateSums(f, groupName, dtype=np.float64):
        """Return ``(sum, sumOfSquares, frameCount)`` over all datasets in
        ``f[groupName]``; ``(None, None, 0)`` when the group is absent/empty."""
        featSum = None  # renamed from ``sum`` to avoid shadowing the builtin
        featSumOfSqr = None
        totalFrames = np.int64(0)
        if (groupName not in f):
            return (featSum, featSumOfSqr, totalFrames)
        group = f[groupName]
        # BUG FIX: h5py's KeysView is not indexable on Python 3; materialize it.
        datasetNames = list(group.keys())
        if (len(datasetNames) == 0):
            return (featSum, featSumOfSqr, totalFrames)
        featDims = group[datasetNames[0]].shape[NormalizationData.DATASET_FEATURE_DIMENSION_INDEX]
        featSum = np.zeros(featDims, dtype=dtype)
        featSumOfSqr = np.zeros(featDims, dtype=dtype)
        for dsName in datasetNames:
            dataset = group[dsName][...]
            featSum += np.sum(dataset, axis=NormalizationData.DATASET_TIME_DIMENSION_INDEX)
            featSumOfSqr += np.sum(np.square(dataset), axis=NormalizationData.DATASET_TIME_DIMENSION_INDEX)
            totalFrames += dataset.shape[NormalizationData.DATASET_TIME_DIMENSION_INDEX]
        return (featSum, featSumOfSqr, totalFrames)

    @staticmethod
    def _updateTotalSum(totalSum, intermediateSum):
        """Add ``intermediateSum`` into ``totalSum``, tolerating ``None`` on
        either side; raises FloatingPointError if the addition visibly loses
        precision (operands with very different magnitudes)."""
        if ((totalSum is None) and (intermediateSum is None)):
            return None
        if (totalSum is None):
            return intermediateSum
        if (intermediateSum is None):
            return totalSum
        oldSum = totalSum
        newSum = np.add(totalSum, intermediateSum)
        # Verify the addition round-trips: (new - old) should recover the addend.
        sumErr = np.sum(np.abs(((newSum - oldSum) - intermediateSum)))
        if (sumErr > NormalizationData.SUMMATION_PRECISION):
            raise FloatingPointError('sums have very different orders of magnitude. summation error = {}'.format(sumErr))
        return newSum

    @staticmethod
    def _calculateMeans(totalSum, totalSumOfSqr, totalFrames):
        """Derive ``(mean, meanOfSquares, variance)`` from accumulated sums;
        each element is ``None`` when its inputs are unavailable."""
        mean = None
        meanOfSquares = None
        variance = None
        if (totalSum is not None):
            assert (totalFrames > 0)
            mean = (totalSum / totalFrames)
        if ((mean is not None) and (totalSumOfSqr is not None)):
            assert (totalFrames > 0)
            meanOfSquares = (totalSumOfSqr / totalFrames)
            # Var[X] = E[X^2] - E[X]^2
            variance = (meanOfSquares - np.square(mean))
        return (mean, meanOfSquares, variance)

    @staticmethod
    def _writeData(f, groupName, mean, meanOfSqr, variance, totalFrames, dtype=np.float64):
        """(Re)create ``groupName`` in ``f`` and store the statistics datasets."""
        if (groupName in f):
            del f[groupName]  # overwrite any previous statistics
        group = f.create_group(groupName)
        dsNames = [NormalizationData.DATASET_MEAN, NormalizationData.DATASET_MEAN_OF_SQUARES, NormalizationData.DATASET_VARIANCE]
        datasets = [mean, meanOfSqr, variance]
        for (name, ds) in zip(dsNames, datasets):
            NormalizationData._writeDataset(group, name, ds, dtype)
        if (totalFrames > 0):
            group.create_dataset(NormalizationData.DATASET_TOTAL_FRAMES, data=totalFrames)

    @staticmethod
    def _writeDataset(group, datasetName, dataset, dtype=np.float64):
        """Store ``dataset`` under ``datasetName``; silently skip ``None``."""
        if (dataset is None):
            return
        group.create_dataset(datasetName, data=dataset, dtype=dtype)

    def __init__(self, normalizationFilePath):
        """Load previously written statistics from ``normalizationFilePath``."""
        self._normalizationFilePath = normalizationFilePath
        self._inputMean = None
        self._inputVariance = None
        self._outputMean = None
        self._outputVariance = None
        self._readNormalizationData()

    def _readNormalizationData(self):
        """Populate the cached input/output mean and variance from the file."""
        if (not os.path.isfile(self._normalizationFilePath)):
            raise IOError((self._normalizationFilePath + ' does not exist'))
        with h5py.File(self._normalizationFilePath, mode='r') as f:
            (self._inputMean, self._inputVariance) = self._getMeanAndVarianceFromGroup(f, self.GROUP_INPUTS)
            (self._outputMean, self._outputVariance) = self._getMeanAndVarianceFromGroup(f, self.GROUP_OUTPUTS)

    @staticmethod
    def _getMeanAndVarianceFromGroup(f, groupName):
        """Return ``(mean, variance)`` from ``f[groupName]`` (``None`` for
        missing pieces).

        BUG FIX: declared ``@staticmethod`` — it takes no ``self`` but is
        invoked as ``self._getMeanAndVarianceFromGroup(f, ...)``, which
        previously passed the instance as ``f`` and raised TypeError.
        """
        mean = None
        variance = None
        if (groupName not in f):
            return (mean, variance)
        group = f[groupName]
        if (NormalizationData.DATASET_MEAN in group):
            mean = group[NormalizationData.DATASET_MEAN][...]
        if (NormalizationData.DATASET_VARIANCE in group):
            variance = group[NormalizationData.DATASET_VARIANCE][...]
        return (mean, variance)

    def inputMean(self):
        """Per-feature mean of the inputs group (or None)."""
        return self._inputMean

    def inputVariance(self):
        """Per-feature variance of the inputs group (or None)."""
        return self._inputVariance

    def outputMean(self):
        """Per-feature mean of the outputs group (or None)."""
        return self._outputMean

    def outputVariance(self):
        """Per-feature variance of the outputs group (or None)."""
        return self._outputVariance
class TransformerDecoderLayer(nn.Module):
    """A single Transformer decoder layer: self-attention, optional
    encoder-decoder (cross) attention, and a position-wise feed-forward
    network, each wrapped with residual connections and LayerNorm
    (pre- or post-norm depending on ``args.decoder_normalize_before``).

    Optional extras controlled by ``args`` / constructor flags: quant-noise
    on the FC layers, per-sublayer scaling LayerNorms (``scale_attn`` /
    ``scale_fc``), a learned residual scale (``scale_resids``), stochastic
    depth (``drop_path_rate``), and a bottleneck adapter after the FFN
    (``use_adapter``).
    """
    def __init__(self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, drop_path_rate=0.0, use_adapter=False, adapter_dim=200):
        super().__init__()
        self.embed_dim = args.decoder_embed_dim
        self.use_adapter = use_adapter
        if (use_adapter == True):
            # Bottleneck adapter applied to the FFN output (see forward()).
            self.adapter = Adapter_Layer(d_model=self.embed_dim, down_size=adapter_dim)
        self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__)
        self.quant_noise = getattr(args, 'quant_noise_pq', 0)
        self.quant_noise_block_size = getattr(args, 'quant_noise_pq_block_size', 8)
        self.cross_self_attention = getattr(args, 'cross_self_attention', False)
        self.self_attn = self.build_self_attention(self.embed_dim, args, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn)
        # Optional extra LayerNorms on the attention outputs ("scale_attn").
        self.self_attn_ln = (LayerNorm(self.embed_dim) if getattr(args, 'scale_attn', False) else None)
        self.cross_attn_ln = (LayerNorm(self.embed_dim) if getattr(args, 'scale_attn', False) else None)
        self.nh = self.self_attn.num_heads
        self.head_dim = self.self_attn.head_dim
        self.activation_fn = utils.get_activation_fn(activation=(str(args.activation_fn) if (getattr(args, 'activation_fn', None) is not None) else 'relu'))
        # ``relu_dropout`` is the legacy name for activation dropout.
        activation_dropout_p = (getattr(args, 'activation_dropout', 0) or 0)
        if (activation_dropout_p == 0):
            activation_dropout_p = (getattr(args, 'relu_dropout', 0) or 0)
        self.activation_dropout_module = FairseqDropout(float(activation_dropout_p), module_name=self.__class__.__name__)
        self.normalize_before = args.decoder_normalize_before
        export = getattr(args, 'char_inputs', False)
        self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
        if no_encoder_attn:
            # Decoder-only configuration: no cross-attention sublayer.
            self.encoder_attn = None
            self.encoder_attn_layer_norm = None
        else:
            self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
            self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.ffn_layernorm = (LayerNorm(args.decoder_ffn_embed_dim) if getattr(args, 'scale_fc', False) else None)
        # Learned elementwise residual scaling ("scale_resids"), else identity.
        self.w_resid = (nn.Parameter(torch.ones(self.embed_dim), requires_grad=True) if getattr(args, 'scale_resids', False) else None)
        self.fc1 = self.build_fc1(self.embed_dim, args.decoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size)
        self.fc2 = self.build_fc2(args.decoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size)
        self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
        self.need_attn = True
        self.onnx_trace = False
        # Stochastic depth on the residual branches when drop_path_rate > 0.
        self.drop_path = (DropPath(drop_path_rate) if (drop_path_rate > 0.0) else nn.Identity())

    def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
        """First FFN projection, optionally wrapped with quant-noise."""
        return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)

    def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
        """Second FFN projection, optionally wrapped with quant-noise."""
        return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)

    def build_self_attention(self, embed_dim, args, add_bias_kv=False, add_zero_attn=False):
        """Self-attention module; self_attention=False when cross_self_attention is on."""
        return MultiheadAttention(embed_dim, args.decoder_attention_heads, dropout=args.attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=(not getattr(args, 'cross_self_attention', False)), q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, scale_factor=args.attn_scale_factor, scale_heads=getattr(args, 'scale_heads', False))

    def build_encoder_attention(self, embed_dim, args):
        """Encoder-decoder (cross) attention; keys/values sized to the encoder."""
        return MultiheadAttention(embed_dim, args.decoder_attention_heads, kdim=getattr(args, 'encoder_embed_dim', None), vdim=getattr(args, 'encoder_embed_dim', None), dropout=args.attention_dropout, encoder_decoder_attention=True, q_noise=self.quant_noise, qn_block_size=self.quant_noise_block_size, scale_factor=args.attn_scale_factor, scale_heads=getattr(args, 'scale_heads', False))

    def prepare_for_onnx_export_(self):
        self.onnx_trace = True

    def residual_connection(self, x, residual):
        """Residual add with optional stochastic-depth drop of the branch."""
        return (residual + self.drop_path(x))

    def forward(self, x, encoder_out: Optional[torch.Tensor]=None, encoder_padding_mask: Optional[torch.Tensor]=None, incremental_state: Optional[Dict[(str, Dict[(str, Optional[Tensor])])]]=None, prev_self_attn_state: Optional[List[torch.Tensor]]=None, prev_attn_state: Optional[List[torch.Tensor]]=None, self_attn_mask: Optional[torch.Tensor]=None, self_attn_padding_mask: Optional[torch.Tensor]=None, need_attn: bool=False, need_head_weights: bool=False, self_attn_bias: Optional[Tensor]=None, cross_attn_bias: Optional[Tensor]=None, prompt_kv: Optional[Tensor]=None):
        """Run one decoder layer.

        ``x`` is assumed to be time-major, i.e. (seq_len, batch, embed_dim) —
        TODO confirm against the calling decoder. Returns
        ``(x, attn, self_attn_state_or_None)``; the third element is only
        populated during ONNX tracing with an incremental state.
        """
        if need_head_weights:
            need_attn = True
        # ---- self-attention sublayer ----
        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        if (prev_self_attn_state is not None):
            # Restore cached key/value (and optional padding mask) for
            # incremental decoding.
            (prev_key, prev_value) = prev_self_attn_state[:2]
            saved_state: Dict[(str, Optional[Tensor])] = {'prev_key': prev_key, 'prev_value': prev_value}
            if (len(prev_self_attn_state) >= 3):
                saved_state['prev_key_padding_mask'] = prev_self_attn_state[2]
            assert (incremental_state is not None)
            self.self_attn._set_input_buffer(incremental_state, saved_state)
        _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
        if (self.cross_self_attention and (not ((incremental_state is not None) and (_self_attn_input_buffer is not None) and ('prev_key' in _self_attn_input_buffer)))):
            # Cross-self-attention: prepend encoder states to the keys/values
            # and pad the masks accordingly (encoder positions are never masked).
            if (self_attn_mask is not None):
                assert (encoder_out is not None)
                self_attn_mask = torch.cat((x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1)
            if (self_attn_padding_mask is not None):
                if (encoder_padding_mask is None):
                    assert (encoder_out is not None)
                    encoder_padding_mask = self_attn_padding_mask.new_zeros(encoder_out.size(1), encoder_out.size(0))
                self_attn_padding_mask = torch.cat((encoder_padding_mask, self_attn_padding_mask), dim=1)
            assert (encoder_out is not None)
            y = torch.cat((encoder_out, x), dim=0)
        else:
            y = x
        (x, attn) = self.self_attn(query=x, key=y, value=y, key_padding_mask=self_attn_padding_mask, incremental_state=incremental_state, need_weights=False, attn_mask=self_attn_mask, attn_bias=self_attn_bias, prompt_kv=prompt_kv)
        if (self.self_attn_ln is not None):
            x = self.self_attn_ln(x)
        x = self.dropout_module(x)
        x = self.residual_connection(x, residual)
        if (not self.normalize_before):
            x = self.self_attn_layer_norm(x)
        # ---- cross-attention sublayer (skipped for decoder-only) ----
        if ((self.encoder_attn is not None) and (encoder_out is not None)):
            residual = x
            if self.normalize_before:
                x = self.encoder_attn_layer_norm(x)
            if (prev_attn_state is not None):
                (prev_key, prev_value) = prev_attn_state[:2]
                saved_state: Dict[(str, Optional[Tensor])] = {'prev_key': prev_key, 'prev_value': prev_value}
                if (len(prev_attn_state) >= 3):
                    saved_state['prev_key_padding_mask'] = prev_attn_state[2]
                assert (incremental_state is not None)
                self.encoder_attn._set_input_buffer(incremental_state, saved_state)
            (x, attn) = self.encoder_attn(query=x, key=encoder_out, value=encoder_out, key_padding_mask=encoder_padding_mask, incremental_state=incremental_state, static_kv=True, need_weights=(need_attn or ((not self.training) and self.need_attn)), need_head_weights=need_head_weights, attn_bias=cross_attn_bias)
            if (self.cross_attn_ln is not None):
                x = self.cross_attn_ln(x)
            x = self.dropout_module(x)
            x = self.residual_connection(x, residual)
            if (not self.normalize_before):
                x = self.encoder_attn_layer_norm(x)
        # ---- feed-forward sublayer ----
        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        x = self.activation_fn(self.fc1(x))
        x = self.activation_dropout_module(x)
        if (self.ffn_layernorm is not None):
            x = self.ffn_layernorm(x)
        x = self.fc2(x)
        x = self.dropout_module(x)
        if (self.use_adapter == True):
            x = self.adapter(x)
        if (self.w_resid is not None):
            # Learned per-channel scaling of the residual branch.
            residual = torch.mul(self.w_resid, residual)
        x = self.residual_connection(x, residual)
        if (not self.normalize_before):
            x = self.final_layer_norm(x)
        if (self.onnx_trace and (incremental_state is not None)):
            # ONNX tracing needs the cached self-attention state returned
            # explicitly instead of kept in the incremental_state dict.
            saved_state = self.self_attn._get_input_buffer(incremental_state)
            assert (saved_state is not None)
            if (self_attn_padding_mask is not None):
                self_attn_state = [saved_state['prev_key'], saved_state['prev_value'], saved_state['prev_key_padding_mask']]
            else:
                self_attn_state = [saved_state['prev_key'], saved_state['prev_value']]
            return (x, attn, self_attn_state)
        return (x, attn, None)

    def make_generation_fast_(self, need_attn: bool=False, **kwargs):
        self.need_attn = need_attn

    def upgrade_state_dict_named(self, state_dict, name):
        """Rename legacy ``layer_norms.{0,1,2}`` checkpoint keys to the current
        names, and backfill any parameters missing from ``state_dict`` with
        this module's current values."""
        layer_norm_map = {'0': 'self_attn_layer_norm', '1': 'encoder_attn_layer_norm', '2': 'final_layer_norm'}
        for (old, new) in layer_norm_map.items():
            for m in ('weight', 'bias'):
                k = '{}.layer_norms.{}.{}'.format(name, old, m)
                if (k in state_dict):
                    state_dict['{}.{}.{}'.format(name, new, m)] = state_dict[k]
                    del state_dict[k]
                if (('{}.{}.{}'.format(name, new, m) not in state_dict) and ('{}.{}'.format(new, m) in self.state_dict())):
                    state_dict['{}.{}.{}'.format(name, new, m)] = self.state_dict()['{}.{}'.format(new, m)]
        prefix = ((name + '.') if (name != '') else '')
        for (param_name, param_tensor) in self.state_dict().items():
            if ((prefix + param_name) not in state_dict):
                state_dict[(prefix + param_name)] = self.state_dict()[param_name]
class NodeMetaType(enum.Enum):
    """Kinds of node metadata; values are the schema tag strings.

    NOTE(review): semantics inferred from member names only — confirm
    against the registry/schema code that consumes this enum.
    """
    OPTPLAN_NODE = 'optplan_node'
    TRANSFORMATION = 'transformation'
def resnet_adapt101(args, pretrained=True, **kwargs):
    """Construct a ResNet3X3 ('resnet_adapt101') model.

    When ``pretrained`` is truthy, ImageNet weights are loaded from the
    local ``./pretrained`` directory (mapped onto CPU).
    """
    net = ResNet3X3(args, **kwargs)
    if not pretrained:
        return net
    print(' pretrained ')
    checkpoint = torch.load('./pretrained/resnet_adapt101-imagenet.pth', map_location='cpu')
    net.load_state_dict(checkpoint)
    return net
_REGISTRY  # NOTE(review): bare name expression — a no-op at runtime; looks like a truncated decorator or registration statement. Confirm intent.
class FSD50KDataModule(pl.LightningDataModule):
    """Lightning data module for FSD50K mel-spectrogram audio clips.

    Loads train/val/test splits via ``load_audio`` and serves them through
    DataLoaders with the project's audio collate functions.
    """

    def __init__(self, channels_last: bool=True, random_crop: Optional[int]=None, data_dir: Optional[str]='.cache', num_workers: int=3, batch_size: int=64, normalize: bool=True, pin_memory: bool=False, root='../datasets', *args, **kwargs):
        super().__init__()
        self.save_hyperparameters()
        base_shape = [1, 96, 101]  # (channels, mel bins, time frames)
        self.num_classes = 200
        self.batch_size = batch_size
        self.root = root
        # Channels-last layout rotates the channel axis to the end.
        self._image_shape = (base_shape[1], base_shape[2], base_shape[0]) if channels_last else base_shape

    def prepare_data(self):
        """No download step — data is expected to already be on disk."""
        pass

    def setup(self, stage):
        splits = load_audio(f'{self.root}/audio', feature='mel', train=True, root=self.root)
        self.audio_train, self.audio_val, self.audio_test = splits

    def train_dataloader(self):
        # NOTE(review): num_workers is hard-coded to 8 here, ignoring the
        # ``num_workers`` hyperparameter — confirm whether intentional.
        return DataLoader(self.audio_train, collate_fn=_collate_fn, batch_size=self.batch_size, shuffle=True, num_workers=8)

    def val_dataloader(self):
        return DataLoader(self.audio_val, collate_fn=_collate_fn_eval, batch_size=self.batch_size, shuffle=False, num_workers=8)

    def test_dataloader(self):
        return DataLoader(self.audio_test, collate_fn=_collate_fn_eval, batch_size=self.batch_size, shuffle=False, num_workers=8)

    def image_shape(self):
        """Shape of a single input sample (layout depends on channels_last)."""
        return self._image_shape

    def default_transforms(self) -> Callable:
        return audio_transform(channels_last=self.hparams.channels_last)
def _random_distributive_lattice(n):
    """Return a random Hasse-diagram digraph whose poset of antichains has
    exactly ``n`` elements (Sage internal helper).

    Grows a poset by adding elements covering random subsets until the
    antichain count reaches at least ``n``, then shrinks it by deleting
    random vertices (patching covers) until the count is back down to ``n``.

    NOTE(review): returns ``D`` (a plain DiGraph), not the HasseDiagram
    ``H`` built from it — presumably callers expect the digraph; confirm.
    """
    from sage.combinat.posets.hasse_diagram import HasseDiagram
    from copy import copy
    from sage.combinat.subset import Subsets
    from sage.graphs.digraph_generators import digraphs
    if (n < 4):
        # Small cases: a path digraph suffices (n-1 vertices — TODO confirm
        # the off-by-one is intentional for the antichain count).
        return digraphs.Path((n - 1))
    H = HasseDiagram({0: []})
    # Growth phase: add elements until there are at least n antichains.
    while (sum((1 for _ in H.antichains_iterator())) < n):
        D = copy(H)
        newcover = Subsets(H).random_element()
        new_element = H.order()
        D.add_vertex(new_element)
        for e in newcover:
            D.add_edge(e, new_element)
        D = D.transitive_reduction()
        H = HasseDiagram(D)
    # Shrink phase: delete random vertices (reconnecting their neighbors)
    # until exactly n antichains remain.
    while (sum((1 for _ in H.antichains_iterator())) > n):
        D = copy(H)
        to_delete = H.random_vertex()
        for a in D.neighbors_in(to_delete):
            for b in D.neighbors_out(to_delete):
                D.add_edge(a, b)
        D.delete_vertex(to_delete)
        # Keep vertex labels contiguous after the deletion.
        D.relabel({z: (z - 1) for z in range((to_delete + 1), (D.order() + 1))})
        H = HasseDiagram(D)
    return D
class TCFCProcessor(DataProcessor):
    """Processor for the TCFC formal/informal sentence-matching dataset."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """Build an InputExample from a TF tensor dictionary."""
        return InputExample(
            tensor_dict['idx'].numpy(),
            tensor_dict['sentence1'].numpy().decode('utf-8'),
            tensor_dict['sentence2'].numpy().decode('utf-8'),
            str(tensor_dict['label'].numpy()),
        )

    def get_train_examples(self, data_dir):
        """Read the training TSV and convert rows to examples."""
        train_path = os.path.join(data_dir, 'formal_informal_matching_train')
        logger.info('LOOKING AT {}'.format(train_path))
        return self._create_examples(self._read_tsv(train_path), 'train')

    def get_dev_examples(self, data_dir):
        """Read the test TSV as the dev split."""
        dev_path = os.path.join(data_dir, 'formal_informal_matching_test')
        return self._create_examples(self._read_tsv(dev_path), 'dev')

    def get_labels(self):
        """Binary labels."""
        return ['0', '1']

    def _create_examples(self, lines, set_type):
        """Rows are (label, text_a, text_b); guid is '<split>-<index>'."""
        return [
            InputExample(guid=('%s-%s' % (set_type, idx)), text_a=row[1], text_b=row[2], label=row[0])
            for (idx, row) in enumerate(lines)
        ]
def create_train_examples(X, Y, yspace, num=(- 1), balanced=True):
    """Build positive/negative training triples from labeled pairs.

    For each ``(x, y)`` pair, every other label in ``yspace`` acts as a
    negative; when ``num >= 0``, only ``num`` randomly sampled negatives
    are kept (``num == -1`` keeps them all). With ``balanced=True`` the
    positive example is repeated once per negative so the two classes stay
    balanced; otherwise the positive is emitted once per pair.

    :param X: input items
    :param Y: true label per input (must appear in ``yspace``)
    :param yspace: full label space
    :param num: number of negatives to sample per pair, or -1 for all
    :param balanced: repeat the positive once per negative when True
    :returns: ``(X_inp, Y_inp, outp)`` where each ``outp`` row is one-hot:
        ``[1.0, 0.0]`` = positive, ``[0.0, 1.0]`` = negative
    :raises ValueError: if some ``y`` is not present in ``yspace``
    """
    X_inp = []
    Y_inp = []
    outp = []
    for (x, y) in zip(X, Y):
        # Copy + remove keeps the original semantics (drops only the first
        # occurrence of y) and leaves yspace untouched.
        neg_samples = yspace[:]
        neg_samples.remove(y)
        if (num != (- 1)):
            neg_samples = random.sample(neg_samples, num)
        if (not balanced):
            X_inp.append(x)
            Y_inp.append(y)
            outp.append([1.0, 0.0])
        for yn in neg_samples:
            if balanced:
                X_inp.append(x)
                Y_inp.append(y)
                outp.append([1.0, 0.0])
            X_inp.append(x)
            Y_inp.append(yn)
            outp.append([0.0, 1.0])
    return (X_inp, Y_inp, outp)
def save_npz(file, matrix, compressed=True):
    """Save a scipy sparse matrix to ``.npz`` format.

    Stores the per-format index arrays plus ``format``, ``shape`` and
    ``data`` entries. Supports csc/csr/bsr, dia and coo matrices.

    :param file: file name or file-like object
    :param matrix: sparse matrix to serialize
    :param compressed: use ``np.savez_compressed`` when True
    :raises NotImplementedError: for unsupported sparse formats
    """
    fmt = matrix.format
    if fmt in ('csc', 'csr', 'bsr'):
        arrays_dict = {'indices': matrix.indices, 'indptr': matrix.indptr}
    elif fmt == 'dia':
        arrays_dict = {'offsets': matrix.offsets}
    elif fmt == 'coo':
        arrays_dict = {'row': matrix.row, 'col': matrix.col}
    else:
        raise NotImplementedError('Save is not implemented for sparse matrix of format {}.'.format(fmt))
    arrays_dict.update(format=fmt.encode('ascii'), shape=matrix.shape, data=matrix.data)
    saver = np.savez_compressed if compressed else np.savez
    saver(file, **arrays_dict)
def require_access_token(method):
    """Decorator: invoke ``method`` only when the client has an access token.

    Raises ``exceptions.MissingZenodoAccessToken(self.token_name)`` when
    ``self.access_token`` is falsy.
    """
    from functools import wraps  # local import keeps the decorator self-contained

    @wraps(method)  # BUG FIX: preserve __name__/__doc__ of the wrapped method
    def wrapper(self, *args, **kwargs):
        if self.access_token:
            return method(self, *args, **kwargs)
        else:
            raise exceptions.MissingZenodoAccessToken(self.token_name)
    return wrapper
class TFDebertaV2Model(metaclass=DummyObject):
    """Import-time placeholder for the TensorFlow DeBERTa-v2 model.

    Construction delegates to ``requires_backends`` to report that the
    'tf' backend is needed.
    """
    # Backends that must be installed for the real class to be available.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def test_unet_basic_conv_block():
    """Unit test for BasicConvBlock: rejected configs, checkpointing,
    stride, and multi-conv dilation/padding layout."""
    # DCN is not supported inside BasicConvBlock.
    with pytest.raises(AssertionError):
        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
        BasicConvBlock(64, 64, dcn=dcn)
    # Plugins are not supported either (two representative plugin configs).
    with pytest.raises(AssertionError):
        plugins = [dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), position='after_conv3')]
        BasicConvBlock(64, 64, plugins=plugins)
    with pytest.raises(AssertionError):
        plugins = [dict(cfg=dict(type='GeneralizedAttention', spatial_range=(- 1), num_heads=8, attention_type='0010', kv_stride=2), position='after_conv2')]
        BasicConvBlock(64, 64, plugins=plugins)
    # Gradient checkpointing on: shape is preserved (input needs grad for cp).
    block = BasicConvBlock(16, 16, with_cp=True)
    assert block.with_cp
    x = torch.randn(1, 16, 64, 64, requires_grad=True)
    x_out = block(x)
    assert (x_out.shape == torch.Size([1, 16, 64, 64]))
    # Checkpointing off: same shape behavior.
    block = BasicConvBlock(16, 16, with_cp=False)
    assert (not block.with_cp)
    x = torch.randn(1, 16, 64, 64)
    x_out = block(x)
    assert (x_out.shape == torch.Size([1, 16, 64, 64]))
    # Stride 2 halves the spatial resolution.
    block = BasicConvBlock(16, 16, stride=2)
    x = torch.randn(1, 16, 64, 64)
    x_out = block(x)
    assert (x_out.shape == torch.Size([1, 16, 32, 32]))
    # With num_convs=3 and dilation=3: the first conv keeps dilation/padding 1,
    # subsequent convs use the requested dilation (padding matches dilation).
    block = BasicConvBlock(16, 64, num_convs=3, dilation=3)
    assert (block.convs[0].conv.in_channels == 16)
    assert (block.convs[0].conv.out_channels == 64)
    assert (block.convs[0].conv.kernel_size == (3, 3))
    assert (block.convs[0].conv.dilation == (1, 1))
    assert (block.convs[0].conv.padding == (1, 1))
    assert (block.convs[1].conv.in_channels == 64)
    assert (block.convs[1].conv.out_channels == 64)
    assert (block.convs[1].conv.kernel_size == (3, 3))
    assert (block.convs[1].conv.dilation == (3, 3))
    assert (block.convs[1].conv.padding == (3, 3))
    assert (block.convs[2].conv.in_channels == 64)
    assert (block.convs[2].conv.out_channels == 64)
    assert (block.convs[2].conv.kernel_size == (3, 3))
    assert (block.convs[2].conv.dilation == (3, 3))
    assert (block.convs[2].conv.padding == (3, 3))
def bind_forward_vars(vars, ssspG, sssp_configs, binding):
    """Propagate layout choices from consecutive SSSP config edges into
    ``binding``, then tie 'VV' to the layout bound for 'KK' and print the
    resulting non-empty bindings.

    'SB2' is deliberately skipped (presumably bound elsewhere — confirm).
    Returns the updated binding.
    """
    for edge in zip(sssp_configs, sssp_configs[1:]):
        cfg = ssspG.edges[edge]['cfg']
        if cfg is None:
            continue
        for (var, layout) in cfg.items():
            if var != 'SB2':
                binding = vars.set_var_binding(binding, var, layout)
    # 'VV' always mirrors whatever layout 'KK' received.
    binding = vars.set_var_binding(binding, 'VV', binding['KK'])
    print('Bound variables after forward optimization:')
    for (var, layout) in binding.items():
        if layout is not None:
            print(f'{var}: {layout}')
    return binding
class _ReflectionPadNd(Module):
__constants__ = ['padding']
def forward(self, input: Tensor) -> Tensor:
return F.pad(input, self.padding, 'reflect')
def extra_repr(self) -> str:
return '{}'.format(self.padding) |
def training_loop(run_dir='.', training_set_kwargs={}, data_loader_kwargs={}, G_kwargs={}, D_kwargs={}, G_opt_kwargs={}, D_opt_kwargs={}, augment_kwargs=None, loss_kwargs={}, metrics=[], random_seed=0, world_size=1, rank=0, gpu=0, batch_gpu=4, batch_size=4, ema_kimg=10, ema_rampup=None, G_reg_interval=4, D_reg_interval=16, augment_p=0, ada_target=None, ada_interval=4, ada_kimg=500, total_kimg=25000, kimg_per_tick=4, image_snapshot_ticks=50, network_snapshot_ticks=50, resume_pkl=None, resume_start=0, cudnn_benchmark=True, allow_tf32=False, abort_fn=None, progress_fn=None, update_cam_prior_ticks=None, generation_with_image=False, **unused):
wandb.init(project='StyleNeRF')
wandb.config = {'batch_gpu': batch_gpu, 'batch_size': batch_size}
start_time = time.time()
device = torch.device('cuda', gpu)
np.random.seed(((random_seed * world_size) + rank))
torch.manual_seed(((random_seed * world_size) + rank))
torch.backends.cudnn.benchmark = cudnn_benchmark
torch.backends.cuda.matmul.allow_tf32 = allow_tf32
torch.backends.cudnn.allow_tf32 = allow_tf32
conv2d_gradfix.enabled = True
grid_sample_gradfix.enabled = True
img_dir = (run_dir + '/images')
os.makedirs(img_dir, exist_ok=True)
assert (batch_gpu <= (batch_size // world_size))
if (rank == 0):
print('Loading training set...')
if (world_size == 1):
data_loader_kwargs.update({'num_workers': 1, 'prefetch_factor': 1})
training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs)
training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=world_size, seed=random_seed)
training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=(batch_size // world_size), **data_loader_kwargs))
if generation_with_image:
backup_data_iterator = iter(torch.utils.data.DataLoader(dataset=copy.deepcopy(training_set), sampler=training_set_sampler, batch_size=(batch_size // world_size), **data_loader_kwargs))
if (rank == 0):
print()
print('Num images: ', len(training_set))
print('Image shape:', training_set.image_shape)
print('Label shape:', training_set.label_shape)
print()
if (rank == 0):
print('Constructing networks...')
img_resolution = (training_set.resolution if (resume_pkl is None) else G_kwargs.synthesis_kwargs.resolution_start)
common_kwargs = dict(c_dim=training_set.label_dim, img_resolution=img_resolution, img_channels=training_set.num_channels)
if (G_kwargs.get('img_channels', None) is not None):
common_kwargs['img_channels'] = G_kwargs['img_channels']
del G_kwargs['img_channels']
G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device)
D = dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train().requires_grad_(False).to(device)
G_ema = copy.deepcopy(G).eval()
wandb.watch(G, D)
resize_real_img_early = D_kwargs.get('resize_real_early', False)
disc_enable_ema = D_kwargs.get('enable_ema', False)
if disc_enable_ema:
D_ema = copy.deepcopy(D).eval()
if ((resume_pkl is not None) and (rank == 0)):
print(f'Resuming from "{resume_pkl}"')
with dnnlib.util.open_url(resume_pkl) as f:
resume_data = legacy.load_network_pkl(f)
modules = [('G', G), ('D', D), ('G_ema', G_ema)]
if disc_enable_ema:
modules += [('D_ema', D_ema)]
for (name, module) in modules:
misc.copy_params_and_buffers(resume_data[name], module, require_all=False)
z = torch.empty([batch_gpu, G.z_dim], device=device)
c = torch.empty([batch_gpu, G.c_dim], device=device)
img = misc.print_module_summary(G, [z, c], rank=rank)
misc.print_module_summary(D, [img, c], rank=rank)
if (rank == 0):
print('Setting up augmentation...')
augment_pipe = None
ada_stats = None
if ((augment_kwargs is not None) and ((augment_p > 0) or (ada_target is not None))):
augment_pipe = dnnlib.util.construct_class_by_name(**augment_kwargs).train().requires_grad_(False).to(device)
augment_pipe.p.copy_(torch.as_tensor(augment_p))
if (ada_target is not None):
ada_stats = training_stats.Collector(regex='Loss/signs/real')
if (rank == 0):
print(f'Distributing across {world_size} GPUs...')
ddp_modules = dict()
module_list = [('G_mapping_nerf', G.mapping_nerf), ('G_mapping', G.mapping), ('G_synthesis', G.synthesis), ('D', D), (None, G_ema), ('augment_pipe', augment_pipe)]
if (G.encoder is not None):
module_list += [('G_encoder', G.encoder)]
if disc_enable_ema:
module_list += [('D_ema', D_ema)]
for (name, module) in module_list:
if ((world_size > 1) and (module is not None) and (len(list(module.parameters())) != 0)):
module.requires_grad_(True)
module = torch.nn.parallel.DistributedDataParallel(module, device_ids=[device], broadcast_buffers=False, find_unused_parameters=True)
module.requires_grad_(False)
print('torch.nn.parallel.DistributedDataParallel success')
if (name is not None):
ddp_modules[name] = module
if (rank == 0):
print('Setting up training phases...')
loss = dnnlib.util.construct_class_by_name(device=device, **ddp_modules, **loss_kwargs)
phases = []
for (name, module, opt_kwargs, reg_interval) in [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]:
if (reg_interval is None):
opt = dnnlib.util.construct_class_by_name(params=module.parameters(), **opt_kwargs)
phases += [dnnlib.EasyDict(name=(name + 'both'), module=module, opt=opt, interval=1, scaler=None)]
else:
mb_ratio = (reg_interval / (reg_interval + 1))
opt_kwargs = dnnlib.EasyDict(opt_kwargs)
opt_kwargs.lr = (opt_kwargs.lr * mb_ratio)
opt_kwargs.betas = [(beta ** mb_ratio) for beta in opt_kwargs.betas]
opt = dnnlib.util.construct_class_by_name(module.parameters(), **opt_kwargs)
phases += [dnnlib.EasyDict(name=(name + 'main'), module=module, opt=opt, interval=1, scaler=None)]
phases += [dnnlib.EasyDict(name=(name + 'reg'), module=module, opt=opt, interval=reg_interval, scaler=None)]
for phase in phases:
phase.start_event = None
phase.end_event = None
if (rank == 0):
phase.start_event = torch.cuda.Event(enable_timing=True)
phase.end_event = torch.cuda.Event(enable_timing=True)
grid_size = None
grid_z = None
grid_c = None
grid_i = None
if (rank == 0):
print(f'Exporting sample images... {batch_gpu}')
(grid_size, images, labels) = setup_snapshot_image_grid(training_set=training_set)
grid_z = torch.from_numpy(np.random.RandomState(seed=0).randn(labels.shape[0], G.z_dim)).to(device).split(batch_gpu)
grid_c = torch.from_numpy(labels).to(device).split(batch_gpu)
grid_i = ((torch.from_numpy(images).float() / 127.5) - 1).to(device).split(batch_gpu)
if (not os.path.exists(os.path.join(img_dir, 'reals.png'))):
save_image_grid(images, os.path.join(img_dir, 'reals.png'), drange=[0, 255], grid_size=grid_size)
if (not os.path.exists(os.path.join(img_dir, 'fakes_init.png'))):
with torch.no_grad():
images = torch.cat([G_ema.get_final_output(z=z, c=c, noise_mode='const', img=img).cpu() for (z, c, img) in zip(grid_z, grid_c, grid_i)]).numpy()
save_image_grid(images, os.path.join(img_dir, 'fakes_init.png'), drange=[(- 1), 1], grid_size=grid_size)
if (rank == 0):
print('Initializing logs...')
stats_collector = training_stats.Collector(regex='.*')
stats_metrics = dict()
stats_jsonl = None
stats_tfevents = None
if (rank == 0):
stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'at')
try:
import torch.utils.tensorboard as tensorboard
stats_tfevents = tensorboard.SummaryWriter(run_dir)
except ImportError as err:
print('Skipping tfevents export:', err)
if (rank == 0):
print(f'Training for {total_kimg} kimg...')
print()
cur_nimg = resume_start
cur_tick = (cur_nimg // (1000 * kimg_per_tick))
cur_iter = 0
tick_start_nimg = cur_nimg
tick_start_time = time.time()
maintenance_time = (tick_start_time - start_time)
batch_idx = 0
if (progress_fn is not None):
progress_fn(0, total_kimg)
while True:
loss.set_alpha(cur_nimg)
curr_res = loss.resolution
if (hasattr(training_set_sampler, 'update_dataset_cameras') and ((cur_nimg == resume_start) and (resume_start > 0) and (cur_tick > update_cam_prior_ticks))):
training_set_sampler.update_dataset_cameras(D.get_estimated_camera)
with torch.autograd.profiler.record_function('data_fetch'):
def load_data(iterator):
(img, c, _) = next(iterator)
if resize_real_img_early:
img = resize_image(img, curr_res)
img = [{'img': img} for img in ((img.to(device).to(torch.float32) / 127.5) - 1).split(batch_gpu)]
c = c.to(device).split(batch_gpu)
return (img, c)
(phase_real_img, phase_real_c) = load_data(training_set_iterator)
all_gen_z = torch.randn([(len(phases) * batch_size), G.z_dim], device=device)
all_gen_z = [phase_gen_z.split(batch_gpu) for phase_gen_z in all_gen_z.split(batch_size)]
all_gen_c = [training_set.get_label(np.random.randint(len(training_set))) for _ in range((len(phases) * batch_size))]
all_gen_c = torch.from_numpy(np.stack(all_gen_c)).pin_memory().to(device)
all_gen_c = [phase_gen_c.split(batch_gpu) for phase_gen_c in all_gen_c.split(batch_size)]
all_gen_img = [[None for _ in range(len(phase_real_img))] for _ in range(len(phases))]
for (phase, phase_gen_z, phase_gen_c, phase_gen_img) in zip(phases, all_gen_z, all_gen_c, all_gen_img):
if ((batch_idx % phase.interval) != 0):
continue
if generation_with_image:
(phase_gen_img, phase_gen_c) = load_data(backup_data_iterator)
if (phase.start_event is not None):
phase.start_event.record(torch.cuda.current_stream(device))
phase.opt.zero_grad(set_to_none=True)
phase.module.requires_grad_(True)
for (round_idx, (real_img, real_c, gen_z, gen_c, fake_img)) in enumerate(zip(phase_real_img, phase_real_c, phase_gen_z, phase_gen_c, phase_gen_img)):
sync = (round_idx == ((batch_size // (batch_gpu * world_size)) - 1))
gain = phase.interval
losses = loss.accumulate_gradients(phase=phase.name, real_img=real_img, real_c=real_c, gen_z=gen_z, gen_c=gen_c, fake_img=fake_img, sync=sync, gain=gain, scaler=phase.scaler)
for (loss_key, loss_value) in losses.items():
wandb.log({loss_key: loss_value.item()})
phase.module.requires_grad_(False)
with torch.autograd.profiler.record_function((phase.name + '_opt')):
if (len(losses) > 0):
if (phase.scaler is not None):
phase.scaler.unscale_(phase.opt)
all_grads = []
for param in phase.module.parameters():
if (param.grad is not None):
misc.nan_to_num(param.grad, nan=0, posinf=100000.0, neginf=(- 100000.0), out=param.grad)
all_grads += [torch.norm(param.grad.detach(), p=2)]
grad_norm = torch.stack(all_grads).norm(p=2)
if (phase.scaler is not None):
phase.scaler.step(phase.opt)
phase.scaler.update()
training_stats.report(f'Scaler/{phase.name}', phase.scaler.get_scale())
else:
phase.opt.step()
training_stats.report(f'Gradient/{phase.name}', grad_norm)
if (phase.end_event is not None):
phase.end_event.record(torch.cuda.current_stream(device))
with torch.autograd.profiler.record_function('Gema'):
ema_nimg = (ema_kimg * 1000)
if (ema_rampup is not None):
ema_nimg = min(ema_nimg, (cur_nimg * ema_rampup))
ema_beta = (0.5 ** (batch_size / max(ema_nimg, 1e-08)))
for (p_ema, p) in zip(G_ema.parameters(), G.parameters()):
p_ema.copy_(p.lerp(p_ema, ema_beta))
for (b_ema, b) in zip(G_ema.buffers(), G.buffers()):
b_ema.copy_(b)
if disc_enable_ema:
for (p_ema, p) in zip(D_ema.parameters(), D.parameters()):
p_ema.copy_(p.lerp(p_ema, ema_beta))
for (b_ema, b) in zip(D_ema.buffers(), D.buffers()):
b_ema.copy_(b)
cur_nimg += batch_size
batch_idx += 1
cur_iter += 1
if ((ada_stats is not None) and ((batch_idx % ada_interval) == 0)):
ada_stats.update()
adjust = ((np.sign((ada_stats['Loss/signs/real'] - ada_target)) * (batch_size * ada_interval)) / (ada_kimg * 1000))
augment_pipe.p.copy_((augment_pipe.p + adjust).max(misc.constant(0, device=device)))
done = (cur_nimg >= (total_kimg * 1000))
if ((not done) and (cur_tick != 0) and (cur_nimg < (tick_start_nimg + (kimg_per_tick * 1000)))):
continue
tick_end_time = time.time()
fields = [f'[{run_dir}]:']
fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
fields += [f"kimg {training_stats.report0('Progress/kimg', (cur_nimg / 1000.0)):<8.1f}"]
if (loss.alpha is not None):
fields += [f"alpha {training_stats.report0('Progress/alpha', loss.alpha):<8.5f}"]
fields += [f"res {training_stats.report0('Progress/res', loss.resolution):<5d}"]
fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', (tick_end_time - start_time))):<12s}"]
fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', (tick_end_time - tick_start_time)):<7.1f}"]
fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (((tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg)) * 1000.0)):<7.2f}"]
fields += [f"maintenance {training_stats.report0('Timing/maintain_sec', maintenance_time):<5.1f}"]
fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', (psutil.Process(os.getpid()).memory_info().rss / (2 ** 30))):<6.2f}"]
fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', (torch.cuda.max_memory_allocated(device) / (2 ** 30))):<6.2f}"]
torch.cuda.reset_peak_memory_stats()
fields += [f"augment {training_stats.report0('Progress/augment', (float(augment_pipe.p.cpu()) if (augment_pipe is not None) else 0)):.3f}"]
training_stats.report0('Timing/total_hours', ((tick_end_time - start_time) / (60 * 60)))
training_stats.report0('Timing/total_days', ((tick_end_time - start_time) / ((24 * 60) * 60)))
if (rank == 0):
print(' '.join(fields))
if ((not done) and (abort_fn is not None) and abort_fn()):
done = True
if (rank == 0):
print()
print('Aborting...')
if (hasattr(training_set_sampler, 'update_dataset_cameras') and (((cur_tick % update_cam_prior_ticks) == 0) and (cur_tick > 0))):
training_set_sampler.update_dataset_cameras(D.get_estimated_camera)
if ((rank == 0) and (image_snapshot_ticks is not None) and (done or ((cur_tick % image_snapshot_ticks) == 0))):
with torch.no_grad():
images = torch.cat([G_ema.get_final_output(z=z, c=c, noise_mode='const', img=None).cpu() for (z, c, img) in zip(grid_z, grid_c, grid_i)]).numpy()
save_image_grid(images, os.path.join(img_dir, f'fakes{cur_iter:06d}.png'), drange=[(- 1), 1], grid_size=grid_size)
wandb.log({f'fakes{cur_iter:06d}.png': wandb.Image(image_grid(images, drange=[(- 1), 1], grid_size=grid_size))})
snapshot_pkl = None
snapshot_data = None
if ((network_snapshot_ticks is not None) and (done or ((cur_tick % network_snapshot_ticks) == 0))):
snapshot_data = dict(training_set_kwargs=dict(training_set_kwargs))
modules = [('G', G), ('D', D), ('G_ema', G_ema), ('augment_pipe', augment_pipe)]
if disc_enable_ema:
modules += [('D_ema', D_ema)]
for (name, module) in modules:
if (module is not None):
module = copy.deepcopy(module).eval().requires_grad_(False).cpu()
snapshot_data[name] = module
del module
snapshot_pkl = os.path.join(run_dir, f'network-snapshot-{(cur_iter // 1000):06d}k.pkl')
if (rank == 0):
with open(snapshot_pkl, 'wb') as f:
pickle.dump(snapshot_data, f)
shutil.copy(snapshot_pkl, os.path.join(run_dir, 'latest-network-snapshot.pkl'))
if ((snapshot_data is not None) and (len(metrics) > 0) and (cur_tick > 1)):
if (rank == 0):
print('Evaluating metrics...')
for metric in metrics:
result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G_ema'], dataset_kwargs=training_set_kwargs, num_gpus=world_size, rank=rank, device=device)
if (rank == 0):
metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl)
stats_metrics.update(result_dict.results)
del snapshot_data
for phase in phases:
value = []
if ((phase.start_event is not None) and (phase.end_event is not None)):
phase.end_event.synchronize()
value = phase.start_event.elapsed_time(phase.end_event)
training_stats.report0(('Timing/' + phase.name), value)
stats_collector.update()
stats_dict = stats_collector.as_dict()
timestamp = time.time()
if (stats_jsonl is not None):
fields = dict(stats_dict, timestamp=timestamp)
stats_jsonl.write((json.dumps(fields) + '\n'))
stats_jsonl.flush()
if (rank == 0):
losses = [(key, fields[key]) for key in fields if ('Loss/' in key)]
losses = ['{}: {:.4f}'.format(key[5:], loss['mean']) for (key, loss) in losses]
print('\t'.join(losses))
if (stats_tfevents is not None):
global_step = int((cur_nimg / 1000.0))
walltime = (timestamp - start_time)
for (name, value) in stats_dict.items():
stats_tfevents.add_scalar(name, value.mean, global_step=global_step, walltime=walltime)
for (name, value) in stats_metrics.items():
stats_tfevents.add_scalar(f'Metrics/{name}', value, global_step=global_step, walltime=walltime)
stats_tfevents.flush()
if (progress_fn is not None):
progress_fn((cur_nimg // 1000), total_kimg)
cur_tick += 1
tick_start_nimg = cur_nimg
tick_start_time = time.time()
maintenance_time = (tick_start_time - tick_end_time)
if done:
break
if (rank == 0):
print()
print('Exiting...') |
# NOTE(review): the bare call below looks like a decorator whose '@' (and
# possibly part of its name) was lost in extraction -- confirm against the
# original test file (dace FPGA tests use an `@fpga_test(...)` decorator).
_test(assert_ii_1=False)
def test_fusion_with_transient_fpga():
    # Fuse the maps of `fusion_with_transient`, lower the SDFG to an FPGA
    # kernel, run it, and verify numerically.
    A = np.random.rand(2, 20)
    # Reference must be computed BEFORE running the SDFG: the program
    # overwrites A in place.
    expected = ((A * A) * 2)
    sdfg = fusion_with_transient.to_sdfg()
    sdfg.simplify()
    # After simplification at least two map pairs should be fusable, and
    # exactly one FPGA transformation should apply to the whole SDFG.
    assert (sdfg.apply_transformations_repeated(MapFusion) >= 2)
    assert (sdfg.apply_transformations_repeated(FPGATransformSDFG) == 1)
    sdfg(A=A)
    assert np.allclose(A, expected)
    return sdfg
def P9():
    """Return the binary matroid P9 on ground set 'abcdefghi'.

    The matroid is given by a reduced GF(2) representation matrix and is
    renamed so its repr is prefixed with 'P9: '.
    """
    representation = Matrix(GF(2), [[1, 0, 0, 0, 1, 0, 0, 1, 1], [0, 1, 0, 0, 1, 1, 0, 0, 1], [0, 0, 1, 0, 0, 1, 1, 0, 1], [0, 0, 0, 1, 0, 0, 1, 1, 0]])
    matroid = BinaryMatroid(representation, 'abcdefghi')
    matroid.rename('P9: ' + repr(matroid))
    return matroid
class HaydnOp20Dataset(RemoteFolderDataset):
    """Haydn Op. 20 string-quartet dataset with harmonic (Roman-numeral)
    analysis annotations, distributed as annotated Humdrum files."""
    _info = DatasetInfo(_NAME, _DESCRIPTION, _HOMEPAGE)
    _citation = _CITATION
    # NOTE(review): the 'url' value in the original source was corrupted
    # ("'url': ' 'archive': True" is not valid Python).  Restored from the
    # upstream haydn_op20_harm v1.3 release -- confirm URL and checksums
    # against the project before shipping.
    _sources = {'haydn': {'filename': 'haydnop20v1.3_annotated.zip', 'url': 'https://github.com/napulen/haydn_op20_harm/releases/download/v1.3/haydnop20v1.3_annotated.zip', 'archive': True, 'size': 130954, 'md5': '1c65c8da312e1c9dda681d0496bf527f', 'sha256': '96986cccebfd37a36cc97a2fc0ebcfbe22d5136e622b21e04ea125d589f5073b'}}
    _extension = 'hrm'

    def read(self, filename: Union[(str, Path)]) -> Music:
        """Parse one annotated Humdrum file into a Music object.

        The RomanNumeral elements are extracted as annotations and removed
        from the score before conversion, so they do not pollute the notes.
        """
        score = music21.converter.parse(filename, format='humdrum')
        roman_numerals = list(score.flat.getElementsByClass('RomanNumeral'))
        annotations = get_annotations(roman_numerals)
        score.remove(roman_numerals, recurse=True)
        music = from_music21_score(score)
        music.annotations = annotations
        return music
def sample_gaussian(mean, std):
    """Reparameterized Gaussian sample: mean + std * eps, with eps drawn by
    `gaussian_noise` and moved to std's device/dtype."""
    noise = gaussian_noise(std.size(0), std.size(1)).to(std)
    return mean + std * noise
def train_controller(xloader, network, criterion, optimizer, prev_baseline, epoch_str, print_freq, logger, normalizer):
    """Train the architecture controller with REINFORCE for one epoch.

    Samples architectures from ``network.controller``, scores each sample on
    one validation batch (regression loss turned into a pseudo-accuracy), and
    updates the controller with a policy gradient whose baseline is an
    exponential moving average of the reward.

    Returns:
        Tuple of (avg loss, avg pseudo-accuracy, avg baseline, avg reward).
    """
    (data_time, batch_time) = (AverageMeter(), AverageMeter())
    (GradnormMeter, LossMeter, ValAccMeter, EntropyMeter, BaselineMeter, RewardMeter, xend) = (AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), time.time())
    # ENAS-style hyper-parameters: gradients are aggregated over 20 sampled
    # architectures before each optimizer step.
    controller_num_aggregate = 20
    controller_train_steps = 50
    controller_bl_dec = 0.99
    controller_entropy_weight = 0.0001
    network.eval()
    network.controller.train()
    network.controller.zero_grad()
    loader_iter = iter(xloader)
    for step in range((controller_train_steps * controller_num_aggregate)):
        try:
            (inputs, targets) = next(loader_iter)
        except StopIteration:
            # Restart the loader only when it is exhausted.  The original bare
            # `except:` silently swallowed every error, including Ctrl-C.
            loader_iter = iter(xloader)
            (inputs, targets) = next(loader_iter)
        inputs = inputs.cuda(non_blocking=True)
        targets = targets.cuda(non_blocking=True)
        data_time.update((time.time() - xend))
        (log_prob, entropy, sampled_arch) = network.controller()
        with torch.no_grad():
            # Evaluate the sampled architecture without tracking gradients.
            network.set_cal_mode('dynamic', sampled_arch)
            (_, logits) = network(inputs)
            logits = logits.squeeze()
            logits = normalizer.decode(logits)
            targets = normalizer.decode(targets)
            model_loss = criterion(logits.view(logits.size(0), (- 1)), targets.view(targets.size(0), (- 1)))
            # Regression surrogate for accuracy: higher is better.
            # (The unused val_top5 placeholder from the original was dropped.)
            val_top1 = (1 - (model_loss / logits.size(0)))
        reward = (val_top1 + (controller_entropy_weight * entropy))
        if (prev_baseline is None):
            baseline = val_top1
        else:
            # Exponential moving-average baseline reduces gradient variance.
            baseline = (prev_baseline - ((1 - controller_bl_dec) * (prev_baseline - reward)))
        loss = (((- 1) * log_prob) * (reward - baseline))
        RewardMeter.update(reward.item())
        BaselineMeter.update(baseline.item())
        ValAccMeter.update((val_top1.item() * 100))
        LossMeter.update(loss.item())
        EntropyMeter.update(entropy.item())
        # Scale so the aggregated gradient equals the mean over the group.
        loss = (loss / controller_num_aggregate)
        loss.backward(retain_graph=True)
        batch_time.update((time.time() - xend))
        xend = time.time()
        if (((step + 1) % controller_num_aggregate) == 0):
            # One clipped optimizer step per aggregated group of samples.
            grad_norm = torch.nn.utils.clip_grad_norm_(network.controller.parameters(), 5.0)
            GradnormMeter.update(grad_norm)
            optimizer.step()
            network.controller.zero_grad()
        if ((step % print_freq) == 0):
            Sstr = (('*Train-Controller* ' + time_string()) + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, (controller_train_steps * controller_num_aggregate)))
            Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
            Wstr = '[Loss {loss.val:.3f} ({loss.avg:.3f}) {top1.val:.2f} ({top1.avg:.2f}) Reward {reward.val:.2f} ({reward.avg:.2f})] Baseline {basel.val:.2f} ({basel.avg:.2f})'.format(loss=LossMeter, top1=ValAccMeter, reward=RewardMeter, basel=BaselineMeter)
            Estr = 'Entropy={:.4f} ({:.4f})'.format(EntropyMeter.val, EntropyMeter.avg)
            logger.log(((((((Sstr + ' ') + Tstr) + ' ') + Wstr) + ' ') + Estr))
    return (LossMeter.avg, ValAccMeter.avg, BaselineMeter.avg, RewardMeter.avg)
class Video(object):
    """One tracking-benchmark video: frame file paths, ground-truth boxes,
    and per-tracker predicted trajectories."""

    def __init__(self, name, root, video_dir, init_rect, img_names, gt_rect, attr, load_img=False):
        """
        Args:
            name: video name.
            root: dataset root directory.
            video_dir: this video's directory below ``root``.
            init_rect: initial bounding box.
            img_names: frame file names relative to the dataset root (or to
                ``root/video_dir`` for the ITB dataset).
            gt_rect: ground-truth trajectory, one box per frame.
            attr: attribute tags of the video.
            load_img: if True, eagerly read every frame into memory.
        """
        self.name = name
        self.video_dir = video_dir
        self.init_rect = init_rect
        self.gt_traj = gt_rect
        self.attr = attr
        self.pred_trajs = {}
        self.imgs = None
        # ITB keeps frames below the per-video directory; other datasets store
        # paths relative to the dataset root.  (The original code also built
        # img_names unconditionally right before this branch -- that dead
        # assignment, immediately overwritten here, was removed.)
        if ('ITB' in root):
            self.img_names = [os.path.join(root, video_dir, x) for x in img_names]
        else:
            self.img_names = [os.path.join(root, x) for x in img_names]
        if load_img:
            self.imgs = [cv2.imread(x) for x in self.img_names]
            self.width = self.imgs[0].shape[1]
            self.height = self.imgs[0].shape[0]
        else:
            # Read a single frame only to record the video resolution.
            img = cv2.imread(self.img_names[0])
            assert (img is not None), self.img_names[0]
            self.width = img.shape[1]
            self.height = img.shape[0]

    def load_tracker(self, path, tracker_names=None, store=True):
        """Load predicted trajectories from ``path/<tracker>/<video>.txt``.

        Args:
            path: results root (may be a glob pattern when tracker_names is
                not given).
            tracker_names: tracker name or list of names; if falsy, every
                directory matching ``path`` is used.
            store: if True cache trajectories on ``self.pred_trajs``;
                otherwise return the last loaded trajectory.
        """
        if (not tracker_names):
            tracker_names = [x.split('/')[(- 1)] for x in glob(path) if os.path.isdir(x)]
        if isinstance(tracker_names, str):
            tracker_names = [tracker_names]
        for name in tracker_names:
            traj_file = os.path.join(path, name, (self.name + '.txt'))
            if os.path.exists(traj_file):
                with open(traj_file, 'r') as f:
                    pred_traj = [list(map(float, x.strip().split(','))) for x in f.readlines()]
                    # Warn (by printing) when prediction and ground truth
                    # lengths disagree; the trajectory is still kept.
                    if (len(pred_traj) != len(self.gt_traj)):
                        print(name, len(pred_traj), len(self.gt_traj), self.name)
                    if store:
                        self.pred_trajs[name] = pred_traj
                    else:
                        return pred_traj
            else:
                print(traj_file)
        self.tracker_names = list(self.pred_trajs.keys())

    def load_img(self):
        """Lazily read all frames into memory (no-op if already loaded)."""
        if (self.imgs is None):
            self.imgs = [cv2.imread(x) for x in self.img_names]
            self.width = self.imgs[0].shape[1]
            self.height = self.imgs[0].shape[0]

    def free_img(self):
        """Release the in-memory frames."""
        self.imgs = None

    def __len__(self):
        return len(self.img_names)

    def __getitem__(self, idx):
        """Return (frame, ground-truth box) at ``idx``; frames are read from
        disk when not cached."""
        if (self.imgs is None):
            return (cv2.imread(self.img_names[idx]), self.gt_traj[idx])
        else:
            return (self.imgs[idx], self.gt_traj[idx])

    def __iter__(self):
        """Yield (frame, ground-truth box) pairs in frame order."""
        for i in range(len(self.img_names)):
            if (self.imgs is not None):
                (yield (self.imgs[i], self.gt_traj[i]))
            else:
                (yield (cv2.imread(self.img_names[i]), self.gt_traj[i]))

    def draw_box(self, roi, img, linewidth, color, name=None):
        """Draw one region (polygon when >6 coords, rectangle when 4) on
        ``img``; optionally label it with ``name``."""
        if ((len(roi) > 6) and ((len(roi) % 2) == 0)):
            pts = np.array(roi, np.int32).reshape((- 1), 1, 2)
            color = tuple(map(int, color))
            img = cv2.polylines(img, [pts], True, color, linewidth)
            pt = (pts[(0, 0, 0)], (pts[(0, 0, 1)] - 5))
            if name:
                img = cv2.putText(img, name, pt, cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, color, 1)
        elif (len(roi) == 4):
            # NaN boxes (missing annotations) are skipped silently.
            if (not np.isnan(roi[0])):
                roi = list(map(int, roi))
                color = tuple(map(int, color))
                img = cv2.rectangle(img, (roi[0], roi[1]), ((roi[0] + roi[2]), (roi[1] + roi[3])), color, linewidth)
                if name:
                    img = cv2.putText(img, name, (roi[0], (roi[1] - 5)), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, color, 1)
        return img

    def show(self, pred_trajs={}, linewidth=2, show_name=False):
        """Display the video with ground truth (green) and predictions.

        NOTE(review): relies on ``self.start_frame``/``self.end_frame``, which
        this class never sets -- presumably defined by subclasses; confirm
        before calling on a bare Video.
        """
        assert (self.imgs is not None)
        video = []
        cv2.namedWindow(self.name, cv2.WINDOW_NORMAL)
        colors = {}
        if ((len(pred_trajs) == 0) and (len(self.pred_trajs) > 0)):
            pred_trajs = self.pred_trajs
        for (i, (roi, img)) in enumerate(zip(self.gt_traj, self.imgs[self.start_frame:(self.end_frame + 1)])):
            img = img.copy()
            if (len(img.shape) == 2):
                img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
            else:
                img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            img = self.draw_box(roi, img, linewidth, (0, 255, 0), ('gt' if show_name else None))
            for (name, trajs) in pred_trajs.items():
                if (name not in colors):
                    color = tuple(np.random.randint(0, 256, 3))
                    colors[name] = color
                else:
                    color = colors[name]
                img = self.draw_box(trajs[0][i], img, linewidth, color, (name if show_name else None))
            cv2.putText(img, str((i + self.start_frame)), (5, 20), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 0), 2)
            cv2.imshow(self.name, img)
            cv2.waitKey(40)
            video.append(img.copy())
        return video
def formatted_holistic_pose(width: int, height: int, additional_face_points: int=0):
    """Build an empty single-frame, single-person holistic Pose template.

    The body data and confidences are all zeros; the returned pose is
    restricted to the body, face (contours only) and both hand components.
    """
    dims = PoseHeaderDimensions(width=width, height=height, depth=1000)
    header = PoseHeader(version=0.1, dimensions=dims, components=holistic_components('XYZC', additional_face_points))
    point_count = header.total_points()
    body = NumPyPoseBody(fps=0,
                         data=np.zeros(shape=(1, 1, point_count, 3)),
                         confidence=np.zeros(shape=(1, 1, point_count)))
    wanted_components = ['POSE_LANDMARKS', 'FACE_LANDMARKS', 'LEFT_HAND_LANDMARKS', 'RIGHT_HAND_LANDMARKS']
    return Pose(header, body).get_components(wanted_components, {'FACE_LANDMARKS': FACEMESH_CONTOURS_POINTS})
class DenseNet161(nn.Module):
    """DenseNet-161 feature extractor: forward hooks capture five
    intermediate activations instead of the classification output."""

    def __init__(self):
        super(DenseNet161, self).__init__()
        self.net = timm.create_model('densenet161', pretrained=True)
        # Strip pooling/classifier so the backbone is a pure feature trunk.
        self.net.global_pool = nn.Identity()
        self.net.classifier = nn.Identity()
        self._handles = []
        self._features = {}
        # Hook the stem plus the dense blocks, shallowest to deepest.
        for layer_name, feature_idx in (('conv0', 0), ('denseblock1', 1),
                                        ('denseblock2', 2), ('denseblock3', 3),
                                        ('norm5', 4)):
            self.create_hook(layer_name, feature_idx)

    def create_hook(self, layername, activation_name):
        """Register a forward hook on ``net.features.<layername>`` that stores
        the layer output under ``activation_name``."""
        def hook(module, input, output):
            self._features[activation_name] = output
            return output
        layer = getattr(self.net.features, layername)
        self._handles.append(layer.register_forward_hook(hook))

    def remove_handles(self):
        """Detach all registered hooks."""
        for handle in self._handles:
            handle.remove()

    def forward(self, x):
        """Run the backbone and return the five captured feature maps."""
        self.net(x)
        return [self._features[i] for i in range(5)]
def isogeny_degrees_cm(E, verbose=False):
    """Return the primes that can be degrees of rational isogenies from the
    CM elliptic curve E, by collecting candidate primes (ramified, split,
    inert, class-group generators) and filtering them with a Frobenius test.
    """
    if (not E.has_cm()):
        raise ValueError('possible_isogeny_degrees_cm(E) requires E to be an elliptic curve with CM')
    d = E.cm_discriminant()
    if verbose:
        print(('CM case, discriminant = %s' % d))
    from sage.libs.pari.all import pari
    from sage.sets.set import Set
    from sage.arith.misc import kronecker as kronecker_symbol
    # n bounds the order of the image of Galois in the normalizer of the
    # Cartan; it is adjusted for non-rational CM and the extra units of
    # Q(i) (d=-4) and Q(zeta_3) (d=-3).
    n = E.base_field().absolute_degree()
    if (not E.has_rational_cm()):
        n *= 2
    if (d == (- 4)):
        n *= 2
    if (d == (- 3)):
        n *= 3
    divs = n.divisors()
    # Class group data of the order of discriminant d via PARI: h is the
    # class number.
    data = pari(d).quadclassunit()
    h = data[0].sage()
    n_over_2h = (n // (2 * h))
    # Always include 2 (and 3 when d = -3) as candidate primes.
    L = (Set([ZZ(2), ZZ(3)]) if (d == (- 3)) else Set([ZZ(2)]))
    if verbose:
        print(('initial primes: %s' % L))
    # Odd primes ramified in the CM field.
    ram_l = d.odd_part().prime_factors()
    if (not E.has_rational_cm()):
        L1 = Set(ram_l)
        L += L1
        if verbose:
            print(('ramified primes: %s' % L1))
    else:
        # "Upward" primes: ramified primes where the discriminant valuation
        # exceeds 1.
        L1 = Set([l for l in ram_l if (d.valuation(l) > 1)])
        L += L1
        if verbose:
            print(('upward primes: %s' % L1))
        # "Downward" ramified primes dividing n/(2h).
        L1 = Set([l for l in ram_l if l.divides(n_over_2h)])
        L += L1
        if verbose:
            print(('downward ramified primes: %s' % L1))
    # Split primes l with l-1 dividing n.
    L1 = Set([(lm1 + 1) for lm1 in divs if ((lm1 + 1).is_prime() and (kronecker_symbol(d, (lm1 + 1)) == (+ 1)))])
    L += L1
    if verbose:
        print(('downward split primes: %s' % L1))
    # Inert primes l with l+1 dividing n.
    L1 = Set([(lp1 - 1) for lp1 in divs if ((lp1 - 1).is_prime() and (kronecker_symbol(d, (lp1 - 1)) == (- 1)))])
    L += L1
    if verbose:
        print(('downward inert primes: %s' % L1))
    if E.has_rational_cm():
        # Primes represented by the forms generating the class group.
        from sage.quadratic_forms.binary_qf import BinaryQF
        Qs = [BinaryQF(list(q)) for q in data[2]]
        L1 = [Q.small_prime_value() for Q in Qs]
        if verbose:
            print(('primes generating the class group: %s' % L1))
        L += Set(L1)
    if verbose:
        print(('Set of primes before filtering: %s' % L))
    # Discard candidates ruled out by Frobenius conditions.
    from .gal_reps_number_field import Frobenius_filter
    L = Frobenius_filter(E, sorted(L))
    if verbose:
        print(('List of primes after filtering: %s' % L))
    return L
class Observation(object):
    """A single-timestep robot observation: camera images/point clouds for
    five viewpoints plus proprioceptive and task state."""

    def __init__(self, left_shoulder_rgb: np.ndarray, left_shoulder_depth: np.ndarray, left_shoulder_mask: np.ndarray, left_shoulder_point_cloud: np.ndarray, right_shoulder_rgb: np.ndarray, right_shoulder_depth: np.ndarray, right_shoulder_mask: np.ndarray, right_shoulder_point_cloud: np.ndarray, overhead_rgb: np.ndarray, overhead_depth: np.ndarray, overhead_mask: np.ndarray, overhead_point_cloud: np.ndarray, wrist_rgb: np.ndarray, wrist_depth: np.ndarray, wrist_mask: np.ndarray, wrist_point_cloud: np.ndarray, front_rgb: np.ndarray, front_depth: np.ndarray, front_mask: np.ndarray, front_point_cloud: np.ndarray, joint_velocities: np.ndarray, joint_positions: np.ndarray, joint_forces: np.ndarray, gripper_open: float, gripper_pose: np.ndarray, gripper_matrix: np.ndarray, gripper_joint_positions: np.ndarray, gripper_touch_forces: np.ndarray, task_low_dim_state: np.ndarray, misc: dict):
        """Store every observation field verbatim as an attribute with the
        same name as the parameter."""
        fields = dict(locals())
        del fields['self']
        for attr_name, attr_value in fields.items():
            setattr(self, attr_name, attr_value)

    def get_low_dim_data(self) -> np.ndarray:
        """Concatenate the available low-dimensional fields into one flat
        1-D array; fields that are None are skipped, and an empty array is
        returned when nothing is available."""
        chunks = [] if (self.gripper_open is None) else [[self.gripper_open]]
        optional_fields = (self.joint_velocities, self.joint_positions,
                           self.joint_forces, self.gripper_pose,
                           self.gripper_joint_positions,
                           self.gripper_touch_forces,
                           self.task_low_dim_state)
        chunks.extend(field for field in optional_fields if field is not None)
        return np.concatenate(chunks) if chunks else np.array([])
# NOTE(review): the original had `_BODY.register('pfpn')` as a bare statement
# immediately before the class -- a decorator whose '@' was lost, so the class
# was never actually registered.  Restored as a decorator; confirm that
# _BODY.register(name) is the decorator-factory form used elsewhere.
@_BODY.register('pfpn')
class PFPN(nn.Module):
    """Panoptic FPN body: upsamples four pyramid levels to a common scale
    and sums them into a single panoptic feature map."""

    def __init__(self, cfg, dim_in, spatial_in):
        super().__init__()
        panoptic_dim = cfg.FPN.PANOPTIC.CONV_DIM
        norm = cfg.FPN.PANOPTIC.NORM
        self.spatial_in = spatial_in
        self.use_fpn = cfg.FPN.PANOPTIC.USE_FPN
        # Optionally build an FPN neck; its output dims then replace the raw
        # backbone dims.  (The original repeated `if self.use_fpn:` twice.)
        if self.use_fpn:
            self.fpn = FPN(cfg, dim_in, self.spatial_in)
            self.dim_in = self.fpn.dim_out
        else:
            self.dim_in = dim_in
        # One upsampling tower per level: deeper levels need more upsampling
        # stages (3, 2, 1, 0) to reach the finest resolution.
        self.scale1_block = panoptic_upsampler_block(self.dim_in[3], panoptic_dim, 3, norm=norm)
        self.scale2_block = panoptic_upsampler_block(self.dim_in[2], panoptic_dim, 2, norm=norm)
        self.scale3_block = panoptic_upsampler_block(self.dim_in[1], panoptic_dim, 1, norm=norm)
        self.scale4_block = panoptic_upsampler_block(self.dim_in[0], panoptic_dim, 0, norm=norm)
        self.dim_out = [panoptic_dim]
        if self.use_fpn:
            self.spatial_out = self.fpn.spatial_out[:1]
        else:
            self.spatial_out = spatial_in[:1]

    def forward(self, x):
        """Fuse the four (optionally FPN-refined) levels by elementwise sum
        at the finest scale; returns a single-element list."""
        if self.use_fpn:
            x = self.fpn(x)
        x1 = self.scale1_block(x[3])
        x2 = self.scale2_block(x[2])
        x3 = self.scale3_block(x[1])
        x4 = self.scale4_block(x[0])
        x = (((x1 + x2) + x3) + x4)
        return [x]
class Literal(Token):
    """Token that matches an exact literal string (pyparsing-style)."""

    def __init__(self, matchString):
        super(Literal, self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            # Cache the first character for a fast rejection test in parseImpl.
            self.firstMatchChar = matchString[0]
        except IndexError:
            # An empty literal can never match anything; degrade this instance
            # into an Empty token (note the deliberate __class__ swap).
            warnings.warn('null string passed to Literal; use Empty() instead', SyntaxWarning, stacklevel=2)
            self.__class__ = Empty
        self.name = ('"%s"' % _ustr(self.match))
        self.errmsg = ('Expected ' + self.name)
        self.mayReturnEmpty = False
        self.mayIndexError = False

    def parseImpl(self, instring, loc, doActions=True):
        # Check the first character before the (more expensive) startswith;
        # single-character literals skip startswith entirely.
        if ((instring[loc] == self.firstMatchChar) and ((self.matchLen == 1) or instring.startswith(self.match, loc))):
            return ((loc + self.matchLen), self.match)
        raise ParseException(instring, loc, self.errmsg, self)
def construction_3_4(k, n, m, r, s, explain_construction=False):
    """Return an orthogonal array via Construction 3.4 of Abel & Cavenagh
    (Journal of Combinatorial Designs, 2007); with explain_construction=True
    return a citation string instead.
    """
    if explain_construction:
        return (((('Construction 3.4 with n={},m={},r={},s={} from:\n' + '    Julian R. Abel, Nicholas Cavenagh\n') + '    Concerning eight mutually orthogonal latin squares,\n') + '    Vol. 15, n.3, pp. 255-261,\n') + '    Journal of Combinatorial Designs, 2007').format(n, m, r, s)
    from .orthogonal_arrays import wilson_construction, OA_relabel
    assert (s < n)
    # Start from a master OA(k+r+1, n) and relabel so that chosen symbols of
    # the last r+1 columns become 0 / small values (truncation pattern).
    master_design = orthogonal_array(((k + r) + 1), n)
    matrix = ((([list(range(n))] * k) + ([([None] * n)] * r)) + [([None] * n)])
    B0 = master_design[0]
    for i in range(k, (k + r)):
        matrix[i][B0[i]] = 0
    # The last column is truncated to s symbols; which s symbols are kept
    # depends on which smaller OA exists.
    if orthogonal_array(k, (m + r), existence=True):
        last_group = [x for x in range((s + 1)) if (x != B0[(- 1)])][:s]
    elif orthogonal_array(k, ((m + r) + 1), existence=True):
        last_group = ([x for x in range((s + 1)) if (x != B0[(- 1)])][:(s - 1)] + [B0[(- 1)]])
    else:
        raise RuntimeError
    for (i, x) in enumerate(last_group):
        matrix[(- 1)][x] = i
    OA = OA_relabel(master_design, ((k + r) + 1), n, matrix=matrix)
    # Finish with Wilson's construction over groups of sizes 1^r and s.
    OA = wilson_construction(OA, k, n, m, (([1] * r) + [s]), check=False)
    return OA
def display_results(df, sorted_cols=None, max_num=1):
    """Return *df* filtered to the top ``max_num`` rows per group and sorted.

    Args:
        df: results DataFrame.
        sorted_cols: column names to sort by; the columns after the first
            three are also used as the grouping/ranking keys for filter_df.
            Defaults to ['data', 'feature', 'type', 'l-val_top1'].
        max_num: rows kept per group by ``filter_df``; ``None`` disables
            filtering.

    Note: the original also rebuilt ``df`` through a no-op column filter
    (``c not in []`` is always true) and used a mutable default argument;
    both were removed without changing behavior.
    """
    if sorted_cols is None:
        sorted_cols = ['data', 'feature', 'type', 'l-val_top1']
    if (max_num is not None):
        df = filter_df(df, sorted_cols[3:], max_num)
    return df.sort_values(sorted_cols).reset_index(drop=True)
def _construct_lookups():
    """Populate the module-level lookup tables (nbytes, _alignment,
    _maxvals, _minvals) keyed by concrete scalar type objects.

    Entries without bound information (len(info) <= 5) get None bounds.
    """
    for type_info in _concrete_typeinfo.values():
        scalar_type = type_info.type
        nbytes[scalar_type] = type_info.bits // 8
        _alignment[scalar_type] = type_info.alignment
        has_bounds = len(type_info) > 5
        _maxvals[scalar_type] = type_info.max if has_bounds else None
        _minvals[scalar_type] = type_info.min if has_bounds else None
def ppo_benchmarks():
    """Run the PPO benchmark suite on the MuJoCo 1M environment set for both
    the PyTorch and TensorFlow garage implementations (in that order)."""
    for ppo_variant in (ppo_garage_pytorch, ppo_garage_tf):
        iterate_experiments(ppo_variant, MuJoCo1M_ENV_SET)
def save(nntagger, args):
    """Persist the tagger: model weights to ``<args.save>.model`` and the
    hyper-parameters/vocabularies to ``<args.save>.model.pickle``.

    Fix over the original: the pickle file handle was opened without ever
    being closed; it is now managed by a ``with`` block.
    """
    import pickle
    outdir = args.save
    modelname = (outdir + '.model')
    nntagger.model.save(modelname)
    # NOTE(review): debug print of the full tag index kept to preserve
    # observable behavior -- consider demoting it to a logger call.
    print(nntagger.task2tag2idx)
    myparams = {'num_words': len(nntagger.w2i), 'num_chars': len(nntagger.c2i), 'tasks_ids': nntagger.tasks_ids, 'w2i': nntagger.w2i, 'c2i': nntagger.c2i, 'task2tag2idx': nntagger.task2tag2idx, 'activation': nntagger.activation, 'in_dim': nntagger.in_dim, 'h_dim': nntagger.h_dim, 'c_in_dim': nntagger.c_in_dim, 'h_layers': nntagger.h_layers, 'embeds_file': nntagger.embeds_file, 'pred_layer': nntagger.pred_layer}
    with open((modelname + '.pickle'), 'wb') as f:
        pickle.dump(myparams, f)
    print('model stored: {}'.format(modelname), file=sys.stderr)
class PrimarySimilarityClassType(Element, metaclass=InheritComparisonClasscallMetaclass):
    """A primary similarity class type (deg, partition): the type of a matrix
    with a single irreducible invariant factor of degree ``deg`` occurring
    with multiplicities given by ``par``."""

    def __classcall_private__(cls, deg, par):
        # Normalize the input and route construction through the parent of
        # the appropriate size deg * |par|.
        par = Partition(par)
        P = PrimarySimilarityClassTypes((par.size() * deg))
        return P(deg, par)

    def __init__(self, parent, deg, par):
        self._deg = deg
        self._par = par
        Element.__init__(self, parent)

    def __repr__(self):
        return ('%s' % ([self._deg, self._par],))

    def __hash__(self):
        # Hash consistent with __eq__ (degree and partition only).
        return (hash(self._deg) ^ hash(tuple(self._par)))

    def __eq__(self, other):
        return (isinstance(other, PrimarySimilarityClassType) and (self.degree() == other.degree()) and (self.partition() == other.partition()))

    def __ne__(self, other):
        return ((not isinstance(other, PrimarySimilarityClassType)) or (self.degree() != other.degree()) or (self.partition() != other.partition()))

    def size(self):
        """Size of the matrices of this type (delegated to the parent)."""
        return self.parent().size()

    def degree(self):
        """Degree of the irreducible polynomial underlying this type."""
        return self._deg

    def partition(self):
        """Multiplicity partition of this type."""
        return Partition(self._par)

    def centralizer_algebra_dim(self):
        """Dimension of the centralizer algebra of a matrix of this type."""
        return (self.degree() * centralizer_algebra_dim(self.partition()))

    # NOTE(review): the bare `_in_parent_method` statements below look like
    # decorators whose '@' was lost in extraction -- confirm against the
    # original source before relying on these methods being wrapped.
    _in_parent_method

    def statistic(self, func, q=None):
        """Evaluate ``func`` on the partition with q replaced by q^deg."""
        if (q is None):
            q = ZZ['q'].gen()
        return q.parent()(func(self.partition()).substitute(q=(q ** self.degree())))

    _in_parent_method

    def centralizer_group_card(self, q=None):
        """Cardinality of the centralizer group, as a function of q."""
        if (q is None):
            q = FractionField(ZZ['q']).gen()
        return self.statistic(centralizer_group_cardinality, q=q)

    def invariant_subspace_generating_function(self, q=None, t=None):
        """Generating function of invariant subspaces, with q and t each
        raised to the degree of this type."""
        if (q is None):
            q = PolynomialRing(QQ, 'q').gen()
        S = q.parent()
        if (t is None):
            t = PolynomialRing(S, 't').gen()
        return invariant_subspace_generating_function(self.partition()).substitute(q=(q ** self.degree()), t=(t ** self.degree()))
def test_runningmeanstd():
    """Incremental RunningMeanStd over three chunks must match the mean and
    variance of the concatenated data, for 1-D and 2-D samples."""
    cases = [(np.random.randn(3), np.random.randn(4), np.random.randn(5)),
             (np.random.randn(3, 2), np.random.randn(4, 2), np.random.randn(5, 2))]
    for chunk_a, chunk_b, chunk_c in cases:
        rms = RunningMeanStd(epsilon=0.0, shape=chunk_a.shape[1:])
        combined = np.concatenate([chunk_a, chunk_b, chunk_c], axis=0)
        expected = [combined.mean(axis=0), combined.var(axis=0)]
        for chunk in (chunk_a, chunk_b, chunk_c):
            rms.update(chunk)
        assert np.allclose(expected, [rms.mean, rms.var])
class StringToLongTensor():
    """Callable transform: tokenize a string and return a LongTensor of token
    ids, padded or truncated to ``max_len`` when given."""

    def __init__(self, tokenizer, max_len=None):
        """
        Args:
            tokenizer: object with ``encode(str) -> list[int]`` and a
                ``padding_idx`` attribute used for right-padding.
            max_len: fixed output length; None keeps the natural length.
        """
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __call__(self, x: str):
        tok_idxs = self.tokenizer.encode(x)
        tok_idxs = torch.LongTensor(tok_idxs)
        num_tokens = tok_idxs.size(0)
        if ((self.max_len is not None) and (num_tokens < self.max_len)):
            len_diff = (self.max_len - num_tokens)
            # Fix: the original used a bare `LongTensor` here, inconsistent
            # with the qualified `torch.LongTensor` above (NameError unless
            # separately imported).
            padding = torch.LongTensor(([self.tokenizer.padding_idx] * len_diff))
            tok_idxs = torch.cat([tok_idxs, padding])
        elif ((self.max_len is not None) and (num_tokens > self.max_len)):
            tok_idxs = tok_idxs[:self.max_len]
        return tok_idxs
class SafeRepresenter(BaseRepresenter):
    """Representer for the 'safe' subset of YAML: maps Python scalars and
    containers onto standard YAML tags (PyYAML-style)."""

    def ignore_aliases(self, data):
        # Immutable scalars and the empty tuple never need anchors/aliases.
        if (data is None):
            return True
        if (isinstance(data, tuple) and (data == ())):
            return True
        if isinstance(data, (str, bytes, bool, int, float)):
            return True

    def represent_none(self, data):
        return self.represent_scalar('tag:yaml.org,2002:null', 'null')

    def represent_str(self, data):
        return self.represent_scalar('tag:yaml.org,2002:str', data)

    def represent_binary(self, data):
        # encodestring is the deprecated pre-3.1 spelling of encodebytes;
        # keep the fallback for very old interpreters.
        if hasattr(base64, 'encodebytes'):
            data = base64.encodebytes(data).decode('ascii')
        else:
            data = base64.encodestring(data).decode('ascii')
        return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')

    def represent_bool(self, data):
        if data:
            value = 'true'
        else:
            value = 'false'
        return self.represent_scalar('tag:yaml.org,2002:bool', value)

    def represent_int(self, data):
        return self.represent_scalar('tag:yaml.org,2002:int', str(data))

    # Compute a platform-independent float infinity by repeated squaring,
    # starting from a value guaranteed to overflow eventually.
    inf_value = 1e+300
    while (repr(inf_value) != repr((inf_value * inf_value))):
        inf_value *= inf_value

    def represent_float(self, data):
        # `data != data` detects NaN; the `data == 0.0 and data == 1.0`
        # clause is the upstream PyYAML guard for platforms where NaN
        # compares equal to everything -- intentional, do not simplify.
        if ((data != data) or ((data == 0.0) and (data == 1.0))):
            value = '.nan'
        elif (data == self.inf_value):
            value = '.inf'
        elif (data == (- self.inf_value)):
            value = '-.inf'
        else:
            value = repr(data).lower()
            # YAML requires a decimal point in exponential notation
            # (e.g. 1e10 -> 1.0e10).
            if (('.' not in value) and ('e' in value)):
                value = value.replace('e', '.0e', 1)
        return self.represent_scalar('tag:yaml.org,2002:float', value)

    def represent_list(self, data):
        return self.represent_sequence('tag:yaml.org,2002:seq', data)

    def represent_dict(self, data):
        return self.represent_mapping('tag:yaml.org,2002:map', data)

    def represent_set(self, data):
        # A YAML set is a mapping whose values are all null.
        value = {}
        for key in data:
            value[key] = None
        return self.represent_mapping('tag:yaml.org,2002:set', value)

    def represent_date(self, data):
        value = data.isoformat()
        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)

    def represent_datetime(self, data):
        value = data.isoformat(' ')
        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)

    def represent_yaml_object(self, tag, data, cls, flow_style=None):
        # Serialize an object's state (via __getstate__ or __dict__) as a map.
        if hasattr(data, '__getstate__'):
            state = data.__getstate__()
        else:
            state = data.__dict__.copy()
        return self.represent_mapping(tag, state, flow_style=flow_style)

    def represent_undefined(self, data):
        raise RepresenterError('cannot represent an object', data)
class Attention(torch.nn.Module):
    """Multi-head self-attention over 2-D feature maps, with 1x1 Conv+BN
    projections for Q/K/V and a Conv+BN output projection."""

    def __init__(self, dim, key_dim, num_heads, attn_ratio=4, activation=None, norm_cfg=dict(type='BN', requires_grad=True)):
        super().__init__()
        self.num_heads = num_heads
        # NOTE(review): self.scale is computed but never applied in forward
        # (attention logits are not divided by sqrt(key_dim)); kept as-is to
        # preserve behavior -- confirm against the reference implementation.
        self.scale = (key_dim ** (- 0.5))
        self.key_dim = key_dim
        self.nh_kd = nh_kd = (key_dim * num_heads)
        self.d = int((attn_ratio * key_dim))
        self.dh = (int((attn_ratio * key_dim)) * num_heads)
        self.attn_ratio = attn_ratio
        self.to_q = Conv2d_BN(dim, nh_kd, 1, norm_cfg=norm_cfg)
        self.to_k = Conv2d_BN(dim, nh_kd, 1, norm_cfg=norm_cfg)
        self.to_v = Conv2d_BN(dim, self.dh, 1, norm_cfg=norm_cfg)
        self.proj = torch.nn.Sequential(activation(), Conv2d_BN(self.dh, dim, bn_weight_init=0, norm_cfg=norm_cfg))

    def forward(self, x):
        """Apply attention over the H*W spatial tokens and return a feature
        map with the same spatial resolution."""
        batch, _, height, width = get_shape(x)
        tokens = height * width
        # Project to per-head token sequences: q/v are (B, heads, HW, d),
        # k stays (B, heads, d, HW) so q @ k gives (B, heads, HW, HW).
        q = self.to_q(x).reshape(batch, self.num_heads, self.key_dim, tokens).permute(0, 1, 3, 2)
        k = self.to_k(x).reshape(batch, self.num_heads, self.key_dim, tokens)
        v = self.to_v(x).reshape(batch, self.num_heads, self.d, tokens).permute(0, 1, 3, 2)
        weights = torch.matmul(q, k).softmax(dim=(- 1))
        context = torch.matmul(weights, v)
        context = context.permute(0, 1, 3, 2).reshape(batch, self.dh, height, width)
        return self.proj(context)
def _seg_79():
return [(195097, 'M', u''), (195098, 'M', u''), (195099, 'M', u''), (195100, 'M', u''), (195101, 'M', u''), (195102, 'X'), (917760, 'I'), (918000, 'X')] |
def get_trans_list():
    """Return the fixed list of image-transform names used by the
    augmentation policy search."""
    return [
        'Invert', 'Sharpness', 'AutoContrast', 'Posterize', 'ShearX',
        'TranslateX', 'TranslateY', 'ShearY', 'Cutout', 'Rotate',
        'Equalize', 'Contrast', 'Color', 'Solarize', 'Brightness',
    ]
def lowercase_and_remove_accent(text):
    """Lowercase a token list and strip combining accents.

    Tokens are joined with spaces, lowercased, NFD-normalized, and every
    combining mark (Unicode category 'Mn') is dropped before splitting back
    into tokens on single spaces.
    """
    joined = ' '.join(text).lower()
    normalized = unicodedata.normalize('NFD', joined)
    stripped = ''.join(ch for ch in normalized if unicodedata.category(ch) != 'Mn')
    return stripped.lower().split(' ')
def DatasetManager(dataset: str, root: str, split: str='public', train_samples_per_class: Optional[Union[(float, int)]]=None, val_samples_per_class: Optional[Union[(float, int)]]=None, test_samples_per_class: Optional[Union[(float, int)]]=None, train_size: Optional[int]=None, val_size: Optional[int]=None, test_size: Optional[int]=None, **_):
    """Load a supported graph dataset and attach train/val/test split indices.

    Args:
        dataset: dataset name; must be one of `supported_datasets` below.
        root: storage directory. A per-dataset subdirectory is appended for
            the Coauthor/Amazon datasets but NOT for the CitationFull ones,
            and ogbn-arxiv ignores it entirely (see NOTE below).
        split: 'public' or 'random'. Coauthor/Amazon/CitationFull assert
            'random'; ogbn-arxiv asserts 'public'.
        train_samples_per_class / val_samples_per_class /
            test_samples_per_class: per-class counts or fractions forwarded
            to the split helper (and to Planetoid's constructor).
        train_size / val_size / test_size: absolute split sizes.
        **_: extra keyword arguments are silently ignored.

    Returns:
        The loaded dataset object with split indices attached.

    Raises:
        ValueError: if `dataset` is not a supported name.
    """
    supported_datasets = {'CoauthorCS', 'CoauthorPhysics', 'AmazonComputers', 'AmazonPhotos', 'CoraFull', 'CoraML', 'PubMedFull', 'CiteSeerFull', 'Cora', 'PubMed', 'CiteSeer', 'ogbn-arxiv'}
    # Shared preprocessing: normalize node features and make edges undirected.
    default_transform = T.Compose([T.NormalizeFeatures(), ToUndirected()])
    if (dataset == 'CoauthorCS'):
        assert (split == 'random')
        root = os.path.join(root, 'CoauthorCS')
        data = D.Coauthor(root, 'CS', default_transform, None)
    elif (dataset == 'CoauthorPhysics'):
        assert (split == 'random')
        root = os.path.join(root, 'CoauthorPhysics')
        data = D.Coauthor(root, 'Physics', default_transform, None)
    elif (dataset == 'AmazonComputers'):
        assert (split == 'random')
        root = os.path.join(root, 'AmazonComputers')
        data = D.Amazon(root, 'Computers', default_transform, None)
    elif (dataset == 'AmazonPhotos'):
        assert (split == 'random')
        root = os.path.join(root, 'AmazonPhotos')
        data = D.Amazon(root, 'Photo', default_transform, None)
    # NOTE(review): unlike the branches above, the CitationFull datasets use
    # `root` directly with no per-dataset subfolder — confirm intentional.
    elif (dataset == 'CoraFull'):
        assert (split == 'random')
        data = D.CitationFull(root, 'Cora', default_transform, None)
    elif (dataset == 'CoraML'):
        assert (split == 'random')
        data = D.CitationFull(root, 'Cora_ML', default_transform, None)
    elif (dataset == 'PubMedFull'):
        assert (split == 'random')
        data = D.CitationFull(root, 'PubMed', default_transform, None)
    elif (dataset == 'CiteSeerFull'):
        assert (split == 'random')
        data = D.CitationFull(root, 'CiteSeer', default_transform, None)
    elif (dataset == 'Cora'):
        data = D.Planetoid(root, 'Cora', pre_transform=None, transform=default_transform, split='public', num_train_per_class=train_samples_per_class, num_test=test_size, num_val=val_size)
    elif (dataset == 'PubMed'):
        # PubMed additionally binarizes its features before normalizing.
        transform = T.Compose([BinarizeFeatures(), T.NormalizeFeatures(), ToUndirected()])
        data = D.Planetoid(root, 'PubMed', pre_transform=None, transform=transform, split='public', num_train_per_class=train_samples_per_class, num_test=test_size, num_val=val_size)
    elif (dataset == 'CiteSeer'):
        data = D.Planetoid(root, 'CiteSeer', pre_transform=None, transform=default_transform, split='public', num_train_per_class=train_samples_per_class, num_test=test_size, num_val=val_size)
    elif (dataset == 'ogbn-arxiv'):
        assert (split == 'public')
        transform = T.Compose([ToUndirected()])
        # NOTE(review): the `root` argument is ignored here; OGB data is
        # hard-coded to './data' — confirm this is intentional.
        data = ogbn.PygNodePropPredDataset(name='ogbn-arxiv', root='./data', transform=transform)
        data = get_idx_split_arxiv(data)
        data.data.y = data.data.y.squeeze()
        # Early return: ogbn-arxiv uses OGB's own split, not get_idx_split.
        return data
    else:
        raise ValueError(f'{dataset} not in set of supported datasets {supported_datasets}!')
    data = get_idx_split(data, split=split, train_samples_per_class=train_samples_per_class, val_samples_per_class=val_samples_per_class, test_samples_per_class=test_samples_per_class, train_size=train_size, val_size=val_size, test_size=test_size)
    return data
def test_timm_backbone():
    """Smoke-test TIMMBackbone: bad init args raise, and resnet18 /
    resnetv2_*_bitm feature extractors emit the expected pyramid shapes at
    output strides 32/16/8."""

    def assert_feat_shapes(model, input_hw, expected_shapes):
        # Run a single forward pass and compare every feature-map shape.
        imgs = torch.randn(1, 3, *input_hw)
        shapes = [feat.shape for feat in model(imgs)]
        assert len(shapes) == len(expected_shapes)
        for shape, want in zip(shapes, expected_shapes):
            assert shape == torch.Size(want)

    # pretrained must be a bool (or similar); an int must raise TypeError.
    with pytest.raises(TypeError):
        model = TIMMBackbone()
        model.init_weights(pretrained=0)

    # Alternative norm layers must construct without error.
    model = TIMMBackbone(model_name='resnet18', features_only=True, pretrained=False, output_stride=32, norm_layer='BN2d')
    model = TIMMBackbone(model_name='resnet18', features_only=True, pretrained=False, output_stride=32, norm_layer='SyncBN')

    # resnet18, output_stride=32.
    model = TIMMBackbone(model_name='resnet18', features_only=True, pretrained=False, output_stride=32)
    model.init_weights()
    model.train()
    assert check_norm_state(model.modules(), True)
    assert_feat_shapes(model, (224, 224), [(1, 64, 112, 112), (1, 64, 56, 56), (1, 128, 28, 28), (1, 256, 14, 14), (1, 512, 7, 7)])

    # resnet18, output_stride=16.
    model = TIMMBackbone(model_name='resnet18', features_only=True, pretrained=False, output_stride=16)
    assert_feat_shapes(model, (224, 224), [(1, 64, 112, 112), (1, 64, 56, 56), (1, 128, 28, 28), (1, 256, 14, 14), (1, 512, 14, 14)])

    # resnet18, output_stride=8.
    model = TIMMBackbone(model_name='resnet18', features_only=True, pretrained=False, output_stride=8)
    assert_feat_shapes(model, (224, 224), [(1, 64, 112, 112), (1, 64, 56, 56), (1, 128, 28, 28), (1, 256, 28, 28), (1, 512, 28, 28)])

    # Loading pretrained weights must not raise.
    model = TIMMBackbone(model_name='efficientnet_b1', pretrained=True)

    # resnetv2 BiT variants at output_stride=8 on tiny 8x8 inputs.
    model = TIMMBackbone(model_name='resnetv2_50x1_bitm', features_only=True, pretrained=False, output_stride=8)
    assert_feat_shapes(model, (8, 8), [(1, 64, 4, 4), (1, 256, 2, 2), (1, 512, 1, 1), (1, 1024, 1, 1), (1, 2048, 1, 1)])

    model = TIMMBackbone(model_name='resnetv2_50x3_bitm', features_only=True, pretrained=False, output_stride=8)
    assert_feat_shapes(model, (8, 8), [(1, 192, 4, 4), (1, 768, 2, 2), (1, 1536, 1, 1), (1, 3072, 1, 1), (1, 6144, 1, 1)])

    model = TIMMBackbone(model_name='resnetv2_101x1_bitm', features_only=True, pretrained=False, output_stride=8)
    assert_feat_shapes(model, (8, 8), [(1, 64, 4, 4), (1, 256, 2, 2), (1, 512, 1, 1), (1, 1024, 1, 1), (1, 2048, 1, 1)])
def query_weibull(category_name, weibull_model, distance_type='eucos'):
    """Fetch the stored Weibull data for one category.

    Returns a list [mean_vec, distances_<distance_type>, weibull_model]
    pulled from `weibull_model[category_name]`.
    """
    entry = weibull_model[category_name]
    return [
        entry['mean_vec'],
        entry['distances_%s' % distance_type],
        entry['weibull_model'],
    ]
class ShapenetCaptionEvalDataset(ShapenetCaptionDataset):
    """Evaluation variant of ShapenetCaptionDataset whose samples omit the
    'text_input' field (ground-truth captions must not be fed at eval time)."""

    def __getitem__(self, index):
        data = super().__getitem__(index)
        # Identity check instead of `data != None`: `!=` would invoke the
        # item's __eq__, which can misbehave for array-like payloads.
        if data is not None:
            del data['text_input']
        return data
def load_forbidden_symbols(dataset):
    """Return the set of SMILES tokens disallowed for *dataset*.

    Only 'guacamol' has a fixed blacklist; every other dataset gets an
    empty set.
    """
    if dataset != 'guacamol':
        return set()
    return {
        'Ag', 'Al', 'Am', 'Ar', 'At', 'Au', 'D', 'E', 'Fe', 'G', 'K', 'L',
        'M', 'Ra', 'Re', 'Rf', 'Rg', 'Rh', 'Ru', 'T', 'U', 'V', 'W', 'Xe',
        'Y', 'Zr', 'a', 'd', 'f', 'g', 'h', 'k', 'm', 'si', 't', 'te', 'u',
        'v', 'y',
    }
def parse_args():
    """Parse command-line arguments for the matting demo.

    Positional: config, checkpoint, img_path, trimap_path, save_path.
    Optional: --imshow (flag), --device (CUDA device id, default 0).
    """
    parser = argparse.ArgumentParser(description='Matting demo')
    positionals = [
        ('config', 'test config file path'),
        ('checkpoint', 'checkpoint file'),
        ('img_path', 'path to input image file'),
        ('trimap_path', 'path to input trimap file'),
        ('save_path', 'path to save alpha matte result'),
    ]
    for name, help_text in positionals:
        parser.add_argument(name, help=help_text)
    parser.add_argument('--imshow', action='store_true', help='whether show image with opencv')
    parser.add_argument('--device', type=int, default=0, help='CUDA device id')
    return parser.parse_args()
def test_get_query_results_from_db_wrong_query(metric_evaluator):
    """Querying a nonexistent table must surface sqlite3.OperationalError."""
    pred_df = pd.DataFrame({
        'db_id': [f'{DB_NAME}_1', f'{DB_NAME}_2', f'{DB_NAME}_3'],
        'query': ['SELECT * FROM wrong_table_name'] * 3,
        'prediction': [f'SELECT * FROM {TABLE_NAME}'] * 3,
    })
    with pytest.raises(sqlite3.OperationalError):
        metric_evaluator._get_query_results_from_db(pred_df)
def News20_dataset(args=None):
    """Build the 20-Newsgroups 'sports' dataset from the preprocessed
    pickle and set its balanced positive-class weight."""
    dataset = Dataset(
        name='20News_sports',
        path='preprocess/20News/vec_20news_sports.p',
        min_length=6,
        max_length=500,
        args=args,
    )
    set_balanced_pos_weight(dataset)
    return dataset
def basis_seq(V, vecs):
    """Return *vecs* as an immutable Sequence with universe *V*.

    Each vector is made immutable in place first; validity checks are
    skipped (check=False) and column formatting enabled (cr=True).
    """
    for vec in vecs:
        vec.set_immutable()
    return Sequence(vecs, universe=V, check=False, immutable=True, cr=True)
def test_polygamma():
    """Spot-check polygamma at its poles and at known closed-form values.

    (The original repeated the polygamma(0, -9) assertion twice; the exact
    duplicate has been removed.)
    """
    # polygamma(0, n) has poles at non-positive integers.
    assert (polygamma(0, (- 9)) == zoo)
    assert (polygamma(0, (- 1)) == zoo)
    assert (polygamma(0, 0) == zoo)
    # Digamma closed forms.
    assert (polygamma(0, 1) == (- EulerGamma))
    assert (polygamma(0, 7) == (Rational(49, 20) - EulerGamma))
    # Trigamma closed forms.
    assert (polygamma(1, 1) == ((pi ** 2) / 6))
    assert (polygamma(1, 2) == (((pi ** 2) / 6) - 1))
    assert (polygamma(1, 3) == (((pi ** 2) / 6) - Rational(5, 4)))
    # Higher-order values.
    assert (polygamma(3, 1) == ((pi ** 4) / 15))
    assert (polygamma(3, 5) == (6 * (Rational((- 22369), 20736) + ((pi ** 4) / 90))))
    assert (polygamma(5, 1) == ((8 * (pi ** 6)) / 63))
class CountFeaturizer(Featurizer):
    """Count-based featurizer.

    Allocates one count-valued column per observed code (optionally expanded
    to ontology parents), plus optional columns for (code, string value)
    pairs and per-code numeric-value deciles.  `featurize` emits, for each
    label, the counts accumulated from the patient's events up to that
    label's time; with `time_bins`, counts are bucketed by event age relative
    to the label time and the column space is replicated per bin.
    """

    def __init__(self, is_ontology_expansion: bool=False, excluded_codes: Iterable[str]=(), excluded_event_filter: Optional[Callable[([Event], bool)]]=None, time_bins: Optional[List[datetime.timedelta]]=None, numeric_value_decile: bool=False, string_value_combination: bool=False, characters_for_string_values: int=100):
        """
        Args:
            is_ontology_expansion: expand each code to its ontology parents.
            excluded_codes: codes to drop (default changed from a mutable
                `[]` to `()`; it is only ever read into a set).
            excluded_event_filter: extra per-event exclusion predicate.
            time_bins: optional time-bin edges; duplicates are rejected.
            numeric_value_decile: add decile columns for numeric values.
            string_value_combination: add (code, string value) columns.
            characters_for_string_values: truncation length for string values.
        """
        self.is_ontology_expansion: bool = is_ontology_expansion
        # Fold the static code blacklist and user predicate into one test.
        self.excluded_event_filter = functools.partial(exclusion_helper, fallback_function=excluded_event_filter, excluded_codes_set=set(excluded_codes))
        self.time_bins: Optional[List[datetime.timedelta]] = time_bins
        self.characters_for_string_values: int = characters_for_string_values
        self.numeric_value_decile = numeric_value_decile
        self.string_value_combination = string_value_combination
        if (self.time_bins is not None):
            assert (len(set(self.time_bins)) == len(self.time_bins)), f'You cannot have duplicate values in the `time_bins` argument. You passed in: {self.time_bins}'
        # Accumulators filled by `preprocess` and frozen by `finalize`.
        self.observed_codes: Set[str] = set()
        self.observed_string_value: Dict[(Tuple[(str, str)], int)] = collections.defaultdict(int)
        self.observed_numeric_value: Dict[(str, ReservoirSampler)] = collections.defaultdict(functools.partial(ReservoirSampler, 10000, 100))
        self.finalized = False

    def get_codes(self, code: str, ontology: extension_datasets.Ontology) -> Iterator[str]:
        """Yield *code* itself, or everything `ontology.get_all_parents`
        returns for it when ontology expansion is enabled."""
        if self.is_ontology_expansion:
            for subcode in ontology.get_all_parents(code):
                (yield subcode)
        else:
            (yield code)

    def get_columns(self, event, ontology: extension_datasets.Ontology) -> Iterator[int]:
        """Yield the column indices *event* contributes to.

        Requires `finalize` to have built the column-index maps.
        """
        if (event.value is None):
            for code in self.get_codes(event.code, ontology):
                if (code in self.code_to_column_index):
                    (yield self.code_to_column_index[code])
        elif (type(event.value) is str):
            k = (event.code, event.value[:self.characters_for_string_values])
            if (k in self.code_string_to_column_index):
                (yield self.code_string_to_column_index[k])
        elif (event.code in self.code_value_to_column_index):
            (column, quantiles) = self.code_value_to_column_index[event.code]
            # Locate the decile bucket containing the numeric value.
            for (i, (start, end)) in enumerate(zip(quantiles, quantiles[1:])):
                if (start <= event.value < end):
                    (yield (i + column))

    def preprocess(self, patient: Patient, labels: List[Label], ontology: extension_datasets.Ontology):
        """Record which codes / string values / numeric values occur for
        *patient*, so that `finalize` can allocate columns."""
        for event in patient.events:
            if ((self.excluded_event_filter is not None) and self.excluded_event_filter(event)):
                continue
            if (event.value is None):
                for code in self.get_codes(event.code, ontology):
                    self.observed_codes.add(code)
            elif (type(event.value) is str):
                if self.string_value_combination:
                    self.observed_string_value[(event.code, event.value[:self.characters_for_string_values])] += 1
            elif self.numeric_value_decile:
                self.observed_numeric_value[event.code].add(event.value)

    def aggregate_preprocessed_featurizers(cls, featurizers: List[CountFeaturizer]) -> CountFeaturizer:
        """Merge the observations of several preprocessed featurizers into
        the first one (the template) and return it.

        NOTE(review): this takes `cls` but no @classmethod decorator is
        visible here — confirm how callers invoke it before adding one.
        """
        if (len(featurizers) == 0):
            raise ValueError('You must pass in at least one featurizer to `aggregate_preprocessed_featurizers`')
        template_featurizer: CountFeaturizer = featurizers[0]
        for featurizer in featurizers[1:]:
            template_featurizer.observed_codes |= featurizer.observed_codes
            # Bug fix: merge INTO the returned template. The original merged
            # the template's counts into each (discarded) featurizer, so the
            # template never received the others' string/numeric observations.
            for (k1, v1) in featurizer.observed_string_value.items():
                template_featurizer.observed_string_value[k1] += v1
            for (k2, v2) in featurizer.observed_numeric_value.items():
                template_featurizer.observed_numeric_value[k2].values += v2.values
        return template_featurizer

    def finalize(self):
        """Freeze the observations into column-index maps (idempotent)."""
        if self.finalized:
            return
        self.finalized = True
        self.code_to_column_index = {}
        self.code_string_to_column_index = {}
        self.code_value_to_column_index = {}
        self.num_columns = 0
        for code in sorted(list(self.observed_codes)):
            self.code_to_column_index[code] = self.num_columns
            self.num_columns += 1
        # Only (code, string) pairs seen more than once get a column.
        for ((code, val), count) in sorted(list(self.observed_string_value.items())):
            if (count > 1):
                self.code_string_to_column_index[(code, val)] = self.num_columns
                self.num_columns += 1
        # One column per decile bucket, bracketed by +/- infinity edges.
        for (code, values) in sorted(list(self.observed_numeric_value.items())):
            quantiles = sorted(list(set(np.quantile(values.values, np.linspace(0, 1, num=11)[1:(- 1)]))))
            quantiles = (([float('-inf')] + quantiles) + [float('inf')])
            self.code_value_to_column_index[code] = (self.num_columns, quantiles)
            self.num_columns += (len(quantiles) - 1)

    def get_num_columns(self) -> int:
        """Total feature width (replicated per time bin when binning)."""
        self.finalize()
        if (self.time_bins is None):
            return self.num_columns
        else:
            return (self.num_columns * len(self.time_bins))

    def featurize(self, patient: Patient, labels: List[Label], ontology: Optional[extension_datasets.Ontology]) -> List[List[ColumnValue]]:
        """Return, for each label, sparse count columns accumulated from the
        patient's events up to that label's time.

        Assumes `patient.events` and `labels` are sorted by time — TODO confirm.
        """
        self.finalize()
        if (ontology is None):
            raise ValueError("`ontology` can't be `None` for CountFeaturizer")
        if (len(labels) == 0):
            # Nothing to featurize. Previously this crashed with IndexError
            # on `labels[0]` whenever the patient had any events.
            return []
        all_columns: List[List[ColumnValue]] = []
        if (self.time_bins is None):
            code_counter: Dict[(int, int)] = defaultdict(int)
            label_idx = 0
            for event in patient.events:
                if ((self.excluded_event_filter is not None) and self.excluded_event_filter(event)):
                    continue
                # Emit feature rows for all labels preceding this event.
                while (event.start > labels[label_idx].time):
                    label_idx += 1
                    all_columns.append([ColumnValue(code, count) for (code, count) in code_counter.items()])
                    if (label_idx >= len(labels)):
                        return all_columns
                for column_idx in self.get_columns(event, ontology):
                    code_counter[column_idx] += 1
            # Remaining labels see the final cumulative counts.
            for _ in labels[label_idx:]:
                all_columns.append([ColumnValue(code, count) for (code, count) in code_counter.items()])
        else:
            time_bins: List[datetime.timedelta] = sorted([x for x in self.time_bins if (x is not None)])
            # One extra trailing bin holds events older than the largest bin
            # edge; it is never emitted as feature columns.
            codes_per_bin: Dict[(int, Deque[Tuple[(int, datetime.datetime)]])] = {i: deque() for i in range((len(self.time_bins) + 1))}
            code_counts_per_bin: Dict[(int, Dict[(int, int)])] = {i: defaultdict(int) for i in range((len(self.time_bins) + 1))}
            label_idx = 0
            for event in patient.events:
                if ((self.excluded_event_filter is not None) and self.excluded_event_filter(event)):
                    continue
                while (event.start > labels[label_idx].time):
                    # Age events into the correct bins for this label time.
                    _reshuffle_count_time_bins(time_bins, codes_per_bin, code_counts_per_bin, labels[label_idx])
                    label_idx += 1
                    all_columns.append([ColumnValue((code + (i * self.num_columns)), count) for i in range(len(self.time_bins)) for (code, count) in code_counts_per_bin[i].items()])
                    if (label_idx >= len(labels)):
                        return all_columns
                for column_idx in self.get_columns(event, ontology):
                    codes_per_bin[0].append((column_idx, event.start))
                    code_counts_per_bin[0][column_idx] += 1
            for label in labels[label_idx:]:
                _reshuffle_count_time_bins(time_bins, codes_per_bin, code_counts_per_bin, label)
                all_columns.append([ColumnValue((code + (i * self.num_columns)), count) for i in range(len(self.time_bins)) for (code, count) in code_counts_per_bin[i].items()])
        return all_columns

    def is_needs_preprocessing(self) -> bool:
        """Counting requires a preprocessing pass to discover the columns."""
        return True

    def __repr__(self) -> str:
        # NOTE(review): `num_columns` only exists after `finalize()` ran.
        return f'CountFeaturizer(number of included codes={self.num_columns})'

    def get_column_name(self, column_idx: int) -> str:
        """Human-readable name for *column_idx*: reverse lookup over the
        three column maps, with the time bin appended when binning."""
        def helper(actual_idx):
            for (code, idx) in self.code_to_column_index.items():
                if (idx == actual_idx):
                    return code
            for ((code, val), idx) in self.code_string_to_column_index.items():
                if (idx == actual_idx):
                    return f'{code} {val}'
            for (code, (idx, quantiles)) in self.code_value_to_column_index.items():
                offset = (actual_idx - idx)
                if (0 <= offset < (len(quantiles) - 1)):
                    return f'{code} [{quantiles[offset]}, {quantiles[(offset + 1)]})'
            raise RuntimeError(('Could not find name for ' + str(actual_idx)))
        if (self.time_bins is None):
            return helper(column_idx)
        else:
            return (helper((column_idx % self.num_columns)) + f'_{self.time_bins[(column_idx // self.num_columns)]}')
# NOTE(review): `_module()` looks like an extraction-mangled registration
# decorator (e.g. `@HEADS.register_module()`); as written it is a bare call.
_module()
class FeatureRelayHead(nn.Module):
    """Feature relay head: projects an RoI feature vector back to a spatial
    map of shape (out_conv_channels, roi_feat_size, roi_feat_size) with a
    linear layer, then bilinearly upsamples it by `scale_factor`."""
    def __init__(self, in_channels=1024, out_conv_channels=256, roi_feat_size=7, scale_factor=2):
        super(FeatureRelayHead, self).__init__()
        assert isinstance(roi_feat_size, int)
        self.in_channels = in_channels
        self.out_conv_channels = out_conv_channels
        self.roi_feat_size = roi_feat_size
        # fc output is reshaped to (out_conv_channels, roi_feat_size, roi_feat_size).
        self.out_channels = ((roi_feat_size ** 2) * out_conv_channels)
        self.scale_factor = scale_factor
        self.fp16_enabled = False
        self.fc = nn.Linear(self.in_channels, self.out_channels)
        self.upsample = nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=True)
    def init_weights(self):
        """Kaiming-initialize the fully connected layer."""
        kaiming_init(self.fc)
    # NOTE(review): `_fp16()` is likely a mangled `@auto_fp16()` decorator for
    # `forward`; as written it executes as a bare call at class-body time.
    _fp16()
    def forward(self, x):
        """Map (N, in_channels) to an upsampled
        (N, out_conv_channels, roi_feat_size*scale_factor,
        roi_feat_size*scale_factor) map; returns None for an empty batch."""
        (N, in_C) = x.shape
        if (N > 0):
            out_C = self.out_conv_channels
            out_HW = self.roi_feat_size
            x = self.fc(x)
            x = x.reshape(N, out_C, out_HW, out_HW)
            x = self.upsample(x)
            return x
        return None
def register_Ns3LteRrcSapRrcConnectionReestablishmentRequest_methods(root_module, cls):
    """Register the pybindgen constructors and attributes for
    ns3::LteRrcSap::RrcConnectionReestablishmentRequest."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LteRrcSap::RrcConnectionReestablishmentRequest const &', 'arg0')])
    # Public struct fields exposed as mutable instance attributes.
    cls.add_instance_attribute('reestablishmentCause', 'ns3::LteRrcSap::ReestablishmentCause', is_const=False)
    cls.add_instance_attribute('ueIdentity', 'ns3::LteRrcSap::ReestabUeIdentity', is_const=False)
def get_reward(purchased_product, goal, price, options, **kwargs):
    """Score a purchase against *goal*.

    Combines attribute matches, option matches, and a price check into a
    normalized score, then scales it by the type-match reward.

    Returns:
        float total reward, or (float, dict) breakdown when
        kwargs['verbose'] is truthy.
    """
    r_type_dict = get_type_reward(purchased_product, goal)
    # Price reward is None when the goal sets no upper price bound.
    r_price = ((price <= goal['price_upper']) if (goal['price_upper'] > 0) else None)
    (r_att, num_attr_matches) = get_attribute_reward(purchased_product, goal)
    (r_option, num_option_matches) = get_option_reward(list(options.values()), (goal['goal_options'].items() if isinstance(goal['goal_options'], dict) else goal['goal_options']))
    # NOTE(review): when r_price is None (no price bound), this addition
    # raises TypeError -- confirm price-less goals never reach this point.
    total_reward = (((num_attr_matches + num_option_matches) + r_price) / ((len(goal['attributes']) + len(goal['goal_options'])) + 1))
    total_reward *= r_type_dict['r_type']
    if kwargs.get('verbose', False):
        # Breakdown of each reward component and its weight in the total.
        info = {'r_type': r_type_dict['r_type'], 'r_att': r_att, 'w_att': (len(goal['attributes']) / ((len(goal['attributes']) + len(goal['goal_options'])) + 1)), 'query_match': r_type_dict['query_match'], 'category_match': r_type_dict['category_match'], 'title_score': r_type_dict['title_score']}
        if (r_option is not None):
            info['r_option'] = r_option
            info['w_option'] = (len(goal['goal_options']) / ((len(goal['attributes']) + len(goal['goal_options'])) + 1))
        if (r_price is not None):
            info['r_price'] = r_price
            info['w_price'] = (1 / ((len(goal['attributes']) + len(goal['goal_options'])) + 1))
        return (total_reward, info)
    return total_reward
# NOTE(review): extraction artifact -- this line lost its decorator prefix;
# it was presumably `@pytest.mark.parametrize('ST,quad', all_trial_bases_and_quads)`.
.parametrize('ST,quad', all_trial_bases_and_quads)
def test_eval(ST, quad):
    """Check that ST.eval at the quadrature points matches the backward
    transform, for both the default and 'vandermonde' implementations."""
    kwargs = {}
    # Fourier bases take no quadrature-rule argument.
    if (not (ST.family() == 'fourier')):
        kwargs['quad'] = quad
    ST = ST(N, **kwargs)
    (points, weights) = ST.points_and_weights(N)
    fk = shenfun.Function(ST)
    fk[:4] = 1
    # Smoke-call eval at a single point before the full comparison.
    ST.eval(np.array([0.0]), fk)
    f = ST.eval(points, fk)
    fj = fk.backward()
    assert np.allclose(fj, f, rtol=1e-05, atol=1e-06), np.linalg.norm((fj - f))
    # Round-trip through the Vandermonde implementation and compare again.
    fj = ST.backward(fk, fj, kind='vandermonde')
    fk = ST.forward(fj, fk, kind='vandermonde')
    f = ST.eval(points, fk)
    assert np.allclose(fj, f, rtol=1e-05, atol=1e-06), np.linalg.norm((fj - f))
def recursive_split(segment, bpe_codes, vocab, separator, final=False):
    """Recursively split a BPE *segment* into smaller units found in *vocab*.

    Args:
        segment: subword to split.
        bpe_codes: maps a merged symbol to the (left, right) pair it was
            merged from; word-final entries are keyed with a '</w>' suffix.
        vocab: set of known subwords (word-internal entries carry *separator*).
        separator: BPE separator appended to word-internal subwords.
        final: True if *segment* is in word-final position.

    Yields:
        Subword strings; a segment with no entry in *bpe_codes* is yielded
        unchanged.
    """
    try:
        if final:
            (left, right) = bpe_codes[(segment + '</w>')]
            right = right[:(- 4)]  # strip the trailing '</w>' marker
        else:
            (left, right) = bpe_codes[segment]
    except KeyError:
        # Narrowed from a bare `except:` -- only the dict lookup can fail
        # here, and a missing entry means the segment is atomic.
        (yield segment)
        return
    if ((left + separator) in vocab):
        (yield left)
    else:
        for item in recursive_split(left, bpe_codes, vocab, separator, False):
            (yield item)
    # The right part keeps word-final status; it only needs the separator
    # when it is not word-final.
    if ((final and (right in vocab)) or ((not final) and ((right + separator) in vocab))):
        (yield right)
    else:
        for item in recursive_split(right, bpe_codes, vocab, separator, final):
            (yield item)
class Circuit():
    """A gate-list quantum circuit with a lazily built, cached unitary.

    Gates are stored as [name, indices, arg] entries; the unitary matrix is
    constructed with QuTiP's QubitCircuit on first request and memoized in
    `_cache`.
    """

    def __init__(self, size: int) -> None:
        self.size: int = size
        self.gates: List[GATE_INFO_TYPE] = []
        self.measured_qubits: List[int] = []
        self._cache: Optional[np.ndarray] = None

    def get_unitary_matrix(self) -> np.ndarray:
        """Return the circuit unitary, computing and caching it once.

        NOTE(review): appending gates after this call does not clear
        `_cache`, so a stale unitary would be returned -- confirm callers
        never mutate the circuit after reading the unitary.
        """
        if self._cache is not None:
            return self._cache
        if not self.gates:
            self._cache = np.identity(2 ** self.size)
            return self._cache
        qc = QubitCircuit(self.size)
        qc.user_gates = {'X': x_gate, 'Y': y_gate, 'Z': z_gate, 'S': s_gate, 'T': t_gate}
        # Single-qubit gates map directly onto QubitCircuit gate names.
        single_qubit = {'h': 'SNOT', 'x': 'X', 'y': 'Y', 'z': 'Z', 't': 'T', 's': 'S'}
        for (name, indices, arg) in self.gates:
            if name in single_qubit:
                qc.add_gate(single_qubit[name], indices[0])
            elif name == 'cx':
                qc.add_gate('CNOT', controls=indices[0], targets=indices[1])
            elif name == 'ccx':
                qc.add_gate('TOFFOLI', controls=indices[:2], targets=indices[2])
            elif name == 'swap':
                qc.add_gate('SWAP', indices)
            elif name == 'phase':
                qc.add_gate('PHASEGATE', indices[0], arg_value=arg)
            else:
                raise NotImplementedError
        self._cache = gate_sequence_product(qc.propagators()).full()
        return self._cache

    def serialize(self) -> Dict:
        """Dump the circuit to a JSON-compatible dict."""
        gate_dicts = [{'name': name, 'indices': indices, 'arg': arg} for (name, indices, arg) in self.gates]
        return {'size': self.size, 'gates': gate_dicts, 'measured_qubits': self.measured_qubits}

    def deserialize(self, json_data: Dict):
        """Load state produced by `serialize` and invalidate the cache.

        NOTE(review): gates are appended without clearing `self.gates`, so
        deserializing into a non-empty circuit duplicates entries -- confirm
        this is only called on freshly constructed instances.
        """
        self.size = json_data['size']
        for gate in json_data['gates']:
            self.gates.append([gate['name'], gate['indices'], gate['arg']])
        self.measured_qubits = json_data['measured_qubits']
        self._cache = None

    def _record(self, name: str, indices: List[int], arg=None):
        """Append one [name, indices, arg] gate entry to the list."""
        self.gates.append([name, indices, arg])

    def h(self, qubit: int):
        self._record('h', [qubit])

    def x(self, qubit: int):
        self._record('x', [qubit])

    def y(self, qubit: int):
        self._record('y', [qubit])

    def z(self, qubit: int):
        self._record('z', [qubit])

    def cx(self, control: int, target: int):
        self._record('cx', [control, target])

    def ccx(self, control1: int, control2: int, target: int):
        self._record('ccx', [control1, control2, target])

    def swap(self, qubit1: int, qubit2: int):
        self._record('swap', [qubit1, qubit2])

    def t(self, qubit: int):
        self._record('t', [qubit])

    def s(self, qubit: int):
        self._record('s', [qubit])

    def phase(self, qubit: int, theta: float):
        self._record('phase', [qubit], theta)

    def measure(self, qubit: int):
        self.measured_qubits.append(qubit)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.