code stringlengths 101 5.91M |
|---|
def get_args():
    """Build and parse the command-line arguments for Fed-BEiT pre-training.

    Covers model/masking settings, optimizer and LR schedule, data/augmentation
    options, checkpointing/distributed flags, and federated-learning options.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser('Fed-BEiT pre-training', add_help=False)
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--save_ckpt_freq', default=50, type=int)
    parser.add_argument('--discrete_vae_weight_path', default='/home/yan/data/SSL-FL/tokenizer_weight', type=str)
    parser.add_argument('--discrete_vae_type', type=str, default='dall-e')
    parser.add_argument('--model_name', default='beit', type=str)
    parser.add_argument('--model', default='beit_base_patch16_224_8k_vocab', type=str, metavar='MODEL',
                        help='Name of model to train')
    # Paired store_true/store_false flags sharing one dest; default is enabled.
    parser.add_argument('--rel_pos_bias', action='store_true')
    parser.add_argument('--disable_rel_pos_bias', action='store_false', dest='rel_pos_bias')
    parser.set_defaults(rel_pos_bias=True)
    parser.add_argument('--abs_pos_emb', action='store_true')
    parser.set_defaults(abs_pos_emb=False)
    parser.add_argument('--layer_scale_init_value', default=0.1, type=float,
                        help='0.1 for base, 1e-5 for large. set 0 to disable layer scale')
    parser.add_argument('--mask_ratio', default=0.4, type=float,
                        help='Masking ratio (percentage of removed patches).')
    parser.add_argument('--max_mask_patches_per_block', type=int, default=None)
    parser.add_argument('--min_mask_patches_per_block', type=int, default=16)
    parser.add_argument('--input_size', default=224, type=int,
                        help='images input size for backbone')
    parser.add_argument('--second_input_size', default=112, type=int,
                        help='images input size for discrete vae')
    parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
                        help='Drop path rate (default: 0.1)')
    # Fixed: help string previously had an unbalanced parenthesis.
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
    parser.add_argument('--opt_eps', default=1e-08, type=float, metavar='EPSILON',
                        help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA',
                        help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
                        help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight_decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')
    parser.add_argument('--weight_decay_end', type=float, default=None,
                        help='Final value of the\n weight decay. We use a cosine schedule for WD. \n (Set the same value with args.weight_decay to keep weight decay no change)')
    parser.add_argument('--lr', type=float, default=0.002, metavar='LR',
                        help='learning rate (default: 2e-3)')
    parser.add_argument('--warmup_lr', type=float, default=1e-06, metavar='LR',
                        help='warmup learning rate (default: 1e-6)')
    parser.add_argument('--min_lr', type=float, default=1e-05, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
    parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
                        help='epochs to warmup LR, if scheduler supports')
    # Fixed: help previously said "epochs" but this is a step count (-1 disables it).
    parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
                        help='steps to warmup LR, if scheduler supports (overrides warmup_epochs when >= 0)')
    parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT',
                        help='Color jitter factor (default: 0.4)')
    parser.add_argument('--train_interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
    parser.add_argument('--second_interpolation', type=str, default='lanczos',
                        help='Interpolation for discrete vae (random, bilinear, bicubic default: "lanczos")')
    parser.add_argument('--data_path', default='../../data/Retina', type=str, help='dataset path')
    parser.add_argument('--data_set', default='Retina', type=str, help='dataset for pretraining')
    parser.add_argument('--imagenet_default_mean_and_std', default=False, action='store_true')
    parser.add_argument('--output_dir', default='', help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default=None, help='path where to tensorboard log')
    parser.add_argument('--device', default='cuda', help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin_mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem', help='')
    parser.set_defaults(pin_mem=True)
    # Distributed-training options.
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--sync_bn', default=False, action='store_true')
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    # Federated-learning options.
    parser.add_argument('--n_clients', default=5, type=int, help='Number of clients')
    parser.add_argument('--E_epoch', default=1, type=int, help='Local training epoch in FL')
    parser.add_argument('--max_communication_rounds', default=100, type=int,
                        help='Total communication rounds')
    parser.add_argument('--num_local_clients', default=-1, choices=[10, -1], type=int,
                        help='Num of local clients joined in each FL train. -1 indicates all clients')
    parser.add_argument('--split_type', type=str, default='central',
                        help='Which data partitions to use')
    return parser.parse_args()
class Word2VecTraining():
    """Thin wrapper around gensim's Word2Vec for training and continued training.

    Saves both the full model (reloadable for further training) and the
    keyed vectors in word2vec text format.
    """

    def train(self, data_path, model_name, vector_name, vector_size=100, alpha=0.025, min_alpha=0.0001, sg=0, hs=0, negative=5, ns_exponent=0.75, window=5, min_count=5, max_vocab_size=None, workers=3, epochs=5, sample=0.001, cbow_mean=1, compute_loss=True, callbacks=()):
        """Train a Word2Vec model from scratch and save model + vectors.

        data_path may be a list of pre-tokenized sentences, or a path handed
        to MyCorpus (which presumably streams sentences from disk -- confirm).
        All remaining keyword args mirror gensim's Word2Vec constructor.
        """
        if isinstance(data_path, list):
            sentences = data_path
        else:
            sentences = MyCorpus(data_path)
        print('training started.......')
        print('please wait.....it will take time according to your data size and computation capability')
        model = Word2Vec(sentences=sentences, vector_size=vector_size, alpha=alpha, min_alpha=min_alpha, sg=sg, hs=hs, negative=negative, ns_exponent=ns_exponent, sample=sample, cbow_mean=cbow_mean, window=window, min_count=min_count, max_vocab_size=max_vocab_size, workers=workers, epochs=epochs, compute_loss=compute_loss, callbacks=callbacks)
        training_loss = model.get_latest_training_loss()
        print('train completed successfully')
        # Fixed typo: was "trianing loss".
        print(f'training loss: {training_loss}')
        print('model and vector saving...')
        model.save(model_name)
        model.wv.save_word2vec_format(vector_name, binary=False)
        print(f'model and vector saved as {model_name} and {vector_name}')

    def pretrain(self, model_path, new_sentences, output_model_name, output_vector_name, epochs=5):
        """Continue training an existing Word2Vec model on new sentences.

        Loads the model at model_path, extends its vocabulary with the new
        sentences (build_vocab(update=True)), trains for `epochs` epochs, and
        saves the updated model and vectors under the output names.
        """
        if isinstance(new_sentences, str):
            new_sentences = MyCorpus(new_sentences)
        print('model loading ....')
        model = Word2Vec.load(model_path)
        print('vocab building with new sentences')
        model.build_vocab(new_sentences, update=True)
        print('pre-training started.......')
        print('please wait.....it will take time according to your data size and computation capability')
        model.train(new_sentences, total_examples=model.corpus_count, epochs=epochs)
        training_loss = model.get_latest_training_loss()
        print('pre-train completed successfully')
        # Fixed typo: was "pre-trianing loss".
        print(f'pre-training loss: {training_loss}')
        print('model and vector saving...')
        model.save(output_model_name)
        model.wv.save_word2vec_format(output_vector_name, binary=False)
        print(f'model and vector saved as {output_model_name} and {output_vector_name}')
def list_results(args):
    """Render per-example evaluation results as paginated HTML pages.

    Loads eval JSON for one (task, config, run, trial, epoch), joins it with
    the dataset/vocab files, and writes one Jinja2-rendered HTML page per
    `num_per_page` examples under visualize/<task>-<trial>.
    """
    model_name = args.model_name
    config_name = args.config_name.zfill(2)
    data_type = args.data_type
    num_per_page = args.num_per_page
    data_dir = args.data_dir
    task = args.task.zfill(2)
    mem_size = args.mem_size  # NOTE(review): read but unused below -- confirm intent
    run_id = args.run_id.zfill(2)
    trial_num = args.trial_num.zfill(2)
    # task is already zero-filled above; the extra zfill here is a no-op.
    target_dir = os.path.join(data_dir, task.zfill(2))
    epoch = args.epoch
    # Eval file layout: evals/<model>/<task>-<config>-<run>-<trial>/<data_type>_<epoch>.json
    subdir_name = '-'.join([task, config_name, run_id, trial_num])
    evals_dir = os.path.join('evals', model_name, subdir_name)
    evals_name = ('%s_%s.json' % (data_type, str(epoch).zfill(4)))
    evals_path = os.path.join(evals_dir, evals_name)
    evals = json.load(open(evals_path, 'r'))
    _id = 0
    html_dir = ('visualize/%s-%s' % (task, trial_num))
    if (not os.path.exists(html_dir)):
        os.makedirs(html_dir)
    # Dead code kept as a string literal (commented-out /tmp dir selection).
    '\n while os.path.exists(html_dir):\n _id += 1\n html_dir = "/tmp/list_results%d" % _id\n '
    # Always rebuild the output directory from scratch.
    if os.path.exists(html_dir):
        shutil.rmtree(html_dir)
    os.mkdir(html_dir)
    # Jinja2 environment rooted at the templates/ directory next to this file.
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    templates_dir = os.path.join(cur_dir, 'templates')
    env = Environment(loader=FileSystemLoader(templates_dir))
    env.globals.update(zip=zip, reversed=reversed)
    template = env.get_template(args.template_name)
    # Dataset artifacts produced by the preprocessing step.
    data_path = os.path.join(target_dir, 'data.json')
    mode2idxs_path = os.path.join(target_dir, 'mode2idxs.json')
    word2idx_path = os.path.join(target_dir, 'word2idx.json')
    metadata_path = os.path.join(target_dir, 'metadata.json')
    data = json.load(open(data_path, 'r'))
    # X: paragraphs (lists of fact index-sequences), Q: questions, Y: answers
    # -- presumed from usage below; confirm against the preprocessing code.
    (X, Q, Y) = data[:3]
    mode2idxs_dict = json.load(open(mode2idxs_path, 'r'))
    word2idx_dicts = json.load(open(word2idx_path, 'r'))
    # Flatten: first entry is the fact vocab, second entry is a list of answer vocabs.
    word2idx_dicts = ([word2idx_dicts[0]] + word2idx_dicts[1])
    idx2word_dicts = [{idx: word for (word, idx) in word2idx_dict.items()} for word2idx_dict in word2idx_dicts]
    # Only the fact dict is used directly; the answer dicts are accessed via idx2word_dicts below.
    (idx2word_dict_fact, idx2word_dict_a, idx2word_dict_a1, idx2word_dict_a2, idx2word_dict_a3, idx2word_dict_a4, idx2word_dict_a5, idx2word_dict_a6) = tuple(idx2word_dicts[:8])
    metadata = json.load(open(metadata_path, 'r'))
    # Re-index eval values by example id: eval_dd[id][metric_name] = value.
    eval_dd = {}
    for (idx, id_) in enumerate(evals['ids']):
        eval_d = {}
        for (name, d) in list(evals['values'].items()):
            eval_d[name] = d[idx]
        eval_dd[id_] = eval_d
    rows = []
    for (i, (id_, eval_d)) in enumerate(eval_dd.items()):
        question = _decode(idx2word_dict_fact, Q[id_])
        correct = eval_d['correct']
        # Attention / forward / backward relevance maps, averaged over the last
        # axis and transposed, then formatted to 2-decimal strings for display.
        a_raw = np.transpose(np.mean(eval_d['a'], 2))
        a = [[('%.2f' % val) for val in l] for l in a_raw]
        of_raw = np.transpose(np.mean(eval_d['rf'], 2))
        of = [[('%.2f' % val) for val in l] for l in of_raw]
        ob_raw = np.transpose(np.mean(eval_d['rb'], 2))
        ob = [[('%.2f' % val) for val in l] for l in ob_raw]
        para = X[id_]
        # Keep only the most recent facts the attention map covers.
        if (len(para) > len(a_raw)):
            para = para[(- len(a_raw)):]
        facts = [_decode(idx2word_dict_fact, x) for x in para]
        y = []
        yp = []
        YP = eval_d['yp']
        # NOTE(review): indices 5 appears in both slices (1:6 and 5:), so one
        # dict is duplicated -- looks suspicious; confirm against the model's
        # answer-head layout.
        Dlist = (idx2word_dicts[1:6] + idx2word_dicts[5:])
        # NOTE(review): gold answers indexed by loop position i, predictions by
        # id_ -- only equivalent if ids are 0..N-1 in order; confirm.
        for (j, (Y_, yp_, dict_)) in enumerate(zip(Y[i], YP, Dlist)):
            y.append(dict_.get(Y_, None))
            yp.append(yp_)
        row = {'id': id_, 'facts': facts, 'question': question, 'a': a, 'of': of, 'ob': ob, 'num_layers': len(a[0]), 'correct': correct, 'task': task[(- 1)], 'y': y[0], 'yp': yp[0]}
        # Only correctly-answered examples are rendered.
        if (correct == 1):
            rows.append(row)
        # Start a new page every num_per_page examples (i == 0 opens the first).
        if ((i % num_per_page) == 0):
            html_path = os.path.join(html_dir, ('%s.html' % str(id_).zfill(8)))
        # Flush the page when it is full or on the final example.
        if ((((i + 1) % num_per_page) == 0) or ((i + 1) == len(eval_dd))):
            var_dict = {'title': 'Sentence List', 'rows': rows}
            with open(html_path, 'wb') as f:
                f.write(template.render(**var_dict).encode('UTF-8'))
            rows = []
    # Dead code kept as a string literal (commented-out, partially garbled
    # local HTTP server for browsing the generated pages).
    '\n os.chdir(html_dir)\n port = args.port\n host = args.host\n # Overriding to suppress log message\n class MyHandler( def log_message(self, format, *args):\n pass\n handler = MyHandler\n = socketserver.TCPServer((host, port), handler)\n if args.open == \'True\':\n os.system("open % (args.host, args.port))\n print("serving at %s:%d" % (host, port))\n '
def load_checkpoint_to_cpu(path, arg_overrides=None):
    """Load a checkpoint from disk, remapping all tensors onto the CPU.

    Applies any arg_overrides onto the checkpoint's stored args namespace,
    then runs the state dict through _upgrade_state_dict for backward
    compatibility before returning it.
    """
    def _cpu_location(storage, location):
        # Force every saved storage onto the CPU regardless of origin device.
        return default_restore_location(storage, 'cpu')

    try:
        with open(path, 'rb') as handle:
            state = torch.load(handle, map_location=_cpu_location)
    except (ModuleNotFoundError, ImportError):
        # Fall back to loading by path when unpickling via the file object
        # trips over a missing module.
        state = torch.load(path, map_location=_cpu_location)
    args = state['args']
    if arg_overrides is not None:
        for name, value in arg_overrides.items():
            setattr(args, name, value)
    return _upgrade_state_dict(state)
def test_mutation_insert_twice_no_success(default_test_case):
    """Insertion mutation reports failure when every insert attempt fails.

    The mocked factory always returns -1 (no statement inserted); after two
    failed attempts at position 0, _mutation_insert must return False.
    """
    factory_mock = MagicMock(tf.TestFactory)
    # Every insertion attempt fails, regardless of test case or position.
    factory_mock.insert_random_statement.side_effect = lambda tc, pos: -1
    config.configuration.search_algorithm.statement_insertion_probability = 0.5
    config.configuration.search_algorithm.chromosome_length = 10
    chromosome = tcc.TestCaseChromosome(default_test_case, test_factory=factory_mock)
    with mock.patch('pynguin.utils.randomness.next_float') as float_mock:
        # 0.2 < 0.5 keeps triggering insertion attempts until they give up.
        float_mock.side_effect = [0.2, 0.2, 0.2]
        assert not chromosome._mutation_insert()
    factory_mock.insert_random_statement.assert_has_calls(
        [call(default_test_case, 0), call(default_test_case, 0)]
    )
class sdist(old_sdist):
    """sdist variant that also includes data files and headers in the tarball."""

    def add_defaults(self):
        """Extend the default file list with the distribution's data files and headers."""
        # Start from the stock defaults, then append the extras.
        old_sdist.add_defaults(self)
        dist = self.distribution
        if dist.has_data_files():
            for entry in dist.data_files:
                self.filelist.extend(get_data_files(entry))
        if dist.has_headers():
            # Header entries are either plain paths or (install_dir, path) pairs.
            names = [h if isinstance(h, str) else h[1] for h in dist.headers]
            self.filelist.extend(names)
def register_dataset(mod_name, dset_name, data_cfg=None):
    """Resolve a dataset-module object by name and register a dataset on it.

    NOTE(review): eval() on mod_name executes arbitrary code -- acceptable
    only if mod_name always comes from trusted, hard-coded callers; confirm.
    """
    eval(mod_name).register_with_name_cfg(dset_name, data_cfg)
def probability_variance(sampled_probabilities, mean_probabilities=None):
    """Variance of MC-sampled class probabilities, summed over classes.

    sampled_probabilities is indexed (example, sample, class); the per-class
    variance is averaged over the sample axis and summed over the class axis,
    yielding one scalar per example.
    """
    if mean_probabilities is None:
        mean_probabilities = np.mean(sampled_probabilities, axis=1)
    # Broadcast the per-example mean back over the sample axis.
    centered = sampled_probabilities - np.expand_dims(mean_probabilities, axis=1)
    return np.sum(np.mean(np.square(centered), axis=1), axis=-1)
class Config():
    # Configuration for one ConvNeXt feature-extractor export (size/dataset/resolution).
    #
    # NOTE(review): fields use attr.ib but no @attr.s/@attrs decorator is
    # visible here, and the accessors below carry no @property decorator --
    # presumably both were stripped when this snippet was extracted; confirm
    # against the original file before relying on attribute behavior.
    size = attr.ib(type=str)  # model size tag, e.g. 'tiny'/'base' -- TODO confirm values
    dataset = attr.ib(type=str)  # pretraining dataset identifier
    single_resolution = attr.ib(type=int)  # square input resolution in pixels

    def two_d_resolution(self):
        """Return the resolution as a 'WxH' string, e.g. '224x224'."""
        return f'{self.single_resolution}x{self.single_resolution}'

    def gcs_folder_name(self):
        """Return the GCS folder name encoding size, dataset and resolution."""
        return f'convnext_{self.size}_{self.dataset}_{self.single_resolution}_fe'

    def handle(self):
        """Return the TF-Hub style handle '<owner>/<folder>/1'."""
        return f'sayakpaul/{self.gcs_folder_name()}/1'

    def rel_doc_file_path(self):
        """Return the repo-relative path of the model's markdown doc file."""
        return f'assets/docs/{self.handle()}.md'
def generate_uncertainty_qes(args, question):
    """Estimate answer uncertainty for one question via repeated LLM sampling.

    Queries the model args.num_trails times at nonzero temperature, tallies
    the extracted answers, and returns a record with per-answer occurrence
    counts, entropy, disagreement, and (numeric datasets only) variance.

    NOTE(review): if args.method is neither 'few_shot_cot' nor
    'zero_shot_cot', `prompt` (and `given_prompt`) are never assigned and the
    loop raises NameError -- presumably only those two methods are valid.
    """
    if (args.method == 'few_shot_cot'):
        given_prompt = create_input_prompt(args, True)
    # The 'variance'/'entropy' values are initialized to the *type* `float`
    # as placeholders; they are overwritten with real numbers below.
    if (args.dataset in ('gsm8k', 'asdiv', 'svamp', 'singleeq', 'addsub', 'multiarith')):
        uncertainty_record = {'dataset_idx': question['question_idx'], 'variance': float, 'entropy': float, 'occurrence': {}}
    elif (args.dataset == 'strategyqa'):
        uncertainty_record = {'dataset_idx': question['question_idx'], 'entropy': float, 'occurrence': {'yes': 0, 'no': 0}}
    else:
        uncertainty_record = {'dataset_idx': question['question_idx'], 'entropy': float, 'occurrence': {}}
    # One independent sample per trial ("trail" is a typo kept for fidelity).
    for trail in range(args.num_trails):
        if (args.method == 'few_shot_cot'):
            prompt = (((given_prompt + 'Q: ') + question['question']) + "\nA: Let's think step by step.")
        elif (args.method == 'zero_shot_cot'):
            prompt = (('Q: ' + question['question']) + "\nA: Let's think step by step.")
        prompt_list = [prompt]
        responses = GPT3_request(model=args.model, input_prompt=prompt_list, max_tokens=args.max_length_cot, time_interval=args.api_time_interval, temperature=args.temperature, stop=['Question:', 'Q:'])
        # Zero-shot CoT needs a second call: append the chain-of-thought plus
        # the answer trigger, then ask again for the final short answer.
        if (args.method == 'zero_shot_cot'):
            prompt_list[0] += (responses['choices'][0]['text'] + args.direct_answer_trigger)
            responses = GPT3_request(model=args.model, input_prompt=prompt_list, max_tokens=args.max_length_cot, time_interval=args.api_time_interval, temperature=args.temperature, stop='.')
        pred_ans = answer_extraction(args, responses)
        # Tally the answer; empty extractions count under the NO_SOLUTION key.
        if (pred_ans != ''):
            if (pred_ans in uncertainty_record['occurrence']):
                uncertainty_record['occurrence'][pred_ans] += 1
            else:
                uncertainty_record['occurrence'][pred_ans] = 1
        elif (NO_SOLUTION in uncertainty_record['occurrence']):
            uncertainty_record['occurrence'][NO_SOLUTION] += 1
        else:
            uncertainty_record['occurrence'][NO_SOLUTION] = 1
    # Numeric datasets additionally get the variance of the sampled answers,
    # weighted by occurrence count.
    if (args.dataset in ('gsm8k', 'asdiv', 'svamp', 'singleeq', 'addsub', 'multiarith')):
        ans_list = []
        for (ans, occurs) in uncertainty_record['occurrence'].items():
            for i in range(int(occurs)):
                # NOTE(review): float(ans) raises if NO_SOLUTION was tallied
                # and is not float-parseable -- TODO confirm NO_SOLUTION's value.
                ans_list.append(float(ans))
        uncertainty_record['variance'] = np.var(ans_list)
    # Entropy over the answer-frequency distribution, plus the number of
    # distinct answers as a disagreement measure.
    frequency_list = list(uncertainty_record['occurrence'].values())
    uncertainty_record['entropy'] = entropy(frequency_list)
    uncertainty_record['disagreement'] = len(uncertainty_record['occurrence'])
    return uncertainty_record
class PPO(NPO):
    """Proximal Policy Optimization.

    Thin specialization of NPO that selects the clipped-surrogate policy
    gradient loss and defaults to a first-order optimizer; every argument is
    forwarded unchanged to NPO.
    """

    def __init__(self, env_spec, policy, baseline, scope=None, max_path_length=500, discount=0.99, gae_lambda=1, center_adv=True, positive_adv=False, fixed_horizon=False, pg_loss='surrogate_clip', lr_clip_range=0.01, max_kl_step=0.01, optimizer=None, optimizer_args=None, policy_ent_coeff=0.0, use_softplus_entropy=False, use_neg_logli_entropy=False, stop_entropy_gradient=False, entropy_method='no_entropy', flatten_input=True, name='PPO'):
        # Default to a first-order optimizer with no extra arguments.
        optimizer = FirstOrderOptimizer if optimizer is None else optimizer
        optimizer_args = {} if optimizer_args is None else optimizer_args
        super().__init__(
            env_spec=env_spec,
            policy=policy,
            baseline=baseline,
            scope=scope,
            max_path_length=max_path_length,
            discount=discount,
            gae_lambda=gae_lambda,
            center_adv=center_adv,
            positive_adv=positive_adv,
            fixed_horizon=fixed_horizon,
            pg_loss=pg_loss,
            lr_clip_range=lr_clip_range,
            max_kl_step=max_kl_step,
            optimizer=optimizer,
            optimizer_args=optimizer_args,
            policy_ent_coeff=policy_ent_coeff,
            use_softplus_entropy=use_softplus_entropy,
            use_neg_logli_entropy=use_neg_logli_entropy,
            stop_entropy_gradient=stop_entropy_gradient,
            entropy_method=entropy_method,
            flatten_input=flatten_input,
            name=name,
        )
class ValueFunction(nn.Module):
    """State-value network V(s): an MLP from state_dim to a scalar."""

    def __init__(self, state_dim, hidden_dim=256, n_hidden=2):
        """Build an MLP with n_hidden layers of hidden_dim units and a scalar head."""
        super().__init__()
        layer_sizes = [state_dim] + [hidden_dim] * n_hidden + [1]
        self.v = mlp(layer_sizes, squeeze_output=True)

    def forward(self, state):
        """Return V(state); the trailing singleton dim is squeezed by the MLP."""
        return self.v(state)
def safe_divide(numerator, denominator, name='safe_divide'):
    """Elementwise numerator / denominator, yielding 0 where denominator <= 0.

    NOTE(review): the division is still evaluated everywhere before the
    select, so non-positive denominators may produce inf/nan intermediates
    (masked out by tf.where) -- same behavior as the original.
    """
    is_positive = math_ops.greater(denominator, 0)
    quotient = math_ops.divide(numerator, denominator)
    fallback = tf.zeros_like(numerator)
    return tf.where(is_positive, quotient, fallback, name=name)
class CorefTest(ModelTestCase):
    # End-to-end tests for the coreference model: train/save/load round-trip
    # and decoding predicted antecedents into clusters.

    def setUp(self):
        """Load the coref experiment config and sample CoNLL data fixtures."""
        super(CorefTest, self).setUp()
        self.set_up_model('tests/fixtures/coref/experiment.json', 'tests/fixtures/data/coref/sample.gold_conll')

    def test_coref_model_can_train_save_and_load(self):
        """Smoke test: the model trains, serializes, and reloads without error."""
        self.ensure_model_can_train_save_and_load(self.param_file)

    def test_decode(self):
        """decode() must group spans into clusters via predicted antecedents."""
        # Six candidate spans as (start, end) token indices.
        spans = torch.LongTensor([[1, 2], [3, 4], [3, 7], [5, 6], [14, 56], [17, 80]])
        # Row i lists the span indices that are valid antecedents of span i.
        antecedent_indices = torch.LongTensor([[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0], [2, 1, 0, 0, 0, 0], [3, 2, 1, 0, 0, 0], [4, 3, 2, 1, 0, 0]])
        # NOTE(review): torch.autograd.Variable is a deprecated no-op wrapper
        # in modern PyTorch; kept for fidelity with the original test.
        spans = Variable(spans.unsqueeze(0))
        antecedent_indices = Variable(antecedent_indices)
        # -1 means "no antecedent"; other values index into antecedent_indices.
        predicted_antecedents = torch.LongTensor([(- 1), 0, (- 1), (- 1), 1, 3])
        predicted_antecedents = Variable(predicted_antecedents.unsqueeze(0))
        output_dict = {'top_spans': spans, 'antecedent_indices': antecedent_indices, 'predicted_antecedents': predicted_antecedents}
        output = self.model.decode(output_dict)
        clusters = output['clusters'][0]
        # Expected clustering: span 1 links to 0, span 5 to 3 (via row 5 of
        # antecedent_indices), span 4 to 2.
        gold1 = [(1, 2), (3, 4), (17, 80)]
        gold2 = [(3, 7), (14, 56)]
        assert (len(clusters) == 2)
        assert (gold1 in clusters)
        assert (gold2 in clusters)
def create_pipeline_configuration(DEBUG=False, batch_size=8):
    """Return the 16-stage pipeline-parallel partition config for a T5 model.

    The (generated) ``config`` dict maps each stage id to its partition class,
    its input/output tensor descriptors (shape / dtype / req_grad / is_batched /
    producer / consumers) and its device placement ('cpu' for every stage when
    ``DEBUG`` is true, otherwise one CUDA device per stage). Descriptor shapes
    were recorded with a batch size of 8; the loops at the bottom rewrite the
    batch dimension of every batched shape to the requested ``batch_size``.

    NOTE(review): ``created_by``/``used_by`` of -1 presumably denotes the model
    boundary (external input / final output) — confirm against the runtime.
    """
    config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (T5LayerNorm, Linear, Dropout, StatelessEmbedding, Embedding), 'model_inputs': {'attention_mask': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0, 8]}, 'decoder_attention_mask': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'decoder_input_ids': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'input_ids': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'labels': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [15]}}, 'model_outputs': {'T5ForConditionalGeneration/torch.nn.functional::cross_entropy_6186': {'shape': torch.Size([1]), 'dtype': torch.float32, 'is_batched': False, 'created_by': 15}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_attention_mask': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_input_ids': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___333': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___335': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True,
        'used_by': [8]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___2239': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [8]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 15}, 1: {'stage_cls': Partition1, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___333': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___335': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___600': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___602': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 14}, 2: {'stage_cls': Partition2, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___600': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___602': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___867': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched':
        True, 'used_by': [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___869': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 13}, 3: {'stage_cls': Partition3, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___867': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[9]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___869': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1134': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1136': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 12}, 4: {'stage_cls': Partition4, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1134': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1136': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1401': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad':
        True, 'is_batched': True, 'used_by': [5]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1403': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 'stage_depth': 11}, 5: {'stage_cls': Partition5, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1401': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1403': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1668': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1670': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}}, 'devices': [('cpu' if DEBUG else 'cuda:5')], 'stage_depth': 10}, 6: {'stage_cls': Partition6, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1668': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1670': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1935': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1937': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}},
        'devices': [('cpu' if DEBUG else 'cuda:6')], 'stage_depth': 9}, 7: {'stage_cls': Partition7, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1935': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___1937': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/T5LayerNorm[final_layer_norm]': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [8]}}, 'devices': [('cpu' if DEBUG else 'cuda:7')], 'stage_depth': 8}, 8: {'stage_cls': Partition8, 'inputs': {'attention_mask': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/T5LayerNorm[final_layer_norm]': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 7}, 'T5ForConditionalGeneration/T5Stack[decoder]/Dropout[dropout]': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___2239': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_9': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]': {'shape': torch.Size([8, 320, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]},
        'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2784': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2786': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2788': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [9]}}, 'devices': [('cpu' if DEBUG else 'cuda:8')], 'stage_depth': 7}, 9: {'stage_cls': Partition9, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_9': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2784': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2786': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___2788': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 8}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_10': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3267': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3269': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]},
        'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3271': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [10]}}, 'devices': [('cpu' if DEBUG else 'cuda:9')], 'stage_depth': 6}, 10: {'stage_cls': Partition10, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_10': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3267': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3269': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3271': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 9}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_11': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3750': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3752': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3754': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [11]}}, 'devices': [('cpu' if DEBUG else 'cuda:10')], 'stage_depth': 5}, 11: {'stage_cls': Partition11, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_11': {'shape':
        torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3750': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3752': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___3754': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 10}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_12': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4233': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4235': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4237': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [12]}}, 'devices': [('cpu' if DEBUG else 'cuda:11')], 'stage_depth': 4}, 12: {'stage_cls': Partition12, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_12': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4233': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4235':
        {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4237': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 11}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_13': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4716': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4718': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4720': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [13]}}, 'devices': [('cpu' if DEBUG else 'cuda:12')], 'stage_depth': 3}, 13: {'stage_cls': Partition13, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_13': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4716': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4718': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___4720': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 12}}, 'outputs':
        {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_14': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5199': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5201': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5203': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]': {'shape': torch.Size([8, 8, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}}, 'devices': [('cpu' if DEBUG else 'cuda:13')], 'stage_depth': 2}, 14: {'stage_cls': Partition14, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_14': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5199': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched':
        True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5201': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5203': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]': {'shape': torch.Size([8, 8, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_15': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5682': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5684': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5686': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [15]}}, 'devices': [('cpu' if DEBUG else 'cuda:14')], 'stage_depth': 1}, 15: {'stage_cls': Partition15, 'inputs': {'labels': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_15': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14},
        'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[21]/ModuleList[layer]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]': {'shape': torch.Size([8, 320, 4096]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5682': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5684': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___5686': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 14}}, 'outputs': {'T5ForConditionalGeneration/torch.nn.functional::cross_entropy_6186': {'shape': torch.Size([1]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:15')], 'stage_depth': 0}}}
    # Rewrite the batch dimension of every batched model-level input/output
    # shape from the recorded value (8) to the requested batch_size.
    batch_dim = config['batch_dim']
    for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
        if d['is_batched']:
            shape = d['shape']
            d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
    # Apply the same batch-dimension rewrite to every per-stage descriptor.
    for s in config['stages'].values():
        for d in chain(s['inputs'].values(), s['outputs'].values()):
            if d['is_batched']:
                shape = d['shape']
                d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
    return config
def BaulieuVII_calc(TP, FP, FN, TN):
    """Compute the Baulieu VII distance: (FP + FN) / (n + TP * (TP - 4)^2).

    Follows the library-wide convention of returning the string 'None' when
    the value cannot be computed (e.g. division by zero).
    """
    try:
        n = TP + FP + FN + TN
        denominator = n + TP * (TP - 4) ** 2
        return (FP + FN) / denominator
    except Exception:
        return 'None'
class SignalToNoiseRatioContrastiveLoss(ContrastiveLoss):
    """Contrastive loss that measures embedding pairs with SNRDistance.

    Identical to ContrastiveLoss except that it requires (and defaults to)
    a signal-to-noise-ratio distance metric.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Reject any caller-supplied distance that is not an SNRDistance.
        c_f.assert_distance_type(self, SNRDistance)
    def get_default_distance(self):
        # Used by the base class when no distance is passed in.
        return SNRDistance()
class Transition(nn.Module):
    """DenseNet-style transition layer: BN -> PDELU -> 1x1 conv -> 2x2 average pool."""

    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        # Attribute names kept stable so state_dicts remain compatible.
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)
        self.pdelu = PDELU()

    def forward(self, x):
        # Normalize and activate, then reduce channels with the 1x1 convolution.
        activated = self.pdelu(self.bn(x))
        reduced = self.conv(activated)
        # Halve the spatial resolution.
        return F.avg_pool2d(reduced, 2)
# BUG FIX(review): the decorator line was a bare call (`_numpy_output(...)`)
# with no `@`, so it ran for nothing and left the test undecorated. Restored.
@_numpy_output(positive=True, check_dtype=True)
def test_ufunc_log2_u(A: dace.uint32[10]):
    """Check np.log2 on a dace uint32 array against the NumPy reference."""
    return np.log2(A)
# BUG FIX(review): the three lines above the def were bare `.pure` / `.gpu` /
# `.parametrize(...)` attribute accesses — a syntax error. The `@pytest.mark`
# prefixes were evidently lost; restored here.
@pytest.mark.pure
@pytest.mark.gpu
@pytest.mark.parametrize('bn_impl', ['cuDNN', 'pure'])
def test_mbconv(bn_impl, use_cpp_dispatcher):
    """Compare a DaCe-compiled EfficientNet MBConv block against the PyTorch one.

    Verifies parameter equality after state_dict loading, output closeness
    after a forward pass, and that parameters still agree afterwards
    (batch-norm step counters excluded).
    """
    with change_default(donnx.ONNXConv, 'cuDNN'), change_default(donnx.ONNXBatchNormalization, bn_impl):
        # Identical inputs for both models; created without autograd tracking.
        with torch.no_grad():
            dace_inputs = torch.rand(8, 32, 224, 224).cuda()
            torch_inputs = torch.clone(dace_inputs)
        (block_params, global_params) = get_model_params('efficientnet-b0', {})
        torch_model = MBConvBlock(block_params[0], global_params).cuda()
        torch_model.set_swish(memory_efficient=False)
        dace_model = MBConvBlock(block_params[0], global_params).cuda()
        dace_model.set_swish(memory_efficient=False)
        dace_model = DaceModule(dace_model, training=True, compile_torch_extension=use_cpp_dispatcher)
        dace_model.model.load_state_dict(torch_model.state_dict())
        # Sanity check: both models expose identical parameters after loading.
        for ((dace_name, dace_value), (torch_name, value)) in zip(dace_model.model.state_dict().items(), torch_model.state_dict().items()):
            assert (dace_name == torch_name)
            torch_tensors_close(dace_name, value, dace_value)
        CudnnConvolution.default_algorithm = 'gemm'
        dace_output = dace_model(dace_inputs)
        torch_output = torch_model(torch_inputs)
        torch_tensors_close('output', torch_output, dace_output, rtol=0.001, atol=0.001)
        # Parameters must still match after the forward pass; BN's
        # num_batches_tracked counters are allowed to differ.
        for ((dace_name, dace_value), (torch_name, value)) in zip(dace_model.model.state_dict().items(), torch_model.state_dict().items()):
            assert (dace_name == torch_name)
            if ('num_batches_tracked' in dace_name):
                continue
            torch_tensors_close(dace_name, value, dace_value)
def simGetStackInt32Value(stackHandle):
    """Read an int32 from the given stack handle via the C API and return it as a Python int.

    Raises through _check_return if the underlying call reports an error code.
    """
    out_ptr = ffi.new('int *')
    status = lib.simGetStackInt32Value(stackHandle, out_ptr)
    _check_return(status)
    return out_ptr[0]
def test_gpu_schedule_scalar_autodetect_2():
    """A GPU_Global-annotated array argument should force GPU_Device schedules.

    BUG FIX(review): the annotation read ``dace.float32[(10, 10)]
    dace.StorageType.GPU_Global`` — invalid syntax. The ``@`` operator that
    attaches a storage type to a dace data descriptor was missing, as was the
    ``@dace.program`` decorator required for ``to_sdfg()``.
    """
    @dace.program
    def add(a: dace.float32[10, 10] @ dace.StorageType.GPU_Global, b: dace.float32):
        return (a + b)

    sdfg = add.to_sdfg()
    set_default_schedule_and_storage_types(sdfg, None)
    # Every library node / map entry must have been scheduled on the GPU.
    for (node, _) in sdfg.all_nodes_recursive():
        if isinstance(node, (dace.nodes.LibraryNode, dace.nodes.MapEntry)):
            assert (node.schedule == dace.ScheduleType.GPU_Device)
def last_relevant_time_slice(output, sequence_length):
    """Gather, per batch row, the output at timestep ``sequence_length - 1``.

    Supports rank-3 (batch, time, features) and rank-2 (batch, time) tensors;
    any other rank raises ValueError.
    """
    shape = output.get_shape()
    rank = len(shape)
    if rank not in (2, 3):
        raise ValueError('Illegal shape type {0}'.format(shape))
    batch_size = tf.shape(output)[0]
    max_length = tf.shape(output)[1]
    # Position of each row's last relevant timestep inside the flattened tensor.
    index = (tf.range(0, batch_size) * max_length) + tf.subtract(sequence_length, 1)
    if rank == 3:
        out_size = int(output.get_shape()[2])
        flat = tf.reshape(output, [-1, out_size])
    else:
        flat = tf.reshape(output, [-1])
    return tf.gather(flat, index)
def eval(args):
    """Run segmentation inference and save one palettized PNG mask per image.

    Loads the YAML config named by ``args.setting``, builds the dataset and
    model classes from their dotted paths, restores weights from
    ``args.model_path``, and writes outputs into ``args.imgs_savedir``.

    CLEANUP(review): removed a large block of commented-out result-saving code
    that was kept at the end of the function as a bare string literal (a no-op
    statement).
    """
    cfg = CommonConfiguration.from_yaml(args.setting)
    dictionary = CommonConfiguration.from_yaml(cfg.DATASET.DICTIONARY)
    # Take the value of the first top-level entry.
    # NOTE(review): assumes .items() returns an iterator here — on a plain
    # dict, next(d.items()) would raise TypeError; confirm CommonConfiguration.
    dictionary = next(dictionary.items())[1]
    prefix = 'infer'
    transforms = prepare_transforms_seg()
    # Resolve dotted paths like "pkg.mod.ClassName" to class objects.
    (*dataset_str_parts, dataset_class_str) = cfg.DATASET.CLASS.split('.')
    dataset_class = getattr(importlib.import_module('.'.join(dataset_str_parts)), dataset_class_str)
    dataset = dataset_class(data_cfg=cfg.DATASET[prefix.upper()], dictionary=dictionary, transform=transforms[prefix], stage=prefix)
    dataloader = DataLoader(dataset, batch_size=2, num_workers=cfg.NUM_WORKERS, shuffle=False, pin_memory=True)
    (*model_mod_str_parts, model_class_str) = cfg.USE_MODEL.split('.')
    model_class = getattr(importlib.import_module('.'.join(model_mod_str_parts)), model_class_str)
    model_ft = model_class(dictionary=dictionary)
    load_checkpoint(args.model_path, model_ft)
    if torch.cuda.is_available():
        model_ft = model_ft.cuda()
    model_ft.eval()
    for (imgs, imageids) in tqdm(dataloader):
        if cfg.HALF:
            imgs = imgs.half()
        with torch.no_grad():
            # NOTE(review): .cuda() is called unconditionally here even though
            # the model move above is guarded — fails on CPU-only hosts; verify.
            imgs = imgs.cuda()
            masks = model_ft(imgs, None, prefix)
        for (mask, imageid) in zip(masks, imageids):
            out_img = Image.fromarray(mask.astype(np.uint8))
            out_img.putpalette(cityspallete)
            outname = (os.path.splitext(os.path.split(imageid)[(- 1)])[0] + '.png')
            out_img.save(os.path.join(args.imgs_savedir, outname))
def trickledown(array, i, size):
    """Restore the min-max heap property below index ``i``.

    Even levels hold minima and odd levels hold maxima, so dispatch to the
    matching sift-down routine based on the level's parity.
    """
    if level(i) % 2:
        trickledownmax(array, i, size)
    else:
        trickledownmin(array, i, size)
class LegacyFairseqLRScheduler(FairseqLRScheduler):
    """Base for LR schedulers configured via an argparse ``Namespace``.

    NOTE(review): does not call ``super().__init__()`` — presumably because the
    parent expects a dataclass config rather than ``args``; confirm upstream.
    """
    def __init__(self, args: Namespace, optimizer):
        # Schedulers drive fairseq's optimizer wrapper, not a raw torch optimizer.
        if (not isinstance(optimizer, FairseqOptimizer)):
            raise ValueError('optimizer must be an instance of FairseqOptimizer')
        self.args = args
        self.optimizer = optimizer
        # Presumably tracks the best validation metric seen so far; None until set.
        self.best = None
def make_env(env_name, terminates=True, **kwargs):
    """Construct the environment named ``env_name`` and return ``(env, env_infos)``.

    Imports are deliberately branch-local so only the selected environment's
    module is ever loaded. ``env_infos['mujoco']`` records whether the chosen
    environment is MuJoCo-based. Extra ``kwargs`` are forwarded to the
    environment constructor; some branches inject defaults first. Continuous
    action spaces are normalized, ``terminates=False`` wraps the env to never
    terminate, and a ``cp_info`` kwarg wraps it as a child-policy env.

    Raises NameError if ``env_name`` is not recognized.
    """
    env = None
    base_env = None
    env_infos = dict()
    if (env_name == 'maze'):
        from lifelong_rl.envs.environments.maze_env import MazeEnv
        base_env = MazeEnv
        env_infos['mujoco'] = False
    elif (env_name == 'half_cheetah'):
        from lifelong_rl.envs.environments.half_cheetah import HalfCheetahEnv
        base_env = HalfCheetahEnv
        env_infos['mujoco'] = True
        kwargs.update(expose_all_qpos=True)
    elif (env_name == 'ant-v3'):
        from lifelong_rl.envs.environments.ant_v3 import AntEnv
        base_env = AntEnv
        env_infos['mujoco'] = True
        kwargs.update(exclude_current_positions_from_observation=False)
        # Default contact forces off, but let the caller override.
        if ('exclude_contact_forces' not in kwargs):
            kwargs.update(exclude_contact_forces=True)
    elif (env_name == 'hopper-v3'):
        from lifelong_rl.envs.environments.hopper_v3 import HopperEnv
        base_env = HopperEnv
        env_infos['mujoco'] = True
        kwargs.update(exclude_current_positions_from_observation=False)
    elif (env_name == 'walker2d-v3'):
        from lifelong_rl.envs.environments.walker2d_v3 import Walker2dEnv
        base_env = Walker2dEnv
        env_infos['mujoco'] = True
        kwargs.update(exclude_current_positions_from_observation=False)
    elif (env_name == 'ip'):
        from lifelong_rl.envs.environments.inverted_pendulum import InvertedPendulumEnv
        base_env = InvertedPendulumEnv
        env_infos['mujoco'] = True
    elif (env_name == 'idp'):
        from lifelong_rl.envs.environments.inverted_double_pendulum import InvertedDoublePendulumEnv
        base_env = InvertedDoublePendulumEnv
        env_infos['mujoco'] = True
    elif (env_name == 'reacher'):
        from lifelong_rl.envs.environments.reacher import ReacherEnv
        base_env = ReacherEnv
        env_infos['mujoco'] = True
    if ((env is None) and (base_env is None)):
        raise NameError('env_name not recognized')
    # cp_info must be popped before constructing base_env so it is not
    # forwarded as a constructor kwarg.
    if ('cp_info' in kwargs):
        cp_info = kwargs.pop('cp_info')
    else:
        cp_info = None
    if (env is None):
        env = base_env(**kwargs)
    # Wrap continuous-action environments so actions are normalized to a box.
    if (not isinstance(env.action_space, gym.spaces.Discrete)):
        env = NormalizedBoxEnv(env)
    if (not terminates):
        env = NonTerminatingEnv(env)
    if (cp_info is not None):
        env = ChildPolicyEnv(env, **cp_info)
    return (env, env_infos)
def get_trained_data_separated_model(args, id, local_train_loader, local_test_loader, test_loader, base_net=None):
    """Train a model (fresh or copied from base_net) on one client's data.

    Evaluates on both the global test set and the client-local test set
    before training and after every epoch, accumulating curves in log_dict.

    :return: (trained network, final global accuracy, final local accuracy)
    """
    # cudnn autotuning is disabled (matches the original behavior).
    torch.backends.cudnn.enabled = False
    if base_net is not None:
        network = copy.deepcopy(base_net)
    else:
        network = get_model_from_name(args, idx=id)
    optimizer = optim.SGD(network.parameters(), lr=args.learning_rate, momentum=args.momentum)
    if args.gpu_id != -1:
        network = network.cuda(args.gpu_id)
    log_dict = {
        'train_losses': [],
        'train_counter': [],
        'local_test_losses': [],
        'test_losses': [],
    }
    # Baseline accuracies before any local training.
    acc = test(args, network, test_loader, log_dict)
    local_acc = test(args, network, local_test_loader, log_dict, is_local=True)
    for epoch in range(1, args.n_epochs + 1):
        train(args, network, optimizer, local_train_loader, log_dict, epoch, model_id=str(id))
        acc = test(args, network, test_loader, log_dict)
        local_acc = test(args, network, local_test_loader, log_dict, is_local=True)
    return (network, acc, local_acc)
def test_g2p():
    """Smoke-test grapheme-to-phoneme conversion on a fixed sentence."""
    converter = G2P()
    char_sent = 'HELLO WORLD'
    # Encode an all-caps character sentence into its phoneme sequence.
    phonemes = converter.encode(char_sent)
    logging.info(phonemes)
def ParseSynset(canon):
    """Build a Synset from the first entry of *canon*, or None if empty.

    :param canon: sequence of dicts carrying 'synset_name' and
        'synset_definition' keys; only the first entry is used
    """
    if not canon:
        return None
    first = canon[0]
    return Synset(first['synset_name'], first['synset_definition'])
class TFBertForSequenceClassification(metaclass=DummyObject):
    # Placeholder emitted when TensorFlow is not installed: instantiating it
    # calls requires_backends, which raises an informative "requires the tf
    # backend" error instead of a bare ImportError.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def train(train_list, model, criterion, optimizer, epoch):
    # Run one training epoch over the samples in train_list.
    # Tracks running averages for loss, per-batch compute time and
    # data-loading time; prints progress every args.print_freq batches
    # (uses the module-level `args`).
    losses = AverageMeter()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    # The loader is rebuilt each epoch so shuffling can depend on model.seen.
    # NOTE(review): batch_size is passed to both the dataset and the
    # DataLoader — confirm the dataset expects its own batch_size argument.
    train_loader = torch.utils.data.DataLoader(dataset.listDataset(train_list, shuffle=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]), train=True, seen=model.seen, batch_size=args.batch_size, num_workers=args.workers), batch_size=args.batch_size)
    print(('epoch %d, processed %d samples, lr %.10f' % (epoch, (epoch * len(train_loader.dataset)), args.lr)))
    model.train()
    end = time.time()
    for (i, (img, target)) in enumerate(train_loader):
        # Time spent waiting on data since the end of the previous step.
        data_time.update((time.time() - end))
        img = img.cuda()
        img = Variable(img)
        output = model(img)
        # NOTE(review): unsqueeze(0) adds a leading batch dim to the target,
        # which assumes an effective batch size of 1 — confirm.
        target = target.type(torch.FloatTensor).unsqueeze(0).cuda()
        target = Variable(target)
        loss = criterion(output, target)
        losses.update(loss.item(), img.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update((time.time() - end))
        end = time.time()
        if ((i % args.print_freq) == 0):
            print('Epoch: [{0}][{1}/{2}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tData {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time, loss=losses))
class SplitByNode():
    """Shard a list of URLs across distributed nodes.

    When torch.distributed is initialized, each node keeps every size-th
    URL starting at its rank; otherwise the URL list is returned unchanged.
    """

    def __init__(self, group=None):
        """Determine this process's rank/world-size, defaulting to -1/-1
        ("not distributed") when torch.distributed is unavailable or
        uninitialized.

        :param group: optional process group; defaults to torch's default
            process group (accessed via a private attribute, best-effort).
        """
        self.rank = -1
        self.size = -1
        try:
            import torch
            if (not torch.distributed.is_available()) or (not torch.distributed.is_initialized()):
                return
        except Exception as e:
            print(e)
            return
        if group is None:
            # Touches a private torch attribute, so failures are ignored on
            # purpose. Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            try:
                import torch.distributed.distributed_c10d
                group = torch.distributed.distributed_c10d._default_pg
            except Exception:
                pass
        self.rank = torch.distributed.get_rank(group=group)
        self.size = torch.distributed.get_world_size(group=group)

    def __call__(self, urls):
        """Return the subset of *urls* owned by this node."""
        urls = list(urls)  # materialize so we can len() and slice
        if self.size > 1:
            import socket
            # Record identity info for debugging/telemetry in gopen.
            gopen.info['rank'] = self.rank
            gopen.info['size'] = self.size
            gopen.info['host'] = socket.gethostname()
            gopen.info['pid'] = os.getpid()
            if self.rank == 0 and len(urls) < self.size:
                warnings.warn(f'world_size {self.size} > num_shards {len(urls)}')
            return urls[self.rank::self.size]
        else:
            return urls
class DatasetISIC(Dataset):
    # Few-shot segmentation dataset over ISIC 2018 skin-lesion data.
    # Each episode pairs one query image with `shot` support images drawn
    # from the same class; masks come from the Task1 ground-truth PNGs.
    def __init__(self, datapath, fold, transform, split, shot, num=600):
        # `fold` is accepted for interface parity but not stored/used here.
        self.split = split
        self.benchmark = 'isic'
        self.shot = shot
        # Nominal dataset length (episodes per epoch).
        self.num = num
        self.base_path = os.path.join(datapath, 'ISIC')
        # Class folders on disk are named '1', '2', '3'.
        self.categories = ['1', '2', '3']
        self.class_ids = range(0, 3)
        self.img_metadata_classwise = self.build_img_metadata_classwise()
        self.transform = transform
    def __len__(self):
        return self.num
    def __getitem__(self, idx):
        # Sample an episode, load images/masks, and resize masks to the
        # transformed image resolution (nearest keeps labels binary).
        (query_name, support_names, class_sample) = self.sample_episode(idx)
        (query_img, query_mask, support_imgs, support_masks) = self.load_frame(query_name, support_names)
        query_img = self.transform(query_img)
        query_mask = F.interpolate(query_mask.unsqueeze(0).unsqueeze(0).float(), query_img.size()[(- 2):], mode='nearest').squeeze()
        support_imgs = torch.stack([self.transform(support_img) for support_img in support_imgs])
        support_masks_tmp = []
        for smask in support_masks:
            smask = F.interpolate(smask.unsqueeze(0).unsqueeze(0).float(), support_imgs.size()[(- 2):], mode='nearest').squeeze()
            support_masks_tmp.append(smask)
        support_masks = torch.stack(support_masks_tmp)
        batch = {'query_img': query_img, 'query_mask': query_mask, 'query_name': query_name, 'support_imgs': support_imgs, 'support_masks': support_masks, 'support_names': support_names, 'class_id': torch.tensor(class_sample)}
        return batch
    def load_frame(self, query_name, support_names):
        # Load RGB images, then derive the matching ground-truth mask paths
        # ('<id>_segmentation.png' under the Task1 ground-truth folder).
        query_img = Image.open(query_name).convert('RGB')
        support_imgs = [Image.open(name).convert('RGB') for name in support_names]
        query_id = query_name.split('/')[(- 1)].split('.')[0]
        ann_path = os.path.join(self.base_path, 'ISIC2018_Task1_Training_GroundTruth')
        query_name = (os.path.join(ann_path, query_id) + '_segmentation.png')
        support_ids = [name.split('/')[(- 1)].split('.')[0] for name in support_names]
        support_names = [(os.path.join(ann_path, sid) + '_segmentation.png') for (name, sid) in zip(support_names, support_ids)]
        query_mask = self.read_mask(query_name)
        support_masks = [self.read_mask(name) for name in support_names]
        return (query_img, query_mask, support_imgs, support_masks)
    def read_mask(self, img_name):
        # Binarize the grayscale mask at threshold 128 -> {0, 1}.
        mask = torch.tensor(np.array(Image.open(img_name).convert('L')))
        mask[(mask < 128)] = 0
        mask[(mask >= 128)] = 1
        return mask
    def sample_episode(self, idx):
        # Class is chosen round-robin by episode index; query and supports
        # are drawn at random from that class. Supports differ from the
        # query, but NOTE(review): duplicate support names are possible
        # since each draw is independent — confirm that is intended.
        class_id = (idx % len(self.class_ids))
        class_sample = self.categories[class_id]
        query_name = np.random.choice(self.img_metadata_classwise[class_sample], 1, replace=False)[0]
        support_names = []
        while True:
            support_name = np.random.choice(self.img_metadata_classwise[class_sample], 1, replace=False)[0]
            if (query_name != support_name):
                support_names.append(support_name)
            if (len(support_names) == self.shot):
                break
        return (query_name, support_names, class_id)
    def build_img_metadata(self):
        # Flat list of all .jpg training images across the three classes.
        img_metadata = []
        for cat in self.categories:
            os.path.join(self.base_path, cat)  # NOTE(review): result discarded — dead statement
            img_paths = sorted([path for path in glob.glob(('%s/*' % os.path.join(self.base_path, 'ISIC2018_Task1-2_Training_Input', cat)))])
            for img_path in img_paths:
                if (os.path.basename(img_path).split('.')[1] == 'jpg'):
                    img_metadata.append(img_path)
        return img_metadata
    def build_img_metadata_classwise(self):
        # Map each class name to its list of .jpg training image paths.
        img_metadata_classwise = {}
        for cat in self.categories:
            img_metadata_classwise[cat] = []
        for cat in self.categories:
            img_paths = sorted([path for path in glob.glob(('%s/*' % os.path.join(self.base_path, 'ISIC2018_Task1-2_Training_Input', cat)))])
            for img_path in img_paths:
                if (os.path.basename(img_path).split('.')[1] == 'jpg'):
                    img_metadata_classwise[cat] += [img_path]
        return img_metadata_classwise
def prepare_student_data(dataset, nb_teachers, save=False):
    # Build the student's training data for PATE-style training: the first
    # FLAGS.stdnt_share test samples are labeled with noisy-max aggregated
    # teacher predictions; the remaining test samples become the student
    # test set.
    # :param dataset: one of 'svhn', 'cifar10', 'mnist'
    # :param nb_teachers: size of the teacher ensemble
    # :param save: when True, also dump clean votes / teacher labels to .npy
    # :return: (stdnt_data, stdnt_labels, stdnt_test_data, stdnt_test_labels),
    #          or False when the dataset flag is unrecognized
    assert input.create_dir_if_needed(FLAGS.train_dir)
    if (dataset == 'svhn'):
        (test_data, test_labels) = input.ld_svhn(test_only=True)
    elif (dataset == 'cifar10'):
        (test_data, test_labels) = input.ld_cifar10(test_only=True)
    elif (dataset == 'mnist'):
        (test_data, test_labels) = input.ld_mnist(test_only=True)
    else:
        print('Check value of dataset flag')
        return False
    # The student may only use a strict prefix of the test set for training.
    assert (FLAGS.stdnt_share < len(test_data))
    stdnt_data = test_data[:FLAGS.stdnt_share]
    teachers_preds = ensemble_preds(dataset, nb_teachers, stdnt_data)
    # Aggregate teacher votes with Laplacian noise (differential privacy).
    if (not save):
        stdnt_labels = aggregation.noisy_max(teachers_preds, FLAGS.lap_scale)
    else:
        (stdnt_labels, clean_votes, labels_for_dump) = aggregation.noisy_max(teachers_preds, FLAGS.lap_scale, return_clean_votes=True)
        filepath = (((((((FLAGS.data_dir + '/') + str(dataset)) + '_') + str(nb_teachers)) + '_student_clean_votes_lap_') + str(FLAGS.lap_scale)) + '.npy')
        filepath_labels = (((((((FLAGS.data_dir + '/') + str(dataset)) + '_') + str(nb_teachers)) + '_teachers_labels_lap_') + str(FLAGS.lap_scale)) + '.npy')
        with tf.gfile.Open(filepath, mode='w') as file_obj:
            np.save(file_obj, clean_votes)
        with tf.gfile.Open(filepath_labels, mode='w') as file_obj:
            np.save(file_obj, labels_for_dump)
    # Report how well the (noisy) aggregated labels match ground truth.
    ac_ag_labels = metrics.accuracy(stdnt_labels, test_labels[:FLAGS.stdnt_share])
    print(('Accuracy of the aggregated labels: ' + str(ac_ag_labels)))
    stdnt_test_data = test_data[FLAGS.stdnt_share:]
    stdnt_test_labels = test_labels[FLAGS.stdnt_share:]
    if save:
        filepath = (((((((FLAGS.data_dir + '/') + str(dataset)) + '_') + str(nb_teachers)) + '_student_labels_lap_') + str(FLAGS.lap_scale)) + '.npy')
        with tf.gfile.Open(filepath, mode='w') as file_obj:
            np.save(file_obj, stdnt_labels)
    return (stdnt_data, stdnt_labels, stdnt_test_data, stdnt_test_labels)
def example_hin_random(feature_size_by_type=None, nodes_by_type={}, n_isolates_by_type={}, edges_by_type={}):
    """Generate a random heterogeneous (HIN) StellarGraph for testing.

    Nodes of each type are created up front; random edges are drawn only
    between the non-isolate prefix of each endpoint type, and the whole
    graph is re-sampled until the number of isolated nodes per type matches
    n_isolates_by_type exactly.

    :param feature_size_by_type: optional dict type -> feature length; when
        given, each node gets a constant integer feature vector
    :param nodes_by_type: dict type -> node count (read-only default)
    :param n_isolates_by_type: dict type -> required number of isolates;
        must have a key for every node type (read during validation)
    :param edges_by_type: dict (type1, type2) -> edge count
    :return: (StellarGraph, dict type -> node id list)
    """
    check_isolates = False
    while not check_isolates:
        G = nx.Graph()
        node_dict = {}
        for nt in nodes_by_type:
            nodes = ['{}_{}'.format(nt, ii) for ii in range(nodes_by_type[nt])]
            node_dict[nt] = nodes
            G.add_nodes_from(nodes, label=nt)
        for (nt1, nt2) in edges_by_type:
            nodes1 = node_dict[nt1]
            nodes2 = node_dict[nt2]
            niso1 = n_isolates_by_type.get(nt1, 0)
            niso2 = n_isolates_by_type.get(nt2, 0)
            # Keep the last `niso` nodes of each type edge-free so they can
            # remain isolates.
            nodes1 = nodes1[:-niso1] if niso1 > 0 else nodes1
            # BUG FIX: the fallback previously read `nodes1`, so edges whose
            # second endpoint type had no isolates were wired to nodes of
            # the *first* type instead of the second.
            nodes2 = nodes2[:-niso2] if niso2 > 0 else nodes2
            edges = [(random.choice(nodes1), random.choice(nodes2)) for _ in range(edges_by_type[(nt1, nt2)])]
            G.add_edges_from(edges, label='{}_{}'.format(nt1, nt2))
        # Accept the sample only if every type has exactly the requested
        # number of degree-zero nodes.
        check_isolates = all(
            sum((deg[1] == 0) for deg in nx.degree(G, nodes)) == n_isolates_by_type[nt]
            for (nt, nodes) in node_dict.items()
        )
    if feature_size_by_type is not None:
        nt_jj = 0
        for (nt, nodes) in node_dict.items():
            for (ii, n) in enumerate(nodes):
                # Deterministic constant features: node ii of the jj-th type
                # gets value ii + 10*jj replicated feature_size times.
                G.nodes[n]['feature'] = (ii + (10 * nt_jj)) * np.ones(feature_size_by_type[nt], dtype='int')
            nt_jj += 1
        G = StellarGraph.from_networkx(G, node_features='feature')
    else:
        G = StellarGraph.from_networkx(G)
    return (G, node_dict)
class MyLMHead(torch.nn.Module):
    """LM head restricted to a sub-vocabulary.

    Builds a bias-free linear layer whose i-th output row is copied from
    row mapping[i] of the original lm_head's weight, so logits are produced
    only for the mapped token ids.
    """

    def __init__(self, lm_head, mapping):
        """
        :param lm_head: original linear LM head (provides in_features and weights)
        :param mapping: dict/sequence of new index -> original vocab index
        """
        super().__init__()
        self.my_lm_head = torch.nn.Linear(lm_head.in_features, len(mapping), bias=False)
        indices = [mapping[i] for i in range(len(mapping))]
        init_weight = lm_head.state_dict()['weight'][indices]
        # Use the public load_state_dict API. The previous code called the
        # private _load_from_state_dict hook with hand-filled internal
        # bookkeeping arguments, which is fragile across torch releases.
        self.my_lm_head.load_state_dict({'weight': init_weight})

    def forward(self, input):
        """Project hidden states to logits over the reduced vocabulary."""
        return self.my_lm_head(input)
def freeze_BERT_parameters(model: BertForSequenceClassification, verbose: bool=True) -> None:
    """Freeze the embedding layer and encoder layers 0-9 of a BERT classifier.

    Layers 10-11, the pooler, and the classification head stay trainable.

    :param model: BertForSequenceClassification instance, modified in place
    :param verbose: when True, print the count and names of trainable params
    :raises TypeError: if model is not a BertForSequenceClassification
    """
    if not isinstance(model, BertForSequenceClassification):
        raise TypeError
    frozen_prefixes = ['bert.embeddings.'] + ['bert.encoder.layer.%d.' % i for i in range(10)]
    for name, param in model.named_parameters():
        # Substring match, same as the original parameter-name filter.
        if any(prefix in name for prefix in frozen_prefixes):
            param.requires_grad = False
    if verbose is True:
        trainable = [(n, p) for (n, p) in model.named_parameters() if p.requires_grad]
        num_trainable_params = sum(p.numel() for (_, p) in trainable)
        trainable_param_names = [n for (n, _) in trainable]
        print(f"Params Trainable: {num_trainable_params}\n" + "\n".join(trainable_param_names))
class SingleFileSanitizedNames(SingleFileSnapshotExtension):
    # Snapshot extension writing text snapshots whose file names are
    # sanitized for the filesystem: bracket/colon characters become '__',
    # and commas/quotes are dropped.
    _write_mode = WriteMode.TEXT
    _file_extension = 'txt'
    # NOTE(review): the parameter is named `cls` but there is no
    # @classmethod decorator — Python will bind the instance as `cls`.
    # Confirm whether a @classmethod decorator was lost.
    def get_snapshot_name(cls, *, test_location: 'PyTestLocation', index: 'SnapshotIndex') -> str:
        original_name = SingleFileSnapshotExtension.get_snapshot_name(test_location=test_location, index=index)
        # Characters replaced by a double underscore: : [ ] { }
        double_under = '[:\\[\\]{}]'
        # Characters removed entirely: , " '
        no_space = '[,"\\\']'
        name = re.sub(double_under, '__', original_name)
        name = re.sub(no_space, '', name)
        return f'{name}'
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None, color=False, resize=None):
    """Load LFW face pairs described by an index file.

    Each index line names either a matched pair ("name idx1 idx2") or a
    mismatched pair ("name1 idx1 name2 idx2"); targets are 1 for "same
    person" and 0 for "different persons".

    :return: (pairs array of shape (n_pairs, 2, ...), target array,
        target_names array)
    """
    with open(index_file_path, 'rb') as index_file:
        split_lines = [ln.decode().strip().split('\t') for ln in index_file]
    # Header/short lines carry fewer than three fields; skip them.
    pair_specs = [fields for fields in split_lines if len(fields) > 2]
    n_pairs = len(pair_specs)
    target = np.zeros(n_pairs, dtype=int)
    file_paths = []
    for i, components in enumerate(pair_specs):
        if len(components) == 3:
            # Same person: both images come from one folder.
            target[i] = 1
            pair = ((components[0], int(components[1]) - 1),
                    (components[0], int(components[2]) - 1))
        elif len(components) == 4:
            # Different persons: one image from each folder.
            target[i] = 0
            pair = ((components[0], int(components[1]) - 1),
                    (components[2], int(components[3]) - 1))
        else:
            raise ValueError('invalid line %d: %r' % (i + 1, components))
        for name, idx in pair:
            try:
                person_folder = join(data_folder_path, name)
            except TypeError:
                # bytes/str mismatch: decode the name explicitly.
                person_folder = join(data_folder_path, str(name, 'UTF-8'))
            filenames = sorted(listdir(person_folder))
            file_paths.append(join(person_folder, filenames[idx]))
    pairs = _load_imgs(file_paths, slice_, color, resize)
    # Regroup the flat image stack into (n_pairs, 2, ...) in place.
    shape = list(pairs.shape)
    n_faces = shape.pop(0)
    shape.insert(0, 2)
    shape.insert(0, n_faces // 2)
    pairs.shape = shape
    return (pairs, target, np.array(['Different persons', 'Same person']))
def get_slide_prob_label(csv_file):
    """Aggregate patch-level predictions in *csv_file* into slide-level calls.

    Rows must have 5 columns: "<slide>_<patch>", label, _, p(non-DLBCL),
    p(DLBCL). Per-slide class probabilities are the geometric means of the
    patch probabilities; the larger mean decides the predicted label
    (1 = DLBCL, 0 = non-DLBCL).

    :return: (slide ids, [[p_non, p_dlbcl], ...], true labels, predicted labels)
    """
    pred_corpus = {}
    label_corpus = {}
    slide_id_list = []
    with open(csv_file, 'r') as f:
        for row in csv.reader(f):
            if len(row) != 5:
                continue
            slide_id = row[0].split('_')[0]
            if slide_id not in pred_corpus:
                pred_corpus[slide_id] = []
                # The slide label is taken from its first patch row.
                label_corpus[slide_id] = int(row[1])
            pred_corpus[slide_id].append([float(row[3]), float(row[4])])
            if slide_id not in slide_id_list:
                slide_id_list.append(slide_id)
    slide_prob = []
    true_label_list = []
    pred_label_list = []
    for slide_id in slide_id_list:
        probs = pred_corpus[slide_id]
        bag_num = len(probs)
        # Geometric mean via the mean of logs, computed per class.
        log_sum_non = sum(np.log(float(p[0])) for p in probs)
        log_sum_dlbcl = sum(np.log(float(p[1])) for p in probs)
        DLBCL_prob = np.exp(log_sum_dlbcl / bag_num)
        non_DLBCL_prob = np.exp(log_sum_non / bag_num)
        slide_prob.append([non_DLBCL_prob, DLBCL_prob])
        true_label_list.append(label_corpus[slide_id])
        pred_label_list.append(1 if DLBCL_prob > non_DLBCL_prob else 0)
    return (slide_id_list, slide_prob, true_label_list, pred_label_list)
class FC_Q(nn.Module):
    """Fully-connected Q-network: state -> one Q-value per discrete action.

    Two hidden layers of 256 ReLU units followed by a linear output head.
    """

    def __init__(self, state_dim, num_actions):
        super(FC_Q, self).__init__()
        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, num_actions)

    def forward(self, state):
        """Return Q-values of shape (..., num_actions) for *state*."""
        hidden = F.relu(self.l1(state))
        hidden = F.relu(self.l2(hidden))
        return self.l3(hidden)
class ASR(sb.Brain):
    # SpeechBrain recipe brain for Whisper-based ASR with a prompt pool.
    def compute_forward(self, batch, stage):
        # Encode audio with Whisper, condition the features via the prompt
        # pool, and compute teacher-forced decoder logits; at eval stages
        # also run autoregressive generation for WER/CER scoring.
        batch = batch.to(self.device)
        (wavs, wav_lens) = batch.sig
        (bos_tokens, _) = batch.tokens_bos
        if self.hparams.gradient_checkpointing:
            # Checkpointing needs a grad-requiring input so the encoder is
            # recomputed in backward instead of storing activations.
            wavs.requires_grad_()
            enc_out = torch.utils.checkpoint.checkpoint(self.modules.whisper.forward_encoder, wavs)
            enc_out = torch.utils.checkpoint.checkpoint(self.modules.prompt_pool, enc_out, self.hparams.forced_decoder_locale)
            (logits, _) = torch.utils.checkpoint.checkpoint(self.modules.whisper.forward_decoder, enc_out, bos_tokens)
        else:
            enc_out = self.modules.whisper.forward_encoder(wavs)
            enc_out = self.modules.prompt_pool(enc_out, self.hparams.forced_decoder_locale)
            (logits, _) = self.modules.whisper.forward_decoder(enc_out, bos_tokens)
        hyps = None
        if (stage != sb.Stage.TRAIN):
            # Unknown locales fall back to the first base locale for the
            # forced decoder prompt during generation.
            locale = self.hparams.forced_decoder_locale
            if (locale not in self.hparams.base_locales):
                locale = self.hparams.base_locales[0]
            (hyps, _) = self.modules.whisper.generate(audio_features=enc_out, forced_decoder_locale=locale, max_gen_tokens=self.hparams.max_gen_tokens)
        return (logits, hyps)
    def compute_objectives(self, predictions, batch, stage):
        # Cross-entropy against the EOS-shifted targets; during eval also
        # accumulate WER/CER over decoded hypotheses.
        (logits, hyps) = predictions
        ids = batch.id
        (tokens_eos, _) = batch.tokens_eos
        loss = self.hparams.ce_loss(logits.flatten(end_dim=(- 2)), tokens_eos.flatten())
        if (stage != sb.Stage.TRAIN):
            target_words = batch.target_wrd
            predicted_words = self.tokenizer.batch_decode(hyps, skip_special_tokens=True)
            if self.hparams.normalize_transcripts:
                predicted_words = [self.tokenizer._normalize(text).split(' ') for text in predicted_words]
            else:
                predicted_words = [text.split(' ') for text in predicted_words]
            self.wer_metric.append(ids, predicted_words, target_words)
            self.cer_metric.append(ids, predicted_words, target_words)
        return loss
    def on_stage_start(self, stage, epoch=None):
        # Fresh error-rate trackers for every validation/test stage.
        if (stage != sb.Stage.TRAIN):
            self.cer_metric = self.hparams.cer_computer()
            self.wer_metric = self.hparams.wer_computer()
    def on_stage_end(self, stage, stage_loss, epoch=None):
        # Summarize stage metrics; on VALID also anneal the LR and keep
        # only the best-WER checkpoint, on TEST write detailed WER stats.
        stage_stats = {'loss': stage_loss}
        if (stage == sb.Stage.TRAIN):
            self.train_stats = stage_stats
        else:
            stage_stats['CER'] = self.cer_metric.summarize('error_rate')
            stage_stats['WER'] = self.wer_metric.summarize('error_rate')
        if (stage == sb.Stage.VALID):
            (old_lr, new_lr) = self.hparams.lr_annealing(stage_stats['loss'])
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            stats_meta_data = {'epoch': epoch, 'lr': old_lr}
            self.hparams.train_logger.log_stats(stats_meta=stats_meta_data, train_stats=self.train_stats, valid_stats=stage_stats)
            self.checkpointer.save_and_keep_only(meta={'WER': stage_stats['WER']}, min_keys=['WER'])
        elif (stage == sb.Stage.TEST):
            self.hparams.train_logger.log_stats(stats_meta={'Epoch loaded': self.hparams.epoch_counter.current}, test_stats=stage_stats)
            with open(self.hparams.wer_file, 'w', encoding='utf-8') as w:
                self.wer_metric.write_stats(w)
class Decoder(nn.Module):
    """Transformer decoder: embedding + positional encoding + N decoder layers
    followed by a final normalization.
    """

    def __init__(self, vocab_size, d_model, N, heads, dropout):
        super().__init__()
        self.N = N
        self.embed = Embedder(vocab_size, d_model)
        self.pe = PositionalEncoder(d_model, dropout=dropout)
        self.layers = get_clones(DecoderLayer(d_model, heads, dropout), N)
        self.norm = Norm(d_model)

    def forward(self, trg, e_outputs, src_mask, trg_mask):
        """Decode target tokens while attending over encoder outputs.

        :param trg: target token ids
        :param e_outputs: encoder hidden states
        :param src_mask: mask over encoder positions
        :param trg_mask: causal/padding mask over target positions
        """
        hidden = self.pe(self.embed(trg))
        for layer_index in range(self.N):
            hidden = self.layers[layer_index](hidden, e_outputs, src_mask, trg_mask)
        return self.norm(hidden)
def load_phi_id(phi_id, timeout, output, client, headers, alphabet):
    # Fetch (or read from local cache) one PHI inscription page, parse the
    # Greek text table and region/date metadata, and return a record dict.
    # Returns None when the inscription is invalid, too short, or parsing
    # fails.
    file_path = os.path.join(output, '{}.html'.format(phi_id))
    path_exists = (FLAGS.local or os.path.exists(file_path))
    if path_exists:
        with open(file_path, 'r') as f:
            req_text = f.read().strip()
    else:
        req_text = None
    retries = 0
    # Retry while we have no page yet or Cloudflare returned a 520 error.
    # NOTE(review): `retries` is never incremented, so the retry bound can
    # never trigger and persistent 520s would loop forever — confirm.
    while ((retries <= FLAGS.max_retries_per_inscription) and ((req_text is None) or ('520: Web server is returning an unknown error' in req_text))):
        # NOTE(review): the request URL literal below is truncated/scrubbed
        # (unterminated string) — the PHI inscription URL must be restored
        # before this function can run.
        req = client.get(' timeout=timeout, headers=headers)
        req_text = req.text
    if ('Invalid PHI Inscription Number' not in req_text):
        try:
            soup = BeautifulSoup(req_text, 'lxml')
            lines = []
            table = soup.find('table', attrs={'class': 'grk'})
            for row in table.find_all('tr'):
                tds = row.find_all('td')
                for (td_i, td) in enumerate(tds):
                    # Skip the line-number column of the text table.
                    if (('class' in td.attrs) and (td.attrs['class'][0] == 'id')):
                        continue
                    lines.append(td.get_text().strip())
            text = '\n'.join(lines)
            # Normalize: project-specific cleaning, sentence splitting,
            # then accent stripping.
            text = text_clean_phi(text, alphabet)
            sentences = text_to_sentences(text, alphabet)
            text = ' '.join([(s + '.') for s in sentences])
            text = strip_accents(text)
            (region_main, region_sub) = ('', '')
            (region_main_id, region_sub_id) = ((- 1), (- 1))
            # Region links live in the hdr1 header anchors.
            hdr1 = soup.find('div', attrs={'class': 'hdr1'})
            if hdr1:
                hdr1_a = hdr1.find_all('a')
                if (hdr1_a and (len(hdr1_a) == 3)):
                    region_main_id = hdr1_a[1]['href'].replace('/regions/', '')
                    region_main = hdr1_a[1].get_text()
                    region_sub_id = hdr1_a[2]['href'].replace('/regions/', '')
                    region_sub = hdr1_a[2].get_text()
                elif (hdr1_a and (len(hdr1_a) == 2)):
                    region_main_id = hdr1_a[1]['href'].replace('/regions/', '')
                    region_main = hdr1_a[1].get_text()
            metadata = soup.find('span', attrs={'class': 'ti'})
            if metadata:
                metadata = metadata.get_text()
            else:
                metadata = ''
            date_str = ''
            date_min = None
            date_max = None
            date_circa = None
            # NOTE(review): str.split('') raises ValueError (empty
            # separator) — the real delimiter appears lost; confirm.
            for tok in metadata.split(''):
                # Tokens mentioning era/reign markers are parsed as dates.
                if re.search('\\W(BC|AD|period|reign|a\\.|p\\.(?!\\s+\\d)|aet\\.)(\\W|$)', tok):
                    date_str = tok
                    (date_range, circa) = date_parser_phi(tok)
                    if date_range:
                        (date_min, date_max) = date_range.split(' ')
                        date_circa = circa
            output = {'id': phi_id, 'text': text, 'metadata': metadata, 'region_main_id': region_main_id, 'region_main': region_main, 'region_sub_id': region_sub_id, 'region_sub': region_sub, 'date_str': date_str, 'date_min': date_min, 'date_max': date_max, 'date_circa': date_circa}
            if (not path_exists):
                # Cache the raw HTML for future runs.
                with open(file_path, 'w') as f:
                    f.write(req_text)
            # Drop inscriptions whose usable text is too short.
            if (len(output['text'].replace(alphabet.missing, '')) >= FLAGS.min_text_len):
                return output
            return
        except:
            print(req_text)
            return
class LazyOperatorNormInfo():
    """Lazily computed and cached norm information about an operator.

    The exact 1-norm and the d_p estimates (||A^p||_1 ** (1/p)) are
    computed on first request and cached; *scale* multiplies every
    reported value, letting the same cache serve a scaled operator.
    """

    def __init__(self, A, A_1_norm=None, ell=2, scale=1):
        """
        :param A: the operator whose norms are reported
        :param A_1_norm: optional precomputed exact 1-norm of A
        :param ell: parameter passed to the one-norm estimator
        :param scale: multiplier applied to every reported norm
        """
        self._A = A
        self._A_1_norm = A_1_norm
        self._ell = ell
        self._d = {}
        self._scale = scale

    def set_scale(self, scale):
        """Change the scale without discarding cached values."""
        self._scale = scale

    def onenorm(self):
        """Exact (cached) scaled 1-norm of A."""
        if self._A_1_norm is None:
            self._A_1_norm = _exact_1_norm(self._A)
        return self._scale * self._A_1_norm

    def d(self, p):
        """Scaled estimate of ||A^p||_1 ** (1/p), cached per p."""
        if p not in self._d:
            self._d[p] = _onenormest_matrix_power(self._A, p, self._ell) ** (1.0 / p)
        return self._scale * self._d[p]

    def alpha(self, p):
        """Return max(d(p), d(p+1))."""
        return max(self.d(p), self.d(p + 1))
def make_padic_poly(parent, x, version):
    """Unpickling helper: rebuild a p-adic polynomial in *parent* from *x*.

    :param version: pickle format version; only 0 is understood
    :raises ValueError: for any unknown version
    """
    if version != 0:
        raise ValueError('unknown pickling version')
    return parent(x, construct=True)
class XTagger(BaseXTagger):
    # Joint POS-tag / extended-tag (xtag) tagger head on top of the shared
    # recurrent encoder provided by BaseXTagger (TensorFlow 1.x graph code).
    def __call__(self, vocabs, moving_params=None):
        # Build the tagging graph: a shared MLP split into tag/xtag halves,
        # each followed by a linear softmax classifier; losses are masked
        # by tokens_to_keep and summed.
        top_recur = super(XTagger, self).__call__(vocabs, moving_params=moving_params)
        int_tokens_to_keep = tf.to_int32(self.tokens_to_keep)
        with tf.variable_scope('MLP'):
            (tag_mlp, xtag_mlp) = self.MLP(top_recur, self.mlp_size, n_splits=2)
        with tf.variable_scope('Tag'):
            tag_logits = self.linear(tag_mlp, len(self.vocabs['tags']))
            tag_probs = tf.nn.softmax(tag_logits)
            tag_preds = tf.to_int32(tf.argmax(tag_logits, axis=(- 1)))
            tag_targets = self.vocabs['tags'].placeholder
            # Per-token correctness, zeroed out on padding tokens.
            tag_correct = (tf.to_int32(tf.equal(tag_preds, tag_targets)) * int_tokens_to_keep)
            tag_loss = tf.losses.sparse_softmax_cross_entropy(tag_targets, tag_logits, self.tokens_to_keep)
        with tf.variable_scope('XTag'):
            xtag_logits = self.linear(xtag_mlp, len(self.vocabs['xtags']))
            xtag_probs = tf.nn.softmax(xtag_logits)
            xtag_preds = tf.to_int32(tf.argmax(xtag_logits, axis=(- 1)))
            xtag_targets = self.vocabs['xtags'].placeholder
            xtag_correct = (tf.to_int32(tf.equal(xtag_preds, xtag_targets)) * int_tokens_to_keep)
            xtag_loss = tf.losses.sparse_softmax_cross_entropy(xtag_targets, xtag_logits, self.tokens_to_keep)
        # A token counts as correct only when both tag and xtag are right.
        correct = (tag_correct * xtag_correct)
        n_correct = tf.reduce_sum(correct)
        n_tag_correct = tf.reduce_sum(tag_correct)
        n_xtag_correct = tf.reduce_sum(xtag_correct)
        # NOTE(review): sequence-level accuracy compares against
        # sequence_lengths - 1 — presumably excluding a ROOT/sentinel
        # token; confirm the off-by-one is intended.
        n_seqs_correct = tf.reduce_sum(tf.to_int32(tf.equal(tf.reduce_sum(correct, axis=1), (self.sequence_lengths - 1))))
        loss = (tag_loss + xtag_loss)
        outputs = {'tag_logits': tag_logits, 'tag_probs': tag_probs, 'tag_preds': tag_preds, 'tag_targets': tag_targets, 'tag_correct': tag_correct, 'tag_loss': tag_loss, 'n_tag_correct': n_tag_correct, 'xtag_logits': xtag_logits, 'xtag_probs': xtag_probs, 'xtag_preds': xtag_preds, 'xtag_targets': xtag_targets, 'xtag_correct': xtag_correct, 'xtag_loss': xtag_loss, 'n_xtag_correct': n_xtag_correct, 'n_tokens': self.n_tokens, 'n_seqs': self.batch_size, 'tokens_to_keep': self.tokens_to_keep, 'n_correct': n_correct, 'n_seqs_correct': n_seqs_correct, 'loss': loss}
        return outputs
def test_downcast():
    """img_as_int on uint64 input should warn about downcasting to int16."""
    data = np.arange(10).astype(np.uint64)
    with expected_warnings(['Downcasting']):
        converted = img_as_int(data)
    assert np.allclose(converted, data.astype(np.int16))
    assert converted.dtype == np.int16, converted.dtype
# NOTE(review): this decorator line is corrupted — '.experimental' alone is
# not valid syntax; it was presumably '@pytest.mark.experimental' (or a
# similar project marker). Restore the full decorator.
.experimental
def test_cat_features_transformer_empty_list(long_log_with_features, short_log_with_features):
    # With an empty categorical-column list the transformer should pass the
    # data through unchanged: all 4 original columns, 'timestamp' included.
    transformed = get_transformed_features(transformer=CatFeaturesTransformer([]), train=long_log_with_features, test=short_log_with_features)
    assert (len(transformed.columns) == 4)
    assert ('timestamp' in transformed.columns)
def do_log_training_loss(iteration, loss, *, lr_scheduler, grad_norm, num_examples, len_contexts, len_answers, logger, train_task, round_progress, epochs, task_progress, timestamp, writer, log_prefix):
    """Log one training-step loss to the text logger and, when a writer is
    given, to TensorBoard (loss, learning rate, gradient norm).
    """
    avg_batch_size = f'avbatch_{num_examples:.0f}_{len_contexts:.0f}_{len_answers:.0f}:'
    logger.info(f'{timestamp}:{elapsed_time(logger)}:iteration_{iteration}:epoch_{epochs:.2f}:{round_progress}train_{train_task.name}:{task_progress}{avg_batch_size}{log_prefix}/loss_{loss:.4f}')
    if writer is None:
        return
    writer.add_scalar(f'{log_prefix}/loss/{train_task.name}', loss, iteration)
    if lr_scheduler is not None:
        writer.add_scalar(f'{log_prefix}/lr', np.array(lr_scheduler.get_last_lr()), iteration)
    if grad_norm is not None:
        writer.add_scalar(f'{log_prefix}/norm', grad_norm, iteration)
def process(sentence, annotation, use_fine_grained_null=True):
    # Convert span annotations ("start,end label" strings) over `sentence`
    # into a complete, sorted segmentation: the annotated spans plus filler
    # spans covering every unannotated gap. Gaps nested inside an annotated
    # parent get the label "<parent><>" when use_fine_grained_null is True,
    # plain 'NULL' otherwise. Returns None if the final spans overlap
    # inconsistently (compare2 raises).
    def compare(a, b):
        # Order spans by start ascending, then end descending, so parents
        # precede the children they contain.
        if (a[0] > b[0]):
            return 1
        elif (a[0] == b[0]):
            if (a[1] > b[1]):
                return (- 1)
            else:
                return 1
        else:
            return (- 1)
    def compare2(a, b):
        # Final ordering: disjoint spans by position; containers sort after
        # the spans they contain. Partial overlap raises ValueError.
        if (int(a[0]) >= int(b[1])):
            return 1
        elif (b[0] <= a[0] <= a[1] <= b[1]):
            return (- 1)
        elif (a[0] <= b[0] <= b[1] <= a[1]):
            return 1
        elif (b[0] >= a[1]):
            return (- 1)
        else:
            raise ValueError
    # No annotation at all: the whole sentence is one NULL span.
    if (annotation[0] == ''):
        return [(0, len(sentence), 'NULL')]
    spans = []
    sentence_len = len(sentence)
    results = []
    for (idx, a) in enumerate(annotation):
        (span, label) = a.split(' ')
        span = span.split(',')
        spans.append((int(span[0]), int(span[1]), label))
    spans.sort(key=cmp_to_key(compare))
    idx = (- 1)
    def helper(nest=False):
        # Recursive sweep over `spans` (sorted parents-first), appending
        # gap-filler spans into `results` — between children of a parent
        # and between consecutive top-level spans.
        nonlocal idx
        idx += 1
        if (idx > (len(spans) - 1)):
            return
        p = spans[idx]
        (i, j, label) = p
        children = []
        # Consume all spans fully contained in p as its children.
        while (((idx + 1) < len(spans)) and (i <= spans[(idx + 1)][0]) and (spans[(idx + 1)][1] <= j)):
            children.append(spans[(idx + 1)])
            helper(True)
        for c in range(len(children)):
            # Gap filler inside a parent carries the parent's label + '<>'
            # (fine-grained NULL) unless disabled.
            label = ((p[2] + '<>') if use_fine_grained_null else 'NULL')
            if ((c == (len(children) - 1)) and (children[(- 1)][1] < p[1])):
                results.append((children[(- 1)][1], p[1], label))
            if (c == 0):
                if (children[c][0] > p[0]):
                    results.append((p[0], children[c][0], label))
            elif (children[c][0] > children[(c - 1)][1]):
                results.append((children[(c - 1)][1], children[c][0], label))
        if (nest is False):
            # At top level, fill the gap to the next span; after the last
            # span, pad out to the end of the sentence.
            if (idx < (len(spans) - 1)):
                if (spans[(idx + 1)][0] > j):
                    results.append((j, spans[(idx + 1)][0], 'NULL'))
                helper(False)
            elif ((idx == (len(spans) - 1)) and (p[1] < sentence_len)):
                results.append((p[1], sentence_len, 'NULL'))
        return
    # Sentinel so the first real span is handled uniformly; removed after.
    spans.insert(0, (0, 0, '<START>'))
    helper()
    spans.pop(0)
    # Whole-sentence NULL backbone plus all generated fillers.
    spans.append((0, sentence_len, 'NULL'))
    spans.extend(results)
    try:
        spans.sort(key=cmp_to_key(compare2))
    except:
        return None
    return spans
class MethodNotAllowed(HTTPException):
    """405 Method Not Allowed.

    Raised when the URL exists but does not support the request method.
    When *valid_methods* is given, an ``Allow`` header listing them is
    added to the response headers.
    """
    code = 405
    description = 'The method is not allowed for the requested URL.'

    def __init__(self, valid_methods=None, description=None):
        # valid_methods: iterable of allowed HTTP method names, or None.
        HTTPException.__init__(self, description)
        self.valid_methods = valid_methods

    def get_headers(self, environ=None):
        """Base headers plus an Allow header when valid_methods is set."""
        headers = HTTPException.get_headers(self, environ)
        if self.valid_methods:
            headers.append(('Allow', ', '.join(self.valid_methods)))
        return headers
def parse_config(filename):
    """Parse an INI file into a nested dict {section: {key: parsed value}}.

    Non-empty values go through parse_value_from_string; empty values
    become None.
    """
    config = configparser.ConfigParser()
    config.read(filename)
    output = {}
    for section in config.sections():
        output[section] = {}
        for key, raw in config[section].items():
            val_str = str(raw)
            val = parse_value_from_string(val_str) if val_str else None
            # NOTE(review): debug print retained to preserve behavior.
            print(section, key, val_str, val)
            output[section][key] = val
    return output
def validate(passages: Dict[object, Tuple[str, str]], answers: List[List[str]], result_ctx_ids: List[Tuple[List[object], List[float]]], workers_num: int, match_type: str) -> List[List[bool]]:
    """Score retrieved contexts against gold answers.

    Logs the raw top-k hit counts and the corresponding accuracies, then
    returns the per-question per-document hit matrix.
    """
    match_stats = calculate_matches(passages, answers, result_ctx_ids, workers_num, match_type)
    raw_hits = match_stats.top_k_hits
    logger.info('Validation results: top k documents hits %s', raw_hits)
    n_queries = len(result_ctx_ids)
    accuracies = [hit / n_queries for hit in raw_hits]
    logger.info('Validation results: top k documents hits accuracy %s', accuracies)
    return match_stats.questions_doc_hits
def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Tokenize story and summary lines and flatten each into one id list.

    :param tokenizer: object exposing encode(line) -> list of token ids
    :return: (story_token_ids, summary_token_ids)
    """
    def _flatten_encode(lines):
        # Encode each line in order, then concatenate all token ids.
        return [token for line in lines for token in tokenizer.encode(line)]
    return (_flatten_encode(story_lines), _flatten_encode(summary_lines))
def _get_feature(model, loaders, device):
    # Extract per-view, per-class feature matrices from `model` over every
    # loader, with gradients disabled.
    # :return: (views_features[view][class] -> stacked (n, d) tensor,
    #           dataset_size, number of classes)
    views_features = defaultdict((lambda : defaultdict(list)))
    print('extracting features')
    with torch.no_grad():
        for loader in loaders.values():
            for (views, labels) in tqdm(loader, ncols=80):
                # NOTE(review): this [] init is dead — `outputs` is always
                # overwritten by the model call below.
                outputs = []
                for (view_index, view) in enumerate(views):
                    view = view.to(device)
                    outputs = model(view)
                    outputs = outputs.detach().cpu()
                    # Bucket each sample's feature vector under its label.
                    # NOTE(review): the per-sample .detach().cpu() below is
                    # redundant (already done above) but harmless.
                    for i in range(len(labels)):
                        views_features[view_index][labels[i].item()].append(outputs[i].detach().cpu())
    # Size/class count are derived from view 0 (all views share samples).
    dataset_size = sum((len(class_features) for class_features in views_features[0].values()))
    nclasses = len(views_features[0])
    # Stack each class's feature list into a single (n, d) tensor.
    for (view_index, view_features) in views_features.items():
        for (class_index, class_feature) in view_features.items():
            views_features[view_index][class_index] = torch.stack(class_feature, dim=0)
    return (views_features, dataset_size, nclasses)
def get_monitor_physical_size(monitor):
    """Return a monitor's physical size as (width, height).

    Wraps glfwGetMonitorPhysicalSize, which writes its results into two
    C ints passed by pointer (millimetres per the GLFW documentation).
    """
    width_mm = ctypes.c_int(0)
    height_mm = ctypes.c_int(0)
    _glfw.glfwGetMonitorPhysicalSize(monitor, ctypes.pointer(width_mm), ctypes.pointer(height_mm))
    return (width_mm.value, height_mm.value)
def KL_divergence(mu1: Tensor, log_var1: Tensor, mu2: Tensor, log_var2: Tensor, reduce_axis: int=(- 1)):
    """KL divergence between two diagonal Gaussians given log-variances.

    NOTE(review): with this trace term (var2/var1) and mean term scaled by
    1/var1, the formula evaluates KL(N(mu2, var2) || N(mu1, var1)), i.e.
    the divergence *toward* the first argument — confirm the intended
    direction matches the call sites.
    """
    log_det = log_var1 - log_var2                    # log(var1 / var2)
    trace_cov = (-log_det).exp()                     # var2 / var1 per dim
    mean_diff = (mu1 - mu2).pow(2) / log_var1.exp()  # squared mean gap / var1
    dim = mu1.shape[reduce_axis]                     # Gaussian dimensionality
    return 0.5 * ((trace_cov + mean_diff + log_det).sum(reduce_axis) - dim)
def taichi_scope(func):
    """Decorator restricting *func* to Taichi-scope execution.

    The wrapped function asserts in_taichi_scope() before delegating,
    so calling it from Python scope fails fast with a clear message.
    """
    import functools

    # FIX: the original body contained a bare `(func)` expression — a
    # no-op remnant of this @functools.wraps decorator, which preserves
    # func's name/docstring on the wrapper.
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        # NOTE: assert is stripped under `python -O`; kept to preserve the
        # original guard semantics.
        assert in_taichi_scope(), f'{func.__name__} cannot be called in Python-scope'
        return func(*args, **kwargs)

    return wrapped
class MHCABlock(nn.Module):
    """Multi-Head Convolutional self-Attention block (pre-norm style).

    Optionally applies a shared convolutional positional encoding (cpe)
    before attention; attention itself uses a shared convolutional
    relative position encoding (crpe). Both residual branches pass
    through DropPath (identity when drop_path == 0).
    """

    def __init__(self, dim, num_heads, mlp_ratio=3, drop_path=0.0, qkv_bias=True, qk_scale=None, norm_layer=partial(nn.LayerNorm, eps=1e-06), shared_cpe=None, shared_crpe=None):
        super().__init__()
        self.cpe = shared_cpe
        self.crpe = shared_crpe
        self.factoratt_crpe = FactorAtt_ConvRelPosEnc(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, shared_crpe=shared_crpe)
        self.mlp = Mlp(in_features=dim, hidden_features=dim * mlp_ratio)
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm1 = norm_layer(dim)
        self.norm2 = norm_layer(dim)

    def forward(self, x, size):
        """Run attention + MLP with residual connections on tokens *x*."""
        if self.cpe is not None:
            x = self.cpe(x, size)
        # Attention branch (pre-norm).
        attn_out = self.factoratt_crpe(self.norm1(x), size)
        x = x + self.drop_path(attn_out)
        # Feed-forward branch (pre-norm).
        mlp_out = self.mlp(self.norm2(x))
        return x + self.drop_path(mlp_out)
def read_langs(file_name, max_line=None):
    """Parse an annotated dialogue file into memory-network training samples.

    Each dialogue block in *file_name* consists of: a '#<domain>' marker line,
    '|'-separated KB/graph node lines, tab-separated turn lines of the form
    "<nid> <user>\t<response>\t<gold entity list>", and a blank line ending
    the dialogue.

    Args:
        file_name: path to the annotated dialogue file.
        max_line: optional cap on the number of dialogues read.

    Returns:
        (data, max_resp_len): per-turn sample dicts and the longest system
        response length (in tokens) observed.
    """
    print('Reading lines from {}'.format(file_name))
    # Per-dialogue accumulators: training samples, full memory context,
    # conversation-only memory, KB memory, and plain-text turns used for
    # dependency parsing.
    (data, context_arr, conv_arr, kb_arr, conv_arr_plain) = ([], [], [], [], [])
    (node2id, neighbors_info) = ({}, {})
    node_cnt = 0
    max_resp_len = 0
    # Running totals used only for the avg-dependencies statistic printed at the end.
    (total_node_cnt, total_dep_cnt) = (0, 0)
    with open('data/MULTIWOZ2.1/multiwoz_entities.json') as f:
        global_entity = json.load(f)
    with open(file_name) as fin:
        (cnt_lin, sample_counter) = (1, 1)
        for line in fin:
            line = line.strip()
            if line:
                if line.startswith('#'):
                    # Domain marker line, e.g. "#restaurant" — sets the task type
                    # for the following turns.
                    line = line.replace('#', '')
                    task_type = line
                    continue
                if ('\t' in line):
                    # Dialogue turn: user utterance, system response, gold entities.
                    (nid, line) = line.split(' ', 1)
                    (u, r, gold_ent) = line.split('\t')
                    gen_u = generate_memory(u, '$u', str(nid))
                    context_arr += gen_u
                    conv_arr += gen_u
                    conv_arr_plain.append(u)
                    gold_ent = ast.literal_eval(gold_ent)
                    # Route the gold entities into the bucket for the active domain.
                    (ent_idx_cal, ent_idx_nav, ent_idx_wet) = ([], [], [])
                    (ent_idx_restaurant, ent_idx_hotel, ent_idx_attraction, ent_idx_train, ent_idx_hospital) = ([], [], [], [], [])
                    if (task_type == 'restaurant'):
                        ent_idx_restaurant = gold_ent
                    elif (task_type == 'hotel'):
                        ent_idx_hotel = gold_ent
                    elif (task_type == 'attraction'):
                        ent_idx_attraction = gold_ent
                    elif (task_type == 'train'):
                        ent_idx_train = gold_ent
                    elif (task_type == 'hospital'):
                        ent_idx_hospital = gold_ent
                    ent_index = list(set(((((ent_idx_restaurant + ent_idx_hotel) + ent_idx_attraction) + ent_idx_train) + ent_idx_hospital)))
                    # Build entity -> KB head-node mappings. The position of the
                    # head inside a memory row differs per domain (and per the
                    # count of non-PAD cells), hence the per-domain branches.
                    ent_head_mapping = {}
                    if (task_type == 'navigate'):
                        for word_arr in kb_arr:
                            n = 0
                            for elm in word_arr:
                                if (elm != 'PAD'):
                                    n += 1
                            if (n == 5):
                                head = word_arr[0]
                                entity = word_arr[0]
                            else:
                                head = word_arr[2]
                                entity = word_arr[0]
                            if (entity not in ent_head_mapping):
                                ent_head_mapping[entity] = [head]
                            else:
                                ent_head_mapping[entity].append(head)
                    elif (task_type == 'weather'):
                        for word_arr in kb_arr:
                            n = 0
                            for elm in word_arr:
                                if (elm != 'PAD'):
                                    n += 1
                            if (n == 2):
                                continue
                            elif (n == 3):
                                head = word_arr[2]
                                entity = word_arr[0]
                            elif (n == 4):
                                head = word_arr[3]
                                entity = word_arr[0]
                            else:
                                continue
                            if (entity not in ent_head_mapping):
                                ent_head_mapping[entity] = [head]
                            else:
                                ent_head_mapping[entity].append(head)
                    elif (task_type == 'schedule'):
                        if (len(kb_arr) != 0):
                            for word_arr in kb_arr:
                                head = word_arr[2]
                                entity = word_arr[0]
                                if (entity not in ent_head_mapping):
                                    ent_head_mapping[entity] = [head]
                                else:
                                    ent_head_mapping[entity].append(head)
                    # Invert the mapping: head node -> list of its entities.
                    head_ent_mapping = {}
                    if ent_head_mapping:
                        for ent in ent_head_mapping.keys():
                            head_list = ent_head_mapping[ent]
                            for head in head_list:
                                if (head not in head_ent_mapping):
                                    head_ent_mapping[head] = [ent]
                                elif (ent not in head_ent_mapping[head]):
                                    head_ent_mapping[head].append(ent)
                                else:
                                    continue
                    # Collect, per response token, the heads it could belong to.
                    r_list = r.split(' ')
                    head_lists = []
                    for word in r_list:
                        head_list = []
                        if (word in ent_head_mapping):
                            for head in ent_head_mapping[word]:
                                if (head not in head_list):
                                    head_list.append(head)
                        if head_list:
                            head_lists.append(head_list)
                    # Heads shared by ALL entity tokens of the response.
                    final_list = []
                    if head_lists:
                        final_list = head_lists[0]
                        for elm in head_lists:
                            final_list = list(set(final_list).intersection(set(elm)))
                    # All entities hanging off those shared heads.
                    entity_list = []
                    for head in final_list:
                        if (head in head_ent_mapping):
                            entities = head_ent_mapping[head]
                            for ent in entities:
                                if (ent not in entity_list):
                                    entity_list.append(ent)
                    # 1 for KB rows tied to the shared heads, or dialogue rows whose
                    # word is a gold entity mentioned in the response; trailing 1 is
                    # the null/sentinel memory slot.
                    head_pointer = ([(1 if (((word_arr[0] in entity_list) and (set(final_list).intersection(set(word_arr)) != set([])) and ('$u' not in word_arr) and ('$s' not in word_arr)) or ((word_arr[0] in r.split()) and (word_arr[0] in ent_index) and (('$u' in word_arr) or ('$s' in word_arr)))) else 0) for word_arr in context_arr] + [1])
                    # Copy pointer: for each response token, the latest matching
                    # context position (or len(context_arr) = the sentinel).
                    ptr_index = []
                    a = 0
                    b = 0
                    for key in r.split():
                        a += 1
                        index = [loc for (loc, val) in enumerate(context_arr) if ((val[0] == key) and (key in ent_index))]
                        if index:
                            index = max(index)
                        else:
                            index = len(context_arr)
                        ptr_index.append(index)
                    selector_index = ([(1 if ((word_arr[0] in ent_index) or (word_arr[0] in r.split())) else 0) for word_arr in context_arr] + [1])
                    sketch_response = generate_template(global_entity, r, gold_ent, kb_arr, task_type)
                    # Dependency-parse the plain conversation and derive the
                    # (forward and reversed) dependency subgraphs + masks.
                    (dep_info, dep_info_hat, max_len) = dependency_parsing(conv_arr_plain)
                    (dep_node_info, dep_relation_info, cell_mask, all_cnt, path_len_info) = generate_subgraph(dep_info_hat, max_len, False)
                    (dep_node_info_reverse, dep_relation_info_reverse, cell_mask_reverse, all_cnt_reverse, path_len_info_reverse) = generate_subgraph(dep_info_hat, max_len, True)
                    deps = [dep_node_info, dep_node_info_reverse]
                    deps_type = [dep_relation_info, dep_relation_info_reverse]
                    masks = [cell_mask, cell_mask_reverse]
                    total_node_cnt = (total_node_cnt + max_len)
                    total_dep_cnt = ((total_dep_cnt + all_cnt) + all_cnt_reverse)
                    # Adjacency over context slots (+1 sentinel), seeded with self-loops.
                    # KB rows were prepended to context_arr, so a node's slot index
                    # counts backwards from node_cnt - 1, hence the -1 * id + cnt - 1.
                    adj = np.eye((len(context_arr) + 1))
                    for node in neighbors_info.keys():
                        neighbor = neighbors_info[node]
                        neighbor_list = neighbor.lstrip('[').rstrip(']').split(',')
                        neighbor = [ne.strip().strip("'") for ne in neighbor_list]
                        node_id = ((((- 1) * node2id[node]) + node_cnt) - 1)
                        for elm in neighbor:
                            elm_id = ((((- 1) * node2id[elm]) + node_cnt) - 1)
                            adj[(node_id, elm_id)] = 1
                    # Assemble the training sample; '$$$$' rows are sentinel memories.
                    data_detail = {'context_arr': list((context_arr + [(['$$$$'] * MEM_TOKEN_SIZE)])), 'response': r, 'sketch_response': sketch_response, 'ptr_index': (ptr_index + [len(context_arr)]), 'selector_index': selector_index, 'ent_index': ent_index, 'ent_idx_cal': list(set(ent_idx_cal)), 'ent_idx_nav': list(set(ent_idx_nav)), 'ent_idx_wet': list(set(ent_idx_wet)), 'conv_arr': list(conv_arr), 'conv_arr_plain': list(conv_arr_plain), 'deps': list(deps), 'deps_type': list(deps_type), 'cell_masks': list(masks), 'kb_arr': list((kb_arr + [(['$$$$'] * MEM_TOKEN_SIZE)])), 'id': int(sample_counter), 'ID': int(cnt_lin), 'domain': task_type, 'adj': list(adj), 'head_pointer': head_pointer, 'ent_idx_restaurant': list(set(ent_idx_restaurant)), 'ent_idx_hotel': list(set(ent_idx_hotel)), 'ent_idx_attraction': list(set(ent_idx_attraction)), 'ent_idx_train': list(set(ent_idx_train)), 'ent_idx_hospital': list(set(ent_idx_hospital))}
                    data.append(data_detail)
                    # Append the system response to the running context/conversation.
                    gen_r = generate_memory(r, '$s', str(nid))
                    context_arr += gen_r
                    conv_arr += gen_r
                    conv_arr_plain.append(r)
                    if (max_resp_len < len(r.split())):
                        max_resp_len = len(r.split())
                    sample_counter += 1
                else:
                    # KB/graph node line: "<nid>|<node>|<neighbor list>"; KB rows
                    # are PREPENDED to the context.
                    (nid, node, neighbors) = line.split('|')
                    r = node.lstrip('[').rstrip(']')
                    kb_info = generate_memory(r, '', str(nid))
                    context_arr = (kb_info + context_arr)
                    kb_arr += kb_info
                    node2id[node] = node_cnt
                    node_cnt += 1
                    neighbors_info[node] = neighbors
            else:
                # Blank line: dialogue boundary — reset per-dialogue state.
                cnt_lin += 1
                (context_arr, conv_arr, kb_arr, conv_arr_plain) = ([], [], [], [])
                (node2id, neighbors_info) = ({}, {})
                node_cnt = 0
                if (max_line and (cnt_lin >= max_line)):
                    break
    print('{} avg dependencies per node is: {}'.format(file_name, (total_dep_cnt / total_node_cnt)))
    return (data, max_resp_len)
# NOTE(review): this bare tuple is a no-op statement; it reads like the argument
# list of a stripped skip decorator (e.g.
# ``@unittest.skipIf(not have_sympy, 'SymPy not installed')``) — restore the
# decorator so the test is skipped, rather than run, when SymPy is absent.
((not have_sympy), 'SymPy not installed')
def test_abs():
    """Round-trip abs() expressions between SymPy and this package's symbols."""
    x = Symbol('x')
    # abs of a bare symbol converts both ways.
    e1 = abs(sympy.Symbol('x'))
    e2 = abs(x)
    assert (sympify(e1) == e2)
    assert (e1 == e2._sympy_())
    # abs pulls out a positive constant factor: |2x| == 2|x|.
    e1 = abs((2 * sympy.Symbol('x')))
    e2 = (2 * abs(x))
    assert (sympify(e1) == e2)
    assert (e1 == e2._sympy_())
    y = Symbol('y')
    # abs of a product of two symbols.
    e1 = abs((sympy.Symbol('y') * sympy.Symbol('x')))
    e2 = abs((y * x))
    assert (sympify(e1) == e2)
    assert (e1 == e2._sympy_())
def AllCusps(N):
    """Return a list of CuspFamily objects, one per cusp class of level N.

    For every divisor d of N, ``num_cusps_of_width(N, d)`` families are
    created; when there are several of the same width they are distinguished
    by 1-based string labels.

    Raises:
        ValueError: if N is not positive.
    """
    N = ZZ(N)
    if N <= 0:
        raise ValueError('N must be positive')
    families = []
    for d in divisors(N):
        count = num_cusps_of_width(N, d)
        if count == 1:
            families.append(CuspFamily(N, d))
        elif count > 1:
            families.extend(CuspFamily(N, d, label=str(i + 1)) for i in range(count))
    return families
def cnn_large(in_ch, in_dim, num_classes=10):
    """Build the 'large' CNN: five conv+ReLU layers and a two-layer classifier.

    Args:
        in_ch: number of input channels.
        in_dim: input spatial size (assumed square); the single stride-2 conv
            halves it once, hence the ``in_dim // 2`` terms in the flattened size.
        num_classes: output dimension of the final linear layer.

    Returns:
        nn.Sequential implementing the network.
    """
    # The original trailing "return model" was unreachable dead code ("model"
    # was never defined); it has been removed.
    return nn.Sequential(nn.Conv2d(in_ch, 64, 3, stride=1, padding=1), nn.ReLU(), nn.Conv2d(64, 64, 3, stride=1, padding=1), nn.ReLU(), nn.Conv2d(64, 128, 3, stride=2, padding=1), nn.ReLU(), nn.Conv2d(128, 128, 3, stride=1, padding=1), nn.ReLU(), nn.Conv2d(128, 128, 3, stride=1, padding=1), nn.ReLU(), Flatten(), nn.Linear(((128 * (in_dim // 2)) * (in_dim // 2)), 512), nn.ReLU(), nn.Linear(512, num_classes))
def get_tokens_with_boxes(unnormalized_word_boxes, list_of_words, token_label, tokenizer, pad_token_id=0, pad_token_box=[0, 0, 0, 0], max_seq_len=512, pad_token_class=7):
    """Tokenize words and expand each word's box/label over its sub-tokens.

    Every word is tokenized without special tokens; its bounding box and class
    label are replicated once per produced sub-token. All three sequences are
    then padded with the ``pad_*`` values (or truncated) to ``max_seq_len``.

    Returns:
        (token_boxes, token_ids, token_labels), each of length ``max_seq_len``.
    """
    assert (len(unnormalized_word_boxes) == len(list_of_words) == len(token_label)), f'Length of Bounding box: {len(unnormalized_word_boxes)}, words: {len(list_of_words)}, token: {len(token_label)}'
    token_boxes = []
    token_ids = []
    token_labels = []
    for (box, word, label) in zip(unnormalized_word_boxes, list_of_words, token_label):
        sub_ids = tokenizer(word, add_special_tokens=False).input_ids
        token_ids += sub_ids
        token_boxes += [box] * len(sub_ids)
        token_labels += [label] * len(sub_ids)
    # Pad each sequence up to max_seq_len (all three share the same length here).
    if len(token_boxes) < max_seq_len:
        token_boxes += [pad_token_box] * (max_seq_len - len(token_boxes))
    if len(token_ids) < max_seq_len:
        token_ids += [pad_token_id] * (max_seq_len - len(token_ids))
    if len(token_labels) < max_seq_len:
        token_labels += [pad_token_class] * (max_seq_len - len(token_labels))
    return (token_boxes[:max_seq_len], token_ids[:max_seq_len], token_labels[:max_seq_len])
def upload_files(data_root, data_dir, upload_func):
    """Recursively upload every file under *data_dir*.

    For each file, ``upload_func(0, name, path)`` is called, where *name* is
    the file's path relative to *data_root* joined with '/', and *path* is
    its on-disk location.
    """
    for (root, _dirs, files) in os.walk(data_dir):
        rel_prefix = os.path.relpath(root, data_root)
        for fname in files:
            upload_func(0, (rel_prefix + '/') + fname, os.path.join(root, fname))
def to_device(sample_list: Union[(SampleList, Dict[(str, Any)])], device: device_type='cuda'):
    """Move a SampleList (or plain mapping convertible to one) to *device*.

    Plain mappings are first converted via ``convert_batch_to_sample_list``;
    anything else is returned unchanged with a warning. A requested CUDA
    device silently falls back to CPU when CUDA is unavailable.

    Returns:
        The (possibly converted and moved) sample list.
    """
    import collections.abc  # Mapping was removed from the bare `collections` namespace in Python 3.10
    # FIX: `collections.Mapping` raises AttributeError on Python >= 3.10.
    if isinstance(sample_list, collections.abc.Mapping):
        sample_list = convert_batch_to_sample_list(sample_list)
    if (not isinstance(sample_list, SampleList)):
        warnings.warn('You are not returning SampleList/Sample from your dataset. MMF expects you to move your tensors to cuda yourself.')
        return sample_list
    if isinstance(device, str):
        device = torch.device(device)
    if ((device.type == 'cuda') and (not torch.cuda.is_available())):
        warnings.warn('Selected device is cuda, but it is NOT available!!! Falling back on cpu.')
        device = torch.device('cpu')
    # Avoid a redundant copy when the list is already on the target device.
    if (sample_list.get_device() != device):
        sample_list = sample_list.to(device)
    return sample_list
def main_mlp():
    """Train and evaluate a fingerprint-MLP baseline on PCQM4Mv2.

    Parses CLI options, fixes all RNG seeds, builds fingerprint feature
    tensors from the SMILES dataset, trains with Adam + StepLR while tracking
    the best validation MAE, and optionally writes checkpoints, TensorBoard
    logs, and test submission files.
    """
    parser = argparse.ArgumentParser(description='GNN baselines on ogbgmol* data with Pytorch Geometrics')
    parser.add_argument('--device', type=int, default=0, help='which gpu to use if any (default: 0)')
    parser.add_argument('--num_mlp_layers', type=int, default=6, help='number of mlp layers (default: 6)')
    parser.add_argument('--drop_ratio', type=float, default=0.2, help='dropout ratio (default: 0.2)')
    parser.add_argument('--batch_size', type=int, default=256, help='input batch size for training (default: 256)')
    parser.add_argument('--emb_dim', type=int, default=1600, help='embedding dimensionality (default: 1600)')
    parser.add_argument('--train_subset', action='store_true')
    parser.add_argument('--epochs', type=int, default=100, help='number of epochs to train (default: 100)')
    parser.add_argument('--num_workers', type=int, default=0, help='number of workers (default: 0)')
    parser.add_argument('--radius', type=int, default=2, help='radius (default: 2)')
    parser.add_argument('--log_dir', type=str, default='', help='tensorboard log directory')
    parser.add_argument('--checkpoint_dir', type=str, default='', help='directory to save checkpoint')
    parser.add_argument('--save_test_dir', type=str, default='', help='directory to save test submission file')
    args = parser.parse_args()
    print(args)
    # Fix every RNG source for reproducibility.
    np.random.seed(42)
    torch.manual_seed(42)
    torch.cuda.manual_seed(42)
    random.seed(42)
    device = (torch.device(('cuda:' + str(args.device))) if torch.cuda.is_available() else torch.device('cpu'))
    # Load SMILES-only dataset and precompute fingerprint features X / targets Y.
    dataset = PCQM4Mv2Dataset(root='dataset/', only_smiles=True)
    fp_processed_file = preprocess_fp(dataset, args.radius)
    data_dict = torch.load(fp_processed_file)
    (X, Y) = (data_dict['X'], data_dict['Y'])
    split_idx = dataset.get_idx_split()
    evaluator = PCQM4Mv2Evaluator()
    if args.train_subset:
        # Debug mode: train on a random 10% of the training split.
        print('train subset')
        subset_ratio = 0.1
        subset_idx = torch.randperm(len(split_idx['train']))[:int((subset_ratio * len(split_idx['train'])))]
        train_dataset = TensorDataset(X[split_idx['train'][subset_idx]], Y[split_idx['train'][subset_idx]])
    else:
        train_dataset = TensorDataset(X[split_idx['train']], Y[split_idx['train']])
    valid_dataset = TensorDataset(X[split_idx['valid']], Y[split_idx['valid']])
    testdev_dataset = TensorDataset(X[split_idx['test-dev']], Y[split_idx['test-dev']])
    testchallenge_dataset = TensorDataset(X[split_idx['test-challenge']], Y[split_idx['test-challenge']])
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    valid_loader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
    # Test loaders are only needed when a submission file will be written.
    if (args.save_test_dir != ''):
        testdev_loader = DataLoader(testdev_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
        testchallenge_loader = DataLoader(testchallenge_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
    if (args.checkpoint_dir != ''):
        os.makedirs(args.checkpoint_dir, exist_ok=True)
    model = MLP(num_mlp_layers=args.num_mlp_layers, emb_dim=args.emb_dim, drop_ratio=args.drop_ratio).to(device)
    num_params = sum((p.numel() for p in model.parameters()))
    print(f'#Params: {num_params}')
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    if (args.log_dir != ''):
        writer = SummaryWriter(log_dir=args.log_dir)
    best_valid_mae = 1000
    # Subset training uses a longer schedule with sparser LR decay.
    if args.train_subset:
        scheduler = StepLR(optimizer, step_size=300, gamma=0.25)
        args.epochs = 1000
    else:
        scheduler = StepLR(optimizer, step_size=30, gamma=0.25)
    for epoch in range(1, (args.epochs + 1)):
        print('=====Epoch {}'.format(epoch))
        print('Training...')
        train_mae = train(model, device, train_loader, optimizer)
        print('Evaluating...')
        # NOTE: `eval` here is a project helper that shadows the builtin.
        valid_mae = eval(model, device, valid_loader, evaluator)
        print({'Train': train_mae, 'Validation': valid_mae})
        if (args.log_dir != ''):
            writer.add_scalar('valid/mae', valid_mae, epoch)
            writer.add_scalar('train/mae', train_mae, epoch)
        # Checkpoint + submission files are refreshed on every new best MAE.
        if (valid_mae < best_valid_mae):
            best_valid_mae = valid_mae
            if (args.checkpoint_dir != ''):
                print('Saving checkpoint...')
                checkpoint = {'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'scheduler_state_dict': scheduler.state_dict(), 'best_val_mae': best_valid_mae, 'num_params': num_params}
                torch.save(checkpoint, osp.join(args.checkpoint_dir, 'checkpoint.pt'))
            if (args.save_test_dir != ''):
                testdev_pred = test(model, device, testdev_loader)
                testdev_pred = testdev_pred.cpu().detach().numpy()
                testchallenge_pred = test(model, device, testchallenge_loader)
                testchallenge_pred = testchallenge_pred.cpu().detach().numpy()
                print('Saving test submission file...')
                evaluator.save_test_submission({'y_pred': testdev_pred}, args.save_test_dir, mode='test-dev')
                evaluator.save_test_submission({'y_pred': testchallenge_pred}, args.save_test_dir, mode='test-challenge')
        scheduler.step()
        print(f'Best validation MAE so far: {best_valid_mae}')
    if (args.log_dir != ''):
        writer.close()
class CustomModuleQuantizeHandler(QuantizeHandler):
    """Convert-time handler that swaps an observed custom module for its quantized class."""

    def convert(self, quantizer, node, load_arg, debug=False):
        """Replace the module behind *node* with its quantized counterpart and copy the node."""
        assert (node.op == 'call_module')
        observed = quantizer.modules[node.target]
        # Attach the recorded activation observer, when one was collected for this node.
        if node.name in quantizer.activation_post_process_map:
            observed.activation_post_process = quantizer.activation_post_process_map[node.name]
        quantized_cls = get_quantized_custom_module_class(observed._FLOAT_MODULE)
        quantized_module = quantized_cls.from_observed(observed)
        (parent_name, name) = _parent_name(node.target)
        setattr(quantizer.modules[parent_name], name, quantized_module)
        return quantizer.quantized_graph.node_copy(node, load_arg(quantized=None))
def save_config_file(ppo_config, env, file_path):
    """Dump [task_params, world_params, ppo_config] to *file_path* as JSON.

    Non-string task parameter values are stringified so the payload is
    guaranteed to be JSON-serializable; the environment is closed after its
    parameters have been read.
    """
    task_params = env._task.get_task_params()
    for key in task_params:
        if not isinstance(task_params[key], str):
            task_params[key] = str(task_params[key])
    world_params = env.get_world_params()
    env.close()
    with open(file_path, 'w') as fout:
        json.dump([task_params, world_params, ppo_config], fout)
def _kl_error_function(x: np.ndarray, range_min: float, range_max: float, n_bins: int=2048, n_bits: int=8) -> np.float32:
    """Score the candidate quantization range [range_min, range_max] by KL error.

    Histograms *x*, quantizes the bin edges with an n-bit uniform quantizer,
    re-histograms the quantized edges against the original binning, and
    delegates the final KL computation to ``_kl_error_histogram``.

    Returns np.inf for degenerate or invalid ranges so minimizers skip them.
    """
    if range_max <= range_min:
        return np.inf
    counts, edges = np.histogram(x, bins=n_bins)
    if not _is_range_valid(edges, range_min, range_max):
        return np.inf
    q_edges = uniform_quantize_tensor(edges, range_min, range_max, n_bits)
    # Edges outnumber counts by one, hence the trailing zero weight.
    q_counts, _ = np.histogram(q_edges, bins=edges, weights=np.concatenate([counts.flatten(), np.array([0])]))
    return _kl_error_histogram(q_edges, q_counts, edges, counts, range_min=range_min, range_max=range_max)
class Evaluator():
    """Stateless segmentation-metric helper operating on class-level state.

    ``initialize`` must be called once before ``classify_prediction``. Both
    methods use ``cls`` exclusively, so they are class methods; the
    ``@classmethod`` decorators appear to have been stripped from this copy
    (without them, ``Evaluator.initialize()`` raises TypeError) and are
    restored here.
    """

    @classmethod
    def initialize(cls):
        # Label value marking pixels excluded from intersection/union counts.
        cls.ignore_index = 255

    @classmethod
    def classify_prediction(cls, pred_mask, batch):
        """Return per-class (intersection, union) pixel counts for binary masks.

        Args:
            pred_mask: batch of predicted 0/1 masks (modified in place when an
                ignore mask is present).
            batch: mapping with 'query_mask' (ground truth 0/1 masks) and an
                optional 'query_ignore_idx' mask of pixels to exclude.

        Returns:
            (area_inter, area_union), each shaped (2, batch) — one row per class.
        """
        gt_mask = batch.get('query_mask')
        query_ignore_idx = batch.get('query_ignore_idx')
        if (query_ignore_idx is not None):
            # Ignore pixels must not overlap foreground ground truth.
            assert (torch.logical_and(query_ignore_idx, gt_mask).sum() == 0)
            query_ignore_idx *= cls.ignore_index  # NOTE: mutates the caller's tensor in place
            gt_mask = (gt_mask + query_ignore_idx)
            # Force predictions to the ignore label so they drop out of histc's [0, 1] range.
            pred_mask[(gt_mask == cls.ignore_index)] = cls.ignore_index
        (area_inter, area_pred, area_gt) = ([], [], [])
        for (_pred_mask, _gt_mask) in zip(pred_mask, gt_mask):
            _inter = _pred_mask[(_pred_mask == _gt_mask)]
            if (_inter.size(0) == 0):
                # histc on an empty tensor would be ill-defined; both classes get 0.
                _area_inter = torch.tensor([0, 0], device=_pred_mask.device)
            else:
                _area_inter = torch.histc(_inter, bins=2, min=0, max=1)
            area_inter.append(_area_inter)
            area_pred.append(torch.histc(_pred_mask, bins=2, min=0, max=1))
            area_gt.append(torch.histc(_gt_mask, bins=2, min=0, max=1))
        area_inter = torch.stack(area_inter).t()
        area_pred = torch.stack(area_pred).t()
        area_gt = torch.stack(area_gt).t()
        area_union = ((area_pred + area_gt) - area_inter)
        return (area_inter, area_union)
def _verify_python3_env():
    """Abort with a helpful error when Python 3 runs under an ASCII locale.

    On Python 2 this is a no-op. Otherwise, if the preferred filesystem
    encoding resolves to ASCII, a RuntimeError is raised whose message (on
    POSIX) includes locale-specific remediation advice gathered from
    ``locale -a`` and the LC_ALL/LANG environment variables.
    """
    if PY2:
        return
    try:
        import locale
        fs_enc = codecs.lookup(locale.getpreferredencoding()).name
    except Exception:
        # Any failure to resolve the encoding is treated as the worst case.
        fs_enc = 'ascii'
    if (fs_enc != 'ascii'):
        return
    extra = ''
    if (os.name == 'posix'):
        import subprocess
        # Enumerate available locales; best-effort — missing `locale` binary is fine.
        try:
            rv = subprocess.Popen(['locale', '-a'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0]
        except OSError:
            rv = b''
        good_locales = set()
        has_c_utf8 = False
        if isinstance(rv, bytes):
            rv = rv.decode('ascii', 'replace')
        for line in rv.splitlines():
            locale = line.strip()
            if locale.lower().endswith(('.utf-8', '.utf8')):
                good_locales.add(locale)
                if (locale.lower() in ('c.utf8', 'c.utf-8')):
                    has_c_utf8 = True
        extra += '\n\n'
        # Tailor the advice to what the system actually offers.
        if (not good_locales):
            extra += 'Additional information: on this system no suitable UTF-8 locales were discovered. This most likely requires resolving by reconfiguring the locale system.'
        elif has_c_utf8:
            extra += 'This system supports the C.UTF-8 locale which is recommended. You might be able to resolve your issue by exporting the following environment variables:\n\n    export LC_ALL=C.UTF-8\n    export LANG=C.UTF-8'
        else:
            extra += 'This system lists a couple of UTF-8 supporting locales that you can pick from. The following suitable locales were discovered: {}'.format(', '.join(sorted(good_locales)))
        # Detect a UTF-8 locale that was exported but is not actually installed.
        bad_locale = None
        for locale in (os.environ.get('LC_ALL'), os.environ.get('LANG')):
            if (locale and locale.lower().endswith(('.utf-8', '.utf8'))):
                bad_locale = locale
            if (locale is not None):
                break
        if (bad_locale is not None):
            extra += "\n\nClick discovered that you exported a UTF-8 locale but the locale system could not pick up from it because it does not exist. The exported locale is '{}' but it is not supported".format(bad_locale)
    raise RuntimeError('Click will abort further execution because Python 3 was configured to use ASCII as encoding for the environment. Consult for mitigation steps.{}'.format(extra))
class RandomSampler(Sampler[int]):
    """Samples indices randomly from a sized data source.

    Without replacement, yields a random permutation of all indices; with
    replacement, draws ``num_samples`` indices uniformly (in chunks of 32).

    FIX(review): ``num_samples`` is read as an attribute throughout
    (``self.num_samples`` in ``__init__``, ``__iter__`` and ``__len__``), so
    the ``@property`` decorator — present upstream but missing here — is
    restored; without it the ``isinstance(self.num_samples, int)`` check in
    ``__init__`` always fails on a bound method and construction raises.
    """
    data_source: Sized
    replacement: bool

    def __init__(self, data_source: Sized, replacement: bool=False, num_samples: Optional[int]=None, generator=None) -> None:
        self.data_source = data_source
        self.replacement = replacement
        self._num_samples = num_samples
        self.generator = generator
        if (not isinstance(self.replacement, bool)):
            raise TypeError('replacement should be a boolean value, but got replacement={}'.format(self.replacement))
        # num_samples only makes sense with replacement; a permutation already
        # covers the whole data source exactly once.
        if ((self._num_samples is not None) and (not replacement)):
            raise ValueError('With replacement=False, num_samples should not be specified, since a random permute will be performed.')
        if ((not isinstance(self.num_samples, int)) or (self.num_samples <= 0)):
            raise ValueError('num_samples should be a positive integer value, but got num_samples={}'.format(self.num_samples))

    @property
    def num_samples(self) -> int:
        # Default to the full dataset length when not explicitly capped.
        if (self._num_samples is None):
            return len(self.data_source)
        return self._num_samples

    def __iter__(self) -> Iterator[int]:
        n = len(self.data_source)
        if (self.generator is None):
            # Seed a fresh generator from torch's global RNG so each epoch differs.
            seed = int(torch.empty((), dtype=torch.int64).random_().item())
            generator = torch.Generator()
            generator.manual_seed(seed)
        else:
            generator = self.generator
        if self.replacement:
            # Draw in chunks of 32 to bound the temporary tensor size.
            for _ in range((self.num_samples // 32)):
                (yield from torch.randint(high=n, size=(32,), dtype=torch.int64, generator=generator).tolist())
            (yield from torch.randint(high=n, size=((self.num_samples % 32),), dtype=torch.int64, generator=generator).tolist())
        else:
            (yield from torch.randperm(n, generator=generator).tolist())

    def __len__(self) -> int:
        return self.num_samples
def compare_spectra(actual, desired):
    """Assert two spectra agree in frequency and luminosity (and distance when present)."""
    for attr in ('frequency', 'luminosity'):
        test_helper.assert_quantity_allclose(getattr(actual, attr), getattr(desired, attr))
    # Distance is optional: compare only when `actual` carries a truthy value.
    if getattr(actual, 'distance', None):
        test_helper.assert_quantity_allclose(actual.distance, desired.distance)
class WhisperProcessor(ProcessorMixin):
    """Bundles a Whisper feature extractor and tokenizer behind one interface.

    Calling the processor routes audio to the feature extractor and text to
    the tokenizer; when both are given, tokenized ids are attached as the
    'labels' entry of the audio features.
    """
    feature_extractor_class = 'WhisperFeatureExtractor'
    tokenizer_class = 'WhisperTokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Delegate decoder prompt-id construction to the tokenizer."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Process audio and/or text; see the class docstring for routing rules."""
        # Inside the target-processor context manager, pass everything straight through.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if args:
            # Legacy positional style: the first positional argument is the audio.
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        if audio is None:
            return encodings
        inputs['labels'] = encodings['input_ids']
        return inputs

    def batch_decode(self, *args, **kwargs):
        """Delegate batch decoding to the tokenizer."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate single-sequence decoding to the tokenizer."""
        return self.tokenizer.decode(*args, **kwargs)
class Encoder(nn.Module):
    """Seq2seq encoder: embedding -> PDELU activation -> dropout -> LSTM.

    Only the final (hidden, cell) states are returned; per-step outputs are
    discarded.
    """

    def __init__(self, input_size, embedding_size, hidden_size, num_layers, p):
        super(Encoder, self).__init__()
        self.dropout = nn.Dropout(p)
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.embedding = nn.Embedding(input_size, embedding_size)
        self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p)
        self.pdelu = PDELU()

    def forward(self, x):
        """Encode token-index tensor *x* and return the LSTM's final states."""
        activated = self.pdelu(self.embedding(x))
        (_outputs, (hidden, cell)) = self.rnn(self.dropout(activated))
        return (hidden, cell)
def plot_stuff(images, labels, filename):
    """Render up to nine images in a 3x3 grid, titled with their integer labels,
    and save the figure to *filename*."""
    plt.figure(figsize=(7, 7))
    for (idx, img) in enumerate(images):
        plt.subplot(3, 3, idx + 1)
        # Images arrive as tensors; cast to int for display.
        plt.imshow(img.numpy().astype('int'))
        plt.title(int(labels[idx]))
        plt.axis('off')
    plt.savefig(filename)
def non_sphere_rejection(partitionZ):
    """Rejection-sample a [location, orientation] pair from the Gibbs-Boltzmann
    distribution evaluated by ``non_sphere_GB``.

    Heights are drawn uniformly from [A, max_height - A] (x and y stay zero)
    with a freshly randomized orientation each attempt.

    Raises:
        InvalidProbability: if the acceptance probability exceeds 1, i.e.
            *partitionZ* underestimates the distribution's maximum.
    """
    orientation = Quaternion([0.0, 0.0, 0.0, 0.0])
    new_location = np.array([0.0, 0.0, 0.0])
    while True:
        orientation.random_orientation()
        new_location[2] = np.random.uniform(A, (max_height - A))
        accept = non_sphere_GB(new_location, orientation) / partitionZ
        if accept > 1:
            raise InvalidProbability('Acceptance Probability is greater than 1')
        if np.random.uniform(0.0, 1.0) < accept:
            return [new_location, orientation]
def read_sparse_matrix_hdf5(filename, output_format=None):
    """Open *filename* as an HDF5 file and load the sparse matrix at its root.

    The file handle is closed by the context manager before returning.
    """
    with pt.open_file(filename, mode='r') as handle:
        return read_sparse_matrix_from_hdf5(handle, handle.root, output_format)
def subsample_dataset(dataset, idxs, absolute=True):
    """Restrict *dataset* in place to the entries selected by *idxs*.

    Args:
        dataset: object exposing aligned ``data``, ``targets`` and ``uq_idxs``
            (and ``__len__`` when ``absolute`` is used).
        idxs: positional indices (``absolute=True``) or unique-id values
            matched against ``dataset.uq_idxs`` (``absolute=False``).
        absolute: selects which interpretation of *idxs* applies.

    Returns:
        The same (mutated) dataset, for chaining.
    """
    if absolute:  # idiom: was `absolute == True`
        # Build the boolean mask directly instead of zeros().astype('bool').
        mask = np.zeros(len(dataset), dtype=bool)
        mask[idxs] = True
    else:
        wanted = set(idxs)  # O(1) membership per element
        mask = np.array([(i in wanted) for i in dataset.uq_idxs])
    dataset.data = dataset.data[mask]
    dataset.targets = np.array(dataset.targets)[mask].tolist()
    dataset.uq_idxs = dataset.uq_idxs[mask]
    return dataset
class SplineCoupling(tf.keras.Model):
    """One half of a rational-quadratic spline coupling layer.

    A conditioner network predicts, per output dimension, the parameters of a
    monotone rational-quadratic spline (bin widths/heights, knot derivatives
    and the lower-left corner of the spline box); inputs outside the box are
    mapped by a linear tail.
    """

    def __init__(self, dim_out, settings_dict, **kwargs):
        """Create the conditioner net sized for all spline parameters.

        Args:
            dim_out: number of transformed dimensions.
            settings_dict: must provide 'bins' (spline bin count) and
                'default_domain' — presumably (x_min, x_max, y_min, y_max) of
                the spline box, given how it is indexed in
                ``_constrain_parameters``; TODO confirm.
        """
        super().__init__(**kwargs)
        self.dim_out = dim_out
        self.bins = settings_dict['bins']
        self.default_domain = settings_dict['default_domain']
        # Per-dimension parameter budget: box corner (1 + 1), widths/heights
        # (bins each), and interior knot derivatives (bins - 1).
        self.spline_params_counts = {'left_edge': 1, 'bottom_edge': 1, 'widths': self.bins, 'heights': self.bins, 'derivatives': (self.bins - 1)}
        self.num_total_spline_params = (sum(self.spline_params_counts.values()) * self.dim_out)
        self.net = DenseCouplingNet(settings_dict, self.num_total_spline_params)

    def call(self, split1, split2, condition, inverse=False, **kwargs):
        """Dispatch to the forward or inverse spline transform."""
        if (not inverse):
            return self._forward(split1, split2, condition, **kwargs)
        return self._inverse(split1, split2, condition, **kwargs)

    def _forward(self, u1, u2, condition, **kwargs):
        """Transform u1 with spline parameters conditioned on (u2, condition).

        Returns:
            (v, log_det_J): transformed tensor and summed log-Jacobian.
        """
        spline_params = self.net(u2, condition, **kwargs)
        spline_params = self._semantic_spline_parameters(spline_params)
        spline_params = self._constrain_parameters(spline_params)
        (v, log_det_J) = self._calculate_spline(u1, spline_params, inverse=False)
        return (v, log_det_J)

    def _inverse(self, v1, v2, condition, **kwargs):
        """Invert the spline on v2, conditioning the net on (v1, condition).

        NOTE(review): the conditioner input here is v1 while ``_forward``
        conditions on u2 — confirm against the enclosing flow that the splits
        line up for exact invertibility.
        """
        spline_params = self.net(v1, condition, **kwargs)
        spline_params = self._semantic_spline_parameters(spline_params)
        spline_params = self._constrain_parameters(spline_params)
        u = self._calculate_spline(v2, spline_params, inverse=True)
        return u

    def _calculate_spline(self, target, spline_params, inverse=False):
        """Apply (or invert) the rational-quadratic spline on *target*.

        Elements inside the spline box get the rational-quadratic transform
        (Durkan et al.-style); elements outside get a linear map with slope
        total_height / total_width. Forward returns (result, log_det); inverse
        returns only the result.
        """
        (left_edge, bottom_edge, widths, heights, derivatives) = spline_params
        result = tf.zeros_like(target)
        log_jac = tf.zeros_like(target)
        total_width = tf.reduce_sum(widths, axis=(- 1), keepdims=True)
        total_height = tf.reduce_sum(heights, axis=(- 1), keepdims=True)
        # Knot coordinates: cumulative sums of widths/heights from the box corner.
        knots_x = tf.concat([left_edge, (left_edge + tf.math.cumsum(widths, axis=(- 1)))], axis=(- 1))
        knots_y = tf.concat([bottom_edge, (bottom_edge + tf.math.cumsum(heights, axis=(- 1)))], axis=(- 1))
        # Forward checks membership on the x-knots; inverse on the y-knots.
        if (not inverse):
            target_in_domain = tf.logical_and((knots_x[(..., 0)] < target), (target <= knots_x[(..., (- 1))]))
            higher_indices = tf.searchsorted(knots_x, target[(..., None)])
        else:
            target_in_domain = tf.logical_and((knots_y[(..., 0)] < target), (target <= knots_y[(..., (- 1))]))
            higher_indices = tf.searchsorted(knots_y, target[(..., None)])
        target_in = target[target_in_domain]
        target_in_idx = tf.where(target_in_domain)
        target_out = target[(~ target_in_domain)]
        target_out_idx = tf.where((~ target_in_domain))
        # --- In-domain elements: rational-quadratic transform per containing bin.
        if (tf.size(target_in_idx) > 0):
            higher_indices = tf.gather_nd(higher_indices, target_in_idx)
            higher_indices = tf.cast(higher_indices, tf.int32)
            lower_indices = (higher_indices - 1)
            lower_idx_tuples = tf.concat([tf.cast(target_in_idx, tf.int32), lower_indices], axis=(- 1))
            higher_idx_tuples = tf.concat([tf.cast(target_in_idx, tf.int32), higher_indices], axis=(- 1))
            # Bin-local quantities: knot derivatives (dk, dkp), knot coords,
            # bin slope sk, and normalized position xi in [0, 1].
            dk = tf.gather_nd(derivatives, lower_idx_tuples)
            dkp = tf.gather_nd(derivatives, higher_idx_tuples)
            xk = tf.gather_nd(knots_x, lower_idx_tuples)
            xkp = tf.gather_nd(knots_x, higher_idx_tuples)
            yk = tf.gather_nd(knots_y, lower_idx_tuples)
            ykp = tf.gather_nd(knots_y, higher_idx_tuples)
            x = target_in
            dx = (xkp - xk)
            dy = (ykp - yk)
            sk = (dy / dx)
            xi = ((x - xk) / dx)
            if (not inverse):
                # Rational-quadratic evaluation plus its log-derivative
                # (small epsilons guard the logs against zeros).
                numerator = (dy * ((sk * (xi ** 2)) + ((dk * xi) * (1 - xi))))
                denominator = (sk + ((((dkp + dk) - (2 * sk)) * xi) * (1 - xi)))
                result_in = (yk + (numerator / denominator))
                numerator = ((sk ** 2) * (((dkp * (xi ** 2)) + (((2 * sk) * xi) * (1 - xi))) + (dk * ((1 - xi) ** 2))))
                denominator = ((sk + ((((dkp + dk) - (2 * sk)) * xi) * (1 - xi))) ** 2)
                log_jac_in = (tf.math.log((numerator + 1e-10)) - tf.math.log((denominator + 1e-10)))
                log_jac = tf.tensor_scatter_nd_update(log_jac, target_in_idx, log_jac_in)
            else:
                # Invert via the quadratic a*xi^2 + b*xi + c = 0, taking the
                # numerically stable root; discriminant clamped at 0.
                y = x
                a = ((dy * (sk - dk)) + ((y - yk) * ((dkp + dk) - (2 * sk))))
                b = ((dy * dk) - ((y - yk) * ((dkp + dk) - (2 * sk))))
                c = ((- sk) * (y - yk))
                discriminant = tf.maximum(((b ** 2) - ((4 * a) * c)), 0.0)
                xi = ((2 * c) / ((- b) - tf.math.sqrt(discriminant)))
                result_in = ((xi * dx) + xk)
            result = tf.tensor_scatter_nd_update(result, target_in_idx, result_in)
        # --- Out-of-domain elements: linear tail with the box's mean slope.
        # NOTE(review): the guard is `> 1` while the in-domain guard is `> 0`
        # — a single out-of-domain element is skipped; verify intended.
        if (tf.size(target_out_idx) > 1):
            scale = (total_height / total_width)
            shift = (bottom_edge - (scale * left_edge))
            scale_out = tf.gather_nd(scale, target_out_idx)
            shift_out = tf.gather_nd(shift, target_out_idx)
            if (not inverse):
                # NOTE(review): unlike the inverse branch, result_out keeps its
                # trailing singleton axis here (no squeeze) — confirm shapes.
                result_out = ((scale_out * target_out[(..., None)]) + shift_out)
                log_jac_out = tf.math.log((scale_out + 1e-10))
                log_jac_out = tf.squeeze(log_jac_out, axis=(- 1))
                log_jac = tf.tensor_scatter_nd_update(log_jac, target_out_idx, log_jac_out)
            else:
                result_out = ((target_out[(..., None)] - shift_out) / scale_out)
                result_out = tf.squeeze(result_out, axis=(- 1))
            result = tf.tensor_scatter_nd_update(result, target_out_idx, result_out)
        if (not inverse):
            return (result, tf.reduce_sum(log_jac, axis=(- 1)))
        return result

    def _semantic_spline_parameters(self, parameters):
        """Reshape the conditioner's flat output to (..., dim_out, params) and
        split it into the five named parameter groups.

        Raises:
            NotImplementedError: for inputs that are not rank 2 or 3.
        """
        shape = tf.shape(parameters)
        rank = len(shape)
        if (rank == 2):
            new_shape = (shape[0], self.dim_out, (- 1))
        elif (rank == 3):
            new_shape = (shape[0], shape[1], self.dim_out, (- 1))
        else:
            raise NotImplementedError('Spline flows can currently only operate on 2D and 3D inputs!')
        parameters = tf.reshape(parameters, new_shape)
        parameters = tf.split(parameters, list(self.spline_params_counts.values()), axis=(- 1))
        return parameters

    def _constrain_parameters(self, parameters):
        """Map raw network outputs to valid spline parameters.

        Edges are offset so the untrained network starts at the default
        domain; widths/heights/derivatives pass through shifted softplus so
        they are positive and default to the identity-like configuration.
        """
        (left_edge, bottom_edge, widths, heights, derivatives) = parameters
        left_edge = (left_edge + self.default_domain[0])
        bottom_edge = (bottom_edge + self.default_domain[2])
        # Shift softplus so zero network output yields the default bin size.
        default_width = ((self.default_domain[1] - self.default_domain[0]) / self.bins)
        default_height = ((self.default_domain[3] - self.default_domain[2]) / self.bins)
        xshift = tf.math.log((tf.math.exp(default_width) - 1))
        yshift = tf.math.log((tf.math.exp(default_height) - 1))
        widths = tf.math.softplus((widths + xshift))
        heights = tf.math.softplus((heights + yshift))
        # Shift so zero output gives derivative 1 (softplus(log(e - 1)) = 1).
        shift = tf.math.log((EULER_CONST - 1.0))
        derivatives = tf.nn.softplus((derivatives + shift))
        # Boundary derivatives are pinned to the linear-tail slope so the
        # spline meets its tails smoothly.
        total_height = tf.reduce_sum(heights, axis=(- 1), keepdims=True)
        total_width = tf.reduce_sum(widths, axis=(- 1), keepdims=True)
        scale = (total_height / total_width)
        derivatives = tf.concat([scale, derivatives, scale], axis=(- 1))
        return (left_edge, bottom_edge, widths, heights, derivatives)
def main(opts):
    """Benchmark the RoIDataLoader: build a dequeue net over all GPUs and time it.

    Loads the training roidb, starts the loader threads, repeatedly dequeues
    minibatches via a Caffe2 DAG net, and logs the average dequeue time per
    batch before shutting the loader down.
    """
    logger = logging.getLogger(__name__)
    roidb = combined_roidb_for_training(cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)
    logger.info('{:d} roidb entries'.format(len(roidb)))
    roi_data_loader = RoIDataLoader(roidb, num_loaders=opts.num_loaders, minibatch_queue_size=opts.minibatch_queue_size, blobs_queue_capacity=opts.blobs_queue_capacity)
    blob_names = roi_data_loader.get_output_names()
    # Build one DequeueBlobs op per GPU, each with its own scoped output blobs.
    net = core.Net('dequeue_net')
    net.type = 'dag'
    all_blobs = []
    for gpu_id in range(cfg.NUM_GPUS):
        with core.NameScope('gpu_{}'.format(gpu_id)):
            with core.DeviceScope(muji.OnGPU(gpu_id)):
                for blob_name in blob_names:
                    blob = core.ScopedName(blob_name)
                    all_blobs.append(blob)
                    workspace.CreateBlob(blob)
                    logger.info('Creating blob: {}'.format(blob))
                net.DequeueBlobs(roi_data_loader._blobs_queue_name, blob_names)
    logger.info(('Protobuf:\n' + str(net.Proto())))
    # NOTE(review): loader_loop / the profiled run executes BEFORE
    # register_sigint_handler()/start(prefill=True) below — verify this
    # ordering is intended and that loader_loop does not require a started loader.
    if opts.profiler:
        import cProfile
        cProfile.runctx('loader_loop(roi_data_loader)', globals(), locals(), sort='cumulative')
    else:
        loader_loop(roi_data_loader)
    roi_data_loader.register_sigint_handler()
    roi_data_loader.start(prefill=True)
    total_time = 0
    for i in range(opts.num_batches):
        start_t = time.time()
        # x_factor runs are timed together and averaged into one sample.
        for _ in range(opts.x_factor):
            workspace.RunNetOnce(net)
        total_time += ((time.time() - start_t) / opts.x_factor)
        logger.info('{:d}/{:d}: Averge dequeue time: {:.3f}s [{:d}/{:d}]'.format((i + 1), opts.num_batches, (total_time / (i + 1)), roi_data_loader._minibatch_queue.qsize(), opts.minibatch_queue_size))
        time.sleep(opts.sleep_time)
    logger.info('Shutting down data loader (EnqueueBlob errors are ok)...')
    roi_data_loader.shutdown()
def predict_contacts(model, x, y, use_cuda):
    """Embed packed sequences, then collect masked contact logits and labels.

    Returns a pair of lists (one entry per input sequence): the flattened
    predicted logits and the matching labels, with negative label positions
    dropped (negative values presumably mark missing pairs — TODO confirm).
    """
    n_seqs = len(x)
    packed, order = pack_sequences(x)
    packed = PackedSequence(Variable(packed.data), packed.batch_sizes)
    embeddings = unpack_sequences(model(packed), order)
    logits = []
    targets = []
    for idx in range(n_seqs):
        emb = embeddings[idx]
        scores = model.predict(emb.unsqueeze(0)).view((- 1))
        labels = y[idx].view((- 1))
        if use_cuda:
            labels = labels.cuda()
        # Keep only positions with non-negative labels.
        valid = (labels >= 0)
        logits.append(scores[valid])
        targets.append(labels[valid])
    return (logits, targets)
def tune_config(key, name, tfms_fixed=None, **kwargs):
    """Tune transformer choice, architecture and training hyper-parameters with Optuna.

    Args:
        key: config key forwarded to ``cfg`` / ``fit_config``.
        name: study name; also names the sqlite storage file ``<name>.db``.
        tfms_fixed: optional mapping of column name -> transformer class that
            is kept fixed (excluded from the search). Defaults to no fixed
            transforms.
        **kwargs: forwarded verbatim to ``fit_config``.

    Returns:
        The Optuna study (possibly resumed from existing storage) after
        running optimization for up to 1000 trials / 24 hours.
    """
    # NOTE: was a mutable default argument (``tfms_fixed={}``); use a None
    # sentinel instead so the default cannot be shared/mutated across calls.
    if tfms_fixed is None:
        tfms_fixed = {}
    import optuna
    from optuna.integration import FastAIPruningCallback
    from optuna.visualization import plot_optimization_history
    import logging
    import sys
    optuna.logging.get_logger('optuna').addHandler(logging.StreamHandler(sys.stdout))
    storage_name = 'sqlite:///{}.db'.format(name)
    # Prune unpromising trials after a short warm-up.
    pruner = optuna.pruners.MedianPruner(n_startup_trials=5, n_warmup_steps=10)
    study = optuna.create_study(study_name=name, storage=storage_name,
                                direction='minimize', pruner=pruner, load_if_exists=True)
    cc = cfg(key)
    dls_train = dl_from_config(cc, bs=10240, frac=1.0)
    # Candidate continuous-column transformers, selected by short name below.
    trange = ContTransformerRange
    tlog = ContTransformerLogRange
    tlog2 = ContTransformerLog2Range
    tnexp = ContTransformerNegExpRange
    tclamp = ContTransformerClamp01Range
    trafos = {'trange': trange, 'tlog': tlog, 'tlog2': tlog2, 'tnexp': tnexp, 'tclamp': tclamp}

    def objective(trial):
        # Start from the fixed transforms; never mutate the caller's dict.
        tfms = copy(tfms_fixed)
        # Choose a transformer per target column (unless fixed by the caller).
        for y in cc.y_names:
            if y not in tfms.keys():
                opt_tfms_y = trial.suggest_categorical(('opt_tfms_' + y), [True, False])
                if opt_tfms_y:
                    tf = trial.suggest_categorical(('tfms_' + y), ['tlog', 'tnexp', 'tclamp'])
                else:
                    tf = 'trange'
                tfms.update({y: trafos.get(tf)})
        # Choose a transformer per continuous feature column.
        for x in cc.cont_names:
            if x not in tfms.keys():
                opt_tfms_x = trial.suggest_categorical(('opt_tfms_' + x), [True, False])
                if opt_tfms_x:
                    tf = trial.suggest_categorical(('tfms_' + x), ['tlog', 'tlog2', 'tnexp'])
                else:
                    tf = 'trange'
                tfms.update({x: trafos.get(tf)})
        # Optionally search the deep (and even deeper) MLP architecture.
        opt_deep_arch = trial.suggest_categorical('opt_deep_arch', [True, False])
        if opt_deep_arch:
            deep_u = trial.suggest_categorical('deep_u', [7, 8, 9, 10])
            deep_n = trial.suggest_categorical('deep_n', [0, 1, 2, 3])
            deep_s = trial.suggest_categorical('deep_s', ['square', 'cone'])
            deep = get_arch(deep_u, deep_n, deep_s)
            use_deeper = trial.suggest_categorical('use_deeper', [True, False])
            if use_deeper:
                deeper_u = trial.suggest_categorical('deeper_u', [7, 8, 9, 10])
                deeper = get_arch(deeper_u, (deep_n + 2), deep_s)
            else:
                deeper = []
        else:
            deep = [1024, 512, 256]
            deeper = []
        lr = trial.suggest_float('lr', 1e-05, 0.001, log=True)
        wide = trial.suggest_categorical('wide', [True, False])
        mixup = trial.suggest_categorical('mixup', [True, False])
        use_bn = trial.suggest_categorical('use_bn', [True, False])
        dropout = trial.suggest_categorical('dropout', [0.0, 0.25, 0.5])
        # Report valid_loss to Optuna so the pruner can stop bad trials early.
        cbs = [FastAIPruningCallback(trial=trial, monitor='valid_loss')]
        learner = fit_config(key=key, dls_train=dls_train, tfms=tfms, lr=lr, deep=deep,
                             deeper=deeper, wide=wide, mixup=mixup, use_bn=use_bn,
                             dropout=dropout, log_wandb=False, cbs=cbs, **kwargs)
        # Final recorded validation loss is the optimization target.
        loss = learner.recorder.final_record.items[1]
        return loss

    study.optimize(objective, n_trials=1000, timeout=86400)
    return study
def _assert_no_error(error, exception_class=None):
    """Raise if *error* (an OSStatus) is non-zero; no-op on success.

    The error message is fetched from Security.framework; when no message is
    available the raw OSStatus code is reported instead. Raises
    ``ssl.SSLError`` unless a different *exception_class* is supplied.
    """
    if error == 0:
        return
    # Fetch a human-readable message and release the CoreFoundation string.
    cf_message = Security.SecCopyErrorMessageString(error, None)
    message = _cf_string_to_unicode(cf_message)
    CoreFoundation.CFRelease(cf_message)
    if message is None or message == u'':
        message = u'OSStatus %s' % error
    cls = ssl.SSLError if exception_class is None else exception_class
    raise cls(message)
def main():
    """Parse the toy spec, synthesize a program from examples, and log the result."""
    logger.info('Parsing Spec...')
    spec = S.parse(toy_spec_str)
    logger.info('Parsing succeeded')
    logger.info('Building synthesizer...')
    # Input/output examples that the synthesized program must satisfy.
    examples = [
        Example(input=[4, 3], output=3),
        Example(input=[6, 3], output=9),
        Example(input=[1, 2], output=(- 2)),
        Example(input=[1, 1], output=0),
    ]
    enumerator = SmtEnumerator(spec, depth=3, loc=2)
    decider = ExampleConstraintDecider(spec=spec, interpreter=ToyInterpreter(), examples=examples)
    synthesizer = Synthesizer(enumerator=enumerator, decider=decider)
    logger.info('Synthesizing programs...')
    prog = synthesizer.synthesize()
    if prog is None:
        logger.info('Solution not found!')
    else:
        logger.info('Solution found: {}'.format(prog))
def match_computation(stats_baseline, key=None, mode='sqrt'):
    """Adjust ``cfg[key[0]][key[1]]`` until ``get_stats()`` matches *stats_baseline*.

    Two phases: (1) multiplicative scaling of the config value until the
    correction step shrinks to <= 1, then (2) unit steps until the baseline is
    hit or overshot; on overshoot, ``cfg.model.match_upper`` decides whether
    the result lands just above or just below the baseline.

    Args:
        stats_baseline: target value for ``get_stats()``.
        key: (section, field) pair addressing the tuned config entry.
            Defaults to ('gnn', 'dim_inner').
        mode: 'sqrt' (step proportional to sqrt of the ratio) or 'linear'.

    Returns:
        The final ``get_stats()`` value after matching.

    Raises:
        ValueError: if *mode* is not 'sqrt' or 'linear'.
    """
    # NOTE: was a mutable default argument (key=['gnn', 'dim_inner']).
    if key is None:
        key = ('gnn', 'dim_inner')
    # Previously an unknown mode left `scale` unbound and crashed with a
    # NameError inside the loop; fail fast with a clear message instead.
    if mode not in ('sqrt', 'linear'):
        raise ValueError("mode must be 'sqrt' or 'linear', got {!r}".format(mode))
    stats = get_stats()
    if stats != stats_baseline:
        # Phase 1: multiplicative scaling toward the baseline.
        while True:
            if mode == 'sqrt':
                scale = math.sqrt(stats_baseline / stats)
            else:
                scale = stats_baseline / stats
            step = int(round(cfg[key[0]][key[1]] * scale)) - cfg[key[0]][key[1]]
            cfg[key[0]][key[1]] += step
            stats = get_stats()
            if abs(step) <= 1:
                break
        # Phase 2: unit steps in the direction of the baseline.
        flag_init = 1 if stats < stats_baseline else (- 1)
        step = 1
        while True:
            cfg[key[0]][key[1]] += flag_init * step
            stats = get_stats()
            flag = 1 if stats < stats_baseline else (- 1)
            if stats == stats_baseline:
                return stats
            if flag != flag_init:
                # Overshot the baseline: match_upper picks which side to keep.
                if not cfg.model.match_upper:
                    if flag < 0:
                        cfg[key[0]][key[1]] -= flag_init * step
                    return get_stats()
                else:
                    if flag > 0:
                        cfg[key[0]][key[1]] -= flag_init * step
                    return get_stats()
    return stats
def eps(err, is_real):
    """Return the symmetric interval [-err, err]: real (RIF) or complex (CIF).

    For the complex case the same interval is used for both the real and
    imaginary parts.
    """
    interval = RIF((- err), err)
    return interval if is_real else CIF(interval, interval)
class GrailEntityDisambProblem:
    """A single entity-disambiguation instance: a mention in a query plus candidates.

    Attributes:
        pid: problem id; the prefix before the first '-' is the question id.
        qid: question id derived from ``pid``.
        query: the full query text.
        mention: the entity mention to disambiguate.
        target_id: gold entity id.
        candidates: candidate entity ids for the mention.
    """

    def __init__(self, pid, query, mention, target_id, candidates):
        self.pid = pid
        # Same result as pid.split('-')[0], without building the full list.
        self.qid = pid.partition('-')[0]
        self.query = query
        self.mention = mention
        self.target_id = target_id
        self.candidates = candidates
def get_model(args):
    """Load the (model, transform, tokenizer) triple selected by ``args.model``.

    Supported families: CLIP variants, DPR sentence encoder, T0 seq2seq,
    GPT-Neo causal LMs (API-hosted models return ``(None, None, None)``),
    and BERT-style tokenizer-only setups. Entries not applicable to a family
    are returned as ``None``.

    Raises:
        ValueError: for an unrecognized model/variant name. (Previously this
        was ``assert 0, print(...)`` — stripped under ``python -O`` and
        raising with a ``None`` message.)
    """
    print('Loading model...')
    if 'clip' in args.model:
        clip_variants = {
            'clip32B': 'ViTB32',
            'clip16B': 'ViTB16',
            'clip336': 'ViTL14',
            'clipres101': 'RN101',
        }
        if args.model not in clip_variants:
            raise ValueError('Unsupported clip variant')
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        (model, transform) = clip.load(clip_variants[args.model], device=device)
        tokenizer = None
    elif args.model == 'dpr':
        tokenizer = None
        transform = None
        model = SentenceTransformer('multi-qa-mpnet-base-dot-v1')
    elif 't0' in args.model:
        transform = None
        t0_variants = {'t0pp': 'bigscience/T0pp', 't03b': 'bigscience/T0_3B'}
        if args.model not in t0_variants:
            raise ValueError('Unsupported t0 variant.')
        t0_variant = t0_variants[args.model]
        tokenizer = AutoTokenizer.from_pretrained(t0_variant, cache_dir=args.cache_dir)
        # Left padding so generation starts right after the prompt in a batch.
        tokenizer.padding_side = 'left'
        model = AutoModelForSeq2SeqLM.from_pretrained(t0_variant, cache_dir=args.cache_dir)
    elif 'gpt' in args.model:
        transform = None
        # API-hosted models are queried remotely; nothing to load locally.
        if args.model in API_MODELS:
            return (None, None, None)
        gpt_variants = {
            'gpt2.7': 'EleutherAI/gpt-neo-2.7B',
            'gpt1.3': 'EleutherAI/gpt-neo-1.3B',
            'gpt125m': 'EleutherAI/gpt-neo-125M',
        }
        if args.model not in gpt_variants:
            raise ValueError('Unsupported gpt variant.')
        gpt_variant = gpt_variants[args.model]
        tokenizer = AutoTokenizer.from_pretrained(gpt_variant, max_token_length=512, cache_dir=args.cache_dir)
        model = AutoModelForCausalLM.from_pretrained(gpt_variant, pad_token_id=tokenizer.eos_token_id, cache_dir=args.cache_dir)
        # GPT-Neo has no pad token; reuse EOS and pad on the left for generation.
        tokenizer.pad_token = tokenizer.eos_token
        tokenizer.padding_side = 'left'
    elif 'bert' in args.model:
        tokenizer = AutoTokenizer.from_pretrained(args.model, cache_dir=args.cache_dir)
        transform = None
        model = None
    else:
        raise ValueError('Unsupported model.')
    # Guard against None: the 'bert' branch returns no model, and calling
    # ``None.to(...)`` here used to crash when --use_gpu was set.
    if args.use_gpu and model is not None:
        model = model.to(args.device)
    return (model, transform, tokenizer)
def hash_model(data):
cmd = np.hstack(data['cad_cmd'])
param = np.vstack(data['cad_param'])
ext = np.hstack(data['cad_ext'])
hash_str = ((((sha256(np.ascontiguousarray(ext).flatten()).hexdigest() + '_') + sha256(np.ascontiguousarray(cmd).flatten()).hexdigest()) + '_') + sha256(np.ascontiguousarray(param).flatten()).hexdigest())
uid = data['tmp_uid']
return (hash_str, uid) |
class normfactor_builder():
    """Accumulates per-sample mask data for normfactor modifiers.

    ``append`` is called once per (key, channel, sample) and extends a boolean
    mask marking which bins the modifier applies to; ``finalize`` returns the
    accumulated structure.
    """

    # A normfactor parameter is shared across all samples/channels.
    is_shared = True

    def __init__(self, config):
        self.builder_data = {}
        self.config = config
        self.required_parsets = {}

    def collect(self, thismod, nom):
        """Return one boolean per nominal bin: True iff the modifier is present."""
        return {'mask': [bool(thismod)] * len(nom)}

    def append(self, key, channel, sample, thismod, defined_samp):
        """Extend the mask for (key, sample) and register the parameter set."""
        sample_data = self.builder_data.setdefault(key, {}).setdefault(sample, {}).setdefault('data', {'mask': []})
        if defined_samp:
            nom = defined_samp['data']
        else:
            # Sample not defined in this channel: pad with zeroed bins.
            nom = [0.0] * self.config.channel_nbins[channel]
        sample_data['mask'] += self.collect(thismod, nom)['mask']
        if thismod:
            # Register the parameter set only once per modifier name.
            self.required_parsets.setdefault(thismod['name'], [required_parset(defined_samp['data'], thismod['data'])])

    def finalize(self):
        return self.builder_data
def set_module(module):
    """Decorator factory that overrides the decorated function's ``__module__``.

    A ``None`` module leaves the function untouched; the function object is
    always returned unchanged otherwise.
    """
    def decorator(func):
        if module is None:
            return func
        func.__module__ = module
        return func
    return decorator
class VGVQADataset(VQADataset):
    """Visual Genome VQA dataset: one gold answer per question, unit weight."""

    def __init__(self, vis_processor, text_processor, vis_root, ann_paths):
        super().__init__(vis_processor, text_processor, vis_root, ann_paths)

    def __getitem__(self, index):
        record = self.annotation[index]
        raw_image = Image.open(os.path.join(self.vis_root, record['image'])).convert('RGB')
        return {
            'image': self.vis_processor(raw_image),
            'text_input': self.text_processor(record['question']),
            # VG annotations carry a single answer; give it full weight.
            'answers': [record['answer']],
            'weights': [1.0],
        }
class Base(object):
    """Callable wrapper bundling a dataset db, a network, and an inference fn.

    Calling an instance runs inference on an image and returns a dict mapping
    category names to that category's detections.
    """

    def __init__(self, db, nnet, func, model=None):
        super(Base, self).__init__()
        self._db = db
        self._nnet = nnet
        self._func = func
        if model is not None:
            # Load weights before moving the net to GPU / eval mode.
            self._nnet.load_pretrained_params(model)
        self._nnet.cuda()
        self._nnet.eval_mode()

    def _inference(self, image, *args, **kwargs):
        # Operate on a copy so the caller's image is never mutated.
        return self._func(self._db, self._nnet, image.copy(), *args, **kwargs)

    def __call__(self, image, *args, **kwargs):
        num_categories = self._db.configs['categories']
        detections = self._inference(image, *args, **kwargs)
        # Category ids are 1-based; map each id to its name.
        return {self._db.cls2name(cls_id): detections[cls_id] for cls_id in range(1, (num_categories + 1))}
# NOTE(review): a stray ".script" token preceded this class and was a syntax
# error. It looks like the remnant of a truncated "@torch.jit.script"
# decorator; restore that decorator if TorchScript compilation is required.
class Match(object):
    """Stores the result of matching columns (e.g. anchors) to rows (e.g. boxes).

    ``match_results`` is a rank-1 int32/int64 tensor where entry i means:
        >= 0 : column i is matched to that row index
        -1   : column i is unmatched
        -2   : column i is ignored
    """

    def __init__(self, match_results: torch.Tensor):
        if (len(match_results.shape) != 1):
            raise ValueError('match_results should have rank 1')
        if (match_results.dtype not in (torch.int32, torch.int64)):
            raise ValueError('match_results should be an int32 or int64 scalar tensor')
        self.match_results = match_results

    def matched_column_indices(self):
        """Indices of columns with a match (value > -1)."""
        return torch.nonzero((self.match_results > (- 1))).flatten().long()

    def matched_column_indicator(self):
        """Boolean mask of matched columns."""
        return (self.match_results >= 0)

    def num_matched_columns(self):
        return self.matched_column_indices().numel()

    def unmatched_column_indices(self):
        """Indices of unmatched columns (value == -1)."""
        return torch.nonzero((self.match_results == (- 1))).flatten().long()

    def unmatched_column_indicator(self):
        return (self.match_results == (- 1))

    def num_unmatched_columns(self):
        return self.unmatched_column_indices().numel()

    def ignored_column_indices(self):
        """Indices of ignored columns (value == -2)."""
        return torch.nonzero(self.ignored_column_indicator()).flatten().long()

    def ignored_column_indicator(self):
        return (self.match_results == (- 2))

    def num_ignored_columns(self):
        return self.ignored_column_indices().numel()

    def unmatched_or_ignored_column_indices(self):
        """Indices of columns that are either unmatched or ignored (value < 0)."""
        return torch.nonzero((0 > self.match_results)).flatten().long()

    def matched_row_indices(self):
        """Row index matched to each matched column, in matched-column order."""
        return torch.gather(self.match_results, 0, self.matched_column_indices()).flatten().long()

    def gather_based_on_match(self, input_tensor, unmatched_value, ignored_value):
        """Gather input rows per column: matched columns take their row's value,
        unmatched columns take ``unmatched_value``, ignored ones ``ignored_value``.
        """
        if isinstance(ignored_value, torch.Tensor):
            input_tensor = torch.cat([ignored_value, unmatched_value, input_tensor], dim=0)
        else:
            input_tensor = torch.cat([torch.tensor([ignored_value, unmatched_value], dtype=input_tensor.dtype, device=input_tensor.device), input_tensor], dim=0)
        # Shift match values by 2 so -2 -> slot 0 (ignored), -1 -> slot 1
        # (unmatched), and row r -> slot r + 2.
        gather_indices = torch.clamp((self.match_results + 2), min=0)
        gathered_tensor = torch.index_select(input_tensor, 0, gather_indices)
        return gathered_tensor
def transform_svhn(augment=False, from_tensor=False, normalize=True):
    """Build (train_transform, test_transform) pipelines for SVHN.

    Args:
        augment: add random-crop augmentation to the train pipeline only.
        from_tensor: inputs are already tensors, so skip ToTensor().
        normalize: append channel-wise normalization with the SVHN statistics.
    """
    if augment:
        aug = [transforms.RandomCrop(32, padding=4)]
        print('Dataset with basic SVHN augmentation')
    else:
        aug = []
    cast = [] if from_tensor else [transforms.ToTensor()]
    normal_fn = [transforms.Normalize(mean=MEANS['svhn'], std=STDS['svhn'])] if normalize else []
    # Augmentation applies to training only; cast/normalize apply to both.
    train_transform = transforms.Compose(cast + aug + normal_fn)
    test_transform = transforms.Compose(cast + normal_fn)
    return (train_transform, test_transform)
def encode_label(x):
    """Encode a pandas Series as integer labels.

    Labels are assigned by the lexicographic order of the values' string
    representations (0 for the smallest). The input Series is not modified.
    """
    series = x.copy(deep=True)
    # Unique string forms, sorted lexicographically, define the label order.
    ordered = sorted({str(item) for item in series.astype(str).unique()})
    mapping = {value: idx for idx, value in enumerate(ordered)}
    return series.map(lambda item: mapping[str(item)])
# NOTE(review): removed trailing scraped web-page residue ("Subsets and
# Splits / No community queries yet / The top public SQL queries ...") --
# it was not Python source and broke the module's syntax.