code stringlengths 101 5.91M |
|---|
def get_idx_list_from_words_test():
    """Smoke-test get_idx_list_from_words with a single token and a word list."""
    for words in ('[PAD]', ['i', 'love', 'you']):
        print(vector_initializer.get_idx_list_from_words(words))
def rnn_relu_cell(input, hidden, w_ih, w_hh, b_ih, b_hh):
    """Single step of a vanilla RNN cell with a ReLU nonlinearity.

    Computes relu(input @ w_ih^T + b_ih + hidden @ w_hh^T + b_hh).

    Args:
        input: (batch, input_size) tensor.
        hidden: (batch, hidden_size) tensor of previous hidden state.
        w_ih, w_hh: input-to-hidden and hidden-to-hidden weight matrices.
        b_ih, b_hh: corresponding bias vectors.

    Returns:
        The new hidden state tensor.
    """
    input_gates = torch.mm(input, w_ih.t()) + b_ih
    hidden_gates = torch.mm(hidden, w_hh.t()) + b_hh
    return torch.relu(input_gates + hidden_gates)
def inference_cot(args, question_pool, qes_limit, given_prompt):
    """Run chain-of-thought (CoT) inference over a pool of questions.

    For each question (up to ``qes_limit``), builds a CoT prompt, queries the
    model ``args.multipath`` times, majority-votes over the extracted answers
    (self-consistency), and tallies accuracy against the gold answers.

    Args:
        args: namespace with model/prompting options (model, dataset,
            basic_cot, use_code_style_prompt, multipath, max_length_cot,
            temperature, api_time_interval).
        question_pool: iterable of dicts with 'question', 'question_idx' and
            'answer' keys.
        qes_limit: maximum number of questions to evaluate, or None for all.
        given_prompt: few-shot prompt prefix prepended to every question.

    Returns:
        Tuple (correct, wrong_list, QA_record): number of correct answers, a
        list of mispredicted items, and every raw model answer recorded.
    """
    correct = 0
    qes_count = 0
    wrong_list = []
    QA_record = []
    for (qes_num, qes) in enumerate(question_pool):
        if ((qes_limit is not None) and (qes_count == qes_limit)):
            break
        all_self_consistency_ans = []
        # Choose the prompt suffix based on dataset / prompting style.
        if ((args.dataset == 'last_letters') and (args.use_code_style_prompt == True)):
            prompt = (((given_prompt + 'Q: ') + qes['question']) + "\nA: Let's think step by step in Python.")
        elif (args.basic_cot is True):
            prompt = (((given_prompt + 'Q: ') + qes['question']) + '\nA:')
        else:
            prompt = (((given_prompt + 'Q: ') + qes['question']) + "\nA: Let's think step by step.")
        # Chat models take a message list; completion models take a raw prompt.
        if (args.model == 'gpt-3.5-turbo'):
            message_list = [{'role': 'user', 'content': prompt}]
        else:
            prompt_list = [prompt]
        # Sample `multipath` independent reasoning paths for self-consistency.
        for path in range(0, args.multipath):
            if (args.model == 'gpt-3.5-turbo'):
                responses = chatgpt_request(model=args.model, message_list=message_list, max_tokens=args.max_length_cot, temperature=args.temperature, sleep=args.api_time_interval)
            else:
                responses = GPT3_request(model=args.model, input_prompt=prompt_list, max_tokens=args.max_length_cot, time_interval=args.api_time_interval, temperature=args.temperature, stop='\n')
            QA = {}
            QA['qes_idx'] = qes['question_idx']
            QA['Q'] = qes['question']
            # Chat vs. completion APIs expose the text in different fields.
            if (args.model == 'gpt-3.5-turbo'):
                QA['A'] = responses['choices'][0]['message']['content']
            else:
                QA['A'] = responses['choices'][0]['text']
            QA_record.append(QA)
            pred_ans = answer_extraction(args, responses)
            # Verbose per-question logging only makes sense for a single path.
            if (args.multipath == 1):
                print(('-' * 20))
                print(f'Question number: {qes_num}')
                print(f"Dataset index: {qes['question_idx']}")
                print((f'Q: ' + qes['question']))
                if ((args.dataset == 'last_letters') and (args.use_code_style_prompt is True)):
                    print((f"A: Let's think step by step in Python." + QA['A']))
                elif (args.basic_cot is True):
                    print(f"A: {QA['A']}")
                else:
                    print((f"A: Let's think step by step." + QA['A']))
                print(f'pred_ans: {pred_ans}')
                print(f"GT: {qes['answer']}")
            all_self_consistency_ans.append(pred_ans)
        # Majority vote over all sampled answers (self-consistency decoding).
        final_consistent_ans = find_most_frequent(all_self_consistency_ans, args.multipath)[(- 1)]
        if (final_consistent_ans == qes['answer']):
            correct += 1
        else:
            wrong_list.append({'idx': qes['question_idx'], 'pred_ans': final_consistent_ans, 'GT': qes['answer']})
        qes_count += 1
    return (correct, wrong_list, QA_record)
class PDELU_VGG(nn.Module):
    """VGG-style CIFAR-10 classifier using the PDELU activation.

    The architecture is determined by the config list ``cfg[vgg_name]``:
    integers are conv output channels, 'M' denotes a 2x2 max-pool.
    """

    def __init__(self, vgg_name):
        super(PDELU_VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        # 512 features after the final stage, 10 CIFAR-10 classes.
        self.classifier = nn.Linear(512, 10)

    def forward(self, x):
        """Run the conv stack, flatten, and classify."""
        features = self.features(x)
        flat = features.view(features.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, cfg):
        """Build the convolutional feature extractor from a VGG config list."""
        layers = []
        channels = 3
        for spec in cfg:
            if spec == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([
                    nn.Conv2d(channels, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    PDELU(),
                ])
                channels = spec
        # Trailing no-op pool kept for parity with the reference VGG recipe.
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
_properties  # NOTE(review): likely residue of a stripped `@make_properties` decorator — confirm against the original source.
class Enumerator():
    """Base class for enumerating combinations of outermost-scope map entries
    of an SDFG state, respecting the data-flow topology between maps.

    Subclasses implement ``iterator``.
    """
    # Debug toggle exposed as a dace Property.
    debug = Property(desc='Debug mode', default=False, dtype=bool)
    def __init__(self, sdfg: SDFG, graph: SDFGState, subgraph: SubgraphView=None, condition_function: Callable=None):
        """Collect the outermost-scope map entries of `graph` (restricted to
        `subgraph` if given) and cache scope information.

        Args:
            sdfg: the SDFG containing `graph`.
            graph: the SDFG state whose maps are enumerated.
            subgraph: optional view restricting the candidate maps.
            condition_function: optional predicate consulted by subclasses.
        """
        self._sdfg = sdfg
        self._graph = graph
        self._subgraph = subgraph
        self._scope_children = graph.scope_children()
        self._condition_function = condition_function
        # Only outermost map entries are enumeration candidates.
        self._map_entries = helpers.get_outermost_scope_maps(sdfg, graph, subgraph)
        self._max_length = len(self._map_entries)
    def iterator(self):
        """Yield enumerated map combinations; must be implemented by subclasses."""
        raise NotImplementedError
    def list(self):
        """Materialize the iterator into a list."""
        return list((e for e in self.iterator()))
    def __iter__(self):
        (yield from self.iterator())
    def calculate_topology(self, subgraph):
        """Build adjacency, child/parent relations and a topological labeling
        of the candidate map entries.

        Populates:
            _adjacency_list: maps that share an access-node neighbor.
            _source_maps: maps with no producer (topological sources).
            _labels: map entry -> topological order id.
        """
        sdfg = self._sdfg
        graph = self._graph
        self._adjacency_list = {m: set() for m in self._map_entries}
        # Map each candidate's exit node back to its entry node.
        exit_nodes = {graph.exit_node(me): me for me in self._map_entries}
        if subgraph:
            # Extend the subgraph by the immediate neighbors of the candidate
            # maps so boundary access nodes are considered as well.
            proximity_in = set((ie.src for me in self._map_entries for ie in graph.in_edges(me)))
            proximity_out = set((ie.dst for me in exit_nodes for ie in graph.out_edges(me)))
            extended_subgraph = SubgraphView(graph, set(itertools.chain(subgraph.nodes(), proximity_in, proximity_out)))
        for node in (extended_subgraph.nodes() if subgraph else graph.nodes()):
            if isinstance(node, nodes.AccessNode):
                # Candidate maps that write to / read from this access node.
                adjacent_entries = set()
                for e in graph.in_edges(node):
                    if (isinstance(e.src, nodes.MapExit) and (e.src in exit_nodes)):
                        adjacent_entries.add(exit_nodes[e.src])
                for e in graph.out_edges(node):
                    if (isinstance(e.dst, nodes.MapEntry) and (e.dst in self._map_entries)):
                        adjacent_entries.add(e.dst)
                # Fully connect all maps adjacent to the same access node.
                for entry in adjacent_entries:
                    for other_entry in adjacent_entries:
                        if (entry != other_entry):
                            self._adjacency_list[entry].add(other_entry)
                            self._adjacency_list[other_entry].add(entry)
        # Derive producer/consumer (parent/child) relations between maps:
        # map A is a parent of B when A's exit writes an access node B reads.
        children_dict = defaultdict(set)
        parent_dict = defaultdict(set)
        for map_entry in self._map_entries:
            map_exit = graph.exit_node(map_entry)
            for e in graph.out_edges(map_exit):
                if isinstance(e.dst, nodes.AccessNode):
                    for oe in graph.out_edges(e.dst):
                        if (oe.dst in self._map_entries):
                            other_entry = oe.dst
                            children_dict[map_entry].add(other_entry)
                            parent_dict[other_entry].add(map_entry)
        self._source_maps = [me for me in self._map_entries if (len(parent_dict[me]) == 0)]
        # Kahn-style topological labeling; ties broken by node id for determinism.
        self._labels = {}
        current_id = 0
        while (current_id < len(self._map_entries)):
            candidates = list((me for (me, s) in parent_dict.items() if ((len(s) == 0) and (me not in self._labels))))
            candidates.sort(key=(lambda me: self._graph.node_id(me)))
            for c in candidates:
                self._labels[c] = current_id
                current_id += 1
                for c_child in children_dict[c]:
                    parent_dict[c_child].remove(c)
def _colliding_remote_schema(testdir):
    """Write a family of JSON schema files whose $refs resolve through remote
    definitions that deliberately collide (b.json points into both bc.json and
    c.json, which disagree on the type of 'd')."""
    schemas = (
        ('bar', '{"bar": {"properties": {"a": {"$ref": "b.json#/a"}, "b": {"$ref": "b.json#/ab"}}, "type": "object", "required": ["a", "b"]}}'),
        ('b', '{"a": {"$ref": "bc.json#/d"}, "ab": {"$ref": "c.json#/d"}}'),
        ('bc', '{"d": {"type": "integer"}}'),
        ('c', '{"d": {"type": "string"}}'),
    )
    for name, content in schemas:
        testdir.makefile('.json', **{name: content})
def logical_xor_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes):
    """Backward pass of logical_xor: the op is non-differentiable, so a None
    gradient is returned for every incoming gradient and input."""
    total = len(grad_inputs) + len(inputs)
    return [None for _ in range(total)]
class ScaleEqualizationMidActivation(BaseScaleEqualization):
    """Scale-equalization substitution that matches the mid-activation pattern
    (MATCHER_MID) and rescales the matched layers' kernel and bias attributes."""
    def __init__(self, quant_config: QuantizationConfig, fw_info: FrameworkInfo):
        # Delegate entirely to the base class, binding the mid-activation
        # matcher and the framework-specific kernel/bias attribute names.
        super().__init__(quant_config=quant_config, fw_info=fw_info, matcher_instance=MATCHER_MID, kernel_str=KERNEL, bias_str=BIAS)
class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel attention followed by an
    optional spatial attention stage.

    Args:
        gate_channels: number of channels entering the channel gate.
        reduction_ratio: channel reduction used inside the channel gate MLP.
        pool_types: pooling modes forwarded to ChannelGate; None (the default)
            selects the usual ['avg', 'max'] pair.
        no_spatial: when True, skip the spatial attention stage entirely.
    """

    def __init__(self, gate_channels, reduction_ratio=16, pool_types=None, no_spatial=False):
        super(CBAM, self).__init__()
        # BUGFIX: the original used a mutable default argument
        # (pool_types=['avg', 'max']) shared across all calls; use a None
        # sentinel instead, with identical effective default behavior.
        if pool_types is None:
            pool_types = ['avg', 'max']
        self.ChannelGate = ChannelGate(gate_channels, reduction_ratio, pool_types)
        self.no_spatial = no_spatial
        if not no_spatial:
            self.SpatialGate = SpatialGate()

    def forward(self, x):
        """Apply channel attention, then spatial attention unless disabled."""
        x_out = self.ChannelGate(x)
        if not self.no_spatial:
            x_out = self.SpatialGate(x_out)
        return x_out
def meta_testing(test_dataset, model, epochs=10, episodes=1000, ways=5, shots=5, query_num=15):
    """Evaluate a prototypical network over few-shot episodes.

    Runs `epochs` rounds of `episodes` sampled N-way/K-shot tasks, computes
    prototype-based predictions for each query set, and prints per-epoch and
    overall loss/accuracy statistics.

    Args:
        test_dataset: dataset exposing sample_task_set(ways, shots, query_num).
        model: embedding network (put into eval mode here).
        epochs: number of evaluation rounds.
        episodes: episodes sampled per round.
        ways: number of classes per episode.
        shots: support examples per class.
        query_num: query examples per class.
    """
    module_info = utils.get_info_str('protonet', test_dataset, model, (str(ways) + 'ways'), (str(shots) + 'shots'))
    (loss_list, acc_list) = ([], [])
    model.eval()
    for epoch in range(epochs):
        (test_loss, test_acc) = (0.0, 0.0)
        for _ in range(episodes):
            task = test_dataset.sample_task_set(ways=ways, shots=shots, query_num=query_num)
            # NOTE(review): presumably converts the task data to tensors — confirm backend semantics.
            task.transfer_backend('tensor')
            (support_embeddings, query_embeddings) = (model(task.support_data), model(task.query_data))
            # Class prototypes = mean support embedding per class.
            prototypes = get_prototypes(support_embeddings, task.support_labels, ways, shots)
            (loss, acc) = _get_prediction(prototypes, query_embeddings, task.query_labels)
            test_loss += loss.numpy()[0]
            test_acc += acc
        loss_list.append((test_loss / episodes))
        acc_list.append((test_acc / episodes))
        print('Test Epoch', epoch, [module_info], 'Loss', (test_loss / episodes), '\t', 'Accuracy', (test_acc / episodes))
    print('Test finished', [module_info])
    print('Test Loss', np.mean(loss_list), '\tTest Accuracy', np.mean(acc_list), '\tStd', np.std(acc_list))
class Clip(core.Clip):
    """A single audio clip with acoustic-scene metadata.

    NOTE(review): the accessors below (audio, split, tags, city, identifier)
    are normally decorated with @property in mirdata-style loaders; decorators
    may have been stripped during extraction — confirm against the original.
    """
    def __init__(self, clip_id, data_home, dataset_name, index, metadata):
        super().__init__(clip_id, data_home, dataset_name, index, metadata)
        # Path to this clip's audio file, resolved from the dataset index.
        self.audio_path = self.get_path('audio')
    def audio(self) -> Optional[Tuple[(np.ndarray, float)]]:
        """Return (signal, sample_rate) loaded from audio_path."""
        return load_audio(self.audio_path)
    def split(self):
        """Return the dataset split this clip belongs to, from clip metadata."""
        return self._clip_metadata.get('split')
    def tags(self):
        """Return the scene label wrapped in a Tags annotation, or None."""
        scene_label = self._clip_metadata.get('scene_label')
        if (scene_label is None):
            return None
        else:
            # Single open-vocabulary tag with full confidence.
            return annotations.Tags([scene_label], 'open', np.array([1.0]))
    def city(self):
        """Return the recording city from clip metadata."""
        return self._clip_metadata.get('city')
    def identifier(self):
        """Return the recording identifier from clip metadata."""
        return self._clip_metadata.get('identifier')
    def to_jams(self):
        """Convert this clip's audio path, tags and metadata to a JAMS object."""
        return jams_utils.jams_converter(audio_path=self.audio_path, tags=self.tags, metadata=self._clip_metadata)
def get_fused_cname(fused_cname, orig_cname):
    """Build the C name of a fused function specialization: the fused-function
    prefix, the fused cname and the original cname, concatenated."""
    assert fused_cname and orig_cname
    combined = '%s%s%s' % (Naming.fused_func_prefix, fused_cname, orig_cname)
    return StringEncoding.EncodedString(combined)
def exact_set_match(gold: str, pred: str) -> float:
    """Score 1.0 when the gold and predicted answer sets are exactly equal,
    otherwise 0.0."""
    gold_set, pred_set = extract_gold_pred_sets(gold, pred)
    return 1.0 if gold_set == pred_set else 0.0
class Critic(nn.Module):
    """Value head: a feature-extractor body followed by a linear output layer.

    Args:
        body: module exposing `feature_dim` that maps observations to features.
        output_dim: size of the critic output.
        use_layer_init: when True, apply layer_init (w_scale=0.1) to the head.
    """

    def __init__(self, body: nn.Module, output_dim: int, use_layer_init: bool=True):
        super().__init__()
        self.body = body
        fc = nn.Linear(self.body.feature_dim, output_dim)
        if use_layer_init:
            fc = layer_init(fc, w_scale=0.1)
        self.fc = fc

    def forward(self, *x):
        """Concatenate inputs along the last dim, embed, and project to values."""
        features = self.body(torch.cat(x, -1))
        return self.fc(features)
class TestAll(TestCasePlus):
    """Tests for the seq2seq example datasets: truncation behavior, dataset
    packing, dynamic batch sizing and sortish sampling.

    NOTE(review): the bare '([...])' expression statements and the leading
    '.skipif(...)' line below appear to be decorators whose
    '@parameterized.expand' / '@pytest.mark' prefixes were lost during
    extraction — restore the prefixes for the parameterization to take effect.
    """
    ([MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM])
    def test_seq2seq_dataset_truncation(self, tok_name):
        """Source/target batches must be truncated to the configured max lengths."""
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max((len(tokenizer.encode(a)) for a in ARTICLES))
        max_len_target = max((len(tokenizer.encode(a)) for a in SUMMARIES))
        max_src_len = 4
        max_tgt_len = 8
        # Sanity: raw data must actually exceed the truncation limits.
        assert (max_len_target > max_src_len)
        assert (max_len_source > max_src_len)
        (src_lang, tgt_lang) = ('ro_RO', 'de_DE')
        train_dataset = Seq2SeqDataset(tokenizer, data_dir=tmp_dir, type_path='train', max_source_length=max_src_len, max_target_length=max_tgt_len, src_lang=src_lang, tgt_lang=tgt_lang)
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert (batch['attention_mask'].shape == batch['input_ids'].shape)
            # show that articles were trimmed / padded to the requested widths
            assert (batch['input_ids'].shape[1] == max_src_len)
            assert (batch['labels'].shape[1] == max_tgt_len)
            if (tok_name != MBART_TINY):
                continue
            # MBART-specific: language codes and EOS placement in the batch.
            batch['decoder_input_ids'] = shift_tokens_right(batch['labels'], tokenizer.pad_token_id)
            assert (batch['decoder_input_ids'][(0, 0)].item() == tokenizer.lang_code_to_id[tgt_lang])
            assert (batch['decoder_input_ids'][(0, (- 1))].item() == tokenizer.eos_token_id)
            assert (batch['input_ids'][(0, (- 2))].item() == tokenizer.eos_token_id)
            assert (batch['input_ids'][(0, (- 1))].item() == tokenizer.lang_code_to_id[src_lang])
            break
    ([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        """LegacySeq2SeqDataset pads sources to the batch max, truncates targets."""
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max((len(tokenizer.encode(a)) for a in ARTICLES))
        max_len_target = max((len(tokenizer.encode(a)) for a in SUMMARIES))
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(tokenizer, data_dir=tmp_dir, type_path='train', max_source_length=20, max_target_length=trunc_target)
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert (batch['attention_mask'].shape == batch['input_ids'].shape)
            assert (batch['input_ids'].shape[1] == max_len_source)
            assert (20 >= batch['input_ids'].shape[1])
            assert (batch['labels'].shape[1] == trunc_target)
            assert (max_len_target > trunc_target)
            break
    def test_pack_dataset(self):
        """pack_data_dir should merge all examples into one, preserving text length."""
        tokenizer = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25')
        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath('train.source').open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath('train.source').open().readlines()
        assert (len(packed_examples) < len(orig_examples))
        assert (len(packed_examples) == 1)
        # Packing concatenates text: total character count must be preserved.
        assert (len(packed_examples[0]) == sum((len(x) for x in orig_examples)))
        assert (orig_paths == new_paths)
    .skipif((not FAIRSEQ_AVAILABLE), reason='This test requires fairseq')
    def test_dynamic_batch_size(self):
        """Dynamic sampler should yield variable batch sizes within the token budget."""
        if (not FAIRSEQ_AVAILABLE):
            return
        (ds, max_tokens, tokenizer) = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert (len(set(batch_sizes)) > 1)
        assert (sum(batch_sizes) == len(ds))
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch['input_ids'].shape
            bs = src_shape[0]
            assert (((bs % required_batch_size_multiple) == 0) or (bs < required_batch_size_multiple))
            num_src_tokens = np.product(batch['input_ids'].shape)
            num_src_per_batch.append(num_src_tokens)
            # Allow 10% slack over the token budget before flagging a failure.
            if (num_src_tokens > (max_tokens * 1.1)):
                failures.append(num_src_tokens)
        assert (num_src_per_batch[0] == max(num_src_per_batch))
        if failures:
            raise AssertionError(f'too many tokens in {len(failures)} batches')
    def test_sortish_sampler_reduces_padding(self):
        """Sortish sampling must strictly reduce pad tokens vs. naive batching."""
        (ds, _, tokenizer) = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)
        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)
        pad = tokenizer.pad_token_id
        def count_pad_tokens(data_loader, k='input_ids'):
            # Per-batch counts of padding tokens for field k.
            return [batch[k].eq(pad).sum().item() for batch in data_loader]
        assert (sum(count_pad_tokens(sortish_dl, k='labels')) < sum(count_pad_tokens(naive_dl, k='labels')))
        assert (sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl)))
        assert (len(sortish_dl) == len(naive_dl))
    def _get_dataset(self, n_obs=1000, max_len=128):
        """Build a Seq2SeqDataset over the test (or, with USE_REAL_DATA, real)
        wmt_en_ro data; returns (dataset, max_tokens, tokenizer)."""
        if os.getenv('USE_REAL_DATA', False):
            data_dir = 'examples/seq2seq/wmt_en_ro'
            max_tokens = ((max_len * 2) * 64)
            if (not Path(data_dir).joinpath('train.len').exists()):
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = 'examples/seq2seq/test_data/wmt_en_ro'
            max_tokens = (max_len * 4)
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(tokenizer, data_dir=data_dir, type_path='train', max_source_length=max_len, max_target_length=max_len, n_obs=n_obs)
        return (ds, max_tokens, tokenizer)
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        """Two distributed sampler ranks must receive disjoint index sets."""
        (ds, max_tokens, tokenizer) = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert (ids1.intersection(ids2) == set())
    ([MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM])
    def test_dataset_kwargs(self, tok_name):
        """dataset_kwargs must carry language codes for MBART and
        add_prefix_space only for BART."""
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if (tok_name == MBART_TINY):
            train_dataset = Seq2SeqDataset(tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path='train', max_source_length=4, max_target_length=8, src_lang='EN', tgt_lang='FR')
            kwargs = train_dataset.dataset_kwargs
            assert (('src_lang' in kwargs) and ('tgt_lang' in kwargs))
        else:
            train_dataset = Seq2SeqDataset(tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path='train', max_source_length=4, max_target_length=8)
            kwargs = train_dataset.dataset_kwargs
            assert (('add_prefix_space' not in kwargs) if (tok_name != BART_TINY) else ('add_prefix_space' in kwargs))
            assert ((len(kwargs) == 1) if (tok_name == BART_TINY) else (len(kwargs) == 0))
class TestDBLDetector():
    """Tests DBLDetector anomaly scoring over per-attribute log-count series."""
    def setup(self):
        pass
    def test_dbl(self, log_counter_df):
        """Score each attribute's count series; short series score all-zero."""
        counter_df = log_counter_df
        # Time-series view of the counts indexed by timestamp.
        # NOTE(review): ts_df is constructed but never read afterwards.
        ts_df = counter_df[[constants.LOG_COUNTS]]
        ts_df.index = counter_df[constants.LOG_TIMESTAMPS]
        # Collapse all non-count, non-timestamp columns into one attribute key.
        counter_df['attribute'] = counter_df.drop([constants.LOG_COUNTS, constants.LOG_TIMESTAMPS], axis=1).apply((lambda x: '-'.join(x.astype(str))), axis=1)
        attr_list = counter_df['attribute'].unique()
        res = pd.Series()
        for attr in attr_list:
            temp_df = counter_df[(counter_df['attribute'] == attr)][[constants.LOG_TIMESTAMPS, constants.LOG_COUNTS]]
            if (temp_df.shape[0] < constants.MIN_TS_LENGTH):
                # Series too short to model: score every point as non-anomalous.
                anom_score = np.repeat(0.0, temp_df.shape[0])
                res = res.append(pd.Series(anom_score, index=temp_df.index))
            else:
                params = DBLDetectorParams(wind_sz='1min')
                model = DBLDetector(params)
                model.fit(temp_df)
                anom_score = model.predict(temp_df)['anom_score']
                res = res.append(anom_score)
        # NOTE(review): pd.Series.append was removed in pandas 2.0 — this test
        # requires pandas < 2; switch to pd.concat when upgrading.
        assert (res.index == counter_df.index).all(), 'Res.index should be identical to counter_df.index'
        assert (len(res) == len(counter_df.index)), 'length of res should be equal to length of counter_df'
def process_class_names(instance):
    """Normalize the class-name span in a tokenized code snippet.

    Searches the first ten tokens for the span between 'class' and '(',
    strips punctuation, then re-joins the pieces so CamelCase fragments are
    glued together while lowercase words stay space-separated. The normalized
    name replaces the original span once in the joined snippet.

    Args:
        instance: list of string tokens.

    Returns:
        The token list with the class-name span normalized, or the input
        unchanged when no class declaration is found or processing fails
        (deliberately best-effort).
    """
    try:
        words = instance[:10]
        start = words.index('class')  # ValueError when 'class' is absent
        end = words.index('(')
        # Re-join the (possibly multi-token) class name.
        clss = words[start + 1]
        for i in range(start + 2, end):
            clss = clss + ' ' + words[i]
        original_clss = clss
        clss = ' '.join(strip_punctuation(clss).split())
        # Glue CamelCase fragments together, keep lowercase words separated.
        pieces = clss.split(' ')
        clss = ''
        for word in pieces:
            if word[0].isupper():
                clss = clss + ' ' + word
            else:
                clss = clss + word
        clss = clss.strip()
        instance = ' '.join(instance).replace(original_clss, clss, 1).split(' ')
    except Exception:
        # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit propagate; malformed snippets (no 'class'/'(' token,
        # empty words, helper failures) still fall through untouched.
        pass
    return instance
class OverlapPatchEmbed(nn.Module):
    """Overlapping patch embedding: a strided convolution plus a norm layer.

    Args:
        patch_size: square convolution kernel size.
        stride: convolution stride (patches overlap when stride < patch_size).
        in_chans: number of input channels.
        embed_dim: number of output embedding channels.
        norm_cfg: mmcv-style norm config passed to build_norm_layer.
    """
    def __init__(self, patch_size=7, stride=4, in_chans=3, embed_dim=768, norm_cfg=dict(type='BN', requires_grad=True)):
        super().__init__()
        patch_size = (patch_size, patch_size)
        # Half-kernel padding so the spatial size shrinks only by the stride.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride, padding=((patch_size[0] // 2), (patch_size[1] // 2)))
        # build_norm_layer returns (name, layer); keep only the layer.
        self.norm = build_norm_layer(norm_cfg, embed_dim)[1]
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Truncated-normal init for linear weights, unit/zero for norm layers,
        # fan-out-scaled normal init for convolutions.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if (isinstance(m, nn.Linear) and (m.bias is not None)):
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt((2.0 / fan_out)))
            if (m.bias is not None):
                m.bias.data.zero_()
    def forward(self, x):
        """Project x to embeddings; returns (normed features, H, W) of the map."""
        x = self.proj(x)
        (_, _, H, W) = x.shape
        x = self.norm(x)
        return (x, H, W)
def applyWord2VecMostSimilar(modelname='../data/skip_nostop_single_100features_10minwords_5context', word='#donaldtrump', top=10):
    """Load a word2vec model, print the `top` terms most similar to `word`,
    then print every vocabulary entry containing `word` as a substring.

    Args:
        modelname: path of the saved gensim Word2Vec model.
        word: query term.
        top: number of nearest neighbors to print.
    """
    model = word2vec.Word2Vec.load(modelname)
    print('Find ', top, ' terms most similar to ', word, '...')
    for neighbor in model.most_similar(word, topn=top):
        print(neighbor)
    print('Finding terms containing ', word, '...')
    for term in model.vocab:
        if word in term:
            print(term)
# NOTE(review): the leading '.parametrize' lines appear to be
# @pytest.mark.parametrize decorators whose '@pytest.mark' prefix was lost
# during extraction — restore the prefix to make this block importable.
.parametrize('observation_shape', [(4, 84, 84), (100,)])
.parametrize('action_size', [2])
.parametrize('batch_size', [32])
.parametrize('encoder_factory', [DefaultEncoderFactory()])
def test_create_deterministic_policy(observation_shape: Sequence[int], action_size: int, batch_size: int, encoder_factory: EncoderFactory) -> None:
    """create_deterministic_policy must build a DeterministicPolicy whose
    forward output mu has shape (batch_size, action_size)."""
    policy = create_deterministic_policy(observation_shape, action_size, encoder_factory, device='cpu:0')
    assert isinstance(policy, DeterministicPolicy)
    x = torch.rand((batch_size, *observation_shape))
    y = policy(x)
    assert (y.mu.shape == (batch_size, action_size))
def prepare_data(data_path):
    """Download and preprocess the SST-2 dataset into data_path, producing
    sst2.train/dev/test files and a sst2.vocab vocabulary file.

    NOTE(review): the `url = '` line below is a truncated string literal (the
    download URL was lost during extraction) — this function cannot parse
    until the URL is restored from the original source.
    """
    train_path = os.path.join(data_path, 'sst.train.sentences.txt')
    # Only download when the raw data is not already present.
    if (not tf.gfile.Exists(train_path)):
        url = '
        files = ['stsa.binary.phrases.train', 'stsa.binary.dev', 'stsa.binary.test']
        for fn in files:
            tx.data.maybe_download((url + fn), data_path, extract=True)
    (fn_train, _) = transform_raw_sst(data_path, 'stsa.binary.phrases.train', 'sst2.train')
    transform_raw_sst(data_path, 'stsa.binary.dev', 'sst2.dev')
    transform_raw_sst(data_path, 'stsa.binary.test', 'sst2.test')
    # Build the vocabulary from the training split only.
    vocab = tx.data.make_vocab(fn_train)
    fn_vocab = os.path.join(data_path, 'sst2.vocab')
    with open(fn_vocab, 'w', encoding='utf-8') as f_vocab:
        for v in vocab:
            f_vocab.write((v + '\n'))
    tf.logging.info('Preprocessing done: {}'.format(data_path))
class Partition23(nn.Module):
    """Auto-generated pipeline partition of a T5ForConditionalGeneration model.

    Holds decoder blocks 6 (tail dropout) through 8 (FF input projection) as a
    flat list of submodules and replays their data flow in `forward`. The
    statement order and temporary reuse are generated — do not reorder.
    NOTE(review): the hard-coded head count (32) and head dim (128) in the
    view/transpose calls, and device 'cuda:23', come from the generating
    configuration — confirm before reuse.
    """
    LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/T5Block[6]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Dropout[dropout]', 
    'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wo]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[7]/T5LayerFF[2]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]/T5LayerSelfAttention[0]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]/T5LayerCrossAttention[1]/T5LayerNorm[layer_norm]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[q]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]/T5LayerCrossAttention[1]/T5Attention[EncDecAttention]/Linear[o]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]/T5LayerCrossAttention[1]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]/T5LayerFF[2]/T5LayerNorm[layer_norm]', 
    'T5ForConditionalGeneration/T5Stack[decoder]/T5Block[8]/T5LayerFF[2]/T5DenseReluDense[DenseReluDense]/Linear[wi]']
    # No free-standing parameters/buffers are assigned to this partition.
    TENSORS = []
    def __init__(self, layers, tensors, device='cuda:23'):
        super().__init__()
        # Register each assigned layer as l_0..l_34 in LAYER_SCOPES order.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layers[layer_scope])
        # Register any standalone tensors as parameters (p_*) or buffers (b_*).
        b = p = 0
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # forward receives six flattened inputs (x0..x5).
        self.input_structure = [1, 1, 1, 1, 1, 1]
        # Short module name -> original model attribute path.
        self.lookup = {'l_0': 'decoder.6.2.dropout', 'l_1': 'decoder.7.0.layer_norm', 'l_2': 'decoder.7.0.SelfAttention.q', 'l_3': 'decoder.7.0.SelfAttention.k', 'l_4': 'decoder.7.0.SelfAttention.v', 'l_5': 'decoder.7.0.SelfAttention.dropout', 'l_6': 'decoder.7.0.SelfAttention.o', 'l_7': 'decoder.7.0.dropout', 'l_8': 'decoder.7.1.layer_norm', 'l_9': 'decoder.7.1.EncDecAttention.q', 'l_10': 'decoder.7.1.EncDecAttention.v', 'l_11': 'decoder.7.1.EncDecAttention.dropout', 'l_12': 'decoder.7.1.EncDecAttention.o', 'l_13': 'decoder.7.1.dropout', 'l_14': 'decoder.7.2.layer_norm', 'l_15': 'decoder.7.2.DenseReluDense.wi', 'l_16': 'decoder.7.2.DenseReluDense.dropout', 'l_17': 'decoder.7.2.DenseReluDense.wo', 'l_18': 'decoder.7.2.dropout', 'l_19': 'decoder.8.0.layer_norm', 'l_20': 'decoder.8.0.SelfAttention.q', 'l_21': 'decoder.8.0.SelfAttention.k', 'l_22': 'decoder.8.0.SelfAttention.v', 'l_23': 'decoder.8.0.SelfAttention.dropout', 'l_24': 'decoder.8.0.SelfAttention.o', 'l_25': 'decoder.8.0.dropout', 'l_26': 'decoder.8.1.layer_norm', 'l_27': 'decoder.8.1.EncDecAttention.q', 'l_28': 'decoder.8.1.EncDecAttention.k', 'l_29': 'decoder.8.1.EncDecAttention.v', 'l_30': 'decoder.8.1.EncDecAttention.dropout', 'l_31': 'decoder.8.1.EncDecAttention.o', 'l_32': 'decoder.8.1.dropout', 'l_33': 'decoder.8.2.layer_norm', 'l_34': 'decoder.8.2.DenseReluDense.wi'}
        self.to(self.device)
    def forward(self, *args):
        """Replay decoder blocks 6(tail)-8 on the six flattened inputs.

        NOTE(review): generated code — x0 appears to be the encoder hidden
        states (fed to the cross-attention k/v projections), x2/x3 the self-
        and cross-attention position-bias/mask terms, x4+x5 the residual pair
        from the previous partition; confirm against the partitioning tool.
        """
        (x0, x1, x2, x3, x4, x5) = unflatten(args, self.input_structure)
        # Cross-attention k/v projections computed up front from x0.
        t_0 = self.l_10(x0)
        t_1 = self.l_28(x0)
        t_2 = self.l_29(x0)
        # Finish block 6: FF dropout + residual.
        t_3 = self.l_0(x5)
        t_3 = (x4 + t_3)
        # Block 7 self-attention.
        t_4 = self.l_1(t_3)
        t_5 = t_4.size()
        t_6 = self.l_2(t_4)
        t_7 = self.l_3(t_4)
        t_4 = self.l_4(t_4)
        t_5 = t_5[0]
        t_6 = t_6.view(t_5, (- 1), 32, 128)
        t_6 = t_6.transpose(1, 2)
        t_7 = t_7.view(t_5, (- 1), 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_4 = t_4.view(t_5, (- 1), 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_7 = t_7.transpose(3, 2)
        t_7 = torch.matmul(t_6, t_7)
        t_7 += x2
        t_6 = t_7.float()
        t_6 = torch.nn.functional.softmax(t_6, dim=(- 1), _stacklevel=3, dtype=None)
        t_7 = t_6.type_as(t_7)
        t_7 = self.l_5(t_7)
        t_4 = torch.matmul(t_7, t_4)
        t_4 = t_4.transpose(1, 2)
        t_4 = t_4.contiguous()
        t_5 = t_4.view(t_5, (- 1), 4096)
        t_5 = self.l_6(t_5)
        t_5 = self.l_7(t_5)
        t_5 = (t_3 + t_5)
        # Block 7 cross-attention (k comes in pre-projected as x1).
        t_3 = self.l_8(t_5)
        t_4 = t_3.size()
        t_3 = self.l_9(t_3)
        t_4 = t_4[0]
        t_3 = t_3.view(t_4, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_7 = x1.view(t_4, (- 1), 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_0 = t_0.view(t_4, (- 1), 32, 128)
        t_0 = t_0.transpose(1, 2)
        t_7 = t_7.transpose(3, 2)
        t_7 = torch.matmul(t_3, t_7)
        t_7 += x3
        t_3 = t_7.float()
        t_3 = torch.nn.functional.softmax(t_3, dim=(- 1), _stacklevel=3, dtype=None)
        t_7 = t_3.type_as(t_7)
        t_7 = self.l_11(t_7)
        t_0 = torch.matmul(t_7, t_0)
        t_0 = t_0.transpose(1, 2)
        t_0 = t_0.contiguous()
        t_4 = t_0.view(t_4, (- 1), 4096)
        t_4 = self.l_12(t_4)
        t_4 = self.l_13(t_4)
        t_4 = (t_5 + t_4)
        # Block 7 feed-forward.
        t_5 = self.l_14(t_4)
        t_5 = self.l_15(t_5)
        t_5 = torch.nn.functional.relu(t_5, inplace=False)
        t_5 = self.l_16(t_5)
        t_5 = self.l_17(t_5)
        t_5 = self.l_18(t_5)
        t_5 = (t_4 + t_5)
        # Block 8 self-attention.
        t_4 = self.l_19(t_5)
        t_0 = t_4.size()
        t_7 = self.l_20(t_4)
        t_3 = self.l_21(t_4)
        t_4 = self.l_22(t_4)
        t_0 = t_0[0]
        t_7 = t_7.view(t_0, (- 1), 32, 128)
        t_7 = t_7.transpose(1, 2)
        t_3 = t_3.view(t_0, (- 1), 32, 128)
        t_3 = t_3.transpose(1, 2)
        t_4 = t_4.view(t_0, (- 1), 32, 128)
        t_4 = t_4.transpose(1, 2)
        t_3 = t_3.transpose(3, 2)
        t_3 = torch.matmul(t_7, t_3)
        t_3 += x2
        t_7 = t_3.float()
        t_7 = torch.nn.functional.softmax(t_7, dim=(- 1), _stacklevel=3, dtype=None)
        t_3 = t_7.type_as(t_3)
        t_3 = self.l_23(t_3)
        t_4 = torch.matmul(t_3, t_4)
        t_4 = t_4.transpose(1, 2)
        t_4 = t_4.contiguous()
        t_0 = t_4.view(t_0, (- 1), 4096)
        t_0 = self.l_24(t_0)
        t_0 = self.l_25(t_0)
        t_0 = (t_5 + t_0)
        # Block 8 cross-attention.
        t_5 = self.l_26(t_0)
        t_4 = t_5.size()
        t_5 = self.l_27(t_5)
        t_4 = t_4[0]
        t_5 = t_5.view(t_4, (- 1), 32, 128)
        t_5 = t_5.transpose(1, 2)
        t_1 = t_1.view(t_4, (- 1), 32, 128)
        t_1 = t_1.transpose(1, 2)
        t_2 = t_2.view(t_4, (- 1), 32, 128)
        t_2 = t_2.transpose(1, 2)
        t_1 = t_1.transpose(3, 2)
        t_1 = torch.matmul(t_5, t_1)
        t_1 += x3
        t_5 = t_1.float()
        t_5 = torch.nn.functional.softmax(t_5, dim=(- 1), _stacklevel=3, dtype=None)
        t_1 = t_5.type_as(t_1)
        t_1 = self.l_30(t_1)
        t_2 = torch.matmul(t_1, t_2)
        t_2 = t_2.transpose(1, 2)
        t_2 = t_2.contiguous()
        t_4 = t_2.view(t_4, (- 1), 4096)
        t_4 = self.l_31(t_4)
        t_4 = self.l_32(t_4)
        t_4 = (t_0 + t_4)
        # Block 8 feed-forward input projection (rest runs in the next partition).
        t_0 = self.l_33(t_4)
        t_0 = self.l_34(t_0)
        return list(flatten((x0, x2, x3, t_4, t_0)))
    # Delegate (de)serialization and device movement to partition-aware helpers.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)
    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)
    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)
    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)
    def cpu(self):
        return cpu(self)
    def cuda(self, device=None):
        return cuda(self, device=device)
    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def main():
    """Read a Bliss corpus XML, optionally filter/merge/sort its segments, and
    print the selected attribute(s) of every item.

    Command-line options:
        bliss_filename: corpus file to read.
        --subset_segment_file: keep only segment names listed in this file.
        --output_type: comma-separated item attributes to print
            (e.g. 'segment_name'); when empty, print the whole item.
        --merge_swb_ab: merge Switchboard A/B channel recordings by basename.
        --sort_by_time: sort each recording's segments by start time.
        --merge_segs_up_to_time: greedily merge consecutive segments up to
            this total duration.
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument('bliss_filename')
    arg_parser.add_argument('--subset_segment_file')
    arg_parser.add_argument('--output_type', default='', help='e.g. segment_name')
    arg_parser.add_argument('--merge_swb_ab', action='store_true')
    arg_parser.add_argument('--sort_by_time', action='store_true')
    arg_parser.add_argument('--merge_segs_up_to_time', type=float)
    args = arg_parser.parse_args()
    subset_segment_list = None
    if args.subset_segment_file:
        subset_segment_list = set(open(args.subset_segment_file).read().splitlines())
    rec_filenames = set()
    items_by_rec = {}
    for bliss_item in iter_bliss(args.bliss_filename):
        if (subset_segment_list and (bliss_item.segment_name not in subset_segment_list)):
            continue
        rec_name = bliss_item.recording_filename
        assert rec_name, ('invalid item %r' % bliss_item)
        if args.merge_swb_ab:
            # Strip path/extension and the trailing A/B channel marker so both
            # channels of a conversation share one recording key.
            rec_name = os.path.basename(rec_name)
            (rec_name, _) = os.path.splitext(rec_name)
            rec_filenames.add(rec_name)
            assert (rec_name[(- 1)] in 'AB')
            rec_name = rec_name[:(- 1)]
        else:
            rec_filenames.add(rec_name)
        items_by_rec.setdefault(rec_name, []).append(bliss_item)
    assert items_by_rec
    if args.merge_swb_ab:
        if subset_segment_list:
            # With a subset filter, drop conversations missing either channel.
            for key in list(items_by_rec.keys()):
                if (((key + 'A') not in rec_filenames) or ((key + 'B') not in rec_filenames)):
                    del items_by_rec[key]
            assert items_by_rec, ('rec_filenames %r' % (rec_filenames,))
        else:
            for key in items_by_rec.keys():
                assert ((key + 'A') in rec_filenames)
                assert ((key + 'B') in rec_filenames)
    for (key, ls) in items_by_rec.items():
        assert isinstance(ls, list)
        if args.sort_by_time:
            ls.sort(key=(lambda item: item.start_time))
    if args.merge_segs_up_to_time:
        for (key, ls) in items_by_rec.items():
            i = 0
            while (i < len(ls)):
                # Greedily extend [i, j) while the accumulated duration fits.
                j = (i + 1)
                dt = ls[i].delta_time
                while (j < len(ls)):
                    if ((dt + ls[j].delta_time) > args.merge_segs_up_to_time):
                        break
                    dt += ls[j].delta_time
                    j += 1
                if (j > (i + 1)):
                    ls[i:j] = [BlissItem(segment_name=';'.join([item.segment_name for item in ls[i:j]]), recording_filename=ls[i].recording_filename, start_time=0.0, end_time=dt, orth=' '.join([item.orth for item in ls[i:j]]))]
                i += 1
    # BUGFIX: ''.split(',') yields [''] which is truthy, so the "print the
    # whole item" fallback below was unreachable and getattr(item, '') would
    # raise. Filter out empty entries so an empty --output_type prints items.
    output_types = [t for t in args.output_type.split(',') if t]
    for (key, ls) in items_by_rec.items():
        assert isinstance(ls, list)
        for item in ls:
            assert isinstance(item, BlissItem)
            if (not output_types):
                print(item)
            else:
                print(' '.join([str(getattr(item, attr)) for attr in output_types]))
def video_processing(ref, dist):
    """Read the reference and distorted videos and return sampled frame tensors.

    Samples roughly one frame per second from each video, resizes to 520,
    center-crops to 448 and ImageNet-normalizes. If fewer frames than
    expected are decodable, the last decoded frame is repeated.

    Args:
        ref: path of the reference video.
        dist: path of the distorted video.

    Returns:
        (ref_tensor, dist_tensor, dist_name) where each tensor has shape
        [seconds, 3, 448, 448].
    """
    video = {}
    for i_type in ['ref', 'dist']:
        video_name = ref if (i_type == 'ref') else dist
        video_name_dis = video_name
        # FIX: the original opened the same file twice (a VideoCapture plus a
        # second `cap` used only for metadata) and never released the second
        # handle. A single capture serves both purposes.
        video_capture = cv2.VideoCapture(video_name)
        video_channel = 3
        video_height_crop = 448
        video_width_crop = 448
        video_length = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
        video_frame_rate = int(round(video_capture.get(cv2.CAP_PROP_FPS)))
        # One sampled frame per second of video.
        video_length_read = int(video_length / video_frame_rate)
        transformations = transforms.Compose([transforms.Resize(520), transforms.CenterCrop(448), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
        transformed_video = torch.zeros([video_length_read, video_channel, video_height_crop, video_width_crop])
        video_read_index = 0
        frame_idx = 0
        for i in range(video_length):
            (has_frames, frame) = video_capture.read()
            if has_frames:
                # Keep the first decodable frame of each one-second window.
                if ((video_read_index < video_length_read) and ((frame_idx % video_frame_rate) == 0)):
                    read_frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                    read_frame = transformations(read_frame)
                    transformed_video[video_read_index] = read_frame
                    video_read_index += 1
                frame_idx += 1
        # Pad by repeating the last decoded frame if the video was short.
        if (video_read_index < video_length_read):
            for i in range(video_read_index, video_length_read):
                transformed_video[i] = transformed_video[(video_read_index - 1)]
        video_capture.release()
        video[i_type] = transformed_video
    return (video['ref'], video['dist'], video_name_dis)
.parametrize('sparse_feature_num,dense_feature_num', [(2, 0), (0, 2)])
def test_WDL(sparse_feature_num, dense_feature_num):
    """Smoke-test the WDL (Wide & Deep) model on tiny synthetic data.

    NOTE(review): the decorator above is truncated in this chunk —
    presumably `@pytest.mark.parametrize`; confirm against the full file.
    """
    # This test targets TF 1.x only; skip silently on TF >= 2.0.
    if (version.parse(tf.__version__) >= version.parse('2.0.0')):
        return
    model_name = 'WDL'
    sample_size = SAMPLE_SIZE
    # Synthetic features with hashing enabled; feature counts come from the
    # parametrization.
    (x, y, feature_columns) = get_test_data(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=dense_feature_num, hash_flag=True)
    # The same columns feed both the wide and the deep part here.
    model = WDL(feature_columns, feature_columns, dnn_hidden_units=[4, 4], dnn_dropout=0.5)
    check_model(model, model_name, x, y)
def run_pool(poolsize, chunksize):
    """Fetch matching article IDs from MongoDB and process them in parallel.

    Args:
        poolsize: number of worker processes.
        chunksize: number of document IDs handed to each `process_chunks` call.

    Relies on module-level configuration (MONGO_ARGS, DB_NAME, READ_COL,
    DOC_LIMIT, filters) and helpers (utils, process_chunks, chunker).
    """
    client = utils.init_client(MONGO_ARGS)
    id_collection = client[DB_NAME][READ_COL]
    query = utils.prepare_query(filters)
    document_ids = id_collection.find(query).distinct('_id')
    logger.info(f'Obtained ID list for {len(document_ids)} articles.')
    # Optional cap on the number of processed documents (0 means "all").
    if (DOC_LIMIT > 0):
        document_ids = document_ids[:DOC_LIMIT]
    logger.info(f'Processing {len(document_ids)} articles...')
    # Context manager guarantees worker cleanup on error; join() waits for
    # outstanding work (the original close()d without join()ing).
    with Pool(processes=poolsize) as pool:
        pool.map(process_chunks, chunker(document_ids, chunksize=chunksize))
        pool.close()
        pool.join()
def BlanusaSecondSnarkGraph():
    """Return the second Blanusa snark as a Graph with a fixed plot embedding.

    Vertices are pairs (ring, index) plus two connector vertices
    c0 = (-1, 0) and c1 = (-1, 1); the `_circle_embedding` calls only set
    vertex positions for plotting and do not change the graph structure.
    """
    c0 = ((- 1), 0)
    c1 = ((- 1), 1)
    # Base adjacency: connectors plus the chords inside each ring.
    g = Graph({c0: [(0, 0), (1, 4), c1], c1: [(0, 3), (1, 1)], (0, 2): [(0, 5)], (0, 6): [(0, 4)], (0, 7): [(0, 1)], (1, 7): [(1, 2)], (1, 0): [(1, 6)], (1, 3): [(1, 5)]}, name='Blanusa Second Snark Graph')
    # Two 5-cycles and one 6-cycle complete the cubic graph.
    g.add_cycle([(0, i) for i in range(5)])
    g.add_cycle([(1, i) for i in range(5)])
    g.add_cycle([(0, 5), (0, 6), (0, 7), (1, 5), (1, 6), (1, 7)])
    g._circle_embedding([(0, ((2 * i) % 5)) for i in range(5)], center=((- 1.5), 0), shift=0.5)
    g._circle_embedding([(1, ((2 * i) % 5)) for i in range(5)], center=(1.5, 0))
    # NOTE(review): both calls below pad with [c0] * 4; using c0 (not c1) for
    # the right-hand ring looks suspicious — confirm against upstream Sage
    # before changing, as it only affects plot placement.
    g._circle_embedding(([(0, i) for i in range(5, 8)] + ([c0] * 4)), center=((- 1.2), 0), shift=2.5, radius=2.2)
    g._circle_embedding(([(1, i) for i in range(5, 8)] + ([c0] * 4)), center=(1.2, 0), shift=(- 1), radius=2.2)
    g._circle_embedding([c0, c1], shift=0.5)
    # Normalize vertex labels to integers.
    g.relabel()
    return g
def get_all_models(dirname):
    """Return paths of subdirectories of `dirname` that contain 'evaluate.json'.

    Args:
        dirname: directory whose immediate children are model directories.

    Returns:
        List of full paths of qualifying subdirectories, or None if there
        are none.
    """
    dirs = []
    for d in os.listdir(dirname):
        path = os.path.join(dirname, d)
        # Guard: the original called os.listdir() on every entry, which
        # raises NotADirectoryError when `dirname` contains plain files.
        if os.path.isdir(path) and ('evaluate.json' in os.listdir(path)):
            dirs.append(d)
    if (len(dirs) == 0):
        return None
    return [os.path.join(dirname, d) for d in dirs]
_metric
def ppl_wend(opts):
    """Perceptual Path Length in W space with endpoint sampling ('wend').

    NOTE(review): the line above is a truncated decorator (presumably
    `@register_metric`); confirm against the full file.
    """
    # 50k samples, small epsilon step, cropped faces, tiny batch.
    ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=0.0001, space='w', sampling='end', crop=True, batch_size=2)
    return dict(ppl_wend=ppl)
def print(*args, **kwargs):
    """Tee `print`: emit to stdout and append the same line to `out_path`.

    NOTE(review): intentionally shadows the builtin `print`; relies on
    `__builtin__` (alias for the builtins module) and `out_path` being
    defined in the enclosing scope — neither is visible in this chunk.
    """
    __builtin__.print(*args, **kwargs)
    # Re-open in append mode on every call so each line hits disk promptly.
    with open(out_path, 'a') as fp:
        __builtin__.print(*args, file=fp, **kwargs)
def compute_a(sigma, q, lmbd, verbose=False):
    """Exact moment computation A(lambda) via binomial expansion.

    Args:
        sigma: Gaussian noise scale.
        q: sampling ratio.
        lmbd: moment order (rounded up to an integer).
        verbose: print the two expansion terms when True.

    Returns:
        A(lambda) converted through `_to_np_float64`.
    """
    lmbd_int = int(math.ceil(lmbd))
    # Zeroth moment is identically 1.
    if lmbd_int == 0:
        return 1.0
    first_term = 0
    second_term = 0
    for i in range(lmbd_int + 1):
        outer_coef = scipy.special.binom(lmbd_int, i) * (q ** i)
        inner_minus = 0
        inner_plus = 0
        for j in range(i + 1):
            signed_coef = scipy.special.binom(i, j) * ((- 1) ** (i - j))
            inner_minus += signed_coef * np.exp((j * j - j) / (2.0 * (sigma ** 2)))
            inner_plus += signed_coef * np.exp((j * j + j) / (2.0 * (sigma ** 2)))
        first_term += outer_coef * inner_minus
        second_term += outer_coef * inner_plus
    a_lambda_exact = ((1.0 - q) * first_term) + (q * second_term)
    if verbose:
        print('A: by binomial expansion {} = {} + {}'.format(a_lambda_exact, ((1.0 - q) * first_term), (q * second_term)))
    return _to_np_float64(a_lambda_exact)
class BoolQ():
    """Speedup analysis for the BoolQ (SuperGLUE) T5-3B experiments.

    NOTE(review): `all_speedups_boolq` takes no `self`, so it only works
    when called on the class itself (`BoolQ.all_speedups_boolq()`) —
    possibly a missing @staticmethod; confirm with the callers.
    """
    def all_speedups_boolq():
        # Sequential GPipe baseline (hard-coded result files).
        (seq_gpipe_dict, seq_gpipe_times) = Hack.get_boolq_seq_hack_gpipe_times_and_dict()
        seq_stale_fn = 'results/FOR_PAPER/all_results_new_t5_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_squad1_pipedream_t5_tfds_stale_bs_20_se_10_seed_42_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_squad1_pipedream_t5_tfds_stale_bs_20_se_10_seed_42.txt'
        seq_exp_stale_fn = os.path.join('results/t5/super_glue/boolq', 'new_t5_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_squad1_pipedream_t5_tfds_stale_bs_20_se_10_seed_42.json')
        # Sequential stale-weights baseline.
        (seq_stale_dict, seq_stale_times) = get_fixed_dict_and_times_single(exp_fn=seq_exp_stale_fn, checkpoints_eval_fn=seq_stale_fn)
        exp_results_dir = 'results/t5/super_glue/boolq/'
        exp_stale_fn = os.path.join(exp_results_dir, 'new_args_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_20_se_5_seed_42.json')
        exp_gpipe_fn = os.path.join(exp_results_dir, 'new_args_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_20_se_10_seed_42.json')
        gpipe_fn = 'results/all_results_new_args_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_20_se_10_seed_42_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_gpipe_bs_20_se_10_seed_42.txt'
        stale_fn = 'results/all_results_new_args_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_20_se_5_seed_42_layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_t5_tfds_stale_bs_20_se_5_seed_42.txt'
        # Virtual-stage (mpipe) GPipe and stale runs.
        (virtual_gpipe_dict, virtual_times_gpipe) = get_fixed_dict_and_times_single(exp_fn=exp_gpipe_fn, checkpoints_eval_fn=gpipe_fn)
        (virtual_stale_dict, virtual_times_stale) = get_fixed_dict_and_times_single(exp_fn=exp_stale_fn, checkpoints_eval_fn=stale_fn)
        compute_all_speedups(seq_gpipe_dict, seq_gpipe_times, seq_stale_dict, seq_stale_times, virtual_gpipe_dict, virtual_stale_dict, virtual_times_gpipe, virtual_times_stale)
def pooling_with_mask(rep_tensor, rep_mask, method='max', scope=None):
    """Masked pooling over the second-to-last axis of `rep_tensor`.

    Args:
        rep_tensor: representation tensor to pool.
        rep_mask: boolean/int mask over the pooled axis.
        method: 'max' or 'mean'.
        scope: optional TF name scope.

    Returns:
        The pooled tensor.

    Raises:
        AttributeError: if `method` is not 'max' or 'mean'.
    """
    with tf.name_scope(scope or ('%s_pooling' % method)):
        if method == 'max':
            # Push masked entries to a large negative value before the max.
            masked = exp_mask_for_high_rank(rep_tensor, rep_mask)
            result = tf.reduce_max(masked, (- 2))
        elif method == 'mean':
            # Zero out masked entries and divide by the valid count.
            masked = mask_for_high_rank(rep_tensor, rep_mask)
            totals = tf.reduce_sum(masked, (- 2))
            valid_counts = tf.reduce_sum(tf.cast(rep_mask, tf.int32), (- 1), True)
            # Avoid division by zero for fully masked rows.
            valid_counts = tf.where(tf.equal(valid_counts, tf.zeros_like(valid_counts, tf.int32)), tf.ones_like(valid_counts, tf.int32), valid_counts)
            result = totals / tf.cast(valid_counts, tf.float32)
        else:
            raise AttributeError('No Pooling method name as %s' % method)
        return result
class InvariantModule(tf.keras.Model):
    """Permutation-invariant module: inner net s1, pooling over axis -2, outer net s2."""

    def __init__(self, settings, **kwargs):
        """Build the two dense stacks and select the pooling function.

        Raises:
            ConfigurationError: if settings['pooling_fun'] is neither
            'mean', 'max', nor a callable.
        """
        super().__init__(**kwargs)
        self.s1 = Sequential([Dense(**settings['dense_s1_args']) for _ in range(settings['num_dense_s1'])])
        self.s2 = Sequential([Dense(**settings['dense_s2_args']) for _ in range(settings['num_dense_s2'])])
        pool_choice = settings['pooling_fun']
        # String names take precedence over callables, matching the
        # original resolution order.
        if pool_choice == 'mean':
            chosen = partial(tf.reduce_mean, axis=(- 2))
        elif pool_choice == 'max':
            chosen = partial(tf.reduce_max, axis=(- 2))
        elif callable(pool_choice):
            chosen = pool_choice
        else:
            raise ConfigurationError('pooling_fun argument not understood!')
        self.pooler = chosen

    def call(self, x, **kwargs):
        """Apply s1 elementwise, pool the set dimension, then apply s2."""
        pooled = self.pooler(self.s1(x, **kwargs))
        return self.s2(pooled, **kwargs)
class AlphaDropout(_DropoutNd):
    """Complex-tensor wrapper around `F.alpha_dropout`."""
    def forward(self, input: Tensor) -> Tensor:
        # Delegate to the complex-aware dispatcher, which applies the
        # real-valued functional with this module's p/training flags.
        return cF.complex_fcaller(F.alpha_dropout, input, self.p, self.training)
def _get_head_stage(arch, head_name, blocks):
    """Build the blocks of the requested head stage from an architecture dict.

    Falls back to the generic 'head' entry when `head_name` is absent.

    Args:
        arch: architecture definition mapping.
        head_name: preferred head key in `arch`.
        blocks: block indices passed through to the builder.

    Returns:
        The 'stages' entry of the built blocks.
    """
    stage_key = head_name if (head_name in arch) else 'head'
    built = mbuilder.get_blocks(arch, stage_indices=arch.get(stage_key), block_indices=blocks)
    return built['stages']
def _wrap(fn, kwargs, error_queue):
    """Child-process entry point: run `fn(**kwargs)` and map outcomes to exit codes.

    Exit codes: 0 on success or Ctrl-C, SIGUSR1's number on EarlyStopping,
    1 (with the traceback shipped via `error_queue`) on any other exception.
    """
    # Ask the kernel to deliver SIGINT to this child if the parent dies.
    _prctl_pr_set_pdeathsig(signal.SIGINT)
    try:
        fn(**kwargs)
    except KeyboardInterrupt:
        # The parent coordinates Ctrl-C handling; exit quietly.
        pass
    except EarlyStopping:
        # Distinctive exit code lets the parent recognize early stopping.
        sys.exit(signal.SIGUSR1)
    except Exception:
        import traceback
        # Ship the full traceback to the parent before exiting non-zero.
        error_queue.put(traceback.format_exc())
        sys.exit(1)
.parametrize('seed', [313])
.parametrize('axis', [0, 3])
.parametrize('decay_rate', [0.9])
.parametrize('eps', [1e-05])
.parametrize('nonlinearity', ['relu'])
.parametrize('output_stat', [False])
.parametrize('add', [True, False])
.parametrize('ctx, func_name', ctxs)
.parametrize('no_scale, no_bias', [[False, False], [True, True]])
.parametrize('no_mean', [True, False])
.parametrize('no_variance', [True, False])
def test_fused_batch_normalization_forward_backward(seed, axis, decay_rate, eps, nonlinearity, output_stat, add, ctx, func_name, no_scale, no_bias, no_mean, no_variance):
    """Forward/backward test for fused batch normalization (nnabla).

    NOTE(review): the `.parametrize` lines above are truncated decorators
    (presumably `@pytest.mark.parametrize`); confirm against the full file.
    """
    import platform
    if ((platform.system() == 'Windows') and (len(ctx.backend) > 1)):
        pytest.skip('Currently not worked with CUDA/cuDNN on Windows platform.')
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = list(create_inputs(rng, axis, add))
    axes = [axis]
    batch_stat = True
    # Optionally drop scale/bias/mean/variance inputs (replaced by None).
    inputs = mask_inputs(inputs, no_scale, no_bias, no_mean, no_variance)
    insert_identity = [True, True, True, False, False, False]
    # Gradient-check the fused op against the reference implementation.
    function_tester(rng, F.fused_batch_normalization, ref_fused_batch_normalization, inputs, ref_grad=ref_grad_fused_batch_normalization, func_args=[axes, decay_rate, eps, batch_stat, nonlinearity, output_stat], backward=[True, True, True, False, False, add], ctx=ctx, func_name=func_name, dstep=0.01, atol_b=0.01, insert_identity=insert_identity)
    if (no_mean and no_variance):
        return
    vinputs = []
    for i in inputs:
        if (i is None):
            vinputs.append(None)
            continue
        vinputs.append(nn.Variable.from_numpy_array(i, need_grad=True))
    # Check that the running mean (inputs[3]) and variance (inputs[4]) are
    # updated consistently over several batch-stat passes.
    for i in range(5):
        inputs[0] = rng.randn(*inputs[0].shape)
        vinputs[0].d[...] = inputs[0]
        ref_y = ref_fused_batch_normalization(*(inputs + [axes, decay_rate, eps, batch_stat, nonlinearity, output_stat]))
        with nn.context_scope(ctx), nn.auto_forward():
            y = F.fused_batch_normalization(*(vinputs + [axes, decay_rate, eps, batch_stat, nonlinearity, output_stat]))
        assert_allclose(vinputs[3].d, inputs[3])
        assert_allclose(vinputs[4].d, inputs[4], atol=0.001)
    # Inference mode (batch_stat=False) consistency check.
    batch_stat = False
    if output_stat:
        return
    ref_y = ref_fused_batch_normalization(*(inputs + [axes, decay_rate, eps, batch_stat, nonlinearity, output_stat]))
    with nn.context_scope(ctx), nn.auto_forward():
        y = F.fused_batch_normalization(*(vinputs + [axes, decay_rate, eps, batch_stat, nonlinearity, output_stat]))
    assert_allclose(ref_y, y.d, atol=1e-06)
def get_rowspace_projection(W: np.ndarray) -> np.ndarray:
    """Return the orthogonal projection matrix onto the row-space of W.

    Args:
        W: matrix whose row-space is projected onto.

    Returns:
        P_W such that P_W @ x projects x onto rowspace(W); the zero matrix
        when W is (numerically) all zeros.
    """
    if np.allclose(W, 0):
        w_basis = np.zeros_like(W.T)
    else:
        # Orthonormal basis of rowspace(W) as columns.
        w_basis = scipy.linalg.orth(W.T)
        # Fix an overall sign for determinism. Guard against a zero leading
        # entry: np.sign(0) == 0, and the original multiplied the whole
        # basis by it, wiping the projection out entirely.
        leading = w_basis[0][0]
        if leading != 0:
            w_basis = (w_basis * np.sign(leading))
    # P = B B^T for an orthonormal basis B (sign flips cancel out here).
    P_W = w_basis.dot(w_basis.T)
    return P_W
def isic_time(u, v, model, **kwargs):
    """Time-domain imaging-condition expression over paired wavefields.

    NOTE(review): `u`/`v` look like sequences of Devito symbolic fields and
    `model` a Devito model (irho, m attributes) — confirm against callers.
    """
    # Grid-spacing weight scaled by the inverse density field.
    w = (u[0].indices[0].spacing * model.irho)
    # Optional sign for the gradient term; defaults to +1.
    ics = kwargs.get('icsign', 1)
    # Sum of (u * v_tt * m + ics * grad(u).grad(v)) over wavefield pairs.
    return (w * sum(((((uu * vv.dt2) * model.m) + (ics * inner_grad(uu, vv))) for (uu, vv) in zip(u, v))))
class submission_writer(object):
    """Builds per-task run commands and writes SLURM submission files."""

    def __init__(self, job_name, out_dir, memory, asr_pth, skp_pth, emo_pth, lang_pth):
        self.job_name = job_name
        self.out_dir = out_dir
        self.memory = memory
        # Task name -> path of the run script for that downstream task.
        self.tasks = {'ASR': asr_pth, 'spk_id': skp_pth, 'EMO': emo_pth, 'LANG': lang_pth}

    def write(self, sbatch_file_name, cmd):
        """Write a submission file holding `cmd` under ./downstream_submissions/."""
        out_dir = './downstream_submissions/'
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        write_slurm_submission_file(os.path.join(out_dir, sbatch_file_name), self.job_name, self.out_dir, self.memory, cmd)

    def cmd_maker(self, pase_cfg, latest_ckpt, data_root, res_pth):
        """Return one `python <script> ...` command line per downstream task."""
        return ['python {} {} {} {} {}\n'.format(run_file, pase_cfg, latest_ckpt, data_root, (res_pth + name)) for (name, run_file) in self.tasks.items()]

    def __call__(self, sbatch_file_name, pase_cfg, latest_ckpt, data_root, res_pth):
        """Build the command list and write it into a submission file."""
        self.write(sbatch_file_name, self.cmd_maker(pase_cfg, latest_ckpt, data_root, res_pth))
def get_wilds_ood_test_loader(dataset, data_dir, data_fraction=1.0, model_seed=0):
    """Build the out-of-distribution ('test' split) eval loader for a WILDS dataset.

    Args:
        dataset: WILDS dataset name (e.g. 'fmow', 'poverty').
        data_dir: root directory of the downloaded data.
        data_fraction: fraction of the data used by the default config.
        model_seed: selects the poverty cross-validation fold.
    """
    config = get_default_config(dataset, data_fraction=data_fraction)
    # Poverty uses per-seed cross-validation folds.
    dataset_kwargs = ({'fold': POVERTY_FOLDS[model_seed]} if (dataset == 'poverty') else {})
    full_dataset = get_dataset(dataset=dataset, root_dir=data_dir, **dataset_kwargs)
    train_grouper = CombinatorialGrouper(dataset=full_dataset, groupby_fields=config.groupby_fields)
    if (dataset == 'fmow'):
        # Halve the batch size for fmow (presumably for memory — TODO confirm).
        config.batch_size = (config.batch_size // 2)
    test_transform = initialize_transform(transform_name=config.eval_transform, config=config, dataset=full_dataset)
    test_data = full_dataset.get_subset('test', frac=config.frac, transform=test_transform)
    test_loader = get_eval_loader(loader=config.eval_loader, dataset=test_data, batch_size=config.batch_size, grouper=train_grouper, **config.loader_kwargs)
    # Wrap to normalize the loader interface for downstream code.
    test_loader = ProperDataLoader(test_loader)
    return test_loader
def get_dataloader(rank, world_size):
    """Build distributed train/validation DataLoaders for the ETCI flood data.

    Args:
        rank: this process's rank for the distributed samplers.
        world_size: total number of processes.

    Returns:
        (train_loader, val_loader) tuple.
    """
    import pandas as pd  # local import: only needed to load the pseudo-label CSV
    train_df = dataset_utils.create_df(config.train_dir)
    valid_df = dataset_utils.create_df(config.valid_dir)
    # BUG FIX: the original did `train_df.append('pseudo_df.csv')`, appending
    # the file *name* instead of its rows. Read the CSV and concatenate it
    # (DataFrame.append is also deprecated/removed in modern pandas).
    pseudo_df = pd.read_csv('pseudo_df.csv')
    train_df = pd.concat([train_df, pseudo_df])
    flood_label_paths = train_df['flood_label_path'].values.tolist()
    train_has_masks = list(map(dataset_utils.has_mask, flood_label_paths))
    train_df['has_mask'] = train_has_masks
    remove_indices = dataset_utils.filter_df(train_df)
    train_df = train_df.drop(train_df.index[remove_indices])
    # Train-time augmentations only; validation runs on raw images.
    transform = A.Compose([A.HorizontalFlip(p=0.5), A.Rotate(270), A.ElasticTransform(p=0.4, alpha=120, sigma=(120 * 0.05), alpha_affine=(120 * 0.03))])
    train_dataset = ETCIDataset(train_df, split='train', transform=transform)
    validation_dataset = ETCIDataset(valid_df, split='validation', transform=None)
    # Balance masked/unmasked examples, then shard across DDP workers.
    stratified_sampler = sampler_utils.BalanceClassSampler(train_df['has_mask'].values.astype('int'))
    train_sampler = sampler_utils.DistributedSamplerWrapper(stratified_sampler, rank=rank, num_replicas=world_size, shuffle=True)
    val_sampler = DistributedSampler(validation_dataset, rank=rank, num_replicas=world_size, shuffle=False)
    train_loader = DataLoader(train_dataset, batch_size=config.local_batch_size, sampler=train_sampler, pin_memory=True, num_workers=8, worker_init_fn=worker_utils.seed_worker)
    val_loader = DataLoader(validation_dataset, batch_size=config.local_batch_size, sampler=val_sampler, pin_memory=True, num_workers=8)
    return (train_loader, val_loader)
class AudioEncoder(nn.Module):
    """Convolutional audio encoder: five BasicBlocks followed by two FC layers.

    Args:
        num_output_length: dimensionality of the final embedding.
        if_tanh: if True, squash the output through tanh.
    """

    def __init__(self, num_output_length, if_tanh=False):
        super(AudioEncoder, self).__init__()
        self.if_tanh = if_tanh
        self.block1 = BasicBlock(1, 16, kernel_size=3, stride=1)
        self.block2 = BasicBlock(16, 32, kernel_size=3, stride=2)
        self.block3 = BasicBlock(32, 64, kernel_size=3, stride=1)
        self.block4 = BasicBlock(64, 128, kernel_size=3, stride=1)
        self.block5 = BasicBlock(128, 256, kernel_size=3, stride=2)
        # 6912 = flattened feature size after block5 for the expected input
        # size — TODO confirm against the dataset's spectrogram dimensions.
        self.fc1 = nn.Sequential(nn.Linear(6912, 512), nn.BatchNorm1d(512), nn.ReLU(inplace=True))
        self.fc2 = nn.Linear(512, num_output_length)

    def forward(self, inputs):
        """Encode a batch of single-channel inputs into embeddings."""
        out = self.block1(inputs)
        out = self.block2(out)
        out = self.block3(out)
        out = self.block4(out)
        out = self.block5(out)
        # Flatten everything but the batch dimension.
        out = out.contiguous().view(out.shape[0], (- 1))
        out = self.fc1(out)
        out = self.fc2(out)
        if self.if_tanh:
            # FIX: torch.tanh replaces the deprecated F.tanh (removed in
            # recent PyTorch releases); numerically identical.
            out = torch.tanh(out)
        return out
def test_partial_fit():
    """BernoulliRBM.partial_fit over several epochs reaches the expected
    mean pseudo-likelihood and leaves the input data untouched."""
    X = Xdigits.copy()
    rbm = BernoulliRBM(n_components=64, learning_rate=0.1, batch_size=20, random_state=9)
    n_samples = X.shape[0]
    n_batches = int(np.ceil((float(n_samples) / rbm.batch_size)))
    batch_slices = np.array_split(X, n_batches)
    # 7 passes over the mini-batches.
    for i in range(7):
        for batch in batch_slices:
            rbm.partial_fit(batch)
    assert_almost_equal(rbm.score_samples(X).mean(), (- 21.0), decimal=0)
    # partial_fit must not mutate the caller's data in place.
    assert_array_equal(X, Xdigits)
def optimize_inference_for_dag(net, input_blobs, namescope=''):
    """Memory-optimize an inference-only Caffe2 net by recycling activation blobs.

    Args:
        net: Caffe2 net wrapper (must contain no gradient ops).
        input_blobs: names of the net's input blobs.
        namescope: optional blob-name prefix restricting the optimization.

    Returns:
        The optimized NetDef; asserts it is graph- and inplace-equivalent
        to the original.
    """
    netproto = copy.deepcopy(net.Proto())
    external_input = set(net.Proto().external_input)
    external_output = set(net.Proto().external_output)

    def is_activation_blob(b):
        # Activations are blobs that are neither net inputs nor outputs.
        return ((b not in external_input) and (b not in external_output))
    activation_blobs = set()
    seen_as_output = set()
    ops = list(net.Proto().op)
    op_indices = [index for (index, op) in enumerate(net.Proto().op)]
    for op in ops:
        for b in op.input:
            if is_activation_blob(b):
                activation_blobs.add(b)
                # Every activation input must have been produced by an
                # earlier op. NOTE(review): the message says 'external
                # input' but the check is producer ordering — wording is
                # misleading; confirm intent before changing it.
                if (b not in seen_as_output):
                    raise AssertionError('{} not in external input'.format(b))
        for b in op.output:
            if is_activation_blob(b):
                activation_blobs.add(b)
        seen_as_output = seen_as_output.union(set(op.output))
        assert (not op.is_gradient_op), 'You can only pass inference-only nets to optimize_inference_for_dag'
    start_time = time.time()
    # C++ memonger computes the blob-recycling assignment.
    optim_str = C.memonger_compute_blob_recycling_for_dag(netproto.SerializeToString(), [str(s).encode('utf-8') for s in input_blobs], op_indices, set((str(s).encode('utf-8') for s in activation_blobs)), namescope.encode('utf-8'), set(), {})
    log.info('Memonger memory optimization took {} secs'.format((time.time() - start_time)))
    optim = caffe2_pb2.NetDef()
    optim.ParseFromString(optim_str)
    # Safety checks: the optimized net must compute the same graph.
    assert verify_graph_equality(net.Proto(), optim), 'Memonger graph is not equal to original.'
    assert verify_inplace_blobs(net.Proto(), optim), 'Inplace assignments differ in memonger net.'
    return optim
def process_triple(j, AVRO_FILE, triple, start_time, len_local_entity_mentions_map, job_object: PipelineJob):
    """Filter and normalize one OpenIE triple.

    Returns:
        (subject_word, relation_word, object_word, subject_word_lc,
        relation_word_lc, object_word_lc) as token lists, or None when the
        triple is rejected by any of the heuristics below.
    """
    # Reject low-confidence extractions.
    if (triple['confidence_score'] < 0.3):
        return None
    # Reject triples whose subject dropped a possessive pronoun.
    if ('PRP$' in [w['pos'] for w in triple['dropped_words_subject']]):
        return None
    # Reject negated quantities.
    if ('no' in [v for (k, v) in triple['quantities'].items()]):
        return None
    sentence = [w['word'] for w in triple['sentence_linked']['tokens']]
    # Periodic progress logging.
    if (((j % job_object.opts.process_avro__log_every) == 0) and (j > 0)):
        job_object.log('{} {} triples processed {} entities collected {} sec'.format(AVRO_FILE, j, len_local_entity_mentions_map, (time.time() - start_time)))
    # Rebuild surface forms in sentence order, resolving QUANT_* placeholder
    # tokens to their quantity strings where available.
    subject_word = [(w['word'] if ('QUANT' not in w['word']) else (triple['quantities'][w['word'][6:]] if (w['word'][6:] in triple['quantities']) else w['word'])) for w in sorted((triple['subject'] + triple['dropped_words_subject']), key=(lambda x: x['index']))]
    subject_word_lc = [w.lower() for w in subject_word]
    relation_word = [(w['word'] if ('QUANT' not in w['word']) else (triple['quantities'][w['word'][6:]] if (w['word'][6:] in triple['quantities']) else w['word'])) for w in sorted((triple['relation'] + triple['dropped_words_relation']), key=(lambda x: x['index']))]
    relation_word_lc = [w.lower() for w in relation_word]
    object_word = [(w['word'] if ('QUANT' not in w['word']) else (triple['quantities'][w['word'][6:]] if (w['word'][6:] in triple['quantities']) else w['word'])) for w in sorted((triple['object'] + triple['dropped_words_object']), key=(lambda x: x['index']))]
    object_word_lc = [w.lower() for w in object_word]
    # Reject implicit appositive-clause relations.
    if (relation_word == ['is:impl_appos-clause']):
        return None
    # Reject empty subjects/objects.
    if (len([w['word'] for w in triple['subject']]) == 0):
        if job_object.opts.verbose:
            print('len([w["word"] for w in triple["subject"]]) == 0', '\n\n')
        return None
    if (len([w['word'] for w in triple['object']]) == 0):
        if job_object.opts.verbose:
            print('len([w["word"] for w in triple["object"]]) == 0', '\n\n')
        return None
    # POS-based rejection on the last subject token (adverbs, wh-determiners,
    # and determiners/pronouns other than 'I').
    if ([w['pos'] for w in triple['subject']][(- 1)] in ['RB', 'WDT']):
        return None
    if (([w['pos'] for w in triple['subject']][(- 1)] in ['DT', 'PRP', 'PRP$']) and (triple['subject'][(- 1)]['word'] not in ['I'])):
        if job_object.opts.verbose:
            print("[w['pos'] for w in triple['subject']][-1] in ['DT', 'PRP', 'PRP$']")
        if job_object.opts.verbose:
            print('\n\n', sentence, '\n\n', triple['quantities'], '\n\n', subject_word, relation_word, object_word, prettyformat_dict_string(triple), '\n\n')
        return None
    # Same POS-based rejection for the object side.
    if ([w['pos'] for w in triple['object']][(- 1)] in ['RB', 'WDT']):
        return None
    if (([w['pos'] for w in triple['object']][(- 1)] in ['DT', 'PRP', 'PRP$']) and (triple['object'][(- 1)]['word'] not in ['I'])):
        if job_object.opts.verbose:
            print("w['pos'] for w in triple['object']][-1] in ['DT', 'PRP', 'PRP$']")
        if job_object.opts.verbose:
            print('\n\n', sentence, '\n\n', triple['quantities'], '\n\n', subject_word, relation_word, object_word, prettyformat_dict_string(triple), '\n\n')
        return None
    # Length sanity checks (empty or overly long phrases).
    if ((len(subject_word) == 0) or (len(object_word) == 0)):
        if job_object.opts.verbose:
            print('len(_bject_word) == 0', '\n\n')
        return None
    if ((len(subject_word) > job_object.opts.process_avro__len_subject_word) or (len(object_word) > job_object.opts.process_avro__len_object_word)):
        if job_object.opts.verbose:
            print('len(_bject_word) > 10 ', '\n\n')
        return None
    return (subject_word, relation_word, object_word, subject_word_lc, relation_word_lc, object_word_lc)
def sort_auto_mapping(fname, overwrite: bool=False):
    """Sort the entries of auto-mapping blocks (matched by `_re_intro_mapping`)
    in a source file.

    Args:
        fname: path of the file to process.
        overwrite: rewrite the file in place when True; otherwise return
            True if the file *would* change (check mode).
    """
    with open(fname, 'r', encoding='utf-8') as f:
        content = f.read()
    lines = content.split('\n')
    new_lines = []
    line_idx = 0
    while (line_idx < len(lines)):
        if (_re_intro_mapping.search(lines[line_idx]) is not None):
            # Mapping entries are indented 8 spaces past the mapping's own
            # indentation level.
            indent = (len(re.search('^(\\s*)\\S', lines[line_idx]).groups()[0]) + 8)
            # Copy lines verbatim up to the first '(' entry.
            while (not lines[line_idx].startswith(((' ' * indent) + '('))):
                new_lines.append(lines[line_idx])
                line_idx += 1
            blocks = []
            # Collect each entry (single-line or multi-line tuple) until the
            # closing ']' of the mapping.
            while (lines[line_idx].strip() != ']'):
                if (lines[line_idx].strip() == '('):
                    start_idx = line_idx
                    while (not lines[line_idx].startswith(((' ' * indent) + ')'))):
                        line_idx += 1
                    blocks.append('\n'.join(lines[start_idx:(line_idx + 1)]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1
            # Sort the entries alphabetically by their identifier.
            blocks = sorted(blocks, key=(lambda x: _re_identifier.search(x).groups()[0]))
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1
    if overwrite:
        with open(fname, 'w', encoding='utf-8') as f:
            f.write('\n'.join(new_lines))
    elif ('\n'.join(new_lines) != content):
        # Check mode: report that the file is out of order.
        return True
class TestTransform():
    """Evaluation-time transform: resize, normalize, and convert to tensor."""

    def __init__(self, size):
        # Deterministic pipeline; no train-time augmentation.
        self.transform = Compose([Resize(size), Normalize(), ToTensor()])

    def __call__(self, image):
        """Apply the pipeline and return only the transformed image."""
        (transformed, _, _) = self.transform(image)
        return transformed
_utils.test()
def test_offset_for_vector():
    """Taichi vector fields with a nonzero index offset store and read correctly.

    NOTE(review): the line above is a truncated decorator (presumably
    `@test_utils.test()`); confirm against the full file.
    """
    # a and b are created but never read — presumably just exercising field
    # construction with negative and None offsets; confirm intent.
    a = ti.field(dtype=ti.i32, shape=16, offset=(- 48))
    b = ti.field(dtype=ti.i32, shape=16, offset=None)
    offset = 16
    shape = 16
    c = ti.Vector.field(n=1, dtype=ti.i32, shape=shape, offset=offset)
    # NOTE(review): this inner function may have lost a @ti.kernel
    # decorator during extraction — confirm against the full file.
    def test():
        for i in c:
            c[i][0] = (2 * i)
    test()
    # Valid indices run from offset to offset + shape - 1.
    for i in range(offset, (offset + shape), 1):
        assert (c[i][0] == (2 * i))
class _ModuleNode(_PathNode):
    """Path-tree node backed by a concrete module source file."""
    # Keep instances lightweight; only the source path is stored.
    __slots__ = ['source_file']

    def __init__(self, source_file: str):
        self.source_file = source_file
def eval_model_val(checkpoint, logger, att_feats, train_data, val_data, classes):
    """Evaluate a CVAE checkpoint on the validation (unseen) classes.

    Generates synthetic features for the unseen classes, trains a simple
    classifier on real + synthetic data, and returns the mean per-class
    accuracy on `val_data`.

    NOTE(review): reads the module-level `args` (num_samples, classifier,
    C, K) — not visible in this chunk; confirm it is set at call time.
    """
    logger.info('building model...')
    states = torch.load(checkpoint)
    # Rebuild the networks with the dimensions stored in the checkpoint.
    net = CVAE(x_dim=states['x_dim'], s_dim=states['s_dim'], z_dim=states['z_dim'], enc_layers=states['enc_layers'], dec_layers=states['dec_layers'])
    dis = Discriminator(x_dim=states['x_dim'], s_dim=states['s_dim'], layers=states['dis_layers'])
    reg = Regressor(x_dim=states['x_dim'], s_dim=states['s_dim'], layers=states['reg_layers'])
    net.cuda()
    dis.cuda()
    reg.cuda()
    logger.info(f'loading model from checkpoint: {checkpoint}')
    net.load_state_dict(states['gen'])
    dis.load_state_dict(states['dis'])
    reg.load_state_dict(states['reg'])
    logger.info('generating synthetic samples...')
    net.eval()
    # Synthesize features conditioned on the unseen classes' attributes.
    samples = generate_samples(net, args.num_samples, att_feats[classes['val']], classes['val'])
    new_train_data = (train_data + samples)
    (X, Y) = zip(*new_train_data)
    X = np.array(X)
    Y = np.array(Y)
    if (args.classifier == 'svc'):
        clf = LinearSVC(C=args.C)
        logger.info('training linear SVC...')
    else:
        clf = KNeighborsClassifier(n_neighbors=args.K)
        logger.info('training kNN classifier')
    clf.fit(X=X, y=Y)
    (test_X, test_Y) = zip(*val_data)
    logger.info('predicting...')
    pred_Y = clf.predict(test_X)
    # Mean per-class accuracy over the unseen classes.
    macc_u = cal_macc(truth=test_Y, pred=pred_Y)
    logger.info(f'gzsl macc_u: {macc_u:4.5}')
    return macc_u
def get_evaluation_chunk_logits_data_key(evaluation_chunk_id):
    """Return the storage key for an evaluation chunk's serialized logits."""
    return f'evaluation_chunks/{evaluation_chunk_id}_logits_data.bytes'
class FairseqEncoderDecoderModel(BaseFairseqModel):
    """Base class for encoder-decoder models.

    Wires a FairseqEncoder and a FairseqDecoder together: the encoder maps
    source tokens to hidden states, and the decoder consumes them together
    with the shifted target tokens.
    """

    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        assert isinstance(self.encoder, FairseqEncoder)
        assert isinstance(self.decoder, FairseqDecoder)

    def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
        """Run the full encode/decode pass and return the decoder output."""
        enc = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
        return self.decoder(prev_output_tokens, encoder_out=enc, **kwargs)

    def extract_features(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
        """Like forward(), but return decoder features before the output projection."""
        enc = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
        return self.decoder.extract_features(prev_output_tokens, encoder_out=enc, **kwargs)

    def output_layer(self, features, **kwargs):
        """Project decoder features onto the output vocabulary."""
        return self.decoder.output_layer(features, **kwargs)

    def max_positions(self):
        """Tuple of (encoder, decoder) maximum supported positions."""
        return (self.encoder.max_positions(), self.decoder.max_positions())

    def max_decoder_positions(self):
        """Maximum target length supported by the decoder."""
        return self.decoder.max_positions()
def _init_weight_alt(m, n=''):
if isinstance(m, nn.Conv2d):
fan_out = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt((2.0 / fan_out)))
if (m.bias is not None):
if ('class_net.predict' in n):
m.bias.data.fill_((- math.log(((1 - 0.01) / 0.01))))
else:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_() |
def gene_data(aug_dials_file, ori_dial_file, aug_type):
    """Merge original dialogues with successful augmentations into a training file.

    Args:
        aug_dials_file: JSON of augmented turns keyed by dialogue_idx + turn index.
        ori_dial_file: JSON list of original dialogues.
        aug_type: suffix used in the output file name.

    Writes 'resources/train.<aug_type>_aug_history_belief'.
    """
    data = []
    with open(aug_dials_file) as f:
        aug_dials = json.load(f)
    with open(ori_dial_file) as f:
        dials = json.load(f)
    for dial_dict in tqdm(dials):
        for (ti, turn) in enumerate(dial_dict['dialogue']):
            # Always emit the original turn.
            data_point = convert_data(dial_dict, ti, turn['transcript'], turn['belief_state'])
            data.append((data_point + '\n'))
            # Emit the augmented variant too when one succeeded for this turn.
            key = (dial_dict['dialogue_idx'] + str(ti))
            if ((key in aug_dials) and aug_dials[key]['success']):
                data_point = convert_data(dial_dict, ti, aug_dials[key]['new_utter'], aug_dials[key]['belief_state'])
                data.append((data_point + '\n'))
    # FIX: the original opened the output file without a context manager;
    # `with` guarantees flush/close even if writing raises.
    with open((('resources/train.' + aug_type) + '_aug_history_belief'), 'w') as new_data_file:
        new_data_file.writelines(data)
class TableEncoder(json.JSONEncoder):
    """JSON encoder for Table objects and Enums.

    Tables of dataclass records are serialized column-wise as
    {field_name: [values...], 'length': n}.
    """

    def tablToJson(self, o):
        """Convert a Table to a JSON-serializable structure.

        Raises:
            TypeError: when the records cannot be serialized column-wise.
        """
        rd = o.records()
        if (len(rd) > 0):
            # Plain string / Lib records are emitted as-is.
            if isinstance(rd[0], (str, Lib)):
                return rd
            try:
                # Only dataclass fields with repr=True are exported.
                name = (x.name for x in dc.fields(rd[0]) if (x.repr == True))
                out = {n: [getattr(r, n) for r in rd] for n in name}
                # Collapse a WeightType column to its first entry
                # (presumably homogeneous per table — TODO confirm).
                if (('weightType' in out) and isinstance(out['weightType'][0], WeightType)):
                    out['weightType'] = out['weightType'][0]
                out['length'] = len(rd)
                return out
            except Exception as exc:
                # FIX: narrowed from a bare `except:` (which also swallowed
                # SystemExit/KeyboardInterrupt); chain the original cause.
                raise TypeError(f'Table: {o}') from exc
        elif o.type:
            # Empty table with a known record type: emit empty columns.
            name = (x.name for x in dc.fields(o.type) if (x.repr == True))
            return {n: [] for n in name}
        return []

    def default(self, o):
        """Fall back to Table/Enum handling for non-JSON-native objects."""
        try:
            return super().default(o)
        except TypeError:
            if isinstance(o, Table):
                return self.tablToJson(o)
            elif isinstance(o, Enum):
                return o.value
            raise TypeError(f'type:{type(o)}. {o}')
def read_selected_sentences(filename):
    """Parse a CSV of selected sentences into {xml_filename: [sent_id, ...]}.

    Expects rows of (doc, part, sent_id) after a header line; the XML file
    name is built as '<doc>_<part>.xml'.

    Args:
        filename: path to the CSV file.

    Returns:
        Dict mapping XML file names to lists of integer sentence IDs.
    """
    xml_to_sent_dict = {}
    # FIX: the original used binary mode ('rb') and `reader.next()`, which
    # only work on Python 2; on Python 3 the csv module wants text mode with
    # newline='' and the builtin next().
    with open(filename, 'r', newline='') as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        next(reader)  # skip the header row
        for line in reader:
            xml_filename = '{}_{}.xml'.format(line[0], line[1])
            sent_id = int(line[2])
            if (xml_filename not in xml_to_sent_dict):
                xml_to_sent_dict[xml_filename] = []
            xml_to_sent_dict[xml_filename].append(sent_id)
    return xml_to_sent_dict
def test_emptyarray():
    """Empty and nested-empty arrays must round-trip through the buffers
    serialization and through pickle."""
    assert (ak_from_buffers(*ak_to_buffers([])).to_list() == [])
    assert (ak_from_buffers(*ak_to_buffers([[], [], []])).to_list() == [[], [], []])
    # Protocol -1 == highest available pickle protocol.
    assert (pickle.loads(pickle.dumps(ak_Array([]), (- 1))).to_list() == [])
    assert (pickle.loads(pickle.dumps(ak_Array([[], [], []]), (- 1))).to_list() == [[], [], []])
def read_array(fp, allow_pickle=False, pickle_kwargs=None):
    """Read an ndarray from an NPY-format stream.

    Args:
        fp: readable file-like object positioned at the NPY magic.
        allow_pickle: permit loading object (pickled) arrays.
        pickle_kwargs: extra keyword arguments forwarded to pickle.load.

    Returns:
        The deserialized numpy array.

    Raises:
        ValueError: for object arrays when allow_pickle is False.
        UnicodeError: when unpickling Python-2 data fails on Python 3.
    """
    version = read_magic(fp)
    _check_version(version)
    (shape, fortran_order, dtype) = _read_array_header(fp, version)
    if (len(shape) == 0):
        # 0-d array holds exactly one element.
        count = 1
    else:
        # int64 accumulator avoids overflow on very large shapes.
        count = numpy.multiply.reduce(shape, dtype=numpy.int64)
    if dtype.hasobject:
        # Object arrays are stored pickled.
        if (not allow_pickle):
            raise ValueError('Object arrays cannot be loaded when allow_pickle=False')
        if (pickle_kwargs is None):
            pickle_kwargs = {}
        try:
            array = pickle.load(fp, **pickle_kwargs)
        except UnicodeError as err:
            if (sys.version_info[0] >= 3):
                # Friendlier message for py2->py3 pickle incompatibilities.
                raise UnicodeError(('Unpickling a python object failed: %r\nYou may need to pass the encoding= option to numpy.load' % (err,)))
            raise
    else:
        if isfileobj(fp):
            # Real file object: read directly (fast path).
            array = numpy.fromfile(fp, dtype=dtype, count=count)
        else:
            array = numpy.ndarray(count, dtype=dtype)
            if (dtype.itemsize > 0):
                # Read in bounded chunks so compressed/buffered streams
                # never see one huge request.
                max_read_count = (BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize))
                for i in range(0, count, max_read_count):
                    read_count = min(max_read_count, (count - i))
                    read_size = int((read_count * dtype.itemsize))
                    data = _read_bytes(fp, read_size, 'array data')
                    array[i:(i + read_count)] = numpy.frombuffer(data, dtype=dtype, count=read_count)
        if fortran_order:
            # Data is column-major on disk: reverse shape, then transpose.
            array.shape = shape[::(- 1)]
            array = array.transpose()
        else:
            array.shape = shape
    return array
class Experiment(ABC):
    """Base class for gin-configured grid experiments.

    `build` materializes one instance directory (config + launch command)
    per point of the search grid; `run` executes a single instance guarded
    by _RUNNING/_SUCCESS flag files.

    NOTE(review): the bare `()` lines below look like decorators that were
    stripped during extraction (e.g. @gin.configurable / @abstractmethod);
    confirm against the original file. `build_experiment` calling
    `self.build()` with no arguments also only works if such a decorator
    supplies them.
    """

    def __init__(self, config_path: str):
        self.config_path = config_path
        self.root = Path(config_path).parent
        gin.parse_config_file(self.config_path)
    ()

    def build(self, experiment_name: str, module: str, repeat: int, variables_dict: Dict[(str, SearchSpace)]):
        """Create one instance directory per point in the variable grid."""
        logging.info('Creating experiment instances ...')
        experiment_path = os.path.join(EXPERIMENTS_PATH, experiment_name)
        # Repetition is just one more grid axis.
        variables_dict['repeat'] = list(range(repeat))
        (variable_names, variables) = zip(*variables_dict.items())
        for instance_values in tqdm(product(*variables)):
            instance_variables = dict(zip(variable_names, instance_values))
            # Directory name encodes the variable assignment (floats with
            # 4 significant digits, spaces replaced by underscores).
            instance_name = ','.join([(('%s=%.4g' % (name.split('.')[(- 1)], value)) if isinstance(value, float) else ('%s=%s' % (name.split('.')[(- 1)], str(value).replace(' ', '_')))) for (name, value) in instance_variables.items()])
            instance_path = os.path.join(experiment_path, instance_name)
            Path(instance_path).mkdir(parents=True, exist_ok=False)
            # Copy the base config and append the instance's overrides.
            instance_config_path = os.path.join(instance_path, 'config.gin')
            copy(self.config_path, instance_config_path)
            with open(instance_config_path, 'a') as cfg:
                for (name, value) in instance_variables.items():
                    value = (f"'{value}'" if isinstance(value, str) else str(value))
                    cfg.write(f'''{name} = {value}
''')
            # Shell command that launches this instance.
            command_file = os.path.join(instance_path, 'command')
            with open(command_file, 'w') as cmd:
                cmd.write(f'python -m {module} --config_path={instance_config_path} run >> {instance_path}/instance.log 2>&1')

    def instance(self):
        ...
    ()

    def run(self, timer: Optional[int]=0):
        """Run this instance once, guarded by _RUNNING/_SUCCESS flag files."""
        # Random stagger so many instances don't start at the same moment.
        time.sleep(random.uniform(0, timer))
        running_flag = os.path.join(self.root, '_RUNNING')
        success_flag = os.path.join(self.root, '_SUCCESS')
        if (os.path.isfile(success_flag) or os.path.isfile(running_flag)):
            # Already done or being run by someone else.
            return
        elif (not os.path.isfile(running_flag)):
            Path(running_flag).touch()
        try:
            self.instance()
        except Exception as e:
            Path(running_flag).unlink()
            raise e
        except KeyboardInterrupt:
            # Reached only because KeyboardInterrupt is not an Exception
            # subclass on Python 3.
            Path(running_flag).unlink()
            raise Exception('KeyboardInterrupt')
        Path(running_flag).unlink()
        Path(success_flag).touch()

    def build_experiment(self):
        """Guard: only build from a source config, never from an instance dir."""
        if (EXPERIMENTS_PATH in str(self.root)):
            raise Exception('Cannot build ensemble from ensemble member configuration.')
        self.build()
def generate_virtual_adversarial_perturbation(x, logit, is_training=True):
    """VAT: estimate the virtual adversarial perturbation via power iteration.

    Args:
        x: input batch tensor.
        logit: clean logits for x (treated as fixed).
        is_training: forwarded to the model's forward pass.

    Returns:
        Perturbation tensor of shape of x, scaled to FLAGS.epsilon.
    """
    # Start from a random direction.
    d = tf.random_normal(shape=tf.shape(x))
    for _ in range(FLAGS.num_power_iterations):
        # Probe with a small step of size xi along the current direction.
        d = (FLAGS.xi * get_normalized_vector(d))
        logit_p = logit
        logit_m = forward((x + d), update_batch_stats=False, is_training=is_training)
        # KL divergence between clean and perturbed predictions.
        dist = L.kl_divergence_with_logit(logit_p, logit_m)
        # The gradient w.r.t. d is the next power-iteration direction;
        # stop_gradient keeps it out of the model's backward pass.
        grad = tf.gradients(dist, [d], aggregation_method=2)[0]
        d = tf.stop_gradient(grad)
    return (FLAGS.epsilon * get_normalized_vector(d))
def get_val(config_cmd, att):
    """Return the token that follows '--<att>' in a space-delimited command string.

    Raises ValueError if the flag is absent, IndexError if it has no value.
    """
    tokens = config_cmd.split(' ')
    flag_position = tokens.index('--{}'.format(att))
    return tokens[flag_position + 1]
def convert_to_bool(x: str) -> bool:
    """Parse common truthy/falsy strings (case-insensitive) into a bool.

    Raises:
        ValueError: if `x` is not one of the recognized spellings.
    """
    lowered = x.lower()
    if lowered in ('1', 'y', 'yes', 'true'):
        return True
    if lowered in ('0', 'n', 'no', 'false'):
        return False
    raise ValueError(f'{x} is not a value that can be converted to a bool.')
def keyword_while(A: dace.float32[N], B: dace.float32[N]):
    """DaCe program exercising `while True` with `continue`/`break`.

    Effectively copies A into B element by element (the `+ i - i` cancels,
    up to float rounding, and presumably exists to involve the loop counter
    in the computation -- TODO confirm the test's intent).
    """
    # Loop counter declared as an explicit DaCe scalar rather than a Python int.
    i = dace.define_local_scalar(dtype=dace.int32)
    i = 0
    while True:
        B[i] = ((A[i] + i) - i)
        i += 1
        if (i < N):
            continue
        else:
            break
class FixedAcquisitionRule(AcquisitionRule[(TensorType, SearchSpace, ProbabilisticModel)]):
    """Acquisition rule that always proposes the same fixed batch of query points."""

    def __init__(self, query_points: SequenceN[Sequence[float]]):
        """Store `query_points` once as a float64 tensor."""
        self._qp = tf.constant(query_points, dtype=tf.float64)

    def __repr__(self) -> str:
        return 'FixedAcquisitionRule({!r})'.format(self._qp)

    def acquire(self, search_space: SearchSpace, models: Mapping[(Tag, ProbabilisticModel)], datasets: Optional[Mapping[(Tag, Dataset)]]=None) -> TensorType:
        """Ignore the search space, models and datasets; return the stored points."""
        return self._qp
class Caffe2CppRep(BackendRep):
    """BackendRep wrapper around a C++ Caffe2 representation object."""

    def __init__(self, cpp_rep):
        super(Caffe2CppRep, self).__init__()
        # Cache the C++ rep and the graph's I/O metadata up front.
        self.__core = cpp_rep
        self.__external_outputs = cpp_rep.external_outputs()
        self.__external_inputs = cpp_rep.external_inputs()
        self.__uninitialized_inputs = cpp_rep.uninitialized_inputs()

    def init_net(self):
        """Return the underlying initialization net."""
        return self.__core.init_net()

    def pred_net(self):
        """Return the underlying prediction net."""
        return self.__core.pred_net()

    def external_outputs(self):
        """Return the graph's external output names."""
        return self.__core.external_outputs()

    def external_inputs(self):
        """Return the graph's external input names."""
        return self.__core.external_inputs()

    def run(self, inputs):
        """Execute the net and return its outputs as a named tuple.

        `inputs` may be a dict (passed through as-is), a list/tuple matched
        positionally against the uninitialized graph inputs, or a single value.
        """
        if isinstance(inputs, dict):
            raw_outputs = self.__core.run(inputs)
        elif isinstance(inputs, (list, tuple)):
            expected = len(self.__uninitialized_inputs)
            if len(inputs) != expected:
                raise RuntimeError('Expected {} values for uninitialized graph inputs ({}), but got {}.'.format(expected, ', '.join(self.__uninitialized_inputs), len(inputs)))
            raw_outputs = self.__core.run(dict(zip(self.__uninitialized_inputs, inputs)))
        else:
            raw_outputs = self.__core.run([inputs])
        return namedtupledict('Outputs', self.__external_outputs)(*raw_outputs)
# NOTE(review): the bare `.parametrize` decorator below looks truncated
# (presumably `@pytest.mark.parametrize`) -- confirm against the repo.
.parametrize('type_', ('string', 'integer', 'array', 'object', 'boolean', 'number'))
def test_cookies(testdir, type_):
    """For every schema type, the generated test must receive the required
    'token' cookie as a string and produce a valid requests call."""
    testdir.make_test('\()\(suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large], deadline=None, max_examples=20)\ndef test_(case):\n    assert_str(case.cookies["token"])\n    assert_requests_call(case)\n    ', schema_name='simple_openapi.yaml', **as_param({'name': 'token', 'in': 'cookie', 'required': True, 'schema': {'type': type_}}))
    # Exactly one generated test is expected to pass.
    testdir.run_and_assert(passed=1)
def count_type_citizens(model, condition, exclude_jailed=True):
    """Count non-cop agents whose condition matches.

    Args:
        model: object exposing `schedule.agents`.
        condition: agent condition value to match.
        exclude_jailed: when True, agents with a truthy jail_sentence are skipped.
    """
    return sum(
        1
        for agent in model.schedule.agents
        if agent.breed != 'cop'
        and not (exclude_jailed and agent.jail_sentence)
        and agent.condition == condition
    )
def main(version: str, data_root: str, submission_path: str, config_name: str='predict_2020_icra.json') -> None:
    """Compute prediction metrics for a submission and write them next to it.

    Reads the submission JSON, evaluates it against the given nuScenes split,
    and writes `<submission>_metrics.json`.

    Fix: files are now opened via context managers -- the original relied on
    `json.load(open(...))`, leaking both file handles.
    """
    with open(submission_path, 'r') as f:
        predictions = json.load(f)
    nusc = NuScenes(version=version, dataroot=data_root)
    helper = PredictHelper(nusc)
    config = load_prediction_config(helper, config_name)
    results = compute_metrics(predictions, helper, config)
    metrics_path = submission_path.replace('.json', '_metrics.json')
    with open(metrics_path, 'w') as f:
        json.dump(results, f, indent=2)
def fuzzy_string_match(str_ref, str_hyp):
    """Return the token-sort fuzzy similarity of two strings, scaled to [0, 1]."""
    similarity_pct = fuzz.token_sort_ratio(str_ref, str_hyp)
    return similarity_pct / 100.0
def tok2int_sent(sentence, tokenizer, max_seq_length):
    """Convert a (sent_a, sent_b) pair into BERT-style model inputs.

    Returns (input_ids, input_mask, segment_ids), each zero-padded to exactly
    max_seq_length. sent_b may be falsy, producing a single-sentence encoding.
    """
    (sent_a, sent_b) = sentence
    tokens_a = tokenizer.tokenize(sent_a)
    tokens_b = None
    if sent_b:
        tokens_b = tokenizer.tokenize(sent_b)
        # Reserve 3 slots for [CLS], [SEP], [SEP] in the pair encoding.
        _truncate_seq_pair(tokens_a, tokens_b, (max_seq_length - 3))
    elif (len(tokens_a) > (max_seq_length - 2)):
        # Single sentence: reserve 2 slots for [CLS] and [SEP].
        tokens_a = tokens_a[:(max_seq_length - 2)]
    tokens = ((['[CLS]'] + tokens_a) + ['[SEP]'])
    # Segment 0 covers sentence A (including [CLS] and its [SEP]).
    segment_ids = ([0] * len(tokens))
    if tokens_b:
        tokens = ((tokens + tokens_b) + ['[SEP]'])
        # Segment 1 covers sentence B and its trailing [SEP].
        segment_ids += ([1] * (len(tokens_b) + 1))
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    # Attention mask: 1 for real tokens, 0 for padding.
    input_mask = ([1] * len(input_ids))
    padding = ([0] * (max_seq_length - len(input_ids)))
    input_ids += padding
    input_mask += padding
    segment_ids += padding
    assert (len(input_ids) == max_seq_length)
    assert (len(input_mask) == max_seq_length)
    assert (len(segment_ids) == max_seq_length)
    return (input_ids, input_mask, segment_ids)
def write_results(results):
    """Serialize `results` as JSON to a fresh temp file and return its path.

    Fix: uses tempfile.mkstemp instead of the deprecated, race-prone mktemp
    (mktemp only generates a name, leaving a window in which another process
    can create the file first).

    The caller is responsible for deleting the returned file.
    """
    import os
    from tempfile import mkstemp
    fd, filename = mkstemp()
    with os.fdopen(fd, 'w') as f:
        json.dump(results, f)
    return filename
def launch_job(cfg, init_method, func, daemon=False):
    """Run `func(cfg)` directly, or spawn one process per GPU when cfg.NUM_GPUS > 1."""
    if cfg.NUM_GPUS <= 1:
        # Single device: no multiprocessing needed.
        func(cfg=cfg)
    else:
        torch.multiprocessing.spawn(mpu.run, nprocs=cfg.NUM_GPUS, args=(cfg.NUM_GPUS, func, init_method, cfg.SHARD_ID, cfg.NUM_SHARDS, cfg.DIST_BACKEND, cfg), daemon=daemon)
def parse_example_proto(example_serialized):
    """Parse one serialized Example into (filename, label, bbox, class text).

    The returned bbox has shape [1, num_boxes, 4], ordered [ymin, xmin, ymax, xmax].
    """
    # Dense features: filename, integer class label (default -1), class text.
    feature_map = {'image/filename': tf.FixedLenFeature([], dtype=tf.string, default_value=''), 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, default_value=(- 1)), 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, default_value='')}
    # Bounding-box coordinates arrive as variable-length float lists.
    sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
    feature_map.update({k: sparse_float32 for k in ['image/object/bbox/xmin', 'image/object/bbox/ymin', 'image/object/bbox/xmax', 'image/object/bbox/ymax']})
    features = tf.parse_single_example(example_serialized, feature_map)
    label = tf.cast(features['image/class/label'], dtype=tf.int32)
    # Each becomes shape [1, num_boxes].
    xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
    ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
    xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
    ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
    # NOTE(review): tf.concat(axis, values) is the pre-TF-1.0 argument order;
    # modern TF expects tf.concat(values, axis) -- confirm the pinned version.
    bbox = tf.concat(0, [ymin, xmin, ymax, xmax])
    # [4, num_boxes] -> [1, 4, num_boxes] -> [1, num_boxes, 4]
    bbox = tf.expand_dims(bbox, 0)
    bbox = tf.transpose(bbox, [0, 2, 1])
    return (features['image/filename'], label, bbox, features['image/class/text'])
class DenseNet(nn.Module):
    """DenseNet-BC: four dense blocks joined by channel-compressing transitions."""

    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate
        num_planes = (2 * growth_rate)
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
        # Each dense block adds nblocks[i] * growth_rate channels; each
        # transition then compresses channels by `reduction`.
        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += (nblocks[0] * growth_rate)
        out_planes = int(math.floor((num_planes * reduction)))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += (nblocks[1] * growth_rate)
        out_planes = int(math.floor((num_planes * reduction)))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += (nblocks[2] * growth_rate)
        out_planes = int(math.floor((num_planes * reduction)))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes
        # Final dense block has no transition; it feeds BN -> pool -> linear.
        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += (nblocks[3] * growth_rate)
        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)

    def _make_dense_layers(self, block, in_planes, nblock):
        # Stack `nblock` units; each consumes all previous feature maps and
        # contributes growth_rate new channels.
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)

    def forward(self, x, with_latent=False, fake_relu=False, no_relu=False):
        """Forward pass; optionally also return the pre-linear latent.

        BUG FIX: the `fake_relu` branches were swapped -- plain F.relu was
        applied when fake_relu=True and the custom FakeReLU autograd function
        when False. They are now matched to the flag's name.
        """
        assert (not no_relu), 'DenseNet has no pre-ReLU activations, no_relu not supported'
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        if fake_relu:
            out = F.avg_pool2d(FakeReLU.apply(self.bn(out)), 4)
        else:
            out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        out = out.view(out.size(0), (- 1))
        latent = out
        out = self.linear(out)
        if with_latent:
            return (out, latent)
        return out
def get_settings(args=None):
    """Build ExperimentSettings from parsed CLI args (parsing them if absent)."""
    if (not args):
        args = get_args()
    settings = experiment_settings.ExperimentSettings(args)
    # NOTE(review): `lower >= upper` reads inverted for a scale range --
    # confirm the intended relationship between rho_scale_lower/upper.
    assert (args.rho_scale_lower >= args.rho_scale_upper)
    return settings
class NLPDataLoader(KerasDataLoader):
    """Keras data loader for the seq2seq NLP task, sharded per collaborator."""

    def __init__(self, collaborator_count, split_ratio, num_samples, data_path, batch_size, **kwargs):
        """Download the dataset and load this collaborator's shard.

        Note: `data_path` is interpreted as the shard number; the actual
        on-disk path comes from dlu.download_data_().
        """
        self.shard_num = data_path
        self.data_path = dlu.download_data_()
        self.batch_size = batch_size
        (train, valid, details) = dlu.load_shard(collaborator_count, self.shard_num, self.data_path, num_samples, split_ratio)
        self.num_samples = details['num_samples']
        self.num_encoder_tokens = details['num_encoder_tokens']
        self.num_decoder_tokens = details['num_decoder_tokens']
        self.max_encoder_seq_length = details['max_encoder_seq_length']
        self.max_decoder_seq_length = details['max_decoder_seq_length']
        # Two model inputs (encoder/decoder) plus one target per split.
        self.X_train = [train[0], train[1]]
        self.y_train = train[2]
        self.X_valid = [valid[0], valid[1]]
        self.y_valid = valid[2]

    def get_feature_shape(self):
        """Shape of the encoder-input training array."""
        return self.X_train[0].shape

    def get_train_loader(self, batch_size=None):
        """Return a shuffled batch generator over the training split."""
        return self._get_batch_generator(X1=self.X_train[0], X2=self.X_train[1], y=self.y_train, batch_size=batch_size)

    def get_valid_loader(self, batch_size=None):
        """Return a shuffled batch generator over the validation split."""
        return self._get_batch_generator(X1=self.X_valid[0], X2=self.X_valid[1], y=self.y_valid, batch_size=batch_size)

    def get_train_data_size(self):
        """Number of training samples."""
        return self.X_train[0].shape[0]

    def get_valid_data_size(self):
        """Number of validation samples."""
        return self.X_valid[0].shape[0]

    @staticmethod
    def _batch_generator(X1, X2, y, idxs, batch_size, num_batches):
        # BUG FIX: this was an instance method without `self`, so calling it
        # as `self._batch_generator(...)` passed 7 arguments to 6 parameters
        # and raised TypeError. It uses no instance state -> staticmethod.
        for i in range(num_batches):
            a = (i * batch_size)
            b = (a + batch_size)
            (yield ([X1[idxs[a:b]], X2[idxs[a:b]]], y[idxs[a:b]]))

    def _get_batch_generator(self, X1, X2, y, batch_size):
        """Yield ([X1_batch, X2_batch], y_batch) over a fresh random permutation."""
        if (batch_size is None):
            batch_size = self.batch_size
        idxs = np.random.permutation(np.arange(X1.shape[0]))
        num_batches = int(np.ceil((X1.shape[0] / batch_size)))
        return self._batch_generator(X1, X2, y, idxs, batch_size, num_batches)
class ForgotForm(Form):
    """Password-reset request form: a single email field.

    The email must be present and between 6 and 40 characters long.
    """
    email = TextField('Email', validators=[DataRequired(), Length(min=6, max=40)])
class TFXLNetPreTrainedModel():
    """Placeholder for TFXLNetPreTrainedModel when TensorFlow is unavailable.

    Both construction and from_pretrained() delegate to requires_tf, which
    presumably raises an informative error -- confirm against the project's
    dummy-object utilities.
    """
    def __init__(self, *args, **kwargs):
        requires_tf(self)
    # NOTE(review): dummy objects of this kind usually expose from_pretrained
    # as a classmethod; here it is an instance method, so it is only reachable
    # after __init__ (which itself calls requires_tf) -- confirm intended.
    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
def graph_constructor(X, bihierarchy, constraint_structure):
    """Build a capacitated digraph from matrix X and a pair of set hierarchies.

    Args:
        X: numpy array; each cell contributes a unit-capacity bridging edge
            weighted by its value.
        bihierarchy: pair (A, B) of list-like collections of index sets; both
            are extended in place with the full index set S and all singletons.
        constraint_structure: mapping frozenset(index set) -> (min, max)
            capacity bounds; singletons receive (0, 1) here (mutated in place).

    Returns:
        networkx.DiGraph composed of: a hierarchy graph over A (edges from
        each set to its immediate subsets), a mirrored 'p'-tagged hierarchy
        over B (edges from each subset to its immediate superset), and one
        bridging edge per cell of X linking the two.
    """
    # S: the set of all (i, j) indices of X.
    S = {index for (index, x) in np.ndenumerate(X)}
    (A, B) = bihierarchy
    # Add the universal set and every singleton to both hierarchies.
    (A.append(S), B.append(S))
    for x in S:
        (A.append({x}), B.append({x}))
    # Each individual cell may carry between 0 and 1 units.
    for x in S:
        constraint_structure.update({frozenset({x}): (0, 1)})
    # R1: for each covering pair x < y in A (no set strictly between them),
    # add superset -> subset edge carrying the subset's capacity bounds.
    R1 = nx.DiGraph()
    for x in A:
        for y in A:
            if ((x < y) and (not any(((x < z < y) for z in A)))):
                R1.add_edge(frozenset(y), frozenset(x), weight=sum([X[i] for i in x]), min_capacity=constraint_structure[frozenset(x)][0], max_capacity=constraint_structure[frozenset(x)][1])
    # R2: the same covering relation over B, but edges point subset -> superset
    # and nodes are tagged with 'p' to keep the two hierarchies disjoint.
    R2 = nx.DiGraph()
    for x in B:
        for y in B:
            if ((y < x) and (not any(((y < z < x) for z in B)))):
                R2.add_edge((frozenset(y), 'p'), (frozenset(x), 'p'), weight=sum([X[i] for i in y]), min_capacity=constraint_structure[frozenset(y)][0], max_capacity=constraint_structure[frozenset(y)][1])
    G = nx.compose(R1, R2)
    # Bridge the hierarchies: one unit-capacity edge per cell, weighted by value.
    for (index, x) in np.ndenumerate(X):
        G.add_edge(frozenset({index}), (frozenset({index}), 'p'), weight=x, min_capacity=0, max_capacity=1)
    return G
class BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convs with an optional downsample path."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
        super(BasicBlock, self).__init__()
        # First conv carries the stride/dilation; second preserves resolution.
        self.conv1 = conv3x3(inplanes, planes, stride=stride, dilation=dilation)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, stride=1, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Return relu(bn2(conv2(relu(bn1(conv1(x))))) + identity)."""
        identity = self.downsample(x) if self.downsample is not None else x
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += identity
        return self.relu(y)
# NOTE(review): the bare `.torch` decorator below looks truncated
# (presumably `@pytest.mark.torch`) -- confirm against the repo.
.torch
def test_callback_for_cardinality(sequential_info):
    """Constructing a PandasSequentialDataset must fill feature cardinalities.

    All four categorical features start with cardinality None; after building
    the dataset, user-side features are expected at 4 and item-side at 6.
    """
    schema = TensorSchema([TensorFeatureInfo('user_id', feature_type=FeatureType.CATEGORICAL, is_seq=True), TensorFeatureInfo('item_id', feature_type=FeatureType.CATEGORICAL, is_seq=True), TensorFeatureInfo('some_user_feature', feature_type=FeatureType.CATEGORICAL), TensorFeatureInfo('some_item_feature', feature_type=FeatureType.CATEGORICAL, is_seq=True)])
    for f in schema.all_features:
        assert (f.cardinality is None)
    # The dataset is built only for its side effect on the shared schema.
    PandasSequentialDataset(schema, 'user_id', 'item_id', sequential_info['sequences'])
    assert (schema.all_features[0].cardinality == 4)
    assert (schema.all_features[1].cardinality == 6)
    assert (schema.all_features[2].cardinality == 4)
    assert (schema.all_features[3].cardinality == 6)
# NOTE(review): the bare `.parametrize` decorator below looks truncated
# (presumably `@pytest.mark.parametrize`) -- confirm against the repo.
.parametrize('csr_container', CSR_CONTAINERS)
def test_unsorted_indices(csr_container):
    """SVC results on CSR input must not depend on per-row index order.

    Trains on 50 digits samples with sorted-index sparse data, then reverses
    the index order within each row (still a valid CSR matrix) and checks
    that coefficients and predicted probabilities are unchanged.
    """
    (X, y) = load_digits(return_X_y=True)
    X_test = csr_container(X[50:100])
    (X, y) = (X[:50], y[:50])
    X_sparse = csr_container(X)
    # Dense fit is the reference for the sorted-sparse fit.
    coef_dense = svm.SVC(kernel='linear', probability=True, random_state=0).fit(X, y).coef_
    sparse_svc = svm.SVC(kernel='linear', probability=True, random_state=0).fit(X_sparse, y)
    coef_sorted = sparse_svc.coef_
    assert_allclose(coef_dense, coef_sorted.toarray())
    def scramble_indices(X):
        # Reverse data/indices within each row: same matrix, unsorted indices.
        new_data = []
        new_indices = []
        for i in range(1, len(X.indptr)):
            row_slice = slice(*X.indptr[(i - 1):(i + 1)])
            new_data.extend(X.data[row_slice][::(- 1)])
            new_indices.extend(X.indices[row_slice][::(- 1)])
        return csr_container((new_data, new_indices, X.indptr), shape=X.shape)
    X_sparse_unsorted = scramble_indices(X_sparse)
    X_test_unsorted = scramble_indices(X_test)
    # Sanity: the scrambled matrices really are unsorted.
    assert (not X_sparse_unsorted.has_sorted_indices)
    assert (not X_test_unsorted.has_sorted_indices)
    unsorted_svc = svm.SVC(kernel='linear', probability=True, random_state=0).fit(X_sparse_unsorted, y)
    coef_unsorted = unsorted_svc.coef_
    assert_allclose(coef_unsorted.toarray(), coef_sorted.toarray())
    assert_allclose(sparse_svc.predict_proba(X_test_unsorted), sparse_svc.predict_proba(X_test))
def interpolate_tracking_boxes(left_box: TrackingBox, right_box: TrackingBox, right_ratio: float) -> TrackingBox:
    """Interpolate two tracking boxes at `right_ratio` toward the right box.

    Vector fields are blended element-wise, the rotation via quaternion slerp;
    identity fields (sample token, tracking id/name) come from `right_box`.
    """
    left_ratio = 1.0 - right_ratio

    def blend_vec(left, right):
        # Element-wise affine blend, returned as a tuple.
        return tuple(left_ratio * np.array(left, dtype=float) + right_ratio * np.array(right, dtype=float))

    def blend_num(left, right):
        return left_ratio * float(left) + right_ratio * float(right)

    rotation = Quaternion.slerp(q0=Quaternion(left_box.rotation), q1=Quaternion(right_box.rotation), amount=right_ratio).elements
    tracking_score = blend_num(left_box.tracking_score, right_box.tracking_score)
    return TrackingBox(sample_token=right_box.sample_token, translation=blend_vec(left_box.translation, right_box.translation), size=blend_vec(left_box.size, right_box.size), rotation=rotation, velocity=blend_vec(left_box.velocity, right_box.velocity), ego_translation=blend_vec(left_box.ego_translation, right_box.ego_translation), tracking_id=right_box.tracking_id, tracking_name=right_box.tracking_name, tracking_score=tracking_score)
def show_metric_table(base):
    """Aggregate hydra overrides, mIoU and training time across runs into a table.

    Walks every `*/hydra/overrides.yaml` sibling of `base`, collects the
    override key=value pairs plus the run's `mIoU.txt` and `training_time.txt`
    metrics (time converted from seconds to hours), and renders the result as
    a Streamlit dataframe.

    Fix: the training-time loop previously discarded the result of
    `l.replace('\n', '')`, leaving the newline in the parsed line (the mIoU
    loop stripped it correctly); the three copy-pasted dict-append snippets
    are also folded into one helper.
    """
    def _add(table, key, value):
        # Append `value` to the column for `key`, creating it on first use.
        table.setdefault(key, []).append(value)

    table = {}
    for p in sorted(base.parent.glob('*/hydra/overrides.yaml')):
        # Hydra overrides are 'key=value' strings.
        for line in OmegaConf.load(str(p)):
            (k, v) = line.split('=')
            _add(table, k, v)
        with open((p.parent.parent / 'mIoU.txt'), 'r') as f:
            for line in f.readlines():
                (k, v) = line.replace('\n', '').split(' - ')
                _add(table, k, round(float(v), 4))
        with open((p.parent.parent / 'training_time.txt'), 'r') as f:
            for line in f.readlines():
                (k, v) = line.replace('\n', '').split(' - ')
                # Seconds -> hours.
                _add(table, k, round((float(v) / 3600), 4))
    st.dataframe(pd.DataFrame(table))
class PairDataset(VisionDataset):
    """Dataset serving a fixed pair of perspective crops plus their panorama.

    Loads a precomputed pairs file (numpy pickle of dicts) and always returns
    the pair at `self.index`, which is derived from the `index` constructor
    argument -- the index passed to __getitem__ is ignored (see NOTE there).
    """
    def __init__(self, root, dataset_name, index, prompt, pairs_file=None, extensions='.jpg', height=512, Train=True, down_scale=1, break_iter=None):
        # Only two output resolutions are supported.
        assert ((down_scale == 1) or (down_scale == 2)), 'only support resolution of 1024X512 and 512X256'
        self.downscale = down_scale
        if (self.downscale == 2):
            self.height = 256
            self.width = 512
        elif (self.downscale == 1):
            self.height = 512
            self.width = 1024
        (transform, target_transform) = self.init_crops_transform()
        # pairs: dict of {pair_index: {'img1': ..., 'img2': ..., 'pano': ...}}.
        self.pairs = np.load(pairs_file, allow_pickle=True).item()
        # NOTE(review): assumes exactly one non-panorama entry lives in root
        # (hence the -1) -- confirm the expected directory layout.
        num_panos = (len(os.listdir(root)) - 1)
        num_pairs = (len(self.pairs) // num_panos)
        # Select this worker's pair; pairs appear to be 1-indexed -- confirm.
        self.index = ((num_pairs * int(index)) + 1)
        super(PairDataset, self).__init__(root, transform=transform, target_transform=target_transform)
        if (break_iter is None):
            self.break_iter = 0
        else:
            self.break_iter = (break_iter + 1)
        self.extensions = extensions
        self.train = Train
        self.dataset_type = dataset_name
        self.prompt = prompt
    def __getitem__(self, index):
        # NOTE(review): `index` is ignored; every call serves the single pair
        # selected in __init__ -- confirm this is intended.
        img1 = self.pairs[self.index]['img1']
        img2 = self.pairs[self.index]['img2']
        pano = self.pairs[self.index]['pano']
        path = os.path.join(self.root, img1['path'])
        (rotation_x1, rotation_y1) = (img1['x'], img1['y'])
        image1 = self.loader(path)
        path2 = os.path.join(self.root, img2['path'])
        (rotation_x2, rotation_y2) = (img2['x'], img2['y'])
        image2 = self.loader(path2)
        full_path = os.path.join(self.root, pano['path'])
        # "original" variants: plain [0, 1] tensors without crop/normalization.
        image1_original = self.normal_transform(image1)
        image2_original = self.normal_transform(image2)
        if (self.target_transform is not None):
            image1 = self.target_transform(image1)
            image2 = self.target_transform(image2)
        image_pano = self.normal_transform(self.loader(full_path, self.downscale))
        prompt = self.prompt
        return {'img1': image1, 'img2': image2, 'img1_original': image1_original, 'img2_original': image2_original, 'pano': image_pano, 'rotation_x1': rotation_x1, 'rotation_y1': rotation_y1, 'rotation_x2': rotation_x2, 'rotation_y2': rotation_y2, 'path': path, 'path2': path2, 'pano_path': full_path, 'txt': prompt}
    def __len__(self):
        # NOTE(review): hard-coded length (always the same pair is served) --
        # confirm this is a deliberate fixed epoch size.
        return 5
    def init_crops_transform(self):
        # Square resize to (height, height) + ImageNet mean/std normalization.
        transform = transforms.Compose([transforms.Resize((int(self.height), int(self.height))), transforms.ToTensor(), transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
        target_transform = transforms.Compose([transforms.Resize((int(self.height), int(self.height))), transforms.ToTensor(), transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))])
        return (transform, target_transform)
    def normal_transform(self, image):
        # PIL image -> float32 tensor scaled to [0, 1] (no channel reordering).
        image_arr = np.array(image)
        image_arr = (image_arr / 255).astype(np.float32)
        return torch.tensor(image_arr)
    def loader(self, path, down_scale=1):
        """Load an RGB image, optionally downscaling by an integer factor."""
        with open(path, 'rb') as f:
            img = Image.open(f)
            img = img.convert('RGB')
            if (down_scale > 1):
                (width, height) = img.size
                new_width = int((width / down_scale))
                new_height = int((height / down_scale))
                new_size = (new_width, new_height)
                img = img.resize(new_size)
            return img
class NumpySimpleITKImageBridge():
    """Converts numpy arrays to SimpleITK images using stored image properties."""

    @staticmethod
    def convert(array: np.ndarray, properties: ImageProperties) -> sitk.Image:
        """Convert `array` to a sitk.Image carrying `properties`' geometry.

        Accepts a flattened scalar array, a flattened (n_pixels, n_components)
        vector array, or an array already shaped like the image (optionally
        with a trailing component dimension).

        Fix: declared without `self` or a decorator, so calling it on an
        instance would mis-bind the first argument; it is now an explicit
        staticmethod (class-level calls behave exactly as before).

        Raises:
            ValueError: if the array shape cannot be matched to the image size.
        """
        is_vector = False
        if (not (array.shape == properties.size[::(- 1)])):
            if (array.ndim == 1):
                # Flattened scalar image: restore the reversed (z, y, x) shape.
                array = array.reshape(properties.size[::(- 1)])
            elif (array.ndim == 2):
                # Flattened vector image: (n_pixels, n_components).
                is_vector = True
                array = array.reshape((properties.size[::(- 1)] + (array.shape[1],)))
            elif (array.ndim == (len(properties.size) + 1)):
                # Already image-shaped with a trailing component dimension.
                is_vector = True
            else:
                raise ValueError('array shape {} not supported'.format(array.shape))
        image = sitk.GetImageFromArray(array, is_vector)
        image.SetOrigin(properties.origin)
        image.SetSpacing(properties.spacing)
        image.SetDirection(properties.direction)
        return image
# NOTE(review): these bare `.parametrize` decorators look truncated
# (presumably `@pytest.mark.parametrize`) -- confirm against the repo.
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313, 999])
def test_gelu_forward_backward(seed, ctx, func_name):
    """Check F.gelu forward/backward against the ref_gelu reference implementation."""
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    # Small fixed-shape float32 input; backward/accum tolerances are loosened.
    inputs = [rng.randn(2, 3, 4).astype(np.float32)]
    function_tester(rng, F.gelu, ref_gelu, inputs, ctx=ctx, func_name=func_name, atol_b=0.001, atol_accum=0.001)
def get_default_hyperparams(model_name):
    """Return the hyperparameter search grid; RNF models gain a skip_rate axis."""
    params = {
        'dropout_rate': [0.1, 0.2, 0.3, 0.4, 0.5],
        'hidden_layer_size': [5, 10, 25, 50, 100, 150],
        'minibatch_size': [256, 512, 1024],
        'learning_rate': np.logspace((- 4), 0, 5),
        'max_norm': [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0],
    }
    if 'rnf' in model_name:
        print('Using RNF params')
        params['skip_rate'] = [0.25, 0.5, 0.75]
    else:
        print('Returning default params')
    return params
def train(epoch, model, optimizer, scheduler):
    """Run one training epoch of the flow model and return its mean loss.

    The loss is the negative log-likelihood -(log p(z) + log|det J|) averaged
    over each batch. Reads the module-level `train_loader` and `device`, and
    increments the module global `global_step`.
    """
    global global_step
    epoch_loss = 0.0
    # Running sums of [total loss, log p(z), log-det]; each term is
    # pre-divided by display_step so the printout shows per-step averages.
    running_loss = [0.0, 0.0, 0.0]
    model.train()
    display_step = 100
    for (batch_idx, (x, c)) in enumerate(train_loader):
        # NOTE(review): scheduler.step() before optimizer.step() is the
        # pre-PyTorch-1.1 ordering -- confirm the targeted torch version.
        scheduler.step()
        global_step += 1
        (x, c) = (x.to(device), c.to(device))
        optimizer.zero_grad()
        (log_p, logdet) = model(x, c)
        (log_p, logdet) = (torch.mean(log_p), torch.mean(logdet))
        # Maximize likelihood == minimize its negation.
        loss = (- (log_p + logdet))
        loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        running_loss[0] += (loss.item() / display_step)
        running_loss[1] += (log_p.item() / display_step)
        running_loss[2] += (logdet.item() / display_step)
        epoch_loss += loss.item()
        if (((batch_idx + 1) % display_step) == 0):
            print('Global Step : {}, [{}, {}] [Log pdf, Log p(z), Log Det] : {}'.format(global_step, epoch, (batch_idx + 1), np.array(running_loss)))
            running_loss = [0.0, 0.0, 0.0]
        # Drop per-batch tensors eagerly to release GPU memory.
        del x, c, log_p, logdet, loss
    del running_loss
    gc.collect()
    print('{} Epoch Training Loss : {:.4f}'.format(epoch, (epoch_loss / len(train_loader))))
    return (epoch_loss / len(train_loader))
def get_site_symmetries(wyckoff):
    """Collect quoted, left-padded site-symmetry labels and hand them on.

    The flattened list is passed to damp_array_site_symmetries; nothing is
    returned (presumably the callee prints/dumps the array -- confirm).
    """
    labels = []
    for entry in wyckoff:
        for site in entry['wyckoff']:
            labels.append('"%-6s"' % site['site_symmetry'])
    damp_array_site_symmetries(labels)
class TestTextFileReader(TestCase):
    """Round-trip test for caffe2's TextFileReader over a tab-separated file."""
    def test_text_file_reader(self):
        """Write 4 rows of (str, str, float) to a temp file, then read them
        back for every num_passes in 1..2 and batch_size in 1..len+2,
        checking each column equals the source data tiled num_passes times."""
        schema = Struct(('field1', Scalar(dtype=str)), ('field2', Scalar(dtype=str)), ('field3', Scalar(dtype=np.float32)))
        num_fields = 3
        # Fixture data stored column-major; zipped into rows for the file.
        col_data = [['l1f1', 'l2f1', 'l3f1', 'l4f1'], ['l1f2', 'l2f2', 'l3f2', 'l4f2'], [0.456, 0.789, 0.10101, (- 24342.64)]]
        row_data = list(zip(*col_data))
        with tempfile.NamedTemporaryFile(mode='w+', delete=False) as txt_file:
            # One tab-separated line per row, newline-terminated.
            txt_file.write(('\n'.join(('\t'.join((str(x) for x in f)) for f in row_data)) + '\n'))
            txt_file.flush()
            for num_passes in range(1, 3):
                for batch_size in range(1, (len(row_data) + 2)):
                    init_net = core.Net('init_net')
                    reader = TextFileReader(init_net, filename=txt_file.name, schema=schema, batch_size=batch_size, num_passes=num_passes)
                    workspace.RunNetOnce(init_net)
                    net = core.Net('read_net')
                    (should_stop, record) = reader.read_record(net)
                    results = ([np.array([])] * num_fields)
                    # Drain the reader until it signals stop, accumulating
                    # every fetched batch per field.
                    while True:
                        workspace.RunNetOnce(net)
                        arrays = FetchRecord(record).field_blobs()
                        for i in range(num_fields):
                            results[i] = np.append(results[i], arrays[i])
                        if workspace.FetchBlob(should_stop):
                            break
                    for i in range(num_fields):
                        # Each pass re-reads the full column in order.
                        col_batch = np.tile(col_data[i], num_passes)
                        if (col_batch.dtype in (np.float32, np.float64)):
                            # Floats lose precision through the text round-trip.
                            np.testing.assert_array_almost_equal(col_batch, results[i], decimal=3)
                        else:
                            np.testing.assert_array_equal(col_batch, results[i])
def _init_nd_shape_and_axes(x, shape, axes):
    """Normalize `shape` and `axes` arguments for n-dimensional transforms.

    Either argument may be None; negative axes are wrapped; when only `shape`
    is given it applies to the trailing axes of `x`.

    Args:
        x: array-like exposing .ndim and .shape.
        shape: per-axis transform sizes or None; an entry of -1 means "keep
            the input's size along that axis".
        axes: axes to transform, or None.

    Returns:
        (shape, axes) as (tuple, list), validated and of equal length.

    Raises:
        ValueError: for out-of-range or duplicate axes, a shape/axes length
            mismatch, more shape entries than dimensions, or sizes < 1.
    """
    noshape = (shape is None)
    noaxes = (axes is None)
    if (not noaxes):
        # Coerce scalars/iterables to a list of ints.
        axes = _iterable_of_int(axes, 'axes')
        # Wrap negative axes the usual Python way.
        axes = [((a + x.ndim) if (a < 0) else a) for a in axes]
        if any((((a >= x.ndim) or (a < 0)) for a in axes)):
            raise ValueError('axes exceeds dimensionality of input')
        if (len(set(axes)) != len(axes)):
            raise ValueError('all axes must be unique')
    if (not noshape):
        shape = _iterable_of_int(shape, 'shape')
        if (axes and (len(axes) != len(shape))):
            raise ValueError('when given, axes and shape arguments have to be of the same length')
        if noaxes:
            if (len(shape) > x.ndim):
                raise ValueError('shape requires more axes than are present')
            # Shape without axes: apply it to the trailing len(shape) axes.
            axes = range((x.ndim - len(shape)), x.ndim)
        # Substitute the input's extent wherever -1 was given.
        shape = [(x.shape[a] if (s == (- 1)) else s) for (s, a) in zip(shape, axes)]
    elif noaxes:
        # Neither given: transform every axis at its current size.
        shape = list(x.shape)
        axes = range(x.ndim)
    else:
        # Axes given but no shape: take the current sizes along those axes.
        shape = [x.shape[a] for a in axes]
    if any(((s < 1) for s in shape)):
        raise ValueError(f'invalid number of data points ({shape}) specified')
    return (tuple(shape), list(axes))
# NOTE(review): `_tf` and the bare string-call line inside the class look like
# truncated decorators (presumably `@require_tf` and `@unittest.skip(...)`) --
# confirm against the repo.
_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    """Pinned-output generation test for the TF Transfo-XL LM head model."""
    ('Skip test until #12651 is resolved.')
    def test_lm_generate_transfo_xl_wt103(self):
        """Greedy (do_sample=False) generation from transfo-xl-wt103 must
        reproduce the pinned expected_output_ids exactly."""
        model = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103')
        input_ids = tf.convert_to_tensor([[33, 1297, 2, 1, 1009, 4, 1109, 11739, 4762, 358, 5, 25, 245, 22, 1706, 17, 20098, 5, 3215, 21, 37, 1110, 3, 13, 1041, 4, 24, 603, 490, 2, 71477, 20098, 104447, 2, 20961, 1, 2604, 4, 1, 329, 3, 6224, 831, 16002, 2, 8, 603, 78967, 29546, 23, 803, 20, 25, 416, 5, 8, 232, 4, 277, 6, 1855, 4601, 3, 29546, 54, 8, 3609, 5, 57211, 49, 4, 1, 277, 18, 8, 1755, 15691, 3, 341, 25, 416, 693, 42573, 71, 17, 401, 94, 31, 17919, 2, 29546, 7873, 18, 1, 435, 23, 11011, 755, 5, 5167, 3, 7983, 98, 84, 2, 29546, 3267, 8, 3609, 4, 1, 4865, 1075, 2, 6087, 71, 6, 346, 8, 5854, 3, 29546, 824, 1400, 1868, 2, 19, 160, 2, 311, 8, 5496, 2, 20920, 17, 25, 15097, 3, 24, 24, 0]], dtype=tf.int32)
        # Expected ids: the prompt followed by the model's greedy continuation.
        expected_output_ids = [33, 1297, 2, 1, 1009, 4, 1109, 11739, 4762, 358, 5, 25, 245, 22, 1706, 17, 20098, 5, 3215, 21, 37, 1110, 3, 13, 1041, 4, 24, 603, 490, 2, 71477, 20098, 104447, 2, 20961, 1, 2604, 4, 1, 329, 3, 6224, 831, 16002, 2, 8, 603, 78967, 29546, 23, 803, 20, 25, 416, 5, 8, 232, 4, 277, 6, 1855, 4601, 3, 29546, 54, 8, 3609, 5, 57211, 49, 4, 1, 277, 18, 8, 1755, 15691, 3, 341, 25, 416, 693, 42573, 71, 17, 401, 94, 31, 17919, 2, 29546, 7873, 18, 1, 435, 23, 11011, 755, 5, 5167, 3, 7983, 98, 84, 2, 29546, 3267, 8, 3609, 4, 1, 4865, 1075, 2, 6087, 71, 6, 346, 8, 5854, 3, 29546, 824, 1400, 1868, 2, 19, 160, 2, 311, 8, 5496, 2, 20920, 17, 25, 15097, 3, 24, 24, 0, 33, 1, 1857, 2, 1, 1009, 4, 1109, 11739, 4762, 358, 5, 25, 245, 28, 1110, 3, 13, 1041, 4, 24, 603, 490, 2, 71477, 20098, 104447, 2, 20961, 1, 2604, 4, 1, 329, 3, 0]
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
def dist0(x, *, c=1.0, keepdim=False):
    """Distance of `x` from the origin at curvature `c` (delegates to _dist0)."""
    # Promote the curvature to a tensor matching x's dtype/device.
    curvature = torch.as_tensor(c).type_as(x)
    return _dist0(x, curvature, keepdim=keepdim)
def measure_cpu_gpu_instant_load():
    """Sample instantaneous CPU/GPU utilization and report it via `callback`.

    GPU sampling keeps a per-device running average in the module globals
    gpu_a_load / gpu_m_count; any NVML failure yields no GPU data (deliberate
    best-effort monitoring). The combined result is pushed through
    callback.update_status only when the CPU backend is available.
    """
    gpu_load = []
    if gpu_load_backend_ok:
        global gpu_a_load
        global gpu_m_count
        gpu_m_count += 1
        try:
            # Pick the device: communicator rank under multi-process training,
            # device 0 for a plain CUDA context, otherwise bail out (the bare
            # Exception is caught just below).
            comm = current_communicator()
            if comm:
                index = comm.local_rank
            elif ('cuda' in str(nn.get_current_context().backend)):
                index = 0
            else:
                raise Exception
            handler = nvml.nvmlDeviceGetHandleByIndex(index)
            gpu_load = [[index, nvml.nvmlDeviceGetUtilizationRates(handler).gpu]]
            if (index in gpu_a_load.keys()):
                gpu_a_load[index]['name'] = nvml.nvmlDeviceGetName(handler).decode('utf-8')
                o_load = gpu_a_load[index]['load']
                n_load = gpu_load[0][1]
                # Incremental running mean over all samples taken so far.
                gpu_a_load[index]['load'] = ((((gpu_m_count - 1) * o_load) + n_load) / gpu_m_count)
            else:
                gpu_a_load[index] = {'name': nvml.nvmlDeviceGetName(handler).decode('utf-8'), 'load': gpu_load[0][1]}
        except Exception:
            # Best-effort: report no GPU info rather than crash monitoring.
            gpu_load = []
    if cpu_load_backend_ok:
        global p_handler
        cpu_load = p_handler.cpu_percent()
        callback.update_status(('cpu_gpu_load', collect_and_shape_result(cpu_load, gpu_load)))
class NameMap(dict):
    """Mapping from SDFG to a per-SDFG name dictionary, created on demand."""
    def __getitem__(self, k):
        # Keys must be SDFGs; an empty sub-dict is inserted on first access.
        assert isinstance(k, SDFG)
        if (k not in self):
            self[k] = {}
        return super().__getitem__(k)
    def get(self, k):
        # NOTE(review): unlike dict.get this takes no default and auto-creates
        # the entry (it delegates to __getitem__) -- confirm that shadowing
        # dict.get with different semantics is intended.
        return self[k]
    def __setitem__(self, k, v) -> None:
        assert isinstance(k, SDFG)
        return super().__setitem__(k, v)
class TernaryTransformer(Transformer):
    """Lossy transformer that ternarizes data to {-m, 0, +m} with m = mean(|data|),
    then encodes the result as small integers plus an int->float lookup table."""

    def __init__(self):
        # Ternarization discards magnitude information, hence lossy.
        self.lossy = True

    def forward(self, data, **kwargs):
        """Ternarize `data` and return (int_array, metadata).

        Positive entries map to +mean(|data|), negative to -mean(|data|),
        zeros stay 0; `metadata['int_to_float']` carries the decoding table.
        """
        mean_topk = np.mean(np.abs(data))
        out_ = np.where((data > 0.0), mean_topk, 0.0)
        out = np.where((data < 0.0), (- mean_topk), out_)
        (int_array, int2float_map) = self._float_to_int(out)
        metadata = {'int_to_float': int2float_map}
        return (int_array, metadata)

    def backward(self, data, metadata, **kwargs):
        """Decode an integer array back to floats via the metadata table."""
        import copy
        # Work on a copy so the caller's array is not mutated in place.
        data = copy.deepcopy(data)
        int2float_map = metadata['int_to_float']
        for key in int2float_map:
            indices = (data == key)
            data[indices] = int2float_map[key]
        return data

    @staticmethod
    def _float_to_int(np_array):
        """Map each unique float value to a small int; return (ints, decode map).

        BUG FIX: this was declared without `self` yet called as an instance
        method (`self._float_to_int(out)`), which raised TypeError (2 args
        for 1 parameter). It uses no instance state, so it is now an
        explicit staticmethod.
        """
        flatten_array = np_array.reshape((- 1))
        unique_value_array = np.unique(flatten_array)
        int_array = np.zeros(flatten_array.shape, dtype=np.int32)
        int_to_float_map = {}
        float_to_int_map = {}
        for (idx, u_value) in enumerate(unique_value_array):
            int_to_float_map.update({idx: u_value})
            float_to_int_map.update({u_value: idx})
            # Mark every occurrence of this value with its integer code.
            indices = np.where((flatten_array == u_value))
            int_array[indices] = idx
        int_array = int_array.reshape(np_array.shape)
        return (int_array, int_to_float_map)
def test_cx():
    """cx(0, 1) on a fresh 2-qubit circuit must produce the CNOT unitary."""
    circuit = Circuit(2)
    circuit.cx(0, 1)
    cnot = array([[1, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 0, 1],
                  [0, 0, 1, 0]])
    assert array_equal(cnot, circuit.get_unitary_matrix())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.