code stringlengths 101 5.91M |
|---|
# NOTE(review): `_utils.test(...)` below is presumably a '@test_utils.test(...)'
# decorator whose '@test' prefix was lost in extraction; the inner `test()`
# likewise looks like it should carry '@ti.kernel'. TODO confirm against the
# original file.
_utils.test(debug=True)
def test_adjoint_checkbit_place_grad():
    """Placing x together with x.grad should enable the adjoint checkbit on
    x's SNode when taping with validation=True, while y -- placed without a
    grad field -- must not get one."""
    x = ti.field(float)
    y = ti.field(float)
    ti.root.place(x, x.grad, y)
    def test():
        x[None] = 1
    with ti.ad.Tape(loss=x, validation=True):
        test()
    assert x.snode.ptr.has_adjoint_checkbit()
    assert (not y.snode.ptr.has_adjoint_checkbit())
class ArcSoftmax(Linear):
    # ArcFace-style margin head: adds an additive angular margin `self.m` to
    # the target-class logits and rescales by `self.s` (both presumably set on
    # the Linear subclass; not visible here -- TODO confirm).
    def forward(self, logits, targets):
        """Apply an additive angular margin to `logits` in place and return it.

        `targets` holds class indices; entries equal to -1 are skipped.
        NOTE(review): mutates `logits` via acos_/cos_; this assumes logits are
        valid cosine values in [-1, 1], otherwise acos_ yields NaN -- confirm
        with the caller.
        """
        # Rows that actually have a target class.
        index = torch.where((targets != (- 1)))[0]
        # One-hot margin matrix: m at each selected row's target column.
        m_hot = torch.zeros(index.size()[0], logits.size()[1], device=logits.device, dtype=logits.dtype)
        m_hot.scatter_(1, targets[(index, None)], self.m)
        # Work in angle space: theta = acos(cos), add margin, map back, scale.
        logits.acos_()
        logits[index] += m_hot
        logits.cos_().mul_(self.s)
        return logits
def test_inlinepp_in_unroll():
    """dace test: `dace.inline` of a stateful callee inside `dace.unroll`
    must be evaluated once per unrolled iteration, yielding 12, 14, 16.

    NOTE(review): `tester` presumably lost a '@dace.program' decorator in
    extraction -- a plain function has no to_sdfg(). TODO confirm.
    """
    ctr = 11
    def stateful(i):
        # Each call bumps the closed-over counter, so results depend on call order.
        nonlocal ctr
        ctr += 1
        return (ctr + i)
    def tester(a: dace.float64[3]):
        for i in dace.unroll(range(3)):
            a[i] = dace.inline(stateful(i))
    sdfg = tester.to_sdfg()
    # Each unrolled iteration should have baked its own constant into a tasklet.
    assert _find_in_tasklet(sdfg, '12')
    assert _find_in_tasklet(sdfg, '14')
    assert _find_in_tasklet(sdfg, '16')
    a = np.random.rand(3)
    sdfg(a)
    assert np.allclose(a, np.array([12, 14, 16]))
def load_vocabulary(fn):
    """Read a vocabulary file and return the set of its stripped lines.

    Parameters
    ----------
    fn : str or path-like
        Path to a text file with one vocabulary entry per line.

    Returns
    -------
    set of str
        Unique entries with surrounding whitespace removed. A blank line
        contributes the empty string, matching the original behavior.
    """
    # Explicit encoding avoids platform-dependent defaults (e.g. cp1252 on
    # Windows) silently mis-decoding non-ASCII vocabulary entries.
    with open(fn, encoding='utf-8') as f:
        return {line.strip() for line in f}
# NOTE(review): the two lines below are presumably '@pytest.mark.parametrize(...)'
# and '@test_utils.test(...)' decorators whose prefixes were lost in extraction;
# the inner `fill` likewise looks like a '@ti.kernel'. TODO confirm.
.parametrize('dtype, storage_format', [(ti.f32, 'col_major'), (ti.f32, 'row_major'), (ti.f64, 'col_major'), (ti.f64, 'row_major')])
_utils.test(arch=ti.cpu)
def test_sparse_matrix_builder(dtype, storage_format):
    """Fill an n x n sparse matrix with A[i, j] = i + j via the triplet
    builder and verify every entry after build(), for each dtype/storage
    combination."""
    n = 8
    Abuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100, dtype=dtype, storage_format=storage_format)
    def fill(Abuilder: ti.types.sparse_matrix_builder()):
        for (i, j) in ti.ndrange(n, n):
            Abuilder[(i, j)] += (i + j)
    fill(Abuilder)
    A = Abuilder.build()
    for i in range(n):
        for j in range(n):
            assert (A[(i, j)] == (i + j))
class TrainState(object):
    """Bundles training components (optimizer, LR scheduler, networks) with
    checkpoint save/load/resume helpers.

    NOTE(review): save()/load() iterate over __dict__, so every non-None
    attribute other than `step` must expose state_dict()/load_state_dict().
    """

    def __init__(self, optimizer, lr_scheduler, step, nnet=None, nnet_ema=None):
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        # Global training step counter (saved/loaded separately from modules).
        self.step = step
        self.nnet = nnet
        # Optional exponential-moving-average copy of the network.
        self.nnet_ema = nnet_ema

    def ema_update(self, rate=0.9999):
        """Blend current nnet weights into nnet_ema at the given rate (no-op
        when no EMA network is attached)."""
        if (self.nnet_ema is not None):
            ema(self.nnet_ema, self.nnet, rate)

    def save(self, path):
        """Save `step` plus the state_dict of every other non-None attribute
        into directory `path`, one '<attr>.pth' file per attribute."""
        os.makedirs(path, exist_ok=True)
        torch.save(self.step, os.path.join(path, 'step.pth'))
        for (key, val) in self.__dict__.items():
            if ((key != 'step') and (val is not None)):
                torch.save(val.state_dict(), os.path.join(path, f'{key}.pth'))

    def load(self, path):
        """Inverse of save(): restore `step` and each attribute's state_dict
        (tensors are mapped to CPU first)."""
        logging.info(f'load from {path}')
        self.step = torch.load(os.path.join(path, 'step.pth'))
        for (key, val) in self.__dict__.items():
            if ((key != 'step') and (val is not None)):
                val.load_state_dict(torch.load(os.path.join(path, f'{key}.pth'), map_location='cpu'))

    def resume(self, ckpt_root, step=None):
        """Load the checkpoint for `step`, or the latest checkpoint found
        under `ckpt_root` (entries named '<step>.ckpt'). Silently does
        nothing when the root or any checkpoint is missing."""
        if (not os.path.exists(ckpt_root)):
            return
        if (step is None):
            ckpts = list(filter((lambda x: ('.ckpt' in x)), os.listdir(ckpt_root)))
            if (not ckpts):
                return
            # Checkpoint names are '<int>.ckpt'; pick the highest step.
            steps = map((lambda x: int(x.split('.')[0])), ckpts)
            step = max(steps)
        ckpt_path = os.path.join(ckpt_root, f'{step}.ckpt')
        logging.info(f'resume from {ckpt_path}')
        self.load(ckpt_path)

    def to(self, device):
        """Move every nn.Module attribute to `device` (optimizer state is
        intentionally left untouched)."""
        for (key, val) in self.__dict__.items():
            if isinstance(val, nn.Module):
                val.to(device)
# NOTE(review): `_model` below is presumably a '@register_model' decorator whose
# prefix was lost in extraction -- TODO confirm against the original file.
_model
def tresnet_l_448(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """Construct a TResNet-Large model variant for 448x448 inputs.

    Args:
        pretrained: when True, load weights via load_pretrained().
        num_classes: classifier output size.
        in_chans: number of input image channels.
        **kwargs: forwarded to the TResNet constructor.
    Returns the constructed model with `default_cfg` attached.
    """
    default_cfg = default_cfgs['tresnet_l_448']
    model = TResNet(layers=[4, 5, 18, 3], num_classes=num_classes, in_chans=in_chans, width_factor=1.2, **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
def relaxed_average(var_name_suffix, rx_step):
    """Average copies of a variable across relaxation scopes RX0..RX{rx_step-1}.

    Looks up `var_name_suffix` (reusing existing variables) in each 'RX%d'
    scope, skipping scopes where it does not exist, and returns:
      (avg, total_sq_dev) where avg is the element-wise mean tensor and
      total_sq_dev is the summed squared deviation of each copy from avg.
    """
    relaxed_vars = []
    # `range` instead of Python-2-only `xrange` (the original would raise
    # NameError on Python 3); behavior is identical here.
    for l in range(rx_step):
        with tf.variable_scope(('RX%d' % l), reuse=True):
            try:
                relaxed_vars.append(tf.get_variable(var_name_suffix))
            except ValueError:
                # The variable may not exist in every relaxation scope.
                pass
    dsum = tf.add_n(relaxed_vars)
    avg = (dsum / len(relaxed_vars))
    diff = [(v - avg) for v in relaxed_vars]
    davg = tf.add_n([(d * d) for d in diff])
    return (avg, tf.reduce_sum(davg))
class Inception(nn.Module):
    """GoogLeNet-style Inception block using Elliott activations.

    Four parallel branches (1x1; 1x1->3x3; 1x1->3x3->3x3; maxpool->1x1) are
    applied to the same input and concatenated along the channel dimension.
    """

    def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
        super(Inception, self).__init__()

        def unit(cin, cout, ksize, pad=0):
            # The repeated conv -> batchnorm -> Elliott motif of every branch.
            return [nn.Conv2d(cin, cout, kernel_size=ksize, padding=pad), nn.BatchNorm2d(cout), Elliott()]

        self.b1 = nn.Sequential(*unit(in_planes, n1x1, 1))
        self.b2 = nn.Sequential(*(unit(in_planes, n3x3red, 1) + unit(n3x3red, n3x3, 3, 1)))
        self.b3 = nn.Sequential(*(unit(in_planes, n5x5red, 1) + unit(n5x5red, n5x5, 3, 1) + unit(n5x5, n5x5, 3, 1)))
        self.b4 = nn.Sequential(nn.MaxPool2d(3, stride=1, padding=1), *unit(in_planes, pool_planes, 1))

    def forward(self, x):
        # Run every branch on the same input, then stack results channel-wise.
        branch_outputs = [branch(x) for branch in (self.b1, self.b2, self.b3, self.b4)]
        return torch.cat(branch_outputs, 1)
def swap_word(new_words):
    """Swap the words at two distinct random positions of `new_words` in place.

    Gives up and returns the list unchanged if a distinct second index is not
    found within a few random draws. Returns the same list object.

    Fix: the original crashed with ValueError on an empty list, because
    random.randint(0, -1) has an empty range; lists shorter than two elements
    are now returned unchanged immediately.
    """
    # Nothing to swap with fewer than two words.
    if len(new_words) < 2:
        return new_words
    random_idx_1 = random.randint(0, len(new_words) - 1)
    random_idx_2 = random_idx_1
    counter = 0
    while random_idx_2 == random_idx_1:
        random_idx_2 = random.randint(0, len(new_words) - 1)
        counter += 1
        # Best-effort: avoid looping forever on unlucky draws.
        if counter > 3:
            return new_words
    new_words[random_idx_1], new_words[random_idx_2] = new_words[random_idx_2], new_words[random_idx_1]
    return new_words
class EFDTInactiveLearningNodeMC(InactiveLearningNodeMC):
    """Inactive (non-splitting) majority-class leaf node for EFDT trees."""

    def __init__(self, initial_stats=None):
        """Forward the optional initial class-observation stats to the base leaf."""
        super().__init__(initial_stats)

    @staticmethod
    def count_nodes():
        """Return this subtree's node counts as [decision nodes, leaf nodes].

        Fix: the original defined count_nodes() with neither `self` nor
        `@staticmethod`, so calling it on an instance raised TypeError
        (unexpected positional argument).
        """
        # An inactive leaf contributes zero decision nodes and one leaf.
        return np.array([0, 1])
class SegmentMap(object):
    """Key/value map of segment entries, serialized as a simple XML document
    (gzip-compressed transparently when the path ends in '.gz')."""

    def __init__(self):
        # Ordered list of SegmentMapItem entries.
        self.map_entries = []

    def load(self, path):
        """Parse `<map-item key=... value=...>` elements from `path` into
        SegmentMapItem entries, appending to any existing entries."""
        open_fun = (gzip.open if path.endswith('.gz') else open)
        with open_fun(path, 'rb') as f:
            # Stream-parse; 'start' events are enough since we only read attributes.
            for (event, elem) in ET.iterparse(f, events=('start',)):
                if (elem.tag == 'map-item'):
                    # (Removed a dead `elem = elem` self-assignment.)
                    item = SegmentMapItem()
                    item.key = elem.attrib['key']
                    item.value = elem.attrib['value']
                    self.map_entries.append(item)

    def dump(self, path):
        """Write all entries to `path` as a `<segment-key-map>` XML document,
        delegating per-entry serialization to each item's dump()."""
        open_fun = (gzip.open if path.endswith('.gz') else open)
        with open_fun(path, 'wt') as f:
            f.write('<?xml version="1.0" encoding="utf8"?>\n')
            f.write('<segment-key-map>\n')
            for s in self.map_entries:
                s.dump(f)
            f.write('</segment-key-map>\n')
class IdentityMessage(torch.nn.Module):
    """Message function that simply concatenates its inputs.

    The output width is raw_msg_dim + 2 * memory_dim + time_dim: source
    memory, destination memory, raw message and time encoding side by side.
    """

    def __init__(self, raw_msg_dim: int, memory_dim: int, time_dim: int):
        super().__init__()
        self.out_channels = raw_msg_dim + 2 * memory_dim + time_dim

    def forward(self, z_src: Tensor, z_dst: Tensor, raw_msg: Tensor, t_enc: Tensor):
        # Identity "message": no transformation, just feature concatenation
        # along the last (feature) dimension.
        parts = [z_src, z_dst, raw_msg, t_enc]
        return torch.cat(parts, dim=-1)
def test_get_init_seq_string_seed_lowercase(esm_sampler_fixture):
    """A lowercase string seed should still be tokenized correctly by
    get_init_seq, with the remaining positions filled by the mask token."""
    sampler = esm_sampler_fixture
    out = sampler.get_init_seq('aa', 5, 1)
    # One batch row of length 6: presumably [BOS=32, 'a'=5, 'a'=5, mask=33 x3]
    # per the fixture's alphabet -- TODO confirm token ids against the fixture.
    expected = [[32, 5, 5, 33, 33, 33]]
    assert (out.tolist() == expected)
def text(string):
    """Coerce `string` into a renderable document node.

    Doc instances pass through untouched, plain `str` values become `_Text`
    nodes, and anything else is rendered via `prepr`.
    """
    if isinstance(string, Doc):
        return string
    return _Text(string) if isinstance(string, str) else prepr(string)
class Generator(object):
    """Abstract base for step-wise sequence generators.

    Subclasses implement init_history(), get_next(), stop_or_not() and
    max_context_size(); generate()/generate_custom() drive the loop and
    truncate the history once it grows past twice the maximum context size.
    """
    # NOTE(review): Python-2 style metaclass declaration; has no effect on
    # Python 3 (kept for backward compatibility with the original).
    __metaclass__ = ABCMeta

    def init_history(self):
        """Return the initial history; abstract placeholder (returns None)."""
        pass

    def get_next(self, history):
        """Return the next item given `history`; abstract placeholder."""
        pass

    def stop_or_not(self, history):
        """Return True when generation should stop; abstract placeholder."""
        pass

    def max_context_size(self):
        """Maximum number of history items get_next() may need."""
        raise NotImplementedError

    def truncate_history(self, history):
        """Drop old items once history exceeds twice the max context size.

        Fix: the original used `self.max_context_size` without calling it,
        so `2 * self.max_context_size` multiplied an int by a bound method
        and raised TypeError; the method's value is now used.
        """
        limit = self.max_context_size()
        if len(history) > 2 * limit:
            return list(last_k(history, limit))
        return history

    def generate(self, history=None):
        """Generate items, starting from `history` or a fresh one."""
        if not history:
            history = self.init_history()
        return self.generate_custom(history, self.get_next, self.stop_or_not)

    def generate_custom(self, history, next_fxn, stop_fxn):
        """Repeatedly apply next_fxn and collect results until stop_fxn fires.

        The item that triggers the stop is appended to the history but NOT
        included in the returned list (matching the original behavior).
        """
        generated = []
        history = list(history)
        while True:
            item = next_fxn(history)  # renamed from `next` to avoid shadowing the builtin
            history.append(item)
            history = self.truncate_history(history)
            if stop_fxn(history):
                break
            generated.append(item)
        return generated
def collect_hidden_states(trained_model, data: List[str], word2vec, i2p, pr=None):
    """Run `trained_model` over `data` and collect hidden states plus metadata.

    When `pr` is given, only entries whose profession (looked up through i2p)
    equals `pr` are kept. Returns a 5-tuple of numpy arrays:
    (states, labels, inputs, outputs, genders); labels and genders both hold
    each kept entry's gender field.
    """
    trained_model.eval()
    states, labels, inputs, outputs, genders = [], [], [], [], []
    dataset = Dataset(data, word2vec)
    loader = torch.utils.data.DataLoader(dataset, batch_size=1, drop_last=False, shuffle=False)
    # Walk the loader and the raw entries in lockstep (batch_size=1, no shuffle).
    for (x, y), entry in tqdm.tqdm(zip(loader, data), total=len(data)):
        gender = entry['g']
        profession = i2p[entry['p']]
        # Optional filtering to a single profession.
        if pr is not None and profession != pr:
            continue
        states.append(trained_model.get_hidden_state(x).squeeze())
        labels.append(gender)
        inputs.append(x.detach().cpu().numpy())
        outputs.append(y.detach().cpu().numpy())
        genders.append(gender)
    return (np.array(states), np.array(labels), np.array(inputs), np.array(outputs), np.array(genders))
# NOTE(review): `_torch` below is presumably a '@require_torch' decorator whose
# prefix was lost in extraction; the bare `(reason='...')` expressions before
# several tests likewise look like '@unittest.skip(reason=...)' remnants (as
# written they are not even valid standalone Python). TODO confirm against the
# original transformers test file.
_torch
class EfficientFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite model tests for EfficientFormer (a vision model: no
    inputs_embeds, no input/output embeddings, pixel_values-only signature)."""
    all_model_classes = ((EfficientFormerModel, EfficientFormerForImageClassificationWithTeacher, EfficientFormerForImageClassification) if is_torch_available() else ())
    pipeline_model_mapping = ({'feature-extraction': EfficientFormerModel, 'image-classification': (EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher)} if is_torch_available() else {})
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = EfficientFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    (reason='EfficientFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    (reason='EfficientFormer does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        """forward() of every model class should start with `pixel_values`."""
        (config, _) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        """Hidden states must have the expected count and trailing shape, both
        when requested via inputs and via the config flag."""
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = (outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states)
            expected_num_layers = getattr(self.model_tester, 'expected_num_hidden_layers', (self.model_tester.num_hidden_layers + 1))
            self.assertEqual(len(hidden_states), expected_num_layers)
            if hasattr(self.model_tester, 'encoder_seq_length'):
                seq_length = self.model_tester.encoder_seq_length
                if (hasattr(self.model_tester, 'chunk_length') and (self.model_tester.chunk_length > 1)):
                    seq_length = (seq_length * self.model_tester.chunk_length)
            else:
                seq_length = self.model_tester.seq_length
            self.assertListEqual(list(hidden_states[(- 1)].shape[(- 2):]), [seq_length, self.model_tester.hidden_size])
            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, 'seq_length', None)
                decoder_seq_length = getattr(self.model_tester, 'decoder_seq_length', seq_len)
                self.assertListEqual(list(hidden_states[(- 1)].shape[(- 2):]), [decoder_seq_length, self.model_tester.hidden_size])
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # First via the per-call flag, then via the config flag.
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # The teacher-distillation head takes no labels; drop them.
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if (model_class.__name__ == 'EfficientFormerForImageClassificationWithTeacher'):
                del inputs_dict['labels']
        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    (reason='EfficientFormer does not implement masked image modeling yet')
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        """Loss should be computable and backpropagatable for trainable heads."""
        if (not self.model_tester.is_training):
            return
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # The base model and the teacher head produce no training loss.
            if ((model_class in get_values(MODEL_MAPPING)) or (model_class.__name__ == 'EfficientFormerForImageClassificationWithTeacher')):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        """Each classification problem_type must train without shape warnings."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float}, {'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long}, {'title': 'regression', 'num_labels': 1, 'dtype': torch.float}]
        for model_class in self.all_model_classes:
            if ((model_class not in [*get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)]) or (model_class.__name__ == 'EfficientFormerForImageClassificationWithTeacher')):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type['title']
                    config.num_labels = problem_type['num_labels']
                    model = model_class(config)
                    model.to(torch_device)
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                    if (problem_type['num_labels'] > 1):
                        inputs['labels'] = inputs['labels'].unsqueeze(1).repeat(1, problem_type['num_labels'])
                    inputs['labels'] = inputs['labels'].to(problem_type['dtype'])
                    # A target-size warning here means the wrong loss was selected.
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                        for w in warning_list:
                            if ('Using a target size that is different to the input size' in str(w.message)):
                                raise ValueError(f'Something is going wrong in the regression problem: intercepted {w.message}')
                    loss.backward()

    def test_model_from_pretrained(self):
        for model_name in EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        """Attention tensors must have the expected count and shapes, both when
        requested via inputs and via the config flag."""
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, 'seq_length', None)
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', seq_len)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)
        chunk_length = getattr(self.model_tester, 'chunk_length', None)
        if ((chunk_length is not None) and hasattr(self.model_tester, 'num_hashes')):
            encoder_seq_length = (encoder_seq_length * self.model_tester.num_hashes)
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)
            # Check again with the flag set on the config instead of the inputs.
            del inputs_dict['output_attentions']
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)
            if (chunk_length is not None):
                self.assertListEqual(list(attentions[0].shape[(- 4):]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length])
            else:
                self.assertListEqual(list(attentions[0].shape[(- 3):]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length])
def validate_arguments(func, args, kwargs, drop_extra=True):
    """Validate `args`/`kwargs` against `func`'s signature.

    Raises ArgumentValidationError when required arguments are missing, or —
    unless `drop_extra` is true — when unexpected positional/keyword
    arguments are supplied. Returns the normalized (args, kwargs) pair with
    args as a tuple.
    """
    parse = _parse_signature(func)
    args, kwargs, missing, extra, extra_positional = parse(args, kwargs)[:5]
    if missing:
        raise ArgumentValidationError(tuple(missing))
    if (extra or extra_positional) and not drop_extra:
        raise ArgumentValidationError(None, extra, extra_positional)
    return tuple(args), kwargs
def traverse_net(max_node):
    """Enumerate all NATS-Bench cell architectures with up to `max_node` nodes.

    Shuffles them with a fixed seed (88) for reproducibility, sanity-checks a
    few known positions in the shuffled order, and returns the architecture
    strings.
    """
    aa_nas_bench_ss = get_search_spaces('cell', 'nats-bench')
    archs = CellStructure.gen_all(aa_nas_bench_ss, max_node, False)
    # Expected total: |ops| ** number_of_edges, with max_node*(max_node-1)/2 edges.
    print('There are {:} archs vs {:}.'.format(len(archs), (len(aa_nas_bench_ss) ** (((max_node - 1) * max_node) / 2))))
    random.seed(88)
    random.shuffle(archs)
    # Spot checks are valid only because the seed above fixes the shuffle order.
    assert (archs[0].tostr() == '|avg_pool_3x3~0|+|nor_conv_1x1~0|skip_connect~1|+|nor_conv_1x1~0|skip_connect~1|skip_connect~2|'), 'please check the 0-th architecture : {:}'.format(archs[0])
    assert (archs[9].tostr() == '|avg_pool_3x3~0|+|none~0|none~1|+|skip_connect~0|none~1|nor_conv_3x3~2|'), 'please check the 9-th architecture : {:}'.format(archs[9])
    assert (archs[123].tostr() == '|avg_pool_3x3~0|+|avg_pool_3x3~0|nor_conv_1x1~1|+|none~0|avg_pool_3x3~1|nor_conv_3x3~2|'), 'please check the 123-th architecture : {:}'.format(archs[123])
    return [x.tostr() for x in archs]
class ConvRNNBlock(nn.Module):
    """Bidirectional ConvRNN followed by a 1x1 convolution merging directions.

    The hidden state is allocated once for a fixed `batch_size` and detached
    from the previous computation graph between sequences via reinit_hidden().
    """

    def __init__(self, batch_size, in_channels, shape, num_filter, kernel_size):
        super(ConvRNNBlock, self).__init__()
        self.conv_rnn = ConvRNN(in_channels, shape, num_filter, kernel_size)
        # 1x1 conv compresses the concatenated fwd/bwd features back to num_filter.
        self.conv = TimeDistributed(nn.Conv2d((2 * num_filter), num_filter, kernel_size=1, bias=True))
        self._hidden_state = self.conv_rnn.init_hidden(batch_size)

    def forward(self, x):
        (forward_out, backward_out) = self.conv_rnn(x, self._hidden_state)
        # Concatenate directions along dim=2 -- presumably the channel axis of
        # [batch, time, channels, H, W] time-distributed tensors; TODO confirm
        # against ConvRNN's output layout.
        out = torch.cat((forward_out, backward_out), dim=2)
        out = F.relu(self.conv(out))
        return out

    def reinit_hidden(self):
        """Detach the hidden state from the previous graph (BPTT truncation)."""
        self._hidden_state = repackage_hidden(self._hidden_state)
# NOTE(review): `_args(...)` below is presumably a "@parse_args('v', ...)"
# decorator whose prefix was lost in extraction -- TODO confirm.
_args('v', 'v', 'v', 'is', 'is', 'is', 'i')
def conv3d(g, input, weight, bias, stride, padding, dilation, groups):
    """ONNX symbolic for conv3d: delegates to the generic _convolution helper
    with transposed=False, empty output_padding, and no benchmark/deterministic
    /cudnn/allow_tf32 flags."""
    return _convolution(g, input, weight, bias, stride, padding, dilation, False, (), groups, None, None, None, None)
def _skip_pytest_case_requiring_pooch(data_filename):
if ('PYTEST_CURRENT_TEST' in os.environ):
import pytest
pytest.skip(f'Unable to download {data_filename}', allow_module_level=True) |
# NOTE(review): the bare `()` below is presumably the remnant of a decorator
# (e.g. '@pytest.fixture()') whose name was lost in extraction -- TODO confirm.
()
def plus_test_with_type_name_assertion() -> tc.TestCase:
    """Build a TestCase for tests.fixtures.linecoverage.plus whose final
    statement carries a TypeNameAssertion that its return value is a
    builtins.int."""
    cluster = generate_test_cluster('tests.fixtures.linecoverage.plus')
    transformer = AstToTestCaseTransformer(cluster, False, EmptyConstantProvider())
    transformer.visit(ast.parse('def test_case_0():\n int_0 = 42\n plus_0 = module_0.Plus()\n int_1 = plus_0.plus_four(int_0)\n'))
    test_case = transformer.testcases[0]
    # Assert on the return value of the last statement (plus_four's result).
    test_case.statements[(- 1)].add_assertion(ass.TypeNameAssertion(test_case.statements[(- 1)].ret_val, 'builtins', 'int'))
    return test_case
def _subset_has_indirection(subset, pvisitor: 'ProgramVisitor'=None):
    """Return True if any dimension of `subset` involves indirect access.

    A range expression counts as indirect when it contains sympy function
    calls, or — when `pvisitor` is given — a free symbol that resolves to a
    data array in the visitor's SDFG.
    """
    for dim in subset:
        # A dimension may be a (begin, end, step) tuple or a single expression.
        if (not isinstance(dim, tuple)):
            dim = [dim]
        for r in dim:
            if (not symbolic.issymbolic(r)):
                continue
            if symbolic.contains_sympy_functions(r):
                return True
            if pvisitor:
                for s in r.free_symbols:
                    try:
                        name = pvisitor._visitname(str(s), None)
                        if (isinstance(name, str) and (name in pvisitor.sdfg.arrays)):
                            return True
                    except DaceSyntaxError:
                        # An unresolvable name is not evidence of indirection.
                        continue
    return False
class GenieModelForClassification(GenieModel):
    """Genie wrapper around a HuggingFace classification model."""

    def _init_common(self, args, tasks, **kwargs):
        """Build the HF config (label count from args or from the tasks),
        derive the embedding dimension, and set the language pair."""
        self.args = args
        num_labels = 0
        if (args.num_labels is not None):
            num_labels = args.num_labels
        else:
            # Take the widest label space across all tasks that declare one.
            for task in tasks:
                if hasattr(task, 'num_labels'):
                    num_labels = max(num_labels, task.num_labels)
        config = AutoConfig.from_pretrained(args.pretrained_model, cache_dir=args.embeddings, num_labels=num_labels, finetuning_task='ned')
        super().__init__(config)
        # Encoder-decoder configs expose d_model; encoder-only ones hidden_size.
        if hasattr(config, 'd_model'):
            args.dimension = config.d_model
        else:
            args.dimension = config.hidden_size
        (self.src_lang, self.tgt_lang) = adjust_language_code(config, args.pretrained_model, kwargs.get('src_lang', 'en'), kwargs.get('tgt_lang', 'en'))

    def add_new_vocab_from_data(self, tasks, resize_decoder=False):
        """Extend the vocabulary from the tasks' data, then grow the model's
        token embeddings to match."""
        super().add_new_vocab_from_data(tasks, resize_decoder)
        self.model.resize_token_embeddings(self.numericalizer.num_tokens)

    def forward(self, *input, **kwargs):
        """Training: compute loss on the batch (first positional argument);
        eval: delegate directly to the underlying HF model."""
        if self.training:
            batch = input[0]
            outputs = self.model(batch.context.value, labels=batch.answer.value, attention_mask=(batch.context.value != self.numericalizer.pad_id))
            return outputs
        else:
            return self.model(**kwargs)

    def validate(self, data_iterator, task, original_order=None, disable_progbar=True, **kwargs):
        """Evaluate over `data_iterator`: map predicted/gold label ids back to
        label strings (skipping answer-padding positions), accumulate the
        mean loss, and restore the original example order when provided."""
        total_loss = 0.0
        all_example_ids = []
        all_answers = []
        all_contexts = []
        all_predictions = []
        for batch in progress_bar(data_iterator, desc='Generating', disable=disable_progbar):
            batch_example_ids = batch.example_id
            batch_context = self.numericalizer.reverse(batch.context.value.data, 'context')
            all_example_ids += batch_example_ids
            output = self.forward(input_ids=batch.context.value, attention_mask=(batch.context.value != self.numericalizer.pad_id), labels=batch.answer.value)
            labels = batch.answer.value.tolist()
            logits = output.logits
            predictions = torch.argmax(logits, dim=(- 1)).tolist()
            if (logits.dim() == 2):
                # Sequence classification yields one prediction per example;
                # wrap it so the token-level loop below works uniformly.
                predictions = [[p] for p in predictions]
            processed_preds = []
            processed_labels = []
            for (pred, label) in zip(predictions, labels):
                preds_list = []
                labels_list = []
                for (p_, l_) in zip(pred, label):
                    # Padding positions carry no gold label; skip them.
                    if (l_ == self.numericalizer.answer_pad_id):
                        continue
                    preds_list.append(task.id2label[p_])
                    labels_list.append(task.id2label[l_])
                processed_preds.append([' '.join(preds_list)])
                processed_labels.append(' '.join(labels_list))
            all_contexts += batch_context
            all_answers += processed_labels
            all_predictions += processed_preds
            total_loss += output.loss
        total_loss /= len(all_example_ids)
        if (original_order is not None):
            # Sort all parallel lists back into the dataset's original order.
            (original_order, all_example_ids, all_predictions, all_answers, all_contexts) = [list(a) for a in tuple(zip(*sorted(list(zip(original_order, all_example_ids, all_predictions, all_answers, all_contexts)))))]
        output = ValidationOutput(loss=total_loss, example_ids=all_example_ids, contexts=all_contexts, answers=all_answers, predictions=all_predictions)
        return output
def get_tables_with_alias(schema, toks):
    """Return a mapping from alias (or table name) to table name.

    Aliases are harvested from the token stream via scan_alias(); every real
    table from the schema is additionally mapped to itself. An alias that
    collides with a schema table name is rejected.
    """
    tables = scan_alias(toks)
    for table_name in schema:
        assert table_name not in tables, 'Alias {} has the same name in table'.format(table_name)
        tables[table_name] = table_name
    return tables
def main(args):
    """Parse a whitespace-separated records file (`args.src`) into a nested
    dict and dump it as JSON to `args.dst`.

    Each input line is one of:
      * ``<obj> LE <value>``            -> appended to data[obj]['LE'] (a list)
      * ``<obj> <kind> <value> <key>``  -> appended to data[obj][kind][key];
        values stay strings for kind 'RENT' and are converted to int otherwise.

    Improvements over the original: files are handled with context managers,
    membership tests use the dict directly instead of ``.keys()``, the manual
    line cursor is replaced with iteration, and a dead ``data[components[0]]``
    expression statement was removed. Output is unchanged.
    """
    with open(args.src) as f:
        lines = f.readlines()
    data = {}
    for raw_line in lines:
        components = raw_line.rstrip().split(' ')
        obj = data.setdefault(components[0], {})
        if components[1] not in obj:
            # 3-column lines ('LE') collect into a list; 4-column lines
            # collect into a per-key dict.
            obj[components[1]] = [] if len(components) == 3 else {}
        if components[1] == 'LE':
            obj[components[1]] = obj[components[1]] + [components[2]]
        else:
            vulnObj = obj[components[1]]
            if components[3] not in vulnObj:
                vulnObj[components[3]] = []
            if components[1] == 'RENT':
                vulnObj[components[3]] = vulnObj[components[3]] + [components[2]]
            else:
                vulnObj[components[3]] = vulnObj[components[3]] + [int(components[2])]
            obj[components[1]] = vulnObj
    with open(args.dst, 'w') as fp:
        json.dump(data, fp, sort_keys=True, indent=4)
def _rev_from_version(version):
    """Extract the commit revision from a version string.

    For ``<prefix>-<rev>`` the part after the last dash is taken; a string
    without a dash is treated as a bare revision. Either way the candidate
    is validated via _simple_validate_commit_rev before being returned.
    """
    _head, dash, tail = version.rpartition('-')
    candidate = tail if dash else version
    _simple_validate_commit_rev(candidate)
    return candidate
class CDF(MutableMapping, spacepy.datamodel.MetaMixin):
backward = False
def __init__(self, pathname, masterpath=None, create=None, readonly=None, encoding='utf-8'):
if (masterpath is not None):
if (create is False):
raise ValueError('Cannot specify a master CDF without creating a CDF')
if (readonly is True):
raise ValueError('Cannot create a CDF in readonly mode')
if (create and readonly):
raise ValueError('Cannot create a CDF in readonly mode')
try:
self.pathname = pathname.encode()
except AttributeError:
raise ValueError('pathname must be string-like: {0}'.format(pathname))
self._handle = ctypes.c_void_p(None)
self._opened = False
self.encoding = encoding
if ((masterpath is None) and (not create)):
self._open((True if (readonly is None) else readonly))
elif masterpath:
self._from_master(masterpath.encode())
self._check_enc()
else:
self._create()
self._check_enc()
lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2))
self._attrlistref = weakref.ref(gAttrList(self))
self.backward = (self.version()[0] < 3)
self._var_nums = {}
self._attr_info = {}
def __del__(self):
if self._opened:
self.close()
def __delitem__(self, name):
self[name]._delete()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __getitem__(self, name):
try:
return Var(self, name)
except CDFException as e:
raise KeyError('{0}: {1}'.format(name, e))
def __setitem__(self, name, data):
if isinstance(data, Var):
self.clone(data, name)
elif (name in self):
self[name][...] = data
if hasattr(data, 'attrs'):
self[name].attrs.clone(data.attrs)
else:
self.new(name, data)
def __iter__(self, current=0):
while (current < self.__len__()):
name = self[current].name()
value = (yield name)
if (value is None):
current += 1
else:
current = self[value]._num()
current += 1
def __len__(self):
count = ctypes.c_long(0)
self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count))
return count.value
def __contains__(self, key):
if isinstance(key, str):
key = key.encode('ascii')
key = key.rstrip()
if (key in self._var_nums):
return True
status = self._call(const.CONFIRM_, const.zVAR_EXISTENCE_, key, ignore=(const.NO_SUCH_VAR,))
return (status != const.NO_SUCH_VAR)
def __repr__(self):
return (('<CDF:\n' + str(self)) + '\n>')
def __str__(self):
if self._opened:
return '\n'.join([((key + ': ') + str(value)) for (key, value) in sorted(self.items())])
elif isinstance(self.pathname, str):
return 'Closed CDF {0}'.format(self.pathname)
else:
return 'Closed CDF {0}'.format(self.pathname.decode('ascii'))
def _open(self, readonly=True):
lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle))
self._opened = True
if readonly:
self.readonly(readonly)
else:
self._check_enc()
def _create(self):
lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0), (ctypes.c_long * 1)(0), ctypes.byref(self._handle))
self._opened = True
def _from_master(self, master_path):
if os.path.exists(self.pathname):
raise CDFError(const.CDF_EXISTS)
shutil.copy2(master_path, self.pathname)
self._open(False)
def _check_enc(self):
if (self.encoding not in ('ascii', 'utf-8')):
warnings.warn('Opening CDF for write with nonstandard encoding {}'.format(self.encoding))
def from_data(cls, filename, sd):
with cls(filename, '') as cdffile:
for k in sd:
cdffile[k] = sd[k]
cdffile.attrs.clone(sd.attrs)
def _call(self, *args, **kwargs):
return lib.call(const.SELECT_, const.CDF_, self._handle, *args, **kwargs)
def clone(self, zVar, name=None, data=True):
if (name is None):
name = zVar.name()
if (name in self):
del self[name]
self.new(name, type=zVar.type(), recVary=zVar.rv(), dimVarys=zVar.dv(), dims=zVar._dim_sizes(), n_elements=zVar.nelems())
self[name].compress(*zVar.compress())
self[name].attrs.clone(zVar.attrs)
if data:
r = zVar._raw
zVar._raw = True
self.raw_var(name)[...] = zVar[...]
zVar._raw = r
return zVar
def col_major(self, new_col=None):
if (new_col != None):
new_maj = (const.COLUMN_MAJOR if new_col else const.ROW_MAJOR)
self._call(const.PUT_, const.CDF_MAJORITY_, new_maj)
maj = ctypes.c_long(0)
self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj))
if (not (maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value))):
raise CDFError(const.BAD_MAJORITY)
return (maj.value == const.COLUMN_MAJOR.value)
def readonly(self, ro=None):
if (ro == True):
self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYon)
elif (ro == False):
self._call(const.SELECT_, const.CDF_READONLY_MODE_, const.READONLYoff)
self._check_enc()
mode = ctypes.c_long(0)
self._call(const.CONFIRM_, const.CDF_READONLY_MODE_, ctypes.byref(mode))
if (mode.value == const.READONLYon.value):
return True
elif (mode.value == const.READONLYoff.value):
return False
else:
raise CDFError(const.BAD_READONLY_MODE.value)
def checksum(self, new_val=None):
if (new_val != None):
self._call(const.PUT_, const.CDF_CHECKSUM_, (const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM))
chk = ctypes.c_long(0)
self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk))
if (not (chk.value in (const.MD5_CHECKSUM.value, const.NO_CHECKSUM.value))):
raise CDFError(const.BAD_CHECKSUM)
return (chk.value == const.MD5_CHECKSUM.value)
def close(self):
self._call(const.CLOSE_, const.CDF_)
self._opened = False
def compress(self, comptype=None, param=None):
return _compress(self, comptype, param)
def new(self, name, data=None, type=None, recVary=None, dimVarys=None, dims=None, n_elements=None, compress=None, compress_param=None, sparse=None, pad=None):
if hasattr(data, 'compress'):
try:
(c, cp) = data.compress()
except:
pass
else:
if (compress is None):
compress = c
if (compress_param is None):
compress_param = cp
if (hasattr(data, 'sparse') and (sparse is None)):
sparse = data.sparse()
if (hasattr(data, 'pad') and (pad is None)):
pad = data.pad()
if (recVary is None):
recVary = (data.rv() if hasattr(data, 'rv') else True)
if ((dimVarys is None) and hasattr(data, 'dv') and hasattr(data, 'shape')):
dv = data.dv()
if ((len(dv) + int(recVary)) == len(data.shape)):
dimVarys = dv
if ((type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000)) and self.backward):
raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 in backward-compatible CDF')
if (data is None):
if (type is None):
raise ValueError('Must provide either data or a CDF type.')
if (dims is None):
dims = []
if (n_elements is None):
n_elements = 1
else:
(guess_dims, guess_types, guess_elements) = _Hyperslice.types(data, encoding=self.encoding)
if (dims is None):
if recVary:
if (guess_dims == ()):
raise ValueError('Record-varying data cannot be scalar. Specify NRV with CDF.new() or put data in array.')
dims = guess_dims[1:]
else:
dims = guess_dims
if (type is None):
type = guess_types[0]
if ((type in (const.CDF_EPOCH16.value, const.CDF_TIME_TT2000.value)) and self.backward):
type = const.CDF_EPOCH
if (n_elements is None):
n_elements = guess_elements
if (dimVarys is None):
dimVarys = [True for i in dims]
recVary = (const.VARY if recVary else const.NOVARY)
dimVarys = [(const.VARY if dimVary else const.NOVARY) for dimVary in dimVarys]
if (not hasattr(type, 'value')):
type = ctypes.c_long(type)
if ((type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value, const.CDF_TIME_TT2000.value)) and self.backward):
raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; incompatible with backward-compatible CDF')
new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys)
if (compress is not None):
new_var.compress(compress, compress_param)
if (sparse is not None):
new_var.sparse(sparse)
if (pad is not None):
new_var.pad(pad)
if (data is not None):
new_var[...] = data
if hasattr(data, 'attrs'):
new_var.attrs.clone(data.attrs)
return new_var
def raw_var(self, name):
v = self[name]
v._raw = True
return v
def save(self):
self._call(const.SAVE_, const.CDF_)
def copy(self):
return CDFCopy(self)
def version(self):
ver = ctypes.c_long(0)
rel = ctypes.c_long(0)
inc = ctypes.c_long(0)
self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver), const.GET_, const.CDF_RELEASE_, ctypes.byref(rel), const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc))
return (ver.value, rel.value, inc.value)
def _get_attrs(self):
    """Return the gAttrList for this CDF, creating it lazily.

    Only a weak reference to the list is kept, so it does not form a
    reference cycle with the CDF; if the previous list was garbage
    collected, a fresh one is built and re-registered.
    """
    al = self._attrlistref()
    if (al is None):
        al = gAttrList(self)
    # Re-register the (possibly new) list under a weak reference.
    self._attrlistref = weakref.ref(al)
    return al

def _set_attrs(self, value):
    """Replace all global attributes by cloning from ``value`` (dict-like)."""
    self.attrs.clone(value)

attrs = property(_get_attrs, _set_attrs, None, 'Global attributes for this CDF in a dict-like format.\n    See `gAttrList` for details.\n    ')
def var_num(self, varname):
    """Return the zVariable number for ``varname``, caching the lookup.

    The first call for a given name queries the CDF library; subsequent
    calls are served from the ``_var_nums`` cache.
    """
    try:
        return self._var_nums[varname]
    except KeyError:
        result = ctypes.c_long(0)
        self._call(const.GET_, const.zVAR_NUMBER_, varname, ctypes.byref(result))
        self._var_nums[varname] = result.value
        return result.value
def attr_num(self, attrname):
    """Return ``(number, is_global)`` for attribute ``attrname``, cached.

    The boolean is True for GLOBAL_SCOPE attributes and False for
    VARIABLE_SCOPE ones; any other scope value from the library raises
    CDFError(BAD_SCOPE).  Results are cached in ``_attr_info``.
    """
    res = self._attr_info.get(attrname, None)
    if (res is None):
        attrNum = ctypes.c_long(0)
        self._call(const.GET_, const.ATTR_NUMBER_, attrname, ctypes.byref(attrNum))
        scope = ctypes.c_long(0)
        # The attribute must be selected before its scope can be queried.
        self._call(const.SELECT_, const.ATTR_, attrNum, const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope))
        if (scope.value == const.GLOBAL_SCOPE.value):
            scope = True
        elif (scope.value == const.VARIABLE_SCOPE.value):
            scope = False
        else:
            raise CDFError(const.BAD_SCOPE)
        res = (attrNum.value, scope)
        self._attr_info[attrname] = res
    return res
def clear_attr_from_cache(self, attrname):
    """Drop ``attrname`` and every later-numbered attribute from the cache.

    Deleting an attribute renumbers those after it, so every cached
    (number, scope) pair at or above its number is stale and removed.
    """
    (num, _scope) = self.attr_num(attrname)
    stale = [name for (name, info) in self._attr_info.items() if (info[0] >= num)]
    for name in stale:
        del self._attr_info[name]
def clear_from_cache(self, varname):
    """Drop ``varname`` and every later-numbered variable from the cache.

    Deleting a variable renumbers the ones after it, so all cached
    numbers at or above its number become stale and are removed.
    """
    num = self.var_num(varname)
    stale = [name for (name, cached) in self._var_nums.items() if (cached >= num)]
    for name in stale:
        del self._var_nums[name]
def add_attr_to_cache(self, attrname, num, scope):
    """Record the (number, global-scope flag) pair for ``attrname``."""
    self._attr_info[attrname] = (num, scope)
def add_to_cache(self, varname, num):
    """Record the variable number for ``varname`` in the lookup cache."""
    self._var_nums[varname] = num
def argparser():
    """Build and parse command-line options for the handwriting model.

    Returns the parsed ``argparse.Namespace`` holding training
    hyperparameters, dataset/output paths, and feature flags.
    """
    ap = argparse.ArgumentParser(description='PyTorch Handwriting Synthesis Model')
    # Model / optimization hyperparameters.
    ap.add_argument('--hidden_size', type=int, default=400)
    ap.add_argument('--n_layers', type=int, default=3)
    ap.add_argument('--batch_size', type=int, default=32)
    ap.add_argument('--step_size', type=int, default=100)
    ap.add_argument('--n_epochs', type=int, default=100)
    ap.add_argument('--lr', type=float, default=0.001)
    ap.add_argument('--patience', type=int, default=15)
    # Experiment configuration and paths.
    ap.add_argument('--model_type', type=str, default='prediction')
    ap.add_argument('--data_path', type=str, default='./data/')
    ap.add_argument('--save_path', type=str, default='./logs/')
    # Boolean feature flags (off by default).
    ap.add_argument('--text_req', action='store_true')
    ap.add_argument('--data_aug', action='store_true')
    ap.add_argument('--debug', action='store_true')
    ap.add_argument('--seed', type=int, default=212, help='random seed')
    return ap.parse_args()
class BiFPN(Backbone):
    """Bidirectional FPN: stacks SingleBiFPN blocks over backbone features.

    Extra top pyramid levels are synthesized by wrapping the backbone in
    BackboneWithTopLevels, so the pyramid has ``num_top_levels`` more
    stages than the requested ``in_features``.
    """

    def __init__(self, bottom_up, in_features, out_channels, num_top_levels, num_repeats, norm=''):
        super(BiFPN, self).__init__()
        assert isinstance(bottom_up, Backbone)
        # Wrap the backbone so it also produces the extra top feature maps.
        self.bottom_up = BackboneWithTopLevels(bottom_up, out_channels, num_top_levels, norm)
        bottom_up_output_shapes = self.bottom_up.output_shape()
        # Order features by pyramid level (parsed from the feature name).
        in_features = sorted(in_features, key=(lambda x: split_name(x)[1]))
        self._size_divisibility = bottom_up_output_shapes[in_features[(- 1)]].stride
        self.out_channels = out_channels
        self.min_level = split_name(in_features[0])[1]
        # Append names for the synthesized top levels (prefix + next suffixes).
        (prefix, last_suffix) = split_name(in_features[(- 1)])
        for i in range(num_top_levels):
            in_features.append((prefix + str(((last_suffix + i) + 1))))
        self.in_features = in_features
        # Outputs follow the "p<level>" naming convention.
        self._out_features = ['p{}'.format(split_name(name)[1]) for name in in_features]
        self._out_feature_strides = {out_name: bottom_up_output_shapes[in_name].stride for (out_name, in_name) in zip(self._out_features, in_features)}
        self._out_feature_channels = {k: out_channels for k in self._out_features}
        # First repeat consumes backbone channel counts; later repeats all
        # see ``out_channels`` on every level.
        self.repeated_bifpn = nn.ModuleList()
        for i in range(num_repeats):
            if (i == 0):
                in_channels_list = [bottom_up_output_shapes[name].channels for name in in_features]
            else:
                in_channels_list = [self._out_feature_channels[name] for name in self._out_features]
            self.repeated_bifpn.append(SingleBiFPN(in_channels_list, out_channels, norm))

    def size_divisibility(self):
        # NOTE(review): reads like a @property whose decorator was stripped
        # in this copy -- confirm against the upstream source.
        return self._size_divisibility

    def forward(self, x):
        """Run the backbone, then each BiFPN repeat; return {name: tensor}."""
        bottom_up_features = self.bottom_up(x)
        feats = [bottom_up_features[f] for f in self.in_features]
        for bifpn in self.repeated_bifpn:
            feats = bifpn(feats)
        return dict(zip(self._out_features, feats))
def remove_ignore_keys_(state_dict):
    """Delete checkpoint entries the target model does not load (in place).

    Missing keys are ignored, so this is safe on already-cleaned dicts.
    """
    for key in ('decoder.version', 'decoder.output_projection.weight', '_float_tensor', 'decoder.embed_positions._float_tensor'):
        state_dict.pop(key, None)
class BetaPrime(ReferenceDistribution):
    """Reference implementation of the beta-prime distribution.

    Shape parameters ``a`` and ``b``; support is the positive half-line.
    All evaluation is done in arbitrary precision via mpmath (``mp``).
    """

    def __init__(self, *, a, b):
        super().__init__(a=a, b=b)

    def _support(self, **kwargs):
        # Support is (0, inf) for all shape parameters.
        return (mp.zero, mp.inf)

    def _logpdf(self, x, a, b):
        # log pdf: (a-1)*log(x) - (a+b)*log(1+x) - log(B(a, b)),
        # with log1p for accuracy near x = 0.
        return ((((a - mp.one) * mp.log(x)) - ((a + b) * mp.log1p(x))) - mp.log(mp.beta(a, b)))

    def _pdf(self, x, a, b):
        # pdf via exp(logpdf) to retain precision for extreme arguments.
        return mp.exp(self._logpdf(x=x, a=a, b=b))

    def _sf(self, x, a, b):
        # Survival function: 1 - I_{x/(1+x)}(a, b); uses the fact that
        # X/(1+X) ~ Beta(a, b) when X is beta-prime distributed.
        return (1.0 - mp.betainc(a, b, 0, (x / (1 + x)), regularized=True))
def GenCircle(tspec, *args):
    """Dispatch GenCircle to the implementation matching graph type ``tspec``.

    Candidates are compared with ``==`` in the same order as the original
    chain; the first match wins.

    Raises:
        TypeError: if ``tspec`` matches no known graph type.
    """
    dispatch = (
        (PUNGraph, GenCircle_PUNGraph),
        (PUndirNet, GenCircle_PUndirNet),
        (PDirNet, GenCircle_PDirNet),
        (PNGraph, GenCircle_PNGraph),
        (PNEANet, GenCircle_PNEANet),
        (PNGraphMP, GenCircle_PNGraphMP),
        (PNEANetMP, GenCircle_PNEANetMP),
    )
    for (candidate, impl) in dispatch:
        if (tspec == candidate):
            return impl(*args)
    raise TypeError('First argument has invalid type')
def download_pretrained_model(model_name, *args, **kwargs):
    """Download a model (and its zoo requirements) from the MMF model zoo.

    Looks ``model_name`` up in the model-zoo config, falls back to the
    entry's ``defaults`` variant when ``version``/``resources`` are not at
    the top level, recursively downloads any ``zoo_requirements``, and
    returns the local directory the resources were downloaded into.
    """
    import omegaconf
    from mmf.utils.configuration import get_mmf_env, load_yaml
    from omegaconf import OmegaConf
    model_zoo = load_yaml(get_mmf_env(key='model_zoo'))
    # Freeze the zoo config so lookups cannot mutate it.
    OmegaConf.set_struct(model_zoo, True)
    OmegaConf.set_readonly(model_zoo, True)
    data_dir = get_absolute_path(get_mmf_env('data_dir'))
    model_data_dir = os.path.join(data_dir, 'models')
    download_path = os.path.join(model_data_dir, model_name)
    try:
        model_config = OmegaConf.select(model_zoo, model_name)
    except omegaconf.errors.OmegaConfBaseException as e:
        print(f'No such model name {model_name} defined in mmf zoo')
        raise e
    if (('version' not in model_config) or ('resources' not in model_config)):
        # Top-level entry is a group; use its ``defaults`` variant and a
        # matching download directory.
        try:
            model_config = model_config.defaults
            download_path = os.path.join(model_data_dir, (model_name + '.defaults'))
        except omegaconf.errors.OmegaConfBaseException as e:
            print(f"Model name {model_name} doesn't specify 'resources' and 'version' while no defaults have been provided")
            raise e
    if ('zoo_requirements' in model_config):
        requirements = model_config.zoo_requirements
        if isinstance(requirements, str):
            requirements = [requirements]
        # Recursively fetch prerequisite models first.
        for item in requirements:
            download_pretrained_model(item, *args, **kwargs)
    version = model_config.version
    resources = model_config.resources
    download_resources(resources, download_path, version)
    return download_path
def _get_edge_loc_dp(x: List[float], min_feature: float=0) -> np.ndarray:
    """Locate edges in a grayscale profile via dynamic programming.

    NOTE(review): the DP appears to fit an alternating 0/1 step pattern to
    ``x`` at sub-sample resolution (``divisions`` candidate positions per
    sample) while enforcing a minimum feature width of ``min_feature``
    samples; the returned edge locations are fractional sample indices.
    Confirm these conventions against the caller.
    """
    # Squared-error cost of one sample against a target level (0 or 1).
    func = (lambda a, b: ((a - b) ** 2))
    max_val = len(x)
    # Sub-sample resolution: candidate edges every 1/5 of a sample.
    divisions = 5
    max_k = ((divisions * len(x)) + 1)
    x = np.array(x)
    # Prefix sums of per-sample costs against levels 0 and 1, so the cost
    # of any contiguous run is an O(1) difference.
    zero_value = np.cumsum(func(x, 0))
    one_value = np.cumsum(func(x, 1))
    zero_value = ([0] + zero_value.tolist())
    one_value = ([0] + one_value.tolist())
    dp = ([max_val] * max_k)
    # Minimum feature width expressed in sub-sample steps.
    d = int((min_feature * divisions))
    struct_dp = [[] for i in range(d)]
    dp[0] = 0
    for k in range(d, max_k):
        k_int = (k // divisions)
        k_frac = ((k % divisions) / divisions)
        # Baseline: everything up to k stays at level 0 (no feature yet).
        if (k_int < len(x)):
            val = (zero_value[k_int] + (func(x[k_int], (1 - k_frac)) * k_frac))
        else:
            val = max_val
        new_edges = [0, (k / divisions)]
        best_i_ind = 0
        j_value = max_val
        best_j_ind = (- 1)
        # Consider a feature whose rising edge j lies in [i+d, k-d]; the
        # reversed scan keeps the best j seen so far as i decreases.
        for i in reversed(range(((k - (2 * d)) + 1))):
            i_int = (i // divisions)
            i_frac = ((i % divisions) / divisions)
            base_value = (((func(x[i_int], (1 - i_frac)) * (1 - i_frac)) - one_value[(i_int + 1)]) + zero_value[k_int])
            if (k_int < len(x)):
                base_value += (func(x[k_int], (1 - k_frac)) * k_frac)
            j_int = ((i + d) // divisions)
            j_frac = (((i + d) % divisions) / divisions)
            new_j_value = ((one_value[j_int] + func(x[j_int], j_frac)) - zero_value[(j_int + 1)])
            if (new_j_value < j_value):
                j_value = new_j_value
                best_j_ind = (i + d)
            new_value = (j_value + base_value)
            if ((dp[i] + new_value) < val):
                val = (dp[i] + new_value)
                new_edges = [(best_j_ind / divisions), (k / divisions)]
                best_i_ind = i
        dp[k] = val
        # Remember the edge list that realizes dp[k].
        struct_dp.append((struct_dp[best_i_ind] + new_edges))
    # Add the cost of the tail beyond k (apparently labeled level 1).
    for k in range(d, (max_k - 1)):
        k_int = (k // divisions)
        k_frac = ((k % divisions) / divisions)
        dp[k] += ((one_value[(- 1)] - one_value[(k_int + 1)]) + (func(x[k_int], (1 - k_frac)) * (1 - k_frac)))
    # The minimum total cost selects the recovered edge list.
    struct_best_ind = (np.argmin(dp[d:]) + d)
    edge_loc = struct_dp[struct_best_ind]
    return edge_loc
class LSTM(object):
    """TF1 (B)LSTM model with sigmoid outputs and sigmoid-cross-entropy loss.

    The whole graph is built in ``__init__``: a tanh input projection, an
    LSTM or BLSTM stack selected by ``config.model_type``, an output
    projection, the loss, and an Adam training op with gradient clipping.

    NOTE(review): the accessor methods below read like @property members
    whose decorators were stripped in this copy (``self.initial_state`` is
    used without parentheses during graph construction) -- confirm against
    the original source.
    """

    def __init__(self, config, inputs, labels, lengths, infer=False):
        self._inputs = inputs
        self._labels = labels
        self._lengths = lengths
        self._model_type = config.model_type
        if infer:
            # Inference graphs are built for one sequence at a time.
            config.batch_size = 1
        outputs = self._inputs
        with tf.variable_scope('forward1'):
            # Input projection: (B*T, input) -> (B*T, rnn) -> (B, T, rnn).
            outputs = tf.reshape(outputs, [(- 1), config.input_size])
            outputs = tf.layers.dense(outputs, units=config.rnn_size, activation=tf.nn.tanh, reuse=tf.get_variable_scope().reuse)
            outputs = tf.reshape(outputs, [config.batch_size, (- 1), config.rnn_size])
        if (config.model_type.lower() == 'blstm'):
            with tf.variable_scope('blstm'):
                cell = tf.contrib.rnn.BasicLSTMCell(config.rnn_size)
                if ((not infer) and (config.keep_prob < 1.0)):
                    cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=config.keep_prob)
                # NOTE(review): [cell] * n reuses the SAME cell object for
                # every layer (the classic TF1 pitfall) -- confirm intended.
                lstm_fw_cell = tf.contrib.rnn.MultiRNNCell(([cell] * config.rnn_num_layers))
                lstm_bw_cell = tf.contrib.rnn.MultiRNNCell(([cell] * config.rnn_num_layers))
                lstm_fw_cell = _unpack_cell(lstm_fw_cell)
                lstm_bw_cell = _unpack_cell(lstm_bw_cell)
                result = rnn.stack_bidirectional_dynamic_rnn(cells_fw=lstm_fw_cell, cells_bw=lstm_bw_cell, inputs=outputs, dtype=tf.float32, sequence_length=self._lengths)
                (outputs, fw_final_states, bw_final_states) = result
        if (config.model_type.lower() == 'lstm'):
            with tf.variable_scope('lstm'):

                def lstm_cell():
                    # Peephole LSTM cell with Xavier-initialized weights.
                    return tf.contrib.rnn.LSTMCell(config.rnn_size, forget_bias=1.0, use_peepholes=True, initializer=tf.contrib.layers.xavier_initializer(), state_is_tuple=True, activation=tf.tanh)
                attn_cell = lstm_cell
                if ((not infer) and (config.keep_prob < 1.0)):

                    def attn_cell():
                        # Wrap each fresh cell with output dropout at train time.
                        return tf.contrib.rnn.DropoutWrapper(lstm_cell(), output_keep_prob=config.keep_prob)
                cell = tf.contrib.rnn.MultiRNNCell([attn_cell() for _ in range(config.rnn_num_layers)], state_is_tuple=True)
                self._initial_state = cell.zero_state(config.batch_size, tf.float32)
                state = self.initial_state
                (outputs, state) = tf.nn.dynamic_rnn(cell, outputs, dtype=tf.float32, sequence_length=self._lengths, initial_state=self.initial_state)
                self._final_state = state
        with tf.variable_scope('forward2'):
            # Option 0 keeps per-frame outputs (flattened over time);
            # options 1/2 keep the time axis for sequence-level pooling.
            if (config.embedding_option == 0):
                if (self._model_type.lower() == 'blstm'):
                    outputs = tf.reshape(outputs, [(- 1), (2 * config.rnn_size)])
                    in_size = (2 * config.rnn_size)
                else:
                    outputs = tf.reshape(outputs, [(- 1), config.rnn_size])
                    in_size = config.rnn_size
            else:
                if (self._model_type.lower() == 'blstm'):
                    outputs = tf.reshape(outputs, [config.batch_size, (- 1), (2 * config.rnn_size)])
                    in_size = (2 * config.rnn_size)
                else:
                    outputs = tf.reshape(outputs, [config.batch_size, (- 1), config.rnn_size])
                    in_size = config.rnn_size
            if (config.embedding_option == 1):
                # Option 1: take the last valid timestep of each sequence.
                ind = tf.subtract(self._lengths, tf.constant(1))
                batch_range = tf.range(config.batch_size)
                indices = tf.stack([batch_range, ind], axis=1)
                outputs = tf.gather_nd(outputs, indices)
                self._labels = tf.reduce_mean(self._labels, 1)
            elif (config.embedding_option == 2):
                # Option 2: mean-pool outputs (and labels) over time.
                outputs = tf.reduce_mean(outputs, 1)
                self._labels = tf.reduce_mean(self._labels, 1)
            out_size = config.output_size
            weights1 = tf.get_variable('weights1', [in_size, out_size], initializer=tf.random_normal_initializer(stddev=0.01))
            biases1 = tf.get_variable('biases1', [out_size], initializer=tf.constant_initializer(0.0))
            outputs = (tf.matmul(outputs, weights1) + biases1)
        if (config.embedding_option == 0):
            # Restore the (batch, time, out) shape for per-frame outputs.
            outputs = tf.reshape(outputs, [config.batch_size, (- 1), out_size])
        self._outputs = tf.nn.sigmoid(outputs)
        self.saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=30)
        if infer:
            # No loss or optimizer in inference graphs.
            return
        self._loss = tf.losses.sigmoid_cross_entropy(self._labels, outputs)
        if tf.get_variable_scope().reuse:
            # A reused (shared-variable) graph does not rebuild training ops.
            return
        self._lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        (grads, _) = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), config.max_grad_norm)
        optimizer = tf.train.AdamOptimizer(self.lr)
        self._train_op = optimizer.apply_gradients(zip(grads, tvars))
        # Placeholder + assign op used by assign_lr to change the LR live.
        self._new_lr = tf.placeholder(tf.float32, shape=[], name='new_learning_rate')
        self._lr_update = tf.assign(self._lr, self._new_lr)

    def assign_lr(self, session, lr_value):
        """Set the learning-rate variable to ``lr_value`` in ``session``."""
        session.run(self._lr_update, feed_dict={self._new_lr: lr_value})

    def inputs(self):
        # NOTE(review): ``_inputs_spk1``/``_inputs_spk2`` are never assigned
        # in this class (only ``_inputs`` is), so calling this raises
        # AttributeError -- looks copied from a two-speaker variant.
        return (self._inputs_spk1, self._inputs_spk2)

    def labels(self):
        return self._labels

    def initial_state(self):
        return self._initial_state

    def final_state(self):
        return self._final_state

    def lr(self):
        return self._lr

    def loss(self):
        return self._loss

    def train_op(self):
        return self._train_op

    def outputs(self):
        return self._outputs

    def _weight_and_bias(in_size, out_size):
        # NOTE(review): reads like an @staticmethod with the decorator
        # stripped (no ``self`` parameter).
        weights = tf.get_variable('weights', [in_size, out_size], initializer=tf.random_normal_initializer(stddev=0.01))
        biases = tf.get_variable('biases', [out_size], initializer=tf.constant_initializer(0.0))
        return (weights, biases)
def group_df_by_time(tdf, freq_str='1D', offset_value=0, offset_unit='hours', add_starting_location=False, dtformat='%Y-%m-%d %H:%M:%S'):
    """Split a trajectory dataframe into consecutive time windows.

    Windows of width ``freq_str`` start at midnight of the earliest
    timestamp (shifted by the given offset) and run through the day after
    the latest timestamp.  Empty windows are dropped.  When
    ``add_starting_location`` is True, each window is prepended with the
    last row of the previous one so consecutive segments connect.
    """
    ordered = tdf.sort_values([constants.DATETIME])
    shift = pd.Timedelta(offset_value, offset_unit)
    start = pd.to_datetime(ordered[constants.DATETIME].min().date())
    stop = pd.to_datetime(ordered[constants.DATETIME].max().date()) + pd.Timedelta(days=1)
    edges = pd.date_range((start + shift).strftime(dtformat), end=(stop + shift).strftime(dtformat), freq=freq_str)
    groups = []
    for lo, hi in zip(edges, edges[1:]):
        in_window = (ordered[constants.DATETIME] >= lo.strftime(dtformat)) & (ordered[constants.DATETIME] < hi.strftime(dtformat))
        window = ordered.loc[in_window]
        if len(window) > 0:
            groups.append(window.reset_index(drop=True))
    if add_starting_location:
        for pos in range(1, len(groups)):
            groups[pos] = pd.concat([groups[pos - 1][-1:], groups[pos]]).reset_index(drop=True)
    return groups
class Encoder(nn.Module):
    """Fuses RGB and point-cloud images into a latent vector.

    Each modality gets its own conv stem; the results are concatenated
    channel-wise, passed through a shared conv trunk, and projected to
    ``latent_size``.  :meth:`build` must be called before the first
    forward pass (module creation is deferred from ``__init__``).
    """

    def __init__(self, input_resolutions: List[List[int]], latent_size: int, activation: str):
        super(Encoder, self).__init__()
        self._input_resolutions = input_resolutions
        self._latent_size = latent_size
        self._activation = activation

    def build(self):
        """Instantiate the network modules (deferred construction)."""
        self._rgb_pre = nn.Sequential(Conv2DBlock(3, 16, 5, 1, activation=self._activation))
        self._pcd_pre = nn.Sequential(Conv2DBlock(3, 16, 5, 1, activation=self._activation))
        # Three stride-2 convs downsample 8x; the last conv keeps resolution.
        self._enc_convs = nn.Sequential(Conv2DBlock((16 * 2), 32, 5, 2, activation=self._activation), Conv2DBlock(32, 32, 3, 2, activation=self._activation), Conv2DBlock(32, 32, 3, 2, activation=self._activation), Conv2DBlock(32, 32, 3, 1, activation=self._activation))
        # NOTE(review): the dense input size hard-codes a 16x16 feature map
        # after the trunk -- tied to the expected input resolution; confirm.
        self._enc_dense = DenseBlock(((32 * 16) * 16), self._latent_size, norm='layer')

    def forward(self, observations, detach_convs=False):
        """Encode an (rgb, pcd) observation pair into the latent vector."""
        (x_rgb, x_pcd) = (self._rgb_pre(observations[0]), self._pcd_pre(observations[1]))
        x = torch.cat([x_rgb, x_pcd], dim=1)
        (b, _, h, w) = x.shape
        x = self._enc_convs(x)
        if detach_convs:
            # Stop gradients from flowing back into the conv trunk.
            x = x.detach()
        x = self._enc_dense(x.view(b, (- 1)))
        return x

    def _tie_weights(self, src, trg):
        # Share parameter objects (not copies): updates to src show in trg.
        assert (type(src) == type(trg))
        trg.conv2d.weight = src.conv2d.weight
        trg.conv2d.bias = src.conv2d.bias

    def copy_conv_weights_from(self, source):
        """Tie all conv weights in this encoder to those of ``source``."""
        for i in range(4):
            self._tie_weights(src=source._enc_convs[i], trg=self._enc_convs[i])
        self._tie_weights(src=source._rgb_pre[0], trg=self._rgb_pre[0])
        self._tie_weights(src=source._pcd_pre[0], trg=self._pcd_pre[0])
class ErnieMLMCriterion(paddle.nn.Layer):
    """Masked-LM cross-entropy criterion for ERNIE pretraining.

    Computes token-level cross entropy between ``prediction_scores`` and
    ``masked_lm_labels``, optionally weighting each masked position.
    """

    def __init__(self):
        super(ErnieMLMCriterion, self).__init__()

    def forward(self, prediction_scores, masked_lm_labels, masked_lm_scale=1.0, weights=None):
        """Return the (optionally weighted) mean masked-LM loss.

        Args:
            prediction_scores: vocabulary logits for the masked positions.
            masked_lm_labels: gold token ids; reshaped to (-1, 1).
            masked_lm_scale: divisor applied to the final loss.
            weights: optional per-position weights (e.g. to zero padding).
        """
        masked_lm_labels = paddle.reshape(masked_lm_labels, shape=[(- 1), 1])
        with paddle.static.amp.fp16_guard():
            if (weights is not None):
                masked_lm_loss = F.cross_entropy(input=prediction_scores, label=masked_lm_labels, reduction='none')
                # Bug fix: the weights must scale the per-position LOSS.
                # The previous code did ``masked_lm_labels *= weights`` after
                # the labels were already consumed by cross_entropy, so the
                # weights had no effect on the result.
                masked_lm_loss = (masked_lm_loss * weights).mean()
            else:
                masked_lm_loss = F.cross_entropy(input=prediction_scores, label=masked_lm_labels)
            masked_lm_loss = (masked_lm_loss / masked_lm_scale)
            return masked_lm_loss
class TestDSWrapperAndDSModier():
    """Integration tests for DSWrapper + DSModifier path handling.

    Relies on module-level fixtures (``base_ds``, ``ds_name``,
    ``modifier_name``, ``data_path``, ``input_path``) defined elsewhere
    in this file.
    """

    def test_initialization(self):
        # The modified dataset is written as "<ds_name>#<modifier_name>"
        # next to the source dataset, with images under 'images'.
        output_path = os.path.join(base_ds, (ds_name + '#{}'.format(modifier_name)))
        output_images_path = os.path.join(output_path, 'images')
        ds_wrapper = DSWrapper(data_path=data_path)
        assert (ds_wrapper.data_path == data_path), 'root datset should be equal'
        assert (ds_wrapper.data_input == input_path), 'DSWrapper should find data input path'
        ds_modifer = DSModifier()
        mod_ds_wrapper = ds_wrapper.modify(ds_modifer)
        assert (mod_ds_wrapper.data_path == output_path), 'root datset should be equal'
        assert os.path.exists(mod_ds_wrapper.data_path)
        assert (mod_ds_wrapper.data_input == output_images_path), 'DSWrapper should find data input path'
        # Clean up the directory the modifier created on disk.
        shutil.rmtree(mod_ds_wrapper.data_path)

    def test_log_parameters(self):
        # The modified wrapper's log parameters extend the base ones with
        # the derived dataset name and the modifier name.
        ds_wrapper = DSWrapper(data_path=data_path)
        ds_modifer = DSModifier()
        mod_ds_wrapper = ds_wrapper.modify(ds_modifer)
        expected_extended_log_params = {'ds_name': (ds_name + '#base_modifier'), 'modifier': 'base_modifier'}
        assert (mod_ds_wrapper.log_parameters() == expected_extended_log_params), 'Extended log parameters should be the same'
        shutil.rmtree(mod_ds_wrapper.data_path)
def _stft(y):
    """Compute the STFT of waveform ``y``.

    When ``hp.use_lws`` is set, uses the LWS processor's transform
    (transposed to put frequency first); otherwise falls back to librosa
    with the project's FFT/hop/window settings.
    """
    if not hp.use_lws:
        return librosa.stft(y=y, n_fft=hp.n_fft, hop_length=get_hop_size(), win_length=hp.win_size)
    return _lws_processor(hp).stft(y).T
def generate_graph_args_builder(graph: sr.Graph) -> List[str]:
    """Emit C++ source lines for the argument-builder wrapper of ``graph``.

    Produces a ``ComputeGraph_<name>`` struct derived from
    ``ti::ComputeGraph`` with one typed setter per graph argument.
    """
    cls_name = f'ComputeGraph_{graph.name}'
    lines = [
        f'struct ComputeGraph_{graph.name} : public ti::ComputeGraph {{',
        f' explicit ComputeGraph_{graph.name}(TiRuntime runtime, TiComputeGraph graph) :',
        ' ti::ComputeGraph(runtime, graph) {',
        f' args_.resize({len(graph.args)});',
        ' }',
        '',
    ]
    for (i, arg) in enumerate(graph.args):
        value = arg.arg
        # Dispatch on the argument kind to the matching setter generator.
        if isinstance(value, sr.ArgumentScalar):
            lines += generate_scalar_assign(cls_name, i, arg.name, value, True)
        elif isinstance(value, sr.ArgumentNdArray):
            lines += generate_ndarray_assign(cls_name, i, arg.name, value, True)
        elif isinstance(value, (sr.ArgumentTexture, sr.ArgumentRwTexture)):
            lines += generate_texture_assign(cls_name, i, arg.name, value, True)
        else:
            assert False
        lines += ['']
    lines += ['};', '']
    return lines
class StmtBuilder(Builder):
    """Translates Python ``ast`` statement nodes into frontend tree nodes.

    Each ``build_X`` method handles one ``ast`` node type; unsupported
    constructs raise NotSupportedError/UnsupportedNodeError with a source
    range pointing at the offending statement.  NOTE(review): upstream
    these are @staticmethods; the decorators appear stripped in this copy.
    """

    # Augmented-assignment operators the frontend supports.
    augassign_map = {ast.Add: '+', ast.Sub: '-', ast.Mult: '*', ast.Div: '/', ast.Mod: '%'}

    def build_Expr(ctx, stmt):
        value = stmt.value
        # Bare string expressions are docstrings: drop them.
        if (value.__class__.__name__ == 'Str'):
            return None
        else:
            return ExprStmt(build_expr(ctx, value))

    def build_Assign(ctx, stmt):
        rhs = build_expr(ctx, stmt.value)
        # Multiple targets (a = b = rhs) all map to the same rhs.
        lhs = list(map((lambda x: build_expr(ctx, x)), stmt.targets))
        return Assign(lhs, rhs)

    def build_AnnAssign(ctx, stmt):
        # Annotated assignment must carry a value (bare ``x: int`` is not
        # supported here).
        if (stmt.value is None):
            raise UnsupportedNodeError(ctx, stmt, reason='without assigned value')
        rhs = build_expr(ctx, stmt.value)
        lhs = build_expr(ctx, stmt.target)
        the_type = build_expr(ctx, stmt.annotation)
        return Assign([lhs], rhs, the_type)

    def build_Delete(ctx, stmt):
        # Only single-target ``del`` is supported.
        if (len(stmt.targets) > 1):
            source_range = ctx.make_range(stmt.lineno, stmt.col_offset, (stmt.col_offset + len('del')))
            raise NotSupportedError(source_range, 'del with more than one operand is not supported')
        return Delete(build_expr(ctx, stmt.targets[0]))

    def build_Return(ctx, stmt):
        r = ctx.make_range(stmt.lineno, stmt.col_offset, (stmt.col_offset + len('return')))
        return Return(r, (None if (stmt.value is None) else build_expr(ctx, stmt.value)))

    def build_Raise(ctx, stmt):
        r = ctx.make_range(stmt.lineno, stmt.col_offset, (stmt.col_offset + len('raise')))
        expr = build_expr(ctx, stmt.exc)
        return Raise(r, expr)

    def build_Assert(ctx, stmt):
        r = ctx.make_range(stmt.lineno, stmt.col_offset, (stmt.col_offset + len('assert')))
        test = build_expr(ctx, stmt.test)
        msg = (build_expr(ctx, stmt.msg) if (stmt.msg is not None) else None)
        return Assert(r, test, msg)

    def build_AugAssign(ctx, stmt):
        lhs = build_expr(ctx, stmt.target)
        rhs = build_expr(ctx, stmt.value)
        op = type(stmt.op)
        if (op in StmtBuilder.augassign_map):
            op_token = StmtBuilder.augassign_map[op]
        else:
            raise NotSupportedError(find_before(ctx, rhs.range().start, '=', offsets=((- 1), 0)), ('unsupported kind of augumented assignment: ' + op.__name__))
        return AugAssign(lhs, op_token, rhs)

    def build_While(ctx, stmt):
        # ``while ... else`` has no equivalent in the target IR.
        if stmt.orelse:
            raise NotSupportedError(None, "else branches of while loops aren't supported")
        r = ctx.make_range(stmt.lineno, stmt.col_offset, (stmt.col_offset + len('while')))
        return While(r, build_expr(ctx, stmt.test), build_stmts(ctx, stmt.body))

    def build_For(ctx, stmt):
        r = ctx.make_range(stmt.lineno, stmt.col_offset, (stmt.col_offset + len('for')))
        return For(r, [build_expr(ctx, stmt.target)], [build_expr(ctx, stmt.iter)], build_stmts(ctx, stmt.body))

    def build_If(ctx, stmt):
        r = ctx.make_range(stmt.lineno, stmt.col_offset, (stmt.col_offset + len('if')))
        return If(r, build_expr(ctx, stmt.test), build_stmts(ctx, stmt.body), build_stmts(ctx, stmt.orelse))

    def build_Print(ctx, stmt):
        # NOTE(review): ``stmt.dest``/``stmt.values`` are attributes of the
        # Python 2 Print node (typed-ast); a print statement is lowered to a
        # call of the ``print`` builtin.
        r = ctx.make_range(stmt.lineno, stmt.col_offset, (stmt.col_offset + len('print')))
        if stmt.dest:
            raise NotSupportedError(r, "print statements with non-default destinations aren't supported")
        args = [build_expr(ctx, val) for val in stmt.values]
        return ExprStmt(Apply(Var(Ident(r, 'print')), args, []))

    def build_Pass(ctx, stmt):
        r = ctx.make_range(stmt.lineno, stmt.col_offset, (stmt.col_offset + len('pass')))
        return Pass(r)

    def build_Break(ctx, stmt):
        r = ctx.make_range(stmt.lineno, stmt.col_offset, (stmt.col_offset + len('break')))
        return Break(r)

    def build_Continue(ctx, stmt):
        r = ctx.make_range(stmt.lineno, stmt.col_offset, (stmt.col_offset + len('continue')))
        return Continue(r)

    def build_With(ctx, stmt):
        r = ctx.make_range(stmt.lineno, stmt.col_offset, (stmt.col_offset + len('with')))
        return With(r, build_withitems(ctx, stmt.items), build_stmts(ctx, stmt.body))
class PositionalEncoding(torch.nn.Module):
    """NeRF-style sinusoidal positional encoding.

    Each input coordinate is expanded into sin/cos pairs at ``num_freqs``
    geometrically spaced frequencies (cos realized as sin with a pi/2
    phase shift); the raw input is optionally concatenated in front.
    """

    def __init__(self, num_freqs=6, d_in=3, freq_factor=np.pi, include_input=True):
        super().__init__()
        self.num_freqs = num_freqs
        self.d_in = d_in
        # Frequencies: freq_factor * 2^k for k = 0..num_freqs-1.
        self.freqs = freq_factor * (2.0 ** torch.arange(0, num_freqs))
        self.d_out = self.num_freqs * 2 * d_in
        self.include_input = include_input
        if include_input:
            self.d_out += d_in
        # Each frequency appears twice: one sin slot and one cos slot.
        self.register_buffer('_freqs', torch.repeat_interleave(self.freqs, 2).view(1, -1, 1))
        # Odd slots get a pi/2 phase so sin(x + pi/2) == cos(x).
        phases = torch.zeros(2 * self.num_freqs)
        phases[1::2] = np.pi * 0.5
        self.register_buffer('_phases', phases.view(1, -1, 1))

    def forward(self, x):
        """Encode (B, d_in) coordinates into (B, d_out) features."""
        with profiler.record_function('positional_enc'):
            tiled = x.unsqueeze(1).repeat(1, self.num_freqs * 2, 1)
            encoded = torch.sin(tiled * self._freqs + self._phases)
            encoded = encoded.view(x.shape[0], -1)
            if self.include_input:
                encoded = torch.cat((x, encoded), dim=-1)
            return encoded

    def from_conf(cls, conf, d_in=3):
        # NOTE(review): reads like an @classmethod whose decorator was
        # stripped in this copy.
        return cls(conf.get_int('num_freqs', 6), d_in, conf.get_float('freq_factor', np.pi), conf.get_bool('include_input', True))
class SAGE(torch.nn.Module):
    """Multi-layer GraphSAGE with ReLU + dropout between layers and a
    log-softmax output head."""

    def __init__(self, in_channels, hidden_channels, out_channels, num_layers, dropout):
        super(SAGE, self).__init__()
        # in -> hidden, then (num_layers - 2) hidden -> hidden, then
        # hidden -> out.
        layers = [SAGEConv(in_channels, hidden_channels)]
        layers += [SAGEConv(hidden_channels, hidden_channels) for _ in range(num_layers - 2)]
        layers.append(SAGEConv(hidden_channels, out_channels))
        self.convs = torch.nn.ModuleList(layers)
        self.dropout = dropout

    def reset_parameters(self):
        """Re-initialize every SAGEConv layer."""
        for conv in self.convs:
            conv.reset_parameters()

    def forward(self, x, adj_t):
        *inner, last = self.convs
        for conv in inner:
            x = F.dropout(F.relu(conv(x, adj_t)), p=self.dropout, training=self.training)
        return last(x, adj_t).log_softmax(dim=-1)
def simulate_from_network_attr(arclist_filename, param_func_list, labels, theta, binattr_filename=None, contattr_filename=None, catattr_filename=None, sampler_func=basicALAAMsampler, numSamples=100, iterationInStep=None, burnIn=None):
    """Simulate ALAAM outcome vectors on the given network, printing stats.

    Writes a space-separated table to stdout: iteration number, the theta
    values, one column per statistic label, and the acceptance rate.
    """
    assert len(param_func_list) == len(labels)
    G = Graph(arclist_filename, binattr_filename, contattr_filename, catattr_filename)
    header = ['t'] + [('theta_' + z) for z in labels] + labels + ['acceptance_rate']
    sys.stdout.write(' '.join(header) + '\n')
    for (simvec, stats, acceptance_rate, t) in simulateALAAM(G, param_func_list, theta, numSamples, iterationInStep, burnIn, sampler_func=sampler_func):
        row = [str(t)] + [str(th) for th in list(theta)] + [str(x) for x in list(stats)] + [str(acceptance_rate)]
        sys.stdout.write(' '.join(row) + '\n')
def test_the_cat_api_evaluator():
    """Smoke-test TheCatAPIEvaluator against the v0 Cat API context."""
    # NOTE(review): the literal below is unterminated (no closing '"') and
    # looks truncated in this copy -- presumably it held the full expected
    # curl command.  Restore from version control before running.
    label = "curl -X GET '
    context_dir = f'data/the_cat_api/v0'
    generator = RagGenerator(client_name='openai', model_name='text-curie-001', context_dir=context_dir, max_output_token=256, top_k_api=3, top_k_example=3, query_template='Task: {query} (Answer in code only)\nActions:\n')
    e = evaluator.TheCatAPIEvaluator(generator)
    results = e('List all my favorite cats', [label])
    print(results)
def evaluate(in_channels, out_channels, kernel_size, data_shape: tuple, input_to_constant: bool, execute_cpu_dace: bool=False, queue=None):
    """Run the PyTorch conv model and its DaCe FPGA build; compare outputs.

    Builds the model, lowers the resulting SDFG for FPGA execution
    (optionally folding inputs to constants), runs it with the
    'naive_fpga' Conv expansion, and checks the relative L2 error against
    the PyTorch reference.  When ``queue`` is given, the error is reported
    through it (multiprocessing mode) instead of asserted.
    """
    ptmodel = Model(in_channels, out_channels, kernel_size, input_to_constant)
    x = torch.rand(data_shape)
    torch_output = ptmodel(x)
    dace_model = DaceModule(ptmodel, dummy_inputs=(x,), auto_optimize=False)
    if execute_cpu_dace:
        # Optional CPU run of the untransformed SDFG (result unused here).
        dace_output = dace_model(x)

    def TransformToFPGA(dace_module):
        # Post-ONNX hook: lower the SDFG to FPGA and inline everything.
        sdfg = dace_module.sdfg
        sdfg.apply_transformations([FPGATransformSDFG, InlineSDFG])
        if input_to_constant:
            sdfg.apply_transformations_repeated([InputToConstant], print_report=True)
        sdfg.expand_library_nodes()
        sdfg.apply_transformations_repeated([InlineSDFG])
    dace_model.reset_sdfg()
    dace_model.append_post_onnx_hook('TransformToFPGA', TransformToFPGA)
    import daceml.onnx as donnx
    with dace.library.change_default(donnx.ONNXConv, 'naive_fpga'):
        dace_output_fpga = dace_model(torch.clone(x))
    dace_output_fpga = dace_output_fpga.detach().numpy().reshape(torch_output.shape)
    # Relative L2 error between the two implementations.
    diff = (np.linalg.norm((torch_output.detach().numpy() - dace_output_fpga)) / np.linalg.norm(torch_output.detach().numpy()))
    print('Difference: ', diff)
    if (queue is not None):
        queue.put(diff)
    else:
        assert (diff < 1e-06)
    del dace_model, ptmodel, x
class OpTreeValue(OpTreeLeafBase):
    """Leaf of an operator tree holding a plain numeric value."""

    def __init__(self, value: float) -> None:
        self.value = value

    def __str__(self) -> str:
        return str(self.value)

    def __eq__(self, other) -> bool:
        """Compare by wrapped value against other OpTreeValue leaves.

        Bug fix: the previous implementation fell off the end (returning
        None -- falsy but not False) for non-OpTreeValue operands.
        Returning NotImplemented lets Python try the reflected comparison
        and fall back to identity, as the data model specifies.
        """
        if isinstance(other, OpTreeValue):
            return (self.value == other.value)
        return NotImplemented

    def copy(self):
        """Return a new OpTreeValue wrapping the same value."""
        return OpTreeValue(self.value)
_module()
class TransferalPerceptualLoss(nn.Module):
    """Texture-transfer perceptual loss between feature maps and textures.

    Compares each feature map against its reference texture with L1 or
    MSE, optionally modulating both sides with a soft-attention map that
    is upsampled 2x per pyramid level.
    """

    def __init__(self, loss_weight=1.0, use_attention=True, criterion='mse'):
        super().__init__()
        self.use_attention = use_attention
        self.loss_weight = loss_weight
        criterion = criterion.lower()
        loss_classes = {'l1': torch.nn.L1Loss, 'mse': torch.nn.MSELoss}
        if criterion not in loss_classes:
            raise ValueError(f"criterion should be 'l1' or 'mse', but got {criterion}")
        self.loss_function = loss_classes[criterion]()

    def forward(self, maps, soft_attention, textures):
        """Return the weighted sum of per-level (masked) feature losses."""
        if self.use_attention:
            h, w = soft_attention.shape[-2:]
            # Level 0 uses the sigmoid-squashed attention directly; higher
            # levels use the raw attention upsampled 2x per level (matching
            # the original, which applies sigmoid only at level 0).
            softs = [torch.sigmoid(soft_attention)]
            softs += [
                F.interpolate(soft_attention, size=(h * 2 ** level, w * 2 ** level), mode='bicubic', align_corners=False)
                for level in range(1, len(maps))
            ]
        else:
            softs = [1.0, 1.0, 1.0]
        total = sum(self.loss_function(feat * soft, tex * soft) for feat, soft, tex in zip(maps, softs, textures))
        return total * self.loss_weight
def random_color_jitter(image, p=1.0, impl='simclrv2'):
    """With probability ``p``, apply SimCLR-style color jitter + grayscale.

    The inner transform color-jitters with probability 0.8 (strength 0.2)
    and then converts to grayscale with probability 0.2.
    """
    def _jitter_then_gray(img):
        jitter = functools.partial(color_jitter, strength=0.2, impl=impl)
        img = random_apply(jitter, p=0.8, x=img)
        return random_apply(to_grayscale, p=0.2, x=img)
    return random_apply(_jitter_then_gray, p=p, x=image)
class NonNeg(Constraint):
    """Weight constraint that zeroes out negative entries.

    Multiplies ``w`` elementwise by the indicator of ``w >= 0``: negative
    weights become 0, non-negative weights pass through unchanged.
    """

    def __call__(self, w):
        # In-place style multiply preserves the original variable reference.
        w *= K.cast(K.greater_equal(w, 0.0), K.floatx())
        return w
def prepare_env(cfg):
    """Seed RNGs, configure cuDNN, and optionally back up the source tree.

    When ``cfg.BASIC.BACKUP_CODES`` is set, any previous backup directory
    under the save dir is removed before copying the configured file list.
    """
    fix_random_seed(cfg.BASIC.SEED)
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    cudnn.enabled = cfg.CUDNN.ENABLE
    if not cfg.BASIC.BACKUP_CODES:
        return
    backup_dir = os.path.join(cfg.BASIC.SAVE_DIR, 'backup')
    rm(backup_dir)  # clear any stale backup first
    backup_codes(cfg.BASIC.ROOT_DIR, backup_dir, cfg.BASIC.BACKUP_LIST)
_datapipe('_dataframes_shuffle', enable_df_api_tracing=True)
class ShuffleDataFramesPipe(DFIterDataPipe):
    """Shuffle dataframe rows across the whole stream.

    Splits every incoming dataframe into single-row slices, shuffles all
    rows globally, and re-emits dataframes re-batched to the size of the
    first incoming frame (plus one final partial batch if needed).
    """

    def __init__(self, source_datapipe):
        self.source_datapipe = source_datapipe
        if (not WITH_PANDAS):
            # Bug fix: the exception was constructed but never raised, so
            # the missing-pandas case silently continued and failed later.
            raise Exception('DataFrames prototype requires pandas to function')

    def __iter__(self):
        size = None
        all_buffer = []
        for df in self.source_datapipe:
            if (size is None):
                # Output batch size is taken from the first frame seen.
                size = len(df.index)
            for i in range(len(df.index)):
                all_buffer.append(df[i:(i + 1)])
        random.shuffle(all_buffer)
        buffer = []
        for df in all_buffer:
            buffer.append(df)
            if (len(buffer) == size):
                (yield pandas.concat(buffer))
                buffer = []
        # Emit any leftover rows as a final, smaller batch.
        if len(buffer):
            (yield pandas.concat(buffer))
class DecoderLayer(nn.Module):
    """Transformer decoder layer.

    Masked self-attention, encoder-decoder attention, and a position-wise
    feed-forward block, each applied as a pre-norm residual branch with
    dropout.  Returns the updated states and the context attention weights.
    """

    def __init__(self, d_model, heads, dropout=0.1):
        super().__init__()
        self.norm_1 = Norm(d_model)
        self.norm_2 = Norm(d_model)
        self.norm_3 = Norm(d_model)
        self.dropout_1 = nn.Dropout(dropout)
        self.dropout_2 = nn.Dropout(dropout)
        self.dropout_3 = nn.Dropout(dropout)
        self.attn_1 = MultiHeadAttention(heads, d_model, dropout=dropout)
        self.attn_2 = MultiHeadAttention(heads, d_model, dropout=dropout)
        self.ff = FeedForward(d_model, dropout=dropout)

    def forward(self, x, e_outputs, src_mask, trg_mask, layer_cache=None):
        # Masked self-attention over the target sequence (pre-norm residual).
        normed = self.norm_1(x)
        self_out = self.attn_1(normed, normed, normed, trg_mask, layer_cache=layer_cache, attn_type='self')[0]
        x = x + self.dropout_1(self_out)
        # Attend over the encoder outputs; keep the attention weights.
        normed = self.norm_2(x)
        context, attn = self.attn_2(normed, e_outputs, e_outputs, src_mask, attn_type='context')
        x = x + self.dropout_2(context)
        # Position-wise feed-forward branch.
        normed = self.norm_3(x)
        x = x + self.dropout_3(self.ff(normed))
        return (x, attn)
class Doctype(object):
    """Lightweight stand-in for a DOCTYPE node in a tree-walker interface.

    Mimics the attribute surface of an element node (``text``/``tail``
    are always None) so generic traversal code can treat it uniformly.
    """

    def __init__(self, root_node, name, public_id, system_id):
        self.root_node = root_node
        self.name = name
        self.public_id = public_id
        self.system_id = system_id
        # DOCTYPE nodes never carry text content.
        self.text = self.tail = None

    def getnext(self):
        """Return the node following the doctype: the root's second child."""
        return self.root_node.children[1]
class ClapTextConfig(PretrainedConfig):
    """Configuration for the CLAP text encoder.

    Stores the hyperparameters consumed by the text model; the defaults
    (vocab 50265, max positions 514, pad id 1) match the released CLAP
    checkpoints' RoBERTa-style text tower.
    """

    model_type = 'clap_text_model'

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, initializer_factor=1.0, layer_norm_eps=1e-12, projection_dim=512, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, projection_hidden_act='relu', **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.projection_hidden_act = projection_hidden_act
        self.projection_dim = projection_dim

    def from_pretrained(cls, pretrained_model_name_or_path: Union[(str, os.PathLike)], **kwargs) -> 'PretrainedConfig':
        """Load this text config, unwrapping ``text_config`` when pointed at
        a full CLAP config.

        NOTE(review): reads like an @classmethod whose decorator was
        stripped in this copy.
        """
        (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # When given a composite "clap" config, pull out the text sub-config.
        if (config_dict.get('model_type') == 'clap'):
            config_dict = config_dict['text_config']
        if (('model_type' in config_dict) and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type)):
            logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
def GetQuasiSequenceOrder1(ProteinSequence, maxlag=30, weight=0.1, distancematrix={}):
    """Compute the first block of quasi-sequence-order descriptors.

    The amino-acid composition of *ProteinSequence* is normalised by
    ``1 + weight * sum_of_coupling_numbers`` over lags ``1..maxlag`` and
    reported per amino acid as keys ``QSO1`` .. ``QSO20`` (one per letter in
    ``AALetter``, in order), rounded to six decimals.
    """
    coupling_total = sum(
        GetSequenceOrderCouplingNumber(ProteinSequence, lag, distancematrix)
        for lag in range(1, maxlag + 1)
    )
    composition = GetAAComposition(ProteinSequence)
    denominator = 1 + (weight * coupling_total)
    return {
        'QSO' + str(position + 1): round(composition[aa] / denominator, 6)
        for position, aa in enumerate(AALetter)
    }
class ManualConvLinearQATModel(torch.nn.Module):
    """Small float conv + two-FC model wired for manual QAT.

    Quant/dequant stubs bracket the computation and a default 'qnnpack' QAT
    qconfig is attached at construction so a later prepare step can pick it
    up.  Expects inputs whose conv output flattens to 64 features
    (e.g. 3x10x10 -> 1x8x8).
    """

    def __init__(self):
        super().__init__()
        # The qconfig attribute is read by the QAT preparation machinery.
        self.qconfig = torch.quantization.get_default_qat_qconfig('qnnpack')
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.conv = torch.nn.Conv2d(3, 1, kernel_size=3).to(dtype=torch.float)
        self.fc1 = torch.nn.Linear(64, 10).to(dtype=torch.float)
        self.fc2 = torch.nn.Linear(10, 10).to(dtype=torch.float)

    def forward(self, x):
        # quantize -> conv -> flatten to 64 features -> fc1 -> fc2 -> dequantize
        out = self.quant(x)
        out = self.conv(out)
        out = out.view(-1, 64).contiguous()
        out = self.fc2(self.fc1(out))
        return self.dequant(out)
def load_result(filename_list):
    """Parse ranked-retrieval result files into prediction and ground-truth maps.

    Each line of every file is expected to be
    ``query \\t predicted_ids \\t ground_truth_ids \\t rank`` with the id
    fields comma-separated.  Ids are remapped through the per-file id mapping
    selected by the first integer embedded in the file name.

    Args:
        filename_list: paths of result files to merge.

    Returns:
        ``(predict_dict, gt_dict)`` where ``predict_dict`` maps a query to
        ``[[pred_ids, scores]]`` (extended across files) and ``gt_dict`` maps
        a query to its remapped ground-truth id list.

    Fixes over the previous version: files are opened with a context manager
    (no handle leak on error) and the inner loops no longer shadow the outer
    loop index ``i``.
    """
    predict_dict = {}
    gt_dict = {}
    for filename in filename_list:
        # The first number in the file name selects which id mapping to use.
        idx = re.findall('\\d+', filename)
        id_map_dict = load_id_mapping(['../Data_process/Trivia_dataset/trivia_title_cont.tsv'], int(idx[0]))
        with open(filename, 'r') as f:
            for line in f:
                (query, predict_id, gt_id, rank) = line[:(- 1)].split('\t')
                # Remap every ground-truth id through the id mapping.
                gt_id = [id_map_dict[str(g)] for g in gt_id.split(',')]
                pred_id = [[], []]
                predict_id = predict_id.split(',')
                for k in range(RETURN_NUM):
                    pred_id[0].append(id_map_dict[predict_id[k]])
                    # NOTE(review): `dic` is a module-level rank->score table
                    # defined elsewhere in this file — confirm.
                    pred_id[1].append(dic[k])
                gt_dict[query] = gt_id
                if query not in predict_dict:
                    predict_dict[query] = []
                    predict_dict[query].append(pred_id)
                else:
                    # Merge additional files' predictions into the first entry.
                    predict_dict[query][0][0].extend(pred_id[0])
                    predict_dict[query][0][1].extend(pred_id[1])
    return (predict_dict, gt_dict)
def saturation(A, proof=True, p=0, max_dets=5):
    """Return a saturation of the integer matrix ``A`` (Sage lattice helper).

    Strategy visible in the code: reduce ``A`` to full row rank, strip common
    row factors and zero columns, then try up to ``max_dets`` random maximal
    minors — if the gcd of their determinants is coprime to ``p`` the matrix
    is already saturated (at ``p``) and is returned early; otherwise fall back
    to a Hermite-form / linear-solve computation.

    NOTE(review): relies on Sage helpers (``matrix_integer_dense_hnf``,
    ``solve_system_with_difficult_last_row``, ``random_sublist_of_size``,
    ``verbose``) defined elsewhere — semantics assumed from their names.
    """
    r = A.rank()
    # A full-rank square matrix saturates to the identity lattice.
    if (A.is_square() and (r == A.nrows())):
        return identity_matrix(ZZ, r)
    if (A.nrows() > r):
        # Keep retrying until a probable set of r pivot rows is found.
        P = []
        while (len(P) < r):
            P = matrix_integer_dense_hnf.probable_pivot_rows(A)
        A = A.matrix_from_rows(P)
    # Work on a copy: the row-factor strip below mutates in place.
    A = copy(A)
    A._factor_out_common_factors_from_each_row()
    if (A.nrows() <= 1):
        return A
    (A, zero_cols) = A._delete_zero_columns()
    if (max_dets > 0):
        nr = A.nrows()
        nc = A.ncols()
        d = 0
        # Cap the number of random column subsets we test.
        trials = min(binomial(nc, nr), max_dets)
        already_tried = []
        while (len(already_tried) < trials):
            v = random_sublist_of_size(nc, nr)
            tm = verbose('saturation -- checking det condition on submatrix')
            # Accumulate the gcd of sampled maximal minors.
            d = gcd(d, A.matrix_from_columns(v).determinant(proof=proof))
            verbose(('saturation -- got det down to %s' % d), tm)
            if (gcd(d, p) == 1):
                # Determinant gcd coprime to p: already saturated here.
                return A._insert_zero_columns(zero_cols)
            already_tried.append(v)
        if (gcd(d, p) == 1):
            return A._insert_zero_columns(zero_cols)
    # Slow path: transpose Hermite form plus a structured linear solve.
    B = A.transpose().hermite_form(include_zero_rows=False, proof=proof)
    B = B.transpose()
    C = solve_system_with_difficult_last_row(B, A)
    return C.change_ring(ZZ)._insert_zero_columns(zero_cols)
def aggregate_similarity(similarity_matrix_chunk, aggregation_method='mean'):
    """Collapse a similarity chunk along dim 1 with the chosen reduction.

    Supported methods: ``'max'`` (best match per row), ``'sum'`` and
    ``'mean'`` (default).  Raises ``ValueError`` for anything else.
    """
    reducers = {
        'max': lambda chunk: chunk.max(dim=1)[0],
        'sum': lambda chunk: chunk.sum(dim=1),
        'mean': lambda chunk: chunk.mean(dim=1),
    }
    if aggregation_method not in reducers:
        raise ValueError('Unknown aggregate_similarity')
    return reducers[aggregation_method](similarity_matrix_chunk)
def labeled_comprehension(input, labels, index, func, out_dtype, default, pass_positions=False):
    """Apply *func* to the values of *input* grouped by *labels*.

    For each label value in *index*, gather the elements of *input* whose
    label matches and call ``func`` on them (optionally also passing their
    flat positions); labels that never occur yield *default*.

    Args:
        input: array-like of values.
        labels: array broadcastable against *input*, or None to treat the
            whole input as one group (then *index* must be None).
        index: scalar or sequence of label values to evaluate; None selects
            all strictly-positive labels as a single group.
        func: callable applied to each group — ``func(vals)``, or
            ``func(vals, positions)`` when *pass_positions* is true.
        out_dtype: dtype of the returned array.
        default: fill value for labels absent from the data.
        pass_positions: also pass the flat indices of selected elements.

    Returns:
        A scalar if *index* was scalar, else an array of shape ``index.shape``.

    Raises:
        ValueError: incompatible shapes, *index* without *labels*, or *index*
            values not exactly representable in the labels' dtype.

    Fix: the dtype round-trip check used the undefined alias ``np``; it now
    uses ``numpy`` like the rest of the function.
    """
    as_scalar = numpy.isscalar(index)
    input = numpy.asarray(input)
    if pass_positions:
        positions = numpy.arange(input.size).reshape(input.shape)
    if labels is None:
        if index is not None:
            raise ValueError('index without defined labels')
        if not pass_positions:
            return func(input.ravel())
        else:
            return func(input.ravel(), positions.ravel())
    try:
        (input, labels) = numpy.broadcast_arrays(input, labels)
    except ValueError as e:
        raise ValueError('input and labels must have the same shape (excepting dimensions with width 1)') from e
    if index is None:
        # No explicit index: one group of all positively-labeled elements.
        if not pass_positions:
            return func(input[(labels > 0)])
        else:
            return func(input[(labels > 0)], positions[(labels > 0)])
    index = numpy.atleast_1d(index)
    # The round trip through the labels' dtype must be lossless, otherwise
    # the comparisons below would silently match the wrong labels.
    if numpy.any((index.astype(labels.dtype).astype(index.dtype) != index)):
        raise ValueError(f"Cannot convert index values from <{index.dtype}> to <{labels.dtype}> (labels' type) without loss of precision")
    index = index.astype(labels.dtype)
    # Restrict to the requested label range, then sort by label so each
    # group becomes a contiguous slice.
    lo = index.min()
    hi = index.max()
    mask = ((labels >= lo) & (labels <= hi))
    labels = labels[mask]
    input = input[mask]
    if pass_positions:
        positions = positions[mask]
    label_order = labels.argsort()
    labels = labels[label_order]
    input = input[label_order]
    if pass_positions:
        positions = positions[label_order]
    index_order = index.argsort()
    sorted_index = index[index_order]

    def do_map(inputs, output):
        """Write func(group) into output[i] for each requested label."""
        nidx = sorted_index.size
        # Locate the contiguous [l, h) slice of each requested label.
        lo = numpy.searchsorted(labels, sorted_index, side='left')
        hi = numpy.searchsorted(labels, sorted_index, side='right')
        for (i, l, h) in zip(range(nidx), lo, hi):
            if (l == h):
                continue  # label absent from the data: keep the default
            output[i] = func(*[inp[l:h] for inp in inputs])

    temp = numpy.empty(index.shape, out_dtype)
    temp[:] = default
    if not pass_positions:
        do_map([input], temp)
    else:
        do_map([input, positions], temp)
    # Undo the index sort so results line up with the caller's order.
    output = numpy.zeros(index.shape, out_dtype)
    output[index_order] = temp
    if as_scalar:
        output = output[0]
    return output
.operations('failure')
def test_explicit_example_failure_output(testdir, cli, openapi3_base_url, snapshot_cli):
    """Snapshot the CLI failure output for a schema with an explicit example.

    A minimal OpenAPI 3 schema whose single GET /failure operation carries an
    explicit query-parameter example is written to disk and run against the
    live base URL; the (unsanitized) CLI output must match the snapshot.

    NOTE(review): the dangling `.operations('failure')` line above looks like
    the tail of a stripped pytest marker decorator — confirm against the
    original test module.
    """
    schema = {'openapi': '3.0.0', 'info': {'title': 'Sample API', 'description': 'API description in Markdown.', 'version': '1.0.0'}, 'paths': {'/failure': {'get': {'parameters': [{'in': 'query', 'name': 'key', 'example': 'foo', 'schema': {'type': 'string'}}], 'responses': {'200': {'description': 'OK'}}}}}}
    schema_file = testdir.makefile('.yaml', schema=yaml.dump(schema))
    # --sanitize-output=false keeps raw values in the report for the snapshot.
    assert (cli.run(str(schema_file), f'--base-url={openapi3_base_url}', '--sanitize-output=false') == snapshot_cli)
def ll_heuristic(d):
    """Return a shallow copy of *d*, possibly enabling an LL preprocessing flag.

    When neither ``'llfirstonthefly'`` nor ``'llfirst'`` is already present,
    ``ll_is_good`` is consulted on ``d['I']`` and, if it suggests a flag name,
    that key is set to True in the copy.  The input dict is never mutated.
    """
    result = copy(d)
    I = result['I']
    flag_already_chosen = ('llfirstonthefly' in result) or ('llfirst' in result)
    if not flag_already_chosen:
        hint = ll_is_good(I)
        if hint:
            result[hint] = True
    return result
def test_write_statistics_no_backend():
    """With the statistics backend configured to None, write_statistics must
    report failure (falsy return) rather than writing anything."""
    config.configuration.statistics_output.statistics_backend = None
    statistics = stat._SearchStatistics()
    assert (not statistics.write_statistics())
def test_get_tasks(collaborator_mock):
    """get_tasks must pass through exactly what the client returns:
    (task list, round number, sleep time, quit flag)."""
    expected = (['task_name'], 0, 0, True)
    collaborator_mock.client.get_tasks = mock.Mock(return_value=expected)
    tasks, round_number, sleep_time, time_to_quit = collaborator_mock.get_tasks()
    assert (tasks, round_number, sleep_time, time_to_quit) == expected
class WarmMultiStepLR(lr_scheduler.MultiStepLR):
    """MultiStepLR with an optional linear warmup.

    For epochs up to ``warmup`` the base learning rates are scaled linearly
    from ``1/linear`` (epoch 0) to 1 (epoch ``warmup``); afterwards the
    standard MultiStepLR milestone decay applies.  ``linear == 1`` disables
    the warmup entirely.
    """
    def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=(- 1), linear=1, warmup=5):
        # Clamp so a divisor below 1 cannot invert the ramp; must be set
        # before super().__init__, which may already invoke get_lr().
        self.linear = max(linear, 1)
        self.warmup = warmup
        super().__init__(optimizer, milestones, gamma=gamma, last_epoch=last_epoch)
    def get_lr(self):
        # Warmup disabled or already past the warmup window: plain MultiStepLR.
        if ((self.linear == 1) or (self.last_epoch > self.warmup)):
            return super().get_lr()
        else:
            # Linear ramp: scale = (1 + epoch*(linear-1)/warmup) / linear,
            # i.e. 1/linear at epoch 0 and exactly 1 at epoch == warmup.
            gradual = ((self.linear - 1) / self.warmup)
            scale = ((1 + (self.last_epoch * gradual)) / self.linear)
            return [(scale * base_lr) for base_lr in self.base_lrs]
def find_sublist(a, b):
    """Return the first index at which list *b* occurs as a contiguous
    sublist of *a*, or None if it never does.

    An empty *b* matches at index 0 of any non-empty *a* (and of an empty *a*
    the result is None, since no start index exists).
    """
    width = len(b)
    for start in range(len(a)):
        if a[start:start + width] == b:
            return start
    return None
class YouRM(dspy.Retrieve):
    """Retrieval module backed by the You.com (YDC) search API.

    Fix: the request line was truncated/corrupted (an unterminated f-string
    with the URL missing); it is reconstructed as the standard YDC search
    endpoint ``https://api.ydc-index.io/search?query=...`` — confirm against
    the deployed API version.
    """

    def __init__(self, ydc_api_key=None, k=3):
        """Store the API key (argument or YDC_API_KEY env var) and top-k."""
        super().__init__(k=k)
        if ((not ydc_api_key) and (not os.environ.get('YDC_API_KEY'))):
            raise RuntimeError('You must supply ydc_api_key or set environment variable YDC_API_KEY')
        elif ydc_api_key:
            self.ydc_api_key = ydc_api_key
        else:
            self.ydc_api_key = os.environ['YDC_API_KEY']

    def forward(self, query_or_queries: Union[(str, List[str])]) -> dspy.Prediction:
        """Search each query and return all snippets of the top-k hits.

        Args:
            query_or_queries: a single query string or a list of them.

        Returns:
            dspy.Prediction with ``passages`` set to the collected snippets.
        """
        queries = ([query_or_queries] if isinstance(query_or_queries, str) else query_or_queries)
        docs = []
        for query in queries:
            headers = {'X-API-Key': self.ydc_api_key}
            # Reconstructed endpoint; the query is passed as a URL parameter.
            results = requests.get(f'https://api.ydc-index.io/search?query={query}', headers=headers).json()
            for hit in results['hits'][:self.k]:
                for snippet in hit['snippets']:
                    docs.append(snippet)
        return dspy.Prediction(passages=docs)
class UnmaskedLookup(ContentLookup):
    """Lookup adapter for UnmaskedArray layouts.

    ``CONTENT`` is the offset (relative to this node's slot) of the child
    content's position in the lookup table.
    """
    CONTENT = 0

    @classmethod
    def tolookup(cls, layout, positions):
        """Append this node to *positions* and return its slot index.

        Fix: the method takes ``cls`` and reads ``cls.CONTENT`` but was not
        declared a classmethod, so ``UnmaskedLookup.tolookup(layout, pos)``
        would have bound ``layout`` to ``cls``.
        """
        pos = len(positions)
        positions.append(None)
        # Recurse into the child content (module-level tolookup dispatcher).
        positions[(pos + cls.CONTENT)] = tolookup(layout.content, positions)
        return pos

    def tolayout(self, lookup, pos, fields):
        """Rebuild an UnmaskedArray from the lookup table at slot *pos*."""
        content = self.contenttype.tolayout(lookup, lookup.positions[(pos + self.CONTENT)], fields)
        return ak.contents.UnmaskedArray(content, parameters=self.parameters)
def make_pca_scorers(caller):
    """Attach PCA scoring callbacks to *caller*.

    ``train_scorer`` reports the total explained variance ratio of the fitted
    estimator; ``test_scorer`` evaluates explained variance of the validation
    data in the transformed space.  Both ignore their two positional args
    (kept for the scorer-callback signature).
    """
    def train_scorer(_unused_a, _unused_b):
        return caller.estimator.explained_variance_ratio_.sum()

    def test_scorer(_unused_a, _unused_b):
        return explained_variance_ratio(caller.estimator.transform(caller.X_val), caller.X_val)

    caller.train_scorer = train_scorer
    caller.test_scorer = test_scorer
_utils.test(debug=True)
def test_assign_chained_involve_self():
    """Chained assignment whose target also appears on the RHS:
    ``a = b = a + b`` must evaluate the RHS once, then bind both targets.

    NOTE(review): the bare ``_utils.test(debug=True)`` line above looks like a
    stripped ``@test_utils.test(...)`` decorator — confirm against the
    original test module.
    """
    def foo():
        a = 1
        b = 1
        # Both names must end up as the single evaluation of (a + b) == 2.
        a = b = (a + b)
        assert (a == 2)
        assert (b == 2)
    foo()
class NPA(nn.Module):
    """Knowledge-tracing model: BiLSTM over interaction embeddings with an
    additive attention pooled against the target question embedding.

    Interactions encode (question id, correctness) in a single integer:
    ids above ``question_num`` denote one response class (see
    ``_transform_interaction_to_question_id_and_response``).
    """
    def __init__(self, input_dim=128, hidden_dim=128, attn_dim=256, fc_dim=512, num_layers=1, question_num=QUESTION_NUM[ARGS.dataset_name], dropout=0.0):
        super().__init__()
        self._hidden_dim = hidden_dim
        self._num_layers = num_layers
        self._question_num = question_num
        # Bidirectional encoder over the interaction sequence.
        self._lstm = nn.LSTM(input_size=input_dim, hidden_size=hidden_dim, num_layers=num_layers, batch_first=True, dropout=dropout, bidirectional=True)
        # +1 embedding rows reserve index 0 (PAD_INDEX) for padding.
        self._response_embedding_layer = nn.Embedding(num_embeddings=(2 + 1), embedding_dim=input_dim, padding_idx=PAD_INDEX)
        self._question_embedding_layer = nn.Embedding(num_embeddings=(question_num + 1), embedding_dim=input_dim, padding_idx=PAD_INDEX)
        # Additive attention: score = w^T tanh(W_l h_t + W_q q).
        self._attention_lstm = nn.Linear(in_features=(2 * hidden_dim), out_features=attn_dim, bias=False)
        self._attention_question = nn.Linear(in_features=input_dim, out_features=attn_dim, bias=False)
        self._attention_weight = nn.Linear(in_features=attn_dim, out_features=1, bias=False)
        # Head over [attended user vector ; question vector].
        self._fc_layers = FC(((2 * hidden_dim) + input_dim), fc_dim)
        self._tanh = nn.Tanh()
        self._softmax = nn.Softmax(dim=(- 1))
        # Xavier init for all weight matrices (dim > 1 skips biases).
        for p in self.parameters():
            if (p.dim() > 1):
                nn.init.xavier_uniform_(p)
    def init_hidden(self, batch_size):
        """Return zero (h0, c0) states shaped for the bidirectional LSTM."""
        weight = next(self.parameters())
        return (weight.new_zeros((2 * self._num_layers), batch_size, self._hidden_dim), weight.new_zeros((2 * self._num_layers), batch_size, self._hidden_dim))
    def _transform_interaction_to_question_id_and_response(self, interaction):
        """Split an interaction code into (question_id, response).

        Ids above question_num map back into [1, question_num]; response is
        2 for those and 1 otherwise, with padding positions forced to 0.
        """
        question_id = (interaction - (self._question_num * (interaction > self._question_num).long()))
        padding = (question_id == PAD_INDEX)
        response = (interaction <= self._question_num).long()
        response = (2 - response)
        response = (response * (~ padding).long())
        return (question_id, response)
    def _embedding(self, interaction):
        """Elementwise product of question and response embeddings."""
        (question_id, response) = self._transform_interaction_to_question_id_and_response(interaction)
        question_vector = self._question_embedding_layer(question_id)
        response_vector = self._response_embedding_layer(response)
        return torch.mul(question_vector, response_vector)
    def _attention(self, lstm_output, question_vector):
        """Pool LSTM outputs with additive attention keyed on the question."""
        attention_score = (self._attention_lstm(lstm_output) + self._attention_question(question_vector))
        attention_score = self._tanh(attention_score)
        attention_score = self._attention_weight(attention_score).squeeze((- 1))
        alpha = self._softmax(attention_score).unsqueeze(1)
        return torch.matmul(alpha, lstm_output)
    def forward(self, input, target_id):
        """Predict the response logit for `target_id` given the interaction
        sequence `input` (batch-first)."""
        batch_size = input.shape[0]
        hidden = self.init_hidden(batch_size)
        input = self._embedding(input)
        question_vector = self._question_embedding_layer(target_id)
        (output, _) = self._lstm(input, (hidden[0].detach(), hidden[1].detach()))
        user_vector = self._attention(output, question_vector)
        user_question_vector = torch.cat([user_vector, question_vector], dim=(- 1)).squeeze(1)
        output = self._fc_layers(user_question_vector)
        return output
class AlignedBottleneck(nn.Module):
    """Two-branch aligned bottleneck block (ResNet-style, expansion 4).

    Branch 1: 1x1 reduce -> one 3x3 conv.  Branch 2: 1x1 reduce to half width
    -> two 3x3 convs.  The branch outputs are concatenated, fused by a 1x1
    conv back to ``planes * expansion``, and added to the (possibly
    downsampled) residual.  With ``radix > 1`` the 3x3 convs become split
    attention convs and stride is realised via average pooling instead.

    NOTE(review): ``make_norm``/``make_conv``/``make_ctx``/``SplAtConv2d``
    are project helpers; their exact semantics are assumed from usage.
    """
    expansion = 4
    def __init__(self, inplanes, planes, base_width=64, cardinality=1, stride=1, dilation=1, radix=1, downsample=None, stride_3x3=False, conv='Conv2d', norm='BN', ctx=''):
        super(AlignedBottleneck, self).__init__()
        # Per-group width D, group count C (ResNeXt-style).
        D = int(math.floor((planes * (base_width / 64.0))))
        C = cardinality
        self.radix = radix
        if ((radix > 1) and ((stride > 1) or (dilation > 1))):
            # Split-attention variant: realise the stride via avg-pool and
            # keep the convs themselves stride-1.
            self.avd_layer = nn.AvgPool2d(3, stride, padding=1)
            stride = 1
        else:
            self.avd_layer = None
        # Branch 1: 1x1 reduce then a single 3x3.
        self.conv1_1 = nn.Conv2d(inplanes, (D * C), kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1_1 = make_norm((D * C), norm=norm.replace('Mix', ''))
        if (radix > 1):
            self.conv1_2 = SplAtConv2d((D * C), (D * C), kernel_size=3, stride=stride, padding=dilation, dilation=dilation, groups=C, bias=False, radix=radix, conv_op=get_conv_op(conv=conv), norm_op=get_norm_op(norm=norm))
        else:
            self.conv1_2 = make_conv((D * C), (D * C), kernel_size=3, stride=stride, dilation=dilation, padding=dilation, groups=C, bias=False, conv=conv)
        # Branch 2: half-width 1x1 reduce then two 3x3 convs.
        self.conv2_1 = nn.Conv2d(inplanes, ((D * C) // 2), kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2_1 = make_norm(((D * C) // 2), norm=norm.replace('Mix', ''))
        if (radix > 1):
            self.conv2_2 = SplAtConv2d(((D * C) // 2), ((D * C) // 2), kernel_size=3, stride=stride, padding=dilation, dilation=dilation, groups=math.ceil((C / 2)), bias=False, radix=radix, conv_op=get_conv_op(conv=conv), norm_op=get_norm_op(norm=norm))
            self.conv2_3 = SplAtConv2d(((D * C) // 2), ((D * C) // 2), kernel_size=3, stride=1, padding=dilation, dilation=dilation, groups=math.ceil((C / 2)), bias=False, radix=radix, conv_op=get_conv_op(conv=conv), norm_op=get_norm_op(norm=norm))
        else:
            # bn2_2/bn_concat only exist (and are only used in forward) when
            # radix == 1; SplAtConv2d carries its own normalisation otherwise.
            self.conv2_2 = make_conv(((D * C) // 2), ((D * C) // 2), kernel_size=3, stride=stride, padding=dilation, dilation=dilation, groups=math.ceil((C / 2)), bias=False, conv=conv)
            self.bn2_2 = make_norm(((D * C) // 2), norm=norm, an_k=(10 if (planes < 256) else 20))
            self.conv2_3 = make_conv(((D * C) // 2), ((D * C) // 2), kernel_size=3, stride=1, padding=dilation, dilation=dilation, groups=math.ceil((C / 2)), bias=False, conv=conv)
        if (radix == 1):
            self.bn_concat = make_norm(((D * C) + ((D * C) // 2)), norm=norm, an_k=(10 if (planes < 256) else 20))
        # Fuse the concatenated branches back to the output width.
        self.conv = nn.Conv2d(((D * C) + ((D * C) // 2)), (planes * self.expansion), kernel_size=1, stride=1, padding=0, bias=False)
        self.bn = make_norm((planes * self.expansion), norm=norm.replace('Mix', ''))
        # Optional context (e.g. SE-style) module on the fused output.
        self.ctx = make_ctx((planes * self.expansion), int(((planes * self.expansion) * 0.0625)), ctx=ctx)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
    def forward(self, x):
        residual = x
        # Branch 1.
        branch1 = self.conv1_1(x)
        branch1 = self.bn1_1(branch1)
        branch1 = self.relu(branch1)
        branch1 = self.conv1_2(branch1)
        # Branch 2.
        branch2 = self.conv2_1(x)
        branch2 = self.bn2_1(branch2)
        branch2 = self.relu(branch2)
        branch2 = self.conv2_2(branch2)
        if (self.radix == 1):
            branch2 = self.bn2_2(branch2)
            branch2 = self.relu(branch2)
        branch2 = self.conv2_3(branch2)
        # Concatenate branches, normalise (radix==1 only), optional avg-pool
        # stride, then 1x1 fuse + context + residual add.
        out = torch.cat((branch1, branch2), 1)
        if (self.radix == 1):
            out = self.bn_concat(out)
            out = self.relu(out)
        if (self.avd_layer is not None):
            out = self.avd_layer(out)
        out = self.conv(out)
        out = self.bn(out)
        if (self.ctx is not None):
            out = self.ctx(out)
        if (self.downsample is not None):
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class BaseDataset(Dataset):
    """Folder-backed image dataset.

    In the 'infer' stage only image paths are collected.  Otherwise each
    immediate sub-directory of ``root_path`` names a class, and every
    ``*.jpg`` found one level below any walked directory is indexed with the
    label of its parent directory name.
    """

    def __init__(self, root_path='', transform=None, target_transform=None, stage='train'):
        super(BaseDataset, self).__init__()
        self.root_path = root_path
        self.transform = transform
        self.target_transform = target_transform
        self.stage = stage
        self._set_files()

    def _set_files(self):
        # Walk the tree once, collecting image paths (and labels unless inferring).
        self._imgs = []
        self._labels = []
        if self.stage == 'infer':
            for walk_root, dirnames, _ in sorted(os.walk(self.root_path)):
                for dirname in sorted(dirnames):
                    self._imgs.extend(glob.glob(os.path.join(walk_root, dirname, '*.jpg')))
        else:
            # Class names are the immediate sub-directories of root_path.
            self.cls_label = [entry.name for entry in os.scandir(self.root_path) if entry.is_dir()]
            for walk_root, dirnames, _ in sorted(os.walk(self.root_path)):
                for dirname in sorted(dirnames):
                    found = glob.glob(os.path.join(walk_root, dirname, '*.jpg'))
                    self._imgs.extend(found)
                    self._labels.extend([self.cls_label.index(dirname) for _ in found])

    def __getitem__(self, idx):
        image = Image.open(self._imgs[idx]).convert('RGB')
        label = self._labels[idx]
        if self.transform is not None:
            image = self.transform(image)
        if self.target_transform is not None:
            label = self.target_transform(label)
        return (image, label)

    def __len__(self):
        return len(self._imgs)

    def __repr__(self):
        lines = [
            'Dataset: ' + self.__class__.__name__,
            '    # data: {}'.format(self.__len__()),
            '    Stage: {}'.format(self.stage),
            '    Root_path: {}'.format(self.root_path),
        ]
        return '\n'.join(lines)
def test_illegal_batch_size(foundation_cache):
    """A non-numeric constituency_batch_size is only rejected when the
    constituency processor is actually loaded.

    The first pipeline omits the constituency processor, so the bogus value
    is ignored and construction succeeds; the second includes it and must
    raise ValueError.
    """
    stanza.Pipeline('en', model_dir=TEST_MODELS_DIR, processors='tokenize,pos', constituency_batch_size='zzz', foundation_cache=foundation_cache)
    with pytest.raises(ValueError):
        stanza.Pipeline('en', model_dir=TEST_MODELS_DIR, processors='tokenize,pos,constituency', constituency_batch_size='zzz', foundation_cache=foundation_cache)
class DataModuleFromConfig(pl.LightningDataModule):
    """LightningDataModule whose datasets are built from config dicts.

    A dataloader method is attached only for the splits whose config is
    provided; ``wrap=True`` wraps each dataset in WrappedDataset after setup.
    Iterable datasets (Txt2ImgIterableBaseDataset) get a worker init function
    and are never shuffled.
    """
    def __init__(self, batch_size, train=None, validation=None, test=None, predict=None, wrap=False, num_workers=None, shuffle_test_loader=False, use_worker_init_fn=False, shuffle_val_dataloader=False):
        super().__init__()
        self.batch_size = batch_size
        self.dataset_configs = dict()
        # Default worker count scales with batch size.
        self.num_workers = (num_workers if (num_workers is not None) else (batch_size * 2))
        self.use_worker_init_fn = use_worker_init_fn
        # Register only the splits that were configured; Lightning discovers
        # the corresponding *_dataloader attributes.
        if (train is not None):
            self.dataset_configs['train'] = train
            self.train_dataloader = self._train_dataloader
        if (validation is not None):
            self.dataset_configs['validation'] = validation
            self.val_dataloader = self._val_dataloader
        if (test is not None):
            self.dataset_configs['test'] = test
            self.test_dataloader = partial(self._test_dataloader, shuffle=shuffle_test_loader)
        if (predict is not None):
            self.dataset_configs['predict'] = predict
            self.predict_dataloader = self._predict_dataloader
        self.wrap = wrap
    def prepare_data(self):
        """Instantiate each configured dataset once (e.g. to trigger downloads)."""
        for data_cfg in self.dataset_configs.values():
            instantiate_from_config(data_cfg)
    def setup(self, stage=None):
        """Build the actual dataset objects (optionally wrapped)."""
        self.datasets = dict(((k, instantiate_from_config(self.dataset_configs[k])) for k in self.dataset_configs))
        if self.wrap:
            for k in self.datasets:
                self.datasets[k] = WrappedDataset(self.datasets[k])
    def _train_dataloader(self):
        is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset)
        if (is_iterable_dataset or self.use_worker_init_fn):
            init_fn = worker_init_fn
        else:
            init_fn = None
        # Iterable datasets cannot be shuffled by the DataLoader.
        return DataLoader(self.datasets['train'], batch_size=self.batch_size, num_workers=self.num_workers, shuffle=(False if is_iterable_dataset else True), worker_init_fn=init_fn, drop_last=True)
    def _val_dataloader(self):
        if (isinstance(self.datasets['validation'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn):
            init_fn = worker_init_fn
        else:
            init_fn = None
        return DataLoader(self.datasets['validation'], batch_size=self.batch_size, num_workers=self.num_workers, worker_init_fn=init_fn, shuffle=False, drop_last=True)
    def _test_dataloader(self, shuffle=False):
        # NOTE(review): checks the 'train' dataset for iterability here,
        # not 'test' — presumably intentional (splits share a type); confirm.
        is_iterable_dataset = isinstance(self.datasets['train'], Txt2ImgIterableBaseDataset)
        if (is_iterable_dataset or self.use_worker_init_fn):
            init_fn = worker_init_fn
        else:
            init_fn = None
        shuffle = (shuffle and (not is_iterable_dataset))
        return DataLoader(self.datasets['test'], batch_size=self.batch_size, num_workers=self.num_workers, worker_init_fn=init_fn, shuffle=shuffle)
    def _predict_dataloader(self, shuffle=False):
        if (isinstance(self.datasets['predict'], Txt2ImgIterableBaseDataset) or self.use_worker_init_fn):
            init_fn = worker_init_fn
        else:
            init_fn = None
        return DataLoader(self.datasets['predict'], batch_size=self.batch_size, num_workers=self.num_workers, worker_init_fn=init_fn)
def skeleton_discovery(data, alpha, indep_test, stable=True, background_knowledge=None, verbose=False, show_progress=True):
    """PC-algorithm skeleton discovery via conditional-independence tests.

    Starting from a complete undirected graph over the columns of *data*,
    edges x—y are removed whenever x and y are independent given some subset
    S of x's neighbours, with |S| growing from 0 upward.  With ``stable=True``
    (PC-stable) removals within a depth are deferred and applied at the end of
    the depth, making the result order-independent.

    Args:
        data: (n_samples, n_vars) numpy array.
        alpha: significance level in (0, 1); p > alpha means "independent".
        indep_test: CI test function (chisq/gsq trigger discrete handling).
        stable: use the PC-stable deferred-removal variant.
        background_knowledge: optional forbidden-edge constraints.
        verbose, show_progress: logging / tqdm progress output.

    Returns:
        CausalGraph with the discovered skeleton and recorded sepsets.
    """
    assert (type(data) == np.ndarray)
    assert (0 < alpha < 1)
    no_of_var = data.shape[1]
    cg = CausalGraph(no_of_var)
    cg.set_ind_test(indep_test)
    # Hash of the raw data, used by the CI-test result cache.
    cg.data_hash_key = hash(str(data))
    if ((indep_test == chisq) or (indep_test == gsq)):
        # Discrete tests need integer category codes and cardinalities.
        def _unique(column):
            return np.unique(column, return_inverse=True)[1]
        cg.is_discrete = True
        cg.data = np.apply_along_axis(_unique, 0, data).astype(np.int64)
        cg.cardinalities = (np.max(cg.data, axis=0) + 1)
    else:
        cg.data = data
    depth = (- 1)
    pbar = (tqdm(total=no_of_var) if show_progress else None)
    # Increase conditioning-set size until no node has enough neighbours.
    while ((cg.max_degree() - 1) > depth):
        depth += 1
        edge_removal = []  # deferred removals for the PC-stable variant
        if show_progress:
            pbar.reset()
        for x in range(no_of_var):
            if show_progress:
                pbar.update()
            if show_progress:
                pbar.set_description(f'Depth={depth}, working on node {x}')
            Neigh_x = cg.neighbors(x)
            if (len(Neigh_x) < (depth - 1)):
                continue
            for y in Neigh_x:
                knowledge_ban_edge = False
                sepsets = set()
                # Background knowledge forbidding both directions bans the edge.
                if ((background_knowledge is not None) and (background_knowledge.is_forbidden(cg.G.nodes[x], cg.G.nodes[y]) and background_knowledge.is_forbidden(cg.G.nodes[y], cg.G.nodes[x]))):
                    knowledge_ban_edge = True
                if knowledge_ban_edge:
                    if (not stable):
                        # Remove immediately (order-dependent PC).
                        edge1 = cg.G.get_edge(cg.G.nodes[x], cg.G.nodes[y])
                        if (edge1 is not None):
                            cg.G.remove_edge(edge1)
                        edge2 = cg.G.get_edge(cg.G.nodes[y], cg.G.nodes[x])
                        if (edge2 is not None):
                            cg.G.remove_edge(edge2)
                        append_value(cg.sepset, x, y, ())
                        append_value(cg.sepset, y, x, ())
                        break
                    else:
                        edge_removal.append((x, y))
                        edge_removal.append((y, x))
                # Test independence given every size-`depth` subset of the
                # other neighbours of x.
                Neigh_x_noy = np.delete(Neigh_x, np.where((Neigh_x == y)))
                for S in combinations(Neigh_x_noy, depth):
                    p = cg.ci_test(x, y, S)
                    if (p > alpha):
                        # Independent: drop the edge and record S as a sepset.
                        if verbose:
                            print(('%d ind %d | %s with p-value %f\n' % (x, y, S, p)))
                        if (not stable):
                            edge1 = cg.G.get_edge(cg.G.nodes[x], cg.G.nodes[y])
                            if (edge1 is not None):
                                cg.G.remove_edge(edge1)
                            edge2 = cg.G.get_edge(cg.G.nodes[y], cg.G.nodes[x])
                            if (edge2 is not None):
                                cg.G.remove_edge(edge2)
                            append_value(cg.sepset, x, y, S)
                            append_value(cg.sepset, y, x, S)
                            break
                        else:
                            edge_removal.append((x, y))
                            edge_removal.append((y, x))
                            for s in S:
                                sepsets.add(s)
                    elif verbose:
                        print(('%d dep %d | %s with p-value %f\n' % (x, y, S, p)))
                append_value(cg.sepset, x, y, tuple(sepsets))
                append_value(cg.sepset, y, x, tuple(sepsets))
        if show_progress:
            pbar.refresh()
        # PC-stable: apply all removals collected during this depth at once.
        for (x, y) in list(set(edge_removal)):
            edge1 = cg.G.get_edge(cg.G.nodes[x], cg.G.nodes[y])
            if (edge1 is not None):
                cg.G.remove_edge(edge1)
    if show_progress:
        pbar.close()
    return cg
class SawyerReachWallV2Policy(Policy):
    """Scripted reach-behind-wall policy: move the hand to the goal, lifting
    over the wall region first when necessary.

    Fix: ``_parse_obs`` and ``_desired_pos`` take no ``self`` yet were called
    via ``self._...``; without ``@staticmethod`` the observation argument
    would have been bound to ``self``.
    """

    @staticmethod
    def _parse_obs(obs):
        # Observation layout: [hand xyz | puck xyz | unused 3 | goal xyz...].
        return {'hand_pos': obs[:3], 'puck_pos': obs[3:6], 'goal_pos': obs[9:], 'unused_info': obs[6:9]}

    def get_action(self, obs):
        """Return the action array: xyz delta toward the target, gripper open."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=5.0)
        action['grab_effort'] = 0.0  # pure reaching task: never grasp
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        pos_hand = o_d['hand_pos']
        pos_goal = o_d['goal_pos']
        # While the hand is low and inside the wall region, target a point
        # well above the goal so the hand clears the wall first.
        if (((- 0.1) <= pos_hand[0] <= 0.3) and (0.6 <= pos_hand[1] <= 0.8) and (pos_hand[2] < 0.25)):
            return (pos_goal + np.array([0.0, 0.0, 1.0]))
        return pos_goal
class ModelCatalogHandler(PathHandler):
    """PathHandler resolving ``catalog://`` URIs through the ModelCatalog."""

    PREFIX = 'catalog://'

    def _get_supported_prefixes(self):
        # This handler owns exactly one URI scheme.
        return [self.PREFIX]

    def _get_local_path(self, path):
        logger = logging.getLogger(__name__)
        # Strip the scheme, resolve via the catalog, then delegate the actual
        # (possibly remote) path to PathManager.
        entry_name = path[len(self.PREFIX):]
        catalog_path = ModelCatalog.get(entry_name)
        logger.info('Catalog entry {} points to {}'.format(path, catalog_path))
        return PathManager.get_local_path(catalog_path)

    def _open(self, path, mode='r'):
        local_path = self._get_local_path(path)
        return PathManager.open(local_path, mode)
class MyDataset(data.Dataset):
    """Minimal in-memory dataset over parallel image/label sequences.

    ``ids`` and ``timestep`` are stored for bookkeeping but do not affect
    indexing.
    """

    def __init__(self, images, labels, ids, timestep):
        self.images = images
        self.labels = labels
        self.ids = ids
        self.timestep = timestep

    def __getitem__(self, index):
        # Return the (image, label) pair at *index*.
        return (self.images[index], self.labels[index])

    def __len__(self):
        return len(self.images)
def pos_reg(w, lambda_pos, filter_len):
    """Positional regularizer penalizing weight mass far from the filter center.

    Builds a symmetric ramp of per-position coefficients (largest at both ends
    of a length-``filter_len`` filter, scaled so the outermost position weighs
    ``lambda_pos``) and sums it against the absolute weights reduced over
    axes (0, 2, 3).

    NOTE(review): assumes ``K`` is the Keras backend and that the filter
    position of ``w`` lies on axis 1 — confirm against the layer that installs
    this regularizer.  ``filter_len / 2`` is float division, so an odd
    ``filter_len`` gives fractional ramp endpoints — verify intended parity.
    """
    location_lambda = (K.cast(K.concatenate([K.arange((filter_len / 2), stop=0, step=(- 1)), K.arange(start=1, stop=((filter_len / 2) + 1))]), 'float32') * (lambda_pos / (filter_len / 2)))
    location_penalty = K.sum((location_lambda * K.sum(K.abs(w), axis=(0, 2, 3))))
    return location_penalty
class PolymakeAbstract(ExtraTabCompletion, Interface):
    """Abstract Sage interface to a running polymake interpreter.

    Handles name allocation, value transfer, coercion of Sage objects into
    polymake syntax, tab completion and application switching; concrete
    subclasses supply the actual process/pexpect plumbing.
    """
    def __init__(self, seed=None):
        Interface.__init__(self, 'polymake')
        self._seed = seed
        # Per-application cache of completion candidates.
        self.__tab_completion = {}
    # NOTE(review): the bare `_method` below looks like the remnant of a
    # stripped decorator (e.g. `@cached_method`) — confirm against the
    # original file.
    _method
    def version(self):
        """Return the version string reported by the running polymake."""
        return self.get('$Polymake::Version')
    def __reduce__(self):
        # Pickle as a call to the module-level reconstruction function.
        return (reduce_load_Polymake, ())
    def _object_class(self):
        """Class used to wrap polymake values on the Sage side."""
        return PolymakeElement
    def _function_element_class(self):
        """Class used to wrap polymake methods bound to an element."""
        return PolymakeFunctionElement
    def function_call(self, function, args=None, kwds=None):
        """Call *function* in polymake with converted args/kwds."""
        (args, kwds) = self._convert_args_kwds(args, kwds)
        self._check_valid_function_name(function)
        # Keyword arguments are rendered as perl `key=>value` pairs.
        s = self._function_call_string(function, [s.name() for s in args], ['{}=>{}'.format(key, value.name()) for (key, value) in kwds.items()])
        return self(s)
    def _function_call_string(self, function, args, kwds):
        """Render a polymake call for the given already-stringified pieces."""
        if kwds:
            if args:
                call_str = '{}({}, {});'.format(function, ','.join(list(args)), ','.join(list(kwds)))
                return call_str
            return '{}({});'.format(function, ','.join(list(kwds)))
        return '{}({});'.format(function, ','.join(list(args)))
    def _coerce_impl(self, x, use_special=True):
        """Coerce the Sage/Python object *x* into a polymake element.

        Dicts become perl hashes (keeping the converted pairs alive on the
        result); exact Sage numbers pass through as literals; quadratic
        number field elements use their own converter; other inexact values
        fall back to RDF floats; anything else defers to the base class.
        """
        if isinstance(x, dict):
            A = []
            z = {}
            cls = self._object_class()
            def convert(y):
                if isinstance(y, cls):
                    return y
                else:
                    return self(y)
            for (k, v) in x.items():
                k = convert(k)
                v = convert(v)
                z[k] = v
                A.append('{}=>{}'.format(k.name(), v.name()))
            r = self.new((('{' + ','.join(A)) + '}'))
            # Keep the converted members alive as long as the hash lives.
            r.__sage_dict = z
            return r
        import sage.rings.abc
        from sage.rings.integer import Integer
        from sage.rings.rational import Rational
        from sage.rings.real_double import RDF
        def to_str(x):
            # Recursively render nested lists / numbers as polymake syntax.
            if isinstance(x, list):
                s = '['
                for y in x:
                    s += (to_str(y) + ', ')
                s += ']'
                return s
            if isinstance(x, (Integer, Rational, int)):
                return '{}'.format(x)
            parent = None
            try:
                parent = x.parent()
            except AttributeError:
                pass
            if isinstance(parent, sage.rings.abc.NumberField_quadratic):
                return x._polymake_init_()
            try:
                # Exact rings other than the ones above are unsupported here.
                if x.parent().is_exact():
                    raise NotImplementedError
            except AttributeError:
                pass
            try:
                # Inexact values are sent as double-precision floats.
                x = RDF(x)
                return '{}'.format(x)
            except (TypeError, ValueError):
                pass
            raise NotImplementedError
        try:
            return self.new(to_str(x))
        except NotImplementedError:
            pass
        return super()._coerce_impl(x, use_special=use_special)
    def console(self):
        raise NotImplementedError('Please use polymake_console() function or the .interact() method')
    def _install_hints(self):
        """Message shown when the polymake executable is missing."""
        return (((('Please install the optional polymake package for sage' + os.linesep) + 'or install polymake system-wide') + os.linesep) + "(use the shell command 'sage --info polymake' for more information)")
    def _start(self):
        # Default application plus perl helpers used by the interface.
        self.application('polytope')
        self.eval('use Scalar::Util qw(reftype);')
        self.eval('use Scalar::Util qw(blessed);')
    def _assign_symbol(self):
        return '='
    def _equality_symbol(self):
        return '=='
    def _read_in_file_command(self, filename):
        return 'eval read_file "{}";\n'.format(filename)
    def _next_var_name(self):
        """Return a fresh (or recycled) interface-side variable name."""
        if self._available_vars:
            return self._available_vars.pop(0)
        try:
            self.__seq += 1
        except AttributeError:
            self.__seq = 0
        return 'SAGE{}'.format(self.__seq)
    def clear(self, var):
        # Recycle the name rather than deleting it in polymake.
        self._available_vars.append(_name_pattern.search(var).group())
    def _create(self, value, name=None):
        """Assign *value* to a (new) variable; scalars get a `$`-sigil access."""
        name = (self._next_var_name() if (name is None) else name)
        self.set(name, value)
        # Scalar results are addressed as $NAME[0], containers by bare name.
        if (self.eval('print scalar {};'.format(name)).strip() == '1'):
            return (('$' + name) + '[0]')
        return ('' + name)
    def set(self, var, value):
        """Evaluate the assignment ``var = value`` in polymake."""
        if isinstance(value, str):
            value = value.strip().rstrip(';').strip()
        cmd = '{}{}({});'.format(var, self._assign_symbol(), value)
        self.eval(cmd)
    def get(self, cmd):
        """Print *cmd* in polymake and return the stripped output."""
        return self.eval('print {};'.format(cmd)).strip()
    def help(self, topic, pager=True):
        """Show polymake's help for *topic*, optionally through a pager."""
        H = self.eval('help("{}");\n'.format(topic))
        if (not H):
            raise PolymakeError("unknown help topic '{}'".format(topic))
        if pager:
            from IPython.core.page import page
            page(H, start=0)
        else:
            return H
    def _tab_completion(self):
        """Return (cached) completion names for the current application."""
        if (not self.is_running()):
            self._start()
        try:
            return self.__tab_completion[self._application]
        except KeyError:
            pass
        s = self.eval("apropos '';").split('\n')
        out = []
        for name in s:
            # Keep common/core functions plus the active application's own.
            if (name.startswith('/common/functions/') or name.startswith('/core/functions') or name.startswith((('/' + self._application) + '/functions/'))):
                out.append(name.split('/')[(- 1)])
        self.__tab_completion[self._application] = sorted(out)
        return self.__tab_completion[self._application]
    def application(self, app):
        """Switch the active polymake application (validated against a whitelist)."""
        if (app not in ['common', 'fulton', 'group', 'matroid', 'topaz', 'fan', 'graph', 'ideal', 'polytope', 'tropical']):
            raise ValueError("Unknown polymake application '{}'".format(app))
        self._application = app
        self.eval('application "{}";'.format(app))
    def new_object(self, name, *args, **kwds):
        """Construct a polymake object ``new <name>(...)``, caching constructors."""
        try:
            f = self.__new[name]
        except AttributeError:
            self.__new = {}
            f = self.__new[name] = self._function_class()(self, 'new {}'.format(name))
        except KeyError:
            f = self.__new[name] = self._function_class()(self, 'new {}'.format(name))
        return f(*args, **kwds)
def lambda_B_calc(classes, table, TOP, POP):
    """Goodman & Kruskal's lambda B from a confusion table.

    ``table[i]`` maps predicted classes to counts for actual class *i*,
    ``TOP`` holds per-class prediction totals and ``POP`` the population.
    Returns the string ``'None'`` on any failure (e.g. division by zero when
    the largest prediction total equals the population).
    """
    try:
        max_response = max(list(TOP.values()))
        row_max_sum = sum(max(list(table[c].values())) for c in classes)
        return (row_max_sum - max_response) / (POP - max_response)
    except Exception:
        return 'None'
def read_text(file: Path) -> str:
    """Look up the transcription for a LibriSpeech-style audio file.

    The transcript file shares the audio file's path up to the last ``-``
    (e.g. ``84-121550-0000.flac`` -> ``84-121550.trans.txt``) and contains
    ``<utterance-id> <text>`` lines.  Returns the text of the matching line,
    or None (after logging a warning) when no line matches.
    """
    parts = str(file).split('-')
    transcript_path = '-'.join(parts[:-1]) + '.trans.txt'
    utterance_id = file.stem.replace('.flac', '')
    with open(transcript_path, 'r') as handle:
        for raw_line in handle:
            if raw_line.split(' ')[0] == utterance_id:
                # Drop the trailing newline, then everything before the text.
                return raw_line[:-1].split(' ', 1)[1]
    logger.warning(f'Transcription of {file} not found!')
class Wav2Vec2ConformerConfig(PretrainedConfig):
    """Configuration for a Wav2Vec2-Conformer model.

    Holds every hyper-parameter of the convolutional feature extractor,
    the conformer encoder, SpecAugment masking, the codevector quantizer
    and the task heads (CTC, classification, x-vector, adapter).
    Unrecognized keyword arguments are forwarded to ``PretrainedConfig``.
    """
    model_type = 'wav2vec2-conformer'

    def __init__(self, vocab_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-05, feat_extract_norm='group', feat_extract_activation='gelu', conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction='sum', ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, position_embeddings_type='relative', rotary_embedding_base=10000, max_source_positions=5000, conv_depthwise_kernel_size=31, conformer_conv_dropout=0.1, **kwargs):
        """Store all hyper-parameters and validate the conv-layer config."""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # --- convolutional feature extractor ---
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        # derived from conv_dim; conv_stride/conv_kernel must match it
        self.num_feat_extract_layers = len(self.conv_dim)
        # --- conformer encoder ---
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.max_source_positions = max_source_positions
        self.position_embeddings_type = position_embeddings_type
        self.rotary_embedding_base = rotary_embedding_base
        # the three conv lists must describe the same number of layers
        if ((len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers)):
            raise ValueError(f'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
        self.conv_depthwise_kernel_size = conv_depthwise_kernel_size
        self.conformer_conv_dropout = conformer_conv_dropout
        # --- SpecAugment masking ---
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # --- quantizer / contrastive pretraining ---
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # --- CTC head ---
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # --- adapter ---
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = (output_hidden_size or hidden_size)
        # --- classification / x-vector heads ---
        self.classifier_proj_size = classifier_proj_size
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    def inputs_to_logits_ratio(self):
        """Product of the conv strides: input samples per output frame.

        NOTE(review): likely lost a @property decorator in this copy
        (decorators elsewhere in this file are truncated) — confirm
        against the original source.
        """
        return functools.reduce(operator.mul, self.conv_stride, 1)
# NOTE(review): the bare `_model` expression below looks like a truncated
# decorator (decorators elsewhere in this file are damaged the same way;
# probably `@register_model`) — confirm against the original source.
_model
def selecsls60(pretrained=False, **kwargs):
    """Build the SelecSLS-60 architecture via the shared model factory."""
    return _create_model('selecsls60', pretrained, kwargs)
class EmitGemmUniversalInstance3x():
    """Emits C++ source for a CUTLASS 3.x GemmUniversal kernel instance.

    The emitted code composes a collective mainloop (built by
    ``collective_op_builder``) with a default epilogue and wraps the
    resulting kernel in a named struct.
    """

    def __init__(self, operation_suffix=''):
        # suffix appended to the generated struct name (e.g. to
        # disambiguate multiple instances of the same operation)
        self.operation_suffix = operation_suffix
        # headers required by the generated translation unit
        self.includes = ['cutlass/cutlass.h', 'cute/tensor.hpp', 'cute/atom/mma_atom.hpp', 'cutlass/numeric_types.h', 'cutlass/gemm/kernel/gemm_universal.hpp', 'cutlass/gemm/collective/collective_builder.hpp', 'cutlass/epilogue/collective/default_epilogue.hpp', 'cutlass/epilogue/thread/linear_combination.h']
        # ${...} placeholders are filled by SubstituteTemplate in emit()
        self.gemm_template = '\nusing namespace cute;\n\n${collective_op}\n\nusing EpilogueOp = cutlass::epilogue::collective::DefaultEpilogue<\n cutlass::gemm::TagToStrideC_t<${layout_c}>,\n cutlass::gemm::TagToStrideC_t<${layout_c}>,\n ${epilogue_functor}\n >;\n\n// Gemm operator ${operation_name}\nusing ${operation_name}_base = cutlass::gemm::kernel::GemmUniversal<\n Shape<int,int,int,int>,\n CollectiveOp,\n EpilogueOp\n>;\n\n// Define named type\nstruct ${operation_name}${operation_suffix} : \n public ${operation_name}_base { };\n'

    def emit(self, operation):
        """Return the C++ source for *operation* by filling the template."""
        (instance_layout_A, instance_layout_B, instance_layout_C) = (operation.A.layout, operation.B.layout, operation.C.layout)
        epilogue_functor = operation.epilogue_functor.emit()
        # the collective mainloop declaration (defines `CollectiveOp`)
        collective_op = collective_op_builder.build(operation)
        values = {'operation_name': operation.procedural_name(), 'operation_suffix': self.operation_suffix, 'collective_op': collective_op, 'element_a': DataTypeTag[operation.A.element], 'layout_a': LayoutTag[instance_layout_A], 'element_b': DataTypeTag[operation.B.element], 'layout_b': LayoutTag[instance_layout_B], 'element_c': DataTypeTag[operation.C.element], 'layout_c': LayoutTag[instance_layout_C], 'epilogue_functor': epilogue_functor, 'element_output': DataTypeTag[operation.epilogue_functor.element_output], 'element_accumulator': DataTypeTag[operation.accumulator_type()], 'element_epilogue': DataTypeTag[operation.epilogue_functor.element_epilogue], 'epilogue_vector_length': str(operation.epilogue_functor.epilogue_vector_length), 'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class], 'arch': ('cutlass::arch::Sm%d' % operation.arch), 'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]), 'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]), 'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]), 'cluster_shape_m': str(operation.tile_description.cluster_shape[0]), 'cluster_shape_n': str(operation.tile_description.cluster_shape[1]), 'cluster_shape_k': str(operation.tile_description.cluster_shape[2]), 'align_a': str(operation.A.alignment), 'align_b': str(operation.B.alignment)}
        # NOTE(review): redundant — 'epilogue_functor' is already set above
        values['epilogue_functor'] = operation.epilogue_functor.emit()
        return SubstituteTemplate(self.gemm_template, values)
# NOTE(review): `_spec_function(...)` below appears to be a truncated
# decorator (likely `@run_spec_function('synthetic_efficiency')`) —
# confirm against the original source.
_spec_function('synthetic_efficiency')
def get_synthetic_efficiency_spec(num_prompt_tokens: Optional[int]=None, num_output_tokens: Optional[int]=None, tokenizer: Optional[str]=None, random: Optional[str]=None) -> RunSpec:
    """Build the RunSpec for the synthetic-efficiency benchmark.

    The scenario generates 10 synthetic prompts of `num_prompt_tokens`
    tokens for the given tokenizer; the adapter caps the completion at
    `num_output_tokens` when provided, otherwise uses the default.
    """
    scenario_spec = ScenarioSpec(class_name='helm.benchmark.scenarios.synthetic_efficiency_scenario.SyntheticEfficiencyScenario', args={'num_prompt_tokens': num_prompt_tokens, 'num_instances': 10, 'tokenizer': tokenizer})
    if (num_output_tokens is not None):
        adapter_spec = get_completion_adapter_spec(max_tokens=num_output_tokens, random=random)
    else:
        adapter_spec = get_completion_adapter_spec(random=random)
    return RunSpec(name=f'synthetic_efficiency:random={random}', scenario_spec=scenario_spec, adapter_spec=adapter_spec, metric_specs=(get_basic_metric_specs(['exact_match']) + get_generative_harms_metric_specs()), groups=['synthetic_efficiency'])
class _MeshHandler():
    """Manages the computational mesh during shape optimization.

    Tracks mesh quality, applies and reverts mesh deformations, and
    drives remeshing with Gmsh when configured to do so.
    """

    def __init__(self, db: database.Database, form_handler: _forms.ShapeFormHandler, a_priori_tester: mesh_testing.APrioriMeshTester, a_posteriori_tester: mesh_testing.IntersectionTester) -> None:
        """Read all mesh-related configuration and set up remeshing state."""
        # weak proxy avoids a reference cycle between handler and database
        self.db = weakref.proxy(db)
        self.form_handler = form_handler
        self.a_priori_tester = a_priori_tester
        self.a_posteriori_tester = a_posteriori_tester
        self.mesh = self.db.geometry_db.mesh
        self.deformation_handler = deformations.DeformationHandler(self.mesh, self.a_priori_tester, self.a_posteriori_tester)
        self.dx = self.db.geometry_db.dx
        self.bbtree = self.mesh.bounding_box_tree()
        self.config = self.db.config
        # backing fields for the quality / gmsh-file accessors below
        self._current_mesh_quality = 1.0
        self._gmsh_file = ''
        # bounds used by the a-priori and angle-based deformation checks
        self.volume_change = float(self.config.get('MeshQuality', 'volume_change'))
        self.angle_change = float(self.config.get('MeshQuality', 'angle_change'))
        self.test_for_intersections = self.config.getboolean('ShapeGradient', 'test_for_intersections')
        self.mesh_quality_tol_lower: float = self.config.getfloat('MeshQuality', 'tol_lower')
        self.mesh_quality_tol_upper: float = self.config.getfloat('MeshQuality', 'tol_upper')
        if (self.mesh_quality_tol_lower > (0.9 * self.mesh_quality_tol_upper)):
            _loggers.warning('You are using a lower remesh tolerance (tol_lower) close to the upper one (tol_upper). This may slow down the optimization considerably.')
        self.mesh_quality_measure = self.config.get('MeshQuality', 'measure')
        self.mesh_quality_type = self.config.get('MeshQuality', 'type')
        self.current_mesh_quality: float = quality.compute_mesh_quality(self.mesh, self.mesh_quality_type, self.mesh_quality_measure)
        check_mesh_quality_tolerance(self.current_mesh_quality, self.mesh_quality_tol_upper)
        # KSP options for the DG0 projection solves (diagonal systems)
        self.options_frobenius: _typing.KspOption = {'ksp_type': 'preonly', 'pc_type': 'jacobi', 'pc_jacobi_type': 'diagonal', 'ksp_rtol': 1e-16, 'ksp_atol': 1e-20, 'ksp_max_it': 1000}
        self.trial_dg0 = fenics.TrialFunction(self.db.function_db.dg_function_space)
        self.test_dg0 = fenics.TestFunction(self.db.function_db.dg_function_space)
        self.search_direction_container = fenics.Function(self.db.function_db.control_spaces[0])
        # forms are assembled lazily in _setup_decrease_computation
        self.a_frobenius = None
        self.l_frobenius = None
        self._setup_decrease_computation()
        self.options_prior: _typing.KspOption = {'ksp_type': 'preonly', 'pc_type': 'jacobi', 'pc_jacobi_type': 'diagonal', 'ksp_rtol': 1e-16, 'ksp_atol': 1e-20, 'ksp_max_it': 1000}
        self.transformation_container = fenics.Function(self.db.function_db.control_spaces[0])
        self.A_prior = None
        self.l_prior = None
        # --- remeshing / mesh-output configuration ---
        self.do_remesh: bool = self.config.getboolean('Mesh', 'remesh')
        self.save_optimized_mesh: bool = self.config.getboolean('Output', 'save_mesh')
        if (self.do_remesh or self.save_optimized_mesh):
            self.mesh_directory = pathlib.Path(self.config.get('Mesh', 'gmsh_file')).resolve().parent
        self.gmsh_file: str = ''
        self.remesh_counter = 0
        if (self.do_remesh and self.db.parameter_db.temp_dict):
            self.gmsh_file = self.db.parameter_db.temp_dict['gmsh_file']
            self.remesh_counter = self.db.parameter_db.temp_dict.get('remesh_counter', 0)
            if (not self.db.parameter_db.is_remeshed):
                # create the scratch directory on rank 0 only, then share it
                if (fenics.MPI.rank(fenics.MPI.comm_world) == 0):
                    remesh_directory: str = tempfile.mkdtemp(prefix='cashocs_remesh_', dir=self.mesh_directory)
                else:
                    remesh_directory = ''
                self.db.parameter_db.remesh_directory = fenics.MPI.comm_world.bcast(remesh_directory, root=0)
                fenics.MPI.barrier(fenics.MPI.comm_world)
            else:
                self.db.parameter_db.remesh_directory = self.db.parameter_db.temp_dict['remesh_directory']
            remesh_path = pathlib.Path(self.db.parameter_db.remesh_directory)
            if (not remesh_path.is_dir()):
                remesh_path.mkdir(parents=True, exist_ok=True)
            self.remesh_geo_file = f'{self.db.parameter_db.remesh_directory}/remesh.geo'
        elif self.save_optimized_mesh:
            self.gmsh_file = self.config.get('Mesh', 'gmsh_file')
        if (self.do_remesh and (self.remesh_counter == 0)):
            # keep an untouched copy of the initial mesh as mesh_0.msh
            self.gmsh_file_init = f'{self.db.parameter_db.remesh_directory}/mesh_{self.remesh_counter:d}.msh'
            if (fenics.MPI.rank(fenics.MPI.comm_world) == 0):
                subprocess.run(['cp', self.gmsh_file, self.gmsh_file_init], check=True)
            fenics.MPI.barrier(fenics.MPI.comm_world)
            self.gmsh_file = self.gmsh_file_init
def current_mesh_quality(self) -> float:
    """Return the current mesh quality.

    NOTE(review): presumably decorated with @property in the original
    source (a `.setter` fragment follows) — confirm.
    """
    return self._current_mesh_quality
# NOTE(review): `_mesh_quality.setter` appears to be a truncated decorator
# (likely `@current_mesh_quality.setter`) — confirm against the original.
_mesh_quality.setter
def current_mesh_quality(self, value: float) -> None:
    """Set the mesh quality and mirror it into the optimization state."""
    self.db.parameter_db.optimization_state['mesh_quality'] = value
    self._current_mesh_quality = value
def gmsh_file(self) -> str:
    """Return the path of the current Gmsh mesh file.

    NOTE(review): presumably decorated with @property in the original
    source (a `.setter` fragment follows) — confirm.
    """
    return self._gmsh_file
# NOTE(review): `_file.setter` appears to be a truncated decorator
# (likely `@gmsh_file.setter`) — confirm against the original.
_file.setter
def gmsh_file(self, value: str) -> None:
    """Set the Gmsh file path and mirror it into the parameter database."""
    self.db.parameter_db.gmsh_file_path = value
    self._gmsh_file = value
def move_mesh(self, transformation: fenics.Function) -> bool:
    """Attempt to deform the mesh by *transformation*; return success.

    The transformation must be a piecewise-linear (CG1 'Lagrange')
    vector field.  It is first screened by the a-priori volume-change
    test; if that passes, the deformation handler performs the move
    (optionally with intersection checks) and the cached mesh quality
    is recomputed.
    """
    if (not ((transformation.ufl_element().family() == 'Lagrange') and (transformation.ufl_element().degree() == 1))):
        raise _exceptions.CashocsException('Not a valid mesh transformation')
    if (not self.a_priori_tester.test(transformation, self.volume_change)):
        _loggers.debug('Mesh transformation rejected due to a priori check.')
        return False
    else:
        # a-priori test already passed above, so skip it in the handler
        success_flag = self.deformation_handler.move_mesh(transformation, validated_a_priori=True, test_for_intersections=self.test_for_intersections)
        self.current_mesh_quality = quality.compute_mesh_quality(self.mesh, self.mesh_quality_type, self.mesh_quality_measure)
        return success_flag
def revert_transformation(self) -> None:
    """Undo the most recent mesh movement via the deformation handler."""
    self.deformation_handler.revert_transformation()
def _setup_decrease_computation(self) -> None:
    """Assemble the forms used by compute_decreases.

    Only needed when a finite angle_change bound is configured;
    otherwise the forms remain None.
    """
    if (self.angle_change != float('inf')):
        # DG0 mass matrix and load vector projecting the Frobenius norm
        # of grad(search_direction) onto piecewise constants
        self.a_frobenius = ((self.trial_dg0 * self.test_dg0) * self.dx)
        self.l_frobenius = ((fenics.sqrt(fenics.inner(fenics.grad(self.search_direction_container), fenics.grad(self.search_direction_container))) * self.test_dg0) * self.dx)
def compute_decreases(self, search_direction: List[fenics.Function], stepsize: float) -> int:
    """Return the number of stepsize reductions needed for the angle bound.

    When a finite angle_change is configured, projects the Frobenius
    norm of grad(search_direction) onto DG0, takes its maximum over the
    mesh, and derives (from the Armijo reduction factor beta_armijo) how
    many times `stepsize` must be shrunk so that the scaled deformation
    respects the angle_change bound.  Returns 0 when no bound is set.
    """
    if (self.angle_change == float('inf')):
        return 0
    else:
        # copy the search direction into the container referenced by l_frobenius
        self.search_direction_container.vector().vec().aypx(0.0, search_direction[0].vector().vec())
        self.search_direction_container.vector().apply('')
        x = _utils.assemble_and_solve_linear(self.a_frobenius, self.l_frobenius, ksp_options=self.options_frobenius, comm=self.db.geometry_db.mpi_comm)
        # largest cellwise value of the projected norm
        frobenius_norm = x.max()[1]
        beta_armijo = self.config.getfloat('LineSearch', 'beta_armijo')
        # smallest non-negative integer k with stepsize/beta^k * norm <= angle_change
        return int(np.maximum(np.ceil((np.log(((self.angle_change / stepsize) / frobenius_norm)) / np.log((1 / beta_armijo)))), 0.0))
def _generate_remesh_geo(self, input_mesh_file: str) -> None:
    """Write the .geo file Gmsh uses to remesh *input_mesh_file*.

    The file merges the current mesh, recreates the geometry, and then
    copies over all size-field / meshing directives from the original
    user .geo file.  Only rank 0 writes; all ranks synchronize at the
    end.
    """
    if (fenics.MPI.rank(fenics.MPI.comm_world) == 0):
        with open(self.remesh_geo_file, 'w', encoding='utf-8') as file:
            temp_name = pathlib.Path(input_mesh_file).name
            file.write(f'''Merge '{temp_name}';
''')
            file.write('CreateGeometry;\n')
            file.write('\n')
            geo_file = self.db.parameter_db.temp_dict['geo_file']
            with open(geo_file, 'r', encoding='utf-8') as f:
                # carry over user-defined variables (lowercase lines) and
                # Gmsh field / option directives from the original file
                for line in f:
                    if line[0].islower():
                        file.write(line)
                    if (line[:5] == 'Field'):
                        file.write(line)
                    if (line[:16] == 'Background Field'):
                        file.write(line)
                    if (line[:19] == 'BoundaryLayer Field'):
                        file.write(line)
                    if (line[:5] == 'Mesh.'):
                        file.write(line)
    fenics.MPI.barrier(fenics.MPI.comm_world)
def clean_previous_gmsh_files(self) -> None:
    """Delete the mesh files belonging to the previous remesh iteration.

    Removes ``mesh_<k-1>`` with every suffix produced by the remeshing
    pipeline (.msh, pre-remesh .msh, and the converted .h5/.xdmf files
    including boundaries and subdomains).  Only rank 0 deletes; all
    ranks synchronize after each candidate file, preserving the exact
    barrier sequence of the original unrolled implementation.
    """
    # suffixes in the same order the original code removed them
    suffixes = (
        '.msh',
        '_pre_remesh.msh',
        '.h5',
        '.xdmf',
        '_boundaries.h5',
        '_boundaries.xdmf',
        '_subdomains.h5',
        '_subdomains.xdmf',
    )
    base = f'{self.db.parameter_db.remesh_directory}/mesh_{(self.remesh_counter - 1):d}'
    for suffix in suffixes:
        file = base + suffix
        if (pathlib.Path(file).is_file() and (fenics.MPI.rank(fenics.MPI.comm_world) == 0)):
            subprocess.run(['rm', file], check=True)
        fenics.MPI.barrier(fenics.MPI.comm_world)
def _reinitialize(self, solver: OptimizationAlgorithm) -> None:
    """Rebuild the optimization problem and solver on the remeshed geometry.

    Re-runs the problem's __init__ with the new mesh file, re-creates
    the configured line search, and re-initializes the solver in place
    so the optimization can continue seamlessly.
    """
    solver.optimization_problem.__init__(solver.optimization_problem.mesh_parametrization, self.db.parameter_db.temp_dict['mesh_file'])
    solver.optimization_problem.initialize_solve_parameters()
    line_search_type = self.config.get('LineSearch', 'method').casefold()
    if (line_search_type == 'armijo'):
        line_search: ls.LineSearch = ls.ArmijoLineSearch(self.db, solver.optimization_problem)
    elif (line_search_type == 'polynomial'):
        line_search = ls.PolynomialLineSearch(self.db, solver.optimization_problem)
    else:
        # config validation elsewhere should make this unreachable
        raise _exceptions.CashocsException('This code cannot be reached.')
    solver.__init__(solver.optimization_problem.db, solver.optimization_problem, line_search)
def remesh(self, solver: OptimizationAlgorithm) -> bool:
    """Remesh the current geometry with Gmsh and restart the solver on it.

    Only acts when remeshing is enabled and the restart dictionary is
    present.  Writes out the deformed mesh, snapshots solver statistics
    and output history for the restart, invokes Gmsh, converts the new
    mesh to XDMF, and reinitializes the solver.  Returns True when a
    remesh was performed, False otherwise.
    """
    if (self.do_remesh and self.db.parameter_db.temp_dict):
        self.remesh_counter += 1
        # write the current (deformed) mesh so Gmsh can remesh it
        temp_file = f'{self.db.parameter_db.remesh_directory}/mesh_{self.remesh_counter:d}_pre_remesh.msh'
        io.write_out_mesh(self.mesh, self.gmsh_file, temp_file)
        self._generate_remesh_geo(temp_file)
        # snapshot solver statistics and output history for the restart
        self.db.parameter_db.temp_dict['output_dict'] = {}
        self.db.parameter_db.temp_dict['output_dict']['state_solves'] = solver.state_problem.number_of_solves
        self.db.parameter_db.temp_dict['output_dict']['adjoint_solves'] = solver.adjoint_problem.number_of_solves
        self.db.parameter_db.temp_dict['output_dict']['iterations'] = (solver.iteration + 1)
        output_dict = solver.output_manager.output_dict
        # copy ([:]) so the restart owns its own history lists
        self.db.parameter_db.temp_dict['output_dict']['cost_function_value'] = output_dict['cost_function_value'][:]
        self.db.parameter_db.temp_dict['output_dict']['gradient_norm'] = output_dict['gradient_norm'][:]
        self.db.parameter_db.temp_dict['output_dict']['stepsize'] = output_dict['stepsize'][:]
        self.db.parameter_db.temp_dict['output_dict']['MeshQuality'] = output_dict['MeshQuality'][:]
        self.db.parameter_db.temp_dict['output_dict']['angle'] = output_dict['angle'][:]
        # carry the stopping criteria over to the restarted run
        self.db.parameter_db.temp_dict['OptimizationRoutine']['rtol'] = self.config.getfloat('OptimizationRoutine', 'rtol')
        self.db.parameter_db.temp_dict['OptimizationRoutine']['atol'] = self.config.getfloat('OptimizationRoutine', 'atol')
        self.db.parameter_db.temp_dict['OptimizationRoutine']['max_iter'] = self.config.getint('OptimizationRoutine', 'max_iter')
        dim = self.mesh.geometric_dimension()
        new_gmsh_file = f'{self.db.parameter_db.remesh_directory}/mesh_{self.remesh_counter:d}.msh'
        # e.g. `gmsh remesh.geo -3 -o mesh_k.msh`
        gmsh_cmd_list = ['gmsh', self.remesh_geo_file, f'-{int(dim):d}', '-o', new_gmsh_file]
        if (fenics.MPI.rank(fenics.MPI.comm_world) == 0):
            if (not self.config.getboolean('Mesh', 'show_gmsh_output')):
                subprocess.run(gmsh_cmd_list, check=True, stdout=subprocess.DEVNULL)
            else:
                subprocess.run(gmsh_cmd_list, check=True)
        fenics.MPI.barrier(fenics.MPI.comm_world)
        _remove_gmsh_parametrizations(new_gmsh_file)
        self.db.parameter_db.temp_dict['remesh_counter'] = self.remesh_counter
        self.db.parameter_db.temp_dict['remesh_directory'] = self.db.parameter_db.remesh_directory
        self.db.parameter_db.temp_dict['result_dir'] = solver.output_manager.result_dir
        new_xdmf_file = f'{self.db.parameter_db.remesh_directory}/mesh_{self.remesh_counter:d}.xdmf'
        io.convert(new_gmsh_file, new_xdmf_file)
        self.clean_previous_gmsh_files()
        self.db.parameter_db.temp_dict['mesh_file'] = new_xdmf_file
        self.db.parameter_db.temp_dict['gmsh_file'] = new_gmsh_file
        self.db.parameter_db.temp_dict['OptimizationRoutine']['iteration_counter'] = solver.iteration
        self.db.parameter_db.temp_dict['OptimizationRoutine']['gradient_norm_initial'] = solver.gradient_norm_initial
        self._update_mesh_transfer_matrix(new_xdmf_file, solver)
        self._reinitialize(solver)
        self._check_imported_mesh_quality(solver)
        return True
    else:
        return False
def _check_imported_mesh_quality(self, solver: OptimizationAlgorithm) -> None:
    """Validate the quality of the freshly imported (remeshed) mesh.

    Re-reads the quality configuration and checks the new mesh (taken
    from the reinitialized solver's state space) against the upper
    tolerance, warning when the lower tolerance is configured too close
    to the upper one.
    """
    mesh_quality_tol_lower = self.db.config.getfloat('MeshQuality', 'tol_lower')
    mesh_quality_tol_upper = self.db.config.getfloat('MeshQuality', 'tol_upper')
    if (mesh_quality_tol_lower > (0.9 * mesh_quality_tol_upper)):
        _loggers.warning('You are using a lower remesh tolerance (tol_lower) close to the upper one (tol_upper). This may slow down the optimization considerably.')
    mesh_quality_measure = self.db.config.get('MeshQuality', 'measure')
    mesh_quality_type = self.db.config.get('MeshQuality', 'type')
    # the remeshed mesh lives on the reinitialized problem, not self.mesh
    mesh = solver.optimization_problem.states[0].function_space().mesh()
    current_mesh_quality = quality.compute_mesh_quality(mesh, mesh_quality_type, mesh_quality_measure)
    check_mesh_quality_tolerance(current_mesh_quality, mesh_quality_tol_upper)
def _update_mesh_transfer_matrix(self, xdmf_filename: str, solver: OptimizationAlgorithm) -> None:
    """Update the global-deformation transfer matrix for the new mesh.

    Only relevant when global_deformation is enabled: imports the new
    mesh quietly, builds an interpolator from its CG1 vector space to
    the old control space, and chains it with the existing transfer
    matrix so deformations can still be mapped back to the very first
    mesh.  The results are stashed in the restart dictionary.
    """
    if self.config.getboolean('ShapeGradient', 'global_deformation'):
        # temporarily raise the log level so import_mesh stays silent
        pre_log_level = _loggers._cashocs_logger.level
        _loggers.set_log_level(_loggers.LogLevel.WARNING)
        (mesh, _, _, _, _, _) = import_mesh(xdmf_filename)
        _loggers.set_log_level(pre_log_level)
        deformation_space = fenics.VectorFunctionSpace(mesh, 'CG', 1)
        interpolator = _utils.Interpolator(deformation_space, self.db.function_db.control_spaces[0])
        # compose old transfer with new-mesh interpolation
        new_transfer_matrix = self.db.geometry_db.transfer_matrix.matMult(interpolator.transfer_matrix)
        self.db.parameter_db.temp_dict['transfer_matrix'] = new_transfer_matrix.copy()
        self.db.parameter_db.temp_dict['old_transfer_matrix'] = self.db.geometry_db.transfer_matrix.copy()
        self.db.parameter_db.temp_dict['deformation_function'] = solver.line_search.deformation_function.copy(True)
def get_span_mask(start_ids, end_ids, max_len):
    """Build a float (batch, max_len) mask of inclusive [start, end] spans.

    Position j of row b is 1.0 iff start_ids[b] <= j <= end_ids[b],
    otherwise 0.0.
    """
    # (1, max_len) position row; broadcasting against the (batch, 1)
    # bound columns yields the full (batch, max_len) comparison grids.
    positions = torch.arange(max_len, device=start_ids.device).unsqueeze(0)
    at_or_after_start = (positions >= start_ids.unsqueeze(1)).float()
    at_or_before_end = (positions <= end_ids.unsqueeze(1)).float()
    return at_or_after_start * at_or_before_end
def getname(sent):
    """Replace 1st/2nd-person pronouns in a dialogue line with speaker names.

    Tokens matching the ``name:`` speaker-marker pattern switch the
    current speaker; 'I'/'my' are rewritten to the current speaker and
    'you'/'your' to the previous one.  Both names default to 'Someone'
    and are primed with the last marker found in the sentence.
    """
    tokens = []
    for raw in sent.split():
        tokens.extend(cln_word(raw))

    def is_marker(token):
        # exactly one 'word:word' occurrence marks a speaker change
        return len(re.findall(r'\w*:\w*', token)) == 1

    speaker = 'Someone'
    listener = 'Someone'
    # priming pass: both names end up as the last speaker in the line
    for token in tokens:
        if is_marker(token):
            speaker = getspe(token)
            listener = speaker

    rewritten = []
    for token in tokens:
        if is_marker(token):
            listener = speaker
            speaker = getspe(token)
        elif token in ('I', 'i'):
            rewritten.append(speaker)
        elif token in ('My', 'my'):
            rewritten.append(speaker + "'s")
        elif token in ('You', 'you'):
            rewritten.append(listener)
        elif token in ('Your', 'your'):
            rewritten.append(listener + "'s")
        else:
            rewritten.append(token)
    return ' '.join(rewritten)
class HalfCauchy(TransformedDistribution):
    """Half-Cauchy distribution: |X| where X ~ Cauchy(0, scale).

    Implemented as a TransformedDistribution of a zero-centered Cauchy
    under an absolute-value transform.

    NOTE(review): scale/mean/variance look like they lost @property
    decorators in this copy (decorators elsewhere in the file are
    truncated) — confirm against the original source.
    """
    arg_constraints = {'scale': constraints.positive}
    support = constraints.positive
    has_rsample = True

    def __init__(self, scale, validate_args=None):
        super(HalfCauchy, self).__init__(Cauchy(0, scale), AbsTransform(), validate_args=validate_args)

    def scale(self):
        return self.base_dist.scale

    def mean(self):
        return self.base_dist.mean

    def variance(self):
        return self.base_dist.variance

    def log_prob(self, value):
        """Log density: double the base Cauchy density, -inf below 0."""
        log_prob = (self.base_dist.log_prob(value) + math.log(2))
        # values outside the positive support get probability zero
        log_prob[(value.expand(log_prob.shape) < 0)] = (- inf)
        return log_prob

    def cdf(self, value):
        # folding X -> |X| maps the base CDF F to 2F - 1 on [0, inf)
        return ((2 * self.base_dist.cdf(value)) - 1)

    def icdf(self, prob):
        # inverse of the cdf above
        return self.base_dist.icdf(((prob + 1) / 2))

    def entropy(self):
        # halving the support removes log(2) from the base entropy
        return (self.base_dist.entropy() - math.log(2))
def dense_bn_relu(units):
    """Return a Dense(units) -> BatchNorm -> ReLU Keras stack.

    The Dense layer is bias-free because the following BatchNormalization
    (center=True) already provides a learned shift.
    """
    projection = tf.keras.layers.Dense(
        units,
        use_bias=False,
        kernel_regularizer=tf.keras.regularizers.l2(0.0001),
    )
    normalization = tf.keras.layers.BatchNormalization(center=True, scale=True)
    activation = tf.keras.layers.ReLU()
    return tf.keras.Sequential([projection, normalization, activation])
def _list_with_default(out_size: List[int], defaults: List[int]) -> List[int]:
if isinstance(out_size, int):
return out_size
if (len(defaults) <= len(out_size)):
raise ValueError('Input dimension should be at least {}'.format((len(out_size) + 1)))
return [(v if (v is not None) else d) for (v, d) in zip(out_size, defaults[(- len(out_size)):])] |
def test_record_dict_1():
    """Round-trip: datashape text -> RecordType -> str yields the input."""
    text = '{"1": int64}'
    parsed = ak.types.from_datashape(text, highlevel=False)
    assert isinstance(parsed, ak.types.RecordType)
    assert str(parsed) == text
def time_it(func):
    """Decorator that prints wall-clock duration around each call to *func*.

    Returns the wrapped function's result unchanged.  Uses
    functools.wraps so the wrapper keeps *func*'s name, docstring and
    other metadata (the original version clobbered them).
    """
    import functools  # local import: file-level import block not visible here

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        print(f'Start {func.__name__}')
        output = func(*args, **kwargs)
        end = time.time()
        print(f'End {func.__name__}. Elapsed {(end - start)} seconds')
        return output
    return wrapper
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.