code stringlengths 101 5.91M |
|---|
# NOTE(review): the decorator line below appears truncated by extraction — it is
# missing its leading '@' (and likely a prefix such as '@compare_numpy_output');
# confirm against the original dace ufunc test suite.
_numpy_output(check_dtype=True)
def test_ufunc_expm1_c(A: dace.complex64[10]):
    """Element-wise expm1 over a length-10 complex64 vector (dace ufunc test)."""
    return np.expm1(A)
class TestContainsNaNTest:
    """Behavioral tests for the private `_contains_nan(data, nan_policy=...)` helper."""

    def test_policy(self):
        arr = np.array([1, 2, 3, np.nan])
        # 'propagate' and 'omit' both detect the NaN and echo the policy back.
        for policy in ('propagate', 'omit'):
            found, returned_policy = _contains_nan(arr, nan_policy=policy)
            assert found
            assert returned_policy == policy
        # 'raise' must error on NaN input; unknown policies must be rejected.
        with pytest.raises(ValueError, match='The input contains nan values'):
            _contains_nan(arr, nan_policy='raise')
        with pytest.raises(ValueError, match='nan_policy must be one of'):
            _contains_nan(arr, nan_policy='nan')

    def test_contains_nan_1d(self):
        cases = [
            (np.array([1, 2, 3]), False),
            (np.array([1, 2, 3, np.nan]), True),
            (np.array([np.nan, 2, 3, np.nan]), True),
            # A string-dtype array cannot hold a real NaN value...
            (np.array([1, 2, '3', np.nan]), False),
            # ...but an object array can.
            (np.array([1, 2, '3', np.nan], dtype='object'), True),
        ]
        for arr, expected in cases:
            assert bool(_contains_nan(arr)[0]) is expected

    def test_contains_nan_2d(self):
        cases = [
            (np.array([[1, 2], [3, 4]]), False),
            (np.array([[1, 2], [3, np.nan]]), True),
            # String dtype swallows the NaN; object dtype preserves it.
            (np.array([['1', 2], [3, np.nan]]), False),
            (np.array([['1', 2], [3, np.nan]], dtype='object'), True),
        ]
        for arr, expected in cases:
            assert bool(_contains_nan(arr)[0]) is expected
def create_batches(sampler, shuffle=True, cache_dir='cache'):
    """Accumulate dict-shaped batches from *sampler* into a single Dataset.

    Args:
        sampler: iterable yielding dict-like batches (key -> value).
        shuffle: unused; kept for backward compatibility with existing callers.
        cache_dir: unused; kept for backward compatibility with existing callers.

    Returns:
        A `Dataset` whose columns are the per-key accumulated value lists.
    """
    # defaultdict(list) is the idiomatic form of defaultdict(lambda: []).
    batches_dict = defaultdict(list)
    # The original enumerated the sampler but never used the index.
    for batch in tqdm(sampler, desc='Creating batches for training'):
        for key, value in batch.items():
            batches_dict[key].append(value)
    return Dataset.from_dict(batches_dict)
class TestMediumLevelActionManagerSimple(unittest.TestCase):
    """Exercise the medium-level action manager on the simple Overcooked MDP.

    Each ``simple_mdp_*`` helper builds one `OvercookedState` and checks the
    medium-level actions proposed for both players against an expected set.
    The two top-level tests run every scenario, once without start
    orientations (shared motion goals allowed) and once with start
    orientations (counter drops forbidden).
    """

    def test_simple_mdp_without_start_orientations(self):
        print('Simple - no start orientations (& shared motion goals)')
        mlam = ml_action_manager_simple
        self.simple_mpd_empty_hands(mlam)
        self.simple_mdp_deliver_soup(mlam)
        self.simple_mdp_pickup_counter_soup(mlam)
        self.simple_mdp_pickup_counter_dish(mlam)
        self.simple_mdp_pickup_counter_onion(mlam)
        self.simple_mdp_drop_useless_dish_with_soup_idle(mlam)
        self.simple_mdp_pickup_soup(mlam)
        self.simple_mdp_pickup_dish(mlam)
        self.simple_mdp_start_good_soup_cooking(mlam)
        self.simple_mdp_start_bad_soup_cooking(mlam)
        self.simple_mdp_start_1_onion_soup_cooking(mlam)
        self.simple_mdp_drop_useless_onion_good_soup(mlam)
        self.simple_mdp_drop_useless_onion_bad_soup(mlam)
        self.simple_mdp_add_3rd_onion(mlam)
        self.simple_mdp_add_2nd_onion(mlam)
        self.simple_mdp_drop_useless_dish(mlam)

    def test_simple_mdp_with_start_orientations(self):
        print('Simple - with start orientations (no shared motion goals)')
        mlam = or_ml_action_manager_simple
        self.simple_mpd_empty_hands(mlam, counter_drop_forbidden=True)
        self.simple_mdp_deliver_soup(mlam, counter_drop_forbidden=True)
        self.simple_mdp_pickup_counter_soup(mlam, counter_drop_forbidden=True)
        self.simple_mdp_pickup_counter_dish(mlam, counter_drop_forbidden=True)
        self.simple_mdp_pickup_counter_onion(mlam, counter_drop_forbidden=True)
        self.simple_mdp_drop_useless_dish_with_soup_idle(mlam, counter_drop_forbidden=True)
        self.simple_mdp_pickup_soup(mlam, counter_drop_forbidden=True)
        self.simple_mdp_pickup_dish(mlam, counter_drop_forbidden=True)
        self.simple_mdp_start_good_soup_cooking(mlam, counter_drop_forbidden=True)
        self.simple_mdp_start_bad_soup_cooking(mlam, counter_drop_forbidden=True)
        self.simple_mdp_start_1_onion_soup_cooking(mlam, counter_drop_forbidden=True)
        self.simple_mdp_drop_useless_onion_good_soup(mlam, counter_drop_forbidden=True)
        self.simple_mdp_drop_useless_onion_bad_soup(mlam, counter_drop_forbidden=True)
        self.simple_mdp_add_3rd_onion(mlam, counter_drop_forbidden=True)
        self.simple_mdp_add_2nd_onion(mlam, counter_drop_forbidden=True)
        self.simple_mdp_drop_useless_dish(mlam, counter_drop_forbidden=True)

    # Medium-level actions encoded as (target position, facing direction)
    # on the simple layout's grid.
    ONION_PICKUP = ((3, 2), (1, 0))
    DISH_PICKUP = ((2, 2), (0, 1))
    COUNTER_DROP = ((1, 1), (0, (- 1)))
    COUNTER_PICKUP = ((1, 2), ((- 1), 0))
    POT_INTERACT = ((2, 1), (0, (- 1)))
    SOUP_DELIVER = ((3, 2), (0, 1))

    # NOTE(review): 'mpd' is a typo for 'mdp'; kept to match the call sites above.
    def simple_mpd_empty_hands(self, planner, counter_drop_forbidden=False):
        # Both players empty-handed: each may fetch an onion or a dish.
        s = OvercookedState([P((2, 2), n), P((2, 1), n)], {}, all_orders=simple_mdp.start_all_orders)
        self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP], [self.ONION_PICKUP, self.DISH_PICKUP])

    def simple_mdp_deliver_soup(self, planner, counter_drop_forbidden=False):
        # Player 1 holds a finished soup: deliver it (plus counter drop when allowed).
        s = OvercookedState([P((2, 2), n), P((2, 1), n, done_soup_obj((2, 1)))], {}, all_orders=simple_mdp.start_all_orders)
        if counter_drop_forbidden:
            self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP], [self.SOUP_DELIVER])
        else:
            self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP], [self.COUNTER_DROP, self.SOUP_DELIVER])

    def simple_mdp_pickup_counter_soup(self, planner, counter_drop_forbidden=False):
        # A finished soup sits on the counter: both players may also pick it up.
        s = OvercookedState([P((2, 2), n), P((2, 1), n)], {(0, 2): done_soup_obj((0, 2))}, all_orders=simple_mdp.start_all_orders)
        self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP], [self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP])

    def simple_mdp_pickup_counter_dish(self, planner, counter_drop_forbidden=False):
        s = OvercookedState([P((2, 2), n), P((2, 1), n)], {(0, 2): Obj('dish', (0, 2))}, all_orders=simple_mdp.start_all_orders)
        self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP], [self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP])

    def simple_mdp_pickup_counter_onion(self, planner, counter_drop_forbidden=False):
        s = OvercookedState([P((2, 2), n), P((2, 1), n)], {(0, 2): Obj('onion', (0, 2))}, all_orders=simple_mdp.start_all_orders)
        self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP], [self.ONION_PICKUP, self.DISH_PICKUP, self.COUNTER_PICKUP])

    def simple_mdp_drop_useless_dish_with_soup_idle(self, planner, counter_drop_forbidden=False):
        # Player 1 holds a dish but the soup is not cooking yet: the dish is useless.
        s = OvercookedState([P((2, 2), n), P((2, 1), n, Obj('dish', (2, 1)))], {(2, 0): idle_soup_obj((2, 0), 3)}, all_orders=simple_mdp.start_all_orders)
        if counter_drop_forbidden:
            self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT], [])
        else:
            self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT], [self.COUNTER_DROP])

    def simple_mdp_pickup_soup(self, planner, counter_drop_forbidden=False):
        # Player 1 holds a dish and a soup is done: fetch it from the pot.
        s = OvercookedState([P((2, 2), n), P((2, 1), n, Obj('dish', (2, 1)))], {(2, 0): done_soup_obj((2, 0))}, all_orders=simple_mdp.start_all_orders)
        if counter_drop_forbidden:
            self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP], [self.POT_INTERACT])
        else:
            self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP], [self.COUNTER_DROP, self.POT_INTERACT])

    def simple_mdp_pickup_dish(self, planner, counter_drop_forbidden=False):
        s = OvercookedState([P((2, 2), n), P((2, 1), n)], {(2, 0): done_soup_obj((2, 0))}, all_orders=simple_mdp.start_all_orders)
        self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP], [self.ONION_PICKUP, self.DISH_PICKUP])

    def simple_mdp_start_good_soup_cooking(self, planner, counter_drop_forbidden=False):
        # Idle pot with 3 onions (a "good" soup): starting it is a valid action.
        s = OvercookedState([P((2, 2), n), P((2, 1), n)], {(2, 0): idle_soup_obj((2, 0), 3)}, all_orders=simple_mdp.start_all_orders)
        self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT], [self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT])

    def simple_mdp_start_bad_soup_cooking(self, planner, counter_drop_forbidden=False):
        s = OvercookedState([P((2, 2), n), P((2, 1), n)], {(2, 0): idle_soup_obj((2, 0), 2)}, all_orders=simple_mdp.start_all_orders)
        self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT], [self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT])

    def simple_mdp_start_1_onion_soup_cooking(self, planner, counter_drop_forbidden=False):
        s = OvercookedState([P((2, 2), n), P((2, 1), n)], {(2, 0): idle_soup_obj((2, 0), 1)}, all_orders=simple_mdp.start_all_orders)
        self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT], [self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT])

    def simple_mdp_drop_useless_onion_good_soup(self, planner, counter_drop_forbidden=False):
        # Soup is already done, so player 1's onion has no use.
        s = OvercookedState([P((2, 2), n), P((2, 1), n, Obj('onion', (2, 1)))], {(2, 0): done_soup_obj((2, 0))}, all_orders=simple_mdp.start_all_orders)
        if counter_drop_forbidden:
            self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP], [])
        else:
            self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP], [self.COUNTER_DROP])

    def simple_mdp_drop_useless_onion_bad_soup(self, planner, counter_drop_forbidden=False):
        s = OvercookedState([P((2, 2), n), P((2, 1), n, Obj('onion', (2, 1)))], {(2, 0): done_soup_obj((2, 0), 2)}, all_orders=simple_mdp.start_all_orders)
        if counter_drop_forbidden:
            self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP], [])
        else:
            self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP], [self.COUNTER_DROP])

    def simple_mdp_add_3rd_onion(self, planner, counter_drop_forbidden=False):
        # Pot holds 2 onions; player 1's onion can complete the soup.
        s = OvercookedState([P((2, 2), n), P((2, 1), n, Obj('onion', (2, 1)))], {(2, 0): idle_soup_obj((2, 0), 2)}, all_orders=simple_mdp.start_all_orders)
        if counter_drop_forbidden:
            self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT], [self.POT_INTERACT])
        else:
            self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT], [self.COUNTER_DROP, self.POT_INTERACT])

    def simple_mdp_add_2nd_onion(self, planner, counter_drop_forbidden=False):
        s = OvercookedState([P((2, 2), n), P((2, 1), n, Obj('onion', (2, 1)))], {(2, 0): idle_soup_obj((2, 0), 1)}, all_orders=simple_mdp.start_all_orders)
        if counter_drop_forbidden:
            self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT], [self.POT_INTERACT])
        else:
            self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT], [self.COUNTER_DROP, self.POT_INTERACT])

    def simple_mdp_drop_useless_dish(self, planner, counter_drop_forbidden=False):
        s = OvercookedState([P((2, 2), n), P((2, 1), n, Obj('dish', (2, 1)))], {(2, 0): idle_soup_obj((2, 0), 1)}, all_orders=simple_mdp.start_all_orders)
        if counter_drop_forbidden:
            self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT], [self.POT_INTERACT])
        else:
            self.check_ml_action_manager(s, planner, [self.ONION_PICKUP, self.DISH_PICKUP, self.POT_INTERACT], [self.COUNTER_DROP, self.POT_INTERACT])

    def check_ml_action_manager(self, state, am, expected_mla_0, expected_mla_1, debug=False):
        """Assert that the action manager proposes exactly the expected medium-level
        actions for each player (order-insensitive, via set comparison)."""
        (player_0, player_1) = state.players
        mla_0 = am.get_medium_level_actions(state, player_0)
        mla_1 = am.get_medium_level_actions(state, player_1)
        if debug:
            print('Player 0 mla', mla_0)
            print('Player 1 mla', mla_1)
            print(am.mdp.state_string(state))
        self.assertEqual(set(mla_0), set(expected_mla_0), ((("player 0's ml_action should be " + str(expected_mla_0)) + ' but get ') + str(mla_0)))
        # NOTE(review): the message below says "player 0" but describes player 1.
        self.assertEqual(set(mla_1), set(expected_mla_1), ((("player 0's ml_action should be " + str(expected_mla_1)) + ' but get ') + str(mla_1)))
def get_dataset_details(dataset):
    """Look up the channel count, spatial size, and class labels for a dataset.

    Args:
        dataset: dataset name, one of 'mnist' or 'cifar10'.

    Returns:
        (input_nc, input_width, input_height, classes) tuple.

    Raises:
        NotImplementedError: for any other dataset name.
    """
    specs = {
        'mnist': ((1, 28, 28), (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)),
        'cifar10': ((3, 32, 32), ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')),
    }
    if dataset not in specs:
        raise NotImplementedError('Specified data set is not available.')
    (input_nc, input_width, input_height), classes = specs[dataset]
    return (input_nc, input_width, input_height, classes)
def writetab(llargs, fout, kset):
    """Render (result-dict, AUC) pairs as a text table; print it and write it to a file.

    Args:
        llargs: iterable of (item, aucx) pairs, where `item` is a dict of run info.
        fout: output file path.
        kset: subset label printed alongside the table.
    """
    t = Texttable()
    t.set_max_width(500)
    info = ['dataset', 'al_type', 'knn', 'clustering']
    # These columns are only meaningful for the AL_LP method; blanked otherwise.
    restricted = ['knn', 'clustering']
    header = []
    format_row = []
    for k in info:
        if k not in ['exp_fd']:
            header.append(k)
            format_row.append('t')
    header.append('AUC*')
    format_row.append('f')
    t.header(header)
    t.set_cols_dtype(format_row)
    for (item, aucx) in llargs:
        row = []
        for k in info:
            if item['al_type'] == constants.AL_LP:
                row.append(item[k])
            elif k in restricted:
                row.append(None)
            else:
                row.append(item[k])
        row.append(aucx)
        t.add_row(row)
    print('Subset: {}'.format(kset))
    print(t.draw())
    # Context manager guarantees the handle is closed even if draw() raises
    # (the original used open()/close() and could leak the handle).
    with open(fout, 'w') as ff:
        print(t.draw(), file=ff)
        print('Subset: {}'.format(kset), file=ff)
def test_trigamma():
    """trigamma coincides with polygamma of order 1; non-positive integers are poles."""
    sym = Symbol('x')
    assert trigamma(sym) == polygamma(1, sym)
    # Poles of trigamma at non-positive integers evaluate to complex infinity.
    assert trigamma(-2) == zoo
class GroupAll(nn.Module):
    """Group every point into one group, optionally concatenating coordinates
    onto the feature channels.

    ``new_xyz`` is accepted for interface compatibility but never used.
    """

    def __init__(self, use_xyz: bool = True):
        super().__init__()
        self.use_xyz = use_xyz

    def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None):
        # (B, N, 3) -> (B, 3, 1, N): channels first, single group dimension.
        grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
        if features is None:
            return grouped_xyz
        # (B, C, N) -> (B, C, 1, N)
        grouped_features = features.unsqueeze(2)
        if not self.use_xyz:
            return grouped_features
        # (B, 3 + C, 1, N): coordinates stacked ahead of the features.
        return torch.cat([grouped_xyz, grouped_features], dim=1)
def verify_dir_exists(filename):
    """Ensure the directory that will contain *filename* exists.

    Creates intermediate directories as needed. A filename with no directory
    component is a no-op (the original crashed calling ``makedirs('')``).
    """
    dirname = os.path.dirname(filename)
    if dirname:
        # exist_ok=True closes the check-then-create race between concurrent
        # processes that the original errno.EEXIST dance worked around.
        os.makedirs(dirname, exist_ok=True)
def get_types(entity: str) -> List[str]:
    """Query the SPARQL endpoint for the Freebase types of *entity*.

    NOTE(review): both the SPARQL prefix URIs and the namespace stripped from
    the results were garbled in the source (the original even called
    ``str.replace`` with a single argument, a TypeError); they are restored
    here as the standard Freebase namespaces — confirm against the original.
    """
    query = ('\n'
             'PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n'
             'PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n'
             'PREFIX : <http://rdf.freebase.com/ns/>\n'
             'SELECT (?x0 AS ?value) WHERE {\n'
             'SELECT DISTINCT ?x0 WHERE {\n'
             ':' + entity + ' :type.object.type ?x0 . \n'
             '}\n'
             '}\n')
    sparql.setQuery(query)
    try:
        results = sparql.query().convert()
    except urllib.error.URLError:
        # Endpoint unreachable: dump the failing query and bail out.
        print(query)
        exit(0)
    rtn = []
    for result in results['results']['bindings']:
        # Strip the namespace so only the bare type identifier remains.
        rtn.append(result['value']['value'].replace('http://rdf.freebase.com/ns/', ''))
    return rtn
def euler_xyz_to_R(euler):
    """Compose a rotation from XYZ Euler angles in degrees (applied as Rz * Ry * Rx)."""
    rx = _Rx(np.deg2rad(euler[0]))
    ry = _Ry(np.deg2rad(euler[1]))
    rz = _Rz(np.deg2rad(euler[2]))
    return rz * ry * rx
def test_Updater_GradientDescent():
    """One gradient-descent step on a dummy layer must move its value 5.0 -> 2.0."""
    with make_scope() as session:
        from returnn.tf.network import TFNetwork, ExternData
        from returnn.config import Config
        config = Config()
        network = TFNetwork(extern_data=ExternData(), train_flag=True)
        # DummyLayer exposes a scalar parameter (initial 5.0) whose loss scales
        # linearly with it (factor 3.0), so its gradient is the constant 3.0.
        network.add_layer(name='output', layer_class=DummyLayer, initial_value=5.0, loss_value_factor=3.0)
        network.initialize_params(session=session)
        updater = Updater(config=config, network=network)
        updater.set_learning_rate(1.0, session=session)
        updater.set_trainable_vars(network.get_trainable_params())
        updater.init_optimizer_vars(session=session)
        session.run(updater.get_optim_op())
        # One step with lr=1.0: 5.0 - 1.0 * 3.0 == 2.0.
        assert_almost_equal(session.run(network.get_default_output_layer().output.placeholder), 2.0)
def plot_embedding_as_heatmap(embed, ax=None, title='', shape=None, color_range=(0, 0.3)):
    """Draw a 1-D embedding vector as a 2-D heatmap with a clamped colorbar.

    Args:
        embed: flat embedding array; reshaped to *shape* before plotting.
        ax: target axes (defaults to the current axes).
        title: axes title.
        shape: target 2-D shape; defaults to (floor(sqrt(len(embed))), -1).
        color_range: (vmin, vmax) limits applied to the color mapping.
    """
    if ax is None:
        ax = plt.gca()
    if shape is None:
        height = int(np.sqrt(len(embed)))
        shape = (height, -1)
    embed = embed.reshape(shape)
    cmap = cm.get_cmap()
    mappable = ax.imshow(embed, cmap=cmap)
    cbar = plt.colorbar(mappable, ax=ax, fraction=0.046, pad=0.04)
    # Colorbar.set_clim was removed in Matplotlib 3.3; setting the limits on
    # the mappable updates the colorbar as well.
    mappable.set_clim(*color_range)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_title(title)
def test_pytest_parametrize(testdir):
    """Schemathesis x pytest.mark.parametrize: 2 endpoints x 2 values = 4 passes.

    NOTE(review): the generated-test template string below looks truncated by
    extraction (e.g. a leading '@pytest.mark' appears to be missing) — confirm
    against the original source before relying on it.
    """
    testdir.make_test('\.parametrize("param", ("A", "B"))\()\ndef test_(request, param, case):\n request.config.HYPOTHESIS_CASES += 1\n assert case.full_path == "/v1/users"\n assert case.method in ("GET", "POST")\n', paths={'/users': {'get': {'responses': {'200': {'description': 'OK'}}}, 'post': {'responses': {'200': {'description': 'OK'}}}}})
    result = testdir.runpytest('-v', '-s')
    # GET and POST endpoints, each parametrized with "A" and "B".
    result.assert_outcomes(passed=4)
    result.stdout.re_match_lines(['test_pytest_parametrize.py::test_\\[GET /v1/users\\]\\[A\\] PASSED', 'test_pytest_parametrize.py::test_\\[GET /v1/users\\]\\[B\\] PASSED', 'Hypothesis calls: 4'])
def nonsaturating_hinge_gan_losses(discriminator_real_outputs, discriminator_fake_outputs):
    """Non-saturating generator loss paired with a hinge discriminator loss.

    Returns:
        (generator_loss, discriminator_loss) tuple of scalar tensors.
    """
    # Generator: sigmoid cross-entropy against an all-ones target (non-saturating form).
    generator_loss = tf.losses.sigmoid_cross_entropy(tf.ones_like(discriminator_fake_outputs), discriminator_fake_outputs)
    # Discriminator hinge terms: mean(max(0, 1 - D(real))) + mean(max(0, 1 + D(fake))).
    real_term = tf.reduce_mean(tf.nn.relu(1.0 - discriminator_real_outputs))
    fake_term = tf.reduce_mean(tf.nn.relu(1.0 + discriminator_fake_outputs))
    return (generator_loss, real_term + fake_term)
class ParameterNamer(object):
    """Graph pass that replaces each node's raw parameter list with a dict
    keyed by conventional names ('weights'/'biases', batch-norm statistics).
    Nodes of unhandled kinds are warned about and left untouched.
    """

    def __call__(self, graph):
        for node in graph.nodes:
            if (node.data is None):
                continue
            if (node.kind in (NodeKind.Convolution, NodeKind.InnerProduct)):
                names = ('weights',)
                # A bias blob is only present when the layer declares bias_term.
                if node.parameters.bias_term:
                    names += ('biases',)
            elif (node.kind == NodeKind.BatchNorm):
                names = ('moving_mean', 'moving_variance')
                # A 4-element data list additionally carries the learned scale/shift.
                if (len(node.data) == 4):
                    names += ('gamma', 'beta')
            else:
                print_stderr('WARNING: Unhandled parameters: {}'.format(node.kind))
                continue
            # The derived names must match the blob count exactly.
            assert (len(names) == len(node.data))
            node.data = dict(zip(names, node.data))
        return graph
def group_together(file_paths, num_samples):
    """Interleave fixed-size groups of stripped lines from several files.

    File i is chunked into groups whose size is the cumulative product
    ``num_samples[0] * ... * num_samples[i]``; group j of every file is
    concatenated into ``all_groups[j]``.

    Args:
        file_paths: paths of the text files to read.
        num_samples: per-file multipliers; NOT mutated (the original modified
            it in place, leaking cumulative products back to the caller).

    Returns:
        List of merged line groups.
    """
    # Build the cumulative group sizes on a local list.
    group_sizes = []
    running = 1
    for count in num_samples:
        running *= count
        group_sizes.append(running)
    all_groups = []
    for size, file_path in zip(group_sizes, file_paths):
        with open(file_path) as f:
            lines = [line.strip() for line in f]
        for group_idx in range(len(lines) // size):
            chunk = lines[group_idx * size:(group_idx + 1) * size]
            if len(all_groups) <= group_idx:
                all_groups.append(chunk)
            else:
                all_groups[group_idx].extend(chunk)
    return all_groups
class RefAdagrad(RefSolver):
    """Reference (NumPy) implementation of the Adagrad update rule, used to
    validate the real solver against."""

    def __init__(self, lr, eps):
        super().__init__()
        self.lr = lr    # learning rate
        self.eps = eps  # epsilon added to the denominator for numerical stability
        self.G = {}     # per-parameter accumulated squared gradients

    def _set_state_impl(self, key, param):
        # The accumulator starts at zero with the parameter's shape and dtype.
        self.G[key] = np.zeros_like(param)

    def _update_impl(self, key, p, g):
        # Delegate the in-place Adagrad step to the shared helper.
        _update_adagrad(p, g, self.G[key], self.lr, self.eps)
def test_test_case_equals_on_different_prim(simple_test_case: dtc.DefaultTestCase, constructor_mock):
    """Two test cases whose constructor calls reference different prior statements
    must not compare equal."""
    cloned = simple_test_case.clone()
    # Original wires the constructor's 'y' argument to statement 0's return value...
    simple_test_case.add_statement(st.ConstructorStatement(simple_test_case, constructor_mock, {'y': simple_test_case.statements[0].ret_val}))
    # ...while the clone wires it to statement 1's, making the cases differ.
    cloned.add_statement(st.ConstructorStatement(cloned, constructor_mock, {'y': cloned.statements[1].ret_val}))
    assert (simple_test_case != cloned)
def test_format_tags():
    """format_tags joins the tag names with commas inside square brackets."""
    sample = ['tag_1', 'tag_2', 'tag_3']
    expected = '[tag_1,tag_2,tag_3]'
    assert format_tags(sample) == expected
class ThresholdedImprovementScoringFunction(MoleculewiseScoringFunction):
    """Score a molecule by `objective + offset`, but only when its constraint
    score clears `threshold`; otherwise return the corrupt score."""

    def __init__(self, objective, constraint, threshold, offset):
        super().__init__()
        self.objective = objective
        self.constraint = constraint
        self.threshold = threshold
        self.offset = offset

    def raw_score(self, smiles):
        # Below-threshold constraint: fall back to the base class's corrupt score.
        if self.constraint.score(smiles) < self.threshold:
            return self.corrupt_score
        return self.objective.score(smiles) + self.offset
def test_for_one_epoch(model, loss, test_loader, epoch_number):
    """Evaluate `model` on `test_loader` for one epoch, logging per-batch and
    summary loss / top-1 / top-5 accuracy.

    `loss` may return either a scalar or a (scalar, outputs) tuple; in the
    latter case the returned outputs are used for the accuracy computation.
    """
    model.eval()
    loss.eval()
    data_time_meter = utils.AverageMeter()
    batch_time_meter = utils.AverageMeter()
    # recent=100 keeps a moving window over the last 100 batches for logging.
    loss_meter = utils.AverageMeter(recent=100)
    top1_meter = utils.AverageMeter(recent=100)
    top5_meter = utils.AverageMeter(recent=100)
    timestamp = time.time()
    for (i, (images, labels)) in enumerate(test_loader):
        batch_size = images.size(0)
        # Move the batch to GPU only when the model itself lives there.
        if utils.is_model_cuda(model):
            images = images.cuda()
            labels = labels.cuda()
        data_time_meter.update((time.time() - timestamp))
        with torch.no_grad():
            outputs = model(images)
            loss_output = loss(outputs, labels)
        # Some losses return (value, transformed_outputs); unpack accordingly.
        if isinstance(loss_output, tuple):
            (loss_value, outputs) = loss_output
        else:
            loss_value = loss_output
        loss_meter.update(loss_value.item(), batch_size)
        (top1, top5) = utils.topk_accuracy(outputs, labels, recalls=(1, 5))
        top1_meter.update(top1, batch_size)
        top5_meter.update(top5, batch_size)
        batch_time_meter.update((time.time() - timestamp))
        timestamp = time.time()
        logging.info('Epoch: [{epoch}][{batch}/{epoch_size}]\tTime {batch_time.value:.2f} ({batch_time.average:.2f}) Data {data_time.value:.2f} ({data_time.average:.2f}) Loss {loss.value:.3f} {{{loss.average:.3f}, {loss.average_recent:.3f}}} Top-1 {top1.value:.2f} {{{top1.average:.2f}, {top1.average_recent:.2f}}} Top-5 {top5.value:.2f} {{{top5.average:.2f}, {top5.average_recent:.2f}}} '.format(epoch=epoch_number, batch=(i + 1), epoch_size=len(test_loader), batch_time=batch_time_meter, data_time=data_time_meter, loss=loss_meter, top1=top1_meter, top5=top5_meter))
    logging.info('Epoch: [{epoch}] -- TESTING SUMMARY\tTime {batch_time.sum:.2f} Data {data_time.sum:.2f} Loss {loss.average:.3f} Top-1 {top1.average:.2f} Top-5 {top5.average:.2f} '.format(epoch=epoch_number, batch_time=batch_time_meter, data_time=data_time_meter, loss=loss_meter, top1=top1_meter, top5=top5_meter))
class ContinualScaler:
    """Wrapper around ``torch.cuda.amp.GradScaler`` that runs optional model
    hooks immediately before and after the optimizer step (used by
    continual-learning methods)."""

    state_dict_key = 'amp_scaler'

    def __init__(self, disable_amp):
        # The underlying scaler becomes a no-op when AMP is disabled.
        self._scaler = torch.cuda.amp.GradScaler(enabled=(not disable_amp))

    def __call__(self, loss, optimizer, model_without_ddp, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False, hook=True):
        """Scaled backward pass followed by a hooked optimizer step."""
        self.pre_step(loss, optimizer, parameters, create_graph, clip_grad, clip_mode)
        self.post_step(optimizer, model_without_ddp, hook)

    def pre_step(self, loss, optimizer, parameters=None, create_graph=False, clip_grad=None, clip_mode='norm'):
        """Scale the loss, backpropagate, unscale, and optionally clip gradients."""
        self._scaler.scale(loss).backward(create_graph=create_graph)
        self._scaler.unscale_(optimizer)
        if clip_grad is None:
            return
        assert parameters is not None
        dispatch_clip_grad(parameters, clip_grad, mode=clip_mode)

    def post_step(self, optimizer, model_without_ddp, hook=True):
        """Step the optimizer, invoking the model's before/after hooks when present."""
        if hook and hasattr(model_without_ddp, 'hook_before_update'):
            model_without_ddp.hook_before_update()
        self._scaler.step(optimizer)
        if hook and hasattr(model_without_ddp, 'hook_after_update'):
            model_without_ddp.hook_after_update()
        self.update()

    def update(self):
        self._scaler.update()

    def state_dict(self):
        return self._scaler.state_dict()

    def load_state_dict(self, state_dict):
        self._scaler.load_state_dict(state_dict)
def index_predicates(es, KB):
    """Index the predicate dump of knowledge base *KB* into Elasticsearch.

    Reads ../data/<KB>_predicates.txt into the index named '<KB>p' with no
    namespace filter.
    """
    data_path = '../data/%s.txt' % ('%s_predicates' % KB)
    target_index = '%sp' % KB
    start_indexing(es, target_index, data_path, None)
def run(*args, env=None):
    """Run a command (arguments are stringified) and return its exit status.

    Args:
        *args: command and arguments; each is converted with str().
        env: optional mapping merged over a copy of the current environment.
    """
    cmd = [str(arg) for arg in args]
    if env is None:
        return subprocess.Popen(cmd).wait()
    merged = os.environ.copy()
    merged.update(env)
    return subprocess.Popen(cmd, env=merged).wait()
class Tanh_DenseNet(nn.Module):
    """DenseNet variant whose final pre-pooling activation is tanh.

    Args:
        block: dense-layer class (project-defined), instantiated as
            ``block(in_planes, growth_rate)``.
        nblocks: layer counts for the four dense blocks.
        growth_rate: channels each dense layer adds.
        reduction: channel compression factor applied by each Transition.
        num_classes: output size of the final linear classifier.
    """

    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
        super(Tanh_DenseNet, self).__init__()
        self.growth_rate = growth_rate
        num_planes = 2 * growth_rate
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
        # Three dense-block + transition stages; each transition compresses
        # channels by `reduction`.
        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += nblocks[0] * growth_rate
        out_planes = int(math.floor(num_planes * reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1] * growth_rate
        out_planes = int(math.floor(num_planes * reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += nblocks[2] * growth_rate
        out_planes = int(math.floor(num_planes * reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes
        # The final dense block feeds BN + tanh + average pooling directly.
        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3] * growth_rate
        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)

    def _make_dense_layers(self, block, in_planes, nblock):
        """Stack `nblock` dense layers; each widens the next layer's input by growth_rate."""
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        # F.tanh is deprecated; torch.tanh is the supported equivalent.
        out = F.avg_pool2d(torch.tanh(self.bn(out)), 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
class Config(NamedTuple):
    """One named evaluation configuration for a loaded model."""
    # Human-readable identifier for this configuration.
    name: str
    # Settings used during training (exact format not visible here — confirm with callers).
    settings_train: str
    # Settings used during evaluation (exact format not visible here — confirm with callers).
    settings_eval: str
    # Rendering mode the loaded model uses while evaluating.
    rendering_mode: LoadedModel.EvaluationMode
    # Extra command-line arguments forwarded to the run.
    args: List[str]
class Dict(TokenConverter):
    """Converter that makes parse results accessible like a dict: each
    sub-token group's first element becomes the key for the remainder.

    NOTE(review): vendored pyparsing code — logic kept byte-identical.
    """

    def __init__(self, expr):
        super(Dict, self).__init__(expr)
        self.saveAsList = True

    def postParse(self, instring, loc, tokenlist):
        for (i, tok) in enumerate(tokenlist):
            if (len(tok) == 0):
                continue
            ikey = tok[0]
            if isinstance(ikey, int):
                # Integer keys are normalized to their stripped string form.
                ikey = _ustr(tok[0]).strip()
            if (len(tok) == 1):
                # Key with no value: store an empty string.
                tokenlist[ikey] = _ParseResultsWithOffset('', i)
            elif ((len(tok) == 2) and (not isinstance(tok[1], ParseResults))):
                # Simple key/value pair.
                tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
            else:
                # Multi-element value: keep everything after the key.
                dictvalue = tok.copy()
                del dictvalue[0]
                if ((len(dictvalue) != 1) or (isinstance(dictvalue, ParseResults) and dictvalue.haskeys())):
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
                else:
                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
        if self.resultsName:
            return [tokenlist]
        else:
            return tokenlist
class ginn_autoencoder(nn.Module):
    """Two-layer graph-convolutional autoencoder that masks its input features
    before encoding and reconstructs them through a sigmoid-activated decoder."""

    def __init__(self, g, mask, in_feats, h_feats, activation, dropout):
        super(ginn_autoencoder, self).__init__()
        self.mask = mask
        # Encoder: in_feats -> h_feats with the caller-supplied activation.
        self.masked_gcn = GCL(g, in_feats, h_feats, activation, dropout)
        # Decoder: h_feats -> in_feats, squashed to (0, 1) by sigmoid.
        self.output_gcn = GCL(g, h_feats, in_feats, torch.sigmoid, dropout)

    def forward(self, features):
        masked = torch.mul(features, self.mask)
        hidden = self.masked_gcn(masked)
        return self.output_gcn(hidden)
def width_to_lifetime(Gamma):
    """Convert a decay width to a lifetime via tau = hbar / (Gamma / MeV).

    Raises:
        ValueError: if Gamma is not strictly positive.
    """
    if Gamma <= 0.0:
        # The original mixed %-style ('%s') with str.format, so the value was
        # never interpolated into the message.
        raise ValueError('Input provided, {} <= 0!'.format(Gamma))
    return hbar / float(Gamma / MeV)
def get_train_transformers(args):
    """Build the training-time torchvision transform pipeline from CLI args.

    Random crop is always applied; horizontal flip and color jitter are added
    only when their probabilities/strengths are positive.
    """
    ops = [transforms.RandomResizedCrop(int(args.image_size), (args.min_scale, args.max_scale))]
    if args.flip > 0.0:
        ops.append(transforms.RandomHorizontalFlip(args.flip))
    if args.jitter > 0.0:
        # Hue is capped at 0.5, the maximum torchvision allows.
        ops.append(transforms.ColorJitter(brightness=args.jitter, contrast=args.jitter, saturation=args.jitter, hue=min(0.5, args.jitter)))
    ops.append(transforms.ToTensor())
    ops.append(transforms.Normalize([0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
    return transforms.Compose(ops)
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) tokenizer for Funnel Transformer models.

    NOTE(review): vendored Hugging Face code — logic kept byte-identical.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Funnel uses a distinct token-type id (2) for the CLS token.
    cls_token_type_id: int = 2

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token='<unk>', sep_token='<sep>', pad_token='<pad>', cls_token='<cls>', mask_token='<mask>', bos_token='<s>', eos_token='</s>', clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix='##', **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs)
        # If the serialized backend normalizer disagrees with the requested
        # options, rebuild it with the requested settings.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if ((normalizer_state.get('lowercase', do_lower_case) != do_lower_case) or (normalizer_state.get('strip_accents', strip_accents) != strip_accents) or (normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars)):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] or [CLS] A [SEP] B [SEP]; an empty second sequence is
        treated like None (truthiness check, matching upstream)."""
        output = (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        if token_ids_1:
            output += (token_ids_1 + [self.sep_token_id])
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Type ids: cls_token_type_id for [CLS], 0 for sequence A (+[SEP]),
        1 for sequence B (+[SEP]) when present."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return ((len(cls) * [self.cls_token_type_id]) + (len((token_ids_0 + sep)) * [0]))
        return (((len(cls) * [self.cls_token_type_id]) + (len((token_ids_0 + sep)) * [0])) + (len((token_ids_1 + sep)) * [1]))

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Delegate vocabulary serialization to the backend tokenizer model."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
def jamendo_resampler(track_id):
    """Load one MTG-Jamendo mp3, resample/downmix it, and save it as float32 .npy.

    Args:
        track_id: mp3 path relative to <DATASET>/mtg/raw30s/; the output lands
            under <DATASET>/mtg/npy/ with the extension swapped to .npy.
    """
    audio_path = os.path.join(DATASET, 'mtg', 'raw30s', track_id)
    (src, _) = load_audio(path=audio_path, ch_format=STR_CH_FIRST, sample_rate=MUSIC_SAMPLE_RATE, downmix_to_mono=True)
    save_name = os.path.join(DATASET, 'mtg', 'npy', track_id.replace('.mp3', '.npy'))
    # exist_ok=True avoids the check-then-create race when many workers
    # resample tracks sharing an output directory in parallel.
    os.makedirs(os.path.dirname(save_name), exist_ok=True)
    np.save(save_name, src.astype(np.float32))
class TimeSplitter(Splitter):
    """Split interactions into train/test by a time threshold.

    The threshold may be a datetime, a string parsed with
    ``time_column_format``, a raw timestamp value, or a float in [0, 1]
    interpreted as the test fraction of rows ordered by timestamp.
    Supports both pandas and Spark dataframes.
    """
    _init_arg_names = ['time_threshold', 'drop_cold_users', 'drop_cold_items', 'query_column', 'item_column', 'timestamp_column', 'session_id_column', 'session_id_processing_strategy', 'time_column_format']

    def __init__(self, time_threshold: Union[(datetime, str, int, float)], query_column: str='query_id', drop_cold_users: bool=False, drop_cold_items: bool=False, item_column: str='item_id', timestamp_column: str='timestamp', session_id_column: Optional[str]=None, session_id_processing_strategy: str='test', time_column_format: str='%Y-%m-%d %H:%M:%S'):
        super().__init__(drop_cold_users=drop_cold_users, drop_cold_items=drop_cold_items, query_column=query_column, item_column=item_column, timestamp_column=timestamp_column, session_id_column=session_id_column, session_id_processing_strategy=session_id_processing_strategy)
        self._precision = 3
        self.time_column_format = time_column_format
        # A float threshold denotes a fraction, so it must lie within [0, 1].
        if (isinstance(time_threshold, float) and ((time_threshold < 0) or (time_threshold > 1))):
            raise ValueError('test_size must between 0 and 1')
        self.time_threshold = time_threshold

    def _partial_split(self, interactions: DataFrameLike, threshold: Union[(datetime, str, int)]) -> Tuple[(DataFrameLike, DataFrameLike)]:
        """Parse string thresholds into datetimes and dispatch on backend."""
        if isinstance(threshold, str):
            threshold = datetime.strptime(threshold, self.time_column_format)
        if isinstance(interactions, SparkDataFrame):
            return self._partial_split_spark(interactions, threshold)
        return self._partial_split_pandas(interactions, threshold)

    def _partial_split_pandas(self, interactions: PandasDataFrame, threshold: Union[(datetime, str, int)]) -> Tuple[(PandasDataFrame, PandasDataFrame)]:
        """Pandas split: rows at/after the threshold timestamp form the test set."""
        res = interactions.copy(deep=True)
        if isinstance(threshold, float):
            # Fractional split: the last `threshold` share of rows (by time) is test.
            res.sort_values(self.timestamp_column, inplace=True)
            test_start_ind = int((res.shape[0] * (1 - threshold)))
            test_start = res.iloc[test_start_ind][self.timestamp_column]
            res['is_test'] = (res[self.timestamp_column] >= test_start)
        else:
            res['is_test'] = (res[self.timestamp_column] >= threshold)
        if self.session_id_column:
            # Keep whole sessions on one side of the split.
            res = self._recalculate_with_session_id_column(res)
        train = res[(~ res['is_test'])].drop(columns=['is_test'])
        test = res[res['is_test']].drop(columns=['is_test'])
        return (train, test)

    def _partial_split_spark(self, interactions: SparkDataFrame, threshold: Union[(datetime, str, int)]) -> Tuple[(SparkDataFrame, SparkDataFrame)]:
        """Spark split: mirrors the pandas logic via window ranking for fractions."""
        if isinstance(threshold, float):
            # Rank rows by timestamp, then find the timestamp where the test share begins.
            dates = interactions.select(self.timestamp_column).withColumn('_row_number_by_ts', sf.row_number().over(Window.orderBy(self.timestamp_column)))
            test_start = (int((dates.count() * (1 - threshold))) + 1)
            test_start = dates.filter((sf.col('_row_number_by_ts') == test_start)).select(self.timestamp_column).collect()[0][0]
            res = interactions.withColumn('is_test', (sf.col(self.timestamp_column) >= test_start))
        else:
            res = interactions.withColumn('is_test', (sf.col(self.timestamp_column) >= threshold))
        if self.session_id_column:
            res = self._recalculate_with_session_id_column(res)
        train = res.filter('is_test == 0').drop('is_test')
        test = res.filter('is_test').drop('is_test')
        return (train, test)

    def _core_split(self, interactions: DataFrameLike) -> List[DataFrameLike]:
        return self._partial_split(interactions, self.time_threshold)
def get_image_ocrs_from_path(pdf_file_path: str, ocr_file_path: str, resize_scale=resize_scale):
    """Load page images from a PDF and word-level OCR boxes from a JSON file.

    pdf_file_path: PDF whose embedded page images are extracted, converted
        to RGB and resized to ``resize_scale``.
    ocr_file_path: JSON file whose second element carries a ``'Blocks'``
        list with ``'Text'``/``'Geometry'``/``'Page'`` entries
        (AWS-Textract-like schema — TODO confirm against producer).
    Returns ``(img_list, ocrs)`` where ``ocrs`` maps 1-based page number to
    a list of ``{'word', 'bbox'}`` dicts with boxes normalized to
    ``resize_scale`` via ``normalize_box``.
    """
    reader = PdfReader(pdf_file_path)
    img_list = []
    for page in reader.pages:
        for image_file_object in page.images:
            stream = io.BytesIO(image_file_object.data)
            img_list.append(Image.open(stream).convert('RGB').resize(resize_scale))
    # Fix: close the OCR file deterministically instead of json.load(open(...)).
    with open(ocr_file_path) as ocr_file:
        json_entry = json.load(ocr_file)[1]
    json_entry = [x for x in json_entry['Blocks'] if ('Text' in x)]
    ocrs = {pg: [] for pg in {x['Page'] for x in json_entry}}
    for entry in json_entry:
        bbox = entry['Geometry']['BoundingBox']
        (x, y, w, h) = (bbox['Left'], bbox['Top'], bbox['Width'], bbox['Height'])
        # Convert (left, top, width, height) to corner form before scaling.
        bbox = normalize_box([x, y, (x + w), (y + h)], width=1, height=1, size=resize_scale)
        ocrs[entry['Page']].append({'word': entry['Text'], 'bbox': bbox})
    return (img_list, ocrs)
# NOTE(review): these two bare names look like remnants of stripped
# decorators (perhaps `@require_torch` / `@require_sigopt` applied to the
# test class that follows) — as written they are no-op expression
# statements and would raise NameError unless `_torch` and `_sigopt` are
# defined elsewhere in this file.  Confirm against the original source.
_torch
_sigopt
class TrainerHyperParameterSigOptIntegrationTest(unittest.TestCase):
    """Exercises ``Trainer.hyperparameter_search`` with the SigOpt backend."""

    def setUp(self):
        defaults = TrainingArguments('..')
        self.n_epochs = defaults.num_train_epochs
        self.batch_size = defaults.train_batch_size

    def test_hyperparameter_search(self):
        class MyTrialShortNamer(TrialShortNamer):
            DEFAULTS = {'a': 0, 'b': 0}

        def hp_space(trial):
            # SigOpt expects a list of parameter descriptors.
            return [
                {'bounds': {'min': (- 4), 'max': 4}, 'name': 'a', 'type': 'int'},
                {'bounds': {'min': (- 4), 'max': 4}, 'name': 'b', 'type': 'int'},
            ]

        def model_init(trial):
            # `trial` is None on the initial (non-search) model instantiation.
            a = trial.assignments['a'] if trial is not None else 0
            b = trial.assignments['b'] if trial is not None else 0
            config = RegressionModelConfig(a=a, b=b, double_output=False)
            return RegressionPreTrainedModel(config)

        def hp_name(trial):
            return MyTrialShortNamer.shortname(trial.assignments)

        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, evaluation_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir='runs', run_name='test', model_init=model_init)
            trainer.hyperparameter_search(direction='minimize', hp_space=hp_space, hp_name=hp_name, backend='sigopt', n_trials=4)
def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-08, epsrel=1.49e-08):
    """Compute a triple (definite) integral via ``nquad``.

    Integrates ``func`` over x in [a, b], y in [gfun(x), hfun(x)] and the
    innermost variable in [qfun(x, y), rfun(x, y)].  Each bound may be a
    constant or a callable.  Returns the (result, abserr) pair from nquad.
    """
    def _bound(limit, *bound_args):
        # A limit may be a plain number or a callable of the outer variables.
        return limit(*bound_args) if callable(limit) else limit

    def innermost_limits(*outer):
        return [_bound(qfun, outer[1], outer[0]), _bound(rfun, outer[1], outer[0])]

    def middle_limits(*outer):
        return [_bound(gfun, outer[0]), _bound(hfun, outer[0])]

    # nquad integrates ranges[0] innermost and ranges[-1] outermost.
    opts = {'epsabs': epsabs, 'epsrel': epsrel}
    return nquad(func, [innermost_limits, middle_limits, [a, b]], args=args, opts=opts)
class Renderer(object):
    """Offscreen OpenGL renderer producing (BGR image, depth map) views of CAD models.

    All meshes supplied at construction are packed into one VAO; individual
    objects are drawn through indirect draw commands selected by ``obj_id``.
    """
    # Framebuffers are pre-allocated once at this maximum size; each render
    # call may use any viewport up to these bounds.
    MAX_FBO_WIDTH = 2000
    MAX_FBO_HEIGHT = 2000

    def __init__(self, models_cad_files, samples=1, vertex_tmp_store_folder='.', clamp=False, vertex_scale=1.0):
        """Load meshes, build the GPU buffers and compile the phong shader.

        models_cad_files: CAD model paths loaded via gu.geo.load_meshes_sixd.
        samples: MSAA sample count; >1 adds a multisampled render FBO.
        vertex_scale: uniform scale applied to vertex positions.
        NOTE(review): `clamp` is accepted but never used here — confirm intent.
        """
        self._samples = samples
        self._context = gu.OffscreenContext()
        (W, H) = (Renderer.MAX_FBO_WIDTH, Renderer.MAX_FBO_HEIGHT)
        # Resolve FBO: RGB8 color + R32F depth-as-color + depth renderbuffer.
        self._fbo = gu.Framebuffer({GL_COLOR_ATTACHMENT0: gu.Texture(GL_TEXTURE_2D, 1, GL_RGB8, W, H), GL_COLOR_ATTACHMENT1: gu.Texture(GL_TEXTURE_2D, 1, GL_R32F, W, H), GL_DEPTH_ATTACHMENT: gu.Renderbuffer(GL_DEPTH_COMPONENT32F, W, H)})
        # Second FBO used by render_many to rasterize one object at a time
        # (per-object depth for bounding-box computation).
        self._fbo_depth = gu.Framebuffer({GL_COLOR_ATTACHMENT0: gu.Texture(GL_TEXTURE_2D, 1, GL_RGB8, W, H), GL_COLOR_ATTACHMENT1: gu.Texture(GL_TEXTURE_2D, 1, GL_R32F, W, H), GL_DEPTH_ATTACHMENT: gu.Renderbuffer(GL_DEPTH_COMPONENT32F, W, H)})
        glNamedFramebufferDrawBuffers(self._fbo.id, 2, np.array((GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1), dtype=np.uint32))
        glNamedFramebufferDrawBuffers(self._fbo_depth.id, 2, np.array((GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1), dtype=np.uint32))
        if (self._samples > 1):
            # Multisampled target rendered into first, then resolved into _fbo.
            self._render_fbo = gu.Framebuffer({GL_COLOR_ATTACHMENT0: gu.TextureMultisample(self._samples, GL_RGB8, W, H, True), GL_COLOR_ATTACHMENT1: gu.TextureMultisample(self._samples, GL_R32F, W, H, True), GL_DEPTH_STENCIL_ATTACHMENT: gu.RenderbufferMultisample(self._samples, GL_DEPTH32F_STENCIL8, W, H)})
            glNamedFramebufferDrawBuffers(self._render_fbo.id, 2, np.array((GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1), dtype=np.uint32))
        self._fbo.bind()
        # Pack all meshes into one interleaved buffer: position (scaled),
        # normal, color normalized to [0, 1] — 9 floats per vertex.
        attributes = gu.geo.load_meshes_sixd(models_cad_files, vertex_tmp_store_folder, recalculate_normals=False)
        vertices = []
        indices = []
        for (vertex, normal, color, faces) in attributes:
            indices.append(faces.flatten())
            vertices.append(np.hstack(((vertex * vertex_scale), normal, (color / 255.0))).flatten())
        indices = np.hstack(indices).astype(np.uint32)
        vertices = np.hstack(vertices).astype(np.float32)
        # Stride 9 floats * 4 bytes; attributes 0/1/2 = position/normal/color.
        vao = gu.VAO({(gu.Vertexbuffer(vertices), 0, (9 * 4)): [(0, 3, GL_FLOAT, GL_FALSE, (0 * 4)), (1, 3, GL_FLOAT, GL_FALSE, (3 * 4)), (2, 3, GL_FLOAT, GL_FALSE, (6 * 4))]}, gu.EBO(indices))
        vao.bind()
        # Per-object indirect draw parameters (one entry per loaded mesh).
        vertex_count = [np.prod(vert[3].shape) for vert in attributes]
        instance_count = np.ones(len(attributes))
        first_index = [sum(vertex_count[:i]) for i in range(len(vertex_count))]
        vertex_sizes = [vert[0].shape[0] for vert in attributes]
        base_vertex = [sum(vertex_sizes[:i]) for i in range(len(vertex_sizes))]
        base_instance = np.zeros(len(attributes))
        ibo = gu.IBO(vertex_count, instance_count, first_index, base_vertex, base_instance)
        ibo.bind()
        gu.Shader.shader_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'shader')
        shader = gu.Shader('depth_shader_phong.vs', 'depth_shader_phong.frag')
        shader.compile_and_use()
        # Camera/scene uniforms live in a shader storage buffer at binding 0.
        self._scene_buffer = gu.ShaderStorage(0, gu.Camera().data, True)
        self._scene_buffer.bind()
        glEnable(GL_DEPTH_TEST)
        glClearColor(0.0, 0.0, 0.0, 1.0)

    def set_light_pose(self, direction):
        # Uniform location 1: light position/direction (see phong shader).
        glUniform3f(1, direction[0], direction[1], direction[2])

    def set_ambient_light(self, a):
        # Uniform location 0: ambient intensity.
        glUniform1f(0, a)

    def set_diffuse_light(self, a):
        # Uniform location 2: diffuse intensity.
        glUniform1f(2, a)

    def set_specular_light(self, a):
        # Uniform location 3: specular intensity.
        glUniform1f(3, a)

    def render(self, obj_id, W, H, K, R, t, near, far, random_light=False, phong={'ambient': 0.4, 'diffuse': 0.8, 'specular': 0.3}):
        """Render object ``obj_id`` with intrinsics K and pose (R, t).

        Returns (bgr, depth): HxWx3 uint8 BGR image and HxW float32 depth
        read from the R32F attachment, both flipped to top-left origin.
        """
        assert ((W <= Renderer.MAX_FBO_WIDTH) and (H <= Renderer.MAX_FBO_HEIGHT))
        if (self._samples > 1):
            self._render_fbo.bind()
        glClear(((GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) | GL_STENCIL_BUFFER_BIT))
        glViewport(0, 0, W, H)
        camera = gu.Camera()
        camera.realCamera(W, H, K, R, t, near, far)
        self._scene_buffer.update(camera.data)
        if random_light:
            # Jitter light pose and phong terms for domain randomization.
            self.set_light_pose((1000.0 * np.random.random(3)))
            self.set_ambient_light(phong['ambient'])
            self.set_diffuse_light((phong['diffuse'] + (0.1 * ((2 * np.random.rand()) - 1))))
            self.set_specular_light((phong['specular'] + (0.1 * ((2 * np.random.rand()) - 1))))
        else:
            self.set_light_pose(np.array([400.0, 400.0, 400]))
            self.set_ambient_light(phong['ambient'])
            self.set_diffuse_light(phong['diffuse'])
            self.set_specular_light(phong['specular'])
        # Indirect command offset: obj_id * 20 bytes (5 uints per command).
        glDrawElementsIndirect(GL_TRIANGLES, GL_UNSIGNED_INT, ctypes.c_void_p(((obj_id * 4) * 5)))
        if (self._samples > 1):
            # Resolve MSAA: re-draw the depth attachment into _fbo, then
            # blit the resolved color attachment across.
            self._fbo.bind()
            glClear((GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT))
            glNamedFramebufferDrawBuffer(self._fbo.id, GL_COLOR_ATTACHMENT1)
            glDrawElementsIndirect(GL_TRIANGLES, GL_UNSIGNED_INT, ctypes.c_void_p(((obj_id * 4) * 5)))
            glNamedFramebufferReadBuffer(self._render_fbo.id, GL_COLOR_ATTACHMENT0)
            glNamedFramebufferDrawBuffer(self._fbo.id, GL_COLOR_ATTACHMENT0)
            glBlitNamedFramebuffer(self._render_fbo.id, self._fbo.id, 0, 0, W, H, 0, 0, W, H, GL_COLOR_BUFFER_BIT, GL_NEAREST)
            glNamedFramebufferDrawBuffers(self._fbo.id, 2, np.array((GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1), dtype=np.uint32))
        glNamedFramebufferReadBuffer(self._fbo.id, GL_COLOR_ATTACHMENT0)
        # OpenGL's origin is bottom-left; flip rows to image orientation.
        bgr_flipped = np.frombuffer(glReadPixels(0, 0, W, H, GL_BGR, GL_UNSIGNED_BYTE), dtype=np.uint8).reshape(H, W, 3)
        bgr = np.flipud(bgr_flipped).copy()
        glNamedFramebufferReadBuffer(self._fbo.id, GL_COLOR_ATTACHMENT1)
        depth_flipped = glReadPixels(0, 0, W, H, GL_RED, GL_FLOAT).reshape(H, W)
        depth = np.flipud(depth_flipped).copy()
        return (bgr, depth)

    def render_many(self, obj_ids, W, H, K, Rs, ts, near, far, random_light=True, phong={'ambient': 0.4, 'diffuse': 0.8, 'specular': 0.3}):
        """Render several posed objects into one image plus per-object bboxes.

        Returns (bgr, depth, bbs) where bbs[i] is the 2D bbox of obj_ids[i],
        computed from a solo depth render of that object in _fbo_depth.
        NOTE(review): unlike `render`, the MSAA path is not used here.
        """
        assert ((W <= Renderer.MAX_FBO_WIDTH) and (H <= Renderer.MAX_FBO_HEIGHT))
        glClear((GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT))
        glViewport(0, 0, W, H)
        if random_light:
            self.set_light_pose((1000.0 * np.random.random(3)))
            self.set_ambient_light((phong['ambient'] + (0.1 * ((2 * np.random.rand()) - 1))))
            self.set_diffuse_light((phong['diffuse'] + (0.1 * ((2 * np.random.rand()) - 1))))
            self.set_specular_light((phong['specular'] + (0.1 * ((2 * np.random.rand()) - 1))))
        else:
            self.set_light_pose(np.array([400.0, 400.0, 400]))
            self.set_ambient_light(phong['ambient'])
            self.set_diffuse_light(phong['diffuse'])
            self.set_specular_light(phong['specular'])
        bbs = []
        for i in range(len(obj_ids)):
            o = obj_ids[i]
            R = Rs[i]
            t = ts[i]
            camera = gu.Camera()
            camera.realCamera(W, H, K, R, t, near, far)
            self._scene_buffer.update(camera.data)
            # Accumulate this object into the shared scene FBO.
            self._fbo.bind()
            glDrawElementsIndirect(GL_TRIANGLES, GL_UNSIGNED_INT, ctypes.c_void_p(((o * 4) * 5)))
            # Draw the object alone into _fbo_depth to get its silhouette.
            self._fbo_depth.bind()
            glViewport(0, 0, W, H)
            glClear((GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT))
            glDrawElementsIndirect(GL_TRIANGLES, GL_UNSIGNED_INT, ctypes.c_void_p(((o * 4) * 5)))
            glNamedFramebufferReadBuffer(self._fbo_depth.id, GL_COLOR_ATTACHMENT1)
            depth_flipped = glReadPixels(0, 0, W, H, GL_RED, GL_FLOAT).reshape(H, W)
            depth = np.flipud(depth_flipped).copy()
            (ys, xs) = np.nonzero((depth > 0))
            obj_bb = misc.calc_2d_bbox(xs, ys, (W, H))
            bbs.append(obj_bb)
        glBindFramebuffer(GL_FRAMEBUFFER, self._fbo.id)
        glNamedFramebufferReadBuffer(self._fbo.id, GL_COLOR_ATTACHMENT0)
        bgr_flipped = np.frombuffer(glReadPixels(0, 0, W, H, GL_BGR, GL_UNSIGNED_BYTE), dtype=np.uint8).reshape(H, W, 3)
        bgr = np.flipud(bgr_flipped).copy()
        glNamedFramebufferReadBuffer(self._fbo.id, GL_COLOR_ATTACHMENT1)
        depth_flipped = glReadPixels(0, 0, W, H, GL_RED, GL_FLOAT).reshape(H, W)
        depth = np.flipud(depth_flipped).copy()
        return (bgr, depth, bbs)

    def close(self):
        # Tear down the offscreen GL context.
        self._context.close()
def evaluate_mislabeling_patch(target_class, rois, detections_adv, detections_rand, iou_thresh=0.5):
    """Count ROIs that are matched by a `target_class` detection.

    rois: iterables of 4-tuples whose second element is the ROI bbox.
    detections_*: detections whose first element is the class id and whose
        last four elements are the bbox coordinates.
    Returns (score_adv, score_rand): for each detection set, the number of
    ROIs with at least one target-class detection whose IoU exceeds
    ``iou_thresh``.
    """
    def _roi_is_detected(roi_bbox, detections):
        # True iff any detection of `target_class` overlaps roi_bbox enough.
        for detection in detections:
            det_obj_bbox = tuple(map(float, detection[(- 4):]))
            det_obj_class = int(detection[0])
            if ((det_obj_class == target_class) and (bb_intersection_over_union(roi_bbox, det_obj_bbox) > iou_thresh)):
                return True
        return False

    (score_adv, score_rand) = (0, 0)
    for (_, roi_obj_bbox, _, _) in rois:
        # bools add as 0/1, matching the original `score += found` counting.
        score_adv += _roi_is_detected(roi_obj_bbox, detections_adv)
        score_rand += _roi_is_detected(roi_obj_bbox, detections_rand)
    return (score_adv, score_rand)
def Train(model, x, adj, A, optimizer):
    """Train ``model`` for a fixed number of epochs on the CutLoss objective.

    Checkpoints the model weights to ./trial_weights.pt whenever the loss
    improves on the best value seen so far.
    """
    max_epochs = 100
    min_loss = 100  # best loss seen so far (initial sentinel)
    for epoch in range(max_epochs):
        Y = model(x, adj)
        loss = CutLoss.apply(Y, A)
        print('Epoch {}: Loss = {}'.format(epoch, loss.item()))
        if (loss < min_loss):
            min_loss = loss.item()
            # Checkpoint the best model seen so far.
            torch.save(model.state_dict(), './trial_weights.pt')
        # Fix: clear stale gradients before backward — previously gradients
        # accumulated across epochs, corrupting every step after the first.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
def rand_v_diffusion(shape, sigma_data=1.0, min_value=0.0, max_value=float('inf'), device='cpu', dtype=torch.float32):
    """Sample noise levels for a v-diffusion schedule.

    Draws uniform samples between the arctan-CDF values of ``min_value`` and
    ``max_value`` (scaled by ``sigma_data``) and maps them back through tan,
    yielding sigmas in [min_value, max_value].
    """
    cdf_lo = math.atan(min_value / sigma_data) * 2 / math.pi
    cdf_hi = math.atan(max_value / sigma_data) * 2 / math.pi
    uniform = torch.rand(shape, device=device, dtype=dtype)
    # Inverse-CDF transform back to sigma space.
    cdf_samples = uniform * (cdf_hi - cdf_lo) + cdf_lo
    return torch.tan(cdf_samples * math.pi / 2) * sigma_data
class RepeatFactorTrainingSampler(Sampler):
    """Distributed sampler that oversamples images containing rare categories.

    Each image gets a repeat factor derived from the frequency of its rarest
    category (LVIS-style repeat-factor sampling — confirm against config);
    the index list is regenerated per epoch and sharded across replicas.
    """

    def __init__(self, dataset, config, num_replicas=None, rank=None, shuffle=True):
        """dataset: COCO-style dataset exposing ``.coco`` and ``.ids``.
        config: provides REPEAT_THRESHOLD, POW, MIN/MAX_REPEAT_TIMES.
        """
        self.shuffle = shuffle
        self.config = config
        if (num_replicas is None):
            if (not dist.is_available()):
                raise RuntimeError('Requires distributed package to be available')
            num_replicas = dist.get_world_size()
        if (rank is None):
            if (not dist.is_available()):
                raise RuntimeError('Requires distributed package to be available')
            rank = dist.get_rank()
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # Initial estimates only — both are recomputed in __iter__ once the
        # repeat-expanded index list is known.
        self.num_samples = int(math.ceil(((len(dataset) * 1.0) / self.num_replicas)))
        self.total_size = (self.num_samples * self.num_replicas)
        coco_json = dataset.coco
        img_bboxes = {}
        ids = dataset.ids
        annotations = coco_json.anns
        # Group annotations by image id.
        for item_ in annotations:
            item = annotations[item_]
            img_bboxes.setdefault(item['image_id'], []).append(item)
        dataset_dict_img = []
        for img_id in ids:
            dataset_dict_img.append({'annotations': img_bboxes[img_id]})
        rep_factors = self._get_repeat_factors(dataset_dict_img)
        # Split each factor into a deterministic integer part and a
        # fractional part that is realized stochastically each epoch.
        self._int_part = torch.trunc(rep_factors)
        self._frac_part = (rep_factors - self._int_part)

    def _get_repeat_factors(self, dataset_dicts):
        """Per-image repeat factor: max over the image's category factors."""
        category_freq = defaultdict(int)
        # f(c): fraction of images that contain category c.
        for dataset_dict in dataset_dicts:
            cat_ids = {ann['category_id'] for ann in dataset_dict['annotations']}
            for cat_id in cat_ids:
                category_freq[cat_id] += 1
        num_images = len(dataset_dicts)
        for (k, v) in category_freq.items():
            category_freq[k] = (v / num_images)
        # r(c) = clamp((REPEAT_THRESHOLD / f(c)) ** POW, MIN.., MAX..).
        category_rep = {cat_id: max(self.config.MIN_REPEAT_TIMES, min(self.config.MAX_REPEAT_TIMES, math.pow((self.config.REPEAT_THRESHOLD / cat_freq), self.config.POW))) for (cat_id, cat_freq) in category_freq.items()}
        rep_factors = []
        for dataset_dict in dataset_dicts:
            cat_ids = {ann['category_id'] for ann in dataset_dict['annotations']}
            rep_factor = max({category_rep[cat_id] for cat_id in cat_ids})
            rep_factors.append(rep_factor)
        logging_rank('max(rep_factors): {} , min(rep_factors): {} , len(rep_factors): {}'.format(max(rep_factors), min(rep_factors), len(rep_factors)), distributed=1, local_rank=self.rank)
        return torch.tensor(rep_factors, dtype=torch.float32)

    def _get_epoch_indices(self, generator):
        """Expand dataset indices by their stochastically rounded repeat factors."""
        # Round the fractional part up with probability equal to its value.
        rands = torch.rand(len(self._frac_part), generator=generator)
        rep_factors = (self._int_part + (rands < self._frac_part).float())
        indices = []
        for (dataset_index, rep_factor) in enumerate(rep_factors):
            indices.extend(([dataset_index] * int(rep_factor.item())))
        return torch.tensor(indices, dtype=torch.int64)

    def __iter__(self):
        # Seed with the epoch so every replica builds the same index list.
        if self.shuffle:
            g = torch.Generator()
            g.manual_seed(self.epoch)
            indices = self._get_epoch_indices(g)
            randperm = torch.randperm(len(indices), generator=g).tolist()
            indices = indices[randperm]
        else:
            g = torch.Generator()
            g.manual_seed(self.epoch)
            indices = self._get_epoch_indices(g)
        self.total_size = len(indices)
        logging_rank('balance sample total_size: {}'.format(self.total_size), distributed=1, local_rank=self.rank)
        # Shard contiguously across replicas; any tail remainder is dropped.
        self.num_samples = int((len(indices) / self.num_replicas))
        offset = (self.num_samples * self.rank)
        indices = indices[offset:(offset + self.num_samples)]
        assert (len(indices) == self.num_samples)
        return iter(indices)

    def __len__(self):
        # Per-replica sample count (updated by __iter__ each epoch).
        return self.num_samples

    def set_epoch(self, epoch):
        # Called by the training loop so each epoch reshuffles deterministically.
        self.epoch = epoch
class TrafficControlPredictTrafficCongestion(VirtualFunctionTool):
    """Declarative spec of a virtual tool predicting congestion at a location.

    Holds only metadata (name, summary, parameter/return/exception schemas);
    execution behavior is presumably supplied by VirtualFunctionTool.
    """
    name = 'TrafficControlPredictTrafficCongestion'
    summary = 'Predicts traffic congestion at a specific road or intersection in the future based on historical data and current conditions.'
    # Input schema: a location id plus a [start, end] prediction window.
    parameters: List[ArgParameter] = [{'name': 'location_id', 'type': 'string', 'description': 'The unique identifier of the road or intersection to predict congestion for.', 'required': True}, {'name': 'prediction_start_time', 'type': 'string', 'description': "The start time of the prediction in the format 'yyyy-mm-dd hh:mm:ss'.", 'required': True}, {'name': 'prediction_end_time', 'type': 'string', 'description': "The end time of the prediction in the format 'yyyy-mm-dd hh:mm:ss'.", 'required': True}]
    # Output schema: a single categorical congestion level.
    returns: List[ArgReturn] = [{'name': 'predicted_congestion_level', 'type': 'string', 'description': "The predicted level of traffic congestion, can only be 'low', 'medium', 'high', or 'severe'."}]
    # Declared failure modes: unknown location or malformed/illegal times.
    exceptions: List[ArgException] = [{'name': 'NotFoundException', 'description': "The 'location_id' argument does not correspond to a valid location."}, {'name': 'InvalidRequestException', 'description': "The 'prediction_start_time' or 'prediction_end_time' arguments are not in the correct format, or the 'prediction_start_time' argument is not before the 'prediction_end_time' argument, or the 'prediction_start_time' argument is not in the future."}]
def get_activations(images, sess, batch_size=50, verbose=False):
    """Propagate images through the Inception network and collect activations.

    images: array of shape (N, ...) fed to 'FID_Inception_Net/ExpandDims:0'.
    sess: TF session holding the loaded Inception graph.
    Returns an (n_batches * batch_size, 2048) array; the remainder images
    that do not fill a whole batch are dropped.
    """
    inception_layer = _get_inception_layer(sess)
    n_images = images.shape[0]
    if batch_size > n_images:
        print('warning: batch size is bigger than the data size. setting batch size to data size')
        batch_size = n_images
    n_batches = n_images // batch_size
    activations = np.empty((n_batches * batch_size, 2048))
    for batch_idx in range(n_batches):
        if verbose:
            print('\rPropagating batch %d/%d' % (batch_idx + 1, n_batches), end='', flush=True)
        lo = batch_idx * batch_size
        hi = lo + batch_size
        pred = sess.run(inception_layer, {'FID_Inception_Net/ExpandDims:0': images[lo:hi]})
        activations[lo:hi] = pred.reshape(batch_size, (- 1))
    if verbose:
        print(' done')
    return activations
def test_evaluation():
    """End-to-end check of quality/diagnostic reports on a toy two-table dataset."""
    base_table = pd.DataFrame({'id': [0, 1, 2, 3], 'col': [1, 2, 3, 4]})
    perturbed_table = pd.DataFrame({'id': [0, 1, 2, 3], 'col': [1, 2, 3, 3.5]})
    data = {'table1': base_table, 'table2': base_table}
    samples = {'table1': base_table, 'table2': perturbed_table}
    metadata = MultiTableMetadata().load_from_dict({
        'tables': {
            'table1': {'columns': {'id': {'sdtype': 'id'}, 'col': {'sdtype': 'numerical'}}},
            'table2': {'columns': {'id': {'sdtype': 'id'}, 'col': {'sdtype': 'numerical'}}},
        },
        'relationships': [{'parent_table_name': 'table1', 'parent_primary_key': 'id', 'child_table_name': 'table2', 'child_foreign_key': 'id'}],
    })
    # NOTE(review): expecting a quality score of exactly 0.0 looks odd for
    # nearly identical tables — confirm against the pinned sdv version.
    score = evaluate_quality(data, samples, metadata).get_score()
    assert (score == 0.)
    report = run_diagnostic(data, samples, metadata)
    assert (report.get_score() == 1)
    expected_properties = pd.DataFrame({'Property': ['Data Validity', 'Data Structure', 'Relationship Validity'], 'Score': [1.0, 1.0, 1.0]})
    pd.testing.assert_frame_equal(report.get_properties(), expected_properties)
def late_import():
    """Lazily bind Sage GF(2^e) machinery into this module's globals.

    Deferred, presumably to avoid import cycles or startup cost — confirm.
    Idempotent: the presence of 'GF2' in globals() marks completion.
    """
    if ('GF2' in globals()):
        return
    global Cache_ntl_gf2e, GF, GF2
    import sage.rings.finite_rings.element_ntl_gf2e
    Cache_ntl_gf2e = sage.rings.finite_rings.element_ntl_gf2e.Cache_ntl_gf2e
    import sage.rings.finite_rings.finite_field_constructor
    GF = sage.rings.finite_rings.finite_field_constructor.GF
    GF2 = GF(2)
def first_bn_multiplier_weighting_fn(orig_bn_stats_holder: 'KerasOriginalBNStatsHolder', **kwargs) -> Dict[str, float]:
    """Per-BN-layer loss weights: the first BN layer gets 10/N, the rest 1/N.

    orig_bn_stats_holder: exposes get_num_bn_layers() and
        get_bn_layer_names(); the first name in that list is up-weighted.
    Returns a dict mapping each BN layer name to its weight.
    """
    num_bn_layers = orig_bn_stats_holder.get_num_bn_layers()
    # Hoisted: the original queried the layer-name list twice.
    bn_layer_names = orig_bn_stats_holder.get_bn_layer_names()
    layer_weighting_dict = {bn_layer_names[0]: (10 / num_bn_layers)}
    layer_weighting_dict.update({bn_layer_name: (1 / num_bn_layers) for bn_layer_name in bn_layer_names[1:]})
    return layer_weighting_dict
class _FilePersistence(_ConcretePersistence):
    """Persists data points as tab-separated measurement lines in one file.

    The file starts with '#'-prefixed meta lines (command line, start time,
    environment, source details) followed by a column header and data rows.
    """
    # Column separator of the data file.
    _SEP = '\t'

    def __init__(self, data_filename, data_store, configurator, ui):
        super(_FilePersistence, self).__init__(data_store, ui)
        if (not data_filename):
            raise ValueError(('DataPointPersistence expects a filename ' + ('for data_filename, but got: %s' % data_filename)))
        self._data_filename = data_filename
        self._file = None  # lazily opened on the first persisted data point
        if configurator.discard_old_data:
            self._discard_old_data()
        self._lock = Lock()
        self._read_start_time()
        if (not self._start_time):
            self._start_time = get_current_time()
        self._configurator = configurator

    def _discard_old_data(self):
        self._truncate_file(self._data_filename)

    @staticmethod
    def _truncate_file(filename):
        # Fix: must be a @staticmethod — the function takes only `filename`
        # but is invoked as self._truncate_file(...), which without the
        # decorator passes `self` as an extra argument (TypeError).
        with open(filename, 'w'):
            pass

    def _read_start_time(self):
        """Initialize self._start_time from the file's meta block (or None)."""
        if (not os.path.exists(self._data_filename)):
            self._start_time = None
            return
        with open(self._data_filename, 'r') as data_file:
            self._start_time = self._read_first_meta_block(data_file)

    @staticmethod
    def _read_first_meta_block(data_file):
        # Fix: @staticmethod for the same reason as _truncate_file.
        # Only the leading '#' comment block can carry the start-time line.
        for line in data_file:
            if (not line.startswith('#')):
                return None
            if line.startswith(_START_TIME_LINE):
                return line[len(_START_TIME_LINE):].strip()
        return None

    def load_data(self, runs, discard_run_data):
        """Load persisted measurements, optionally dropping data of `runs`.

        When discarding, the data file is rewritten through a temp file with
        the affected runs' lines filtered out.  Returns the recorded start
        time (a missing file is treated as "nothing to load").
        """
        if discard_run_data:
            current_runs = {run for run in runs if run.is_persisted_by(self)}
        else:
            current_runs = None
        try:
            if current_runs:
                with NamedTemporaryFile('w', delete=False) as target:
                    with open(self._data_filename, 'r') as data_file:
                        self._process_lines(data_file, current_runs, target)
                os.unlink(self._data_filename)
                shutil.move(target.name, self._data_filename)
            else:
                with open(self._data_filename, 'r') as data_file:
                    self._process_lines(data_file, current_runs, None)
        except IOError:
            self.ui.debug_error_info(('No data loaded, since %s does not exist.\n' % self._data_filename))
        return self._start_time

    def _process_lines(self, data_file, runs, filtered_data_file):
        """Parse measurement lines; optionally copy kept lines to `filtered_data_file`."""
        errors = set()
        data_point = None
        previous_run_id = None
        line_number = 0
        for line in data_file:
            if line.startswith('#'):
                # Meta/comment lines are counted and passed through unparsed.
                # NOTE(review): only comment lines bump line_number — confirm
                # that data lines are intentionally excluded from the count.
                line_number += 1
                if filtered_data_file:
                    filtered_data_file.write(line)
                continue
            try:
                (data_point, previous_run_id) = self._parse_data_line(data_point, line, line_number, runs, filtered_data_file, previous_run_id)
            except ValueError as err:
                # Report each distinct parse error only once.
                msg = str(err)
                if (not errors):
                    self.ui.debug_error_info((('Failed loading data from data file: ' + self._data_filename) + '\n'))
                if (msg not in errors):
                    self.ui.debug_error_info((('{ind}' + msg) + '\n'))
                    errors.add(msg)

    def _parse_data_line(self, data_point, line, line_number, runs, filtered_data_file, previous_run_id):
        """Parse one measurement line, growing/closing the current data point."""
        measurement = Measurement.from_str_list(self._data_store, line.rstrip('\n').split(self._SEP), line_number, self._data_filename)
        run_id = measurement.run_id
        if (filtered_data_file and runs and (run_id in runs)):
            # Line belongs to a run being re-executed: drop it from the rewrite.
            return (data_point, previous_run_id)
        if filtered_data_file:
            filtered_data_file.write(line)
        # NOTE(review): identity comparison — run_id objects are presumably
        # shared/interned per run; confirm `is not` (vs `!=`) is intended.
        if (previous_run_id is not run_id):
            data_point = DataPoint(run_id)
            previous_run_id = run_id
        data_point.add_measurement(measurement)
        if measurement.is_total():
            # A 'total' measurement closes the current data point.
            run_id.loaded_data_point(data_point, ((measurement.iteration <= run_id.warmup_iterations) if run_id.warmup_iterations else False))
            data_point = DataPoint(run_id)
        return (data_point, previous_run_id)

    def _open_file_and_append_execution_comment(self):
        """Open the data file for appending; write meta block (+ header if new)."""
        shebang_with_metadata = ('#!%s\n' % subprocess.list2cmdline(sys.argv))
        shebang_with_metadata += ((_START_TIME_LINE + self._start_time) + '\n')
        shebang_with_metadata += (('# Environment: ' + json.dumps(determine_environment())) + '\n')
        shebang_with_metadata += (('# Source: ' + json.dumps(determine_source_details(self._configurator))) + '\n')
        csv_header = (self._SEP.join(Measurement.get_column_headers()) + '\n')
        try:
            data_file = open(self._data_filename, 'a+')
            is_empty = (data_file.tell() == 0)
            data_file.write(shebang_with_metadata)
            if is_empty:
                # Only a brand-new file gets the column header.
                data_file.write(csv_header)
            data_file.flush()
            return data_file
        except Exception as err:
            raise UIError(('Error: Was not able to open data file for writing.\n{ind}%s\n%s\n' % (os.getcwd(), err)), err)

    def _persists_data_point_in_open_file(self, data_point):
        for measurement in data_point.get_measurements():
            line = self._SEP.join(measurement.as_str_list())
            self._file.write((line + '\n'))

    def persist_data_point(self, data_point):
        """Append all measurements of `data_point`; thread-safe and flushed."""
        with self._lock:
            self._open_file_to_add_new_data()
            self._persists_data_point_in_open_file(data_point)
            self._file.flush()

    def run_completed(self):
        # Fix: the original had an empty body (a SyntaxError as written);
        # file persistence needs no per-run completion work.
        pass

    def _open_file_to_add_new_data(self):
        if (not self._file):
            self._file = self._open_file_and_append_execution_comment()

    def close(self):
        if self._file:
            self._file.close()
            self._file = None
class Swish(nn.Module):
    """Module wrapper around the functional ``swish`` activation."""

    def __init__(self, inplace: bool=False):
        super(Swish, self).__init__()
        # Whether the underlying op may modify its input tensor in place.
        self.inplace = inplace

    def forward(self, x):
        out = swish(x, self.inplace)
        return out
def TupleSort(name, sorts, ctx=None):
    """Create a Z3 tuple datatype over the given component sorts.

    Returns (sort, constructor, projections): the created sort, its single
    constructor, and one accessor ('project0', 'project1', ...) per field.
    """
    # Fix: renamed the local from `tuple`, which shadowed the builtin.
    tuple_sort = Datatype(name, ctx)
    # One projection field per component sort.
    projects = [(('project%d' % i), sorts[i]) for i in range(len(sorts))]
    tuple_sort.declare(name, *projects)
    tuple_sort = tuple_sort.create()
    return (tuple_sort, tuple_sort.constructor(0), [tuple_sort.accessor(0, i) for i in range(len(sorts))])
def magnet_loss(features, labels, margin=1.0, unique_labels=None):
    """Magnet-style metric-learning loss over a batch — TF1 graph ops.

    features: (N, D) float tensor of embeddings.
    labels: (N,) tensor of class labels.
    unique_labels: optional precomputed unique label tensor; if None it is
        derived from `labels` together with per-class counts.
    Returns (mean loss, per-class mean embeddings, variance estimate).
    """
    nil = tf.constant(0.0, tf.float32)
    one = tf.constant(1.0, tf.float32)
    minus_two = tf.constant((- 2.0), tf.float32)
    eps = tf.constant(0.0001, tf.float32)
    margin = tf.constant(margin, tf.float32)
    num_per_class = None
    if (unique_labels is None):
        (unique_labels, sample_to_unique_y, num_per_class) = tf.unique_with_counts(labels)
        num_per_class = tf.cast(num_per_class, tf.float32)
    # y_mat[i, j] == 1 iff sample i belongs to unique class j.
    y_mat = tf.cast(tf.equal(tf.reshape(labels, ((- 1), 1)), tf.reshape(unique_labels, (1, (- 1)))), dtype=tf.float32)
    if (num_per_class is None):
        num_per_class = tf.reduce_sum(y_mat, reduction_indices=[0])
    # Per-class mean embeddings via the masked average over each class.
    class_means = (tf.reduce_sum((tf.expand_dims(tf.transpose(y_mat), (- 1)) * tf.expand_dims(features, 0)), reduction_indices=[1]) / tf.expand_dims(num_per_class, (- 1)))
    squared_distance = _pdist(features, class_means)
    num_samples = tf.cast(tf.shape(labels)[0], tf.float32)
    # Within-class variance estimate (sum of own-class squared distances
    # divided by N - 1).
    variance = (tf.reduce_sum((y_mat * squared_distance)) / (num_samples - one))
    const = (one / (minus_two * (variance + eps)))
    # Gaussian-kernel exponent, with the margin applied to own-class entries.
    linear = ((const * squared_distance) - (y_mat * margin))
    # Max-subtraction for numerically stable exponentiation (log-sum-exp trick).
    maxi = tf.reduce_max(linear, reduction_indices=[1], keepdims=True)
    loss_mat = tf.exp((linear - maxi))
    # a: own-class mass; b: other-class mass.
    a = tf.reduce_sum((y_mat * loss_mat), reduction_indices=[1])
    b = tf.reduce_sum(((one - y_mat) * loss_mat), reduction_indices=[1])
    # Hinge at zero on the negative log-ratio (tf.log / reduction_indices
    # are TF1-era APIs).
    loss = tf.maximum(nil, (- tf.log((eps + (a / (eps + b))))))
    return (tf.reduce_mean(loss), class_means, variance)
def _has_only_empty_bbox(anno):
return all((any(((o <= 1) for o in obj['bbox'][2:])) for obj in anno)) |
def model_parameters(model):
    """Total parameter count of ``model``, expressed in millions."""
    total = sum(np.prod(p.size()) for p in model.parameters())
    return total / 1000000.0
def gradients_speed(ys, xs, grad_ys=None, **kwargs):
    # Convenience wrapper: memory-checkpointed gradients with the 'speed'
    # checkpoint-selection strategy — see `gradients` for the semantics.
    return gradients(ys, xs, grad_ys, checkpoints='speed', **kwargs)
def get_arg_parser():
    """Build the top-level argparse parser for the snips-nlu CLI."""
    from snips_nlu.cli.download import add_download_parser, add_download_all_languages_parser
    from snips_nlu.cli.download_entity import add_download_entity_parser, add_download_language_entities_parser
    from snips_nlu.cli.generate_dataset import add_generate_dataset_subparser
    from snips_nlu.cli.inference import add_parse_parser
    from snips_nlu.cli.link import add_link_parser
    from snips_nlu.cli.metrics import add_cross_val_metrics_parser, add_train_test_metrics_parser
    from snips_nlu.cli.training import add_train_parser
    from snips_nlu.cli.versions import add_version_parser, add_model_version_parser
    arg_parser = argparse.ArgumentParser(description='Snips NLU command line interface', prog='python -m snips_nlu', formatter_class=Formatter)
    arg_parser.add_argument('-v', '--version', action='store_true', help='Print package version')
    subparsers = arg_parser.add_subparsers(title='available commands', metavar='command [options ...]')
    # Register every subcommand; tuple order determines the help listing.
    subcommand_adders = (
        add_generate_dataset_subparser,
        add_train_parser,
        add_parse_parser,
        add_download_parser,
        add_download_all_languages_parser,
        add_download_entity_parser,
        add_download_language_entities_parser,
        add_link_parser,
        add_cross_val_metrics_parser,
        add_train_test_metrics_parser,
        add_version_parser,
        add_model_version_parser,
    )
    for add_subcommand in subcommand_adders:
        add_subcommand(subparsers, formatter_class=Formatter)
    return arg_parser
def get_args_EBM():
    """Build and parse the full argument set for EBM (energy-based model) training.

    Returns the parsed ``argparse.Namespace``. When running inside
    IPython/Jupyter, parses an empty argument list (all defaults); otherwise
    parses ``sys.argv``. Step sizes left at -1 fall back to ``args.step_size``.
    """
    parser = argparse.ArgumentParser(description='Concept argparse.')
    parser.add_argument('--exp_id', type=str, help='Experiment id')
    parser.add_argument('--date_time', type=str, help='date and time')
    parser.add_argument('--exp_name', default='None', help='If not "None", will use asynchronous training, and the data_record oftraining will be saved under f"{exp_id}_{date_time}/{exp_name}/(unknown)".')
    parser.add_argument('--inspect_interval', type=int, help='Interval for inspecting and plotting.')
    parser.add_argument('--save_interval', type=int, help='Interval for saving the model_dict.')
    parser.add_argument('--verbose', type=int, help='verbose.')
    parser.add_argument('--seed', type=int, help='seed')
    parser.add_argument('--gpuid', type=str, help='gpu id.')
    parser.add_argument('--id', type=str, help='id.')
    parser.add_argument('--recent_record', type=int, default=(- 1), help='Number of most recent entries to keep in the data record. If -1, keeps all entries.')
    parser.add_argument('--dataset', type=str, help='dataset name. Choose from "cifar10", "concept-{*}" and "arc-{*}"')
    parser.add_argument('--n_examples', type=int, help='Number of examples.')
    parser.add_argument('--n_queries_per_class', type=int, help='If generating fewshot, the number of queries per class.')
    parser.add_argument('--canvas_size', type=int, help='Size of the canvas for concept dataset.')
    parser.add_argument('--rainbow_prob', type=float, help='Probability of using rainbow color in BabyARC.')
    parser.add_argument('--max_n_distractors', type=int, default=(- 1), help='Number of distractors in BabyARC. If set to -1, it will follow the default behavior.')
    parser.add_argument('--min_n_distractors', type=int, default=0, help='Minimum number of distractors in BabyARC.')
    parser.add_argument('--allow_connect', type=str2bool, nargs='?', const=True, default=True, help='Whether or not to allow objects to connect in the image.')
    parser.add_argument('--is_rewrite', type=str2bool, nargs='?', const=True, default=False, help='If True, will rewrite the dataset.')
    parser.add_argument('--max_num_occur', type=int, default=10, help='Max number of concepts (or relations) in an example.')
    parser.add_argument('--n_operators', type=int, help='Number of operators in BabyARC.')
    parser.add_argument('--color_avail', type=str, help='Available color in BabyARC separated by , (e.g., 1,2,3, -1 means any color).')
    parser.add_argument('--to_RGB', type=str2bool, nargs='?', const=True, default=False, help='If dataset is BabyARC, convert from 10-channels to RGB')
    parser.add_argument('--is_load', type=str2bool, nargs='?', const=True, default=True, help='Whether or not to load dataset from file if it exists.')
    parser.add_argument('--rescaled_size', type=str, help='If dataset is BabyARC, produce the new shape for the dataset. Choose from "None" (no resizing), e.g. "16,16" (resizing to (16,16)).')
    parser.add_argument('--rescaled_mode', type=str, help='Choose from "nearest", "default".')
    parser.add_argument('--seed_3d', type=int, default=42, help='seed when converting BabyARC to 3D,')
    parser.add_argument('--use_seed_2d', type=str2bool, default=False, help='Use "seed" argument to generate 2D examples that are converted to 3D (instead of "seed_3d")')
    parser.add_argument('--image_size_3d', type=int, nargs=2, default=[256, 256], help='Size of 3D image.')
    parser.add_argument('--num_processes_3d', type=int, default=20, help='Number of processes to use for conversion.')
    parser.add_argument('--color_map_3d', type=str, default='same', help='If "random", will randomly assign a color per object. Otherwise, use the color dictionary.')
    parser.add_argument('--add_thick_surf', type=int, nargs=2, default=[0, 0.5], help='Range of values in which to uniformly sample addition of thickness in xy plane.')
    parser.add_argument('--add_thick_depth', type=int, nargs=2, default=[0, 0.5], help='Range of values in which to uniformly sample addition of thickness in z dimension.')
    parser.add_argument('--transforms', type=str, help='Data augmentations to perform on initial negative samples from replay buffer. Example: "color+flip+rotate+resize" or "color+flip+rotate+resize", where the 0.5 is the probability of doing the transformation (default prob. of 1)')
    parser.add_argument('--transforms_pos', type=str, help='Data augmentations to perform on initial positive samples from replay buffer. Example: "color+flip+rotate+resize:0.5" or "color+flip+rotate+resize", where the 0.5 is the probability of doing the transformation (default prob. of 1)')
    parser.add_argument('--model_type', type=str, help='Model type. Choose from "CEBM", "GraphEBM", "IGEBM".')
    parser.add_argument('--w_type', type=str, help='type of the first two arities of input. choose from "image", "mask", "image+mask", "obj", "image+obj"')
    parser.add_argument('--mask_mode', type=str, help='mask_mode. Choose from "concat", "mulcat", "mul".')
    parser.add_argument('--channel_base', type=int, help='Base n_channels for "CEBM".')
    parser.add_argument('--two_branch_mode', type=str, help='Mode for the two branches of CEBM, if its mode is "operator". Choose from "concat", "imbal-{#indi-layers}".')
    parser.add_argument('--is_spec_norm', type=str, help='If "True", each CNN block will have spectral norm. Choose from "True", "False", "ws" (with normalization).')
    parser.add_argument('--is_res', type=str2bool, nargs='?', const=True, default=True, help='If True, will use residual layer for CResBlock.')
    parser.add_argument('--c_repr_mode', type=str, help='How c_repr will be combined with the input. Choose from "None", l1", "l2", "c1", "c2", "c3".')
    parser.add_argument('--c_repr_first', type=int, help='First block to pass in c_repr.')
    parser.add_argument('--c_repr_base', type=int, help='Number of base channels for c_repr.')
    parser.add_argument('--z_mode', type=str, help='How z will be combined with the input. Choose from "None", "c0", "c1", "c2", "c3".')
    parser.add_argument('--z_first', type=int, help='First block to pass in z.')
    parser.add_argument('--z_dim', type=int, help='Dimension for z.')
    parser.add_argument('--pos_embed_mode', type=str, help='Whether or how to embed position. Choose from "None", "implicit", "sine", "learned".')
    parser.add_argument('--aggr_mode', type=str, help='Aggregation mode for the last layer.')
    parser.add_argument('--act_name', type=str, help='Activation name')
    parser.add_argument('--normalization_type', type=str, help='Normalization type.')
    parser.add_argument('--dropout', type=float, help='Dropout. If greater than 0, will have dropout for the CResBlock.')
    parser.add_argument('--self_attn_mode', type=str, help='Choose from "None", "pixel".')
    parser.add_argument('--last_act_name', type=str, help='Activation for last layer of ConceptEBM.')
    parser.add_argument('--n_avg_pool', type=int, help='Number of average pooling for ConceptEBM at the beginning.')
    parser.add_argument('--cumu_mode', type=str, help='cumu_mode for concept_energy_composite, for computing the loss that combines multiple solutions for the same task. Choose from "harmonic", "gm-{order}" (generalized-mean with specified order), "mean", "geometric", "sum".')
    parser.add_argument('--update_ebm_dict_interval', type=int, help='Every {update_ebm_dict_interval} epochs, update the ebm_dict.')
    parser.add_argument('--min_n_tasks', type=int, help='Wait until the number of tasks is above {args.min_n_tasks} in task_dict.p')
    parser.add_argument('--is_save', type=str2bool, nargs='?', const=True, default=True, help='If True, will write to the ebm_dict.p and data_record for EBM_composite.')
    parser.add_argument('--train_coef', type=float, help='train_coef.')
    parser.add_argument('--test_coef', type=float, help='train_coef.')
    parser.add_argument('--mutual_exclusive_coef', type=float, help='Coefficient for mutual-exclusive energy during composite training. Penalizes when two masks from multiple EBMs overlap in an image.')
    parser.add_argument('--obj_coef', type=float, help='Coefficient for regularization to encourage each EBM to discover individual objects.')
    parser.add_argument('--channel_coef', type=float, help='Coefficient for the main channel (1:10th channel) for the ARC/BabyARC tasks and all 3 channels for RGB images.')
    parser.add_argument('--empty_coef', type=float, help='Coefficient for the empty channel (0th channel) for the ARC/BabyARC tasks.')
    parser.add_argument('--pixel_entropy_coef', type=float, help='Coefficient for pixel-wise entropy.')
    parser.add_argument('--pixel_gm_coef', type=float, help='Coefficient for pixel-wise generalize-mean distance w.r.t. 0 and 1.')
    parser.add_argument('--iou_batch_consistency_coef', type=float, help='Encouraging consistency for distance of two masks across examples.')
    parser.add_argument('--iou_attract_coef', type=float, help='Encouraging masks that are near to be nearer.')
    parser.add_argument('--iou_concept_repel_coef', type=float, help='Repel masks that belong to different concepts that occupies one object slot.')
    parser.add_argument('--iou_relation_repel_coef', type=float, help='Repel masks that belong to the same relation.')
    parser.add_argument('--iou_relation_overlap_coef', type=float, help='Repel masks that belong to the same relation.')
    parser.add_argument('--iou_target_matching_coef', type=float, help='Coefficient for relation tasks that if the IoU between one one discovered mask and the target mask is greater than 0.5, will further encourage it to be nearer.')
    parser.add_argument('--connected_coef', type=float, help='Encourage each mask to be a single connected component.')
    parser.add_argument('--connected_num_samples', type=int, help='Number of pairs of points to sample when computing connected loss.')
    parser.add_argument('--target_loss_type', type=str, help='Loss_type for ebm supervised learning. Choose from any valid loss_type. E.g. "mse", "Jaccard".')
    parser.add_argument('--is_selector_gnn', type=str2bool, nargs='?', const=True, default=False, help='If True, will have GNN for the selector.')
    parser.add_argument('--is_zgnn_node', type=str2bool, nargs='?', const=True, default=False, help='If True, have zgnn_node for the GNN (zgnn is a tuple of (zgnn_node, zgnn_edge). If is_zgnn_node is False, zgnn_node will be None). If False, will use forward_NN.')
    parser.add_argument('--is_cross_validation', type=str2bool, nargs='?', const=True, default=True, help='If True, use cross-validation within a task.')
    parser.add_argument('--load_pretrained_concepts', type=str, help='If not "None", will be a string including the dirname + filename for the data_record that contains the concept_model.')
    parser.add_argument('--n_GN_layers', type=int, help='Number of GN layers.')
    parser.add_argument('--gnn_normalization_type', type=str, help='Normalization_type for GNN.')
    parser.add_argument('--gnn_pooling_dim', type=int, help='Pooling dimension for GNN.')
    parser.add_argument('--edge_attr_size', type=int, help='Size of edge_attr.')
    parser.add_argument('--cnn_output_size', type=int, help='CNN output_size.')
    parser.add_argument('--cnn_is_spec_norm', type=str, help='If True, will have spectral norm for CNN inside GNN. Choose from "True", "False", "ws".')
    parser.add_argument('--is_ebm_share_param', type=str2bool, nargs='?', const=True, default=False, help='Whether or not to share parameter for different EBMs of the same EBM mode.')
    parser.add_argument('--T_id', type=str, help='T_id for the task for standalone mode. ExamplesTuc6: 6 random concepts; Tuc6r3: 6 random concepts and 3 random relations; Tuc6r3o2: 6 random concepts, 3 random relations and 2 operators.')
    parser.add_argument('--image_value_range', type=str, help='Minimum and maximum value for the values of the image at each pixel. For BabyARC/ARC, use "0,1", for CLEVR, use "-1,1".')
    parser.add_argument('--w_init_type', type=str, default='random', help='How to initialize w. Choose from "input", "random", "input-mask", "input-gaus", "k-means", "k-means^x" where x is the number of clusters')
    parser.add_argument('--indiv_sample', type=int, default=(- 1), help='Number of sample steps for each EBM in selector when reconstructing image. If -1, do SGLD with all EBMs')
    parser.add_argument('--n_tasks', type=int, help='Number of tasks.')
    parser.add_argument('--is_concat_minibatch', type=str2bool, nargs='?', const=True, default=False, help='If True, will concatenate the tasks in a minibatch into a single tensor.')
    parser.add_argument('--relation_merge_mode', type=str, help='How to merge graphs for relation graph discovery. Choose from "None", "threshold".')
    parser.add_argument('--is_relation_z', type=str2bool, nargs='?', const=True, default=True, help='If True, will have z for relation-EBM and reconstruction on the 2nd SGLD.')
    parser.add_argument('--SGLD_is_anneal', type=str2bool, nargs='?', const=True, default=False, help='If True, will anneal the SGLD_ coefficients..')
    parser.add_argument('--SGLD_anneal_power', type=float, help='Power to which annealing coefficient grows.')
    parser.add_argument('--SGLD_is_penalize_lower', type=str, help='if True or "True", will penalize that the sum is less than 1. If "False" or False, will not. If "obj:0.001" e.g., will only penalize on the object locations (if n_channels==10), with coefficient of 0.001.')
    parser.add_argument('--SGLD_iou_batch_consistency_coef', type=float, help='Encouraging consistency for distance of two masks across examples in SGLD.')
    parser.add_argument('--SGLD_iou_attract_coef', type=float, help='Encouraging masks that are near to be nearer in SGLD.')
    parser.add_argument('--SGLD_iou_concept_repel_coef', type=float, help='Repel masks that belong to different concepts that occupies one object slot in SGLD.')
    parser.add_argument('--SGLD_iou_relation_repel_coef', type=float, help='Repel masks that belong to the same relation in SGLD.')
    parser.add_argument('--SGLD_iou_relation_overlap_coef', type=float, help='Repel masks that belong to the same relation in SGLD.')
    parser.add_argument('--train_mode', type=str, help='Training mode. Choose from "cd" (contrastive divergence) and "sl" (supervised learning).')
    parser.add_argument('--energy_mode', type=str, help=' "standard:0.3": (E_pos - E_neg) * 0.3"margin^0.2:0.3": max(0, 0.3 + E_pos - E_neg) * 0.2"mid^0.2:0.3": (max(0, 0.2 + E_pos - E_empty) + max(0, 0.2 + E_empty - E_neg)) * 0.3"mid^0.2^adapt:0.3": (max(0, gamma + E_pos - E_empty) + max(0, gamma + E_empty - E_neg)) * 0.3where gamma = max(0, StopGrad(E_neg - E_pos)/2) + 0.2"standard:0.5+mid^0.2^adapt:0.3":(E_pos - E_neg) * 0.5 + (max(0, gamma + E_pos - E_empty) + max(0, gamma + E_empty - E_neg)) * 0.3,where gamma = max(0, StopGrad(E_neg - E_pos)/2) + 0.2."standard+center^stop": (E_pos - E_neg) * 1 + ((E_pos+E_neg).detach()/2 - E_empty).abs()"stop": stop gradient, and each empty loss is computed per example"stopgen": similar to "stop", but the negative energy is the mean of neg_out and neg_out_gen, per example."stopmean": stop gradient, and each empty loss is computed per minibatch"stopgenmean": similar to "stopmean", but the negative energy is the mean of neg_out and neg_out_gen.')
    parser.add_argument('--supervised_loss_type', type=str, help='Loss_type for ebm supervised learning. Choose from any valid loss_type. E.g. "mse", "l1", "l2".')
    parser.add_argument('--kl_all_step', type=str2bool, nargs='?', const=True, default=False, help='If True, will compute the 2nd order kl for all steps.')
    parser.add_argument('--kl_coef', type=float, help='Coefficient for kl regularization.')
    parser.add_argument('--entropy_coef_img', type=float, help='Coefficient for entropy for image.')
    parser.add_argument('--entropy_coef_mask', type=float, help='Coefficient for entropy for mask.')
    parser.add_argument('--entropy_coef_repr', type=float, help='Coefficient for entropy for repr.')
    parser.add_argument('--pos_consistency_coef', type=float, help='Coefficient for positive consistency loss.')
    parser.add_argument('--neg_consistency_coef', type=float, help='Coefficient for negative consistency loss.')
    parser.add_argument('--emp_consistency_coef', type=float, help='Coefficient for empty consistency loss.')
    parser.add_argument('--SGLD_mutual_exclusive_coef', type=float, help='Coefficient for mutual-exclusive energy during SGLD. Penalizes when two masks from multiple EBMs overlap in an image.')
    parser.add_argument('--SGLD_fine_mutual_exclusive_coef', type=float, help='Coefficient for mutual-exclusive energy during SGLD. Penalizes when two masks from multiple EBMs overlap in an image.')
    parser.add_argument('--SGLD_object_exceed_coef', type=float, help='Coefficient for penalizing objects exceeding the ground truth mask during SGLD. Prevents a mask from an EBM from exceeding ground-truth boundaries.')
    parser.add_argument('--SGLD_pixel_entropy_coef', type=float, help='Coefficient for pixel-wise entropy during SGLD.')
    parser.add_argument('--SGLD_mask_entropy_coef', type=float, help='Coefficient for mask-level entropy during SGLD.')
    parser.add_argument('--SGLD_pixel_gm_coef', type=float, help='Coefficient for pixel-wise generalize-mean distance w.r.t. 0 and 1, during SGLD')
    parser.add_argument('--epsilon_ent', type=float, help='epsilon for adding to the entropy compuation to prevent Inf.')
    parser.add_argument('--ebm_target_mode', type=str, help='Target input to perform SGD on. Choose from "None", "r-{}" where {} choose from subset of "r", "m", "b", "x".')
    parser.add_argument('--emp_target_mode', type=str, help='Set of ebm_target mode in which the emp_out will participate in the loss. Choose from "all", "r-{}" where {} choose from subset of "r", "m", "b", "x".')
    parser.add_argument('--ebm_target', type=str, help='Target input to perform SGD on. Choose from "mask", "mask+repr", "repr".')
    parser.add_argument('--is_pos_repr_learnable', type=str2bool, nargs='?', const=True, default=False, help='Whether the positive concept_embeddings are learnable.')
    parser.add_argument('--neg_mode', type=str, help='Modes for generated negative masks from pos images and pos_masks. Only valid when is_mask is True.')
    parser.add_argument('--neg_mode_coef', type=float, help='Coefficient for negative mode. Only when it is > 0 and neg_mode is not "None" will neg_mode have effect.')
    parser.add_argument('--alpha', type=float, help='Coefficient for the L2 loss.')
    parser.add_argument('--lambd_start', type=float, help='Starting lambda for Gaussian Distribution.')
    parser.add_argument('--lambd', type=float, help='Lambda for Gaussian Distribution.')
    parser.add_argument('--step_size_start', type=float, help='Starting step size for sampling.')
    parser.add_argument('--step_size', type=float, help='Step size for sampling.')
    parser.add_argument('--step_size_repr', type=float, help='Step size for sampling c_repr.')
    parser.add_argument('--step_size_img', type=float, help='Step size for sampling img.')
    parser.add_argument('--step_size_z', type=float, help='Step size for sampling z.')
    parser.add_argument('--step_size_zgnn', type=float, help='Step size for sampling zgnn.')
    parser.add_argument('--step_size_wtarget', type=float, help='Step size for sampling wtarget.')
    parser.add_argument('--sample_step', type=int, help='Number of steps for sampling.')
    parser.add_argument('--p_buffer', type=float, help='Probability for using samples inside the buffer, as compared to using Gaussian.')
    parser.add_argument('--lr', type=float, help='Learning rate.')
    parser.add_argument('--lr_pretrained_concepts', type=float, help='Learning rate for pretrained concepts.')
    parser.add_argument('--parallel_mode', type=str, help='Parallel mode. Choose from "None", "dp" (DataParallel) and "ddp" (DistributedDataParallel).')
    parser.add_argument('--batch_size', type=int, help='Batch size.')
    parser.add_argument('--epochs', type=int, help='Number of epochs.')
    parser.add_argument('--early_stopping_patience', type=int, help='Patience for early-stopping.')
    parser.add_argument('--n_workers', type=int, help='Number of workers.')
    # BUGFIX: the defaults previously used the key `rescale_mode`, but the
    # declared argument is `--rescaled_mode`, so its documented default
    # 'nearest' was never applied (args.rescaled_mode stayed None).
    # `rescale_mode='nearest'` is kept as well for backward compatibility with
    # any downstream code that reads args.rescale_mode.
    parser.set_defaults(exp_id='ebm', date_time='3-20', inspect_interval=5, save_interval=10, verbose=1, seed=(- 1), gpuid='3', id='1', dataset='c-line', n_examples=10000, n_queries_per_class=15, canvas_size=8, rainbow_prob=0.0, max_n_distractors=0, min_n_distractors=0, allow_connect=True, is_rewrite=False, n_operators=1, color_avail='-1', transforms='None', transforms_pos='None', rescaled_size='None', rescaled_mode='nearest', rescale_mode='nearest', model_type='CEBM', w_type='image+mask', mask_mode='mul', channel_base=128, two_branch_mode='concat', is_spec_norm='True', is_res=True, c_repr_mode='c2', c_repr_first=2, c_repr_base=2, z_mode='None', z_first=2, z_dim=4, pos_embed_mode='None', aggr_mode='max', act_name='leakyrelu0.2', normalization_type='None', dropout=0, self_attn_mode='None', last_act_name='None', n_avg_pool=0, cumu_mode='harmonic', update_ebm_dict_interval=1, min_n_tasks=0, is_save=True, channel_coef=1.0, empty_coef=0.02, obj_coef=0.1, mutual_exclusive_coef=0.1, pixel_entropy_coef=0.0, pixel_gm_coef=0.0, iou_batch_consistency_coef=0.0, iou_concept_repel_coef=0.0, iou_relation_repel_coef=0.0, iou_relation_overlap_coef=0.0, iou_attract_coef=0, iou_target_matching_coef=0, connected_coef=0, connected_num_samples=2, image_value_range='0,1', is_ebm_share_param=False, n_tasks=128, T_id='Tuc6', is_concat_minibatch=False, relation_merge_mode='None', is_relation_z=True, is_cross_validation=False, load_pretrained_concepts='None', is_selector_gnn=False, is_zgnn_node=False, n_GN_layers=2, edge_attr_size=8, gnn_normalization_type='None', gnn_pooling_dim=16, cnn_output_size=32, cnn_is_spec_norm='True', train_coef=1, test_coef=1, train_mode='cd', energy_mode='standard', supervised_loss_type='mse', target_loss_type='mse', kl_all_step=False, kl_coef=0.0, entropy_coef_img=0.0, entropy_coef_mask=0.0, entropy_coef_repr=0.0, pos_consistency_coef=0.0, neg_consistency_coef=0.0, emp_consistency_coef=0.0, SGLD_is_anneal=False, SGLD_anneal_power=2.0, SGLD_is_penalize_lower='True', 
    SGLD_mutual_exclusive_coef=0.0, SGLD_fine_mutual_exclusive_coef=0.0, SGLD_object_exceed_coef=0.0, SGLD_pixel_entropy_coef=0.0, SGLD_mask_entropy_coef=0.0, SGLD_pixel_gm_coef=0.0, SGLD_iou_batch_consistency_coef=0.0, SGLD_iou_concept_repel_coef=0.0, SGLD_iou_relation_repel_coef=0.0, SGLD_iou_relation_overlap_coef=0.0, SGLD_iou_attract_coef=0, epsilon_ent=1e-05, ebm_target_mode='None', ebm_target='mask', emp_target_mode='all', is_pos_repr_learnable=False, neg_mode='None', neg_mode_coef=0.0, alpha=1, lambd_start=(- 1), lambd=0.005, step_size_start=(- 1), step_size=20, step_size_img=(- 1), step_size_repr=(- 1), step_size_z=2, step_size_zgnn=2, step_size_wtarget=(- 1), sample_step=60, p_buffer=0.95, lr=0.0001, lr_pretrained_concepts=0, parallel_mode='None', batch_size=128, epochs=500, early_stopping_patience=(- 1), n_workers=4)
    try:
        # Inside IPython/Jupyter: use all defaults, ignore sys.argv.
        # get_ipython raises NameError outside IPython; the previous bare
        # `except:` is narrowed to Exception so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        get_ipython().run_line_magic('matplotlib', 'inline')
        args = parser.parse_args([])
    except Exception:
        args = parser.parse_args()
    # Any per-quantity step size left at the -1 sentinel inherits step_size.
    if (args.step_size_img == (- 1)):
        args.step_size_img = args.step_size
    if (args.step_size_repr == (- 1)):
        args.step_size_repr = args.step_size
    if (args.step_size_z == (- 1)):
        args.step_size_z = args.step_size
    if (args.step_size_zgnn == (- 1)):
        args.step_size_zgnn = args.step_size
    if (args.step_size_wtarget == (- 1)):
        args.step_size_wtarget = args.step_size
    return args
def generate_png(all_iter, net, gt_hsi, Dataset, device, total_indices, path):
    """Predict over all pixels and save classification maps as PNGs.

    Writes ``path + '.png'`` (predicted map) and ``path + '_gt.png'``
    (ground-truth map). ``Dataset`` is unused but kept for interface
    compatibility with existing callers.
    """
    # Switching to eval mode once is enough; previously this was called
    # redundantly on every batch inside the loop.
    net.eval()
    pred_test = []
    for (X, y) in all_iter:
        X = X.to(device)
        pred_test.extend(net(X).cpu().argmax(axis=1).detach().numpy())
    gt = gt_hsi.flatten()
    x_label = np.zeros(gt.shape)
    for i in range(len(gt)):
        if (gt[i] == 0):
            # Background (label 0) is remapped so that, after the -1 shift
            # below, it lands on index 16 in both maps.
            gt[i] = 17
            x_label[i] = 16
    gt = (gt[:] - 1)
    # Fill predictions only at the labeled pixel positions.
    x_label[total_indices] = pred_test
    x = np.ravel(x_label)
    y_list = list_to_colormap(x)
    y_gt = list_to_colormap(gt)
    y_re = np.reshape(y_list, (gt_hsi.shape[0], gt_hsi.shape[1], 3))
    gt_re = np.reshape(y_gt, (gt_hsi.shape[0], gt_hsi.shape[1], 3))
    classification_map(y_re, gt_hsi, 300, (path + '.png'))
    classification_map(gt_re, gt_hsi, 300, (path + '_gt.png'))
    print('------Get classification maps successful-------')
def parse_flags(line):
    """Parse a compiler/linker flag string into categorized lists.

    Recognizes -I (include dirs), -L (library dirs), -l (libraries) and
    -D (macros); everything else goes to 'ignored'. Returns a dict with
    keys 'include_dirs', 'library_dirs', 'libraries', 'macros', 'ignored'.
    """
    d = {'include_dirs': [], 'library_dirs': [], 'libraries': [], 'macros': [], 'ignored': []}
    # Prepend a space so a leading '-X' is split off like every other flag.
    flags = (' ' + line).split(' -')
    for flag in flags:
        flag = ('-' + flag)
        # BUGFIX: the previous `len(flag) > 0` guard was always true (flag
        # always starts with '-'), so the empty token produced by the split
        # was recorded as a spurious '-' entry in 'ignored'. Skip such
        # whitespace-only artifacts instead.
        if (len(flag.strip()) <= 1):
            continue
        if flag.startswith('-I'):
            d['include_dirs'].append(flag[2:].strip())
        elif flag.startswith('-L'):
            d['library_dirs'].append(flag[2:].strip())
        elif flag.startswith('-l'):
            d['libraries'].append(flag[2:].strip())
        elif flag.startswith('-D'):
            d['macros'].append(flag[2:].strip())
        else:
            d['ignored'].append(flag)
    return d
def get_args():
    """Parse the positional CLI arguments: dump_dir plus integer start/end."""
    parser = argparse.ArgumentParser()
    # (name, converter) pairs; dump_dir stays a plain string.
    for name, converter in (('dump_dir', str), ('start', int), ('end', int)):
        parser.add_argument(name, type=converter)
    return parser.parse_args()
def _apply_chi(dwg, g, meld, offset):
    """Draw a chi (run) meld into SVG group `g`, starting at x-offset `offset`.

    The called tile is drawn rotated 90 degrees; its position within the run
    depends on which player it was claimed from (Meld.src). Returns the group
    and the offset after the meld plus one tile-width of spacing.
    """
    called = Meld.target(meld)
    action = Meld.action(meld)
    # Determine the three tiles of the run relative to the called tile.
    if (action == Action.CHI_L):
        t1, t2, t3 = called, (called + 1), (called + 2)
    elif (action == Action.CHI_M):
        t1, t2, t3 = called, (called - 1), (called + 1)
    else:
        t1, t2, t3 = called, (called - 1), (called - 2)
    # Left-to-right drawing order; True marks the rotated (called) tile.
    src = Meld.src(meld)
    if (src == 3):
        layout = ((t1, True), (t2, False), (t3, False))
    elif (src == 2):
        layout = ((t2, False), (t1, True), (t3, False))
    elif (src == 1):
        layout = ((t3, False), (t2, False), (t1, True))
    else:
        layout = ()
    for tile_index, rotated in layout:
        p = dwg.path(d=path_list[tile_index])
        if rotated:
            p.rotate((- 90), center=((hand_x + offset), hand_y))
            p.translate((((hand_x + offset) - tile_h) + 4), (hand_y + 1))
            g.add(p)
            offset += tile_h
        else:
            p.translate((hand_x + offset), hand_y)
            g.add(p)
            offset += tile_w
    return (g, (offset + tile_w))
def register_Ns3Icmpv6TimeExceeded_methods(root_module, cls):
    """Register ns3::Icmpv6TimeExceeded constructors and methods for the bindings.

    Auto-generated PyBindGen registration code (ns-3 Python bindings); kept
    byte-identical so it stays in sync with the generator output.
    """
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::Icmpv6TimeExceeded const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetPacket', 'ns3::Ptr< ns3::Packet >', [], is_const=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    cls.add_method('SetPacket', 'void', [param('ns3::Ptr< ns3::Packet >', 'p')])
    return
def is_external_stream(node: dace.sdfg.nodes.Node, subgraph: Union[(dace.sdfg.SDFGState, ScopeSubgraphView)]):
    """Return True iff `node` is a stream AccessNode whose data descriptor
    appears on no other AccessNode in `subgraph` (i.e., the stream crosses
    the subgraph boundary)."""
    if not isinstance(node, dace.nodes.AccessNode):
        return False
    desc = node.desc(subgraph)
    if not isinstance(desc, dt.Stream):
        return False
    # External unless some *other* access node shares the same descriptor.
    return not any(
        ((nn != node) and isinstance(nn, dace.nodes.AccessNode) and (desc == nn.desc(subgraph)))
        for nn in subgraph.nodes()
    )
class LinUCBVI(UCBVI):
    """UCB-VI with linear function approximation: Q(s, a) ~ features(s, a) . theta.

    NOTE(review): approximator_dim and confidence_multiplier look like they may
    be @property in the UCBVI base class — decorators appear stripped elsewhere
    in this file; confirm against the base class definition.
    """
    def __init__(self, mdp, n_episodes=1, init_state=0, reg_factor=1.0, confidence_scaling_factor=(- 1.0), bound_theta=1.0, throttle=int(100.0)):
        # bound_theta scales the random uniform initialization of theta.
        self.bound_theta = bound_theta
        super().__init__(mdp, n_episodes=n_episodes, init_state=init_state, reg_factor=reg_factor, confidence_scaling_factor=confidence_scaling_factor, throttle=throttle)
    def approximator_dim(self):
        """Dimension of the linear feature map (and of theta)."""
        return self.mdp.n_features
    def update_output_gradient(self):
        """For a linear model the output gradient is the feature matrix itself."""
        self.grad_approx = self.mdp.features
    def reset(self):
        """Reset all learner state: bounds, buffers, A^-1, theta and b."""
        self.reset_upper_confidence_bounds()
        self.reset_regrets()
        self.reset_policy()
        self.reset_state_action_reward_buffer()
        self.reset_A_inv()
        self.reset_grad_approx()
        # One theta / b row per horizon step h in [0, H).
        self.theta = (np.random.uniform((- 1), 1, (self.mdp.H, self.mdp.n_features)) * self.bound_theta)
        self.b = np.zeros((self.mdp.H, self.mdp.n_features))
    def confidence_multiplier(self):
        """Scaling factor of the exploration bonus."""
        return self.confidence_scaling_factor
    def train(self):
        """Regularized least-squares update of theta for the current step."""
        # Accumulate feature * (reward + max_a Q(s', a)), then solve via A^-1.
        self.b[self.mdp.iteration] += (self.mdp.features[(self.state, self.action)] * (self.reward + np.max(self.Q_hat[((self.mdp.iteration + 1), self.buffer_states[(self.mdp.iteration + 1)])])))
        self.theta[self.mdp.iteration] = np.matmul(self.A_inv[self.mdp.iteration], self.b[self.mdp.iteration])
    def predict(self):
        """Q_hat[h] = features @ theta[h] for the current horizon step h."""
        self.Q_hat[self.mdp.iteration] = np.dot(self.mdp.features, self.theta[self.mdp.iteration])
# NOTE(review): this test appears corrupted by extraction — the two lines
# below look like schemathesis/pytest decorator tails whose '@name' prefixes
# were lost (likely '@pytest.mark.hypothesis_nested' and a
# '@...operations(...)' schema decorator). As written this is not valid
# standalone syntax; restore from the original file before use.
.hypothesis_nested
.operations('custom_format')
def test_schema_query_hook(wsgi_app_schema, schema_url):
    # NOTE(review): likely a stripped '@..._app_schema.hook' decorator for the
    # filter_query hook below — confirm against the original source.
    _app_schema.hook
    def filter_query(context, query):
        return (query['id'].isdigit() and query['id'].isascii())
    strategy = wsgi_app_schema['/custom_format']['GET'].as_strategy()
    # NOTE(review): the next two lines look like stripped '@given(case=strategy)'
    # and '@settings(max_examples=3)' hypothesis decorators.
    (case=strategy)
    (max_examples=3)
    def test(case):
        assert case.query['id'].isdigit()
    test()
# NOTE(review): '_metric' below looks like a stripped '@register_metric'
# decorator (StyleGAN-style metric registration) — confirm against the
# original file.
_metric
def fid50k_full(opts):
    """Compute FID against the full (unmirrored, unsubsampled) dataset,
    using 50k generated images; returns {'fid50k_full': value}."""
    # Use the entire real dataset without mirror augmentation.
    opts.dataset_kwargs.update(max_size=None)
    opts.dataset_kwargs.cfg.update(mirror=False)
    fid = frechet_inception_distance.compute_fid(opts, max_real=None, num_gen=50000)
    return dict(fid50k_full=fid)
def get_command_args():
    """Parse CLI options: pattern config path (--config/-c) and output
    folder for generated patterns (--out/-o)."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--config', '-c',
        type=str,
        default='meta_infos/configs/dataset_config.yaml',
        help='pattern config',
    )
    parser.add_argument(
        '--out', '-o',
        type=str,
        default='test/outputs',
        help='folder to save generated patterns',
    )
    return parser.parse_args()
class MobileInvertedResidualBlock(MyModule):
    """MobileNetV2-style inverted residual block: a mobile inverted conv with
    an optional shortcut branch added to its output.

    NOTE(review): in the upstream ProxylessNAS code, module_str and config are
    @property and build_from_config is a @staticmethod; the decorators appear
    stripped in this file — confirm against the original. As written,
    build_from_config still works when called on the class.
    """
    def __init__(self, mobile_inverted_conv, shortcut):
        super(MobileInvertedResidualBlock, self).__init__()
        self.mobile_inverted_conv = mobile_inverted_conv
        self.shortcut = shortcut
    def forward(self, x):
        # Identity if the conv is absent/zero; skip the residual add if the
        # shortcut is absent/zero; otherwise conv(x) + shortcut(x).
        if ((self.mobile_inverted_conv is None) or isinstance(self.mobile_inverted_conv, ZeroLayer)):
            res = x
        elif ((self.shortcut is None) or isinstance(self.shortcut, ZeroLayer)):
            res = self.mobile_inverted_conv(x)
        else:
            res = (self.mobile_inverted_conv(x) + self.shortcut(x))
        return res
    def module_str(self):
        """Human-readable summary of the (conv, shortcut) pair."""
        return ('(%s, %s)' % ((self.mobile_inverted_conv.module_str if (self.mobile_inverted_conv is not None) else None), (self.shortcut.module_str if (self.shortcut is not None) else None)))
    def config(self):
        """Serializable config dict; inverse of build_from_config."""
        return {'name': MobileInvertedResidualBlock.__name__, 'mobile_inverted_conv': (self.mobile_inverted_conv.config if (self.mobile_inverted_conv is not None) else None), 'shortcut': (self.shortcut.config if (self.shortcut is not None) else None)}
    def build_from_config(config):
        """Reconstruct a block from a config dict produced by `config`."""
        mobile_inverted_conv = set_layer_from_config(config['mobile_inverted_conv'])
        shortcut = set_layer_from_config(config['shortcut'])
        return MobileInvertedResidualBlock(mobile_inverted_conv, shortcut)
def _update_command_info():
    """Parse the qepcad.help file once and cache per-command metadata.

    Populates the module-level _command_info_cache with
    ``{command_name: (id, phases, kind, help_text, special)}`` where
    ``special`` marks commands needing special argument handling. No-op if
    the cache is already populated.
    """
    global _command_info_cache
    if (_command_info_cache is not None):
        return
    cache = {}
    # Renamed from `help`, which shadowed the builtin.
    with open(os.path.join(SAGE_LOCAL, 'share/qepcad', 'qepcad.help')) as help_file:
        # The file starts with a blank line.
        assert (help_file.readline().strip() == '')
        while True:
            # Skip blank separator lines between command entries.
            # BUGFIX: readline() returns '' at EOF, which the old
            # `while not cmd_line.strip()` loop treated like a blank line,
            # spinning forever and making the `break` below unreachable.
            # Stop skipping once EOF is reached.
            cmd_line = help_file.readline()
            while (cmd_line and (not cmd_line.strip())):
                cmd_line = help_file.readline()
            cmd_line = cmd_line.strip()
            if (cmd_line == ''):
                break
            (cmd, id, phases, kind) = cmd_line.split()
            assert (help_file.readline().strip() == '')
            # Collect the help paragraph up to the next blank line.
            help_text = ''
            help_line = help_file.readline()
            while (help_line.strip() != ''):
                help_text += help_line
                help_line = help_file.readline()
            # Commands that take special argument kinds.
            special = None
            if (cmd in ['d-all-cells-in-subtree', 'd-cell', 'd-pcad', 'd-pscad', 'd-stack', 'manual-choose-cell']):
                special = 'cell'
            if (cmd in ['ipfzt', 'rational-sample', 'triv-convert', 'use-db', 'use-selected-cells-cond', 'verbose']):
                special = 'yn'
            if (cmd in ['selected-cells-cond']):
                special = 'formula'
            if (cmd in ['ch-pivot', 'rem-pf', 'rem-pj']):
                special = 'i,j'
            if (cmd in ['d-2d-cad', 'set-truth-value', 'solution-extension']):
                special = 'interactive'
            if (cmd in ['p-2d-cad', 'trace-alg', 'trace-data']):
                special = 'optional'
            # Expose commands as Python-friendly identifiers.
            cmd = cmd.replace('-', '_')
            cache[cmd] = (id, phases, kind, help_text, special)
    _command_info_cache = cache
# NOTE(review): the line below looks like a Flask route decorator whose
# '@app.route' prefix was lost in extraction — confirm against the original.
('/annotator', methods=['GET'])
def annotator():
    """Render the annotation gallery, filtered by CAD source and keyword
    taken from the request's query string."""
    cad_type = request.args.get('cad', '')
    if (cad_type == '0'):
        condition = ''
    else:
        # SECURITY(review): `cad_type` comes straight from the request and is
        # interpolated into what looks like a SQL fragment. If get_cad_imgs
        # executes this unparameterized, this is a SQL-injection vector —
        # validate/whitelist cad_type or use bound parameters.
        condition = f'and source = {cad_type}'
    keyword = request.args.get('keyword', '')
    img_info = get_cad_imgs(keyword, condition)
    print(f'{len(img_info)} was found!!')
    # Only the first 500 results are rendered.
    return render_template('gallery.html', img_info=img_info[:500], keywords=get_miscellaneous(), labels=get_category(), num_file=str(len(img_info)))
def _digit_span_to_special_tag(span):
if ((span[0] == '0') and (len(span) > 2)):
return '<NUM>'
decimal_point_count = 0
for (idx, char) in enumerate(span):
if ((char == '.') or (char == '.') or (char == '')):
decimal_point_count += 1
if ((span[(- 1)] == '.') or (span[(- 1)] == '.') or (span[(- 1)] == '')):
if (decimal_point_count == 1):
return span
else:
return '<UNKDGT>'
if (decimal_point_count == 1):
return '<DEC>'
elif (decimal_point_count > 1):
return '<UNKDGT>'
else:
return '<NUM>' |
def elog(x):
    """Return ``x * log(x)`` for x strictly inside (0, 1), else 0.

    The clamp makes the entropy-style term well-defined at the endpoints
    (where x*log(x) tends to 0) and silences out-of-range inputs.
    """
    if 0.0 < x < 1.0:
        return x * log(x)
    return 0
def build_input_fn(builder, is_training):
    """Build a TPUEstimator-style ``input_fn`` over a TFDS builder.

    In 'pretrain' mode each image is augmented twice and the two views are
    concatenated along the channel axis (labels are zeroed); otherwise a
    single finetune preprocessing is applied with one-hot labels.

    Returns:
        A callable ``_input_fn(params)`` where ``params['batch_size']`` is the
        per-host batch size supplied by the Estimator.
    """
    def _input_fn(params):
        preprocess_fn_pretrain = get_preprocess_fn(is_training, is_pretrain=True)
        preprocess_fn_finetune = get_preprocess_fn(is_training, is_pretrain=False)
        num_classes = builder.info.features['label'].num_classes
        def map_fn(image, label):
            # Each example yields (image, label, mask); mask=1.0 marks real
            # (non-padding) examples for pad_to_batch below.
            if (FLAGS.train_mode == 'pretrain'):
                # Two independent augmentations, stacked on the channel axis.
                xs = []
                for _ in range(2):
                    xs.append(preprocess_fn_pretrain(image))
                image = tf.concat(xs, (- 1))
                # Labels are unused during contrastive pretraining.
                label = tf.zeros([num_classes])
            else:
                image = preprocess_fn_finetune(image)
                label = tf.one_hot(label, num_classes)
            return (image, label, 1.0)
        dataset = builder.as_dataset(split=(FLAGS.train_split if is_training else FLAGS.eval_split), shuffle_files=is_training, as_supervised=True)
        if FLAGS.cache_dataset:
            dataset = dataset.cache()
        if is_training:
            # Smaller images get a larger shuffle buffer (cheap to hold).
            buffer_multiplier = (50 if (FLAGS.image_size <= 32) else 10)
            dataset = dataset.shuffle((params['batch_size'] * buffer_multiplier))
            dataset = dataset.repeat((- 1))
        dataset = dataset.map(map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        dataset = dataset.batch(params['batch_size'], drop_remainder=is_training)
        # Eval keeps the ragged last batch; pad it up and mask the padding.
        dataset = pad_to_batch(dataset, params['batch_size'])
        (images, labels, mask) = tf.data.make_one_shot_iterator(dataset).get_next()
        return (images, {'labels': labels, 'mask': mask})
    return _input_fn
def train(args, train_dataset, model: PreTrainedModel, tokenizer: PreTrainedTokenizer) -> Tuple[(int, float)]:
    """Fine-tune a (masked) language model.

    Handles distributed/multi-GPU setup, optional fp16 via apex, resuming from
    a checkpoint directory, periodic evaluation/logging, and checkpointing.

    Returns:
        (global_step, mean training loss per optimization step).
    """
    # TensorBoard writer only on the main process.
    if (args.local_rank in [(- 1), 0]):
        tb_writer = SummaryWriter()
    args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
    def collate(examples: List[torch.Tensor]):
        # Pad with the tokenizer's pad id when available, zeros otherwise.
        if (tokenizer._pad_token is None):
            return pad_sequence(examples, batch_first=True)
        return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)
    train_sampler = (RandomSampler(train_dataset) if (args.local_rank == (- 1)) else DistributedSampler(train_dataset))
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate)
    # Total optimization steps: either capped by max_steps (epochs derived)
    # or computed from the epoch count.
    if (args.max_steps > 0):
        t_total = args.max_steps
        args.num_train_epochs = ((args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1)
    else:
        t_total = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs)
    # Biases and LayerNorm weights are exempt from weight decay.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    # Restore optimizer/scheduler state when resuming from a checkpoint dir.
    if (args.model_name_or_path and os.path.isfile(os.path.join(args.model_name_or_path, 'optimizer.pt')) and os.path.isfile(os.path.join(args.model_name_or_path, 'scheduler.pt'))):
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'optimizer.pt')))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'scheduler.pt')))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError('Please install apex from to use fp16 training.')
        (model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # Multi-GPU (DataParallel) and distributed (DDP) wrapping.
    if (args.n_gpu > 1):
        model = torch.nn.DataParallel(model)
    if (args.local_rank != (- 1)):
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    logger.info('***** Running training *****')
    logger.info('  Num examples = %d', len(train_dataset))
    logger.info('  Num Epochs = %d', args.num_train_epochs)
    logger.info('  Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
    logger.info('  Total train batch size (w. parallel, distributed & accumulation) = %d', ((args.train_batch_size * args.gradient_accumulation_steps) * (torch.distributed.get_world_size() if (args.local_rank != (- 1)) else 1)))
    logger.info('  Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
    logger.info('  Total optimization steps = %d', t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # When resuming, recover global_step from a 'checkpoint-<step>' dir name
    # and skip already-trained epochs/steps.
    if (args.model_name_or_path and os.path.exists(args.model_name_or_path)):
        try:
            checkpoint_suffix = args.model_name_or_path.split('-')[(- 1)].split('/')[0]
            global_step = int(checkpoint_suffix)
            epochs_trained = (global_step // (len(train_dataloader) // args.gradient_accumulation_steps))
            steps_trained_in_current_epoch = (global_step % (len(train_dataloader) // args.gradient_accumulation_steps))
            logger.info('  Continuing training from checkpoint, will skip to saved global_step')
            logger.info('  Continuing training from epoch %d', epochs_trained)
            logger.info('  Continuing training from global step %d', global_step)
            logger.info('  Will skip the first %d steps in the first epoch', steps_trained_in_current_epoch)
        except ValueError:
            logger.info('  Starting fine-tuning.')
    (tr_loss, logging_loss) = (0.0, 0.0)
    # Resize embeddings in case extra tokens were added to the tokenizer.
    model_to_resize = (model.module if hasattr(model, 'module') else model)
    model_to_resize.resize_token_embeddings(len(tokenizer))
    model.zero_grad()
    train_iterator = trange(epochs_trained, int(args.num_train_epochs), desc='Epoch', disable=(args.local_rank not in [(- 1), 0]))
    set_seed(args)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=(args.local_rank not in [(- 1), 0]))
        for (step, batch) in enumerate(epoch_iterator):
            # Fast-forward through steps already covered by the checkpoint.
            if (steps_trained_in_current_epoch > 0):
                steps_trained_in_current_epoch -= 1
                continue
            (inputs, labels) = (mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch))
            inputs = inputs.to(args.device)
            labels = labels.to(args.device)
            model.train()
            outputs = (model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels))
            loss = outputs[0]
            if (args.n_gpu > 1):
                loss = loss.mean()  # average across DataParallel replicas
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Optimizer step only at accumulation boundaries.
            if (((step + 1) % args.gradient_accumulation_steps) == 0):
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                # Periodic logging (main process only).
                if ((args.local_rank in [(- 1), 0]) and (args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)):
                    if ((args.local_rank == (- 1)) and args.evaluate_during_training):
                        results = evaluate(args, model, tokenizer)
                        for (key, value) in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', ((tr_loss - logging_loss) / args.logging_steps), global_step)
                    logging_loss = tr_loss
                # Periodic checkpointing (main process only).
                if ((args.local_rank in [(- 1), 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0)):
                    checkpoint_prefix = 'checkpoint'
                    output_dir = os.path.join(args.output_dir, '{}-{}'.format(checkpoint_prefix, global_step))
                    os.makedirs(output_dir, exist_ok=True)
                    model_to_save = (model.module if hasattr(model, 'module') else model)
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info('Saving model checkpoint to %s', output_dir)
                    _rotate_checkpoints(args, checkpoint_prefix)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, 'optimizer.pt'))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, 'scheduler.pt'))
                    logger.info('Saving optimizer and scheduler states to %s', output_dir)
            if ((args.max_steps > 0) and (global_step > args.max_steps)):
                epoch_iterator.close()
                break
        if ((args.max_steps > 0) and (global_step > args.max_steps)):
            train_iterator.close()
            break
    if (args.local_rank in [(- 1), 0]):
        tb_writer.close()
    return (global_step, (tr_loss / global_step))
def is_valid_profile(profile, truncation_type, p=2, generic=None):
    """Check the validity conditions of a sub-Hopf-algebra profile function.

    At p=2 (non-generic) ``profile`` is a sequence of exponents, padded with
    ``truncation_type`` (0 or Infinity); at odd primes (generic) ``profile``
    is a pair ``(e, k)`` with k-values in {1, 2}. The inequalities checked
    below are the closure conditions for a valid profile; see the surrounding
    module's references for the exact formulation.
    """
    from sage.rings.infinity import Infinity
    # Default: generic (odd-primary) conventions exactly when p != 2.
    if (generic is None):
        generic = (p != 2)
    if (not generic):
        # Pad with the truncation value so indices r-i-1, i-1 stay in range.
        pro = (list(profile) + ([truncation_type] * len(profile)))
        r = 0
        for pro_r in pro:
            r += 1
            if (pro_r < Infinity):
                # Closure condition: pro[r] >= min(pro[r-i] - i, pro[i]) fails
                # => invalid (1-based indices in the math, 0-based here).
                for i in range(1, r):
                    if (pro_r < min((pro[((r - i) - 1)] - i), pro[(i - 1)])):
                        return False
    else:
        # Odd-primary case: profile is a pair (e, k).
        e = (list(profile[0]) + ([truncation_type] * len(profile[0])))
        k = list(profile[1])
        # k-values must lie in {1, 2}.
        if (not set(k).issubset({1, 2})):
            return False
        # Extend k by the value implied by the truncation type.
        if (truncation_type > 0):
            k = (k + [2])
        else:
            k = (k + ([1] * len(profile[0])))
        if (len(k) > len(e)):
            e = (e + ([truncation_type] * (len(k) - len(e))))
        # Same closure condition as above, applied to e.
        r = 0
        for e_r in e:
            r += 1
            if (e_r < Infinity):
                for i in range(1, r):
                    if (e_r < min((e[((r - i) - 1)] - i), e[(i - 1)])):
                        return False
        # Compatibility between e and k: if k[r] == 1 then for all i+j=r,
        # e[i] > j forces k[j] == 1 (here: rejects k[j] == 2).
        r = (- 1)
        for k_r in k:
            r += 1
            if (k_r == 1):
                for j in range(r):
                    i = (r - j)
                    if ((e[(i - 1)] > j) and (k[j] == 2)):
                        return False
    return True
class TomlEncoder(object):
    """Encode Python mappings as TOML text (py2/py3 compatible via ``unicode``)."""

    def __init__(self, _dict=dict, preserve=False):
        # _dict: mapping type used for intermediate tables.
        # preserve: when True, InlineTableDict values are emitted inline.
        self._dict = _dict
        self.preserve = preserve
        # Dispatch table: Python type -> TOML serializer. Naive datetimes use
        # isoformat(); '+00:00' is normalized to the 'Z' suffix.
        self.dump_funcs = {str: _dump_str, unicode: _dump_str, list: self.dump_list, bool: (lambda v: unicode(v).lower()), int: (lambda v: v), float: _dump_float, Decimal: _dump_float, datetime.datetime: (lambda v: v.isoformat().replace('+00:00', 'Z')), datetime.time: _dump_time, datetime.date: (lambda v: v.isoformat())}

    def get_empty_table(self):
        """Return a fresh, empty table of the configured mapping type."""
        return self._dict()

    def dump_list(self, v):
        """Render a Python list as a TOML array literal '[ a, b, ...,]'."""
        retval = '['
        for u in v:
            retval += ((' ' + unicode(self.dump_value(u))) + ',')
        retval += ']'
        return retval

    def dump_inline_table(self, section):
        """Recursively render ``section`` as an inline table '{ k = v, ... }'.

        Non-dict leaves fall back to ``dump_value``.
        """
        retval = ''
        if isinstance(section, dict):
            val_list = []
            for (k, v) in section.items():
                val = self.dump_inline_table(v)
                val_list.append(((k + ' = ') + val))
            retval += (('{ ' + ', '.join(val_list)) + ' }\n')
            return retval
        else:
            return unicode(self.dump_value(section))

    def dump_value(self, v):
        """Serialize a single value via the dispatch table.

        Unknown iterables are treated as lists; anything else falls back to
        the string serializer.
        """
        dump_fn = self.dump_funcs.get(type(v))
        if ((dump_fn is None) and hasattr(v, '__iter__')):
            dump_fn = self.dump_funcs[list]
        return (dump_fn(v) if (dump_fn is not None) else self.dump_funcs[str](v))

    def dump_sections(self, o, sup):
        """Render the direct key/value pairs of table ``o``.

        ``sup`` is the dotted parent prefix (used for array-of-table headers).

        Returns:
            (rendered_text, retdict) where ``retdict`` holds the sub-tables
            still to be rendered by the caller.
        """
        retstr = ''
        if ((sup != '') and (sup[(- 1)] != '.')):
            sup += '.'
        retdict = self._dict()
        arraystr = ''
        for section in o:
            section = unicode(section)
            qsection = section
            # Quote keys that are not bare TOML keys.
            if (not re.match('^[A-Za-z0-9_-]+$', section)):
                qsection = _dump_str(section)
            if (not isinstance(o[section], dict)):
                arrayoftables = False
                # A list containing any dict is an array of tables ([[...]]).
                if isinstance(o[section], list):
                    for a in o[section]:
                        if isinstance(a, dict):
                            arrayoftables = True
                if arrayoftables:
                    for a in o[section]:
                        arraytabstr = '\n'
                        arraystr += ((('[[' + sup) + qsection) + ']]\n')
                        (s, d) = self.dump_sections(a, (sup + qsection))
                        if s:
                            if (s[0] == '['):
                                arraytabstr += s
                            else:
                                arraystr += s
                        # Flatten nested sub-tables breadth-first, joining
                        # names with dots until none remain.
                        while d:
                            newd = self._dict()
                            for dsec in d:
                                (s1, d1) = self.dump_sections(d[dsec], (((sup + qsection) + '.') + dsec))
                                if s1:
                                    arraytabstr += ((((('[' + sup) + qsection) + '.') + dsec) + ']\n')
                                    arraytabstr += s1
                                for s1 in d1:
                                    newd[((dsec + '.') + s1)] = d1[s1]
                            d = newd
                        arraystr += arraytabstr
                elif (o[section] is not None):
                    retstr += (((qsection + ' = ') + unicode(self.dump_value(o[section]))) + '\n')
            elif (self.preserve and isinstance(o[section], InlineTableDict)):
                retstr += ((qsection + ' = ') + self.dump_inline_table(o[section]))
            else:
                # Plain sub-table: defer to the caller via retdict.
                retdict[qsection] = o[section]
        retstr += arraystr
        return (retstr, retdict)
class PNW(BenchmarkDataset):
    """Curated Pacific Northwest AI-ready Seismic Dataset (PNW).

    Thin BenchmarkDataset wrapper: the data is resolved via repository lookup,
    so no explicit download step is implemented.
    """

    def __init__(self, **kwargs):
        # Fixed: the citation literal was unterminated (missing closing quote),
        # which made this class definition a SyntaxError.
        citation = ('Ni, Y., Hutko, A., Skene, F., Denolle, M., Malone, S., '
                    'Bodin, P., Hartog, R., & Wright, A. (2023).'
                    'Curated Pacific Northwest AI-ready Seismic Dataset. '
                    'Seismica, 2(1).')
        license = 'CC BY 4.0'
        super().__init__(citation=citation, license=license, repository_lookup=True, **kwargs)

    def _download_dataset(self, writer, **kwargs):
        # Intentionally a no-op: the dataset is fetched through the repository
        # lookup mechanism of the base class.
        pass
class Task(object):
    """Submit SQL/PyODPS tasks through the Alisa client and track them."""

    def __init__(self, config):
        # config drives both the client and the verbose/quiet tracking mode.
        self.config = config
        self.cli = Client(config)

    def exec_sql(self, code, output=sys.stdout, resultful=False):
        """Submit a SQL task and block until completion.

        Returns the result rows when ``resultful`` is True, else [].
        """
        (task_id, status) = self.cli.create_sql_task(code)
        return self._tracking(task_id, status, output, resultful)

    def exec_pyodps(self, code, args, output=sys.stdout):
        """Submit a PyODPS task and block until completion (no results)."""
        (task_id, status) = self.cli.create_pyodps_task(code, args)
        return self._tracking(task_id, status, output, False)

    def _tracking(self, task_id, status, output, resultful):
        # verbose => stream server logs while polling; otherwise poll silently.
        return (self._tracking_with_log(task_id, status, output, resultful) if self.config.verbose else self._tracking_quietly(task_id, status, resultful))

    def _tracking_with_log(self, task_id, status, output, resultful):
        """Poll the task, streaming logs to ``output`` until completion.

        Raises:
            Exception: when the final status is not COMPLETED.
        """
        # NOTE(review): log_idx is never advanced here — presumably
        # cli.read_logs tracks the offset internally; confirm.
        log_idx = 0
        while (not self.cli.completed(status)):
            if (status in (AlisaTaksStatus.ALISA_TASK_WAITING, AlisaTaksStatus.ALISA_TASK_ALLOCATE)):
                output.write('waiting for resources')
            elif ((status == AlisaTaksStatus.ALISA_TASK_RUNNING) and (log_idx >= 0)):
                self.cli.read_logs(task_id, log_idx, output)
            time.sleep(WAIT_INTEVERAL_SEC)
            status = self.cli.get_status(task_id)
        if (status == AlisaTaksStatus.ALISA_TASK_EXPIRED):
            output.write('timeout while waiting for resources')
        else:
            # Drain any remaining logs after the terminal status.
            self.cli.read_logs(task_id, log_idx, output)
        if (status == AlisaTaksStatus.ALISA_TASK_COMPLETED):
            return (self.cli.get_results(task_id, READ_RESULTS_BATCH) if resultful else [])
        raise Exception('task={}, invalid status={}'.format(task_id, status))

    def _tracking_quietly(self, task_id, status, resultful):
        """Poll the task without emitting logs until completion.

        Raises:
            Exception: when the final status is not COMPLETED.
        """
        while (not self.cli.completed(status)):
            time.sleep(WAIT_INTEVERAL_SEC)
            status = self.cli.get_status(task_id)
        if (status != AlisaTaksStatus.ALISA_TASK_COMPLETED):
            raise Exception('task({}) status is {} which means incompleted.'.format(task_id, status))
        if resultful:
            return self.cli.get_results(task_id, READ_RESULTS_BATCH)
        return []
class blis_info(blas_info):
    """system_info section describing the BLIS BLAS implementation."""

    section = 'blis'
    dir_env_var = 'BLIS'
    _lib_names = ['blis']
    notfounderror = BlasNotFoundError

    def calc_info(self):
        """Probe for BLIS libraries and register build info when found."""
        search_dirs = self.get_lib_dirs()
        opt = self.get_option_single('blis_libs', 'libraries')
        libs = self.get_libs(opt, self._lib_names)
        found = self.check_libs2(search_dirs, libs, [])
        if found is None:
            # Nothing usable on this system; leave info unset.
            return
        include_dirs = self.get_include_dirs()
        # BLIS ships a CBLAS interface, hence the HAVE_CBLAS macro.
        dict_append(found, language='c', define_macros=[('HAVE_CBLAS', None)], include_dirs=include_dirs)
        self.set_info(**found)
class Brick(PhysicalObject):
    """A destructible brick whose color, score and bounciness depend on its
    row (row 0 is the most valuable and bounciest)."""

    def __init__(self, *args, **kwargs):
        self.row = kwargs.pop('row')
        self.column = kwargs.pop('column')
        kwargs['color'] = self.get_color()
        super(Brick, self).__init__('brick.png', *args, **kwargs)

    def get_color(self):
        """RGB color for this brick's row; black for unknown rows."""
        row_colors = {
            0: (255, 0, 0),
            1: (255, 174, 0),
            2: (252, 255, 0),
            3: (0, 255, 0),
            4: (0, 0, 255),
        }
        return row_colors.get(self.row, (0, 0, 0))

    def get_score(self):
        """Points awarded when this brick is destroyed (0 for unknown rows)."""
        return {0: 10, 1: 7, 2: 5, 3: 3, 4: 1}.get(self.row, 0)

    def get_restitution(self):
        """Bounciness of the brick's fixture (1.0 for unknown rows)."""
        return {0: 1.5, 1: 1.3, 2: 1.2, 3: 1.15, 4: 1.1}.get(self.row, 1.0)

    def create_physical_entity(self):
        """Create the static physics body backing this brick."""
        scale = self._world.physical_scale
        body = self._engine.CreateStaticBody(position=self.physical_position)
        body.CreatePolygonFixture(
            box=(((self.width / 2.0) / scale), ((self.height / 2.0) / scale)),
            density=1.0,
            friction=0.0,
            restitution=self.get_restitution(),
        )
        return body

    def on_contact(self, other):
        """On ball contact: remove the brick, nudge nearly-horizontal-stalled
        balls, and credit the brick's score to the world."""
        if not isinstance(other, Ball):
            return
        self.kill()
        vx = other.body.linearVelocity[0]
        if abs(vx) < 0.2:
            # Keep the ball from getting stuck bouncing straight up and down.
            other.apply_impulse([(0.2 * np.sign(vx)), 0.0])
        self._world._score += self.get_score()
def create_model(cfg, device):
    """Build a detection model from ``cfg`` and move it to ``device``.

    The config is deep-copied and frozen so the caller's copy stays mutable.
    """
    frozen_cfg = copy.deepcopy(cfg)
    frozen_cfg.freeze()
    return build_detection_model(frozen_cfg).to(device)
class Singular(Executable):
    """Feature describing the ``singular`` executable from the standard
    ``singular`` spkg."""

    def __init__(self):
        # Same initialization as the base class, with the canonical binary path.
        super(Singular, self).__init__('singular', SINGULAR_BIN, spkg='singular', type='standard')
class RandomIdentitySamplerAdv(Sampler):
    """Sampler yielding indices so that each batch contains
    ``batch_size // num_instances`` identities with exactly ``num_instances``
    samples per identity.

    Args:
        data_source: sequence of ``(_, pid, _, _)`` records.
        batch_size: total samples per batch.
        num_instances: samples drawn per identity within a batch.
    """

    def __init__(self, data_source, batch_size, num_instances):
        self.data_source = data_source
        self.batch_size = batch_size
        self.num_instances = num_instances
        self.num_pids_per_batch = batch_size // num_instances
        # pid -> list of dataset indices belonging to that identity.
        self.index_dic = defaultdict(list)
        for index, (_, pid, _, _) in enumerate(data_source):
            self.index_dic[pid].append(index)
        self.pids = list(self.index_dic.keys())
        # Epoch length estimate: per pid, the count (padded up to at least
        # num_instances) rounded down to a multiple of num_instances.
        self.length = 0
        for pid in self.pids:
            count = len(self.index_dic[pid])
            count = max(count, self.num_instances)
            self.length += count - (count % self.num_instances)

    def __iter__(self):
        # Pre-chunk each identity's shuffled (and, if short, resampled-with-
        # replacement) indices into groups of num_instances.
        chunks_per_pid = defaultdict(list)
        for pid in self.pids:
            candidates = copy.deepcopy(self.index_dic[pid])
            if len(candidates) < self.num_instances:
                candidates = np.random.choice(candidates, size=self.num_instances, replace=True)
            random.shuffle(candidates)
            chunk = []
            for index in candidates:
                chunk.append(index)
                if len(chunk) == self.num_instances:
                    chunks_per_pid[pid].append(chunk)
                    chunk = []
        # Repeatedly pick num_pids_per_batch identities at random and emit one
        # chunk from each, retiring identities with no chunks left.
        remaining_pids = copy.deepcopy(self.pids)
        final_idxs = []
        while len(remaining_pids) >= self.num_pids_per_batch:
            for pid in random.sample(remaining_pids, self.num_pids_per_batch):
                final_idxs.extend(chunks_per_pid[pid].pop(0))
                if len(chunks_per_pid[pid]) == 0:
                    remaining_pids.remove(pid)
        return iter(final_idxs)

    def __len__(self):
        return self.length
class ChineseCLIPVisionConfig(PretrainedConfig):
    """Configuration for the vision tower of a Chinese-CLIP model.

    Stores ViT hyperparameters (hidden size, layers/heads, patch/image size,
    activation, epsilons and initializer scales) passed to the vision encoder.
    """

    model_type = 'chinese_clip_vision_model'

    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    # NOTE(review): takes ``cls`` but shows no @classmethod decorator here —
    # presumably stripped during extraction; confirm against the original.
    def from_pretrained(cls, pretrained_model_name_or_path: Union[(str, os.PathLike)], **kwargs) -> 'PretrainedConfig':
        """Load this vision config, unwrapping it from a full 'chinese_clip'
        composite config when necessary."""
        (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # A full Chinese-CLIP checkpoint nests the vision settings.
        if (config_dict.get('model_type') == 'chinese_clip'):
            config_dict = config_dict['vision_config']
        if (('model_type' in config_dict) and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type)):
            logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class FalseConditionElimination(transformation.MultiStateTransformation):
    """Remove an inter-state edge whose condition is provably False.

    Matches a pair of states (a -> b); the edge is removable only when b has
    other incoming edges, the edge carries no assignments, and its symbolic
    condition simplifies to False.
    """

    state_a = transformation.PatternNode(sdfg.SDFGState)
    state_b = transformation.PatternNode(sdfg.SDFGState)

    # NOTE(review): takes ``cls`` but shows no @classmethod decorator here —
    # presumably stripped during extraction; confirm against the original.
    def expressions(cls):
        return [sdutil.node_path_graph(cls.state_a, cls.state_b)]

    def can_be_applied(self, graph: SDFG, expr_index, sdfg: SDFG, permissive=False):
        a: SDFGState = self.state_a
        b: SDFGState = self.state_b
        in_edges = graph.in_edges(b)
        # b must stay reachable through some other edge after removal.
        if (len(in_edges) <= 1):
            return False
        edge = graph.edges_between(a, b)[0]
        # Edges with assignments have side effects and cannot be dropped.
        if edge.data.assignments:
            return False
        if edge.data.is_unconditional():
            return False
        # Sympy equality against False (intentional ``==``, not ``is``):
        # only a condition that simplifies to False qualifies.
        scond = edge.data.condition_sympy()
        if (scond == False):
            return True
        return False

    def apply(self, _, sdfg: SDFG):
        """Delete the never-taken edge."""
        a: SDFGState = self.state_a
        b: SDFGState = self.state_b
        edge = sdfg.edges_between(a, b)[0]
        sdfg.remove_edge(edge)
def check_all_models_are_tested():
    """Verify each model module has exactly one matching test file and that
    the file covers the module's models.

    Raises:
        Exception: listing every module with a missing/duplicated test file
            or untested models.
    """
    failures = []
    test_files = get_model_test_files()
    for module in get_model_modules():
        short_name = module.__name__.split('.')[-1]
        matches = [f for f in test_files if f'test_{short_name}.py' in f]
        if len(matches) == 0:
            failures.append(f'{module.__name__} does not have its corresponding test file {matches}.')
        elif len(matches) > 1:
            failures.append(f'{module.__name__} has several test files: {matches}.')
        else:
            # Exactly one test file: check the models inside it as well.
            extra_failures = check_models_are_tested(module, matches[0])
            if extra_failures is not None:
                failures += extra_failures
    if len(failures) > 0:
        raise Exception(f'There were {len(failures)} failures:\n' + '\n'.join(failures))
def get_score(submission_folder):
    """Evaluate a CLRS submission checkpoint and return its test score.

    Loads spec/model pickles from ``submission_folder/checkpoints``, rebuilds
    samplers and the baseline model, restores the best checkpoint, and runs
    validation + test evaluation per algorithm; returns the last test score.
    """
    FLAGS(['eval.py'])  # parse absl flags with a synthetic argv
    # Map the hint mode flag to the two boolean model options.
    if (FLAGS.hint_mode == 'encoded_decoded'):
        encode_hints = True
        decode_hints = True
    elif (FLAGS.hint_mode == 'decoded_only'):
        encode_hints = False
        decode_hints = True
    elif (FLAGS.hint_mode == 'none'):
        encode_hints = False
        decode_hints = False
    else:
        raise ValueError('Hint mode not in {encoded_decoded, decoded_only, none}.')
    train_lengths = [int(x) for x in FLAGS.train_lengths]
    rng = np.random.RandomState(FLAGS.seed)
    rng_key = jax.random.PRNGKey(rng.randint((2 ** 32)))
    checkpoint_path = os.path.join(submission_folder, 'checkpoints')
    # SECURITY: pickle.load executes arbitrary code — only evaluate trusted
    # submission folders.
    spec_list = pickle.load(open(os.path.join(checkpoint_path, 'spec_list.pkl'), 'rb'))
    (train_samplers, val_samplers, val_sample_counts, test_samplers, test_sample_counts, spec_list) = create_samplers(rng, train_lengths)
    model_params = pickle.load(open(os.path.join(checkpoint_path, 'model_params.pkl'), 'rb'))
    # The processor factory is pickled as its constructor arguments; rebuild it.
    (processor_type, use_ln, nb_triplet_fts, nb_heads) = model_params['processor_factory']
    model_params['processor_factory'] = clrs.get_processor_factory(processor_type, use_ln=use_ln, nb_triplet_fts=nb_triplet_fts, nb_heads=nb_heads)
    model_params['checkpoint_path'] = checkpoint_path
    eval_model = BaselineModel(spec=spec_list, dummy_trajectory=[next(t) for t in val_samplers], **model_params)
    feedback_list = [next(t) for t in train_samplers]
    all_features = [f.features for f in feedback_list]
    eval_model.init(all_features, (FLAGS.seed + 1))
    logging.set_verbosity(logging.INFO)
    logging.info('Restoring best model from checkpoint...')
    eval_model.restore_model('best.pkl', only_load_processor=False)
    # Evaluate every algorithm; only the last iteration's test score is kept
    # because the return sits outside the loop body's accumulation.
    for algo_idx in range(len(train_samplers)):
        (new_rng_key, rng_key) = jax.random.split(rng_key)
        val_stats = collect_and_eval(val_samplers[algo_idx], functools.partial(eval_model.predict, algorithm_index=algo_idx), val_sample_counts[algo_idx], new_rng_key, extras={})
        (new_rng_key, rng_key) = jax.random.split(rng_key)
        test_stats = collect_and_eval(test_samplers[algo_idx], functools.partial(eval_model.predict, algorithm_index=algo_idx), test_sample_counts[algo_idx], new_rng_key, extras={})
    return test_stats['score']
def init_weights(net, init_type='normal', init_gain=0.02):
    """Initialize the weights of ``net`` in place.

    Conv*/Linear weights are initialized per ``init_type`` and their biases
    zeroed; BatchNorm2d affine params get weight ~ N(1, init_gain), bias 0.

    Args:
        net: module tree to initialize (applied via ``net.apply``).
        init_type: 'normal' | 'xavier' | 'kaiming' | 'orthogonal'.
        init_gain: std for 'normal', gain for 'xavier'/'orthogonal'
            (unused by 'kaiming').

    Raises:
        NotImplementedError: for an unrecognized ``init_type``.
    """
    def _init_module(module):
        cls_name = module.__class__.__name__
        if hasattr(module, 'weight') and ('Conv' in cls_name or 'Linear' in cls_name):
            if init_type == 'normal':
                init.normal_(module.weight.data, 0.0, init_gain)
            elif init_type == 'xavier':
                init.xavier_normal_(module.weight.data, gain=init_gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(module.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(module.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(module, 'bias') and module.bias is not None:
                init.constant_(module.bias.data, 0.0)
        elif 'BatchNorm2d' in cls_name:
            init.normal_(module.weight.data, 1.0, init_gain)
            init.constant_(module.bias.data, 0.0)
    print('initialize network with %s' % init_type)
    net.apply(_init_module)
class VoVNet(Backbone):
    """VoVNet backbone: a 3-conv stem followed by four OSA stages.

    The architecture (channels, block counts, eSE, depthwise, DCN settings)
    is read from ``_STAGE_SPECS[cfg.MODEL.VOVNET.CONV_BODY]``; requested
    feature maps are returned by name from ``forward``.
    """

    def __init__(self, cfg, input_ch, out_features=None):
        # out_features: names ('stem', 'stage2'..'stage5') to return.
        super(VoVNet, self).__init__()
        # The normalization layer is configured through a module-level global
        # consumed by the conv helpers.
        global _NORM
        _NORM = cfg.MODEL.VOVNET.NORM
        stage_specs = _STAGE_SPECS[cfg.MODEL.VOVNET.CONV_BODY]
        stem_ch = stage_specs['stem']
        config_stage_ch = stage_specs['stage_conv_ch']
        config_concat_ch = stage_specs['stage_out_ch']
        block_per_stage = stage_specs['block_per_stage']
        layer_per_block = stage_specs['layer_per_block']
        SE = stage_specs['eSE']
        depthwise = stage_specs['dw']
        self._out_features = out_features
        conv_type = (dw_conv3x3 if depthwise else conv3x3)
        # Stem: conv(s2) -> conv(s1) -> conv(s2), overall stride 4.
        stem = conv3x3(input_ch, stem_ch[0], 'stem', '1', 2)
        stem += conv_type(stem_ch[0], stem_ch[1], 'stem', '2', 1)
        stem += conv_type(stem_ch[1], stem_ch[2], 'stem', '3', 2)
        self.add_module('stem', nn.Sequential(OrderedDict(stem)))
        # NOTE: 'current_stirde' (sic) is kept as-is; renaming would be a
        # behavior-neutral change outside the scope of documentation.
        current_stirde = 4
        self._out_feature_strides = {'stem': current_stirde, 'stage2': current_stirde}
        self._out_feature_channels = {'stem': stem_ch[2]}
        stem_out_ch = [stem_ch[2]]
        # Input channels of stage i come from the previous stage's output.
        in_ch_list = (stem_out_ch + config_concat_ch[:(- 1)])
        self.stage_names = []
        for i in range(4):
            name = ('stage%d' % (i + 2))
            self.stage_names.append(name)
            self.add_module(name, _OSA_stage(in_ch_list[i], config_stage_ch[i], config_concat_ch[i], block_per_stage[i], layer_per_block, (i + 2), SE, depthwise, dcn_config={'stage_with_dcn': cfg.MODEL.VOVNET.STAGE_WITH_DCN[i], 'with_modulated_dcn': cfg.MODEL.VOVNET.WITH_MODULATED_DCN, 'deformable_groups': cfg.MODEL.VOVNET.DEFORMABLE_GROUPS}))
            self._out_feature_channels[name] = config_concat_ch[i]
            # Stage2 keeps the stem stride; each later stage halves resolution.
            if (not (i == 0)):
                self._out_feature_strides[name] = current_stirde = int((current_stirde * 2))
        self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_AT)

    def _initialize_weights(self):
        """Kaiming-normal init for all conv weights."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)

    def _freeze_backbone(self, freeze_at):
        """Freeze BN everywhere and all params up to stage ``freeze_at``.

        freeze_at < 0 disables freezing; index 0 refers to the stem.
        """
        if (freeze_at < 0):
            return
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                freeze_bn_params(m)
        for stage_index in range(freeze_at):
            if (stage_index == 0):
                m = self.stem
            else:
                m = getattr(self, ('stage' + str((stage_index + 1))))
            for p in m.parameters():
                p.requires_grad = False
        # Replace BN layers with frozen (affine-only) equivalents.
        FrozenBatchNorm2d.convert_frozen_batchnorm(self)

    def forward(self, x):
        """Run the backbone; return the requested feature maps by name."""
        outputs = {}
        x = self.stem(x)
        if ('stem' in self._out_features):
            outputs['stem'] = x
        for name in self.stage_names:
            x = getattr(self, name)(x)
            if (name in self._out_features):
                outputs[name] = x
        return outputs

    def output_shape(self):
        """Channel/stride spec for each requested output feature."""
        return {name: ShapeSpec(channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]) for name in self._out_features}
def find_library_location(lib_name: str) -> Path:
    """Locate a native library shipped with torch.

    Args:
        lib_name: file name of the library (e.g. 'libtorch.so').

    Returns:
        ``<torch install>/lib/<lib_name>`` when that file exists; otherwise
        the source-tree fallback ``<repo_root>/build/lib/<lib_name>``
        (returned without checking existence).
    """
    installed = Path(torch.__file__).resolve().parent / 'lib' / lib_name
    if os.path.exists(installed):
        return installed
    repo_root = Path(__file__).resolve().parent.parent.parent
    return repo_root / 'build' / 'lib' / lib_name
# NOTE(review): the next two lines look like '@pytest.mark.parametrize(...)'
# decorators whose '@pytest.mark' prefix was lost in extraction; as written
# they are not valid Python — restore the decorators upstream.
.parametrize('inspecs', inspecs_params())
.parametrize('op', ['logical_and_scalar', 'logical_or_scalar', 'logical_xor_scalar', 'greater_scalar', 'greater_equal_scalar', 'less_scalar', 'less_equal_scalar', 'equal_scalar', 'not_equal_scalar'])
def test_scalar_logical(inspecs, op, nnabla_opts):
    """Benchmark one nnabla scalar logical/comparison function.

    Runs the function named ``op`` with scalar argument 1 over the given
    input specs and writes timings via the configured benchmark writer.
    """
    func = getattr(F, op)
    fb = FunctionBenchmark(func, inspecs, [1], {}, nnabla_opts.ext, nnabla_opts.ext_kwargs)
    fb.benchmark()
    fb.write(writer=nnabla_opts.function_benchmark_writer)
def main():
    """Benchmark a segmentation model's inference FPS and dump results to JSON.

    Repeats the measurement ``--repeat-times`` times over up to 200 images
    (first 5 iterations are warmup) and records per-run FPS plus mean/variance.
    """
    args = parse_args()
    cfg = Config.fromfile(args.config)
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    # Output JSON goes to --work-dir, or ./work_dirs/<config-stem>/ by default.
    if (args.work_dir is not None):
        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
        json_file = osp.join(args.work_dir, f'fps_{timestamp}.json')
    else:
        work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
        mmcv.mkdir_or_exist(osp.abspath(work_dir))
        json_file = osp.join(work_dir, f'fps_{timestamp}.json')
    repeat_times = args.repeat_times
    # Disable cudnn autotuning for reproducible timings.
    torch.backends.cudnn.benchmark = False
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    benchmark_dict = dict(config=args.config, unit='img / s')
    overall_fps_list = []
    for time_index in range(repeat_times):
        print(f'Run {(time_index + 1)}:')
        # Rebuild dataset/model each run so runs are independent.
        dataset = build_dataset(cfg.data.test)
        data_loader = build_dataloader(dataset, samples_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=False, shuffle=False)
        cfg.model.train_cfg = None
        model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
        fp16_cfg = cfg.get('fp16', None)
        if (fp16_cfg is not None):
            wrap_fp16_model(model)
        if (('checkpoint' in args) and osp.exists(args.checkpoint)):
            load_checkpoint(model, args.checkpoint, map_location='cpu')
        model = MMDataParallel(model, device_ids=[0])
        model.eval()
        num_warmup = 5
        pure_inf_time = 0
        total_iters = 200
        for (i, data) in enumerate(data_loader):
            # Synchronize around the forward pass so timings cover GPU work.
            torch.cuda.synchronize()
            start_time = time.perf_counter()
            with torch.no_grad():
                model(return_loss=False, rescale=True, **data)
            torch.cuda.synchronize()
            elapsed = (time.perf_counter() - start_time)
            # Skip warmup iterations when accumulating.
            if (i >= num_warmup):
                pure_inf_time += elapsed
                if (((i + 1) % args.log_interval) == 0):
                    fps = (((i + 1) - num_warmup) / pure_inf_time)
                    print(f'Done image [{(i + 1):<3}/ {total_iters}], fps: {fps:.2f} img / s')
            if ((i + 1) == total_iters):
                fps = (((i + 1) - num_warmup) / pure_inf_time)
                print(f'''Overall fps: {fps:.2f} img / s
''')
                benchmark_dict[f'overall_fps_{(time_index + 1)}'] = round(fps, 2)
                overall_fps_list.append(fps)
                break
    benchmark_dict['average_fps'] = round(np.mean(overall_fps_list), 2)
    benchmark_dict['fps_variance'] = round(np.var(overall_fps_list), 4)
    print(f"Average fps of {repeat_times} evaluations: {benchmark_dict['average_fps']}")
    print(f"The variance of {repeat_times} evaluations: {benchmark_dict['fps_variance']}")
    mmcv.dump(benchmark_dict, json_file, indent=4)
class _ContextMethodMixin(object):
def save_for_backward(self, *tensors):
self.to_save = tensors
def mark_dirty(self, *args):
self.dirty_tensors = args
def mark_shared_storage(self, *pairs):
warnings.warn('mark_shared_storage is deprecated. Tensors with shared storages are automatically tracked. Note that calls to `set_()` are not tracked')
def mark_non_differentiable(self, *args):
self.non_differentiable = args
def set_materialize_grads(self, value):
self.materialize_grads = value |
class BMProfileParserPerfAI(BMProfileParser):
def __init__(self):
    """Initialize empty per-core command and monitor buffers."""
    super().__init__()
    # Decoded command streams, one entry per core.
    self.gdma_cmd = []
    self.bd_cmd = []
    # Hardware monitor records (timing), one entry per core.
    self.bd_monitor = []
    self.gdma_monitor = []
    # Set by parse() / to_txt() respectively.
    self.in_dir = None
    self.out_dir = None
def parse(self, in_dir):
    """Parse all per-iteration profile files under ``in_dir``.

    Reads the global file, then iterates '<iter_prefix>N.profile' files until
    one is missing, decoding monitor (GDMA/TIU) and command-info blocks and
    loading the referenced command data per core.
    """
    self.in_dir = in_dir
    if (not os.path.exists(in_dir)):
        logging.fatal("'{}' does not exist".format(in_dir))
        exit((- 1))
    # NOTE(review): no_perf_data and gobal_info (sic) are assigned but never
    # used here — possibly leftovers from an earlier version; confirm.
    no_perf_data = True
    global_file_path = os.path.join(in_dir, self.global_filename)
    gobal_info = self.__parse_global_file(global_file_path)
    iter_count = 0
    while True:
        block_filename = ((self.iter_prefix + str(iter_count)) + '.profile')
        iter_count += 1
        block_filename = os.path.join(in_dir, block_filename)
        blocks = parse_data_blocks(block_filename)
        # A missing file marks the end of the iteration sequence.
        if (blocks is None):
            break
        item = IterRecord()
        item.command_info = []
        # Block type -> (destination list, decoder); unknown types are no-ops.
        blocks_factory = {BlockType.MONITOR_GDMA.value: (item.monitor_gdma, self.__parse_monitor_gdma), BlockType.MONITOR_BD.value: (item.monitor_bd, self.__parse_monitor_tiu), BlockType.COMMAND.value: (item.command_info, self.__parse_command_info)}
        for block in blocks:
            (item_list, item_func) = blocks_factory.get(block.type.value, (0, (lambda x, y: 0)))
            item_func(item_list, block.content)
        # Resolve the raw command payloads for each core's command info.
        for (core_num, cmd_info) in enumerate(item.command_info):
            self.__read_command_data(cmd_info, core_num)
def to_txt(self, out_dir):
assert ((self.bd_monitor != []) and (self.gdma_monitor != [])), ''
self.__cycle2time()
self.__align_core_time()
self.__shift_time()
self.__time2cycle()
self.out_dir = out_dir
Path(self.out_dir).mkdir(parents=True, exist_ok=True)
dma_file = os.path.join(self.out_dir, 'tdmaRegInfo_{}.txt')
tiu_file = os.path.join(self.out_dir, 'tiuRegInfo_{}.txt')
for (idx, (bd, gdma, bd_cmd, gdma_cmd)) in enumerate(zip(self.bd_monitor, self.gdma_monitor, self.bd_cmd, self.gdma_cmd)):
with open(dma_file.format(idx), 'w') as f:
f.write('__CHIP_ARCH_ARGS__\n')
f.write(''.join((f''' {key}: {value}
''' for (key, value) in self.archlib.DMA_ARCH.items())))
for j in gdma:
reg_info = gdma_cmd[j.inst_id]
dma_info: dict = self.__get_gdma_info(j, reg_info)
dma_info['Core Id'] = idx
f.write('__TDMA_REG_INFO__\n')
f.write(''.join((f''' {key}: {value}
''' for (key, value) in dma_info.items())))
with open(tiu_file.format(idx), 'w') as f:
f.write('__CHIP_ARCH_ARGS__\n')
f.write(''.join((f''' {key}: {value}
''' for (key, value) in self.archlib.TIU_ARCH.items())))
for j in bd:
reg_info = bd_cmd[j.inst_id]
(tiu_info0, tiu_info1) = self.__get_tiu_info(j, reg_info)
tiu_info0['Core Id'] = idx
f.write('__TIU_REG_INFO__\n')
f.write(''.join((f''' {key}: {value}
''' for (key, value) in tiu_info0.items())))
f.write('{}:\n'.format(tiu_info0['Function Type']))
f.write(''.join((f''' {key}: {value}
''' for (key, value) in tiu_info1.items())))
def __cycle2time(self):
for i in self.gdma_monitor:
for j in i:
j.inst_start_time = int(((j.inst_start_time / self.archlib.GDMA_FREQ) * 1000))
j.inst_end_time = int(((j.inst_end_time / self.archlib.GDMA_FREQ) * 1000))
for i in self.bd_monitor:
for j in i:
j.inst_start_time = int(((j.inst_start_time / self.archlib.BD_FREQ) * 1000))
j.inst_end_time = int(((j.inst_end_time / self.archlib.BD_FREQ) * 1000))
def __time2cycle(self):
for i in self.gdma_monitor:
for j in i:
j.inst_start_time = int(((j.inst_start_time * self.archlib.GDMA_FREQ) / 1000))
j.inst_end_time = int(((j.inst_end_time * self.archlib.GDMA_FREQ) / 1000))
for i in self.bd_monitor:
for j in i:
j.inst_start_time = int(((j.inst_start_time * self.archlib.BD_FREQ) / 1000))
j.inst_end_time = int(((j.inst_end_time * self.archlib.BD_FREQ) / 1000))
def __align_core_time(self):
first_wait_cmd_id = []
first_wait_cmd_cycle = []
for (bd_cmd, bd_monitor) in zip(self.bd_cmd, self.bd_monitor):
if (bd_cmd[bd_monitor[0].inst_id].op_name == 'system.send_msg'):
bd_monitor.pop(0)
for i in self.bd_cmd:
for j in i:
if (j.op_name == 'system.wait_msg'):
first_wait_cmd_id.append(j.cmd_id)
break
for (cmd_id, item) in zip(first_wait_cmd_id, self.bd_monitor):
for j in item:
if (j.inst_id == (cmd_id - 1)):
first_wait_cmd_cycle.append(j.inst_start_time)
break
num_one_flag = False
for (bd, gdma, cycle) in zip(self.bd_monitor, self.gdma_monitor, first_wait_cmd_cycle):
if (not num_one_flag):
num_one_flag = True
continue
delta_cyle = (cycle - first_wait_cmd_cycle[0])
for j1 in itertools.chain(bd, gdma):
j1.inst_start_time = int((j1.inst_start_time - delta_cyle))
j1.inst_end_time = int((j1.inst_end_time - delta_cyle))
def __shift_time(self):
start_cycle = self.gdma_monitor[0][0].inst_start_time
for (_, (bd, gdma)) in enumerate(zip(self.bd_monitor, self.gdma_monitor)):
start_cycle = min(bd[0].inst_start_time, start_cycle, gdma[0].inst_start_time)
for (_, (bd, gdma)) in enumerate(zip(self.bd_monitor, self.gdma_monitor)):
for j1 in itertools.chain(bd, gdma):
j1.inst_start_time = int((j1.inst_start_time - start_cycle))
j1.inst_end_time = int((j1.inst_end_time - start_cycle))
def __parse_monitor_tiu(self, monitor_tiu: List, raw_data):
tmp = parse_monitor_bd(raw_data, self.archlib)
self.bd_monitor.append(tmp)
monitor_tiu.append(tmp)
def __parse_monitor_gdma(self, monitor_gdma: List, raw_data):
tmp = parse_monitor_gdma(raw_data, self.archlib)
zero_position = []
for (idx, dma) in enumerate(tmp):
if (dma.inst_id == 0):
zero_position.append(idx)
if (len(zero_position) == 0):
left = 0
right = len(tmp)
elif (len(zero_position) < 2):
left = 1
right = len(tmp)
elif (len(zero_position) == 2):
left = (zero_position[0] + 1)
right = zero_position[1]
else:
left = zero_position[1]
right = zero_position[(- 1)]
self.gdma_monitor.append(tmp[left:right])
monitor_gdma.append(tmp)
def __parse_command_info(self, command_info: List, raw_data):
command_info.append(self._BMProfileParser__parse_command_info(raw_data))
def __parse_global_file(self, filename):
assert os.path.isfile(filename)
re_arch = re_key_value('', 'arch')
ginfo = GlobalInfo()
with open(filename) as f:
for self.line in f:
if (len(self.line) == 0):
continue
if (self.match(re_arch) and (self.archlib is None)):
ginfo.set_arch(self.enum_val('arch', Arch))
self.archlib = ginfo.archlib
break
def __read_command_data(self, cmd_info, core_num):
gdma_num = 0
bd_num = 0
for num_info in cmd_info.group:
gdma_num += num_info[0]
bd_num += num_info[1]
gdma_parser = self.archlib.GDMACommandParser()
bd_parser = self.archlib.BDCommandParser()
gdma_cmd = self.__base_read_command_data(cmd_info.gdma_base, cmd_info.gdma_offset, self.archlib.EngineType.GDMA, core_num, gdma_parser)
bd_cmd = self.__base_read_command_data(cmd_info.bd_base, cmd_info.bd_offset, self.archlib.EngineType.BD, core_num, bd_parser)
self.gdma_cmd.append(gdma_cmd)
self.bd_cmd.append(bd_cmd)
def __base_read_command_data(self, base, offset, engine_type, core_num, command_parser):
basename = 'cmd_%x_%d_%d.dat'
command_filename = os.path.join(self.in_dir, (basename % (base, core_num, engine_type.value)))
if (not os.path.isfile(command_filename)):
return []
with open(command_filename, 'rb') as f:
f.seek(offset)
raw_data = f.read()
command_list = command_parser.parse(raw_data)
return command_list
def __get_gdma_info(self, monitor_info, reg_info):
return get_dma_info(monitor_info, reg_info)
def __get_tiu_info(self, monitor_info, reg_info):
return get_tiu_info(monitor_info, reg_info) |
def add_dataset_arguments(parser):
    """Register the dataset-related command-line options on *parser*."""
    def _add_paths(flag, help_text):
        # Zero-or-more path arguments, defaulting to an empty list.
        parser.add_argument(flag, nargs='*', default=[], help=help_text)

    def _add_limit(flag, help_text):
        # Optional integer cap (None when not given).
        parser.add_argument(flag, type=int, help=help_text)

    _add_paths('--train-examples-paths', 'Input training examples')
    _add_paths('--test-examples-paths', 'Input test examples')
    _add_limit('--train-max-examples', 'Maximum number of training examples')
    _add_limit('--test-max-examples', 'Maximum number of test examples')
    _add_paths('--eval-examples-paths', 'Path to multi-response evaluation files')
class WeightRing(CombinatorialFreeModule):
    """The weight ring attached to a Weyl character ring.

    A commutative algebra over the parent's base ring whose basis is indexed
    by the weights of the parent's ambient space; multiplication of basis
    elements adds the indexing weights. Used to manipulate characters
    weight-by-weight (e.g. Demazure and Demazure-Lusztig operators).
    """
    def __classcall__(cls, parent, prefix=None):
        # Normalize the constructor arguments so equal inputs produce the
        # identical (cached) UniqueRepresentation parent.
        return super().__classcall__(cls, parent, prefix=prefix)
    def __init__(self, parent, prefix):
        # Mirror the parent's (WeylCharacterRing's) core data.
        self._parent = parent
        self._style = parent._style
        self._prefix = prefix
        self._space = parent._space
        self._cartan_type = parent._cartan_type
        self._rank = parent._rank
        self._origin = parent._origin
        self._base_ring = parent._base_ring
        if (prefix is None):
            # Derive a default prefix by swapping the case of the parent's
            # prefix ('x' is treated as a separator character); if the case is
            # mixed, fall back to e.g. 'a2' from the Cartan type.
            if self._parent._prefix.replace('x', '_').isupper():
                prefix = self._parent._prefix.lower()
            elif self._parent._prefix.islower():
                prefix = self._parent._prefix.upper()
            else:
                prefix = (self._cartan_type[0].lower() + str(self._rank))
        self._prefix = prefix
        category = AlgebrasWithBasis(self._base_ring).Commutative()
        CombinatorialFreeModule.__init__(self, self._base_ring, self._space, category=category)
    def _repr_(self):
        """Return a string representation of this weight ring."""
        return ('The Weight ring attached to %s' % self._parent)
    def __call__(self, *args):
        # Bundle multiple positional arguments into one tuple so a weight can
        # be passed coordinate-wise, e.g. a2(1,0,0).
        if (len(args) > 1):
            args = (args,)
        return super().__call__(*args)
    def _element_constructor_(self, weight):
        """Coerce ``weight`` (in the parent's notation style) into a basis monomial."""
        weight = self._space.from_vector_notation(weight, style=self._style)
        return self.monomial(weight)
    def product_on_basis(self, a, b):
        """Multiplication of basis elements adds the indexing weights."""
        return self((a + b))
    def some_elements(self):
        """Return the monomials of the fundamental weights."""
        return [self.monomial(x) for x in self.fundamental_weights()]
    def one_basis(self):
        """The multiplicative identity is indexed by the zero weight."""
        return self._space.zero()
    def parent(self):
        """Return the Weyl character ring this weight ring is attached to."""
        return self._parent
    def weyl_character_ring(self):
        """Alias for :meth:`parent`."""
        return self._parent
    def cartan_type(self):
        """Return the Cartan type."""
        return self._cartan_type
    def space(self):
        """Return the ambient weight space."""
        return self._space
    def fundamental_weights(self):
        """Return the fundamental weights of the ambient space."""
        return self._space.fundamental_weights()
    def simple_roots(self):
        """Return the simple roots of the ambient space."""
        return self._space.simple_roots()
    def positive_roots(self):
        """Return the positive roots of the ambient space."""
        return self._space.positive_roots()
    def wt_repr(self, wt):
        """Render weight ``wt`` with this ring's prefix, delegating to the parent."""
        return (self._prefix + self.parent()._wt_repr(wt))
    def _repr_term(self, t):
        """Basis terms print via :meth:`wt_repr`."""
        return self.wt_repr(t)
    class Element(CombinatorialFreeModule.Element):
        """An element of the weight ring: a formal sum of weights."""
        def cartan_type(self):
            """Return the Cartan type of the parent ring."""
            return self.parent()._cartan_type
        def weyl_group_action(self, w):
            """Apply the Weyl group element ``w`` to every weight in the support."""
            return self.map_support(w.action)
        def character(self):
            """Assemble this weight-multiplicity sum into a Weyl character ring element."""
            return self.parent().parent().char_from_weights(self.monomial_coefficients())
        def scale(self, k):
            """Multiply every weight in the support by the nonzero scalar ``k``."""
            if (k == 0):
                raise ValueError('parameter must be nonzero')
            d1 = self.monomial_coefficients()
            d2 = {(k * mu): coeff for (mu, coeff) in d1.items()}
            return self.parent()._from_dict(d2)
        def shift(self, mu):
            """Translate every weight in the support by ``mu``."""
            d1 = self.monomial_coefficients()
            d2 = {(mu + nu): val for (nu, val) in d1.items()}
            return self.parent()._from_dict(d2)
        def demazure(self, w, debug=False):
            """Apply the Demazure operator for ``w`` (a Weyl group element or reduced word).

            Weights are converted to coroot coordinates (tuples of inner
            products with the simple coroots) before delegating to the
            Weyl character ring's ``_demazure_helper``.
            """
            if isinstance(w, list):
                word = w
            else:
                word = w.reduced_word()
            d1 = self.monomial_coefficients()
            d = {}
            alphacheck = self.parent()._space.simple_coroots()
            for v in d1:
                d[tuple((v.inner_product(alphacheck[j]) for j in self.parent().space().index_set()))] = d1[v]
            return self.parent()._from_dict(self.parent().parent()._demazure_helper(d, word, debug=debug))
        def demazure_lusztig(self, i, v):
            """Apply the Demazure-Lusztig operator T_i with parameter ``v``.

            ``i`` may be a simple-root index, a list of indices (applied
            right-to-left as a composition), or an object with a
            ``reduced_word`` method.
            """
            if (i in self.parent().space().index_set()):
                rho = self.parent().space().from_vector_notation(self.parent().space().rho(), style='coroots')
                inv = self.scale((- 1))
                # Expresses T_i through the Demazure operator conjugated by a
                # rho-shift and the w -> -w involution.
                return ((- inv.shift((- rho)).demazure([i]).shift(rho)) + (v * inv.demazure([i]))).scale((- 1))
            elif isinstance(i, list):
                if (not i):
                    return self
                elif (len(i) == 1):
                    return self.demazure_lusztig(i[0], v)
                else:
                    return self.demazure_lusztig(i[1:], v).demazure_lusztig(i[:1], v)
            else:
                try:
                    return self.demazure_lusztig(i.reduced_word(), v)
                except Exception:
                    raise ValueError('unknown index {}'.format(i))
class RegLog(nn.Module):
    """Linear probe (logistic regression) on top of frozen conv features.

    Args:
        num_labels: number of output classes.
        arch: backbone name; with ``global_avg`` one of 'resnet50',
            'resnet50w2', 'resnet50w4', otherwise only 'resnet50'.
        global_avg: if True, global-average-pool the feature map to 1x1;
            otherwise use a 6x6 average pool (resnet50 only).
        use_bn: if True, apply BatchNorm2d to the pooled feature map.
    """

    def __init__(self, num_labels, arch='resnet50', global_avg=False, use_bn=True):
        super(RegLog, self).__init__()
        self.bn = None
        if global_avg:
            if (arch == 'resnet50'):
                s = 2048
            elif (arch == 'resnet50w2'):
                s = 4096
            elif (arch == 'resnet50w4'):
                s = 8192
            else:
                # Previously an unknown arch fell through and raised a
                # confusing NameError on `s` below.
                raise ValueError('unsupported arch: {}'.format(arch))
            self.av_pool = nn.AdaptiveAvgPool2d((1, 1))
            # After 1x1 pooling the channel count equals the flattened size.
            num_channels = s
        else:
            assert (arch == 'resnet50')
            # 6x6 pool (stride 1) on a 7x7 map leaves 2048 x 2 x 2 = 8192 features.
            s = 8192
            self.av_pool = nn.AvgPool2d(6, stride=1)
            num_channels = 2048
        if use_bn:
            # BUG FIX: was hard-coded nn.BatchNorm2d(2048), which crashed at
            # forward time for resnet50w2/w4 with global_avg (4096/8192 channels).
            # Behavior is unchanged for all previously-working configurations.
            self.bn = nn.BatchNorm2d(num_channels)
        self.linear = nn.Linear(s, num_labels)
        self.linear.weight.data.normal_(mean=0.0, std=0.01)
        self.linear.bias.data.zero_()

    def forward(self, x):
        """Pool, optionally batch-normalize, flatten, and classify ``x``."""
        x = self.av_pool(x)
        if (self.bn is not None):
            x = self.bn(x)
        x = x.view(x.size(0), (- 1))
        return self.linear(x)
def write_prediction_result(prediction_result: PredictionResult, output_dir) -> None:
    """Persist one inference result as a ``.npy`` file named after its input.

    The output filename is derived from the example's ``.wav`` basename.
    ``output_dir`` may be a GCS URI (``gs://bucket/path``) or a local
    directory (created if missing).
    """
    input_path = prediction_result.example
    representation = prediction_result.inference
    input_filename = os.path.basename(input_path.replace('gs://', ''))
    output_filename = input_filename.replace('.wav', '.npy')
    if output_dir.startswith('gs://'):
        (bucket_name, blob_path) = split_gcs_bucket_and_filepath(output_dir)
        output_path = os.path.join(blob_path, output_filename)
        print(f'[DEBUG] writing output to gs://{bucket_name}/{output_path}')
        logging.warning(f'[DEBUG] writing output to gs://{bucket_name}/{output_path}')
        storage_client = storage.Client()
        bucket = storage_client.bucket(bucket_name)
        blob = bucket.blob(output_path)
        with blob.open('wb') as f:
            np.save(f, representation)
    else:
        # BUG FIX: previously read the undefined global ``args.output_dir``
        # (NameError whenever the local branch ran); use the parameter instead.
        output_dir = pathlib.Path(output_dir)
        os.makedirs(output_dir, exist_ok=True)
        output_path = os.path.join(output_dir, output_filename)
        np.save(output_path, representation)
    return
def _unique_python(values, *, return_inverse, return_counts):
    """Find the sorted unique values of ``values`` using pure-Python hashing.

    Missing values are separated out and appended after the sorted uniques.
    Optionally also returns the inverse mapping and/or per-unique counts; a
    bare array is returned when neither extra is requested.
    """
    try:
        distinct = set(values)
        (distinct, missing_values) = _extract_missing(distinct)
        uniques = sorted(distinct)
        # Missing values (e.g. NaN/None) go after the sorted regular values.
        uniques.extend(missing_values.to_list())
        uniques = np.array(uniques, dtype=values.dtype)
    except TypeError:
        # Mixed str/number inputs are not orderable/hashable consistently.
        seen_types = sorted((t.__qualname__ for t in set((type(v) for v in values))))
        raise TypeError(f'Encoders require their input argument must be uniformly strings or numbers. Got {seen_types}')
    extras = []
    if return_inverse:
        extras.append(_map_to_integer(values, uniques))
    if return_counts:
        extras.append(_get_counts(values, uniques))
    if not extras:
        return uniques
    return (uniques, *extras)
class Transformer_exp(FNN_exp):
    """Experiment wrapper that swaps the FNN model for a TransformerNet."""

    def __init__(self, data_path, param_dict, config):
        super().__init__(data_path, param_dict, config)

    def load_model(self):
        """Build the TransformerNet from ``param_dict``, log its layout, and return it."""
        params = self.param_dict
        model = TransformerNet(
            hidden_size=params['hidden_size'],
            num_layers=params['num_layers'],
            dropout=params['dropout'],
            num_heads=params['num_heads'],
            classification_token=params['classification_token'],
            num_target_label=len(self.dataloader.new_true_label_mapping),
        )
        print_network(model)
        return model
def job_fssdq_opt(p, data_source, tr, te, r, J, null_sim=None):
    """FSSD goodness-of-fit test with optimized Gaussian width and J test locations.

    Grid-searches the kernel bandwidth around the median heuristic on the
    training split ``tr``, then jointly optimizes the bandwidth and the ``J``
    test locations, and finally runs the test on the held-out split ``te``.
    ``r`` seeds the null simulator and the location initialization.
    ``data_source`` is unused here — presumably kept for a common job
    signature; verify against the job runner.
    Returns a dict with the test result, wall-clock seconds, the test object,
    and the optimization info.
    NOTE(review): reads a module-level ``alpha`` (significance level) —
    confirm it is defined in this module.
    """
    if (null_sim is None):
        null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=r)
    Xtr = tr.data()
    with util.ContextTimer() as t:
        # Candidate bandwidths: median-distance-squared scaled by 2^-3 .. 2^3.
        n_gwidth_cand = 5
        gwidth_factors = (2.0 ** np.linspace((- 3), 3, n_gwidth_cand))
        med2 = (util.meddistance(Xtr, 1000) ** 2)
        k = kernel.KGauss((med2 * 2))
        # Initialize the J test locations from a Gaussian fit to the training data.
        V0 = util.fit_gaussian_draw(Xtr, J, seed=(r + 1), reg=1e-06)
        list_gwidth = np.hstack((med2 * gwidth_factors))
        (besti, objs) = gof.GaussFSSD.grid_search_gwidth(p, tr, V0, list_gwidth)
        gwidth = list_gwidth[besti]
        assert util.is_real_num(gwidth), ('gwidth not real. Was %s' % str(gwidth))
        assert (gwidth > 0), ('gwidth not positive. Was %.3g' % gwidth)
        logging.info(('After grid search, gwidth=%.3g' % gwidth))
        # Jointly optimize the test locations and the bandwidth.
        ops = {'reg': 0.01, 'max_iter': 50, 'tol_fun': 0.0001, 'disp': True, 'locs_bounds_frac': 10.0, 'gwidth_lb': 0.1, 'gwidth_ub': 1000.0}
        (V_opt, gwidth_opt, info) = gof.GaussFSSD.optimize_locs_widths(p, tr, gwidth, V0, **ops)
        k_opt = kernel.KGauss(gwidth_opt)
        # Test on the held-out split to keep the optimization honest.
        fssd_opt = gof.FSSD(p, k_opt, V_opt, null_sim=null_sim, alpha=alpha)
        fssd_opt_result = fssd_opt.perform_test(te)
    return {'test_result': fssd_opt_result, 'time_secs': t.secs, 'goftest': fssd_opt, 'opt_info': info}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.