code stringlengths 101 5.91M |
|---|
def test_isotonic_calibration_fit_predict():
    """Smoke-test IsotonicCalibration on 2-class and 1-D regression targets."""
    # Two-class case: one regressor per class is created lazily by fit().
    features = np.array([[1, 1], [2, 3.5]])
    targets = np.array([[0.9, 0.1], [0.2, 0.8]])
    calibrator = IsotonicCalibration()
    assert len(calibrator.regressors) == 0
    calibrator.fit(x_train=features, y_train=targets)
    assert calibrator.n_classes == 2
    predictions = calibrator.predict(x=np.array([[0, 1]]))
    assert predictions.shape == (1, 2)
    # Calibrated class probabilities should sum to one.
    assert np.sum(predictions) == pytest.approx(1.0)
    # One-dimensional targets: treated as a single class.
    features = np.array([0.1, 0.5, 0.2])
    targets = np.array([0.2, 0.55, 0.3])
    calibrator = IsotonicCalibration()
    assert len(calibrator.regressors) == 0
    calibrator.fit(x_train=features, y_train=targets)
    assert calibrator.n_classes == 1
    predictions = calibrator.predict(x=np.array([0.5, 0.1]))
    assert predictions.shape == (2, 1)
class TestSimpleDB(unittest.TestCase):
    """Integration tests for SimpleDB backed by test_files/db.yaml."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: unittest invokes setUpClass on the class itself, so it must
        # be a classmethod; without the decorator it is called with no
        # arguments and raises TypeError before any test runs.
        cls.sdb = SimpleDB()
        cls.sdb.set_db('test_files/db.yaml')

    def setUp(self):
        # Share the single class-level DB instance across all tests.
        self.new_db = TestSimpleDB.sdb

    def test_get_order_status(self):
        res = self.new_db.get_order_status(1)
        self.assertEqual(res, 'placed but not yet shipped')
        # Unknown order id yields the sentinel error string.
        res = self.new_db.get_order_status(1000)
        self.assertEqual(res, 'ERROR')

    def test_query_order(self):
        res = self.new_db.query_order(1)
        expected_qs = (('delivery_address:121 street Forest Hills||' + 'product:pizza||quantity:10||') + 'order_status:placed but not yet shipped')
        self.assertEqual(res, expected_qs)
        # Unknown order id yields the string 'None' (not the None object).
        res = self.new_db.query_order(1000)
        self.assertEqual(res, 'None')

    def test_add_more_to_order(self):
        res = self.new_db.add_more_to_order(2, 1)
        self.assertEqual(res, '21')

    def test_single_step_verify(self):
        entities1 = {'email_address': ''}
        entities2 = {'email_address': '', 'zip_code': '94301'}
        entities3 = {'email_address': '', 'zip_code': '93301'}
        res = self.new_db.single_step_verify(entities1)
        self.assertTrue(res)
        res = self.new_db.single_step_verify(entities2)
        self.assertTrue(res)
        # Mismatched zip code must fail verification.
        res = self.new_db.single_step_verify(entities3)
        self.assertFalse(res)
def run(args, graph, feat, labels, train_idx, val_idx, test_idx, n_running):
    """Train one model instance for args.n_epochs and track the best epoch.

    Returns:
        (best_val_acc, final_test_acc): validation accuracy at the epoch with
        the lowest validation loss, and the test accuracy at that same epoch.
    """
    model = gen_model(args)
    model = model.to(device)
    # Count only trainable parameters.
    TRAIN_NUMBERS = sum([np.prod(p.size()) for p in model.parameters() if p.requires_grad])
    print(f'Number of params: {TRAIN_NUMBERS}')
    optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wd)
    lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=100, verbose=True, min_lr=0.001)
    total_time = 0
    (best_val_acc, final_test_acc, best_val_loss) = (0, 0, float('inf'))
    for epoch in range(1, (args.n_epochs + 1)):
        tic = time.time()
        # Manual warmup/decay on top of the plateau scheduler.
        adjust_learning_rate(optimizer, args.lr, epoch)
        (loss, pred) = train(model, graph, feat, labels, train_idx, optimizer)
        if ((epoch % args.eval_steps) == 0):
            (train_acc, val_acc, test_acc, val_loss, test_loss) = evaluate(model, graph, feat, labels, train_idx, val_idx, test_idx, args.metric)
            wandb.log({'Train_loss': loss, 'Val_loss': val_loss, 'Test_loss': test_loss})
        # Plateau scheduler steps on the *training* loss every epoch.
        lr_scheduler.step(loss)
        toc = time.time()
        total_time += (toc - tic)
        # NOTE(review): val_loss/val_acc/test_acc are only assigned inside the
        # eval branch above; if args.eval_steps > 1 this line raises NameError
        # on the first epoch — confirm eval_steps is always 1.
        if (val_loss < best_val_loss):
            best_val_loss = val_loss
            best_val_acc = val_acc
            final_test_acc = test_acc
        if ((epoch % args.log_every) == 0):
            print(f'''Run: {n_running}/{args.n_runs}, Epoch: {epoch}/{args.n_epochs}, Average epoch time: {(total_time / epoch):.2f}
Loss: {loss.item():.4f}
Train/Val/Test loss: {loss:.4f}/{val_loss:.4f}/{test_loss:.4f}
Train/Val/Test/Best val/Final test {args.metric}: {train_acc:.4f}/{val_acc:.4f}/{test_acc:.4f}/{best_val_acc:.4f}/{final_test_acc:.4f}''')
    print(('*' * 50))
    print(f'Best val acc: {best_val_acc}, Final test acc: {final_test_acc}')
    print(('*' * 50))
    return (best_val_acc, final_test_acc)
class GreedyRTSPlayer():
    """One-step greedy player: picks the valid action whose resulting board
    has the highest game score for player 1."""

    def __init__(self, game):
        self.game = game

    def play(self, board):
        """Return the index of the greedily best valid action for ``board``."""
        valid_mask = self.game.getValidMoves(board, 1)
        print('sum valids', sum(valid_mask))
        # Collect (negated score, action) pairs so the best action sorts first.
        scored_moves = []
        for move in range(self.game.getActionSize()):
            if valid_mask[move] == 0:
                continue
            next_board, _ = self.game.getNextState(board, 1, move)
            move_score = self.game.getScore(next_board, 1)
            scored_moves.append((-move_score, move))
        scored_moves.sort()
        best_action = scored_moves[0][1]
        # Decode the flat action index only for the debug print below.
        side = board.shape[0]
        y, x, action_index = np.unravel_index(best_action, [side, side, NUM_ACTS])
        print('returned act', x, y, ACTS_REV[action_index])
        return best_action
# NOTE(review): the two decorator lines were garbled by extraction
# ('.parametrize' / '_utils.test' with the decorator prefixes stripped).
# Reconstructed as the conventional pytest.mark.parametrize + test_utils.test
# pair used by this test suite — confirm against the original file.
@pytest.mark.parametrize('dt,n', [(ti.i8, 8), (ti.u8, 8), (ti.i16, 16), (ti.u16, 16), (ti.i32, 32), (ti.u32, 32)])
@test_utils.test(exclude=[ti.opengl, ti.gles, ti.vulkan, ti.dx11])
def test_overflow(dt, n):
    """Run the shared overflow check for integer type ``dt`` of width ``n`` bits."""
    _test_overflow(dt, n)
def add_export_config(cfg):
    """Attach the EXPORT_CAFFE2 sub-config to ``cfg`` (in place) and return it.

    The config is temporarily defrosted so the new node can be added; its
    original frozen/unfrozen state is restored afterwards.
    """
    was_frozen = cfg.is_frozen()  # remember mutability so we can restore it
    cfg.defrost()
    cfg.EXPORT_CAFFE2 = CfgNode()
    cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT = False
    if was_frozen:
        cfg.freeze()
    return cfg
def test_build_gaussian_pyramid_gray():
    """Each Gaussian-pyramid level halves both dimensions of the gray image."""
    n_rows, n_cols = image_gray.shape
    gray_pyramid = pyramids.pyramid_gaussian(image_gray, downscale=2, channel_axis=None)
    for level, level_image in enumerate(gray_pyramid):
        expected_shape = (n_rows / 2 ** level, n_cols / 2 ** level)
        assert_array_equal(level_image.shape, expected_shape)
def register_optimizer_builder(name, builder):
    """Register ``builder`` under ``name`` in the global optimizer-builder registry.

    Raises:
        KeyError: if ``name`` is already registered with another builder.
    """
    if (name in _OPTIMIZER_BUILDERS):
        # BUG FIX: the original message was missing a space between the two
        # sentences ("...{}.Solve key conflicts...").
        raise KeyError('Duplicate keys for {:s} with {} and {}. Solve key conflicts first!'.format(name, _OPTIMIZER_BUILDERS[name], builder))
    _OPTIMIZER_BUILDERS[name] = builder
class AlgebraicScheme_subscheme_projective_field(AlgebraicScheme_subscheme_projective):
    """Projective subscheme over a field, with Chow forms and height functions."""

    def _morphism(self, *args, **kwds):
        # Morphisms of this subscheme are the field-specialized polynomial maps.
        return SchemeMorphism_polynomial_projective_subscheme_field(*args, **kwds)

    def Chow_form(self):
        """Return the Chow form of this subscheme as a polynomial in the
        Pluecker coordinates ``t0, ..., t(D-1)`` of the (d+1)-planes meeting it.
        """
        I = self.defining_ideal()
        P = self.ambient_space()
        R = P.coordinate_ring()
        N = (P.dimension() + 1)   # number of homogeneous coordinates
        d = self.dimension()
        # One block of N generic coefficients u_* per hyperplane (d+1 blocks).
        SS = PolynomialRing(R.base_ring(), 'u', (N * (d + 1)), order='lex')
        vars = (SS.variable_names() + R.variable_names())
        S = PolynomialRing(R.base_ring(), vars, order='lex')
        n = S.ngens()
        # The last N generators of S play the role of the original coordinates.
        newcoords = [S.gen(((n - N) + t)) for t in range(N)]
        phi = R.hom(newcoords, S)
        phi(self.defining_polynomials()[0])
        # Linear forms cutting out the d+1 generic hyperplanes.
        l = []
        for i in range((d + 1)):
            t = 0
            for j in range(N):
                t += (S.gen(((N * i) + j)) * newcoords[j])
            l.append(t)
        J = (phi(I) + S.ideal(l))
        # Saturate w.r.t. the irrelevant ideal, then eliminate the coordinates.
        J2 = J.saturation(S.ideal([phi(u) for u in R.gens()]))[0]
        E = J2.elimination_ideal(newcoords)
        # D = number of Pluecker coordinates of (N-d-1)-planes in N-space.
        D = binomial(N, ((N - d) - 1))
        tvars = [(str('t') + str(i)) for i in range(D)]
        T = PolynomialRing(R.base_ring(), (tvars + list(S.variable_names())), order='lex')
        L = []
        coeffs = [T.gen(i) for i in range((0 + len(tvars)), ((N * (d + 1)) + len(tvars)))]
        M = matrix(T, (d + 1), N, coeffs)
        i = 0
        # Relate each t_i to the corresponding maximal minor (Pluecker relation).
        for c in M.minors((d + 1)):
            L.append((T.gen(i) - c))
            i += 1
        br = T.ideal(L)
        psi = S.hom((coeffs + [0 for _ in range(N)]), T)
        E2 = T.ideal(([psi(u) for u in E.gens()] + br))
        CH = E2.elimination_ideal(coeffs)
        rel = br.elimination_ideal(coeffs)
        # Reduce the candidate generators modulo the Pluecker relations.
        reduced = []
        for f in CH.gens():
            reduced.append(f.reduce(rel))
        T2 = PolynomialRing(R.base_ring(), tvars)
        alp = T.hom((tvars + (((N * (d + 1)) + N) * [0])), T2)
        # Pick the reduced generator of smallest positive degree.
        degs = [u.degree() for u in reduced]
        mind = max(degs)
        # NOTE: the loop variable shadows the dimension `d`, which is no
        # longer needed at this point.
        for d in degs:
            if ((d < mind) and (d > 0)):
                mind = d
        ind = degs.index(mind)
        CF = reduced[ind]
        rel2 = (rel + [CF])
        # Sanity check: CF together with the relations generates all of CH.
        assert all(((f in rel2) for f in CH.gens())), 'did not find a principal generator'
        return alp(CF)

    def global_height(self, prec=None):
        """Global height of the Chow form; ``prec`` is the working precision."""
        return self.Chow_form().global_height(prec)

    def local_height(self, v, prec=None):
        """Local height of the Chow form at the place ``v``."""
        return self.Chow_form().local_height(v, prec)

    def local_height_arch(self, i, prec=None):
        """Local height of the Chow form at the ``i``-th archimedean place."""
        return self.Chow_form().local_height_arch(i, prec)
def segment_window_test(x_test, y_test, window_size, n_sensor_val):
    """Segment a test stream into fixed-size, non-overlapping windows.

    Each window of ``x_test`` becomes one row of ``segments``; its label is
    the statistical mode of the corresponding slice of ``y_test``.  The final
    window is shifted back so it still covers ``window_size`` samples.

    Returns:
        (segments, labels) of shapes
        (len(x_test)//window_size + 1, window_size, n_sensor_val) and
        (len(y_test)//window_size + 1,).
    """
    segments = np.zeros((((len(x_test) // window_size) + 1), window_size, n_sensor_val))
    labels = np.zeros(((len(y_test) // window_size) + 1))
    i_segment = 0
    i_label = 0
    for (start, end) in windowz(x_test, window_size, use_overlap=False):
        if (end >= x_test.shape[0]):
            # Last window: move its start back so the slice is full length.
            pad_len = (window_size - len(x_test[start:end]))
            segments[i_segment] = x_test[(start - pad_len):end]
            m = stats.mode(y_test[(start - pad_len):end])
            labels[i_label] = m[0]
        else:
            m = stats.mode(y_test[start:end])
            segments[i_segment] = x_test[start:end]
            labels[i_label] = m[0]
        i_label += 1
        i_segment += 1
    return (segments, labels)
def test_convert_units_file(tokenizer):
    """File-based DataLoader honors blank-line / newline handling in raw text."""
    with tempfile.TemporaryDirectory(dir=TEST_WORKING_DIR) as test_dir:
        # Case 1: two consecutive newlines in the raw text.
        labels = '\n\n000\n\n'
        raw_text = 'This is a test.\n\nfoo\n\n'
        (txt_file, label_file) = write_tokenizer_input(test_dir, raw_text, labels)
        batches = DataLoader(tokenizer.config, input_files={'txt': txt_file, 'label': label_file}, vocab=tokenizer.vocab, evaluation=True, dictionary=tokenizer.trainer.dictionary)
        assert (batches.data == EXPECTED_TWO_NL_FILE)
        check_labels(batches.labels(), EXPECTED_TWO_NL_FILE_LABELS)
        # Case 2: a single newline between the two sentences.
        labels = '\n\n'
        raw_text = 'This is a test.\nfoo\n\n'
        (txt_file, label_file) = write_tokenizer_input(test_dir, raw_text, labels)
        batches = DataLoader(tokenizer.config, input_files={'txt': txt_file, 'label': label_file}, vocab=tokenizer.vocab, evaluation=True, dictionary=tokenizer.trainer.dictionary)
        assert (batches.data == EXPECTED_ONE_NL_FILE)
        check_labels(batches.labels(), EXPECTED_ONE_NL_FILE_LABELS)
        # Case 3: with skip_newline set, newline characters are dropped.
        skip_newline_config = dict(tokenizer.config)
        skip_newline_config['skip_newline'] = True
        labels = '\n\n'
        raw_text = 'This is a test.\nfoo\n\n'
        (txt_file, label_file) = write_tokenizer_input(test_dir, raw_text, labels)
        batches = DataLoader(skip_newline_config, input_files={'txt': txt_file, 'label': label_file}, vocab=tokenizer.vocab, evaluation=True, dictionary=tokenizer.trainer.dictionary)
        assert (batches.data == EXPECTED_SKIP_NL_FILE)
        check_labels(batches.labels(), EXPECTED_SKIP_NL_FILE_LABELS)
def translate_strips_operator_aux(operator, dictionary, ranges, mutex_dict, mutex_ranges, implied_facts, condition):
    """Translate one STRIPS operator into a SAS+ operator (helper).

    Gathers add and delete effects per finite-domain variable (translating
    each effect condition), and for deleted values that are not re-added,
    emits an effect setting the variable to its "none of those" value.
    Returns the result of ``build_sas_operator``.
    """
    effects_by_variable = defaultdict((lambda : defaultdict(list)))
    # First pass: add effects, grouped by SAS variable and value.
    add_conds_by_variable = defaultdict(list)
    for (conditions, fact) in operator.add_effects:
        eff_condition_list = translate_strips_conditions(conditions, dictionary, ranges, mutex_dict, mutex_ranges)
        if (eff_condition_list is None):
            # Effect condition violates a mutex and can never fire; skip it.
            continue
        for (var, val) in dictionary[fact]:
            effects_by_variable[var][val].extend(eff_condition_list)
            add_conds_by_variable[var].append(conditions)
    # Second pass: delete effects, grouped the same way.
    del_effects_by_variable = defaultdict((lambda : defaultdict(list)))
    for (conditions, fact) in operator.del_effects:
        eff_condition_list = translate_strips_conditions(conditions, dictionary, ranges, mutex_dict, mutex_ranges)
        if (eff_condition_list is None):
            continue
        for (var, val) in dictionary[fact]:
            del_effects_by_variable[var][val].extend(eff_condition_list)
    # For each deleted value not simultaneously re-added, set the variable to
    # its "none of those" value (by convention the last value in the domain).
    for var in del_effects_by_variable:
        no_add_effect_condition = negate_and_translate_condition(add_conds_by_variable[var], dictionary, ranges, mutex_dict, mutex_ranges)
        if (no_add_effect_condition is None):
            # Some add effect on var fires unconditionally; deletion is moot.
            continue
        none_of_those = (ranges[var] - 1)
        for (val, conds) in del_effects_by_variable[var].items():
            for cond in conds:
                # The delete effect only triggers when var currently holds val.
                if ((var in cond) and (cond[var] != val)):
                    continue
                cond[var] = val
                for no_add_cond in no_add_effect_condition:
                    new_cond = dict(cond)
                    for (cvar, cval) in no_add_cond.items():
                        if ((cvar in new_cond) and (new_cond[cvar] != cval)):
                            # Contradictory condition: drop this combination.
                            break
                        new_cond[cvar] = cval
                    else:
                        # No contradiction found: record the "none of those" effect.
                        effects_by_variable[var][none_of_those].append(new_cond)
    return build_sas_operator(operator.name, condition, effects_by_variable, operator.cost, ranges, implied_facts)
# NOTE(review): the bare ('Sigmoid') expression below is almost certainly the
# argument list of a registration decorator (e.g. a translator-registry
# Register call) whose name was stripped during extraction — confirm upstream.
('Sigmoid')
def TranslateSigmoid(layer, pretrained_blobs, is_test, **kwargs):
    """Translate a Caffe Sigmoid layer into the equivalent Caffe2 operator.

    Returns a (caffe2_op, params) pair; Sigmoid has no learnable parameters,
    so the parameter list is empty.
    """
    caffe_op = BaseTranslate(layer, 'Sigmoid')
    return (caffe_op, [])
def create_branch_coverage_fitness_functions(executor: AbstractTestCaseExecutor, branch_goal_pool: BranchGoalPool) -> OrderedSet[BranchCoverageTestFitness]:
    """Create one branch-coverage fitness function per goal in the pool."""
    fitness_functions = (BranchCoverageTestFitness(executor, goal) for goal in branch_goal_pool.branch_coverage_goals)
    return OrderedSet(fitness_functions)
class TransformerConfig(object):
    """Configuration container for a Transformer encoder.

    Every constructor argument is stored verbatim as an instance attribute of
    the same name; see the signature for the available hyperparameters and
    their defaults.
    """

    def __init__(self, hidden_size: int=768, num_hidden_layers: int=3, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: str='gelu', hidden_dropout_prob: float=0.1, attention_probs_dropout_prob: float=0.1, initializer_range: float=0.02, layer_norm_eps: float=1e-12, share_layer: bool=False, pre_layer_norm: bool=False):
        # Store each hyperparameter under its own name, in signature order.
        hyperparams = dict(
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            share_layer=share_layer,
            pre_layer_norm=pre_layer_norm,
        )
        for attr_name, attr_value in hyperparams.items():
            setattr(self, attr_name, attr_value)
def test_check_sampling_strategy_error():
    """check_sampling_strategy rejects bad sampling types, degenerate targets,
    and unknown strategy strings with informative ValueErrors."""
    error_cases = [
        ("'sampling_type' should be one of", ('auto', np.array([1, 2, 3]), 'rnd')),
        ("The target 'y' needs to have more than 1 class.", ('auto', np.ones((10,)), 'over-sampling')),
        ("When 'sampling_strategy' is a string, it needs to be one of", ('rnd', np.array([1, 2, 3]), 'over-sampling')),
    ]
    for expected_regex, call_args in error_cases:
        with pytest.raises(ValueError, match=expected_regex):
            check_sampling_strategy(*call_args)
class TestCensoredData():
def test_basic(self):
uncensored = [1]
left = [0]
right = [2, 5]
interval = [[2, 3]]
data = CensoredData(uncensored, left=left, right=right, interval=interval)
assert_equal(data._uncensored, uncensored)
assert_equal(data._left, left)
assert_equal(data._right, right)
assert_equal(data._interval, interval)
udata = data._uncensor()
assert_equal(udata, np.concatenate((uncensored, left, right, np.mean(interval, axis=1))))
def test_right_censored(self):
x = np.array([0, 3, 2.5])
is_censored = np.array([0, 1, 0], dtype=bool)
data = CensoredData.right_censored(x, is_censored)
assert_equal(data._uncensored, x[(~ is_censored)])
assert_equal(data._right, x[is_censored])
assert_equal(data._left, [])
assert_equal(data._interval, np.empty((0, 2)))
def test_left_censored(self):
x = np.array([0, 3, 2.5])
is_censored = np.array([0, 1, 0], dtype=bool)
data = CensoredData.left_censored(x, is_censored)
assert_equal(data._uncensored, x[(~ is_censored)])
assert_equal(data._left, x[is_censored])
assert_equal(data._right, [])
assert_equal(data._interval, np.empty((0, 2)))
def test_interval_censored_basic(self):
a = [0.5, 2.0, 3.0, 5.5]
b = [1.0, 2.5, 3.5, 7.0]
data = CensoredData.interval_censored(low=a, high=b)
assert_array_equal(data._interval, np.array(list(zip(a, b))))
assert (data._uncensored.shape == (0,))
assert (data._left.shape == (0,))
assert (data._right.shape == (0,))
def test_interval_censored_mixed(self):
a = [0.5, (- np.inf), (- 13.0), 2.0, 1.0, 10.0, (- 1.0)]
b = [0.5, 2500.0, np.inf, 3.0, 1.0, 11.0, np.inf]
data = CensoredData.interval_censored(low=a, high=b)
assert_array_equal(data._interval, [[2.0, 3.0], [10.0, 11.0]])
assert_array_equal(data._uncensored, [0.5, 1.0])
assert_array_equal(data._left, [2500.0])
assert_array_equal(data._right, [(- 13.0), (- 1.0)])
def test_interval_to_other_types(self):
interval = np.array([[0, 1], [2, 2], [3, 3], [9, np.inf], [8, np.inf], [(- np.inf), 0], [1, 2]])
data = CensoredData(interval=interval)
assert_equal(data._uncensored, [2, 3])
assert_equal(data._left, [0])
assert_equal(data._right, [9, 8])
assert_equal(data._interval, [[0, 1], [1, 2]])
def test_empty_arrays(self):
data = CensoredData(uncensored=[], left=[], right=[], interval=[])
assert (data._uncensored.shape == (0,))
assert (data._left.shape == (0,))
assert (data._right.shape == (0,))
assert (data._interval.shape == (0, 2))
assert (len(data) == 0)
def test_invalid_constructor_args(self):
with pytest.raises(ValueError, match='must be a one-dimensional'):
CensoredData(uncensored=[[1, 2, 3]])
with pytest.raises(ValueError, match='must be a one-dimensional'):
CensoredData(left=[[1, 2, 3]])
with pytest.raises(ValueError, match='must be a one-dimensional'):
CensoredData(right=[[1, 2, 3]])
with pytest.raises(ValueError, match='must be a two-dimensional'):
CensoredData(interval=[[1, 2, 3]])
with pytest.raises(ValueError, match='must not contain nan'):
CensoredData(uncensored=[1, np.nan, 2])
with pytest.raises(ValueError, match='must not contain nan'):
CensoredData(left=[1, np.nan, 2])
with pytest.raises(ValueError, match='must not contain nan'):
CensoredData(right=[1, np.nan, 2])
with pytest.raises(ValueError, match='must not contain nan'):
CensoredData(interval=[[1, np.nan], [2, 3]])
with pytest.raises(ValueError, match='both values must not be infinite'):
CensoredData(interval=[[1, 3], [2, 9], [np.inf, np.inf]])
with pytest.raises(ValueError, match='left value must not exceed the right'):
CensoredData(interval=[[1, 0], [2, 2]])
.parametrize('func', [CensoredData.left_censored, CensoredData.right_censored])
def test_invalid_left_right_censored_args(self, func):
with pytest.raises(ValueError, match='`x` must be one-dimensional'):
func([[1, 2, 3]], [0, 1, 1])
with pytest.raises(ValueError, match='`censored` must be one-dimensional'):
func([1, 2, 3], [[0, 1, 1]])
with pytest.raises(ValueError, match='`x` must not contain'):
func([1, 2, np.nan], [0, 1, 1])
with pytest.raises(ValueError, match='must have the same length'):
func([1, 2, 3], [0, 0, 1, 1])
def test_invalid_censored_args(self):
with pytest.raises(ValueError, match='`low` must be a one-dimensional'):
CensoredData.interval_censored(low=[[3]], high=[4, 5])
with pytest.raises(ValueError, match='`high` must be a one-dimensional'):
CensoredData.interval_censored(low=[3], high=[[4, 5]])
with pytest.raises(ValueError, match='`low` must not contain'):
CensoredData.interval_censored([1, 2, np.nan], [0, 1, 1])
with pytest.raises(ValueError, match='must have the same length'):
CensoredData.interval_censored([1, 2, 3], [0, 0, 1, 1])
def test_count_censored(self):
x = [1, 2, 3]
data1 = CensoredData(x)
assert (data1.num_censored() == 0)
data2 = CensoredData(uncensored=[2.5], left=[10], interval=[[0, 1]])
assert (data2.num_censored() == 2) |
# NOTE(review): the bare name below is almost certainly the tail of a garbled
# decorator (e.g. '@ex.named_config' from sacred) lost in extraction — confirm.
_config
def task_finetune_tgifqa():
    """Named config: fine-tune on TGIF-QA with the open-ended VQA objective.

    In the named-config pattern, each local variable below overrides the
    corresponding key of the base configuration.
    """
    exp_name = 'finetune_tgif_qa'
    datasets = ['tgif']
    loss_names = _loss_names({'openend_vqa': 1})
    batch_size = 512
    # Answer-vocabulary size used by the open-ended VQA head.
    msrvttqa_label_size = 1541
    max_epoch = 20
    max_steps = None
    warmup_steps = 0.1  # presumably a fraction of total steps — confirm
    draw_false_image = 0
    learning_rate = 0.0001
    val_check_interval = 1.0
    lr_mult = 10  # presumably a task-head LR multiplier — confirm
# NOTE(review): the three bare names below look like stripped requirement
# decorators (e.g. @require_torch / @require_sentencepiece / @require_tokenizers)
# lost in extraction — confirm against the original test module.
_torch
_sentencepiece
_tokenizers
class TrainerIntegrationPrerunTest(TestCasePlus, TrainerIntegrationCommon):
    """Integration tests for the Trainer training loop.

    setUp trains two small reference regression models (default seed and
    seed=314); individual tests compare freshly trained weights against these
    references via check_trained_model.
    """

    def setUp(self):
        super().setUp()
        args = TrainingArguments('.')
        self.n_epochs = args.num_train_epochs
        self.batch_size = args.train_batch_size
        # Reference model trained with the default seed.
        trainer = get_regression_trainer(learning_rate=0.1)
        trainer.train()
        self.default_trained_model = (trainer.model.a, trainer.model.b)
        # Reference model trained with an alternate seed.
        trainer = get_regression_trainer(learning_rate=0.1, seed=314)
        trainer.train()
        self.alternate_trained_model = (trainer.model.a, trainer.model.b)

    def check_trained_model(self, model, alternate_seed=False):
        # Compare trained weights against the matching reference model.
        (a, b) = (self.alternate_trained_model if alternate_seed else self.default_trained_model)
        self.assertTrue(torch.allclose(model.a, a))
        self.assertTrue(torch.allclose(model.b, b))

    def test_reproducible_training(self):
        trainer = get_regression_trainer(learning_rate=0.1)
        trainer.train()
        self.check_trained_model(trainer.model)
        trainer = get_regression_trainer(learning_rate=0.1, seed=314)
        trainer.train()
        self.check_trained_model(trainer.model, alternate_seed=True)

    def test_trainer_with_datasets(self):
        import datasets
        np.random.seed(42)
        x = np.random.normal(size=(64,)).astype(np.float32)
        y = (((2.0 * x) + 3.0) + np.random.normal(scale=0.1, size=(64,)))
        # Base case: a datasets.Dataset with exactly the expected columns.
        train_dataset = datasets.Dataset.from_dict({'input_x': x, 'label': y})
        model = RegressionModel()
        args = TrainingArguments('./regression', learning_rate=0.1)
        trainer = Trainer(model, args, train_dataset=train_dataset)
        trainer.train()
        self.check_trained_model(trainer.model)
        # Same dataset pre-converted to torch tensors.
        train_dataset.set_format(type='torch', dtype=torch.float32)
        model = RegressionModel()
        trainer = Trainer(model, args, train_dataset=train_dataset)
        trainer.train()
        self.check_trained_model(trainer.model)
        # An extra unused column must not change the trained result.
        z = np.random.normal(size=(64,)).astype(np.float32)
        train_dataset = datasets.Dataset.from_dict({'input_x': x, 'label': y, 'extra': z})
        model = RegressionModel()
        trainer = Trainer(model, args, train_dataset=train_dataset)
        trainer.train()
        self.check_trained_model(trainer.model)

    def test_model_init(self):
        train_dataset = RegressionDataset()
        args = TrainingArguments('./regression', learning_rate=0.1)
        trainer = Trainer(args=args, train_dataset=train_dataset, model_init=(lambda : RegressionModel()))
        trainer.train()
        self.check_trained_model(trainer.model)
        # Re-training with model_init must be reproducible.
        trainer.train()
        self.check_trained_model(trainer.model)
        # Changing the seed must reproduce the alternate-seed reference.
        trainer.args.seed = 314
        trainer.train()
        self.check_trained_model(trainer.model, alternate_seed=True)

    def test_gradient_accumulation(self):
        # Accumulating 2 half-size batches must match the full-batch reference.
        trainer = get_regression_trainer(gradient_accumulation_steps=2, per_device_train_batch_size=4, learning_rate=0.1)
        trainer.train()
        self.check_trained_model(trainer.model)

    def test_training_loss(self):
        n_gpus = max(1, get_gpu_count())
        trainer = get_regression_trainer(logging_steps=(64 / (8 * n_gpus)))
        trainer.train()
        log_history = trainer.state.log_history
        losses = [log['loss'] for log in log_history if ('loss' in log)]
        train_loss = log_history[(- 1)]['train_loss']
        # Final reported train_loss must equal the mean of the logged losses.
        self.assertAlmostEqual((sum(losses) / len(losses)), train_loss, places=4)
        # A different logging interval must not change the overall train_loss.
        trainer = get_regression_trainer(logging_steps=5)
        trainer.train()
        log_history = trainer.state.log_history
        new_train_loss = log_history[(- 1)]['train_loss']
        self.assertAlmostEqual(train_loss, new_train_loss, places=4)

    def test_custom_optimizer(self):
        train_dataset = RegressionDataset()
        args = TrainingArguments('./regression')
        model = RegressionModel()
        optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
        lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=(lambda x: 1.0))
        trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler))
        trainer.train()
        # A custom optimizer (lr=1.0) must diverge from the default reference.
        (a, b) = self.default_trained_model
        self.assertFalse(torch.allclose(trainer.model.a, a))
        self.assertFalse(torch.allclose(trainer.model.b, b))
        self.assertEqual(trainer.optimizer.state_dict()['param_groups'][0]['lr'], 1.0)

    def test_adafactor_lr_none(self):
        from transformers.optimization import Adafactor, AdafactorSchedule
        train_dataset = RegressionDataset()
        args = TrainingArguments('./regression')
        model = RegressionModel()
        # lr=None lets Adafactor derive the learning rate internally.
        optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
        lr_scheduler = AdafactorSchedule(optimizer)
        trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler))
        trainer.train()
        (a, b) = self.default_trained_model
        self.assertFalse(torch.allclose(trainer.model.a, a))
        self.assertFalse(torch.allclose(trainer.model.b, b))
        self.assertGreater(trainer.optimizer.state_dict()['param_groups'][0]['lr'], 0)

    # NOTE(review): stripped requirement decorators (likely @require_torch_gpu
    # and @require_torch_bf16) — confirm.
    _torch_gpu
    _torch_bf16
    def test_mixed_bf16(self):
        trainer = get_regression_trainer(learning_rate=0.1, bf16=True)
        trainer.train()
        self.check_trained_model(trainer.model)
        # bf16 cannot be combined with the apex half-precision backend.
        with self.assertRaises(ValueError):
            trainer = get_regression_trainer(learning_rate=0.1, bf16=True, half_precision_backend='apex')

    # NOTE(review): stripped requirement decorators (likely @require_torch_gpu
    # and @require_torch_tf32) — confirm.
    _torch_gpu
    _torch_tf32
    def test_tf32(self):
        trainer = get_regression_trainer(learning_rate=0.1, tf32=True)
        trainer.train()
        self.check_trained_model(trainer.model)
def register_Ns3HeCapabilities_methods(root_module, cls):
    """Register constructors and methods of ns3::HeCapabilities on the
    pybindgen class wrapper (auto-generated binding boilerplate)."""
    cls.add_output_stream_operator()
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::HeCapabilities const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('DeserializeInformationField', 'uint8_t', [param('ns3::Buffer::Iterator', 'start'), param('uint8_t', 'length')], is_virtual=True)
    cls.add_method('ElementId', 'ns3::WifiInformationElementId', [], is_const=True, is_virtual=True)
    cls.add_method('ElementIdExt', 'ns3::WifiInformationElementId', [], is_const=True, is_virtual=True)
    cls.add_method('GetChannelWidthSet', 'uint8_t', [], is_const=True)
    cls.add_method('GetHeLtfAndGiForHePpdus', 'uint8_t', [], is_const=True)
    cls.add_method('GetHeMacCapabilitiesInfo1', 'uint32_t', [], is_const=True)
    cls.add_method('GetHeMacCapabilitiesInfo2', 'uint8_t', [], is_const=True)
    cls.add_method('GetHePhyCapabilitiesInfo1', 'uint64_t', [], is_const=True)
    cls.add_method('GetHePhyCapabilitiesInfo2', 'uint8_t', [], is_const=True)
    cls.add_method('GetHighestMcsSupported', 'uint8_t', [], is_const=True)
    cls.add_method('GetHighestNssSupported', 'uint8_t', [], is_const=True)
    cls.add_method('GetInformationFieldSize', 'uint8_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint16_t', [], is_const=True)
    cls.add_method('GetSupportedMcsAndNss', 'uint16_t', [], is_const=True)
    cls.add_method('IsSupportedRxMcs', 'bool', [param('uint8_t', 'mcs')], is_const=True)
    cls.add_method('IsSupportedTxMcs', 'bool', [param('uint8_t', 'mcs')], is_const=True)
    cls.add_method('Serialize', 'ns3::Buffer::Iterator', [param('ns3::Buffer::Iterator', 'start')], is_const=True)
    cls.add_method('SerializeInformationField', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    cls.add_method('SetChannelWidthSet', 'void', [param('uint8_t', 'channelWidthSet')])
    cls.add_method('SetHeLtfAndGiForHePpdus', 'void', [param('uint8_t', 'heLtfAndGiForHePpdus')])
    cls.add_method('SetHeMacCapabilitiesInfo', 'void', [param('uint32_t', 'ctrl1'), param('uint8_t', 'ctrl2')])
    cls.add_method('SetHePhyCapabilitiesInfo', 'void', [param('uint64_t', 'ctrl1'), param('uint8_t', 'ctrl2')])
    cls.add_method('SetHeSupported', 'void', [param('uint8_t', 'hesupported')])
    cls.add_method('SetHighestMcsSupported', 'void', [param('uint8_t', 'mcs')])
    cls.add_method('SetHighestNssSupported', 'void', [param('uint8_t', 'nss')])
    cls.add_method('SetMaxAmpduLengthExponent', 'void', [param('uint8_t', 'exponent')])
    cls.add_method('SetSupportedMcsAndNss', 'void', [param('uint16_t', 'ctrl')])
    return
class CONV_AE(nn.Module):
    """Convolutional autoencoder with a mirrored conv encoder / deconv decoder.

    Args:
        input_dims: spatial size of the input, one entry per conv dimension.
        encoding_dim: size of the latent bottleneck vector.
        kernel: int or per-dimension tuple of kernel sizes.
        stride: int or per-dimension tuple of strides.
        in_channels: number of channels of the input tensor.
        h_channels: channel counts of the hidden conv layers.
    """

    def __init__(self, input_dims, encoding_dim, kernel, stride, in_channels=1, h_channels=[1]):
        # NOTE(review): mutable default h_channels=[1] is shared across calls;
        # harmless as long as it is never mutated — confirm.
        super(CONV_AE, self).__init__()
        conv_dim = len(input_dims)
        all_channels = ([in_channels] + h_channels)
        num_layers = (len(all_channels) - 1)
        # Broadcast scalar kernel/stride to one value per conv dimension.
        if isinstance(kernel, int):
            kernel = ((kernel,) * conv_dim)
        if isinstance(stride, int):
            stride = ((stride,) * conv_dim)
        # Spatial size after each conv layer, computed per dimension.
        out_dims = []
        for (i, k, s) in zip(input_dims, kernel, stride):
            out_dims.append(compute_output_dim(num_layers, i, k, s, []))
        out_dims = ([input_dims] + list(zip(*out_dims)))
        # Reversed so self.out_dims[0] is the innermost (smallest) size.
        self.out_dims = out_dims[::(- 1)]
        out_dims = self.out_dims[0]
        flat_dim = (all_channels[(- 1)] * reduce((lambda x, y: (x * y)), out_dims))
        encoder_layers = []
        # Decoder starts by expanding the latent vector and un-flattening it.
        self.decoder_layers = nn.ModuleList([nn.Linear(encoding_dim, flat_dim), UnFlatten(all_channels[(- 1)], out_dims)])
        for index in range(num_layers):
            conv_layer = ConvUnit(in_channels=all_channels[index], out_channels=all_channels[(index + 1)], kernel=kernel, stride=stride, dim=conv_dim)
            # Decoder mirrors the encoder: channels traversed in reverse order.
            deconv_layer = DeConvUnit(in_channels=all_channels[((- index) - 1)], out_channels=all_channels[((- index) - 2)], kernel=kernel, stride=stride, dim=conv_dim)
            encoder_layers.append(conv_layer)
            self.decoder_layers.append(deconv_layer)
        encoder_layers.extend([Flatten(), nn.Linear(flat_dim, encoding_dim)])
        self.encoder = nn.Sequential(*encoder_layers)

    def decoder(self, x):
        """Decode a latent vector back to the input shape."""
        for (index, layer) in enumerate(self.decoder_layers):
            if isinstance(layer, DeConvUnit):
                # Deconv layers get an explicit output_size to undo stride rounding.
                x = layer(x, output_size=self.out_dims[1:][(index - 2)])
            else:
                x = layer(x)
        return x

    def forward(self, x):
        # Encode to the latent vector, then reconstruct.
        x = self.encoder(x)
        x = self.decoder(x)
        return x
def register_Ns3DownlinkLteGlobalPathlossDatabase_methods(root_module, cls):
    """Register constructors and methods of ns3::DownlinkLteGlobalPathlossDatabase
    on the pybindgen class wrapper (auto-generated binding boilerplate)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::DownlinkLteGlobalPathlossDatabase const &', 'arg0')])
    cls.add_method('UpdatePathloss', 'void', [param('std::string', 'context'), param('ns3::Ptr< ns3::SpectrumPhy >', 'txPhy'), param('ns3::Ptr< ns3::SpectrumPhy >', 'rxPhy'), param('double', 'lossDb')], is_virtual=True)
    return
def possible_mu0s(SUK, v):
    """Return the candidate ``mu0`` values for the S-unit equation machinery (Sage).

    Builds products of fundamental units (with exponents below ``|nk|``, the
    valuation of the distinguished unit ``betak`` at ``v``) times roots of
    unity, keeping products whose weighted exponent sum is divisible by ``nk``.
    """
    # Pair each fundamental unit with its valuation at the place v.
    beta_and_ns = [[beta, beta.valuation(v)] for beta in SUK.fundamental_units()]
    (betak, nk) = beta_k(beta_and_ns)
    # Valuations and units excluding the distinguished unit betak.
    ns = [beta[1] for beta in beta_and_ns if (beta[0] != betak)]
    betas = [beta[0] for beta in beta_and_ns if (beta[0] != betak)]
    mu0s = []
    for rs in combinations_with_replacement(range(abs(nk)), len(betas)):
        n_rs = zip(ns, rs)
        sigma_tilde = (- sum([(n_r[0] * n_r[1]) for n_r in n_rs]))
        if ((sigma_tilde % nk) == 0):
            beta_rs = zip(betas, rs)
            # NOTE(review): sigma_tilde / nk relies on Sage's exact division
            # (the divisibility was just checked); under plain Python 3 ints
            # this would yield a float — confirm this runs under Sage.
            temp_prod = (prod([(beta_r[0] ** beta_r[1]) for beta_r in beta_rs]) * (betak ** (sigma_tilde / nk)))
            # Multiply by every root of unity, deduplicating as we go.
            for alpha0 in SUK.roots_of_unity():
                if ((alpha0 * temp_prod) not in mu0s):
                    mu0s.append((alpha0 * temp_prod))
    return mu0s
def calc_kl_scaler_by_batch(batch_num, min_kl, max_kl, batches_to_anneal_over):
    """Linearly anneal the KL weight with the batch counter, capped at ``max_kl``.

    NOTE(review): ``min_kl`` is accepted but never used — confirm whether a
    lower clamp was intended.
    """
    annealed_fraction = float(batch_num) / batches_to_anneal_over
    return min(annealed_fraction, max_kl)
def experiment(args):
    """Train the ChessLM language model with PyTorch Lightning, resuming from
    an existing checkpoint when present, then evaluate the best checkpoint and
    write its test metrics to ``perf.json``.
    """
    logger = TensorBoardLogger(save_dir=args.save_dir, version=args.model_name, name=None)
    lr_logger = LearningRateLogger()
    checkpoint_callback = MyModelCheckpoint(verbose=True, save_top_k=1, period=(- 1), save_last=True, prefix='lm_')
    early_stop_callback = EarlyStopping(monitor='val_loss', patience=1, verbose=True, mode='min')
    resume_from_checkpoint = None
    stop_training = False
    potential_old_checkpoint = path.join(logger.log_dir, 'checkpoints/lm_last.ckpt')
    if path.isfile(potential_old_checkpoint):
        # Resume: restore callback state from the previous run's last checkpoint.
        resume_from_checkpoint = potential_old_checkpoint
        print('Resuming training from: ', potential_old_checkpoint)
        last_checkpoint = torch.load(potential_old_checkpoint)
        checkpoint_callback.best_model_score = last_checkpoint['checkpoint_callback_best_model_score']
        checkpoint_callback.best_model_path = last_checkpoint['checkpoint_callback_best_model_path']
        early_stop_dict = last_checkpoint['early_stop_callback_state_dict']
        print(early_stop_dict)
        if (early_stop_dict['wait_count'] > 0):
            print('Early stopping criteria already met, no more training')
            stop_training = True
        latest_score = last_checkpoint['checkpoint_callback_best_model_score']
        best_score = early_stop_dict['best_score']
        print('\n\n')
        print(best_score, latest_score)
        if (best_score.item() < latest_score.item()):
            # The checkpoint's recorded "best" score is stale; repair it from
            # the early-stopping state and re-derive the matching model path.
            last_checkpoint['checkpoint_callback_best_model_score'] = best_score
            checkpoint_callback.best_model_score = best_score
            model_directory = path.join(logger.log_dir, 'checkpoints')
            base_name = path.basename(last_checkpoint['checkpoint_callback_best_model_path'])
            epoch_num = int(base_name.split('_epoch_')[1].split('_')[0])
            import glob
            print(model_directory)
            # NOTE(review): assumes the true best checkpoint is exactly one
            # epoch before the recorded one — confirm.
            best_model_path = glob.glob(path.join(model_directory, f'lm_epoch_{(epoch_num - 1)}*'))
            if len(best_model_path):
                best_model_path = best_model_path[0]
                print('\n\nFixed the best model path:', best_model_path)
                checkpoint_callback.best_model_path = best_model_path
                last_checkpoint['checkpoint_callback_best_model_path'] = best_model_path
    if (not stop_training):
        sys.stdout.flush()
        # Emulate the requested effective batch size via gradient accumulation.
        args.accumulate_grad_batches = max((args.real_batch_size // args.batch_size), 1)
        trainer = Trainer.from_argparse_args(args, amp_level='O1', gpus=(- 1), precision=args.precision, weights_save_path=args.save_dir, resume_from_checkpoint=resume_from_checkpoint, checkpoint_callback=checkpoint_callback, early_stop_callback=early_stop_callback, logger=logger, callbacks=[lr_logger], reload_dataloaders_every_epoch=True, gradient_clip_val=1.0, terminate_on_nan=True, row_log_interval=100, log_save_interval=1000)
        train_percent_check = (1 if (args.train_percent_check is None) else args.train_percent_check)
        one_epoch_games = int((args.train_size * train_percent_check))
        one_epoch_batches = int(math.ceil((one_epoch_games / (args.batch_size * args.accumulate_grad_batches))))
        print(f'One epoch batches: {one_epoch_batches}')
        # Total optimizer steps, used by the LR schedule inside the model.
        args.num_training_steps = (one_epoch_batches * args.max_epochs)
        print(('Number of training steps: %d' % args.num_training_steps))
        lm_model = ChessLM(args, **vars(args))
        trainer.fit(lm_model)
    # Evaluation: reload the best checkpoint recorded by (this or a past) run.
    print(potential_old_checkpoint)
    last_checkpoint = torch.load(potential_old_checkpoint)
    print('Best validation model path: ', last_checkpoint['checkpoint_callback_best_model_path'])
    print('Best validation performance:', last_checkpoint['checkpoint_callback_best_model_score'])
    lm_model = ChessLM.load_from_checkpoint(checkpoint_path=last_checkpoint['checkpoint_callback_best_model_path'], other_eval=args.other_eval)
    trainer = Trainer.from_argparse_args(args, amp_level='O1', gpus=1, precision=args.precision, weights_save_path=args.save_dir, logger=logger, callbacks=[lr_logger], row_log_interval=100, log_save_interval=100)
    test_perf = trainer.test(lm_model)[0]
    print(test_perf)
    current_wd = os.getcwd()
    best_model_dir = path.join(current_wd, last_checkpoint['checkpoint_callback_best_model_path'])
    test_perf['best_model_path'] = best_model_dir
    test_perf['best_val_score'] = last_checkpoint['checkpoint_callback_best_model_score']
    # Tensors are not JSON-serializable; convert to rounded floats first.
    for key in test_perf:
        if isinstance(test_perf[key], torch.Tensor):
            test_perf[key] = round(test_perf[key].item(), 4)
    output_dir = path.join(current_wd, logger.log_dir)
    perf_file = path.join(output_dir, 'perf.json')
    with open(perf_file, 'w') as f:
        f.write(json.dumps(test_perf))
class AttentionDecoderOutput(namedtuple('DecoderOutput', ['logits', 'predicted_ids', 'cell_output', 'attention_scores', 'attention_context'])):
    """Immutable bundle of the values produced by one attention-decoder step.

    Fields:
        logits: unnormalized output scores.
        predicted_ids: ids selected from the logits.
        cell_output: raw output of the decoder cell.
        attention_scores: attention weights over the encoder outputs.
        attention_context: context vector built from the attention weights.
    """
def test_sorted_slice_sampler():
    """SortedSliceSampler must yield length-sorted batches, halving the batch
    size whenever the longest item exceeds ``max_length``."""
    batch_size = 16
    max_length = 16000 * 5
    # 1000 random clip lengths between 3 and 8 seconds at 16 kHz.
    lengths = [random.randint(16000 * 3, 16000 * 8) for _ in range(1000)]
    sampler = SortedSliceSampler(lengths, batch_size=batch_size, max_length=max_length)
    for epoch in range(5):
        sampler.set_epoch(epoch)
        for batch_ids in sampler:
            batch_lengths = [lengths[i] for i in batch_ids]
            # Batches come out longest-first.
            assert batch_lengths == sorted(batch_lengths, reverse=True)
            if batch_lengths[0] > max_length:
                # Over-long batches are halved.
                assert len(batch_lengths) == batch_size // 2
        # At most one "remainder" batch of any other size per epoch.
        leftover_sizes = [len(b) for b in sampler if len(b) not in (batch_size, batch_size // 2)]
        assert len(set(leftover_sizes)) == len(leftover_sizes)
        assert len(sampler) == len(lengths)
def test_raises_when_source_is_sink():
    """``maximum_flow`` must reject source == sink for every method.

    The original version put both calls inside a single ``pytest.raises``
    block, so the second call (method='edmonds_karp') was never executed:
    the first call raised and control left the context manager.  Each call
    now gets its own ``raises`` block.
    """
    graph = csr_matrix([[0, 1], [0, 0]])
    with pytest.raises(ValueError):
        maximum_flow(graph, 0, 0)
    with pytest.raises(ValueError):
        maximum_flow(graph, 0, 0, method='edmonds_karp')
def combine_bc(a: Tensor, kind: str, b: Tensor, *, dim_order: Optional[Sequence[Dim]]=None) -> Tensor:
    """Like :func:`combine`, but with broadcasting enabled for all sources."""
    result = combine(a, kind, b, allow_broadcast_all_sources=True, dim_order=dim_order)
    return result
def get_class_labels(data):
    """Map each class label to a sequential zero-based index.

    Args:
        data: Mapping with a 'labels' key holding an iterable of labels.

    Returns:
        dict: ``label -> index`` in iteration order.  As in the original
        manual-counter loop, a duplicate label keeps its *last* index.
    """
    return {label: index for index, label in enumerate(data['labels'])}
def inference_segmentor_panoptic(model, img):
    """Run single-image panoptic segmentation inference with ``model``.

    Builds the model's configured test pipeline (replacing its first, file-based
    loading step with an in-memory ``LoadImage``), collates a one-sample batch,
    moves it to the model's device when on GPU, and invokes the model with
    panoptic-deeplab evaluation/post-processing parameters.

    Args:
        model: A built segmentor exposing a ``cfg`` attribute and parameters.
        img: Image filename or array accepted by ``LoadImage``.

    Returns:
        The model's inference result (format defined by the model's forward).
    """
    cfg = model.cfg
    device = next(model.parameters()).device
    # Keep the configured pipeline but swap in direct in-memory image loading.
    test_pipeline = ([LoadImage()] + cfg.data.test['pipeline'][1:])
    test_pipeline = Compose(test_pipeline)
    data = dict(img=img)
    data = test_pipeline(data)
    data = collate([data], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # GPU path: scatter the collated batch onto the model's device.
        data = scatter(data, [device])[0]
    else:
        # CPU path: unwrap the DataContainer metadata manually.
        data['img_metas'] = [i.data[0] for i in data['img_metas']]
    visuals_pan_eval = False
    # Panoptic-DeepLab style post-processing thresholds.
    post_proccess_params = {}
    post_proccess_params['center_threshold'] = 0.5
    post_proccess_params['nms_kernel'] = 7
    post_proccess_params['top_k_instance'] = 200
    # Evaluation kwargs forwarded into the model's test-time forward; the
    # empty folder/gt paths presumably disable on-disk evaluation — confirm.
    kwargs = {}
    kwargs['eval_kwargs'] = dict(interval=1, metric='mIoU', eval_type='panop_deeplab', panop_eval_folder='', panop_eval_temp_folder='', dataset_name='cityscapes', gt_dir='', debug=False, num_samples_debug=12, gt_dir_panop='', post_proccess_params=post_proccess_params, visuals_pan_eval=visuals_pan_eval)
    with torch.no_grad():
        result = model(return_loss=False, rescale=True, **data, **kwargs)
    return result
def _find_parent_directory_containing(base: Path, target: str, predicate) -> Optional[str]:
resolved_base: str = base.resolve(strict=False)
for candidate_directory in itertools.chain([resolved_base], resolved_base.parents):
candidate_path = (candidate_directory / target)
try:
if predicate(candidate_path):
return candidate_directory
except PermissionError:
pass
return None |
class CppEnum(EnumBuilder, CppBase):
    def string_cast_type(self):
        """Return the storage type to use when casting to/from strings.

        int8_t streams as a character, so it is widened to int16_t; every
        other storage type is used as-is.
        """
        name = str(self.storage_type)
        if name == 'int8_t':
            return 'int16_t'
        return name
class Communication():
    """Bridge between XTDrone command topics and MAVROS for a single rover.

    Subscribes to XTDrone pose/velocity/command topics and republishes the
    most recent requested motion as a MAVROS ``setpoint_raw/local`` target.
    """

    def __init__(self, vehicle_id):
        # Every topic/service below is namespaced '<vehicle_type>_<vehicle_id>/...'.
        self.vehicle_type = 'rover'
        self.vehicle_id = vehicle_id
        # Latest pose from MAVROS; None until the first message arrives.
        self.local_pose = None
        self.target_motion = PositionTarget()
        self.arm_state = False
        self.motion_type = 0
        self.flight_mode = None
        self.mission = None
        # NOTE(review): overrides the motion_type = 0 assignment above, making
        # velocity control the effective default — confirm this is intentional.
        self.motion_type = 1
        # MAVROS coordinate_frame id; presumably FRAME_LOCAL_NED — confirm.
        self.coordinate_frame = 1
        # --- feedback subscribers ---
        self.local_pose_sub = rospy.Subscriber((((self.vehicle_type + '_') + self.vehicle_id) + '/mavros/local_position/pose'), PoseStamped, self.local_pose_callback, queue_size=1)
        self.mavros_sub = rospy.Subscriber((((self.vehicle_type + '_') + self.vehicle_id) + '/mavros/state'), State, self.mavros_state_callback, queue_size=1)
        # --- XTDrone command subscribers (pose/velocity in FLU or ENU frames) ---
        self.cmd_pose_flu_sub = rospy.Subscriber((((('/xtdrone/' + self.vehicle_type) + '_') + self.vehicle_id) + '/cmd_pose_flu'), Pose, self.cmd_pose_flu_callback, queue_size=1)
        self.cmd_pose_enu_sub = rospy.Subscriber((((('/xtdrone/' + self.vehicle_type) + '_') + self.vehicle_id) + '/cmd_pose_enu'), Pose, self.cmd_pose_enu_callback, queue_size=1)
        self.cmd_vel_flu_sub = rospy.Subscriber((((('/xtdrone/' + self.vehicle_type) + '_') + self.vehicle_id) + '/cmd_vel_flu'), Twist, self.cmd_vel_flu_callback, queue_size=1)
        self.cmd_vel_enu_sub = rospy.Subscriber((((('/xtdrone/' + self.vehicle_type) + '_') + self.vehicle_id) + '/cmd_vel_enu'), Twist, self.cmd_vel_enu_callback, queue_size=1)
        self.cmd_sub = rospy.Subscriber((((('/xtdrone/' + self.vehicle_type) + '_') + self.vehicle_id) + '/cmd'), String, self.cmd_callback, queue_size=3)
        # --- outgoing setpoints and MAVROS services ---
        self.target_motion_pub = rospy.Publisher((((self.vehicle_type + '_') + self.vehicle_id) + '/mavros/setpoint_raw/local'), PositionTarget, queue_size=1)
        self.armService = rospy.ServiceProxy((((self.vehicle_type + '_') + self.vehicle_id) + '/mavros/cmd/arming'), CommandBool)
        self.flightModeService = rospy.ServiceProxy((((self.vehicle_type + '_') + self.vehicle_id) + '/mavros/set_mode'), SetMode)
        print(((((self.vehicle_type + '_') + self.vehicle_id) + ': ') + 'communication initialized'))

    def start(self):
        """Publish the latest motion target in a loop until ROS shuts down."""
        while (not rospy.is_shutdown()):
            self.target_motion_pub.publish(self.target_motion)
            # NOTE(review): 'rate' is not defined in this class; presumably a
            # module-level rospy.Rate — confirm.
            rate.sleep()

    def local_pose_callback(self, msg):
        # Cache the latest local pose (PoseStamped).
        self.local_pose = msg

    def mavros_state_callback(self, msg):
        # NOTE(review): mavros_state is not initialized in __init__; it only
        # exists after the first state message — confirm readers tolerate that.
        self.mavros_state = msg.mode

    def construct_target(self, x=0, y=0, z=0, vx=0, vy=0, vz=0, yaw=0):
        """Build a PositionTarget using the current coordinate frame.

        motion_type 0 masks out velocity/acceleration fields (position
        control); motion_type 1 masks out position/acceleration fields
        (velocity control).  Yaw rate is ignored in both modes.
        """
        target_raw_pose = PositionTarget()
        target_raw_pose.coordinate_frame = self.coordinate_frame
        target_raw_pose.position.x = x
        target_raw_pose.position.y = y
        target_raw_pose.position.z = z
        target_raw_pose.velocity.x = vx
        target_raw_pose.velocity.y = vy
        target_raw_pose.velocity.z = vz
        target_raw_pose.yaw = yaw
        if (self.motion_type == 0):
            target_raw_pose.type_mask = ((((((PositionTarget.IGNORE_VX + PositionTarget.IGNORE_VY) + PositionTarget.IGNORE_VZ) + PositionTarget.IGNORE_AFX) + PositionTarget.IGNORE_AFY) + PositionTarget.IGNORE_AFZ) + PositionTarget.IGNORE_YAW_RATE)
        if (self.motion_type == 1):
            target_raw_pose.type_mask = ((((((PositionTarget.IGNORE_PX + PositionTarget.IGNORE_PY) + PositionTarget.IGNORE_PZ) + PositionTarget.IGNORE_AFX) + PositionTarget.IGNORE_AFY) + PositionTarget.IGNORE_AFZ) + PositionTarget.IGNORE_YAW_RATE)
        return target_raw_pose

    def cmd_pose_flu_callback(self, msg):
        # Position command in the body (FLU) frame; frame 9 presumably
        # FRAME_BODY_OFFSET_NED — confirm.
        self.coordinate_frame = 9
        self.motion_type = 0
        self.target_motion = self.construct_target(x=msg.position.x, y=msg.position.y, z=msg.position.z, yaw=0)

    def cmd_pose_enu_callback(self, msg):
        # Position command in the local (ENU) frame.
        self.coordinate_frame = 1
        self.motion_type = 0
        self.target_motion = self.construct_target(x=msg.position.x, y=msg.position.y, z=msg.position.z, yaw=0)

    def cmd_vel_flu_callback(self, msg):
        # Velocity command in the body frame; frame 8 presumably
        # FRAME_BODY_NED — confirm.
        self.coordinate_frame = 8
        self.motion_type = 1
        # NOTE(review): the yaw-rate command (angular.z) is routed into the vy
        # slot — presumably how this rover's steering is driven; confirm.
        self.target_motion = self.construct_target(vx=msg.linear.x, vy=msg.angular.z, vz=msg.linear.z, yaw=0)

    def cmd_vel_enu_callback(self, msg):
        # Velocity command in the local (ENU) frame; same angular.z -> vy
        # routing as the FLU handler above.
        self.coordinate_frame = 1
        self.motion_type = 1
        self.target_motion = self.construct_target(vx=msg.linear.x, vy=msg.angular.z, vz=msg.linear.z, yaw=0)

    def cmd_callback(self, msg):
        """Dispatch a string command: ARM/DISARM, 'mission<X>', or a flight mode."""
        if (msg.data == ''):
            return
        elif (msg.data == 'ARM'):
            self.arm_state = self.arm()
            print(((((self.vehicle_type + '_') + self.vehicle_id) + ': Armed ') + str(self.arm_state)))
        elif (msg.data == 'DISARM'):
            # arm_state stays True if disarming failed.
            self.arm_state = (not self.disarm())
            print(((((self.vehicle_type + '_') + self.vehicle_id) + ': Armed ') + str(self.arm_state)))
        elif ((msg.data[:(- 1)] == 'mission') and (not (msg.data == self.mission))):
            # Matches 'mission' plus exactly one trailing character (e.g. 'mission1'),
            # and only when it differs from the current mission.
            self.mission = msg.data
            print(((((self.vehicle_type + '_') + self.vehicle_id) + ': ') + msg.data))
        else:
            # Anything else is treated as a flight-mode name.
            self.flight_mode = msg.data
            self.flight_mode_switch()

    def arm(self):
        """Request arming via MAVROS; returns True on service success."""
        if self.armService(True):
            return True
        else:
            print((((self.vehicle_type + '_') + self.vehicle_id) + ': arming failed!'))
            return False

    def disarm(self):
        """Request disarming via MAVROS; returns True on service success."""
        if self.armService(False):
            return True
        else:
            print((((self.vehicle_type + '_') + self.vehicle_id) + ': disarming failed!'))
            return False

    def flight_mode_switch(self):
        """Ask MAVROS to switch to ``self.flight_mode``; returns True on success."""
        if self.flightModeService(custom_mode=self.flight_mode):
            print(((((self.vehicle_type + '_') + self.vehicle_id) + ': ') + self.flight_mode))
            return True
        else:
            print((((((self.vehicle_type + '_') + self.vehicle_id) + ': ') + self.flight_mode) + 'failed'))
            return False
def main():
    """Train a Detectron WSL model, then run the configured test sweeps.

    After training: tests the final checkpoint; for VOC-style datasets also
    re-tests on the training split (corloc-style evaluation); finally tests
    every intermediate snapshot with augmentation and visualization disabled.
    """
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0', '--caffe2_gpu_memory_tracking=1'])
    logger = setup_logging(__name__)
    logging.getLogger('detectron.roi_data.loader').setLevel(logging.INFO)
    args = parse_args()
    logger.info('Called with args:')
    logger.info(args)
    if (args.cfg_file is not None):
        merge_cfg_from_file(args.cfg_file)
    if (args.opts is not None):
        merge_cfg_from_list(args.opts)
    assert_and_infer_cfg()
    (smi_output, cuda_ver, cudnn_ver) = c2_utils.get_nvidia_info()
    logger.info('cuda version : {}'.format(cuda_ver))
    logger.info('cudnn version: {}'.format(cudnn_ver))
    logger.info('nvidia-smi output:\n{}'.format(smi_output))
    logger.info('Training with config:')
    logger.info(pprint.pformat(cfg))
    np.random.seed(cfg.RNG_SEED)
    checkpoints = detectron.utils.train_wsl.train_model()
    if (not args.skip_test):
        test_model(checkpoints['final'], args.multi_gpu_testing, args.opts)
        print('reprint snapshot name for the result: ', checkpoints['final'])
        if ('voc' in cfg.TRAIN.DATASETS[0]):
            # Temporarily point the test config at the training split, then restore.
            TEST_DATASETS = cfg.TEST.DATASETS
            TEST_PROPOSAL_FILES = cfg.TEST.PROPOSAL_FILES
            cfg.immutable(False)
            cfg.TEST.DATASETS = cfg.TRAIN.DATASETS
            cfg.TEST.PROPOSAL_FILES = cfg.TRAIN.PROPOSAL_FILES
            cfg.immutable(True)
            test_model(checkpoints['final'], args.multi_gpu_testing, args.opts)
            print('reprint snapshot name for the result: ', checkpoints['final'])
            cfg.immutable(False)
            cfg.TEST.DATASETS = TEST_DATASETS
            cfg.TEST.PROPOSAL_FILES = TEST_PROPOSAL_FILES
            cfg.immutable(True)
        # Disable bbox augmentation and visualization for the snapshot sweep.
        cfg.immutable(False)
        cfg.TEST.BBOX_AUG.ENABLED = False
        cfg.VIS = False
        cfg.immutable(True)
        # BUGFIX: dict.iterkeys() is Python 2 only; keys() works on both 2 and 3.
        for snapshot in sorted(checkpoints.keys(), reverse=True):
            test_model(checkpoints[snapshot], args.multi_gpu_testing, args.opts)
            print('reprint snapshot name for the result: ', snapshot, checkpoints[snapshot])
class RegressionErrorsTest(TestCase):
    """Exercise ``regression_errors`` with and without EWMA smoothing."""

    # Ground truth and predictions as column vectors.
    y = np.array([0.0, 0.1, 1.0, 0.5, 0.1, 0.1, 0.0, 0.5]).reshape(-1, 1)
    y_hat = np.array([0.1, 2.0, 0.5, 0.0, 3.0, 0.1, 5.0, 0.5]).reshape(-1, 1)

    def _run(self, smoothing_window, smooth, expected):
        # The computed error sequence must match to within 1% relative tolerance.
        actual = regression_errors(self.y, self.y_hat, smoothing_window, smooth)
        assert_allclose(actual, expected, rtol=0.01)

    def test_no_smooth(self):
        expected = np.array([0.1, 1.9, 0.5, 0.5, 2.9, 0.0, 5.0, 0.0])
        self._run(0, False, expected)

    def test_smooth(self):
        # A window this small leaves the raw errors effectively unchanged.
        expected = np.array([0.1, 1.9, 0.5, 0.5, 2.9, 0.0, 5.0, 0.0])
        self._run(0.125, True, expected)

    def test_smooth_span(self):
        # A wider window blends neighbouring errors.
        expected = np.array([0.1, 1.45, 0.792, 0.595, 2.138, 0.71, 3.571, 1.19])
        self._run(0.25, True, expected)
class RandomHorizontalFlip(object):
    """Flip a PIL image left-to-right with probability 0.5."""

    def __call__(self, img):
        flip = random.random() < 0.5
        return img.transpose(Image.FLIP_LEFT_RIGHT) if flip else img
def _setup_learning_rate(config, global_step):
    """Build the learning-rate tensor, with exponential decay when a positive
    decay factor is configured, otherwise a constant rate."""
    if (config.learning_rate_decay_factor > 0):
        return tf.train.exponential_decay(
            learning_rate=float(config.learning_rate),
            global_step=global_step,
            decay_steps=config.learning_rate_decay_steps,
            decay_rate=config.learning_rate_decay_factor,
            staircase=False)
    return tf.constant(config.learning_rate)
def random(mode='RGB'):
    """Build a palette of random values for the given mode.

    Args:
        mode: Image mode; 256 random byte values are generated per band.

    Returns:
        An :class:`ImagePalette` wrapping the random palette.
    """
    from random import randint
    # One random byte per palette entry, 256 entries per band.
    palette = [randint(0, 255) for _ in range(256 * len(mode))]
    return ImagePalette(mode, palette)
def display_checks_statistics(total: dict[str, dict[str | Status, int]]) -> None:
    """Print a padded summary table of performed checks."""
    padding = 20
    # Column widths derive from the longest check name and the largest total.
    name_width = max(map(len, total)) + padding
    biggest_total = max(total.values(), key=lambda v: v['total'])['total']
    count_width = len(str(biggest_total)) * 2 + padding
    status_width = padding
    click.secho('Performed checks:', bold=True)
    template = f' {{:{name_width}}}{{:{count_width}}}{{:{status_width}}}'
    for check_name, results in total.items():
        display_check_result(check_name, results, template)
def make_unet_encoder_decoder_args(encoder_args, decoder_args):
    """Normalize U-Net encoder/decoder convolution specs into tuples.

    Each encoder spec is (in_chan, out_chan, kernel_size, stride, padding,
    dilation); 'auto' padding becomes half the kernel size per dimension.
    If ``decoder_args`` is the string 'auto', decoder specs are derived from
    the encoder via :func:`unet_decoder_args` with skip connections; otherwise
    each decoder spec additionally carries an output_padding field.

    Returns:
        (encoder_args, decoder_args) as nested tuples.
    """
    # 'auto' padding -> kernel_size // 2 per dimension (same-padding for odd kernels).
    encoder_args = tuple(((in_chan, out_chan, tuple(kernel_size), tuple(stride), (tuple([(n // 2) for n in kernel_size]) if (padding == 'auto') else tuple(padding)), tuple(dilation)) for (in_chan, out_chan, kernel_size, stride, padding, dilation) in encoder_args))
    if (decoder_args == 'auto'):
        # Mirror the encoder to build decoder specs with skip connections.
        decoder_args = unet_decoder_args(encoder_args, skip_connections=True)
    else:
        # NOTE(review): unlike the encoder branch, an explicit (non-'auto')
        # decoder padding is NOT wrapped in tuple() here — confirm intentional.
        decoder_args = tuple(((in_chan, out_chan, tuple(kernel_size), tuple(stride), (tuple([(n // 2) for n in kernel_size]) if (padding == 'auto') else padding), tuple(dilation), output_padding) for (in_chan, out_chan, kernel_size, stride, padding, dilation, output_padding) in decoder_args))
    return (encoder_args, decoder_args)
def cosine_sim(lf_input, rt_input):
    """Cosine similarity between two tensors along their last axis.

    Both inputs are L2-normalized (with a small epsilon for stability) and
    the similarity is their elementwise product summed over the last axis.
    """
    def _normalize(tensor, norm_name, hidden_name):
        # ||t|| with epsilon to avoid division by zero.
        norm = tf.sqrt((tf.reduce_sum((tensor ** 2), axis=(- 1), keep_dims=True) + 1e-06), name=norm_name)
        return tf.div(tensor, norm, name=hidden_name)

    lf_unit = _normalize(lf_input, 'lf_norm', 'lf_norm_hidden')
    rt_unit = _normalize(rt_input, 'rt_norm', 'rt_norm_hidden')
    return tf.reduce_sum((lf_unit * rt_unit), axis=(- 1), name='cosine_score')
class HostAPICodegen():
    """Generates OpenCL kernels and a JSON routine description for FBLAS routines."""

    # Destination directory for all generated files.
    _output_path = ''

    def __init__(self, output_path: str):
        self._output_path = output_path

    def generateRoutines(self, routines: List[fblas_routine.FBLASRoutine]):
        """Generate code for each routine and write the collected JSON
        descriptions to 'generated_routines.json'.

        Dispatches each routine to the matching ``_codegen_<blas_name>``
        method; each such method returns that routine's JSON entry.
        """
        routine_id = 0
        json_routines = []
        for r in routines:
            print(('Generating: ' + r.user_name))
            # Resolve the per-routine generator by BLAS name (e.g. _codegen_dot).
            method_name = ('_codegen_' + r.blas_name)
            method = getattr(self, method_name)
            jr = method(r, routine_id)
            routine_id = (routine_id + 1)
            json_routines.append(jr)
        json_content = {'routine': json_routines}
        jw.write_to_file((self._output_path + 'generated_routines.json'), json_content)

    def _write_file(self, path, content, append=False):
        """Write (or, with append=True, append) generated source to ``path``."""
        print(('Generating file: ' + path))
        with open(path, ('a' if append else 'w')) as f:
            if (append is True):
                # Separate appended kernels with a blank line.
                f.write('\n')
            f.write(content)

    def _read_template_file(self, path):
        """Load a Jinja2 template from the package's templates directory.

        Undefined template variables are logged (via make_logging_undefined)
        instead of failing silently.
        """
        templates = os.path.join(os.path.dirname(__file__), '../../templates')
        loader = jinja2.FileSystemLoader(searchpath=templates)
        logging.basicConfig()
        logger = logging.getLogger('logger')
        logger = jinja2.make_logging_undefined(logger=logger, base=jinja2.Undefined)
        env = jinja2.Environment(loader=loader, undefined=logger)
        env.lstrip_blocks = True
        env.trim_blocks = True
        return env.get_template(path)

    def _codegen_dot(self, routine: fblas_routine.FBLASRoutine, id: int):
        """Emit the 'dot' kernel plus read-x/read-y/write-scalar helpers into
        '<user_name>.cl' and return the routine's JSON description."""
        template = self._read_template_file('1/dot.cl')
        # Channel names are made unique per routine via the numeric id suffix.
        chan_in_x_name = (gd.CHANNEL_IN_VECTOR_X_BASE_NAME + str(id))
        chan_in_y_name = (gd.CHANNEL_IN_VECTOR_Y_BASE_NAME + str(id))
        chan_out = (gd.CHANNEL_OUT_SCALAR_BASE_NAME + str(id))
        channels_routine = {'channel_in_vector_x': chan_in_x_name, 'channel_in_vector_y': chan_in_y_name, 'channel_out_scalar': chan_out}
        output_path = (((self._output_path + '/') + routine.user_name) + '.cl')
        self._write_file(output_path, template.render(routine=routine, channels=channels_routine))
        # Helper kernel: stream vector x into the input channel.
        template = self._read_template_file(('helpers/' + gd.TEMPLATE_READ_VECTOR_X))
        channels_helper = {'channel_out_vector': chan_in_x_name}
        helper_name_read_x = (gd.HELPER_READ_VECTOR_X_BASE_NAME + str(id))
        self._write_file(output_path, template.render(helper_name=helper_name_read_x, helper=routine, channels=channels_helper), append=True)
        # Helper kernel: stream vector y into the input channel.
        template = self._read_template_file(('helpers/' + gd.TEMPLATE_READ_VECTOR_Y))
        channels_helper = {'channel_out_vector': chan_in_y_name}
        helper_name_read_y = (gd.HELPER_READ_VECTOR_Y_BASE_NAME + str(id))
        self._write_file(output_path, template.render(helper_name=helper_name_read_y, helper=routine, channels=channels_helper), append=True)
        # Helper kernel: drain the scalar result from the output channel.
        template = self._read_template_file(('helpers/' + gd.TEMPLATE_WRITE_SCALAR))
        channels_helper = {'channel_in_scalar': chan_out}
        helper_name_write_scalar = (gd.HELPER_WRITE_SCALAR_BASE_NAME + str(id))
        self._write_file(output_path, template.render(helper_name=helper_name_write_scalar, helper=routine, channels=channels_helper), append=True)
        # Assemble this routine's JSON entry.
        json = {}
        jw.add_commons(json, routine)
        jw.add_incx(json, routine)
        jw.add_incy(json, routine)
        jw.add_item(json, jd.GENERATED_READ_VECTOR_X, helper_name_read_x)
        jw.add_item(json, jd.GENERATED_READ_VECTOR_Y, helper_name_read_y)
        jw.add_item(json, jd.GENERATED_WRITE_SCALAR, helper_name_write_scalar)
        return json

    def _codegen_axpy(self, routine: fblas_routine.FBLASRoutine, id: int):
        """Emit the 'axpy' kernel plus read-x/read-y/write-vector helpers into
        '<user_name>.cl' and return the routine's JSON description."""
        template = self._read_template_file('1/axpy.cl')
        # Channel names are made unique per routine via the numeric id suffix.
        chan_in_x_name = (gd.CHANNEL_IN_VECTOR_X_BASE_NAME + str(id))
        chan_in_y_name = (gd.CHANNEL_IN_VECTOR_Y_BASE_NAME + str(id))
        chan_out = (gd.CHANNEL_OUT_VECTOR_BASE_NAME + str(id))
        channels_routine = {'channel_in_vector_x': chan_in_x_name, 'channel_in_vector_y': chan_in_y_name, 'channel_out_vector': chan_out}
        output_path = (((self._output_path + '/') + routine.user_name) + '.cl')
        self._write_file(output_path, template.render(routine=routine, channels=channels_routine))
        # Helper kernel: stream vector x into the input channel.
        template = self._read_template_file(('helpers/' + gd.TEMPLATE_READ_VECTOR_X))
        channels_helper = {'channel_out_vector': chan_in_x_name}
        helper_name_read_x = (gd.HELPER_READ_VECTOR_X_BASE_NAME + str(id))
        self._write_file(output_path, template.render(helper_name=helper_name_read_x, helper=routine, channels=channels_helper), append=True)
        # Helper kernel: stream vector y into the input channel.
        template = self._read_template_file(('helpers/' + gd.TEMPLATE_READ_VECTOR_Y))
        channels_helper = {'channel_out_vector': chan_in_y_name}
        helper_name_read_y = (gd.HELPER_READ_VECTOR_Y_BASE_NAME + str(id))
        self._write_file(output_path, template.render(helper_name=helper_name_read_y, helper=routine, channels=channels_helper), append=True)
        # Helper kernel: drain the result vector from the output channel.
        template = self._read_template_file(('helpers/' + gd.TEMPLATE_WRITE_VECTOR))
        channels_helper = {'channel_in_vector': chan_out}
        helper_name_write_vector = (gd.HELPER_WRITE_VECTOR_BASE_NAME + str(id))
        self._write_file(output_path, template.render(helper_name=helper_name_write_vector, helper=routine, channels=channels_helper), append=True)
        # Assemble this routine's JSON entry.
        json = {}
        jw.add_commons(json, routine)
        jw.add_incx(json, routine)
        jw.add_incy(json, routine)
        jw.add_item(json, jd.GENERATED_READ_VECTOR_X, helper_name_read_x)
        jw.add_item(json, jd.GENERATED_READ_VECTOR_Y, helper_name_read_y)
        jw.add_item(json, jd.GENERATED_WRITE_VECTOR, helper_name_write_vector)
        return json
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('num_groups', [2, 3])
@pytest.mark.parametrize('x_shape , batch_axis, channel_axis', [((2, 6, 3, 3), 0, 1), ((2, 3, 3, 6), 0, 3), ((8, 6), 0, 1), ((4, 3, 6), [0, 1], 2), ((4, 3, 6), [0, (- 2)], (- 1))])
@pytest.mark.parametrize('eps', [1e-05])
@pytest.mark.parametrize('output_stat', [False])
@pytest.mark.parametrize('no_scale', [False, True])
@pytest.mark.parametrize('no_bias', [False, True])
def test_group_normalization_double_backward(ctx, func_name, seed, num_groups, x_shape, batch_axis, channel_axis, eps, output_stat, no_scale, no_bias):
    """Second-order (double-backward) gradient check for group_normalization.

    NOTE(review): the '@pytest.mark.parametrize' decorator prefixes were
    reconstructed from bare argument lists left by extraction — confirm
    against the original file.
    """
    from nbla_test_utils import backward_function_tester
    rng = np.random.RandomState(seed)
    (x, beta, gamma) = create_inputs(rng, x_shape, channel_axis, no_scale, no_bias)
    # Check gradients for x always, and for beta/gamma only when present.
    backward = [True, (not no_bias), (not no_scale)]
    backward_function_tester(rng, F.group_normalization, inputs=[x, beta, gamma], func_args=[num_groups, channel_axis, batch_axis, eps, output_stat], atol_f=0.0002, backward=backward, ctx=ctx)
class Seq2SeqModel(BaseModel):
    """Base seq2seq model holding vocabulary sizes and sequence-length limits."""

    def set_src_vocab_size(self, vocab_size):
        # Source-side vocabulary size.
        self._src_vocab_size = vocab_size

    def set_tgt_vocab_size(self, vocab_size):
        # Target-side vocabulary size.
        self._tgt_vocab_size = vocab_size

    def set_max_src_len(self, l):
        # Maximum source sequence length.
        self._max_src_len = l

    def set_max_tgt_len(self, l):
        # Maximum target sequence length.
        self._max_tgt_len = l

    def src_vocab_size(self):
        """Return the source-side vocabulary size set via set_src_vocab_size."""
        return self._src_vocab_size

    def tgt_vocab_size(self):
        """Return the target-side vocabulary size set via set_tgt_vocab_size."""
        return self._tgt_vocab_size

    def max_src_len(self):
        """Return the maximum source length set via set_max_src_len."""
        return self._max_src_len

    def max_tgt_len(self):
        """Return the maximum target length set via set_max_tgt_len."""
        return self._max_tgt_len
def split_vertex(G, u, v=None, edges=None):
    """Split vertex ``u`` of ``G``: reattach the given incident ``edges``
    from ``u`` to a vertex ``v``.

    ``v`` must be a brand-new vertex (created here when None) or an existing
    isolated vertex.  A loop at ``u`` (appearing as an edge with equal
    endpoints) becomes a u-v edge; any other edge not incident with ``u``
    raises ValueError.

    Args:
        G: The graph to modify in place.
        u: The vertex being split.
        v: New or isolated vertex receiving the moved endpoints (optional).
        edges: Edges (triples with labels) to move from u to v (optional).

    Raises:
        ValueError: If ``v`` is an existing non-isolated vertex, or an edge
            is not incident with ``u``.
    """
    if (v is None):
        v = G.add_vertex()
    elif (v not in G):
        G.add_vertex(v)
    elif G.degree(v):
        raise ValueError('v must be a new vertex or an isolated vertex')
    if (edges is None):
        edges = []
    edges_on_u = G.edges_incident(u)
    for e in edges:
        if (e not in edges_on_u):
            if (e[0] == e[1]):
                # A loop at u: replace it with a u-v edge carrying its label.
                G.add_edge(u, v, e[2])
                G.delete_edge(e)
            else:
                raise ValueError('the edges are not all incident with u')
        elif (e[0] == u):
            # Move the u-endpoint of (u, w) to v.
            G.add_edge(v, e[1], e[2])
        elif (e[1] == u):
            # Move the u-endpoint of (w, u) to v.
            G.add_edge(e[0], v, e[2])
        # NOTE(review): indentation reconstructed — this delete is taken to run
        # once per processed edge (assuming deleting an already-removed loop is
        # a no-op); confirm against the original layout.
        G.delete_edge(e)
    return
def calculate_fid(mu1, sigma1, mu2, sigma2, eps=1e-06):
    """Frechet Inception Distance between two Gaussians (mu, sigma).

    FID = ||mu1 - mu2||^2 + Tr(sigma1 + sigma2 - 2*sqrt(sigma1 @ sigma2)).

    Fixes over the original: the three matrix products lost their operators
    during extraction (restored as '@'), and the eps message lacked its
    f-string prefix so '{eps}' was printed literally.

    Args:
        mu1, mu2: Mean vectors of equal length.
        sigma1, sigma2: Covariance matrices of equal shape.
        eps: Diagonal jitter applied when the product of covariances is singular.

    Returns:
        float: The FID value.

    Raises:
        ValueError: If the matrix square root has a non-negligible imaginary part.
    """
    assert (mu1.shape == mu2.shape), 'Two mean vectors have different lengths'
    assert (sigma1.shape == sigma2.shape), 'Two covariances have different dimensions'
    (cov_sqrt, _) = linalg.sqrtm(sigma1 @ sigma2, disp=False)
    if (not np.isfinite(cov_sqrt).all()):
        print(f'Product of cov matrices is singular. Adding {eps} to diagonal of cov estimates')
        # Regularize both covariances and retry the matrix square root.
        offset = (np.eye(sigma1.shape[0]) * eps)
        cov_sqrt = linalg.sqrtm((sigma1 + offset) @ (sigma2 + offset))
    if np.iscomplexobj(cov_sqrt):
        # Tolerate tiny imaginary noise from sqrtm; reject anything larger.
        if (not np.allclose(np.diagonal(cov_sqrt).imag, 0, atol=0.001)):
            m = np.max(np.abs(cov_sqrt.imag))
            raise ValueError(f'Imaginary component {m}')
        cov_sqrt = cov_sqrt.real
    mean_diff = (mu1 - mu2)
    mean_norm = mean_diff @ mean_diff
    trace = ((np.trace(sigma1) + np.trace(sigma2)) - (2 * np.trace(cov_sqrt)))
    fid = (mean_norm + trace)
    return fid
def get_env_module() -> Tuple[str, str]:
    """Return the env-module variable name and its current value.

    Fixes the return annotation: the function returns a 2-tuple, not
    ``Tuple[str]`` (a 1-tuple).

    Returns:
        (variable name, value) where the value is '<not set>' when the
        ENV_MODULE environment variable is absent.
    """
    var_name = 'ENV_MODULE'
    return (var_name, os.environ.get(var_name, '<not set>'))
@pytest.mark.parametrize('data_dict', [pytest.param('full_spark_dataset', marks=pytest.mark.spark), pytest.param('full_pandas_dataset', marks=pytest.mark.core)])
def test_feature_schema_schema_dict(data_dict, request):
    """feature_schema must expose non-None dict-style views (items/values/keys).

    NOTE(review): the '@pytest.mark.parametrize' decorator prefix was
    reconstructed from a bare argument list left by extraction — confirm
    against the original file.
    """
    dataset = create_dataset(request.getfixturevalue(data_dict))
    assert (dataset.feature_schema.items() is not None)
    assert (dataset.feature_schema.values() is not None)
    assert (dataset.feature_schema.keys() is not None)
def ratio_iou(x1, y1, w1, h1, x2, y2, w2, h2, eps=1e-05):
    """Elementwise IoU of two boxes given as (x, y, w, h) tensors.

    The union is clamped below by ``eps`` to avoid division by zero.
    """
    # Intersection rectangle: [left, right) x [top, bottom).
    left = torch.max(x1, x2)
    top = torch.max(y1, y2)
    inter_w = torch.clamp(torch.min(x1 + w1, x2 + w2) - left, min=0)
    inter_h = torch.clamp(torch.min(y1 + h1, y2 + h2) - top, min=0)
    intersection = inter_w * inter_h
    union = w1 * h1 + w2 * h2 - intersection
    return intersection / torch.clamp(union, min=eps)
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse |
def test_intglobal():
    """A closure reading an enclosing-scope int should act like a constant
    inside the compiled/inner function."""
    some_glob = 124

    def func(A):
        # Reads the enclosing-scope value; result is 0 iff the loop ever
        # hits index 123 or some_glob - 1.
        var = some_glob
        sentinel = 1
        for it in range(100):
            if it == 123 or it == var - 1:
                sentinel = 0
        A[...] = sentinel

    func(np.empty((10,)))
@aggregator.command(name='start')
@option('-p', '--plan', required=False, help='Federated learning plan [plan/plan.yaml]', default='plan/plan.yaml', type=ClickPath(exists=True))
@option('-c', '--authorized_cols', required=False, help='Authorized collaborator list [plan/cols.yaml]', default='plan/cols.yaml', type=ClickPath(exists=True))
@option('-s', '--secure', required=False, help='Enable Intel SGX Enclave', is_flag=True, default=False)
def start_(plan, authorized_cols, secure):
    """Start the aggregator service from a federated-learning plan.

    NOTE(review): the decorator prefixes ('@aggregator.command', '@option')
    were reconstructed from bare argument lists left by extraction — confirm
    the CLI group name against the original file.
    """
    from pathlib import Path
    from openfl.federated import Plan
    # Refuse plan/collaborator paths that escape the openfl workspace.
    if is_directory_traversal(plan):
        echo('Federated learning plan path is out of the openfl workspace scope.')
        sys.exit(1)
    if is_directory_traversal(authorized_cols):
        echo('Authorized collaborator list file path is out of the openfl workspace scope.')
        sys.exit(1)
    plan = Plan.parse(plan_config_path=Path(plan).absolute(), cols_config_path=Path(authorized_cols).absolute())
    logger.info(' Starting the Aggregator Service.')
    plan.get_server().serve()
def aux_models(in_channels, num_domains, num_classes, layers_dis=None, layers_cls=None):
    """Build the auxiliary discriminator and classifier models.

    Args:
        in_channels: Number of input feature channels.
        num_domains: Number of domains.
        num_classes: Number of classes.
        layers_dis: Optional layer spec for the discriminator (defaults to []).
        layers_cls: Optional layer spec for the classifiers (defaults to []).

    Returns:
        (dis_model, c_model, cp_model): domain discriminator, forward
        classifier, and gradient-reversed classifier.
    """
    # Avoid the mutable-default-argument pitfall: defaults were previously
    # shared [] instances; None now stands in for "no extra layers".
    layers_dis = [] if layers_dis is None else layers_dis
    layers_cls = [] if layers_cls is None else layers_cls
    dis_model = DisNet(in_channels, num_domains, layers_dis)
    c_model = ClsNet(in_channels, num_domains, num_classes, reverse=False, layers=layers_cls)
    cp_model = ClsNet(in_channels, num_domains, num_classes, reverse=True, layers=layers_cls)
    return (dis_model, c_model, cp_model)
def GenerateSM80_TensorOp_1688(manifest, cuda_version):
    """Register SM80 TensorOp (16x8x8, tf32) GEMM and Conv2d kernels in ``manifest``."""
    # These tensor-op kernels require CUDA toolkit 11.0 or newer.
    if (not CudaToolkitVersionSatisfies(cuda_version, 11, 0)):
        return
    # All four (A, B) layout combinations, always with column-major C.
    layouts = [(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor), (LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor)]
    # tf32 inputs accumulating into f32 on the 16x8x8 tensor-op shape.
    math_instructions = [MathInstruction([16, 8, 8], DataType.tf32, DataType.tf32, DataType.f32, OpcodeClass.TensorOp, MathOperation.multiply_add)]
    # Compute-capability range these kernels are generated for.
    min_cc = 80
    max_cc = 1024
    alignment_constraints = [4, 2, 1]
    for math_inst in math_instructions:
        # Threadblock tiles (shape, stage count, warp arrangement) at K = 16 and K = 32.
        tile_descriptions = [TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc), TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc), TileDescription([64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc), TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc), TileDescription([64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc), TileDescription([64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc)]
        # Output in the accumulator type (f32).
        data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, math_inst.element_accumulator]
        # Mixed variant: output in the input element type (tf32).
        data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, math_inst.element_accumulator]
        CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints)
        CreateGemmOperator(manifest, layouts, tile_descriptions, data_type_mixed, alignment_constraints)
        # Conv2d counterparts use the NHWC tensor layout throughout.
        conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
        CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
        CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints)
def check_prior_BO_limit(prior):
    """Run the prior-BO-limit experiment grid for ``prior`` and return the results frame."""
    return simple_run_experiments(get_prior_BO_limit, prior=prior, mx_hat=np.linspace(1, 3, 10), tx0_hat=1.0)
class EpicFHIRGetPatientDetails(VirtualFunctionTool):
    """Virtual tool spec: fetch a patient's demographics and clinical data by id."""

    name = 'EpicFHIRGetPatientDetails'
    summary = 'Retrieve patient demographics and clinical data, such as medications, allergies, and conditions.'
    # Single required input: the patient's unique identifier.
    parameters: List[ArgParameter] = [{'name': 'patient_id', 'type': 'string', 'description': 'The unique identifier of the patient.', 'required': True}]
    # Structured patient record returned on success.
    returns: List[ArgReturn] = [{'name': 'patient_data', 'type': 'object', 'description': "The patient demographics and clinical data, including fields 'name', 'age', 'gender', 'contact_info', 'medications', 'allergies', and 'conditions'."}]
    # Raised when the patient id cannot be resolved.
    exceptions: List[ArgException] = [{'name': 'NotFoundException', 'description': "The 'patient_id' does not exist."}]
class ImgGenerator(nn.Module):
    """Generator that decodes a state vector into a 3-channel image via an
    MLP followed by a stack of upsampling blocks."""

    def __init__(self, opt=None, input_nc=3, output_nc=3, ngf=32, n_down=6, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=9, padding_type='reflect'):
        assert (n_blocks >= 0)
        super(ImgGenerator, self).__init__()
        self.opt = opt
        # Dimensionality of the input state vector, taken from the options object.
        self.state_dim = opt.state_dim
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        # Conv bias is enabled only when the norm layer is InstanceNorm2d
        # (possibly wrapped in functools.partial).
        if (type(norm_layer) == functools.partial):
            use_bias = (norm_layer.func == nn.InstanceNorm2d)
        else:
            use_bias = (norm_layer == nn.InstanceNorm2d)
        self.inc = Inconv(input_nc, ngf, norm_layer, use_bias)
        self.n_down = n_down
        # MLP: state -> 2048 features, reshaped to (512, 2, 2) in forward().
        self.state_fc = nn.Sequential(nn.Linear(self.state_dim, 32), nn.ReLU(), nn.Linear(32, 128), nn.ReLU(), nn.Linear(128, 512), nn.ReLU(), nn.Linear(512, 2048))
        # Seven upsampling stages: 512 -> 512 -> 512 -> 256 -> 256 -> 128 -> 64 -> 32 channels.
        self.upnet = nn.Sequential(Up(512, 512, norm_layer, use_bias), Up(512, 512, norm_layer, use_bias), Up(512, 256, norm_layer, use_bias), Up(256, 256, norm_layer, use_bias), Up(256, 128, norm_layer, use_bias), Up(128, 64, norm_layer, use_bias), Up(64, 32, norm_layer, use_bias))
        # NOTE(review): Outconv consumes ngf input channels, which matches the
        # upnet's final 32 channels only when ngf == 32 (the default) — confirm.
        self.outc = Outconv(ngf, 3)

    def forward(self, state):
        """Map a (batch, state_dim) state to an image through the up-conv stack."""
        feature = self.state_fc(state).view((- 1), 512, 2, 2)
        grid = self.outc(self.upnet(feature))
        return grid
class AlternatingBlock():
    """A block of polynomial-ring variables ordered by index with the
    per-variable names alternating within each index.

    Bug fixed: the original used ``reversed(range(...))``, a one-shot
    iterator, which was exhausted while building ``names`` — leaving
    ``self.indices`` spent and ``index2pos`` EMPTY whenever reverse=True.
    The indices are now materialized as a list first.
    """

    def __init__(self, var_names, size_per_variable, start_index=0, reverse=False):
        self.var_names = var_names
        self.size_per_variable = size_per_variable
        self.reverse = reverse
        # Materialize so the sequence can be iterated more than once.
        indices = list(range(start_index, (start_index + size_per_variable)))
        if reverse:
            indices.reverse()
        names = []
        for i in indices:
            for n in var_names:
                names.append((((n + '(') + str(i)) + ')'))
        self.indices = indices
        # Map each raw index to its position within this block's ordering.
        self.index2pos = {v: k for (k, v) in enumerate(indices)}
        self.names = names

    def __len__(self):
        # Total number of generated variable names.
        return (self.size_per_variable * len(self.var_names))

    def __iter__(self):
        return iter(self.names)

    def __getitem__(self, i):
        return self.names[i]

    def register(self, start, context):
        """Install one variable-factory callable per variable name into ``context``.

        Each factory maps a raw index to the corresponding ring variable,
        offset by ``start``.
        """
        def gen_var_func(var_pos):
            class var_factory():
                def __init__(self, ring, index2pos, size):
                    self.ring = ring
                    self.index2pos = index2pos
                    self.size = size

                def __call__(self, idx):
                    # Position of idx within the block, interleaved by variable.
                    return self.ring.variable((((self.index2pos[idx] * self.size) + var_pos) + start))
            # Unwrap nested proxies to reach the context holding the ring 'r'.
            ring_context = context
            while isinstance(ring_context, PrefixedDictProxy):
                ring_context = ring_context.wrapped
            ring = ring_context['r']
            return var_factory(ring, self.index2pos, len(self.var_names))
        for (var_pos, n) in enumerate(self.var_names):
            var_func = gen_var_func(var_pos)
            var_func.__name__ = n
            context[n] = var_func
class TestBatchMomentsOp(serial.SerializedTestCase):
    """Tests for the Caffe2 BatchMoments operator in NCHW and NHWC orders.

    NOTE(review): the hypothesis decorators below were reconstructed from
    bare argument lists whose '@given'/'@settings' prefixes were lost during
    extraction — confirm against the original file.
    """

    def batch_moments_nchw_ref(self, X):
        """Reference moments for NCHW input: per-channel mean and raw second
        moment E[X^2] (note: not the centered variance) over (N, spatial)."""
        dims = X.shape
        N = dims[0]
        C = dims[1]
        X = X.reshape(N, C, (- 1))
        mu = np.mean(X, axis=(0, 2))
        var = np.mean(np.square(X), axis=(0, 2))
        return [mu, var]

    def batch_moments_nhwc_ref(self, X):
        """Reference moments for NHWC input: per-channel mean and raw second
        moment E[X^2] over all leading axes."""
        dims = X.shape
        C = dims[(- 1)]
        X = X.reshape((- 1), C)
        mu = np.mean(X, axis=0)
        var = np.mean(np.square(X), axis=0)
        return [mu, var]

    @given(N=st.integers(1, 5), C=st.integers(1, 5), H=st.integers(1, 5), W=st.integers(1, 5), order=st.sampled_from(['NCHW', 'NHWC']), **hu.gcs)
    def test_batch_moments_2d(self, N, C, H, W, order, gc, dc):
        op = core.CreateOperator('BatchMoments', ['X'], ['mu', 'var'], order=order)
        if (order == 'NCHW'):
            X = np.random.randn(N, C, H, W).astype(np.float32)
        else:
            X = np.random.randn(N, H, W, C).astype(np.float32)

        def ref(X):
            # Select the reference implementation matching the data order.
            if (order == 'NCHW'):
                return self.batch_moments_nchw_ref(X)
            else:
                return self.batch_moments_nhwc_ref(X)
        self.assertReferenceChecks(device_option=gc, op=op, inputs=[X], reference=ref)
        self.assertDeviceChecks(dc, op, [X], [0, 1])
        self.assertGradientChecks(gc, op, [X], 0, [0, 1])

    @given(N=st.integers(1, 5), C=st.integers(1, 5), T=st.integers(1, 3), H=st.integers(1, 3), W=st.integers(1, 3), order=st.sampled_from(['NCHW', 'NHWC']), **hu.gcs)
    @settings(deadline=10000)
    def test_batch_moments_3d(self, N, C, T, H, W, order, gc, dc):
        op = core.CreateOperator('BatchMoments', ['X'], ['mu', 'var'], order=order)
        if (order == 'NCHW'):
            X = np.random.randn(N, C, T, H, W).astype(np.float32)
        else:
            X = np.random.randn(N, T, H, W, C).astype(np.float32)

        def ref(X):
            # Select the reference implementation matching the data order.
            if (order == 'NCHW'):
                return self.batch_moments_nchw_ref(X)
            else:
                return self.batch_moments_nhwc_ref(X)
        self.assertReferenceChecks(device_option=gc, op=op, inputs=[X], reference=ref)
        self.assertDeviceChecks(dc, op, [X], [0, 1])
        self.assertGradientChecks(gc, op, [X], 0, [0, 1])
def kl_loss_gaussian(mu1, mu2, sigma1, sigma2):
    """Elementwise KL divergence KL(N(mu1, sigma1) || N(mu2, sigma2)).

    The sigma ratio is clipped to [1e-6, 1e6] before the log for numerical
    stability.
    """
    with tf.name_scope('KL_loss'):
        log_ratio = tf.log(tf.clip_by_value((sigma2 / sigma1), 1e-06, 1000000.0))
        quad_term = (((sigma1 ** 2) + ((mu1 - mu2) ** 2)) / (2 * (sigma2 ** 2)))
        return (log_ratio + quad_term) - 0.5
class SymplecticFormParal(SymplecticForm, DiffFormParal):
    """A symplectic form on a parallelizable differentiable manifold.

    Combines the symplectic-form interface with the component-based
    machinery of differential forms on parallelizable manifolds.
    """

    # Cached Poisson bivector (the "inverse" of the symplectic form).
    _poisson: TensorFieldParal

    def __init__(self, manifold: Union[(VectorFieldModule, DifferentiableManifold)], name: Optional[str], latex_name: Optional[str]=None):
        """Initialize as a 2-form; *manifold* may be either the manifold or
        its vector-field module. Raises ValueError on odd-dimensional domains."""
        try:
            # Accept a manifold and extract its vector-field module...
            vector_field_module = manifold.vector_field_module()
        except AttributeError:
            # ...or accept the vector-field module directly.
            vector_field_module = manifold
        if (name is None):
            name = 'omega'
        DiffFormParal.__init__(self, vector_field_module, 2, name=name, latex_name=latex_name)
        dim = self._ambient_domain.dimension()
        # A symplectic form only exists on even-dimensional manifolds.
        if ((dim % 2) == 1):
            raise ValueError(f'the dimension of the manifold must be even but it is {dim}')
        self._dim_half = (dim // 2)
        SymplecticFormParal._init_derived(self)

    def _init_derived(self):
        """Initialize derived quantities from both parent classes."""
        DiffFormParal._init_derived(self)
        SymplecticForm._init_derived(self)

    def _del_derived(self, del_restrictions: bool=True):
        """Delete derived quantities, invalidating the cached Poisson tensor."""
        DiffFormParal._del_derived(self, del_restrictions=del_restrictions)
        if (self._poisson is not None):
            self._poisson._components.clear()
            self._poisson._del_derived()
        SymplecticForm._del_derived(self)

    def restrict(self, subdomain: DifferentiableManifold, dest_map: Optional[DiffMap]=None) -> SymplecticFormParal:
        """Return the restriction to *subdomain*, cached and re-wrapped as a
        SymplecticFormParal."""
        if (subdomain == self._domain):
            return self
        if (subdomain not in self._restrictions):
            resu = DiffFormParal.restrict(self, subdomain, dest_map=dest_map)
            self._restrictions[subdomain] = SymplecticFormParal.wrap(resu)
        return self._restrictions[subdomain]

    def poisson(self, expansion_symbol: Optional[Expression]=None, order: int=1) -> TensorFieldParal:
        """Return the Poisson bivector (negative inverse of the symplectic form).

        If *expansion_symbol* is given, compute a first-order series inverse in
        that symbol (only order=1 is implemented); otherwise invert the component
        matrix chart by chart. The result is cached in self._poisson.
        """
        super().poisson()
        if (expansion_symbol is not None):
            # Reuse the cache only if it was computed with the same symbol/order.
            if ((self._poisson is not None) and bool(self._poisson._components) and (list(self._poisson._components.values())[0][(0, 0)]._expansion_symbol == expansion_symbol) and (list(self._poisson._components.values())[0][(0, 0)]._order == order)):
                return self._poisson
            if (order != 1):
                raise NotImplementedError('only first order inverse is implemented')
            decompo = self.series_expansion(expansion_symbol, order)
            g0 = decompo[0]
            g1 = decompo[1]
            g0m = self._new_instance()
            g0m.set_comp()[:] = g0[:]
            # First-order inverse: -(g0^{-1} - eps * g0^{-1} g1 g0^{-1}).
            contraction = g1.contract(0, g0m.inverse(), 0)
            contraction = contraction.contract(1, g0m.inverse(), 1)
            self._poisson = (- (g0m.inverse() - (expansion_symbol * contraction)))
            self._poisson.set_calc_order(expansion_symbol, order)
            return self._poisson
        from sage.matrix.constructor import matrix
        from sage.tensor.modules.comp import CompFullyAntiSym
        # Exact inverse: for each frame lacking cached components, invert the
        # component matrix in every top chart where it is known.
        for frame in self._components:
            if (frame not in self._poisson._components):
                fmodule = self._fmodule
                si = fmodule._sindex
                nsi = (fmodule.rank() + si)
                dom = self._domain
                comp_poisson = CompFullyAntiSym(fmodule._ring, frame, 2, start_index=si, output_formatter=fmodule._output_formatter)
                comp_poisson_scal = {}
                # Only the upper triangle is needed (antisymmetry fills the rest).
                for i in fmodule.irange():
                    for j in range(i, nsi):
                        comp_poisson_scal[(i, j)] = dom.scalar_field()
                for chart in dom.top_charts():
                    try:
                        self_matrix = matrix([[self.comp(frame)[(i, j, chart)].expr(method='SR') for j in fmodule.irange()] for i in fmodule.irange()])
                        self_matrix_inv = self_matrix.inverse()
                    except (KeyError, ValueError):
                        # Components unknown or matrix singular in this chart.
                        continue
                    for i in fmodule.irange():
                        for j in range(i, nsi):
                            # Poisson tensor is minus the matrix inverse.
                            val = chart.simplify((- self_matrix_inv[((i - si), (j - si))]), method='SR')
                            comp_poisson_scal[(i, j)].add_expr(val, chart=chart)
                for i in range(si, nsi):
                    for j in range(i, nsi):
                        comp_poisson[(i, j)] = comp_poisson_scal[(i, j)]
                self._poisson._components[frame] = comp_poisson
        return self._poisson
class DropExecutor(ActionExecutor):
    """Executes a 'drop' action: a held object is released into the character's room."""

    def execute(self, script: Script, state: EnvironmentState, info: ExecutionInfo):
        # Generator protocol: yields the successor state on success, yields
        # nothing when the precondition check fails (errors go into `info`).
        current_line = script[0]
        info.set_current_line(current_line)
        node = state.get_state_node(current_line.object())
        if (node is None):
            info.object_found_error()
        elif self.check_drop(state, node, info):
            char_node = _get_character_node(state)
            char_room = _get_room_node(state, char_node)
            # Remove the HOLDS edge, place the object in the room, and clear
            # the grab bookkeeping for this node.
            (yield state.change_state([DeleteEdges(CharacterNode(), [Relation.HOLDS_LH, Relation.HOLDS_RH], NodeInstance(node)), AddEdges(NodeInstance(node), Relation.INSIDE, NodeInstance(char_room)), ClearExecDataKey((Action.GRAB, node.id))]))

    def check_drop(self, state: EnvironmentState, node: GraphNode, info: ExecutionInfo):
        """Precondition: the character must be holding *node* in either hand."""
        char_node = _get_character_node(state)
        nodes_in_hands = _find_nodes_from(state, char_node, relations=[Relation.HOLDS_LH, Relation.HOLDS_RH])
        if (not any([(n.id == node.id) for n in nodes_in_hands])):
            info.error('{} is not holding {}', char_node, node)
            return False
        return True
def get_config_list(ranking, ckpt_path2is_3class):
    """Convert ranked (ckpt_path, value) pairs into checkpoint-info dicts.

    Each entry carries the stringified checkpoint path, its 3-class flag
    looked up from *ckpt_path2is_3class*, and the ranking value.
    """
    return [
        {
            'ckpt_path': str(path),
            'is_3class': ckpt_path2is_3class[path],
            'value': score,
        }
        for path, score in ranking
    ]
class CountNode(ASTNode):
    """AST node for a COUNT aggregation over a single field."""

    def __init__(self, data_type, fields):
        # Both the node label and its operator name are 'COUNT'.
        super().__init__('COUNT', 'COUNT', data_type, fields)

    def textual_form_core(self):
        """Verbalize the count as a 'how many ...' phrase over the first field."""
        return f'how many {self.fields[0].textual_form()}'
def load_pkl(filename: Path) -> Dict[str, np.ndarray]:
    """Deserialize and return the pickled object stored at *filename*."""
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
# NOTE(review): the bare '(frozen=True)' below looks like a stripped
# '@dataclass(frozen=True)' decorator — confirm against the original source.
(frozen=True)
class PDistMetricWrapper():
    """Callable that dispatches pdist for one named metric to the C wrapper,
    falling back to a Python callable when a weight vector is supplied."""

    # Name of the metric, used to look up _METRICS and the C wrapper function.
    metric_name: str

    def __call__(self, X, *, out=None, **kwargs):
        X = np.ascontiguousarray(X)
        (m, n) = X.shape
        metric_name = self.metric_name
        metric_info = _METRICS[metric_name]
        (X, typ, kwargs) = _validate_pdist_input(X, m, n, metric_info, **kwargs)
        # Condensed distance matrix length: m choose 2.
        out_size = ((m * (m - 1)) // 2)
        w = kwargs.pop('w', None)
        if (w is not None):
            # Weighted metrics have no C fast path; use the Python callable.
            metric = metric_info.dist_func
            return _pdist_callable(X, metric=metric, out=out, w=w, **kwargs)
        dm = _prepare_out_argument(out, np.float64, (out_size,))
        # Resolve the specialized C function, e.g. pdist_euclidean_double_wrap.
        pdist_fn = getattr(_distance_wrap, f'pdist_{metric_name}_{typ}_wrap')
        pdist_fn(X, dm, **kwargs)
        return dm
def train_segmentor(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
    """Launch segmentor training with mmcv runners.

    Builds data loaders for the (possibly multiple) training datasets, wraps
    the model for (distributed) data-parallel execution, constructs the
    optimizer and runner from *cfg*, optionally registers an evaluation hook,
    resumes/loads checkpoints, and runs the configured workflow.
    """
    logger = get_root_logger(cfg.log_level)
    # Allow a single dataset or a list of datasets.
    dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
    data_loaders = [build_dataloader(ds, cfg.data.samples_per_gpu, cfg.data.workers_per_gpu, len(cfg.gpu_ids), dist=distributed, seed=cfg.seed, drop_last=True) for ds in dataset]
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
    optimizer = build_optimizer(model, cfg.optimizer)
    if (cfg.get('runner') is None):
        # Legacy configs: synthesize an iteration-based runner from total_iters.
        cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters}
        warnings.warn('config is now expected to have a `runner` section, please set `runner` in your config.', UserWarning)
    runner = build_runner(cfg.runner, default_args=dict(model=model, batch_processor=None, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta))
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config, cfg.checkpoint_config, cfg.log_config, cfg.get('momentum_config', None))
    # An accurate timestamp lets log filenames match across hooks.
    runner.timestamp = timestamp
    if validate:
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(val_dataset, samples_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
        eval_cfg = cfg.get('evaluation', {})
        # Epoch-based runners evaluate per epoch, iteration-based per iteration.
        eval_cfg['by_epoch'] = (cfg.runner['type'] != 'IterBasedRunner')
        eval_hook = (DistEvalHook if distributed else EvalHook)
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority='LOW')
    # resume_from takes precedence over load_from.
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow)
def count_arithmetic_ops_state(state: dace.SDFGState, symbols: Dict[(str, Any)]) -> int:
    """Count (symbolically) the arithmetic operations performed by one SDFG state.

    Recursively traverses the state's scope tree, multiplying each scope's
    operation count by its map's iteration count, and recursing into nested
    SDFGs with their symbol mappings applied. Uses the module-level INDENT
    global for progress-printing depth.
    """
    global INDENT
    stree_root = state.scope_tree()[None]
    sdict = state.scope_dict(node_to_children=True)
    result = 0
    def traverse(scope: Scope) -> int:
        # Returns the operation count of this scope, already multiplied by
        # the number of iterations of its entry map (1 for the root scope).
        global INDENT
        result = 0
        repetitions = 1
        if (scope.entry is not None):
            repetitions = scope.entry.map.range.num_elements()
        for node in sdict[scope.entry]:
            node_result = 0
            if isinstance(node, dace.nodes.NestedSDFG):
                # Evaluate the nested SDFG under the composed symbol mapping.
                nested_syms = {}
                nested_syms.update(symbols)
                nested_syms.update(evalsyms(symbols, node.symbol_mapping))
                INDENT += 1
                node_result += count_arithmetic_ops(node.sdfg, nested_syms)
                INDENT -= 1
            elif isinstance(node, dace.nodes.LibraryNode):
                node_result += LIBNODES_TO_ARITHMETICS[type(node)](node, symbols, state)
            elif isinstance(node, dace.nodes.Tasklet):
                if (node._code['language'] == dace.Language.CPP):
                    # CPP tasklets are opaque; bound by their output accesses.
                    for oedge in state.out_edges(node):
                        node_result += bigo(oedge.data.num_accesses)
                else:
                    node_result += count_arithmetic_ops_code(node._code['code_or_block'])
            elif isinstance(node, dace.nodes.Reduce):
                # One WCR evaluation per reduced input element.
                node_result += (state.in_edges(node)[0].data.subset.num_elements() * count_arithmetic_ops_code(node.wcr))
            if isinstance(node, (dace.nodes.CodeNode, dace.nodes.AccessNode)):
                # Write-conflict-resolution lambdas on outgoing memlets also count.
                for oedge in state.out_edges(node):
                    if (oedge.data.wcr is not None):
                        node_result += count_arithmetic_ops_code(oedge.data.wcr)
            # Report per-node counts only near the top of the hierarchy.
            if ((node_result != 0) and (INDENT <= 1)):
                if (isinstance(node, dace.nodes.NestedSDFG) and (INDENT == 0)):
                    iprint('*', type(node).__name__, node.sdfg.name, ': `', node_result, '`')
                elif (isinstance(node, dace.nodes.Tasklet) and (INDENT == 1)):
                    iprint('*', type(node).__name__, node, ': `', node_result, '`')
            result += node_result
        for child in scope.children:
            if (INDENT == 0):
                iprint('Scope', child.entry)
            INDENT += 1
            result += traverse(child)
            INDENT -= 1
        return (repetitions * result)
    return traverse(stree_root)
def module_cppgen(parser: argparse.ArgumentParser):
    """Register CLI arguments and the handler for the 'cppgen' sub-command."""
    # NOTE(review): 'MODOLE' looks like a typo for 'MODULE', but renaming it
    # would change the parsed attribute name that module_cppgen_impl reads
    # (args.MODOLE) — leave as-is pending a coordinated fix.
    parser.add_argument('MODOLE', help='Path to the module directory.')
    parser.add_argument('-n', '--namespace', type=str, help='C++ namespace if wanted.')
    parser.add_argument('-m', '--module-name', type=str, help="Module name to be a part of the module class. By default, it's the directory name.", default=None)
    parser.add_argument('-o', '--output', type=str, help='Output C++ header path.', default='module.h')
    parser.add_argument('--bin2c', help='Save the entire TCM archive to an in-memory buffer. This flag is ignored if the module is not a TCM archive', action='store_true')
    # Dispatch target invoked when this sub-command is selected.
    parser.set_defaults(func=module_cppgen_impl)
def write_supported_languages(path):
    """Render the supported-languages documentation page and write it to *path*.

    Language names are taken from the module-level ONTOLOGY, sorted, and
    wrapped between the doc header and footer templates.
    """
    names = sorted(entry['language'] for entry in ONTOLOGY)
    page = LANGUAGES_DOC_HEADER + _build_supported_languages_table(names) + LANGUAGES_DOC_FOOTER
    with path.open(mode='w') as handle:
        handle.write(page)
def read_hyperparameter_grid(method: str) -> pd.DataFrame:
    """Load the hyperparameter grid for *method* from GRID_SEARCH_JSON.

    Raises ValueError when no grid is defined for the method; otherwise
    returns the unrolled grid as a DataFrame.
    """
    with open(GRID_SEARCH_JSON, 'r', encoding='utf-8') as handle:
        all_grids = json.load(handle)
    if method not in all_grids:
        raise ValueError(f'No available hyperparameter grid for {method} in {str(GRID_SEARCH_JSON)}.')
    return pd.DataFrame(unroll_grid(all_grids[method]))
def LF_history_of(span):
    """Labeling function: OTHER when the text to the left of *span* mentions a
    family history ('family history of' / 'family hx'); otherwise ABSTAIN."""
    left_text = get_left_span(span, span.sentence, window=6).text
    if re.search(r'\bfamily (history of|hx)', left_text.strip(), re.I):
        return OTHER
    return ABSTAIN
class LinearAssignment(Benchmark):
    """Benchmark linear_sum_assignment over square, wide and tall cost matrices
    drawn from several cost distributions."""
    sizes = range(100, 401, 100)
    # Square (i, i), wide (i, 2i) and tall (2i, i) problem shapes.
    shapes = (
        [(i, i) for i in sizes]
        + [(i, 2 * i) for i in sizes]
        + [(2 * i, i) for i in sizes]
    )
    cost_types = ['uniform', 'spatial', 'logarithmic', 'integer', 'binary']
    param_names = ['shape', 'cost_type']
    params = [shapes, cost_types]

    def setup(self, shape, cost_type):
        # Build the cost matrix once, outside the timed region.
        generators = {
            'uniform': random_uniform,
            'spatial': random_spatial,
            'logarithmic': random_logarithmic,
            'integer': random_integer,
            'binary': random_binary,
        }
        self.cost_matrix = generators[cost_type](shape)

    def time_evaluation(self, *args):
        # Timed region: solve the assignment problem.
        linear_sum_assignment(self.cost_matrix)
def lrs2pretrain_max_inplen_checker():
    """Scan the LRS2 pretrain split and print the maximum required input length.

    Walks DATA_DIRECTORY/pretrain; for every .mp4 sample it compares the visual
    feature length against the length required by the target transcription
    (or by each sliding window of PRETRAIN_NUM_WORDS words when the
    transcription is longer), tracking the global maximum. Exits the process
    when a target exceeds 256 characters. Relies on the module-level `args`
    dict and `req_input_length` helper.
    """
    maxInpLen = 0
    numWords = args['PRETRAIN_NUM_WORDS']
    for (root, dirs, files) in os.walk(args['DATA_DIRECTORY'] + '/pretrain'):
        for file in files:
            if file.endswith('.mp4'):
                visualFeaturesFile = os.path.join(root, file[:-4]) + '.npy'
                targetFile = os.path.join(root, file[:-4]) + '.txt'
                with open(targetFile, 'r') as f:
                    lines = f.readlines()
                lines = [line.strip() for line in lines]
                # Line 0 holds the transcription after a 7-character prefix.
                trgt = lines[0][7:]
                words = trgt.split(' ')
                if (len(words) <= numWords):
                    # Whole transcription fits in a single sample.
                    if ((len(trgt) + 1) > 256):
                        print('Max target length reached. Exiting')
                        exit()
                    visualFeatures = np.load(visualFeaturesFile)
                    inpLen = len(visualFeatures)
                    reqLen = (req_input_length(trgt) + 1)
                    if (reqLen > inpLen):
                        inpLen = reqLen
                    if (inpLen > maxInpLen):
                        maxInpLen = inpLen
                else:
                    # Slide a window of numWords words over the transcription.
                    nWords = np.array([' '.join(words[i:(i + numWords)]) for i in range(((len(words) - numWords) + 1))])
                    # BUG FIX: `np.float` was a deprecated alias removed in
                    # NumPy 1.24 (AttributeError at runtime); use np.float64,
                    # the dtype the alias resolved to. Float dtype is required
                    # so the -inf sentinel below is representable.
                    nWordLens = np.array([(len(nWord) + 1) for nWord in nWords]).astype(np.float64)
                    nWordLens[(nWordLens > 256)] = (- np.inf)
                    if np.all((nWordLens == (- np.inf))):
                        print('Max target length reached. Exiting')
                        exit()
                    # NOTE(review): filtering nWords here while still indexing
                    # `lines[4 + ix]` below assumes no window before index ix was
                    # dropped; the alignment breaks if a middle window exceeds
                    # 256 chars — confirm against the dataset's guarantees.
                    nWords = nWords[(nWordLens > 0)]
                    for ix in range(len(nWords)):
                        trgt = nWords[ix]
                        # Lines 4.. hold per-word "WORD start end" alignment rows.
                        videoStartTime = float(lines[(4 + ix)].split(' ')[1])
                        videoEndTime = float(lines[(((4 + ix) + numWords) - 1)].split(' ')[2])
                        inpLen = int((np.ceil((args['VIDEO_FPS'] * videoEndTime)) - np.floor((args['VIDEO_FPS'] * videoStartTime))))
                        reqLen = (req_input_length(trgt) + 1)
                        if (reqLen > inpLen):
                            inpLen = reqLen
                        if (inpLen > maxInpLen):
                            maxInpLen = inpLen
    print(maxInpLen)
    return
def compare_optimizer(config, parameters, config_cpu, parameters_cpu, result_array):
    """Run one training update on paired (current-context, CPU) optimizers and
    log the numeric differences of every intermediate buffer.

    For each optimizer pair: copies parameters to the CPU side, feeds both
    sides identical batch data and identically generated random inputs, then
    compares outputs function-by-function through forward, backward and the
    solver update, appending per-buffer norm diffs to result_array (returned).
    """
    loaded_data = {}
    for (opt, opt_cpu) in zip(config.optimizers.values(), config_cpu.optimizers.values()):
        o = opt.optimizer
        o_cpu = opt_cpu.optimizer
        opts = [o, o_cpu]  # NOTE(review): appears unused — confirm before removing
        result_name = ("optimizer '%s' with network '%s'" % (o.name, o.network.name))
        result_dict = OrderedDict()
        logger.log(99, (('Comparing ' + result_name) + ' ...'))
        logger.log(99, 'process(func, variable), norm_diff, current_context_std, cpu_std, diff_std')
        # Start both runs from identical parameter values.
        for (p, p_cpu) in zip(parameters.values(), parameters_cpu.values()):
            p_cpu.d = p.d
        # Fetch one batch per data iterator, cached so both sides see the same data.
        di = opt.data_iterator
        if (di not in loaded_data):
            loaded_data[di] = di.next()
        data = loaded_data[di]
        for (v, d) in o.dataset_assign.items():
            let_data_to_variable(v.variable_instance, data[di.variables.index(d)], data_name=d, variable_name=v.name)
        for (v, d) in o_cpu.dataset_assign.items():
            let_data_to_variable(v.variable_instance, data[di.variables.index(d)], data_name=d, variable_name=v.name)
        # Generate random inputs once and reuse the same values on the CPU side.
        generated = {}
        for (v, generator) in o.generator_assign.items():
            generated[v.name] = generator(v.shape)
            dest_context = (config.global_config.default_context if ((not o.forward_sequence) or (v not in o.forward_sequence[0].inputs)) else None)
            let_data_to_variable(v.variable_instance, data=generated[v.name], ctx=dest_context, variable_name=v.name)
        for (v, generator) in o_cpu.generator_assign.items():
            dest_context = (config.global_config.default_context if ((not o.forward_sequence) or (v not in o.forward_sequence[0].inputs)) else None)
            let_data_to_variable(v.variable_instance, data=generated[v.name], ctx=dest_context, variable_name=v.name)
        # Forward pass: compare every function's outputs; dump raw buffers when
        # the diff jumps by more than 10x over the running maximum.
        last_max_diff = 1e-05
        for (func, func_cpu) in zip(o.forward_sequence, o_cpu.forward_sequence):
            o.network.forward_function(func)
            o_cpu.network.forward_function(func_cpu)
            large_diff = False
            for (v, v_cpu) in zip(func.outputs, func_cpu.outputs):
                name = ('forward_function (%s, %s)' % (func.name, v.name))
                if (v.variable_instance.d.shape != v_cpu.variable_instance.d.shape):
                    logger.log(99, ('Variable shape is different in %s (current_context=%s, cpu=%s)' % (v.name, str(v.variable_instance.d.shape), str(v_cpu.variable_instance.d.shape))))
                (norm_diff, std1, std2, diff_std) = calc_norm_diff(v.variable_instance.d, v_cpu.variable_instance.d)
                logger.log(99, ('%s, %f, %f, %f, %f' % (name, norm_diff, std1, std2, diff_std)))
                result_dict[name] = norm_diff
                if (norm_diff > last_max_diff):
                    if (norm_diff > (last_max_diff * 10)):
                        logger.log(99, (' current_context(data)=' + str(v.variable_instance.d.flatten())))
                        logger.log(99, (' cpu(data)=' + str(v_cpu.variable_instance.d.flatten())))
                        large_diff = True
                    last_max_diff = norm_diff
            if large_diff:
                # Also dump the inputs that produced the divergent outputs.
                logger.log(99, ' x_data:')
                for (v, v_cpu) in zip(func.inputs, func_cpu.inputs):
                    logger.log(99, (' current_context(%s.d)=%s' % (v.name, str(v.variable_instance.d.flatten()))))
                    logger.log(99, (' cpu(%s.d)=%s' % (v_cpu.name, str(v_cpu.variable_instance.d.flatten()))))
        # Backward pass: same comparison over input gradients.
        o.network.prepare_backward(o.backward_sequence)
        o_cpu.network.prepare_backward(o_cpu.backward_sequence)
        for (seq, seq_cpu) in zip(o.backward_sequence.sequence, o_cpu.backward_sequence.sequence):
            o.network.backward_function(seq)
            o_cpu.network.backward_function(seq_cpu)
            large_diff = False
            for (v, v_cpu) in zip(seq.func.inputs, seq_cpu.func.inputs):
                if v.variable_instance.need_grad:
                    name = ('backward_function (%s, %s)' % (seq.func.name, v.name))
                    (norm_diff, std1, std2, diff_std) = calc_norm_diff(v.variable_instance.g, v_cpu.variable_instance.g)
                    logger.log(99, ('%s, %f, %f, %f, %f' % (name, norm_diff, std1, std2, diff_std)))
                    result_dict[name] = norm_diff
                    if (norm_diff > last_max_diff):
                        if (norm_diff > (last_max_diff * 10)):
                            logger.log(99, ((' current_context(diff)=' + str(v.variable_instance)) + str(v.variable_instance.g.flatten())))
                            logger.log(99, ((' cpu(diff)=' + str(v_cpu.variable_instance)) + str(v_cpu.variable_instance.g.flatten())))
                            large_diff = True
                        last_max_diff = norm_diff
            if large_diff:
                # Dump both the inputs' data and the outputs' gradients.
                logger.log(99, ' x_data:')
                for (v, v_cpu) in zip(seq.func.inputs, seq_cpu.func.inputs):
                    logger.log(99, (' current_context(%s.d)=%s' % (v.name, str(v.variable_instance.d.flatten()))))
                    logger.log(99, (' cpu(%s.d)=%s' % (v_cpu.name, str(v_cpu.variable_instance.d.flatten()))))
                logger.log(99, ' y_diff:')
                for (v, v_cpu) in zip(seq.func.outputs, seq_cpu.func.outputs):
                    logger.log(99, (' current_context(%s.g)=%s' % (v.name, str(v.variable_instance.g.flatten()))))
                    logger.log(99, (' cpu(%s.g)=%s' % (v_cpu.name, str(v_cpu.variable_instance.g.flatten()))))
        # Solver update (with optional weight decay), then compare parameters.
        if (o.weight_decay > 0):
            o.solver.weight_decay(o.weight_decay)
            o_cpu.solver.weight_decay(o_cpu.weight_decay)
        o.solver.update()
        o_cpu.solver.update()
        for (i, ((v, lr), (v_cpu, lr_cpu))) in enumerate(zip(o.parameter_learning_rate_multipliers.items(), o_cpu.parameter_learning_rate_multipliers.items())):
            if (lr > 0):
                name = ('update (%s, %s)' % (o.solver.name, v.name))
                (norm_diff, std1, std2, diff_std) = calc_norm_diff(v.variable_instance.d, v_cpu.variable_instance.d)
                logger.log(99, ('%s, %f, %f, %f, %f' % (name, norm_diff, std1, std2, diff_std)))
                result_dict[name] = norm_diff
        result_array = add_result(result_name, result_dict, result_array)
    return result_array
class GradientAccumulator(object):
    """Accumulates gradients across multiple steps so they can be applied once
    per N batches (e.g. for large effective batch sizes)."""

    def __init__(self):
        # Per-variable accumulator tf.Variables, created lazily on first call.
        self._gradients = []
        # Step counter tf.Variable, created lazily via the `step` property.
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulation steps so far, creating the counter lazily.

        BUG FIX: this was a plain method in the unparsed source, but __call__
        reads it as `_ = self.step` (no call), so the counter was never created
        and the later `self._accum_steps.assign_add(1)` dereferenced None. The
        `@property` decorator was evidently stripped from the original source
        (other decorators in this file are stripped too); restored here.
        """
        if (self._accum_steps is None):
            self._accum_steps = tf.Variable(tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
        return self._accum_steps.value()

    def gradients(self):
        """Return the current accumulated gradient values (None entries kept).

        NOTE(review): in the upstream API this is also a property — confirm
        how callers invoke it before changing.
        """
        if (not self._gradients):
            raise ValueError('The accumulator should be called first to initialize the gradients')
        return list(((gradient.value() if (gradient is not None) else gradient) for gradient in self._gradients))

    def __call__(self, gradients):
        """Add *gradients* into the accumulators and bump the step counter."""
        if (not self._gradients):
            # Touching the property creates the step counter before first use.
            _ = self.step
            self._gradients.extend([(tf.Variable(tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA) if (gradient is not None) else gradient) for gradient in gradients])
        if (len(gradients) != len(self._gradients)):
            raise ValueError(('Expected %s gradients, but got %d' % (len(self._gradients), len(gradients))))
        for (accum_gradient, gradient) in zip(self._gradients, gradients):
            if ((accum_gradient is not None) and (gradient is not None)):
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Zero all accumulated gradients and the step counter (no-op if unused)."""
        if (not self._gradients):
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if (gradient is not None):
                gradient.assign(tf.zeros_like(gradient))
def sanity_check(state_dict, pretrained_weights, semi_supervised):
    """Verify that linear-eval training left the encoder weights untouched.

    Every non-fc tensor in *state_dict* must equal the corresponding
    'module.encoder_q.*' tensor in the pretrained checkpoint. Skipped entirely
    for semi-supervised runs (where the encoder is allowed to change).
    """
    if semi_supervised:
        print('SKIPPING SANITY CHECK for semi-supervised learning')
        return
    print("=> loading '{}' for sanity check".format(pretrained_weights))
    state_dict_pre = torch.load(pretrained_weights, map_location='cpu')['state_dict']
    for key in list(state_dict.keys()):
        # Only the linear classifier head is allowed to change.
        if 'fc.weight' in key or 'fc.bias' in key:
            continue
        # Map the linear-eval key onto the pretrained encoder_q namespace.
        stripped = key[len('module.'):] if key.startswith('module.') else key
        key_pre = 'module.encoder_q.' + stripped
        assert (state_dict[key].cpu() == state_dict_pre[key_pre]).all(), '{} is changed in linear classifier training.'.format(key)
    print('=> sanity check passed.')
def module_init():
    """Create and return the root binding module for ns3 click ('ns.click')."""
    return Module('ns.click', cpp_namespace='::ns3')
class Trainer():
    """Co-segmentation trainer: feeds image pairs through a two-branch net,
    optimizes cross-entropy on both outputs, and periodically evaluates
    pixel accuracy / Jaccard / precision on the validation set."""

    def __init__(self):
        # NOTE(review): `args` and `model` are module-level globals — confirm
        # they are defined before Trainer() is constructed.
        self.args = args
        self.input_transform = Compose([Resize((512, 512)), ToTensor(), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
        self.label_transform = Compose([Resize((512, 512)), CenterCrop(512), ToLabel(), Relabel()])
        self.net = model().cuda()
        self.net = nn.DataParallel(self.net, device_ids=self.args.gpu_ids)
        self.train_data_loader = DataLoader(coseg_train_dataset(self.args.train_data, self.args.train_label, self.args.train_txt, self.input_transform, self.label_transform), num_workers=self.args.num_worker, batch_size=self.args.batch_size, shuffle=True)
        self.val_data_loader = DataLoader(coseg_val_dataset(self.args.val_data, self.args.val_label, self.args.val_txt, self.input_transform, self.label_transform), num_workers=self.args.num_worker, batch_size=self.args.batch_size, shuffle=False)
        self.optimizer = optim.Adam(self.net.parameters(), lr=self.args.lr, weight_decay=self.args.weight_decay)
        self.loss_func = nn.CrossEntropyLoss()

    def pixel_accuracy(self, output, label):
        """Return (#correct, #wrong) pixel counts for one prediction."""
        correct = len(output[(output == label)])
        wrong = len(output[(output != label)])
        return (correct, wrong)

    def jaccard(self, output, label):
        """Return (intersection, union) foreground pixel counts."""
        temp = output[(label == 1)]
        i = len(temp[(temp == 1)])
        temp = (output + label)
        u = len(temp[(temp > 0)])
        return (i, u)

    def precision(self, output, label):
        """Return (#true positives, #predicted positives)."""
        temp = output[(label == 1)]
        tp = len(temp[(temp == 1)])
        p = len(output[(output > 0)])
        return (tp, p)

    def evaluate(self, net, epoch):
        """Run the validation loop; print and return
        (pixel accuracy, jaccard, precision).

        NOTE(review): the `net` and `epoch` parameters are unused (self.net is
        used directly); kept for interface compatibility with callers.
        """
        self.net.eval()
        correct = 0
        wrong = 0
        intersection = 0
        union = 0
        true_positive = 0
        # NOTE(review): positive starts at 1 (not 0), presumably to avoid a
        # zero division in the precision ratio — confirm the bias is intended.
        positive = 1
        for (i, (image1, image2, label1, label2)) in enumerate(self.val_data_loader):
            (image1, image2, label1, label2) = (image1.cuda(), image2.cuda(), label1.cuda(), label2.cuda())
            (output1, output2) = self.net(image1, image2)
            output1 = torch.argmax(output1, dim=1)
            output2 = torch.argmax(output2, dim=1)
            # Accumulate metrics over both branches of the pair.
            (c, w) = self.pixel_accuracy(output1, label1)
            correct += c
            wrong += w
            (i, u) = self.jaccard(output1, label1)
            intersection += i
            union += u
            (tp, p) = self.precision(output1, label1)
            true_positive += tp
            positive += p
            (c, w) = self.pixel_accuracy(output2, label2)
            correct += c
            wrong += w
            (i, u) = self.jaccard(output2, label2)
            intersection += i
            union += u
            (tp, p) = self.precision(output2, label2)
            true_positive += tp
            positive += p
        print('pixel accuracy: {} correct: {} wrong: {}'.format((correct / (correct + wrong)), correct, wrong))
        print('precision: {} true_positive: {} positive: {}'.format((true_positive / positive), true_positive, positive))
        print('jaccard score: {} intersection: {} union: {}'.format((intersection / union), intersection, union))
        self.net.train()
        return ((correct / (correct + wrong)), (intersection / union), (true_positive / positive))

    def train(self):
        """Main training loop with periodic evaluation and checkpointing."""
        for epoch in range(self.args.epoches):
            losses = []
            for (i, (image1, image2, label1, label2)) in enumerate(self.train_data_loader):
                (image1, image2, label1, label2) = (image1.cuda(), image2.cuda(), label1.cuda(), label2.cuda())
                (output1, output2) = self.net(image1, image2)
                # Sum the per-branch cross-entropy losses.
                loss = self.loss_func(output1, label1)
                loss += self.loss_func(output2, label2)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                losses.append(loss.data.cpu().numpy())
                if ((i % 2000) == 0):
                    print('')
                    # BUG FIX: the mean loss was passed to format() but the
                    # template had only 3 placeholders for 4 arguments, so the
                    # loss value was silently dropped — added the placeholder.
                    print('epoch{} iter {}/{} BCE loss: {}'.format(epoch, i, len(self.train_data_loader), np.mean(losses)))
                    print('testing......')
                    (acc, jac, pre) = self.evaluate(self.net, epoch)
                if (((i % 2000) == 0) and (i != 0)):
                    # BUG FIX: the original used `'epoch{}iter{}.pkl' % (epoch, i)`,
                    # which raises TypeError ({}-templates must use str.format).
                    torch.save(self.net.state_dict(), 'epoch{}iter{}.pkl'.format(epoch, i))
def rebuild_val_unit_col(valid_col_units, val_unit, kmap):
    """Rebuild both column units inside *val_unit* against the valid columns
    and foreign-key map; passes None through unchanged."""
    if val_unit is None:
        return val_unit
    unit_op, first_col, second_col = val_unit
    return (
        unit_op,
        rebuild_col_unit_col(valid_col_units, first_col, kmap),
        rebuild_col_unit_col(valid_col_units, second_col, kmap),
    )
def create_tensorkey_dicts(tensor_dict, metric_dict, col_name, round_num, logger, tensor_dict_split_fn_kwargs):
    """Package metrics and the split model tensors into TensorKey-indexed dicts.

    Returns (global_tensor_dict, local_tensor_dict): the global dict carries
    the metrics plus the shared ('trained') model tensors; the local dict
    carries the holdout tensors for this round and, re-tagged as 'model',
    for the next round.
    """
    origin = col_name
    trained_tags = ('trained',)
    metric_tensors = {
        TensorKey(metric_name, origin, round_num, True, ('metric',)): np.array(value)
        for metric_name, value in metric_dict.items()
    }
    (global_model_dict, local_model_dict) = split_tensor_dict_for_holdouts(
        logger, tensor_dict, **tensor_dict_split_fn_kwargs)

    def keyed(model_dict, rnd, tags):
        # Re-key raw arrays as TensorKeys for the given round and tags.
        return {TensorKey(name, origin, rnd, False, tags): arr
                for name, arr in model_dict.items()}

    global_tensor_dict = {**metric_tensors, **keyed(global_model_dict, round_num, trained_tags)}
    local_tensor_dict = {**keyed(local_model_dict, round_num, trained_tags),
                         **keyed(local_model_dict, round_num + 1, ('model',))}
    return (global_tensor_dict, local_tensor_dict)
class BlockGather(MPINode):
    """MPI library node that gathers block-distributed data into one buffer.

    NOTE(review): library-node registration decorators appear stripped from
    this file; confirm against the original dace source.
    """
    implementations = {'MPI': ExpandBlockGatherMPI}
    default_implementation = 'MPI'

    # Name of the subarray descriptor describing the distributed blocks.
    subarray_type = properties.Property(dtype=str, default='tmp')
    # Name of the process grid on which the gather is performed.
    gather_grid = properties.Property(dtype=str, default='tmp')
    # Optional process grid for a follow-up reduction.
    reduce_grid = properties.Property(dtype=str, allow_none=True, default=None)

    def __init__(self, name, subarray_type='tmp', gather_grid='tmp', reduce_grid=None, *args, **kwargs):
        super().__init__(name, *args, inputs={'_inp_buffer'}, outputs={'_out_buffer'}, **kwargs)
        self.subarray_type = subarray_type
        self.gather_grid = gather_grid
        self.reduce_grid = reduce_grid

    def validate(self, sdfg, state):
        """Resolve and return the (input, output) array descriptors connected
        to this node's buffer connectors (no validation errors are raised here)."""
        (inp_buffer, out_buffer) = (None, None)
        for e in state.out_edges(self):
            if (e.src_conn == '_out_buffer'):
                out_buffer = sdfg.arrays[e.data.data]
        for e in state.in_edges(self):
            if (e.dst_conn == '_inp_buffer'):
                inp_buffer = sdfg.arrays[e.data.data]
        return (inp_buffer, out_buffer)
class NimbleInferenceWrapper(EventSynchronizedInferenceWrapperBase):
    """Event-synchronized inference wrapper around a Nimble-compiled model.

    NOTE(review): torch.cuda.Nimble is not part of stock PyTorch — this
    assumes the Nimble fork/extension is installed; confirm the environment.
    """

    def __init__(self, model, dummy_input, use_multi_stream):
        super(NimbleInferenceWrapper, self).__init__()
        self.nimble_model = torch.cuda.Nimble(model)
        # Trace/compile the model once with a representative input.
        self.nimble_model.prepare(dummy_input, use_multi_stream=use_multi_stream)
        # Pre-fill the captured graph's input buffer; launch() replays the graph.
        self.nimble_model.forward_graph.inputs[0].copy_(dummy_input)

    def launch(self):
        """Replay the prepared forward graph asynchronously."""
        self.nimble_model.launch()

    def get_output(self):
        """Copy the graph's first output to host memory as a NumPy array."""
        return self.nimble_model.forward_graph.outputs[0].cpu().numpy()
class Graph(Model):
    """Tensorpack model: rotation-equivariant segmentation net with a 2-class
    softmax head, cross-entropy ('bce') loss and L2 weight decay."""

    def _build_graph(self, inputs):
        is_training = get_current_tower_context().is_training
        (images, truemap_coded) = inputs
        orig_imgs = images
        # Channel 0 of the coded truth map is the binary label map.
        true = truemap_coded[(..., 0)]
        true = tf.cast(true, tf.int32)
        true = tf.identity(true, name='truemap')
        one_hot = tf.one_hot(true, 2, axis=(- 1))
        true = tf.expand_dims(true, axis=(- 1))
        with argscope(Conv2D, activation=tf.identity, use_bias=False, W_init=tf.variance_scaling_initializer(scale=2.0, mode='fan_out')), argscope([Conv2D], data_format=self.data_format):
            # Optionally normalize inputs to [0, 1].
            i = (images if (not self.input_norm) else (images / 255.0))
            feat = net('net', i, self.basis_filter_list, self.rot_matrix_list, self.nr_orients, self.filter_type, is_training)
            # 1x1 conv head producing 2-class logits.
            o_logi = Conv2D('output', feat, 2, 1, use_bias=True, nl=tf.identity)
            soft = tf.nn.softmax(o_logi, axis=(- 1))
            prob = tf.identity(soft, name='predmap-prob')
            predmap_coded = tf.concat(prob, axis=(- 1), name='predmap-coded')
            if get_current_tower_context().is_training:
                # Weighted sum of the configured loss terms (only 'bce' supported).
                loss = 0
                for (term, weight) in self.loss_term.items():
                    if (term == 'bce'):
                        term_loss = categorical_crossentropy(soft, one_hot)
                        term_loss = tf.reduce_mean(term_loss, name='loss-bce')
                    else:
                        assert False, ('Not support loss term: %s' % term)
                    add_moving_summary(term_loss)
                    loss += (term_loss * weight)
                # L2 regularization over all weight matrices.
                wd_loss = regularize_cost('.*/W', l2_regularizer(1e-07), name='l2_wd_loss')
                add_moving_summary(wd_loss)
                self.cost = tf.identity((loss + wd_loss), name='overall-loss')
                add_moving_summary(self.cost)
                add_param_summary(('.*/W', ['histogram']))
                orig_imgs = tf.cast(orig_imgs, tf.uint8)
                tf.summary.image('input', orig_imgs, max_outputs=1)
        return
def get_global_memlet_path_src(sdfg: SDFG, state: SDFGState, edge: MultiConnectorEdge) -> nd.Node:
    """Trace a memlet path to its true source across nested-SDFG boundaries.

    If the path starts at a non-transient access node inside a nested SDFG,
    follow the matching connector on the parent NestedSDFG node (recursively,
    mutually with get_global_memlet_path_dst for out-connectors); otherwise
    return the local source node.
    """
    src = state.memlet_path(edge)[0].src
    # Only non-transient data in a nested SDFG can originate outside it.
    if (isinstance(src, nd.AccessNode) and (not sdfg.arrays[src.data].transient) and (sdfg.parent is not None)):
        psdfg = sdfg.parent_sdfg
        pstate = sdfg.parent
        pnode = sdfg.parent_nsdfg_node
        pedges = list(pstate.in_edges_by_connector(pnode, src.data))
        if (len(pedges) > 0):
            # Data flows into the nested SDFG: keep tracing upward on the source side.
            pedge = pedges[0]
            return get_global_memlet_path_src(psdfg, pstate, pedge)
        else:
            # Otherwise the data leaves through an out-connector: trace its destination.
            pedges = list(pstate.out_edges_by_connector(pnode, src.data))
            if (len(pedges) > 0):
                pedge = pedges[0]
                return get_global_memlet_path_dst(psdfg, pstate, pedge)
    return src
def load_replay_buffer(agent, load_path=None):
    """Fill the agent's replay buffer, either from a D4RL-style dataset (when
    the configured env is in DATASET_NAMES) or from a saved buffer at
    *load_path* (required in that case)."""
    env_name = agent.config.other_args['env']
    if env_name in DATASET_NAMES:
        dummy_env = gym.make(env_name)
        raw = dummy_env.get_dataset()
        dummy_env.close()
        # Build (s, a, r, s', done) transitions by pairing consecutive observations.
        transitions = (
            raw['observations'][:-1],
            raw['actions'][:-1],
            raw['rewards'][:-1].reshape(-1, 1),
            raw['observations'][1:],
            raw['terminals'][:-1].reshape(-1, 1),
        )
        agent.replay_buffer.buffer.add_batch(*transitions)
    else:
        assert load_path is not None
        agent.replay_buffer.load(load_path)
def tensor_to_img(tensor, transpose=False):
    """Convert a float tensor in [0, 1] to a uint8 image array.

    Singleton dimensions are squeezed out; with transpose=True the result is
    rearranged from CHW to HWC for display.
    """
    scaled = np.squeeze(tensor.numpy()) * 255
    image = np.clip(scaled, 0, 255).astype(np.uint8)
    if transpose:
        # CHW -> HWC
        image = image.transpose((1, 2, 0))
    return image
def split_by_parents(self, valid_names: 'ItemList') -> 'ItemLists':
    """Split items into train/valid by whether each item's parent folder name
    appears in *valid_names*."""
    def in_valid_folder(item):
        return item.parent.name in valid_names
    return self.split_by_valid_func(in_valid_folder)
def build_detection_test_loader(cfg, dataset_name, mapper=None):
    """Build a detectron2 test data loader for *dataset_name* with DensePose
    metadata (category whitelists/maps, mesh-name maps) attached.

    Proposal files are loaded only when cfg.MODEL.LOAD_PROPOSALS is set; a
    sequential sampler is used unless distributed inference is enabled.
    """
    _add_category_whitelists_to_metadata(cfg)
    _add_category_maps_to_metadata(cfg)
    _maybe_add_class_to_mesh_name_map_to_metadata([dataset_name], cfg)
    # Pick the proposal file matching this dataset's position in cfg.DATASETS.TEST.
    dataset_dicts = combine_detection_dataset_dicts([dataset_name], keep_instance_predicate=_get_test_keep_instance_predicate(cfg), proposal_files=([cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)]] if cfg.MODEL.LOAD_PROPOSALS else None))
    sampler = None
    if (not cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE):
        # Non-distributed evaluation iterates samples in order.
        sampler = torch.utils.data.SequentialSampler(dataset_dicts)
    if (mapper is None):
        mapper = DatasetMapper(cfg, False)
    return d2_build_detection_test_loader(dataset_dicts, mapper=mapper, num_workers=cfg.DATALOADER.NUM_WORKERS, sampler=sampler)
def from_config(model, control_params, env):
    """Instantiate a controller from a config dict.

    'control_type' selects the class from CONTROL_MAP; the remaining entries
    are forwarded as keyword arguments. The input dict is not mutated.
    """
    params = dict(control_params)
    controller_cls = CONTROL_MAP[params.pop('control_type')]
    return controller_cls(model, env, **params)
class MTask(nn.Module):
def __init__(self, vision, audio):
super(MTask, self).__init__()
self.vision = vision
self.audio = audio
self.avc = nn.Sequential(nn.Linear(1024, 128), nn.ReLU(True), nn.Linear(128, 2))
self.class_a = nn.Conv2d(512, 7, 1, bias=False)
self.class_v = nn.Conv2d(512, 7, 1, bias=False)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
def forward(self, spec, img):
N = spec.shape[0]
feat_a = self.audio(spec)
feat_v = self.vision(img)
feat_a = self.avgpool(feat_a)
feat_v = self.avgpool(feat_v)
fusion = torch.cat([feat_a.unsqueeze(1).repeat([1, N, 1, 1, 1]), feat_v.unsqueeze(0).repeat([N, 1, 1, 1, 1])], 2)
fusion = torch.flatten(fusion, 2)
avc = self.avc(fusion)
cls_a = self.class_a(feat_a)
cls_v = self.class_v(feat_v)
return (avc, cls_a.flatten(1), cls_v.flatten(1)) |
def reinit_layer_(layer: torch.nn.Module, nonlinearity='relu'):
    """Re-initialize *layer* in place.

    Biases are zeroed; weights are re-drawn with Kaiming-uniform for
    relu/leaky_relu, Xavier-uniform with sigmoid gain for glu, and
    Xavier-uniform with the nonlinearity's own gain otherwise. Raises
    TypeError for parameters named neither 'bias*' nor 'weight*'.
    """
    nl = nonlinearity.lower()
    for name, param in layer.named_parameters():
        if name.startswith('bias'):
            torch.nn.init.zeros_(param.data)
        elif name.startswith('weight'):
            if nl in ('relu', 'leaky_relu'):
                torch.nn.init.kaiming_uniform_(param.data, nonlinearity=nonlinearity)
            elif nl in ('glu',):
                # GLU halves are gated by a sigmoid, hence the sigmoid gain.
                torch.nn.init.xavier_uniform_(param.data, gain=torch.nn.init.calculate_gain('sigmoid'))
            else:
                torch.nn.init.xavier_uniform_(param.data, gain=torch.nn.init.calculate_gain(nonlinearity))
        else:
            raise TypeError(f'Invalid Layer {layer}')
def blink(clip, d_on, d_off):
    """Make the clip blink: fully visible for ``d_on`` seconds, then fully
    invisible for ``d_off`` seconds, repeating over the whole duration.

    Returns a shallow copy of ``clip`` with its mask gated; the input clip
    is left untouched. A mask is attached first if the clip has none.
    """
    out = copy.copy(clip)
    if out.mask is None:
        out = out.with_mask()
    period = d_on + d_off

    def _gate(get_frame, t):
        # Zero the mask outside the "on" window of each period.
        return get_frame(t) * ((t % period) < d_on)

    out.mask = out.mask.fl(_gate)
    return out
def SO(n, R, e=None, var='a', invariant_form=None):
    """Return the special orthogonal group SO(n) over the ring ``R``.

    Thin wrapper around ``_OG`` with the "special" flag set; all remaining
    options are forwarded unchanged.
    """
    options = {'e': e, 'var': var, 'invariant_form': invariant_form}
    return _OG(n, R, True, **options)
from dataclasses import dataclass  # required by the (restored) @dataclass below


@dataclass
class FloatVector():
    """A mutable 2-D float vector with protobuf (de)serialization.

    NOTE(review): the ``@dataclass`` decorator was restored — the class has
    bare annotated fields and no ``__init__``, yet ``__add__``/``__mul__``/
    ``from_protobuf`` construct ``FloatVector(x=..., y=...)`` with keywords,
    which only works with the dataclass-generated constructor.
    """

    # Component values; annotated np.float32, plain floats work as well.
    x: np.float32
    y: np.float32

    def to_protobuf(self) -> 'pb.FloatVector':
        """Serialize this vector into a ``pb.FloatVector`` message."""
        vector = pb.FloatVector()
        vector.x = self.x
        vector.y = self.y
        assert vector.IsInitialized()
        return vector

    @staticmethod
    def from_protobuf(vector: 'pb.FloatVector') -> 'FloatVector':
        """Build a FloatVector from a ``pb.FloatVector`` message.

        Marked ``@staticmethod``: the original bare ``def`` would have
        received the instance instead of the message when called on one.
        """
        return FloatVector(x=vector.x, y=vector.y)

    def __add__(self, other: 'FloatVector') -> 'FloatVector':
        """Component-wise addition; returns a new vector."""
        return FloatVector(x=(self.x + other.x), y=(self.y + other.y))

    def __mul__(self, scaler: float) -> 'FloatVector':
        """Scalar multiplication; returns a new vector."""
        return FloatVector(x=(self.x * scaler), y=(self.y * scaler))

    def __iter__(self) -> Iterator[float]:
        """Yield x then y, enabling ``tuple(v)`` and unpacking."""
        yield from (self.x, self.y)

    def coord(self) -> Tuple[float, float]:
        """Return the ``(x, y)`` coordinate pair."""
        return (self.x, self.y)

    def scale(self, width_factor: float = 1, height_factor: float = 1) -> None:
        """Scale the components in place by the given factors."""
        self.x *= width_factor
        self.y *= height_factor
def process_main(device, eval_mode: bool, enable_render: bool, queue):
    """Worker loop: consume experiment tasks from ``queue`` and run them.

    Pins the worker to one GPU by setting ``CUDA_VISIBLE_DEVICES`` in a
    copied environment (the parent process environment is not modified).
    An empty task acts as the shutdown sentinel.
    """
    worker_env = dict(os.environ)
    worker_env['CUDA_VISIBLE_DEVICES'] = str(device)
    while True:
        task = queue.get()
        if not len(task):
            break  # empty task == sentinel: stop this worker
        run_exp(worker_env, eval_mode, enable_render, **task)
def job_fssdJ1q_med(p, data_source, tr, te, r, J=1, null_sim=None):
    """Run an FSSD goodness-of-fit test with a Gaussian kernel at the
    median-heuristic bandwidth and ``J`` test locations drawn from a
    Gaussian fitted to the data.

    Returns a dict with the test object, its result, and the wall-clock
    seconds spent on bandwidth selection, location fitting, and testing.
    """
    if null_sim is None:
        null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=r)
    data = tr + te
    X = data.data()
    with util.ContextTimer() as timer:
        # Median pairwise distance (on a subsample) sets the kernel bandwidth.
        bandwidth = util.meddistance(X, subsample=1000)
        gauss_kernel = kernel.KGauss(bandwidth ** 2)
        locations = util.fit_gaussian_draw(X, J, seed=r + 3)
        # NOTE(review): `alpha` is read from module scope — confirm it is defined there.
        fssd_test = gof.FSSD(p, gauss_kernel, locations, null_sim=null_sim, alpha=alpha)
        outcome = fssd_test.perform_test(data)
    return {'goftest': fssd_test, 'test_result': outcome, 'time_secs': timer.secs}
# NOTE(review): the tuple below looks like the argument list of a Dash
# callback decorator (e.g. `@app.callback(Output(...), Input(...))`) whose
# `@app.callback` prefix has been lost — confirm against the original file.
(Output('clustering-parsing-param-table', 'children'), Input('parsing-algo-select', 'value'))
def select_parsing_algorithm(algorithm):
    """Render a parameter table for the selected log-parsing algorithm.

    Looks up the algorithm's parameter metadata via LogPattern and turns it
    into table components with create_param_table.
    """
    param_info = LogPattern().get_parameter_info(algorithm)
    param_table = create_param_table(param_info)
    return param_table
def bcs(CG1, geometry):
    """Homogeneous Dirichlet boundary conditions on boundary markers 1..4.

    Pins the ``CG1`` function space to zero on the four marked boundary
    parts of ``geometry`` via cashocs' helper.
    """
    boundary_ids = [1, 2, 3, 4]
    zero = Constant(0)
    return cashocs.create_dirichlet_bcs(CG1, zero, geometry.boundaries, boundary_ids)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.