code stringlengths 101 5.91M |
|---|
def run_bipartite_example():
    """Run ALAAM estimation on the Inouye-Pyke pollinator bipartite network.

    Uses mode-A change statistics, the mode-A bipartite ALAAM sampler, and
    holds mode B fixed for goodness-of-fit.
    """
    change_stat_funcs = [
        partial(changeBipartiteDensity, MODE_A),
        partial(changeBipartiteActivity, MODE_A),
        partial(changeBipartiteEgoTwoStar, MODE_A),
        partial(changeBipartiteAlterTwoStar1, MODE_A),
        partial(changeBipartiteAlterTwoStar2, MODE_A),
        partial(changeBipartiteFourCycle1, MODE_A),
        partial(changeBipartiteFourCycle2, MODE_A),
    ]
    param_names = [
        'bipartiteDensityA',
        'bipartiteActivityA',
        'bipartiteEgoTwoStarA',
        'bipartiteAlterTwoStar1A',
        'bipartiteAlterTwoStar2A',
        'bipartiteFourCycle1A',
        'bipartiteFourCycle2A',
    ]
    run_on_network_attr(
        '../data/bipartite/Inouye_Pyke_pollinator_web/inouye_bipartite.net',
        change_stat_funcs,
        param_names,
        '../data/bipartite/Inouye_Pyke_pollinator_web/inouye_outcome_BNA.txt',
        sampler_func=partial(bipartiteALAAMsampler, MODE_A),
        bipartite=True,
        bipartiteGoFfixedMode=MODE_B,
    )
def test_scalar_write_shadow_fused():
    """Two sequential loops share the transient scalar `tmp`; the
    ScalarWriteShadowScopes pass must scope each write/read pair of `tmp`
    to its own loop state, while `A` and `B` stay in the global (None) scope.
    """
    sdfg = dace.SDFG('scalar_fused')
    N = dace.symbol('N')
    sdfg.add_array('A', [N], dace.int32)
    sdfg.add_array('B', [N], dace.int32)
    # Transient scalar reused (shadowed) by both loop bodies.
    sdfg.add_array('tmp', [1], dace.int32, transient=True)
    # State machine: init -> guard_1/loop_1 -> intermediate -> guard_2/loop_2 -> end.
    init_state = sdfg.add_state('init')
    guard_1 = sdfg.add_state('guard_1')
    loop_1 = sdfg.add_state('loop_1')
    intermediate = sdfg.add_state('intermediate')
    guard_2 = sdfg.add_state('guard_2')
    loop_2 = sdfg.add_state('loop_2')
    end_state = sdfg.add_state('end')
    # Initialize tmp = 0 before the first loop.
    init_tasklet = init_state.add_tasklet('init', {}, {'out'}, 'out = 0')
    init_write = init_state.add_write('tmp')
    init_state.add_edge(init_tasklet, 'out', init_write, None, dace.Memlet('tmp[0]'))
    # Loop 1 body: tmp = A[i] * B[i]; A[i] = A[i+1] + 2*tmp; B[i] = B[i+1] - 2*tmp.
    tmp1_tasklet = loop_1.add_tasklet('tmp1', {'a', 'b'}, {'out'}, 'out = a * b')
    loop1_tasklet_1 = loop_1.add_tasklet('loop1_1', {'ap', 't'}, {'a'}, 'a = ap + 2 * t')
    loop1_tasklet_2 = loop_1.add_tasklet('loop1_2', {'bp', 't'}, {'b'}, 'b = bp - 2 * t')
    # Single access node that is both written (by tmp1) and read (by the others).
    tmp1_read_write = loop_1.add_access('tmp')
    a1_read = loop_1.add_read('A')
    b1_read = loop_1.add_read('B')
    a1_write = loop_1.add_write('A')
    b1_write = loop_1.add_write('B')
    loop_1.add_edge(a1_read, None, tmp1_tasklet, 'a', dace.Memlet('A[i]'))
    loop_1.add_edge(b1_read, None, tmp1_tasklet, 'b', dace.Memlet('B[i]'))
    loop_1.add_edge(tmp1_tasklet, 'out', tmp1_read_write, None, dace.Memlet('tmp[0]'))
    loop_1.add_edge(tmp1_read_write, None, loop1_tasklet_1, 't', dace.Memlet('tmp[0]'))
    loop_1.add_edge(tmp1_read_write, None, loop1_tasklet_2, 't', dace.Memlet('tmp[0]'))
    loop_1.add_edge(a1_read, None, loop1_tasklet_1, 'ap', dace.Memlet('A[i + 1]'))
    loop_1.add_edge(b1_read, None, loop1_tasklet_2, 'bp', dace.Memlet('B[i + 1]'))
    loop_1.add_edge(loop1_tasklet_1, 'a', a1_write, None, dace.Memlet('A[i]'))
    loop_1.add_edge(loop1_tasklet_2, 'b', b1_write, None, dace.Memlet('B[i]'))
    # Loop 2 body: tmp = A[i+1] / B[i+1]; A[i+1] = A[i] + tmp*tmp; B[i+1] = B[i] - tmp*tmp.
    tmp2_tasklet = loop_2.add_tasklet('tmp2', {'a', 'b'}, {'out'}, 'out = a / b')
    loop2_tasklet_1 = loop_2.add_tasklet('loop2_1', {'ap', 't'}, {'a'}, 'a = ap + t * t')
    loop2_tasklet_2 = loop_2.add_tasklet('loop2_2', {'bp', 't'}, {'b'}, 'b = bp - t * t')
    tmp2_read_write = loop_2.add_access('tmp')
    a2_read = loop_2.add_read('A')
    b2_read = loop_2.add_read('B')
    a2_write = loop_2.add_write('A')
    b2_write = loop_2.add_write('B')
    loop_2.add_edge(a2_read, None, tmp2_tasklet, 'a', dace.Memlet('A[i + 1]'))
    loop_2.add_edge(b2_read, None, tmp2_tasklet, 'b', dace.Memlet('B[i + 1]'))
    loop_2.add_edge(tmp2_tasklet, 'out', tmp2_read_write, None, dace.Memlet('tmp[0]'))
    loop_2.add_edge(tmp2_read_write, None, loop2_tasklet_1, 't', dace.Memlet('tmp[0]'))
    loop_2.add_edge(tmp2_read_write, None, loop2_tasklet_2, 't', dace.Memlet('tmp[0]'))
    loop_2.add_edge(a2_read, None, loop2_tasklet_1, 'ap', dace.Memlet('A[i]'))
    loop_2.add_edge(b2_read, None, loop2_tasklet_2, 'bp', dace.Memlet('B[i]'))
    loop_2.add_edge(loop2_tasklet_1, 'a', a2_write, None, dace.Memlet('A[i + 1]'))
    loop_2.add_edge(loop2_tasklet_2, 'b', b2_write, None, dace.Memlet('B[i + 1]'))
    # Interstate edges: both loops run i = 0 .. N-2.
    sdfg.add_edge(init_state, guard_1, dace.InterstateEdge(assignments={'i': 0}))
    sdfg.add_edge(guard_1, loop_1, dace.InterstateEdge(condition='i < (N - 1)'))
    sdfg.add_edge(loop_1, guard_1, dace.InterstateEdge(assignments={'i': 'i + 1'}))
    sdfg.add_edge(guard_1, intermediate, dace.InterstateEdge(condition='i >= (N - 1)'))
    sdfg.add_edge(intermediate, guard_2, dace.InterstateEdge(assignments={'i': 0}))
    sdfg.add_edge(guard_2, loop_2, dace.InterstateEdge(condition='i < (N - 1)'))
    sdfg.add_edge(loop_2, guard_2, dace.InterstateEdge(assignments={'i': 'i + 1'}))
    sdfg.add_edge(guard_2, end_state, dace.InterstateEdge(condition='i >= (N - 1)'))
    pipeline = Pipeline([ScalarWriteShadowScopes()])
    results = pipeline.apply_pass(sdfg, {})[ScalarWriteShadowScopes.__name__]
    # Each tmp write shadows only reads within its own loop state.
    assert (results[0]['tmp'][(loop_1, tmp1_read_write)] == {(loop_1, tmp1_read_write)})
    assert (results[0]['tmp'][(loop_2, tmp2_read_write)] == {(loop_2, tmp2_read_write)})
    # A and B have no dominating write inside the SDFG -> global scope (None).
    assert (results[0]['A'][None] == {(loop_1, a1_read), (loop_2, a2_read), (loop_1, a1_write), (loop_2, a2_write)})
    assert (results[0]['B'][None] == {(loop_1, b1_read), (loop_2, b2_read), (loop_1, b1_write), (loop_2, b2_write)})
class FlipAugmenter(dptspatialaugmenterbase.SpatialAugmenterBase):
    """Spatial augmenter that flips patches vertically, horizontally, or both."""

    def __init__(self, flip_list):
        """Store the allowed flip modes; the first entry becomes the active mode."""
        super().__init__(keyword='flip')
        self.__flip_list = []
        self.__flip = None
        self.__setfliplist(flip_list=flip_list)

    def __setfliplist(self, flip_list):
        """Validate the flip mode list and select its first entry."""
        valid_modes = {'none', 'vertical', 'horizontal', 'both'}
        if not set(flip_list) <= valid_modes:
            raise Exception('InvalidFlipListError(flip_list)')
        self.__flip_list = flip_list
        self.__flip = self.__flip_list[0]

    def transform(self, patch):
        """Flip the patch according to the active mode.

        The patch is moved to channels-last layout for the flips, then moved
        back (presumably patch is channels-first (C, H, W) — confirm upstream).
        """
        mode = self.__flip
        hwc = np.transpose(a=patch, axes=(1, 2, 0))
        if mode == 'none':
            flipped = hwc
        elif mode == 'vertical':
            flipped = np.flipud(hwc)
        elif mode == 'horizontal':
            flipped = np.fliplr(hwc)
        elif mode == 'both':
            flipped = np.fliplr(np.flipud(hwc))
        else:
            raise Exception('InvalidFlipMode(self.__flip)')
        return np.transpose(a=flipped, axes=(2, 0, 1))

    def randomize(self):
        """Pick a new active flip mode uniformly from the configured list."""
        self.__flip = np.random.choice(a=self.__flip_list, size=None)
class BrewTest(unittest.TestCase):
    """Tests for the caffe2 `brew` helper registry and ModelHelper behavior."""

    def setUp(self):
        # Register two trivial helpers so the arg_scope/registration tests
        # below have known entry points. The has_helper guard avoids the
        # double-registration error when setUp runs for every test.
        def myhelper(model, val=(- 1)):
            return val
        if (not brew.has_helper(myhelper)):
            brew.Register(myhelper)
        self.myhelper = myhelper
        def myhelper2(model, val=(- 1)):
            return val
        if (not brew.has_helper(myhelper2)):
            brew.Register(myhelper2)
        self.myhelper2 = myhelper2
        self.model = ModelHelper(name='test_model')

    def test_dropout(self):
        # With dropout probability p, the mean of an all-(1-p) input stays
        # close to (1-p) after inverted dropout.
        p = 0.2
        X = (np.ones((100, 100)).astype(np.float32) - p)
        workspace.FeedBlob('x', X)
        model = ModelHelper(name='test_model')
        brew.dropout(model, 'x', 'out', is_test=False)
        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)
        out = workspace.FetchBlob('out')
        self.assertLess(abs((out.mean() - (1 - p))), 0.05)

    def test_fc(self):
        # Smoke test: fc builds and runs without error.
        (m, n, k) = (15, 15, 15)
        X = (np.random.rand(m, k).astype(np.float32) - 0.5)
        workspace.FeedBlob('x', X)
        model = ModelHelper(name='test_model')
        brew.fc(model, 'x', 'out_1', k, n)
        model.Validate()
        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)

    def test_relu(self):
        # Positive inputs pass through; negative inputs clamp to zero.
        Xpos = (np.ones((5, 5)).astype(np.float32) - 0.5)
        Xneg = (np.ones((5, 5)).astype(np.float32) - 1.5)
        workspace.FeedBlob('xpos', Xpos)
        workspace.FeedBlob('xneg', Xneg)
        model = ModelHelper(name='test_model')
        brew.relu(model, 'xpos', 'out_xpos')
        brew.relu(model, 'xneg', 'out_xneg')
        model.Validate()
        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)
        pos = workspace.FetchBlob('out_xpos')
        self.assertAlmostEqual(pos.mean(), 0.5)
        neg = workspace.FetchBlob('out_xneg')
        self.assertAlmostEqual(neg.mean(), 0)

    def test_tanh(self):
        X = (np.ones((5, 5)).astype(np.float32) - 0.5)
        workspace.FeedBlob('x', X)
        model = ModelHelper(name='test_model')
        brew.tanh(model, 'x', 'out_tanh')
        model.Validate()
        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)
        out = workspace.FetchBlob('out_tanh')
        self.assertAlmostEqual(out.mean(), np.tanh(0.5), places=5)

    def test_validate(self):
        # _Validate returns the list of duplicated parameter names.
        model = ModelHelper(name='test_model')
        model.params.append('aaa')
        model.params.append('bbb')
        self.assertEqual(model._Validate(), [])
        model.params.append('xxx')
        model.params.append('bbb')
        self.assertEqual(model._Validate(), ['bbb'])

    def test_arg_scope(self):
        # arg_scope overrides the default `val` for the registered helpers.
        myhelper = self.myhelper
        myhelper2 = self.myhelper2
        n = 15
        with brew.arg_scope([myhelper], val=n):
            res = brew.myhelper(self.model)
        self.assertEqual(n, res)
        with brew.arg_scope([myhelper, myhelper2], val=n):
            res1 = brew.myhelper(self.model)
            res2 = brew.myhelper2(self.model)
        self.assertEqual([n, n], [res1, res2])

    def test_arg_scope_single(self):
        X = (np.random.rand(64, 3, 32, 32).astype(np.float32) - 0.5)
        workspace.FeedBlob('x', X)
        model = ModelHelper(name='test_model')
        # Scope supplies conv defaults (stride/pad/initializers) for the call.
        with brew.arg_scope(brew.conv, stride=2, pad=2, weight_init=('XavierFill', {}), bias_init=('ConstantFill', {})):
            brew.conv(model=model, blob_in='x', blob_out='out', dim_in=3, dim_out=64, kernel=3)
        model.Validate()
        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)
        out = workspace.FetchBlob('out')
        self.assertEqual(out.shape, (64, 64, 17, 17))

    def test_arg_scope_nested(self):
        # Inner scopes shadow outer ones; explicit kwargs beat all scopes.
        myhelper = self.myhelper
        n = 16
        with brew.arg_scope([myhelper], val=(- 3)), brew.arg_scope([myhelper], val=(- 2)):
            with brew.arg_scope([myhelper], val=n):
                res = brew.myhelper(self.model)
                self.assertEqual(n, res)
            res = brew.myhelper(self.model)
            self.assertEqual(res, (- 2))
        res = brew.myhelper(self.model, val=15)
        self.model.Validate()
        self.assertEqual(res, 15)

    def test_double_register(self):
        # Re-registering an existing helper must fail loudly.
        myhelper = self.myhelper
        with self.assertRaises(AttributeError):
            brew.Register(myhelper)

    def test_has_helper(self):
        # has_helper accepts either the function object or its name.
        self.assertTrue(brew.has_helper(brew.conv))
        self.assertTrue(brew.has_helper('conv'))
        def myhelper3():
            pass
        self.assertFalse(brew.has_helper(myhelper3))

    def test_model_helper(self):
        # NHWC ordering via the model-level arg_scope; mixed kernel sizes.
        X = (np.random.rand(64, 32, 32, 3).astype(np.float32) - 0.5)
        workspace.FeedBlob('x', X)
        my_arg_scope = {'order': 'NHWC'}
        model = ModelHelper(name='test_model', arg_scope=my_arg_scope)
        with brew.arg_scope(brew.conv, stride=2, pad=2, weight_init=('XavierFill', {}), bias_init=('ConstantFill', {})):
            brew.conv(model=model, blob_in='x', blob_out='out', dim_in=3, dim_out=64, kernel=[8, 3])
        model.Validate()
        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)
        out = workspace.FetchBlob('out')
        self.assertEqual(out.shape, (64, 15, 17, 64))

    def test_cnn_model_helper_deprecated(self):
        X = (np.random.rand(64, 32, 32, 3).astype(np.float32) - 0.5)
        workspace.FeedBlob('x', X)
        # The deprecated CNNModelHelper should forward `order` into arg_scope.
        model = CNNModelHelper(name='test_model', order='NHWC')
        self.assertEqual(model.arg_scope['order'], 'NHWC')

    def test_get_params(self):
        def param(x):
            return core.ScopedBlobReference(x)
        def to_str_list(x):
            return sorted([str(p) for p in x])
        model = ModelHelper(name='test_model')
        model.AddParameter(param('a'))
        model.AddParameter(param('b'), tags=ParameterTags.COMPUTED_PARAM)
        with scope.NameScope('c'):
            model.AddParameter(param('a'))
            model.AddParameter(param('d'), tags=ParameterTags.COMPUTED_PARAM)
            # Inside the name scope only the scoped params are visible...
            self.assertEqual(to_str_list(model.GetParams()), ['c/a'])
            self.assertEqual(to_str_list(model.GetComputedParams()), ['c/d'])
            self.assertEqual(to_str_list(model.GetAllParams()), ['c/a', 'c/d'])
            # ...unless an explicit empty namescope is requested.
            self.assertEqual(to_str_list(model.GetAllParams('')), ['a', 'b', 'c/a', 'c/d'])
        # Outside the scope, all params are visible again.
        self.assertEqual(to_str_list(model.GetParams()), ['a', 'c/a'])
        self.assertEqual(to_str_list(model.GetComputedParams()), ['b', 'c/d'])
        self.assertEqual(to_str_list(model.GetAllParams()), ['a', 'b', 'c/a', 'c/d'])
        self.assertEqual(to_str_list(model.GetAllParams('')), ['a', 'b', 'c/a', 'c/d'])
        self.assertEqual(to_str_list(model.GetAllParams('c')), ['c/a', 'c/d'])
        self.assertEqual(to_str_list(model.GetAllParams('c/')), ['c/a', 'c/d'])

    def test_param_consistence(self):
        # A step model sharing param_model must share the parameter info map.
        model = ModelHelper(name='test_mode')
        cnv = brew.conv(model, 'data', 'cnv', 32, 32, 4)
        step_model = ModelHelper(name='step_model', param_model=model)
        a = brew.fc(step_model, cnv, 'a', 100, 200)
        brew.fc(model, a, 'b', 200, 5)
        self.assertEqual(model._parameters_info, step_model._parameters_info)

    def test_cond(self):
        # brew.cond copies then_value or else_value depending on `cond`.
        workspace.FeedBlob('cond', np.array(True))
        workspace.FeedBlob('then_value', np.array(1))
        workspace.FeedBlob('else_value', np.array(2))
        then_model = ModelHelper(name='then_test_model')
        then_model.net.Copy('then_value', 'output_blob')
        else_model = ModelHelper(name='else_test_model')
        else_model.net.Copy('else_value', 'output_blob')
        model = ModelHelper(name='test_model')
        brew.cond(model=model, cond_blob='cond', external_blobs=['then_value', 'else_value', 'output_blob'], then_model=then_model, else_model=else_model)
        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)
        output_value = workspace.FetchBlob('output_blob')
        self.assertEqual(output_value, 1)
        workspace.FeedBlob('cond', np.array(False))
        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)
        output_value = workspace.FetchBlob('output_blob')
        self.assertEqual(output_value, 2)

    def test_loop(self):
        # brew.loop: the body adds TWO to output_blob while the cond model
        # increments counter and keeps looping while counter < TEN.
        workspace.FeedBlob('cond', np.array(True))
        workspace.FeedBlob('ONE', np.array(1))
        workspace.FeedBlob('TWO', np.array(2))
        workspace.FeedBlob('TEN', np.array(10))
        workspace.FeedBlob('counter', np.array(0))
        workspace.FeedBlob('output_blob', np.array(0))
        loop_model = ModelHelper(name='loop_test_model')
        loop_model.net.Add(['output_blob', 'TWO'], 'output_blob')
        cond_model = ModelHelper(name='cond_test_model')
        cond_model.net.Add(['counter', 'ONE'], 'counter')
        comp_res = cond_model.net.LT(['counter', 'TEN'])
        cond_model.net.Copy(comp_res, 'cond')
        model = ModelHelper(name='test_model')
        brew.loop(model=model, cond_blob='cond', external_blobs=['cond', 'ONE', 'TWO', 'TEN', 'counter', 'output_blob'], loop_model=loop_model, cond_model=cond_model)
        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)
        output_value = workspace.FetchBlob('output_blob')
        self.assertEqual(output_value, 18)
def get_tree(filename):
    """Parse the source file at *filename* and return the root node of its tree.

    Undecodable bytes are replaced with backslash escapes so parsing never
    fails on badly encoded source files.
    """
    # Context manager closes the handle deterministically; the original
    # leaked the file object returned by open().
    with open(filename, encoding='utf8', errors='backslashreplace') as fh:
        file_str = fh.read()
    # `parser` is a module-level object (presumably a tree-sitter Parser);
    # it expects raw UTF-8 bytes.
    tree = parser.parse(bytes(file_str, 'utf-8'))
    return tree.root_node
def getUserBankAccount(userId, connection):
    """Fetch the bank-account rows for *userId*.

    Returns the DB-API result of the SELECT, or None when the user fails the
    authorization check. Database errors are logged and re-raised.
    """
    if not isAuthorizedUser(userId):
        return None
    # Parameterized query: the original concatenated userId directly into the
    # SQL string, which allowed SQL injection. '?' is the qmark paramstyle —
    # switch to '%s' if the driver uses pyformat (confirm against the driver).
    sql = 'SELECT * FROM user_bank_account WHERE user_id = ?'
    try:
        return connection.execute(sql, (userId,))
    except Exception as e:
        # Log only the query template; the user-supplied value is never
        # interpolated into the logged SQL.
        logging.error(f'Unable to retrieve account information from database. Query: {sql}')
        raise e
def stretch_with_scpml(dxes: fdfd_tools.GridSpacing, axis: int, polarity: int, omega: float, epsilon_effective: float=1.0, thickness: int=10, s_function: s_function_type=None) -> fdfd_tools.GridSpacing:
    """Apply a stretched-coordinate PML (SC-PML) along one axis of the grid.

    The first (polarity > 0) or last (polarity <= 0) `thickness` cells of the
    spacing arrays along `axis` are multiplied by a complex stretch factor.
    NOTE(review): `dxes` is mutated in place (its entries are replaced) as
    well as returned — callers should not assume the input stays untouched.
    """
    if (s_function is None):
        s_function = prepare_s_function()
    # Work in complex so the 1j stretching term below is representable.
    dx_ai = dxes[0][axis].astype(complex)
    dx_bi = dxes[1][axis].astype(complex)
    # Cell-boundary positions: cumulative sum of spacings with a leading 0.
    pos = np.hstack((0, dx_ai.cumsum()))
    pos_a = ((pos[:(- 1)] + pos[1:]) / 2)  # cell-center positions
    pos_b = pos[:(- 1)]                    # cell-start positions
    # Normalization for the imaginary stretch: sqrt(eps_eff) * Re(omega).
    s_correction = (np.sqrt(epsilon_effective) * np.real(omega))
    if (polarity > 0):
        # PML at the low end: l_d ramps from 1 at the outer edge to 0 at `bound`.
        bound = pos[thickness]
        d = (bound - pos[0])
        def l_d(x):
            return ((bound - x) / (bound - pos[0]))
        slc = slice(thickness)
    else:
        # PML at the high end: l_d ramps from 0 at `bound` to 1 at the outer edge.
        bound = pos[((- thickness) - 1)]
        d = (pos[(- 1)] - bound)
        def l_d(x):
            return ((x - bound) / (pos[(- 1)] - bound))
        if (thickness == 0):
            slc = slice(None)
        else:
            slc = slice((- thickness), None)
    # Multiply the in-layer spacings by the complex stretch factor
    # 1 + i*s(l)/(d * s_correction).
    dx_ai[slc] *= (1 + (((1j * s_function(l_d(pos_a[slc]))) / d) / s_correction))
    dx_bi[slc] *= (1 + (((1j * s_function(l_d(pos_b[slc]))) / d) / s_correction))
    dxes[0][axis] = dx_ai
    dxes[1][axis] = dx_bi
    return dxes
def get_default_frameworks():
    """Return the tags of the ML frameworks whose backends are installed."""
    availability_checks = (
        ('pt', is_torch_available),
        ('tf', is_tf_available),
        ('flax', is_flax_available),
    )
    return [tag for tag, is_available in availability_checks if is_available()]
class BaseMultiModalDataset(abc.ABC):
    """Abstract interface for multi-modal datasets.

    Subclasses are expected to override these accessors. NOTE(review): only
    ``label_types`` raises NotImplementedError; the remaining stubs silently
    return None when not overridden — confirm this asymmetry is intentional.
    """

    def feature_columns(self):
        # Names of the feature columns; returns None unless overridden.
        pass

    def label_columns(self):
        # Names of the label columns; returns None unless overridden.
        pass

    def label_types(self):
        # Types of the labels; must be provided by subclasses.
        raise NotImplementedError

    def data(self):
        # The underlying data object; returns None unless overridden.
        pass

    def metric(self):
        # Evaluation metric for this dataset; returns None unless overridden.
        pass

    def problem_type(self):
        # Task type (e.g. classification/regression); None unless overridden.
        pass
def test_cartesian():
    """ak.cartesian type signatures for every axis, nested and flat."""
    values = np.arange(2 * 3 * 5 * 7).reshape(2, 3, 5, 7).tolist()
    one = ak.Array(values)
    two = ak.Array(values)

    # Expected type strings per axis with nested=True.
    nested_expectations = {
        0: '2 * 2 * (var * var * var * int64, var * var * var * int64)',
        1: '2 * var * var * (var * var * int64, var * var * int64)',
        2: '2 * var * var * var * (var * int64, var * int64)',
        3: '2 * var * var * var * var * (int64, int64)',
        -1: '2 * var * var * var * var * (int64, int64)',
        -2: '2 * var * var * var * (var * int64, var * int64)',
        -3: '2 * var * var * (var * var * int64, var * var * int64)',
        -4: '2 * 2 * (var * var * var * int64, var * var * var * int64)',
    }
    for axis, expected in nested_expectations.items():
        result = ak.operations.cartesian([one, two], axis=axis, nested=True)
        assert str(result.type) == expected
    # Axis -5 is out of range for a depth-4 array.
    with pytest.raises(ValueError):
        ak.operations.cartesian([one, two], axis=-5, nested=True)

    # Expected type strings per axis without nesting.
    flat_expectations = {
        0: '4 * (var * var * var * int64, var * var * var * int64)',
        1: '2 * var * (var * var * int64, var * var * int64)',
        2: '2 * var * var * (var * int64, var * int64)',
        3: '2 * var * var * var * (int64, int64)',
        -1: '2 * var * var * var * (int64, int64)',
        -2: '2 * var * var * (var * int64, var * int64)',
        -3: '2 * var * (var * var * int64, var * var * int64)',
        -4: '4 * (var * var * var * int64, var * var * var * int64)',
    }
    for axis, expected in flat_expectations.items():
        result = ak.operations.cartesian([one, two], axis=axis)
        assert str(result.type) == expected
    with pytest.raises(ValueError):
        ak.operations.cartesian([one, two], axis=-5)
class DistributionModelTestCase(unittest.TestCase):
    """DistributionPredictionModel should be deterministic only in eval mode."""

    def test_prediction_in_eval_should_be_consistent(self):
        # In eval mode stochastic layers are disabled, so two forward passes
        # over the same input must agree exactly.
        model = DistributionPredictionModel(input_size=10)
        model.eval()
        sample = torch.randn(size=[10])
        first = float(model(sample))
        second = float(model(sample))
        self.assertEqual(first, second)

    def test_prediction_in_eval_should_be_inconsistent(self):
        # In train mode the stochastic layers stay active, so repeated
        # forward passes are expected to differ.
        model = DistributionPredictionModel(input_size=10)
        model.train()
        sample = torch.randn(size=[10])
        first = float(model(sample))
        second = float(model(sample))
        self.assertNotEqual(first, second)
def create_lvis_semantic_from_instance(instance_json, sem_seg_root):
    """Convert LVIS instance annotations into per-image semantic .npz files.

    Args:
        instance_json: path to the LVIS instance-annotation JSON file.
        sem_seg_root: output directory; created if missing.
    """
    os.makedirs(sem_seg_root, exist_ok=True)
    lvis_detection = LVIS(instance_json)

    def iter_annotations():
        # Yield one (annotations, output_path, image_record) tuple per image.
        for img_id in lvis_detection.get_img_ids():
            anns_ids = lvis_detection.get_ann_ids([img_id])
            anns = lvis_detection.load_anns(anns_ids)
            img = lvis_detection.load_imgs([img_id])[0]
            file_name = os.path.splitext(img['file_name'])[0]
            output = os.path.join(sem_seg_root, file_name + '.npz')
            yield (anns, output, img)

    print('Start writing to {} ...'.format(sem_seg_root))
    start = time.time()
    # Context manager terminates the worker processes on exit; the original
    # created the pool and never closed/joined it. The no-arg
    # functools.partial wrapper around the worker was a no-op and is dropped.
    with mp.Pool(processes=max(mp.cpu_count() // 2, 4)) as pool:
        pool.starmap(_process_instance_to_semantic, iter_annotations(), chunksize=100)
    print('Finished. time: {:.2f}s'.format(time.time() - start))
def load_tests(loader, tests, pattern):
    """unittest ``load_tests`` hook: validate and collect all tests into one suite.

    ``loader`` and ``pattern`` are part of the protocol signature and are
    intentionally unused here.
    """
    set_running_script_path()
    suite = unittest.TestSuite()
    flattened = (case for group in tests for case in group)
    for case in flattened:
        check_test_defined_in_running_script(case)
        suite.addTest(case)
    return suite
class ProjectiveSpace_rational_field(ProjectiveSpace_field):
    """Projective space over the rational field, with point enumeration."""

    def rational_points(self, bound=0):
        """Return all rational points with coordinates of height at most ``bound``.

        Points are produced with primitive integer coordinates (gcd 1); the
        last nonzero coordinate of each generated point is positive.

        Raises:
            ValueError: if ``bound`` is not a positive integer.
        """
        if (not (bound > 0)):
            # Bug fix: the original message contained %s but never
            # interpolated the offending value.
            raise ValueError('argument bound (= %s) must be a positive integer' % bound)
        n = self.dimension_relative()
        Q = [(k - bound) for k in range(((2 * bound) + 1))]  # all of [-bound, bound]
        R = [(k + 1) for k in range(bound)]                  # positive values 1..bound
        S = [Tuples(Q, (k + 1)) for k in range(n)]
        pts = []
        i = n
        # For each position i (from n down to 1): place a positive value at i,
        # zeros after it, and every primitive tuple before it.
        while (i > 0):
            P = [0 for _ in range((n + 1))]
            for ai in R:
                P[i] = ai
                for tup in S[(i - 1)]:
                    # Keep only primitive coordinate vectors (gcd 1).
                    if (gcd(((ai,) + tup)) == 1):
                        for j in range(i):
                            P[j] = tup[j]
                        pts.append(self(P))
            i -= 1
        # Finally the point (1 : 0 : ... : 0).
        P = [0 for _ in range((n + 1))]
        P[0] = 1
        pts.append(self(P))
        return pts
class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer):
    """Transformer encoder layer whose self-attention is sparse.

    Identical to the parent layer except that ``self_attn`` is replaced with
    a SparseMultiheadAttention configured by ``stride`` and ``expressivity``.
    """

    def __init__(self, embedding_dim: int=768, ffn_embedding_dim: int=3072, num_attention_heads: int=8, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, activation_fn: str='relu', add_bias_kv: bool=False, add_zero_attn: bool=False, export: bool=False, is_bidirectional: bool=True, stride: int=32, expressivity: int=8) -> None:
        # NOTE: dimension/head-count annotations corrected from float to int;
        # the defaults and their uses are integral.
        super().__init__(embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, activation_dropout, activation_fn, add_bias_kv, add_zero_attn, export)
        # Replace the dense attention built by the parent with the sparse one.
        self.self_attn = SparseMultiheadAttention(self.embedding_dim, num_attention_heads, dropout=attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True, is_bidirectional=is_bidirectional, stride=stride, expressivity=expressivity)
def test_get_function_description_nested():
    """A nested function's `return` must not count for the outer function,
    while the outer function's `yield` must be detected."""
    module = astroid.parse('\ndef foo():\n    def bar():\n        return False\n    yield 5')
    description = get_function_description(get_function_node_from_ast(module, 'foo'))
    assert (description.has_return is False)
    assert (description.has_yield is True)
# Bug fix: the decorator line was truncated to `.parametrize(...)`; restored
# as the standard pytest parametrization over sparse CSR container types.
@pytest.mark.parametrize('csr_container', CSR_CONTAINERS)
def test_dbscan_precomputed_metric_with_initial_rows_zero(csr_container):
    """DBSCAN on a precomputed sparse distance matrix whose first rows are all
    zero: those samples must come out as noise (-1), not crash or cluster."""
    ar = np.array([
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0],
        [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0],
        [0.0, 0.0, 0.1, 0.1, 0.0, 0.0, 0.3],
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1],
        [0.0, 0.0, 0.0, 0.0, 0.3, 0.1, 0.0],
    ])
    matrix = csr_container(ar)
    labels = DBSCAN(eps=0.2, metric='precomputed', min_samples=2).fit(matrix).labels_
    assert_array_equal(labels, [(- 1), (- 1), 0, 0, 0, 1, 1])
class _MultipleMatch(ParseElementEnhance):
    """Base for repetition expressions: repeatedly match ``expr``, optionally
    stopping as soon as ``stopOn`` would match."""

    def __init__(self, expr, stopOn=None):
        super(_MultipleMatch, self).__init__(expr)
        self.saveAsList = True
        ender = stopOn
        if isinstance(ender, basestring):
            # Promote a plain string terminator to a parser element.
            ender = ParserElement._literalStringClass(ender)
        # not_ender fails the match as soon as the terminator would match.
        self.not_ender = ((~ ender) if (ender is not None) else None)

    def parseImpl(self, instring, loc, doActions=True):
        # Bind hot attributes to locals: these are used once per repetition.
        self_expr_parse = self.expr._parse
        self_skip_ignorables = self._skipIgnorables
        check_ender = (self.not_ender is not None)
        if check_ender:
            try_not_ender = self.not_ender.tryParse
        # The first match is mandatory: a failure here propagates to the caller.
        if check_ender:
            try_not_ender(instring, loc)
        (loc, tokens) = self_expr_parse(instring, loc, doActions, callPreParse=False)
        try:
            hasIgnoreExprs = bool(self.ignoreExprs)
            while 1:
                # Stop before consuming the terminator, if one was given.
                if check_ender:
                    try_not_ender(instring, loc)
                if hasIgnoreExprs:
                    preloc = self_skip_ignorables(instring, loc)
                else:
                    preloc = loc
                (loc, tmptokens) = self_expr_parse(instring, preloc, doActions)
                if (tmptokens or tmptokens.haskeys()):
                    tokens += tmptokens
        except (ParseException, IndexError):
            # Normal loop exit: the repeated expression no longer matches.
            pass
        return (loc, tokens)
class BaseDiscriminator(nn.Module):
    """Interface for discriminator modules.

    ``forward`` must return the discriminator output tensor together with a
    list of intermediate feature tensors.
    """

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        # Bug fix: the original `raise NotImplemented()` raised a TypeError,
        # because NotImplemented is a sentinel value, not an exception class.
        raise NotImplementedError
def openfile(filename, *args, **kwargs):
    """Open ``filename``, preferring a gzip-compressed sibling.

    If ``filename + '.gz'`` exists it is opened with ``gzip.open``; otherwise
    the plain file is opened with the builtin ``open``. Extra positional and
    keyword arguments are forwarded unchanged to whichever opener runs.
    """
    compressed_path = filename + '.gz'
    try:
        return gzip.open(compressed_path, *args, **kwargs)
    except FileNotFoundError:
        return open(filename, *args, **kwargs)
# Bug fix: the route decorator was truncated to a bare tuple expression;
# restored assuming the Flask application object is named `app` — confirm.
@app.route('/macbert_correct', methods=['POST', 'GET'])
def correct_api():
    """Endpoint returning MacBERT text corrections.

    POST: expects a JSON body with a 'text' field.
    GET:  expects a 'text' query parameter.
    Falls back to the module-level `help` handler when neither is supplied.
    """
    if (request.method == 'POST'):
        data = request.json
        logger.info('Received data: {}'.format(data))
        text = data['text']
        r = macbert_model.correct(text)
        return r
    elif ('text' in request.args):
        text = request.args.get('text')
        logger.info('Received data: {}'.format(text))
        r = macbert_model.correct(text)
        return r
    return help
class EPOptRunner(BaseRunner):
    """Rollout runner implementing EPOpt-style worst-case path filtering.

    Collects ``paths`` full episodes, keeps only the episodes whose total
    return falls at or below the (100 * epsilon)-percentile, and computes
    GAE returns/advantages over the retained transitions.
    """

    def run(self, *, paths, epsilon):
        """Collect rollouts; return the sf01-flattened (obs, returns, dones,
        actions, values, neglogpacs) plus the initial states, episode infos
        of the retained episodes, and the number of retained episodes."""
        # Per-path rollout buffers.
        n_mb_obs = [[] for _ in range(paths)]
        n_mb_rewards = [[] for _ in range(paths)]
        n_mb_actions = [[] for _ in range(paths)]
        n_mb_values = [[] for _ in range(paths)]
        n_mb_dones = [[] for _ in range(paths)]
        n_mb_neglogpacs = [[] for _ in range(paths)]
        n_epinfos = [[] for _ in range(paths)]
        mb_states = self.states
        num_episodes = 0
        for N in range(paths):
            (mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs, epinfos) = (n_mb_obs[N], n_mb_rewards[N], n_mb_actions[N], n_mb_values[N], n_mb_dones[N], n_mb_neglogpacs[N], n_epinfos[N])
            # Roll out one episode, capped at the env's max episode length.
            for _ in range(self.env.venv.envs[0].spec.max_episode_steps):
                (actions, values, self.states, neglogpacs) = self.model.step(self.obs, self.states, self.dones)
                mb_obs.append(self.obs.copy())
                mb_actions.append(actions)
                mb_values.append(values)
                mb_neglogpacs.append(neglogpacs)
                mb_dones.append(self.dones)
                (self.obs[:], rewards, self.dones, infos) = self.env.step(actions)
                for info in infos:
                    maybeepinfo = info.get('episode')
                    if maybeepinfo:
                        epinfos.append(maybeepinfo)
                mb_rewards.append(rewards)
                if self.dones:
                    break
        # Keep only the worst episodes: total return at or below the
        # (100 * epsilon)-percentile.
        episode_returns = [sum(r) for r in n_mb_rewards]
        cutoff = np.percentile(episode_returns, (100 * epsilon))
        (mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs) = ([], [], [], [], [], [])
        epinfos = []
        for N in range(paths):
            if (episode_returns[N] <= cutoff):
                num_episodes += 1
                mb_obs.extend(n_mb_obs[N])
                mb_rewards.extend(n_mb_rewards[N])
                mb_actions.extend(n_mb_actions[N])
                mb_values.extend(n_mb_values[N])
                mb_dones.extend(n_mb_dones[N])
                mb_neglogpacs.extend(n_mb_neglogpacs[N])
                epinfos.extend(n_epinfos[N])
        total_steps = len(mb_rewards)
        mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
        mb_actions = np.asarray(mb_actions)
        mb_values = np.asarray(mb_values, dtype=np.float32)
        mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
        # Bug fix: np.bool was removed in NumPy 1.24; the builtin `bool` is
        # the documented replacement (np.bool was just an alias for it).
        mb_dones = np.asarray(mb_dones, dtype=bool)
        last_values = self.model.value(self.obs, self.states, self.dones)
        # Generalized Advantage Estimation over the concatenated transitions.
        mb_returns = np.zeros_like(mb_rewards)
        mb_advs = np.zeros_like(mb_rewards)
        lastgaelam = 0
        for t in reversed(range(total_steps)):
            if (t == (total_steps - 1)):
                nextnonterminal = (1.0 - self.dones)
                nextvalues = last_values
            else:
                nextnonterminal = (1.0 - mb_dones[(t + 1)])
                nextvalues = mb_values[(t + 1)]
            delta = ((mb_rewards[t] + ((self.gamma * nextvalues) * nextnonterminal)) - mb_values[t])
            mb_advs[t] = lastgaelam = (delta + (((self.gamma * self.lam) * nextnonterminal) * lastgaelam))
        mb_returns = (mb_advs + mb_values)
        return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)), mb_states, epinfos, num_episodes)
class CrystalDiagramAutomorphism(CrystalMorphism):
    """Crystal morphism induced by a (twisted) diagram automorphism.

    Images are computed lazily: each element is walked upward via the e_i
    operators to a cached (or highest-weight) element, that element's image
    is looked up, and the recorded path is replayed downward using the
    twisted f_i string.
    """

    def __init__(self, C, on_hw, index_set=None, automorphism=None, cache=True):
        # Default twist is the identity map on indices.
        if (automorphism is None):
            automorphism = (lambda i: i)
        if (index_set is None):
            index_set = ()
        self._twist = automorphism
        # on_hw maps highest-weight elements to their images; accept either
        # a dict or a callable.
        if isinstance(on_hw, dict):
            self._on_hw = on_hw.__getitem__
        else:
            self._on_hw = on_hw
        parent = Hom(C, C)
        self._cache = {}
        # NOTE(review): _cache_result is stored but not consulted in the code
        # shown here — presumably used elsewhere; confirm.
        self._cache_result = bool(cache)
        # A dict passed as `cache` seeds (and shares) the memo table.
        if isinstance(cache, dict):
            self._cache = cache
        self._index_set = tuple(index_set)
        CrystalMorphism.__init__(self, parent, C.cartan_type())

    def _call_(self, x):
        """Return the image of x, memoizing the result in self._cache."""
        if (x in self._cache):
            return self._cache[x]
        ind = self._index_set
        cur = x
        path = []
        # Climb toward a known element, recording the twisted raising indices.
        while (cur not in self._cache):
            n = None
            for i in ind:
                n = cur.e(i)
                if (n is not None):
                    path.append(self._twist(i))
                    cur = n
                    break
            if (n is None):
                # cur is highest weight with respect to the index set.
                break
        if (cur in self._cache):
            cur = self._cache[cur]
        else:
            cur = self._on_hw(cur)
        # Replay the recorded path downward from the image.
        y = cur.f_string(reversed(path))
        assert (y is not None)
        self._cache[x] = y
        return y

    def _repr_type(self):
        return 'Diagram automorphism'

    def is_isomorphism(self):
        """A diagram automorphism is always an isomorphism."""
        return True

    # For an isomorphism these properties all coincide.
    is_surjective = is_isomorphism
    is_embedding = is_isomorphism
    is_strict = is_isomorphism
    __bool__ = is_isomorphism
def variable_on_cpu(name, shape, initializer, trainable=True):
    """Create (or fetch) a TF variable pinned to host memory.

    The dtype follows the FLAGS.use_fp16 switch: float16 when set, float32
    otherwise.
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    with tf.device('/cpu:0'):
        return tf.get_variable(name, shape, initializer=initializer, dtype=dtype, trainable=trainable)
def evaluate(model, eval_iterator, do_mi=False, do_contrast_spearmanr=True, latent_space_type='plain', return_pred=False):
    """Run contrastive evaluation over ``eval_iterator``.

    Returns a dict with the mean contrastive and same-label losses across
    batches, optionally the Spearman correlation between predictions and
    targets, and optionally the raw prediction/target lists.
    NOTE(review): ``do_mi`` and ``latent_space_type`` are accepted but never
    used in this body — confirm whether callers rely on them elsewhere.
    """
    eval_contrastive_loss_total = 0
    eval_same_label_loss_total = 0
    model.eval()
    num_eval_batch = 0
    contrast_preds = []
    contrast_targs = []
    with torch.no_grad():
        for (step, batch) in enumerate(eval_iterator):
            input_ids = batch['input_ids'].to(model.device)
            contrast_targets = batch['labels'].to(model.device)
            model_outputs = model(input_ids, contrast_targets=contrast_targets, mask_similar_contrast_label=True, return_same_label_loss=True)
            (contrastive_loss, contrastive_value) = (model_outputs[0], model_outputs[1])
            same_label_loss = model_outputs[2]
            eval_same_label_loss_total = (eval_same_label_loss_total + same_label_loss)
            # NOTE(review): squeeze() yields a 0-d array for batch size 1,
            # which would break this zip — presumably batches are > 1; confirm.
            for (pred, target) in zip(contrastive_value.squeeze().cpu().numpy(), contrast_targets.cpu().numpy()):
                contrast_targs.append(target)
                contrast_preds.append(pred)
            eval_contrastive_loss_total = (eval_contrastive_loss_total + contrastive_loss)
            num_eval_batch += 1
    # Mean losses over the number of batches.
    eval_contrastive_loss = (eval_contrastive_loss_total / num_eval_batch)
    eval_same_label_loss = (eval_same_label_loss_total / num_eval_batch)
    eval_output = {'contrastive_loss': eval_contrastive_loss, 'same_label_loss': eval_same_label_loss}
    if do_contrast_spearmanr:
        spearmanr_value = spearmanr(contrast_targs, contrast_preds)
        print('spearmanr_value: ', spearmanr_value)
        eval_output['spearmanr'] = spearmanr_value
    if return_pred:
        eval_output['contrast_preds'] = contrast_preds
        eval_output['contrast_targs'] = contrast_targs
    return eval_output
def make_index(data_path):
    """Build the dataset index JSON for every .wav clip under ``data_path``.

    The index records the metadata CSV checksum plus, for each clip, its
    relative audio path and checksum; the result is written to INDEX_PATH.
    """
    metadata_csv = 'BirdVoxDCASE20k_csvpublic.csv'
    index = {
        'version': '1.0',
        'clips': {},
        'metadata': {
            'BirdVoxDCASE20k_csvpublic': [
                metadata_csv,
                md5(os.path.join(data_path, metadata_csv)),
            ]
        },
    }
    for clip in tqdm(glob.glob(os.path.join(data_path, '*.wav'))):
        clip_id = os.path.basename(clip).replace('.wav', '')
        index['clips'][clip_id] = {
            'audio': [os.path.join(os.path.basename(clip)), md5(clip)]
        }
    with open(INDEX_PATH, 'w') as fhandle:
        json.dump(index, fhandle, indent=2)
def _random_package_name(filename):
    """Derive a pseudo-unique module name for the given config file.

    A 4-character UUID fragment is appended to the package prefix so repeated
    loads of the same file get distinct module names.
    """
    suffix = str(uuid.uuid4())[:4]
    base = os.path.basename(filename)
    return _CFG_PACKAGE_NAME + suffix + '.' + base
class ConvertBlock(nn.Module):
    """Three-conv funnel that merges ``blocks`` stacked feature maps into one.

    Channel plan: in*blocks -> out*blocks//2 -> out*blocks//4 -> out, with
    ReLU after the first two convs; all convs are 3x3, stride 1, padding 1.
    """

    def __init__(self, in_channels, out_channels, blocks):
        super(ConvertBlock, self).__init__()
        self.body = nn.Sequential(nn.Conv2d((in_channels * blocks), ((out_channels * blocks) // 2), 3, 1, 1), nn.ReLU(inplace=True), nn.Conv2d(((out_channels * blocks) // 2), ((out_channels * blocks) // 4), 3, 1, 1), nn.ReLU(inplace=True), nn.Conv2d(((out_channels * blocks) // 4), out_channels, 3, 1, 1))
        # NOTE(review): passes the bound method `self.modules` rather than the
        # result of self.modules() — verify init_weights expects a callable.
        init_weights(self.modules)

    def forward(self, x):
        out = self.body(x)
        return out
def basic_unit(x, rate):
    """Bottleneck unit: 1x1 conv -> 3x3 depthwise (dilated) -> 1x1 conv.

    Keeps the channel count equal to the input's. NOTE(review): reads the
    channel count from x.shape[3], i.e. presumably NHWC layout — confirm.
    """
    in_channels = x.shape[3]
    x = slim.conv2d(x, in_channels, (1, 1), stride=1, scope='conv1x1_before')
    # Depthwise conv with dilation `rate` and no activation.
    x = separable_conv2d(x, kernel=3, stride=1, rate=rate, activation_fn=None, scope='depthwise')
    x = slim.conv2d(x, in_channels, (1, 1), stride=1, scope='conv1x1_after')
    return x
def _calculate_mcd_f0(file_list, gt_root, f0_all, results):
    """Score each converted waveform against its ground truth.

    For every converted wav path, loads the matching ground-truth wav for the
    target speaker, computes MCD / F0-RMSE / F0-correlation / duration-diff,
    and appends one result row to ``results`` (mutated in place).
    """
    for cvt_wav_path in file_list:
        basename = get_basename(cvt_wav_path)
        trgspk, number = get_trgspk_and_number(basename)
        speaker_f0 = f0_all[trgspk]
        f0min = speaker_f0['f0min']
        f0max = speaker_f0['f0max']
        gt_wav_path = os.path.join(gt_root, trgspk, number + '.wav')
        # Ground truth keeps its native sample rate; the converted wav is
        # resampled to match it.
        gt_wav, gt_fs = librosa.load(gt_wav_path, sr=None)
        cvt_wav, _ = librosa.load(cvt_wav_path, sr=gt_fs)
        mcd, f0rmse, f0corr, ddur = calculate_mcd_f0(cvt_wav, gt_wav, gt_fs, f0min, f0max)
        results.append([basename, mcd, f0rmse, f0corr, ddur])
def test_benchmark_clone(benchmark_test_case):
    """Repeated cloning must round-trip to an object equal to the original."""
    cloned = benchmark_test_case.clone()
    for _ in range(BENCHMARK_REPETITIONS):
        cloned = cloned.clone()
    assert cloned == benchmark_test_case
def initialize():
    """Seed particle positions/velocities on a lattice and build the triangle mesh."""
    # Particles on an n_particle_x x n_particle_y grid, offset into the domain.
    for i in range(n_particle_x):
        for j in range(n_particle_y):
            p = mesh(i, j)
            x[p] = [0.1 + i * dx * 0.5, 0.7 + j * dx * 0.5]
            v[p] = [0, -1]
    # Two triangles per grid cell, consistently wound.
    for i in range(n_particle_x - 1):
        for j in range(n_particle_y - 1):
            cell = i * (n_particle_y - 1) + j
            eid = cell * 2
            vertices[eid, 0] = mesh(i, j)
            vertices[eid, 1] = mesh(i + 1, j)
            vertices[eid, 2] = mesh(i, j + 1)
            eid = cell * 2 + 1
            vertices[eid, 0] = mesh(i, j + 1)
            vertices[eid, 1] = mesh(i + 1, j + 1)
            vertices[eid, 2] = mesh(i + 1, j)
    # Rest-pose deformation matrix per element.
    for e in range(n_elements):
        restT[e] = compute_T(e)
def sentence_loader(root_dir):
    """Yield one flat dict per sentence across every document under *root_dir*."""
    for document in DocumentLoader(root_dir):
        for sentence in document.sentences:
            yield {
                'doc_name': document.name,
                'i': sentence.i,
                'words': sentence.words,
                'abs_char_offsets': sentence.abs_char_offsets,
                'pos_tags': sentence.pos_tags,
                'text': sentence.text,
            }
def bias_init_with_prob(prior_prob):
    """Bias value such that sigmoid(bias) equals the given prior probability."""
    odds = (1 - prior_prob) / prior_prob
    return float(-np.log(odds))
def test_clean_fuzzy_dist(df_typo_countries: pd.DataFrame) -> None:
    """fuzzy_dist=2 should resolve typos that fuzzy_dist=1 leaves as NaN."""
    df_clean_dist1 = clean_country(df_typo_countries, 'messy_country', fuzzy_dist=1)
    df_clean_dist2 = clean_country(df_typo_countries, 'messy_country', fuzzy_dist=2)
    expected_dist1 = ['Canada', 'Canada', 'Australia', 'Antarctica', 'South Korea', np.nan, np.nan, 'Canada', 'Indonesia', 'DR Congo', np.nan, np.nan, np.nan]
    expected_dist2 = ['Canada', 'Canada', 'Australia', 'Antarctica', 'South Korea', 'Canada', 'Afghanistan', 'Canada', 'Indonesia', 'DR Congo', 'Greece', np.nan, np.nan]
    df_check_dist1 = df_typo_countries.copy()
    df_check_dist1['messy_country_clean'] = expected_dist1
    df_check_dist2 = df_typo_countries.copy()
    df_check_dist2['messy_country_clean'] = expected_dist2
    assert df_clean_dist1.equals(df_check_dist1)
    assert df_clean_dist2.equals(df_check_dist2)
class NoiseScheduleVP():
    """VP-SDE noise schedule ('discrete', 'linear' or 'cosine').

    Exposes log(alpha_t), alpha_t, sigma_t and the half-logSNR lambda_t
    (plus its inverse), as used by DPM-Solver-style diffusion samplers.
    """

    def __init__(self, schedule='discrete', betas=None, alphas_cumprod=None, continuous_beta_0=0.1, continuous_beta_1=20.0):
        if (schedule not in ['discrete', 'linear', 'cosine']):
            raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(schedule))
        self.schedule = schedule
        if (schedule == 'discrete'):
            # Either `betas` or `alphas_cumprod` defines log(alpha_t) at the
            # N discrete training steps; exactly one must be provided.
            if (betas is not None):
                log_alphas = (0.5 * torch.log((1 - betas)).cumsum(dim=0))
            else:
                assert (alphas_cumprod is not None)
                log_alphas = (0.5 * torch.log(alphas_cumprod))
            self.total_N = len(log_alphas)
            self.T = 1.0
            # Discrete steps are mapped to continuous times in (0, 1].
            self.t_array = torch.linspace(0.0, 1.0, (self.total_N + 1))[1:].reshape((1, (- 1)))
            self.log_alpha_array = log_alphas.reshape((1, (- 1)))
        else:
            self.total_N = 1000
            self.beta_0 = continuous_beta_0
            self.beta_1 = continuous_beta_1
            # Cosine-schedule constants (Nichol & Dhariwal style); t_max is the
            # time at which beta(t) would reach cosine_beta_max.
            self.cosine_s = 0.008
            self.cosine_beta_max = 999.0
            self.cosine_t_max = ((((math.atan(((self.cosine_beta_max * (1.0 + self.cosine_s)) / math.pi)) * 2.0) * (1.0 + self.cosine_s)) / math.pi) - self.cosine_s)
            self.cosine_log_alpha_0 = math.log(math.cos((((self.cosine_s / (1.0 + self.cosine_s)) * math.pi) / 2.0)))
            self.schedule = schedule
            if (schedule == 'cosine'):
                # Slightly below 1 to avoid the singular endpoint of cos().
                self.T = 0.9946
            else:
                self.T = 1.0

    def marginal_log_mean_coeff(self, t):
        """Return log(alpha_t) for continuous time t (tensor in, tensor out)."""
        if (self.schedule == 'discrete'):
            # Piecewise-linear interpolation over the tabulated discrete schedule.
            return interpolate_fn(t.reshape(((- 1), 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((- 1))
        elif (self.schedule == 'linear'):
            return ((((- 0.25) * (t ** 2)) * (self.beta_1 - self.beta_0)) - ((0.5 * t) * self.beta_0))
        elif (self.schedule == 'cosine'):
            log_alpha_fn = (lambda s: torch.log(torch.cos(((((s + self.cosine_s) / (1.0 + self.cosine_s)) * math.pi) / 2.0))))
            log_alpha_t = (log_alpha_fn(t) - self.cosine_log_alpha_0)
            return log_alpha_t

    def marginal_alpha(self, t):
        """alpha_t = exp(log alpha_t)."""
        return torch.exp(self.marginal_log_mean_coeff(t))

    def marginal_std(self, t):
        """sigma_t = sqrt(1 - alpha_t^2)."""
        return torch.sqrt((1.0 - torch.exp((2.0 * self.marginal_log_mean_coeff(t)))))

    def marginal_lambda(self, t):
        """Half-logSNR: lambda_t = log(alpha_t) - log(sigma_t)."""
        log_mean_coeff = self.marginal_log_mean_coeff(t)
        log_std = (0.5 * torch.log((1.0 - torch.exp((2.0 * log_mean_coeff)))))
        return (log_mean_coeff - log_std)

    def inverse_lambda(self, lamb):
        """Invert marginal_lambda: return t such that lambda_t == lamb."""
        if (self.schedule == 'linear'):
            tmp = ((2.0 * (self.beta_1 - self.beta_0)) * torch.logaddexp(((- 2.0) * lamb), torch.zeros((1,)).to(lamb)))
            Delta = ((self.beta_0 ** 2) + tmp)
            return ((tmp / (torch.sqrt(Delta) + self.beta_0)) / (self.beta_1 - self.beta_0))
        elif (self.schedule == 'discrete'):
            log_alpha = ((- 0.5) * torch.logaddexp(torch.zeros((1,)).to(lamb.device), ((- 2.0) * lamb)))
            # Arrays are flipped so interpolate_fn sees increasing x values.
            t = interpolate_fn(log_alpha.reshape(((- 1), 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1]))
            return t.reshape(((- 1),))
        else:
            log_alpha = ((- 0.5) * torch.logaddexp(((- 2.0) * lamb), torch.zeros((1,)).to(lamb)))
            t_fn = (lambda log_alpha_t: ((((torch.arccos(torch.exp((log_alpha_t + self.cosine_log_alpha_0))) * 2.0) * (1.0 + self.cosine_s)) / math.pi) - self.cosine_s))
            t = t_fn(log_alpha)
            return t
def test_categorical_encoder(device):
    """Exercise CategoricalEncoder: update, encode/decode, limits, unk handling."""
    from speechbrain.dataio.encoder import CategoricalEncoder
    # Basic label set from an iterable; encoding yields plain ints.
    encoder = CategoricalEncoder()
    encoder.expect_len(4)
    encoder.update_from_iterable('abcd')
    integers = encoder.encode_sequence('dcba')
    assert all((isinstance(i, int) for i in integers))
    assert encoder.is_continuous()
    # Duplicate adds must raise; ensure_label is the idempotent variant.
    with pytest.raises(KeyError):
        encoder.add_label('a')
    encoder.ensure_label('a')
    # insert_label on an existing label raises; enforce_label overrides the index.
    with pytest.raises(KeyError):
        encoder.insert_label('a', (- 3))
    encoder.enforce_label('a', (- 3))
    assert (encoder.encode_label('a') == (- 3))
    # A negative index breaks the contiguous 0..N-1 property.
    assert (not encoder.is_continuous())
    # decode_torch / decode_ndim round-trip nested structures of indices.
    import torch
    encoder = CategoricalEncoder()
    encoder.expect_len(4)
    encoder.update_from_iterable('abcd')
    result = encoder.decode_torch(torch.tensor([[0, 0], [1, 1], [2, 2], [3, 3]], device=device))
    assert (result == [['a', 'a'], ['b', 'b'], ['c', 'c'], ['d', 'd']])
    result = encoder.decode_ndim([[0, 0], [1, 1], [2, 2], [3, 3]])
    assert (result == [['a', 'a'], ['b', 'b'], ['c', 'c'], ['d', 'd']])
    result = encoder.decode_ndim(torch.tensor([[0, 0], [1, 1], [2, 2], [3, 3]]))
    assert (result == [['a', 'a'], ['b', 'b'], ['c', 'c'], ['d', 'd']])
    result = encoder.decode_ndim([[[[[0, 0], [1, 1], [2, 2], [3, 3]]]]])
    assert (result == [[[[['a', 'a'], ['b', 'b'], ['c', 'c'], ['d', 'd']]]]])
    result = encoder.decode_torch(torch.tensor([[[[[0, 0], [1, 1], [2, 2], [3, 3]]]]], device=device))
    assert (result == [[[[['a', 'a'], ['b', 'b'], ['c', 'c'], ['d', 'd']]]]])
    # Ragged input is also supported by decode_ndim.
    result = encoder.decode_ndim([[0, 0], [1], [2, 2, 2], []])
    assert (result == [['a', 'a'], ['b'], ['c', 'c', 'c'], []])
    # n_most_common keeps only the 3 most frequent labels ('d' is dropped).
    encoder = CategoricalEncoder()
    encoder.expect_len(3)
    encoder.limited_labelset_from_iterable('aabbbcccd', n_most_common=3)
    encoder.encode_sequence('abc')
    with pytest.raises(KeyError):
        encoder.encode_label('d')
    # min_count=3 drops 'a' (count 2) and 'd' (count 1).
    encoder = CategoricalEncoder()
    encoder.expect_len(2)
    encoder.limited_labelset_from_iterable('aabbbcccd', min_count=3)
    encoder.encode_sequence('cbcb')
    with pytest.raises(KeyError):
        encoder.encode_label('a')
    with pytest.raises(KeyError):
        encoder.encode_label('d')
    # Both limits combined behave like the intersection of the two rules.
    encoder = CategoricalEncoder()
    encoder.expect_len(2)
    encoder.limited_labelset_from_iterable('aabbbcccd', n_most_common=3, min_count=3)
    encoder.encode_sequence('cbcb')
    with pytest.raises(KeyError):
        encoder.encode_label('a')
    with pytest.raises(KeyError):
        encoder.encode_label('d')
    # With an unk_label, unknown labels map to index 0 and decode back to it.
    encoder = CategoricalEncoder(unk_label='<unk>')
    encoder.expect_len(4)
    encoder.update_from_iterable('abc')
    assert (encoder.encode_label('a') == 1)
    assert (encoder.encode_label('d') == 0)
    assert (encoder.decode_ndim(encoder.encode_label('d')) == '<unk>')
def build_ftrl(model, engine='SIMD', **kwargs):
    """Attach an FTRL optimizer to *model*, verifying SIMD kernels when requested."""
    if engine == 'SIMD':
        # Fail fast if the SIMD-accelerated operators were not compiled in.
        assert core.IsOperator('Ftrl_ENGINE_SIMD')
        assert core.IsOperator('SparseFtrl_ENGINE_SIMD')
    return _build(model, FtrlOptimizer(engine=engine, **kwargs))
def load_graph(file_name):
    """Deserialize a frozen TF GraphDef from *file_name* into a fresh Graph."""
    graph_def = tf.GraphDef()
    with open(file_name, 'rb') as f:
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        # Empty name prefix preserves the original node names.
        tf.import_graph_def(graph_def, name='')
    return graph
def add_edge(G, center_feature):
    """Fully connect the first `center_feature.shape[0]` nodes of *G*.

    Each edge weight is the distance between the two nodes' 'coordinate'
    attributes. Returns the (mutated) graph for convenience.
    """
    num = center_feature.shape[0]
    for i in range(num):
        for j in range((i + 1), num):
            # Use the public `G.nodes` view rather than the private `G._node`
            # mapping, which is a networkx implementation detail.
            distance = get_distance(G.nodes[i]['coordinate'], G.nodes[j]['coordinate'])
            G.add_edge(i, j, weight=distance)
    return G
class CreateDefaultMaterials(bpy.types.Operator):
    """Create the default cloth/checker/cube materials if they don't exist."""
    bl_idname = 'object.create_default_mats'
    bl_label = 'Create Default Materials'
    bl_options = {'REGISTER', 'UNDO'}

    @staticmethod
    def _create_checker_material(name, color1, color2):
        # Build a node-based material wired UVMap -> Checker -> Diffuse BSDF.
        mat = bpy.data.materials.new(name)
        mat.use_nodes = True
        nodes = mat.node_tree.nodes
        diffnode = nodes['Diffuse BSDF']
        checkernode = nodes.new('ShaderNodeTexChecker')
        uvmapnode = nodes.new('ShaderNodeUVMap')
        diffnode.location = (100, 300)
        checkernode.location = ((- 100), 300)
        uvmapnode.location = ((- 300), 300)
        # Checker inputs 1 and 2 are the two tile colors (RGBA).
        checkernode.inputs[1].default_value = color1
        checkernode.inputs[2].default_value = color2
        links = mat.node_tree.links
        links.new(checkernode.outputs[0], diffnode.inputs[0])
        links.new(uvmapnode.outputs[0], checkernode.inputs[0])
        return mat

    def execute(self, context):
        # Only create each material once; existing materials are left untouched.
        if bpy.data.materials.get('ClothMaterial') is None:
            self._create_checker_material('ClothMaterial', (0.456, 0.386, 0.15, 1), (0.08, 0, 0, 1))
        if bpy.data.materials.get('CheckerGreyscale') is None:
            self._create_checker_material('CheckerGreyscale', (0.0, 0.0, 0.0, 1), (0.141, 0.133, 0.13, 1))
        if bpy.data.materials.get('CubeMaterial') is None:
            # Plain diffuse material, no checker texture.
            mat2 = bpy.data.materials.new('CubeMaterial')
            mat2.use_nodes = True
            mat2.node_tree.nodes['Diffuse BSDF'].inputs[0].default_value = (0.198, 0.371, 0.694, 1)
        return {'FINISHED'}
class MeshElementFieldProxy():
    """Proxy giving per-element access to all fields attached to a mesh element.

    For each attribute of the element's field dict, an index-converted
    subscript expression is bound on this proxy under the attribute's name;
    relation accessors (verts/edges/faces/cells) are bound as well.
    """

    def __init__(self, mesh: MeshInstance, element_type: MeshElementType, entry_expr: impl.Expr):
        ast_builder = impl.get_runtime().compiling_callable.ast_builder()
        self.mesh = mesh
        self.element_type = element_type
        # Local (per-patch) index expression of this element.
        self.entry_expr = entry_expr
        element_field = self.mesh.fields[self.element_type]
        for (key, attr) in element_field.field_dict.items():
            # Convert the local index to reordered/global space depending on
            # whether this attribute's storage was reordered.
            global_entry_expr = impl.Expr(ast_builder.mesh_index_conversion(self.mesh.mesh_ptr, element_type, entry_expr, (ConvType.l2r if element_field.attr_dict[key].reorder else ConvType.l2g), _ti_core.DebugInfo(impl.get_runtime().get_current_src_info())))
            global_entry_expr_group = impl.make_expr_group(*tuple([global_entry_expr]))
            if isinstance(attr, MatrixField):
                setattr(self, key, impl.Expr(ast_builder.expr_subscript(attr.ptr, global_entry_expr_group, _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()))))
            elif isinstance(attr, StructField):
                raise RuntimeError('MeshTaichi has not support StructField yet')
            else:
                # Scalar field: subscript its single underlying member.
                var = attr._get_field_members()[0].ptr
                setattr(self, key, impl.Expr(ast_builder.expr_subscript(var, global_entry_expr_group, _ti_core.DebugInfo(impl.get_runtime().get_current_src_info()))))
        for element_type in {MeshElementType.Vertex, MeshElementType.Edge, MeshElementType.Face, MeshElementType.Cell}:
            setattr(self, element_type_name(element_type), impl.mesh_relation_access(self.mesh, self, element_type))

    # NOTE(review): `ptr` and `id` read like accessors that upstream taichi
    # exposes as @property; the decorators may have been stripped from this
    # copy -- confirm against the upstream source before calling them.
    def ptr(self):
        # Local index expression of this element.
        return self.entry_expr

    def id(self):
        # Global id of this element (local-to-global conversion).
        ast_builder = impl.get_runtime().compiling_callable.ast_builder()
        l2g_expr = impl.Expr(ast_builder.mesh_index_conversion(self.mesh.mesh_ptr, self.element_type, self.entry_expr, ConvType.l2g, _ti_core.DebugInfo(impl.get_runtime().get_current_src_info())))
        return l2g_expr
# NOTE(review): this bare `_module()` call looks like the tail of a truncated
# registry decorator for the class below (e.g. `@LOSSES.register_module()`) --
# confirm against the upstream source.
_module()
class BerHuLoss(nn.Module):
    """Reverse Huber (BerHu) loss, commonly used for depth regression.

    L1 for absolute errors below c = 0.2 * max|pred - label| in the batch,
    quadratic above it. `loss_name` and `loss_weight` are accepted for
    registry/interface compatibility but not used by the computation.
    """

    def __init__(self, loss_name, loss_weight):
        super(BerHuLoss, self).__init__()

    def forward(self, pred, label, is_vector=None):
        """Return the scalar BerHu loss between `pred` and `label`."""
        if (not is_vector):
            # Expect a single-channel (n, 1, h, w) map; drop singleton dims.
            (n, c, h, w) = pred.size()
            assert (c == 1)
            pred = pred.squeeze()
            label = label.squeeze()
        adiff = torch.abs((pred - label))
        # Threshold is 20% of the largest absolute error in the batch.
        # NOTE(review): if pred == label everywhere, batch_max is 0 and the
        # quadratic branch divides by zero -- confirm inputs never coincide.
        batch_max = (0.2 * torch.max(adiff).item())
        t1_mask = adiff.le(batch_max).float()
        t2_mask = adiff.gt(batch_max).float()
        t1 = (adiff * t1_mask)
        t2 = (((adiff * adiff) + (batch_max * batch_max)) / (2 * batch_max))
        t2 = (t2 * t2_mask)
        # Mean over all elements; modernized to `.numel()` instead of the
        # deprecated `.data` attribute access (same value).
        return ((torch.sum(t1) + torch.sum(t2)) / pred.numel())
def get_loader(img_root, gt_root, img_size, batch_size, max_num=float('inf'), istrain=True, shuffle=False, num_workers=0, pin=False):
    """Build a DataLoader over CoData with split-specific preprocessing."""
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    if istrain:
        # Training adds scale-crop, flip and rotation augmentation.
        transform = Compose([
            RandomScaleCrop(img_size * 2, img_size * 2),
            FixedResize(img_size),
            RandomHorizontalFlip(),
            RandomRotation((-90, 90)),
            ToTensor(),
            Normalize(mean=mean, std=std),
        ])
    else:
        transform = Compose([FixedResize(img_size), ToTensor(), Normalize(mean=mean, std=std)])
    dataset = CoData(img_root, gt_root, img_size, transform, max_num, is_train=istrain)
    return data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=pin)
class SentenceAnnotation(object):
    """Token/POS annotation for one sentence, built from character offsets."""

    def __init__(self, text):
        self.text = text
        self.tokens = []
        self.postags = []
        self.nltkpostags = []
        self.nltklemmas = []
        self.foundpos = False
        # Maps char start offset -> token index and char end offset -> token index.
        self.stindices = {}
        self.enindices = {}

    def add_token(self, startend):
        """Record a token's (start, end) character offsets (indices set later)."""
        (st, en) = startend
        st = int(st)
        en = int(en)
        self.stindices[st] = len(self.tokens)
        self.enindices[en] = len(self.tokens)

    def normalize_tokens(self, logger):
        """Turn the recorded offsets into token strings; returns True on success.

        Returns None (falsy) after logging when the tokenization is
        inconsistent (overlapping or mid-word splits).
        """
        if (len(self.stindices) != len(self.enindices)):
            logger.write('\t\tIssue: overlapping tokenization for multiple tokens\n')
            return
        # Re-number start offsets in sorted order.
        start = {}
        idx = 0
        for s in sorted(self.stindices):
            self.stindices[s] = idx
            start[idx] = s
            idx += 1
        end = {}
        idx = 0
        for t in sorted(self.enindices):
            self.enindices[t] = idx
            end[idx] = t
            # Neighboring tokens must not overlap: previous end before this start.
            if ((idx > 0) and (end[(idx - 1)] > start[idx])):
                logger.write('\t\tIssue: overlapping tokenization of neighboring tokens\n')
                return
            # Slice the token text; end offsets are inclusive, hence t + 1.
            token = self.text[start[idx]:(t + 1)].strip()
            if (' ' in token):
                logger.write((('\t\tIssue: incorrect tokenization ' + token) + '\n'))
                return
            if (token == ''):
                # NOTE(review): empty tokens are skipped WITHOUT advancing idx,
                # so later offset lookups shift -- confirm this is intended.
                continue
            self.tokens.append(token)
            idx += 1
        try:
            self.nltkpostags = [ele[1] for ele in nltk.pos_tag(self.tokens)]
            for idx in range(len(self.tokens)):
                tok = self.tokens[idx]
                # Lemmatize verbs with the verb POS hint, everything else as noun.
                if self.nltkpostags[idx].startswith('V'):
                    self.nltklemmas.append(lemmatizer.lemmatize(tok, pos='v'))
                else:
                    self.nltklemmas.append(lemmatizer.lemmatize(tok))
        except IndexError:
            print(self.tokens)
            print(nltk.pos_tag(self.tokens))
        return True

    def get_tokens_by_offset(self, startend):
        """Map (start, end) character offsets back to (token index, token index)."""
        (st, en) = startend
        st = int(st)
        en = int(en)
        if ((st not in self.stindices) or (en not in self.enindices)):
            raise Exception('\t\tBug: broken tokenization', st, en)
        return (self.stindices[st], self.enindices[en])

    def add_postag(self, postag):
        """Append a gold POS tag and mark that POS info is available."""
        self.foundpos = True
        self.postags.append(postag)

    def size(self):
        """Number of tokens."""
        return len(self.tokens)

    def info_at_idx(self, idx):
        """Return (token, gold POS or EMPTY_LABEL, nltk POS, nltk lemma) at idx."""
        if (len(self.tokens) <= idx):
            raise Exception('\t\tBug: invalid index', idx)
        if (len(self.postags) <= idx):
            postag = EMPTY_LABEL
        else:
            postag = self.postags[idx]
        return (self.tokens[idx], postag, self.nltkpostags[idx], self.nltklemmas[idx])
def expect_quitall(verbose=False):
    """Politely quit every registered expect session, then kill leftover jobs."""
    for factory in expect_objects:
        session = factory()
        if session is None:
            continue
        try:
            session.quit(verbose=verbose)
        except RuntimeError:
            # Session already gone; nothing left to quit.
            pass
    kill_spawned_jobs()
def get_parser(**parser_kwargs):
    """Build the command-line parser for training/testing runs."""

    def str2bool(value):
        # Accept real bools and the usual human-friendly spellings.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('yes', 'true', 't', 'y', '1'):
            return True
        if lowered in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected.')

    parser = argparse.ArgumentParser(**parser_kwargs)
    parser.add_argument('-n', '--name', type=str, const=True, default='', nargs='?', help='postfix for logdir')
    parser.add_argument('-r', '--resume', type=str, const=True, default='', nargs='?', help='resume from logdir or checkpoint in logdir')
    parser.add_argument('-b', '--base', nargs='*', metavar='base_config.yaml', help='paths to base configs. Loaded from left-to-right. Parameters can be overwritten or added with command-line options of the form `--key value`.', default=list())
    parser.add_argument('-t', '--train', type=str2bool, const=True, default=False, nargs='?', help='train')
    parser.add_argument('--no-test', type=str2bool, const=True, default=False, nargs='?', help='disable test')
    parser.add_argument('-p', '--project', help='name of new or path to existing project')
    parser.add_argument('-d', '--debug', type=str2bool, nargs='?', const=True, default=False, help='enable post-mortem debugging')
    parser.add_argument('-s', '--seed', type=int, default=23, help='seed for seed_everything')
    parser.add_argument('-f', '--postfix', type=str, default='', help='post-postfix for default name')
    parser.add_argument('-l', '--logdir', type=str, default='logs', help='directory for logging')
    return parser
# NOTE(review): the two decorator lines were garbled in this copy
# (`.core` / `.parametrize(...)`); restored as pytest marks -- confirm the
# mark names against the upstream test suite.
@pytest.mark.core
@pytest.mark.parametrize('borders', [{'beta': [1, 2]}, {'lambda_': [1, 2]}])
def test_partial_borders(borders):
    """Partial border specs must still expand to the full search space."""
    model = SLIM()
    res = model._prepare_param_borders(borders)
    assert (len(res) == len(model._search_space))
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename checkpoint keys in place to the YOSO naming scheme and return the dict."""
    for key in list(orig_state_dict.keys()):
        value = orig_state_dict.pop(key)
        # Pooler / sentence-classification heads are dropped entirely.
        if ('pooler' in key) or ('sen_class' in key):
            continue
        orig_state_dict[rename_key(key)] = value
    # The prediction bias is tied to the decoder bias.
    orig_state_dict['cls.predictions.bias'] = orig_state_dict['cls.predictions.decoder.bias']
    # Position ids start at 2 (padding-index convention).
    orig_state_dict['yoso.embeddings.position_ids'] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict
class DetectionEvalWrapper(nn.Module):
    """Wrap a detector with pre/postprocessing for end-to-end evaluation."""

    def __init__(self, model, device):
        super(DetectionEvalWrapper, self).__init__()
        self.model = model
        self.device = device
        # Anchor grid is input-size dependent only, so build it once.
        self.anchor_boxes = Anchors(cfg.MIN_LEVEL, cfg.MAX_LEVEL, cfg.NUM_SCALES, cfg.ASPECT_RATIOS, cfg.ANCHOR_SCALE, cfg.MODEL.IMAGE_SIZE, device).boxes
        self.model.eval()

    def forward(self, image_paths):
        """Run detection on a batch of image paths and return final detections."""
        x, img_sizes, img_scales = preprocess(image_paths)
        cls_outs, box_outs = self.model(x.to(self.device))
        cls_outs, box_outs, indices, classes = postprocess(cls_outs, box_outs)
        return generate_detections(cls_outs, box_outs, self.anchor_boxes, indices, classes, img_sizes, img_scales, cfg.MAX_DETECTIONS_PER_IMAGE)
def update(G, B, h):
    """Gebauer-Moeller update: add polynomial h to basis G, pruning pair set B.

    Returns (G_new, B_new) with the classic M, F and B criteria applied to
    discard useless critical pairs. NOTE: destructively pops from B and G.
    """
    R = h.parent()
    # C: candidate pairs (h, g); D keeps those surviving the M criterion.
    C = set(((h, g) for g in G))
    D = set()
    while C:
        # NOTE(review): this rebinds the name `h` inside the loop; the pair's
        # first element is always the new polynomial, so the value is the same.
        (h, g) = C.pop()
        lcm_divides = (lambda rhs: R.monomial_divides(LCM(LM(h), LM(rhs[1])), LCM(LM(h), LM(g))))
        # Keep (h, g) if the leading monomials are coprime (Buchberger's first
        # criterion applies later) or no other pair's lcm divides this one's (M).
        if (R.monomial_pairwise_prime(LM(h), LM(g)) or ((not any((lcm_divides(f) for f in C))) and (not any((lcm_divides(f) for f in D))))):
            D.add((h, g))
    # F criterion: drop pairs whose leading monomials are coprime.
    E = set()
    while D:
        (h, g) = D.pop()
        if (not R.monomial_pairwise_prime(LM(h), LM(g))):
            E.add((h, g))
    # B criterion: prune old pairs made redundant by LM(h).
    B_new = set()
    while B:
        (g1, g2) = B.pop()
        if ((not R.monomial_divides(LM(h), LCM(LM(g1), LM(g2)))) or (R.monomial_lcm(LM(g1), LM(h)) == LCM(LM(g1), LM(g2))) or (R.monomial_lcm(LM(h), LM(g2)) == LCM(LM(g1), LM(g2)))):
            B_new.add((g1, g2))
    B_new = B_new.union(E)
    # Remove basis elements whose leading monomial is divisible by LM(h).
    G_new = set()
    while G:
        g = G.pop()
        if (not R.monomial_divides(LM(h), LM(g))):
            G_new.add(g)
    G_new.add(h)
    return (G_new, B_new)
def register_Ns3DsrNetworkKey_methods(root_module, cls):
    """Register bindings for ns3::dsr::NetworkKey (pybindgen-style generated code)."""
    # operator< is needed so NetworkKey can serve as an ordered-map key.
    cls.add_binary_comparison_operator('<')
    cls.add_constructor([])
    cls.add_constructor([param('ns3::dsr::NetworkKey const &', 'arg0')])
    cls.add_instance_attribute('m_ackId', 'uint16_t', is_const=False)
    cls.add_instance_attribute('m_destination', 'ns3::Ipv4Address', is_const=False)
    cls.add_instance_attribute('m_nextHop', 'ns3::Ipv4Address', is_const=False)
    cls.add_instance_attribute('m_ourAdd', 'ns3::Ipv4Address', is_const=False)
    cls.add_instance_attribute('m_source', 'ns3::Ipv4Address', is_const=False)
    return
def save_epoch_accuracy(tb, set, iou, miou, epoch):
    """Log per-class IoU plus mean IoU to TensorBoard for the given split.

    NOTE: the `set` parameter shadows the builtin but is kept for
    interface compatibility with existing callers.
    """
    for class_idx in range(NUM_CLASSES):
        tag = '%sAccuracy/%s class accuracy' % (set, trainId2label[class_idx].name)
        tb.add_scalar(tag, iou[class_idx], epoch)
    tb.add_scalar('%sAccuracy/Accuracy History [mIoU]' % set, miou, epoch)
class CyclicPermutationsOfPartition(Permutations):
    """Combinations of independent cyclic permutations of each cell of a partition."""

    def __classcall_private__(cls, partition):
        # Normalize to a hashable tuple-of-tuples so caching works.
        partition = tuple(map(tuple, partition))
        return super().__classcall__(cls, partition)

    def __init__(self, partition):
        self.partition = partition
        Permutations.__init__(self, category=FiniteEnumeratedSets())

    class Element(ClonableArray):
        def check(self):
            """Validate that each cell rearranges the matching partition cell."""
            if ([sorted(_) for _ in self] != [sorted(_) for _ in self.parent().partition]):
                # BUG FIX: the original used `'...' % x` with no conversion
                # specifier in the string, which raises TypeError instead of
                # the intended ValueError message.
                raise ValueError('Invalid cyclic permutation of the partition %s' % (self.parent().partition,))

    def _repr_(self):
        return 'Cyclic permutations of partition {}'.format([list(_) for _ in self.partition])

    def __iter__(self, distinct=False):
        """Iterate over all combinations of cyclic permutations, cell by cell."""
        if (len(self.partition) == 1):
            for i in CyclicPermutations(self.partition[0]).iterator(distinct=distinct):
                yield self.element_class(self, [i], check=False)
        else:
            # Recurse on the tail, prefix every cyclic permutation of the head.
            for right in CyclicPermutationsOfPartition(self.partition[1:]).iterator(distinct=distinct):
                for perm in CyclicPermutations(self.partition[0]).iterator(distinct=distinct):
                    yield self.element_class(self, ([perm] + list(right)), check=False)
    iterator = __iter__

    def list(self, distinct=False):
        return list(self.iterator(distinct=distinct))
def test_isotonic_non_regression_inf_slope():
    """Predictions must stay finite even with near-denormal x spacing."""
    x_train = np.array([0.0, 4.1e-320, 4.4e-314, 1.0])
    y_train = np.array([0.42, 0.42, 0.44, 0.44])
    model = IsotonicRegression().fit(x_train, y_train)
    predictions = model.predict(np.array([0, 2.1e-319, 5.4e-316, 1e-10]))
    assert np.all(np.isfinite(predictions))
class OnnxSeq2SeqConfigWithPast(OnnxConfigWithPast):
    """ONNX export config for encoder-decoder models with past key/values.

    NOTE(review): `outputs`, `num_layers` and `num_attention_heads` look like
    they are @property accessors upstream (they call `super().outputs` etc.
    as attributes); the decorators may have been stripped from this copy --
    confirm against the upstream source.
    """

    def outputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Rename sequence axes per side and add present key/values if needed."""
        common_outputs = super(OnnxConfigWithPast, self).outputs
        for (name, axes_names) in common_outputs.items():
            # Encoder outputs keep 'encoder_sequence', everything else 'decoder_sequence'.
            sequence_name = ('encoder_sequence' if ('encoder' in name) else 'decoder_sequence')
            # NOTE(review): the inner loop intentionally(?) shadows `name`.
            for (axis_idx, name) in axes_names.items():
                if ('sequence' in name):
                    axes_names[axis_idx] = sequence_name
                else:
                    axes_names[axis_idx] = name
        if self.use_past:
            self.fill_with_past_key_values_(common_outputs, direction='outputs')
        return common_outputs

    def num_layers(self) -> Tuple[int]:
        """Return (encoder_layers, decoder_layers), duplicating a single count."""
        try:
            num_layers = super().num_layers
            num_layers = (num_layers, num_layers)
        except AttributeError:
            if (hasattr(self._config, 'encoder_layers') and hasattr(self._config, 'decoder_layers')):
                num_layers = (self._config.encoder_layers, self._config.decoder_layers)
            else:
                raise AttributeError('could not find the number of encoder and decoder layers attributes in the model configuration, override the num_layers property of the model OnnxConfig to solve this')
        return num_layers

    def num_attention_heads(self) -> Tuple[int]:
        """Return (encoder_heads, decoder_heads), duplicating a single count."""
        try:
            num_attention_heads = super().num_attention_heads
            num_attention_heads = (num_attention_heads, num_attention_heads)
        except AttributeError:
            if (hasattr(self._config, 'encoder_attention_heads') and hasattr(self._config, 'decoder_attention_heads')):
                num_attention_heads = (self._config.encoder_attention_heads, self._config.decoder_attention_heads)
            else:
                raise AttributeError('could not find the number of attention heads for the encoder and the decoder attributes in the model configuration, override the num_attention_heads property of the model OnnxConfig to solve this')
        return num_attention_heads

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
        """Build dummy encoder+decoder inputs (and past key/values when enabled)."""
        encoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size, seq_length, is_pair, framework)
        # With past, the decoder only consumes the single next token.
        decoder_seq_length = (seq_length if (not self.use_past) else 1)
        decoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f'decoder_{name}': tensor for (name, tensor) in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if (not is_torch_available()):
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch = common_inputs['input_ids'].shape[0]
            encoder_seq_length = common_inputs['input_ids'].shape[1]
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            (num_encoder_attention_heads, num_decoder_attention_heads) = self.num_attention_heads
            encoder_shape = (batch, num_encoder_attention_heads, encoder_seq_length, (self._config.hidden_size // num_encoder_attention_heads))
            # Decoder past is padded by 3 extra positions.
            decoder_shape = (batch, num_decoder_attention_heads, (decoder_seq_length + 3), (self._config.hidden_size // num_decoder_attention_heads))
            common_inputs['past_key_values'] = []
            (num_encoder_layers, num_decoder_layers) = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            # Layers beyond the smaller stack only get the remaining side's tensors.
            max_num_layers = (max(num_encoder_layers, num_decoder_layers) - min_num_layers)
            remaining_side_name = ('encoder' if (num_encoder_layers > num_decoder_layers) else 'decoder')
            for _ in range(min_num_layers):
                # Order per layer: decoder key, decoder value, encoder key, encoder value.
                common_inputs['past_key_values'].append((torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape)))
            shape = (encoder_shape if (remaining_side_name == 'encoder') else decoder_shape)
            for _ in range(min_num_layers, max_num_layers):
                common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[(str, Mapping[(int, str)])], direction: str):
        """Register dynamic-axis info for past/present key-value tensors."""
        if (direction not in ['inputs', 'outputs']):
            raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
        name = ('past_key_values' if (direction == 'inputs') else 'present')
        (num_encoder_layers, num_decoder_layers) = self.num_layers
        min_num_layers = min(num_encoder_layers, num_decoder_layers)
        max_num_layers = (max(num_encoder_layers, num_decoder_layers) - min_num_layers)
        remaining_side_name = ('encoder' if (num_encoder_layers > num_decoder_layers) else 'decoder')
        encoder_sequence = 'past_encoder_sequence'
        decoder_sequence = ('past_decoder_sequence' if (direction == 'inputs') else 'past_decoder_sequence + sequence')
        for i in range(min_num_layers):
            inputs_or_outputs[f'{name}.{i}.decoder.key'] = {0: 'batch', 2: decoder_sequence}
            inputs_or_outputs[f'{name}.{i}.decoder.value'] = {0: 'batch', 2: decoder_sequence}
            inputs_or_outputs[f'{name}.{i}.encoder.key'] = {0: 'batch', 2: encoder_sequence}
            inputs_or_outputs[f'{name}.{i}.encoder.value'] = {0: 'batch', 2: encoder_sequence}
        for i in range(min_num_layers, max_num_layers):
            if (remaining_side_name == 'encoder'):
                axes_info = {0: 'batch', 2: encoder_sequence}
            else:
                axes_info = {0: 'batch', 2: decoder_sequence}
            # NOTE(review): only the `.key` entry is registered here (no
            # `.value`) -- confirm against upstream whether that is intended.
            inputs_or_outputs[f'{name}.{i}.{remaining_side_name}.key'] = axes_info

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        """Flatten one 4-tuple of past tensors into named entries."""
        flattened_output[f'{name}.{idx}.decoder.key'] = t[0]
        flattened_output[f'{name}.{idx}.decoder.value'] = t[1]
        flattened_output[f'{name}.{idx}.encoder.key'] = t[2]
        flattened_output[f'{name}.{idx}.encoder.value'] = t[3]
def pad_batch(batch, padding=(- 1)):
    """Right-pad each 1-D array in *batch* to the longest length with *padding*."""
    longest = max(len(seq) for seq in batch)
    padded = []
    for seq in batch:
        # Preserve each sequence's dtype; unused tail is the padding value.
        row = np.full(longest, padding, dtype=seq.dtype)
        row[:len(seq)] = seq
        padded.append(row)
    return padded
# NOTE(review): the decorator line was garbled to a bare tuple in this copy;
# restored as unittest.skipUnless based on the (condition, reason) pair --
# confirm against the upstream test suite.
@unittest.skipUnless(spacepy.lib.have_libspacepy, 'No C backend')
class BootstrapTestsPython(BootstrapTests):
    """Re-run BootstrapTests with the C backend disabled (pure-Python path)."""

    def setUp(self):
        # Force the pure-Python implementation for the duration of each test.
        spacepy.lib.have_libspacepy = False
        super(BootstrapTestsPython, self).setUp()

    def tearDown(self):
        super(BootstrapTestsPython, self).tearDown()
        spacepy.lib.have_libspacepy = True
def vae_loss_mse(y_hat, target, mu, logvar, *, kld_prefactor=1.0):
    """MSE reconstruction plus KL divergence loss for a Gaussian VAE."""
    reconstruction = torch.nn.functional.mse_loss(y_hat, target, reduction='mean')
    # KL(q(z|x) || N(0, I)), averaged over the batch dimension.
    batch_size = y_hat.shape[0]
    kld = (-0.5 * torch.sum(((1 + logvar) - mu.pow(2)) - logvar.exp())) / batch_size
    return reconstruction + kld_prefactor * kld
def assert_close(actual: Any, expected: Any, *, allow_subclasses: bool=True, rtol: Optional[float]=None, atol: Optional[float]=None, equal_nan: bool=False, check_device: bool=True, check_dtype: bool=True, check_stride: bool=False, check_is_coalesced: bool=True, msg: Optional[Union[(str, Callable[([Tensor, Tensor, Diagnostics], str)])]]=None) -> None:
    """Assert that `actual` and `expected` are (element-wise) close."""
    __tracebackhide__ = True
    # rtol and atol must be given together or not at all.
    if (rtol is None) ^ (atol is None):
        raise ValueError(f"Both 'rtol' and 'atol' must be either specified or omitted, but got no {('rtol' if (rtol is None) else 'atol')}.")
    error_meta, pair = _parse_inputs(actual, expected, allow_subclasses=allow_subclasses)
    if error_meta:
        raise error_meta.to_error()
    pair = cast(Union[(_TensorPair, List, Dict)], pair)
    error_meta = _check_pair_close(pair, rtol=rtol, atol=atol, equal_nan=equal_nan, check_device=check_device, check_dtype=check_dtype, check_stride=check_stride, check_is_coalesced=check_is_coalesced, msg=msg)
    if error_meta:
        raise error_meta.to_error()
class StructField(object):
    """Maps one LCM struct member onto its protobuf field for code generation."""

    def __init__(self, parent, member, type_map, args):
        self.args = args
        self.parent = parent
        self.struct = parent.struct
        self.member = member
        self.lcm_name = member.name
        # Protobuf field names are conventionally lower-case.
        self.proto_name = member.name.lower()
        self.repeated = isinstance(member, ArrayMember)
        self.field_id = member.field_id
        self.type_ref = member.type_ref
        self._type_map = type_map

    def proto_type_declaration(self):
        """Return the protobuf type declaration string for this field."""
        # A repeated byte array maps to the protobuf scalar `bytes` type.
        if ((self.type_ref.package_name is None) and (self.type_ref.name == 'byte') and self.repeated):
            return 'bytes'
        if ((self.type_ref.package_name is None) and (self.type_ref.name in PRIMITIVE_MAP)):
            primitive_info = PRIMITIVE_MAP[self.type_ref.name]
            if (primitive_info.short_int_warning and (not self.struct.get_notation_property('#protobuf', 'allow_short_ints'))):
                raise TypeError('Using type {} is not allowed by default. Use #protobuf{{allow_short_ints = true}} to suppress this error.'.format(self.type_ref.name))
            name = primitive_info.proto_decl
        elif self.referenced_type:
            name = self.referenced_type.proto_reference_name
        else:
            raise KeyError('Missing referenced type: {}'.format(self.type_ref))
        return ('repeated {}'.format(name) if self.repeated else name)

    def get_type(self):
        """Return primitive-type info, or the referenced message type."""
        if (self.type_ref.package_name is None):
            return PRIMITIVE_MAP[self.type_ref.name]
        return self.referenced_type

    @property
    def referenced_type(self):
        """Resolved message type for a package-qualified type_ref (or None).

        BUG FIX: restored the @property decorator. Callers access
        `self.referenced_type` as an attribute (see proto_type_declaration
        and get_type); without the decorator they would receive the bound
        method object, which is always truthy and has no
        `proto_reference_name` attribute.
        """
        full_type_name = '{}.{}'.format(self.type_ref.package_name, self.type_ref.name)
        return self._type_map.get(full_type_name)

    def pb_to_lcm(self):
        """Emit C++ code copying this field from a protobuf message into LCM."""
        try:
            if (self.field_id is None):
                # Size members have no proto field; derive them from the array.
                array_field = self.parent.array_for_dim(self.lcm_name)
                if (array_field.type_ref.name == 'byte'):
                    return 'out.{} = in.{}().size();'.format(self.lcm_name, array_field.proto_name)
                return 'out.{} = in.{}_size();'.format(self.lcm_name, array_field.proto_name)
            field_type = self.get_type()
            if (not self.repeated):
                in_expression = field_type.convert_pb_to_lcm('in.{}()'.format(self.proto_name))
                return 'out.{} = {};'.format(self.lcm_name, in_expression)
            in_expression = field_type.convert_pb_to_lcm('in.{}(i)'.format(self.proto_name))
            if (self.type_ref.name == 'byte'):
                # proto `bytes` copies whole; no per-element loop needed.
                in_expression = 'in.{}()'.format(self.proto_name)
                return 'out.{} = std::vector<uint8_t>({in_expr}.begin(), {in_expr}.end());'.format(self.lcm_name, in_expr=in_expression)
            dim = self.struct.member_map[self.lcm_name].dims[0]
            var_max_expression = 'in.{}_size()'.format(self.proto_name)
            if (dim.size_int is None):
                # Variable-size array: push every incoming element.
                return 'for (int i = 0; i < {}; i++) {{\n  out.{}.push_back({});\n}}'.format(var_max_expression, self.lcm_name, in_expression)
            # Fixed-size array: copy what arrived, default-fill the rest.
            main_loop = 'for (int i = 0; i < {} && i < {}; i++) {{\n  out.{}[i] = {};\n}}'.format(dim.size_int, var_max_expression, self.lcm_name, in_expression)
            fill_loop = 'for (int i = {}; i < {}; i++) {{\n  out.{}[i] = {};\n}}'.format(var_max_expression, dim.size_int, self.lcm_name, field_type.default_lcm_value)
            if (self.type_ref.name == 'string'):
                return main_loop
            return '{}\n{}'.format(main_loop, fill_loop)
        except Exception as e:
            print(e)
            raise

    def lcm_to_pb(self):
        """Emit C++ code copying this field from an LCM struct into protobuf."""
        if (self.field_id is None):
            array_field = self.parent.array_for_dim(self.lcm_name)
            return '// skip {} (size of {})'.format(self.lcm_name, array_field.lcm_name)
        field_type = self.get_type()
        if (not self.repeated):
            in_expression = 'in.{}'.format(self.lcm_name)
            return '{};'.format(field_type.single_lcm_to_pb(self.proto_name, in_expression))
        dim = self.struct.member_map[self.lcm_name].dims[0]
        if (self.type_ref.name == 'byte'):
            in_expression = 'in.{}'.format(self.lcm_name)
            if dim.auto_member:
                return 'out->set_{}(std::string({in_expr}.begin(), {in_expr}.end()));'.format(self.proto_name, in_expr=in_expression)
            return 'out->set_{}(std::string({expr}.begin(), {expr}.begin() + in.{dim}));'.format(self.proto_name, expr=in_expression, dim=dim.size_str)
        max_expression = ('in.{}'.format(dim.size_str) if (dim.size_int is None) else dim.size_int)
        if dim.auto_member:
            max_expression = 'in.{}.size()'.format(self.lcm_name)
        add_statement = field_type.add_lcm_to_pb(self.proto_name, 'in.{}[i]'.format(self.lcm_name))
        return 'for (int i = 0; i < {}; i++) {{\n  {};\n}}'.format(max_expression, add_statement)
class MapDatasetBase(object):
    """Minimal map-style dataset interface: indexable sequences plus optional
    metadata hooks. Optional hooks raise OptionalNotImplementedError so callers
    can distinguish "not supported" from a genuine abstract-method violation."""

    def __init__(self, data_types=None):
        """:param dict|None data_types: optional mapping data-key -> type description."""
        self.data_types = data_types or {}

    def __len__(self):
        """Number of sequences. Subclasses must implement."""
        raise NotImplementedError

    def __getitem__(self, seq_idx):
        """Return the sequence at index `seq_idx`. Subclasses must implement."""
        raise NotImplementedError

    def get_seq_len(self, seq_idx):
        """Optional hook: length of sequence `seq_idx` (e.g. for sorting/batching)."""
        raise OptionalNotImplementedError

    def get_seq_tag(self, seq_idx):
        """Human-readable tag for sequence `seq_idx`; defaults to 'seq-<idx>'."""
        return 'seq-%i' % seq_idx

    def get_seq_order(self, epoch=None):
        """Optional hook: custom sequence ordering for the given epoch."""
        raise OptionalNotImplementedError
def test_ignore_between():
    """from_json with a schema must drop data not declared in the schema:
    the 'y' property (whatever JSON value it holds) never reaches the output."""
    schema = {'type': 'array', 'items': {'type': 'object', 'properties': {'z': {'type': 'boolean'}, 'x': {'type': 'integer'}}, 'required': ['z', 'x']}}
    ignored_payloads = ['null', 'true', '2', '2.2', '[]', '[2]', '[2, 2.2]', '{}', '{"z": 2.2}', '{"z": []}', '{"z": [2]}', '{"z": [2, 2.2]}']
    for what in ignored_payloads:
        source = '[{"x": 1, "y": ' + what + ', "z": true}, {"x": 3, "z": false}]'
        array = ak.from_json(source, schema=schema)
        assert array.to_list() == [{'x': 1, 'z': True}, {'x': 3, 'z': False}]
        assert str(array.type) == '2 * {z: bool, x: int64}'
def valid(args, model, data_loader):
    """Run one evaluation pass over `data_loader`.

    Computes cross-entropy loss and top-1 accuracy per batch, accumulates them
    in a MetricLogger, and returns {metric_name: global_average}.
    """
    criterion = torch.nn.CrossEntropyLoss()
    metric_logger = misc.MetricLogger(delimiter=' ')
    header = 'Test:'
    model.eval()
    print('++++++ Running Validation ++++++')
    for batch in metric_logger.log_every(data_loader, 10, header):
        images = batch[0].to(args.device, non_blocking=True)
        target = batch[-1].to(args.device, non_blocking=True)
        # No gradients needed during evaluation.
        with torch.no_grad():
            output = model(images)
            loss = criterion(output, target)
        # topk=(1, 2): only the top-1 value is reported.
        acc1, _ = accuracy(output, target, topk=(1, 2))
        n_samples = images.shape[0]
        metric_logger.update(loss=loss.item())
        metric_logger.meters['acc1'].update(acc1.item(), n=n_samples)
    print('* {top1.global_avg:.3f} loss {losses.global_avg:.3f}'.format(top1=metric_logger.acc1, losses=metric_logger.loss))
    return {name: meter.global_avg for name, meter in metric_logger.meters.items()}
def ref_max_pooling_3d(x, kernel, stride, ignore_border, pad):
    """Reference 3D max pooling: apply refs.pooling_3d over every item obtained
    by collapsing the leading batch dimensions of `x`, then restore the shape."""
    pooled = []
    # Flatten all leading dims so each slice is one (possibly 3D) volume.
    for sub in x.reshape((-1,) + x.shape[-4:]):
        if sub.ndim == 3:
            # Promote to 4D, as the reference implementation expects.
            sub = sub[np.newaxis]
        pooled.append(refs.pooling_3d(sub, 'max', kernel, stride, pad, ignore_border)[np.newaxis])
    stacked = np.vstack(pooled)
    if x.ndim == 3:
        stacked = np.squeeze(stacked, 1)
    # Re-attach the original leading batch dimensions.
    return stacked.reshape(x.shape[:-4] + stacked.shape[1:])
def U_6(params, wires):
    """Two-qubit variational block (10 params): an RX+RZ rotation layer on both
    wires, two opposing CRX entanglers, then a second RX+RZ rotation layer."""
    pair = (wires[0], wires[1])
    for offset, w in enumerate(pair):
        qml.RX(params[offset], wires=w)
    for offset, w in enumerate(pair):
        qml.RZ(params[2 + offset], wires=w)
    # Controlled rotations in both directions couple the two qubits.
    qml.CRX(params[4], wires=[wires[1], wires[0]])
    qml.CRX(params[5], wires=[wires[0], wires[1]])
    for offset, w in enumerate(pair):
        qml.RX(params[6 + offset], wires=w)
    for offset, w in enumerate(pair):
        qml.RZ(params[8 + offset], wires=w)
def init_net(net, net_file):
    """Initialize `net`: load weights from `net_file` when a path is given,
    otherwise apply the default `weights_init` initializer to every submodule."""
    if not net_file:
        net.apply(weights_init)
        return
    net.load_state_dict(torch.load(net_file))
def test_StaticDataset_utf8():
    """Round-trip a UTF-8 string through StaticDataset as raw uint8 bytes and
    serialize it back to the original unicode string."""
    # BUG FIX: the source had s = 'wer' (3 ASCII chars), which contradicts every
    # assertion below: 3 ASCII chars encode to 3 bytes, not the expected
    # [119, 195, 171, 114]. The bytes 0xC3 0xAB are the UTF-8 encoding of
    # U+00EB ('ë'), so the intended string is 'wër'; the non-ASCII character was
    # evidently lost in a re-encoding of the file.
    s = u'w\xebr'
    print('some unicode str:', s, 'repr:', repr(s), 'type:', type(s), 'len:', len(s))
    assert (len(s) == 3)
    if PY3:
        assert isinstance(s, str)
        s_byte_list = list(s.encode('utf8'))
    else:
        assert isinstance(s, unicode)
        s_byte_list = list(map(ord, s.encode('utf8')))
    print('utf8 byte list:', s_byte_list)
    # 'ë' takes two UTF-8 bytes, hence 4 bytes for 3 characters.
    assert (len(s_byte_list) == 4 > 3)
    raw = numpy.array(s_byte_list, dtype='uint8')
    assert_equal(raw.tolist(), [119, 195, 171, 114])
    data = StaticDataset([{'data': raw}], output_dim={'data': (255, 1)})
    if ('data' not in data.labels):
        # Byte-valued data: label i is simply the character with code point i.
        data.labels['data'] = [chr(i) for i in range(255)]
    data.init_seq_order(epoch=1)
    data.load_seqs(0, 1)
    raw_ = data.get_data(seq_idx=0, key='data')
    assert_equal(raw.tolist(), raw_.tolist())
    assert data.can_serialize_data(key='data')
    s_serialized = data.serialize_data(key='data', data=raw)
    print('serialized:', s_serialized, 'repr:', repr(s_serialized), 'type:', type(s_serialized))
    assert_equal(s, s_serialized)
# NOTE(review): this call is almost certainly a decorator whose leading '@' was
# lost during reformatting (other decorators in this file are truncated the same
# way); presumably it registers the builder class below under this dataset
# name — confirm against the upstream source.
_builder('violin_entailment_instruct')
class ViolinEntailmentInstructBuilder(BaseDatasetBuilder):
    """Dataset builder for the Violin video-entailment instruction dataset."""

    # The same dataset class serves both the training and evaluation splits.
    train_dataset_cls = ViolinVideoEntailmentInstructDataset
    eval_dataset_cls = ViolinVideoEntailmentInstructDataset

    # Config name -> YAML file holding dataset paths/options.
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/violin/defaults_entail_instruct.yaml'}
class Cache():
    """Thread-safe memoization of (fun, args) -> fun(*args).

    When `gpu` is set, freshly computed results are moved to the GPU device
    before being cached. When `enabled` is False, `get` calls straight through
    without locking or caching.
    """

    # Sentinel distinguishing "not cached" from a legitimately cached None.
    _MISSING = object()

    def __init__(self, enabled=True, gpu=False):
        self.cache = {}
        self._mutex = Lock()
        self.enabled = enabled
        self.gpu = gpu

    def get(self, fun, args):
        """Return fun(*args), computing and caching it on first use.

        `args` must be hashable (it forms part of the cache key).
        """
        if not self.enabled:
            return fun(*args)
        # BUG FIX: the original acquired/released the mutex manually, so an
        # exception raised by fun(*args) (or by .to(GPU)) left the lock held
        # forever, deadlocking every later call. The `with` block guarantees
        # release on all exit paths.
        with self._mutex:
            # BUG FIX: the original used None as the miss marker, so functions
            # that genuinely return None were recomputed on every call; the
            # dedicated sentinel caches them like any other result.
            result = self.cache.get((fun, args), self._MISSING)
            if result is self._MISSING:
                result = fun(*args)
                if self.gpu:
                    if isinstance(result, (tuple, list)):
                        result = [x.to(GPU) for x in result]
                    else:
                        result = result.to(GPU)
                self.cache[(fun, args)] = result
            return result

    def clear(self):
        """Drop all cached entries."""
        with self._mutex:
            self.cache = {}
class Categorical(D.Categorical, Likelihood):
    """Categorical likelihood built on torch.distributions.Categorical.

    NOTE(review): __prior__ and from_model_params take `cls` but carry no
    @classmethod decorator here; decorators elsewhere in this file appear to
    have lost their '@' during reformatting — confirm against the upstream
    source before relying on call semantics.
    """

    def __prior__(cls):
        # Conjugate prior family for a categorical distribution.
        return E.Dirichlet

    def from_model_params(cls, x):
        # Build the distribution from raw model outputs: softmax over last axis.
        return cls(x.softmax((- 1)))

    def mean(self):
        # "Mean" here is actually the mode: index of the largest logit.
        return self.logits.argmax((- 1))

    def sufficient_statistic_mean(self):
        return self.probs

    def to(self, *args, **kwargs):
        # Move whichever parameterization is materialized (probs or logits) to
        # the target device/dtype in place; return self for chaining.
        if ('probs' in self.__dict__):
            self.probs = self.probs.to(*args, **kwargs)
        else:
            self.logits = self.logits.to(*args, **kwargs)
        return self
class Conv2DTransposeBNFoldingTest(BaseBatchNormalizationFolding):
    """Batch-norm folding test where the linear op is a Conv2DTranspose."""

    def __init__(self, unit_test):
        super().__init__(unit_test, linear_layer=layers.Conv2DTranspose)

    def create_networks(self):
        """Build the Conv2DTranspose -> BatchNorm -> ReLU test network."""
        input_shape = self.get_input_shapes()[0][1:]
        net_input = layers.Input(shape=input_shape)
        net = self.linear_layer(2, 3, padding='same')(net_input)
        # BN initialized to identity statistics so folding is easy to verify.
        net = layers.BatchNormalization(beta_initializer='zeros',
                                        gamma_initializer='ones',
                                        moving_mean_initializer='zeros',
                                        moving_variance_initializer='ones')(net)
        net = layers.Activation('relu')(net)
        return tf.keras.models.Model(inputs=net_input, outputs=net)
class TestParameterCounter(unittest.TestCase):
    """Checks MemoryCalculator's parameter counting on a single conv layer,
    with and without SIMD channel padding."""

    def representative_dataset(self, in_shape=(1, 8, 8, 3)):
        # Single random batch, as required by the quantization config pipeline.
        # NOTE(review): the default shape has 3 channels while the model built
        # in test_conv_layer takes 1 — confirm the dataset shape is not
        # validated against the model here.
        for _ in range(1):
            (yield [np.random.randn(*in_shape)])

    def test_conv_layer(self):
        out_channels = 2
        in_channels = 1
        kernel_size = 3
        use_bias = True
        inputs = layers.Input(shape=(8, 8, in_channels))
        x = layers.Conv2D(filters=out_channels, kernel_size=kernel_size, use_bias=use_bias)(inputs)
        model = keras.Model(inputs=inputs, outputs=x)
        fw_info = DEFAULT_KERAS_INFO
        fw_impl = PruningKerasImplementation()
        tpc = mct.get_target_platform_capabilities('tensorflow', 'imx500')
        # Build the float graph and attach a default quantization config.
        float_graph = read_model_to_graph(model, self.representative_dataset, tpc, DEFAULT_KERAS_INFO, fw_impl)
        float_graph_with_compression_config = set_quantization_configuration_to_graph(float_graph, quant_config=mct.DEFAULTCONFIG, mixed_precision_enable=False)
        self.memory_calculator = MemoryCalculator(graph=float_graph_with_compression_config, fw_info=fw_info, fw_impl=fw_impl)
        # With SIMD padding: channels are rounded up to whole 32-wide groups.
        counted_params = self.memory_calculator.get_pruned_graph_num_params(masks=None, include_padded_channels=tpc.is_simd_padding)
        simd_groups = np.ceil((out_channels / 32.0))
        expected_params = ((32 * simd_groups) * (((in_channels * kernel_size) * kernel_size) + int(use_bias)))
        self.assertEqual(counted_params, expected_params)
        # Without padding: the raw kernel + bias parameter count.
        counted_params = self.memory_calculator.get_pruned_graph_num_params(masks=None, include_padded_channels=False)
        expected_params = (out_channels * (((in_channels * kernel_size) * kernel_size) + int(use_bias)))
        self.assertEqual(counted_params, expected_params)
def plot_flows(fdf, map_f=None, min_flow=0, tiles='cartodbpositron', zoom=6, flow_color='red', opacity=0.5, flow_weight=5, flow_exp=0.5, style_function=flow_style_function, flow_popup=False, num_od_popup=5, tile_popup=True, radius_origin_point=5, color_origin_point='#3186cc', control_scale=True):
    """Render origin-destination flows from `fdf` as lines on a folium map.

    Each O->D pair above `min_flow` (self-loops excluded) is drawn as a
    LineString between the centroids of its tessellation geometries, styled by
    flow magnitude relative to the mean. Optionally a circle marker per origin
    summarizes its top `num_od_popup` outgoing flows. Returns the folium map.
    """
    if (map_f is None):
        # Center a fresh map on the mean centroid of the tessellation.
        (lon, lat) = np.mean(np.array(list(fdf.tessellation.geometry.apply(utils.get_geom_centroid).values)), axis=0)
        map_f = folium.Map(location=[lat, lon], tiles=tiles, zoom_start=zoom, control_scale=control_scale)
    mean_flows = fdf[constants.FLOW].mean()
    O_groups = fdf.groupby(by=constants.ORIGIN)
    for (O, OD) in O_groups:
        geom = fdf.get_geometry(O)
        (lonO, latO) = utils.get_geom_centroid(geom)
        for (D, T) in OD[[constants.DESTINATION, constants.FLOW]].values:
            if (O == D):
                # Skip self-flows: no line to draw.
                continue
            if (T < min_flow):
                continue
            geom = fdf.get_geometry(D)
            (lonD, latD) = utils.get_geom_centroid(geom)
            gjc = LineString([(lonO, latO), (lonD, latD)])
            # Line style scales with the flow relative to the mean flow.
            fgeojson = folium.GeoJson(gjc, name='geojson', style_function=style_function((T / mean_flows), flow_color, opacity, flow_weight, flow_exp))
            if flow_popup:
                popup = folium.Popup(('flow from %s to %s: %s' % (O, D, int(T))), max_width=300)
                fgeojson = fgeojson.add_child(popup)
            fgeojson.add_to(map_f)
    if (radius_origin_point > 0):
        # Second pass: one circle marker per origin with its top destinations.
        for (O, OD) in O_groups:
            name = ('origin: %s' % O.replace("'", '_'))
            T_D = [[T, D] for (D, T) in OD[[constants.DESTINATION, constants.FLOW]].values]
            trips_info = '<br/>'.join([('flow to %s: %s' % (dd.replace("'", '_'), int(tt))) for (tt, dd) in sorted(T_D, reverse=True)[:num_od_popup]])
            geom = fdf.get_geometry(O)
            (lonO, latO) = utils.get_geom_centroid(geom)
            fmarker = folium.CircleMarker([latO, lonO], radius=radius_origin_point, weight=2, color=color_origin_point, fill=True, fill_color=color_origin_point)
            if tile_popup:
                popup = folium.Popup(((name + '<br/>') + trips_info), max_width=300)
                fmarker = fmarker.add_child(popup)
            fmarker.add_to(map_f)
    return map_f
class Pool2DBlock(nn.Module):
    """Thin nn.Module wrapper around non-overlapping 2D max pooling
    (stride equals the window size)."""

    def __init__(self, pool_size):
        super(Pool2DBlock, self).__init__()
        # Window edge length; also used as the stride.
        self.pool_size = pool_size

    def forward(self, x):
        """Max-pool `x` with a pool_size x pool_size window."""
        k = self.pool_size
        return F.max_pool2d(x, kernel_size=k, stride=k)
def seed(seed=0):
    """Make runs reproducible: seed the python, numpy, torch (CPU and CUDA)
    and dgl RNGs, and force deterministic cuDNN kernels."""
    for seeder in (random.seed,
                   np.random.seed,
                   torch.manual_seed,
                   torch.cuda.manual_seed,
                   torch.cuda.manual_seed_all,
                   dgl.random.seed):
        seeder(seed)
    # Benchmark mode picks kernels nondeterministically; disable it and pin
    # cuDNN to deterministic algorithms.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def load_checkpoint(checkpoint_path, model, optimizer):
    """Restore model and optimizer state from a checkpoint file.

    The checkpoint must contain 'state_dict', 'optimizer', 'learning_rate'
    and 'iteration'. Returns (model, optimizer, learning_rate, iteration).
    """
    assert os.path.isfile(checkpoint_path)
    print("Loading checkpoint '{}'".format(checkpoint_path))
    # map_location='cpu' lets GPU checkpoints load on CPU-only hosts.
    state = torch.load(checkpoint_path, map_location='cpu')
    model.load_state_dict(state['state_dict'])
    optimizer.load_state_dict(state['optimizer'])
    print("Loaded checkpoint '{}' from iteration {}".format(checkpoint_path, state['iteration']))
    return (model, optimizer, state['learning_rate'], state['iteration'])
def barrier(group=group.WORLD):
    """Block until every process in `group` reaches this barrier.

    Only valid under the (deprecated) process-group backend; relies on the
    legacy torch.distributed.deprecated API and the private
    torch._C._dist_barrier hook.
    """
    assert (torch.distributed.deprecated._initialized == _INITIALIZED_PG), 'collective only supported in process-group mode'
    return torch._C._dist_barrier(group)
def check_requirements(cargs):
    """Verify the external tools needed for coverage runs are available.

    Each tool is first looked up on PATH by its bare name and then, if not
    found, by the explicit path from the command-line args. Returns True when
    everything required is present; otherwise prints what is missing and
    returns False. genhtml is only required unless lcov web reports are
    disabled via cargs.disable_lcov_web.
    """
    lcov = which('lcov')
    gcov = which('gcov')
    genhtml = which('genhtml')
    timeout = which('timeout')
    if timeout is None:
        timeout = which(cargs.timeout_path)
    if lcov is None:
        lcov = which(cargs.lcov_path)
    if genhtml is None:
        genhtml = which(cargs.genhtml_path)
    # BUG FIX: gcov never got the explicit-path fallback even though the error
    # message below references cargs.gcov_path; retry it like the other tools.
    if gcov is None:
        gcov = which(cargs.gcov_path)
    if (lcov is None) or (gcov is None) or (timeout is None):
        print('Required command not found :')
    elif (genhtml is None) and (not cargs.disable_lcov_web):
        print('Required command not found :')
    else:
        return True
    # Report each missing tool with the path that was tried.
    if lcov is None:
        print('[*] lcov command does not exist : %s' % cargs.lcov_path)
    if (genhtml is None) and (not cargs.disable_lcov_web):
        print('[*] genhtml command does not exist : %s' % cargs.genhtml_path)
    if gcov is None:
        print('[*] gcov command does not exist : %s' % cargs.gcov_path)
    return False
def match_file(dir_name: str, cache_dir: Path) -> str:
    """Find the unique file in `cache_dir` named exactly `dir_name` or
    `dir_name.<suffix>`.

    Returns the matching filename, or '' when nothing matches.
    Raises RuntimeError if more than one file matches.
    """
    exact = re.compile(dir_name + '$')
    with_suffix = re.compile(dir_name + '\\..*')
    matched_filenames = [name for name in os.listdir(cache_dir)
                         if exact.match(name) or with_suffix.match(name)]
    if not matched_filenames:
        return ''
    if len(matched_filenames) == 1:
        return matched_filenames[-1]
    raise RuntimeError(f'Duplicate matched files:{matched_filenames}, this should be caused by a bug.')
class OptunaTuner(ParamsTuner):
    """Hyperparameter tuner backed by an Optuna TPE study.

    Searches the algorithm's (or a user-supplied) search space within a time
    budget, then refits the algorithm with the best parameters found.
    """

    _name: str = 'OptunaTuner'

    # Populated during fit(); study is kept so plot() can render the history.
    study: optuna.study.Study = None
    estimated_n_trials: int = None
    mean_trial_time: Optional[int] = None

    def __init__(self, timeout: Optional[int]=1000, n_trials: Optional[int]=100, direction: Optional[str]='maximize', fit_on_holdout: bool=True, random_state: int=42):
        # timeout: wall-clock budget in seconds; n_trials: hard trial cap;
        # direction: optuna study direction; fit_on_holdout: tune on a holdout
        # split instead of full CV; random_state: TPE sampler seed.
        self.timeout = timeout
        self.n_trials = n_trials
        self.estimated_n_trials = n_trials
        self.direction = direction
        self._fit_on_holdout = fit_on_holdout
        self.random_state = random_state

    def _upd_timeout(self, timeout):
        # Only ever shrink the budget towards the externally estimated one.
        self.timeout = min(self.timeout, timeout)

    def fit(self, ml_algo: TunableAlgo, train_valid_iterator: Optional[TrainValidIterator]=None) -> Tuple[(Optional[TunableAlgo], Optional[LAMLDataset])]:
        """Tune `ml_algo`'s hyperparameters, then refit with the best ones.

        Returns (fitted_algo, predictions); returns (None, None) when tuning
        ran on a freshly converted holdout iterator (caller must refit) or an
        OptunaError was raised during optimization.
        """
        assert (not ml_algo.is_fitted), 'Fitted algo cannot be tuned.'
        # Shrink the time budget to what the algo's timer deems affordable.
        estimated_tuning_time = ml_algo.timer.estimate_tuner_time(len(train_valid_iterator))
        if estimated_tuning_time:
            estimated_tuning_time = max(estimated_tuning_time, 1)
            self._upd_timeout(estimated_tuning_time)
        logger.info(f'Start hyperparameters optimization for [1m{ml_algo._name}[0m ... Time budget is {self.timeout:.2f} secs')
        metric_name = train_valid_iterator.train.task.get_dataset_metric().name
        # Work on a copy so the caller's algo object stays untouched.
        ml_algo = deepcopy(ml_algo)
        flg_new_iterator = False
        if (self._fit_on_holdout and (type(train_valid_iterator) != HoldoutIterator)):
            train_valid_iterator = train_valid_iterator.convert_to_holdout_iterator()
            flg_new_iterator = True

        def update_trial_time(study: optuna.study.Study, trial: optuna.trial.FrozenTrial):
            # Per-trial callback: refresh the mean trial duration and estimate
            # how many trials still fit in the remaining time budget.
            ml_algo.mean_trial_time = study.trials_dataframe()['duration'].mean().total_seconds()
            self.estimated_n_trials = min(self.n_trials, (self.timeout // ml_algo.mean_trial_time))
            logger.info3(f'[1mTrial {len(study.trials)}[0m with hyperparameters {trial.params} scored {trial.value} in {trial.duration}')
        try:
            sampler = optuna.samplers.TPESampler(seed=self.random_state)
            self.study = optuna.create_study(direction=self.direction, sampler=sampler)
            self.study.optimize(func=self._get_objective(ml_algo=ml_algo, estimated_n_trials=self.estimated_n_trials, train_valid_iterator=train_valid_iterator), n_trials=self.n_trials, timeout=self.timeout, callbacks=[update_trial_time])
            self._best_params = self.study.best_params
            ml_algo.params = self._best_params
            logger.info(f'Hyperparameters optimization for [1m{ml_algo._name}[0m completed')
            logger.info2(f'''The set of hyperparameters [1m{self._best_params}[0m
achieve {self.study.best_value:.4f} {metric_name}''')
            if flg_new_iterator:
                # Tuning ran on a converted holdout split; the caller refits.
                return (None, None)
            preds_ds = ml_algo.fit_predict(train_valid_iterator)
            return (ml_algo, preds_ds)
        except optuna.exceptions.OptunaError:
            return (None, None)

    def _get_objective(self, ml_algo: TunableAlgo, estimated_n_trials: int, train_valid_iterator: TrainValidIterator) -> Callable[([optuna.trial.Trial], Union[(float, int)])]:
        """Build the per-trial objective closure passed to study.optimize()."""
        assert isinstance(ml_algo, MLAlgo)

        def objective(trial: optuna.trial.Trial) -> float:
            # Each trial works on its own copy so trials share no fitted state.
            _ml_algo = deepcopy(ml_algo)
            optimization_search_space = _ml_algo.optimization_search_space
            if (not optimization_search_space):
                optimization_search_space = _ml_algo._get_default_search_spaces(suggested_params=_ml_algo.init_params_on_input(train_valid_iterator), estimated_n_trials=estimated_n_trials)
            if callable(optimization_search_space):
                # The user supplied a callable that samples the params itself.
                _ml_algo.params = optimization_search_space(trial=trial, optimization_search_space=optimization_search_space, suggested_params=_ml_algo.init_params_on_input(train_valid_iterator))
            else:
                _ml_algo.params = self._sample(trial=trial, optimization_search_space=optimization_search_space, suggested_params=_ml_algo.init_params_on_input(train_valid_iterator))
            output_dataset = _ml_algo.fit_predict(train_valid_iterator=train_valid_iterator)
            return _ml_algo.score(output_dataset)
        return objective

    def _sample(self, optimization_search_space, trial: optuna.trial.Trial, suggested_params: dict) -> dict:
        """Draw one concrete parameter set from the declared search space."""
        trial_values = copy(suggested_params)
        for (parameter, SearchSpace) in optimization_search_space.items():
            if (SearchSpace.distribution_type in OPTUNA_DISTRIBUTIONS_MAP):
                # Dispatch to the matching trial.suggest_* method by name.
                trial_values[parameter] = getattr(trial, OPTUNA_DISTRIBUTIONS_MAP[SearchSpace.distribution_type])(name=parameter, **SearchSpace.params)
            else:
                raise ValueError(f'Optuna does not support distribution {SearchSpace.distribution_type}')
        return trial_values

    def plot(self):
        """Optimization-history plot for the last study."""
        return optuna.visualization.plot_optimization_history(self.study)
def compute_influences_parallel(device_ids: List[int], train_dataset: GlueDataset, batch_size: int, model: torch.nn.Module, test_inputs: Dict[(str, torch.Tensor)], params_filter: Optional[List[str]]=None, weight_decay: Optional[float]=None, weight_decay_ignores: Optional[List[str]]=None, s_test_damp: float=3e-05, s_test_scale: float=10000.0, s_test_num_samples: Optional[int]=None, random: bool=True, debug: bool=False, return_s_test: bool=False, train_indices_to_include: Optional[Union[(np.ndarray, List[int])]]=None) -> Tuple[(Dict[(int, float)], Optional[List[torch.FloatTensor]])]:
    """Fan influence-function computation out over multiple GPU workers.

    Each worker estimates s_test on its own small dataloader, computes
    influence scores for its scattered slice of the training data, and writes
    its results to a temp file which is merged here. In debug mode a single
    worker runs inline on one randomly chosen rank instead.
    """
    if (s_test_num_samples is None):
        raise ValueError('`s_test_num_samples` cannot be None')
    # One small dataloader per device for the s_test estimation.
    dataloders = prepare_small_dataloaders(dataset=train_dataset, random=random, batch_size=batch_size, num_datasets=len(device_ids), num_examples_per_dataset=s_test_num_samples)
    (scattered_inputs, scattered_indices) = prepare_scattered_inputs_and_indices(dataset=train_dataset, device_ids=device_ids, indices_to_include=train_indices_to_include)
    devices = [torch.device(f'cuda:{device_id}') for device_id in device_ids]
    # Workers hand results back through these files.
    tmpfiles = [tempfile.NamedTemporaryFile() for _ in range(len(device_ids))]
    try:
        process_args = [(tmpfiles[process_index].name, model, dataloders[process_index], scattered_inputs[process_index], scattered_indices[process_index], 1, devices, test_inputs, params_filter, weight_decay, weight_decay_ignores, s_test_damp, s_test_scale, s_test_num_samples, return_s_test, (True if (debug is False) else False)) for process_index in range(len(device_ids))]
        if (debug is False):
            custom_mp.spawn(compute_s_test_and_influence, list_of_args=process_args, nprocs=len(device_ids), join=True)
            influences: Dict[(int, float)] = {}
            s_test = None
            for tmpfile in tmpfiles:
                outputs_dict = torch.load(tmpfile.name)
                for (key, val) in outputs_dict['influences'].items():
                    # Workers must cover disjoint training-index sets.
                    if (key in influences.keys()):
                        raise ValueError
                    influences[key] = val
                s_test = outputs_dict.get('s_test', None)
            return (influences, s_test)
        else:
            # Debug mode: run one worker inline on a random rank.
            random_rank = np.random.choice(len(device_ids))
            print(f'Using random rank {random_rank}')
            return compute_s_test_and_influence(random_rank, *process_args[random_rank])
    finally:
        # BUG FIX: the debug branch previously leaked every NamedTemporaryFile
        # (they were only closed in the non-debug branch). Closing here covers
        # both paths — closing also deletes the temp files from disk.
        for tmpfile in tmpfiles:
            tmpfile.close()
def run_jacobi_1d(device_type: dace.dtypes.DeviceType):
    """Run the jacobi-1d kernel on the given device type and validate the
    result against the reference implementation. Returns the compiled SDFG."""
    (TSTEPS, N) = sizes['small']
    (A, B) = initialize(N)
    # Pristine copies for the ground-truth comparison below.
    A_ref = np.copy(A)
    B_ref = np.copy(B)
    if (device_type in {dace.dtypes.DeviceType.CPU, dace.dtypes.DeviceType.GPU}):
        sdfg = jacobi_1d_kernel.to_sdfg()
        sdfg = auto_optimize(sdfg, device_type)
        sdfg(TSTEPS, A, B, N=N)
    elif (device_type == dace.dtypes.DeviceType.FPGA):
        sdfg = jacobi_1d_kernel.to_sdfg(simplify=True)
        applied = sdfg.apply_transformations([FPGATransformSDFG])
        assert (applied == 1)
        # NOTE(review): forcing a Dot expansion looks copy-pasted from a BLAS
        # sample — jacobi-1d contains no dot product; confirm it is needed.
        from dace.libraries.blas import Dot
        Dot.default_implementation = 'FPGA_PartialSums'
        sdfg.expand_library_nodes()
        sdfg.apply_transformations_repeated([InlineSDFG], print_report=True)
        # N is baked into the FPGA design, so specialize before running.
        sdfg.specialize(dict(N=N))
        sdfg(TSTEPS=TSTEPS, A=A, B=B)
    ground_truth(TSTEPS, A_ref, B_ref)
    assert np.allclose(A, A_ref)
    return sdfg
class IMBALANCECIFAR10(torchvision.datasets.CIFAR10):
    """CIFAR-10 subsampled to a class-imbalanced distribution, with the ten
    labels collapsed into two super-classes (0: classes 0-4, 1: classes 5-9)."""

    # Number of original CIFAR-10 classes.
    cls_num = 10

    def __init__(self, root, imb_type='exp', imb_factor=0.01, rand_number=0, train=True, transform=None, target_transform=None, download=False):
        super(IMBALANCECIFAR10, self).__init__(root, train, transform, target_transform, download)
        # Seed the subsampling RNG so the imbalanced split is reproducible.
        np.random.seed(rand_number)
        img_num_list = self.get_img_num_per_cls(self.cls_num, imb_type, imb_factor)
        # phat: fraction of positive (super-class 1) samples; recomputed in
        # get_two_class() during gen_imbalanced_data().
        self.phat = 0.1
        self.gen_imbalanced_data(img_num_list)

    def get_img_num_per_cls(self, cls_num, imb_type, imb_factor):
        """Per-class sample counts for 'exp' (geometric decay), 'step'
        (two-level) or uniform (any other value) imbalance profiles."""
        img_max = (len(self.data) / cls_num)
        img_num_per_cls = []
        if (imb_type == 'exp'):
            for cls_idx in range(cls_num):
                # Counts decay geometrically from img_max to img_max*imb_factor.
                num = (img_max * (imb_factor ** (cls_idx / (cls_num - 1.0))))
                img_num_per_cls.append(int(num))
        elif (imb_type == 'step'):
            # First half of the classes keep full size, second half is reduced.
            for cls_idx in range((cls_num // 2)):
                img_num_per_cls.append(int(img_max))
            for cls_idx in range((cls_num // 2)):
                img_num_per_cls.append(int((img_max * imb_factor)))
        else:
            img_num_per_cls.extend(([int(img_max)] * cls_num))
        return img_num_per_cls

    def gen_imbalanced_data(self, img_num_per_cls):
        """Subsample self.data/self.targets in place to the requested per-class
        counts, then binarize the labels via get_two_class()."""
        new_data = []
        new_targets = []
        targets_np = np.array(self.targets, dtype=np.int64)
        classes = np.unique(targets_np)
        self.num_per_cls_dict = dict()
        for (the_class, the_img_num) in zip(classes, img_num_per_cls):
            self.num_per_cls_dict[the_class] = the_img_num
            idx = np.where((targets_np == the_class))[0]
            # Random subset of this class (order driven by the seeded RNG).
            np.random.shuffle(idx)
            selec_idx = idx[:the_img_num]
            new_data.append(self.data[(selec_idx, ...)])
            new_targets.extend(([the_class] * the_img_num))
        new_data = np.vstack(new_data)
        new_targets = self.get_two_class(new_targets)
        self.data = new_data
        self.targets = new_targets

    def get_cls_num_list(self):
        """Counts per original class, in class index order."""
        cls_num_list = []
        for i in range(self.cls_num):
            cls_num_list.append(self.num_per_cls_dict[i])
        return cls_num_list

    def get_two_class(self, Y):
        """Binarize labels: classes 0..cls_num/2-1 -> 0, the rest -> 1.
        Also records the positive rate in self.phat."""
        Y = np.array(Y)
        loc_0 = np.where((Y <= ((self.cls_num / 2) - 1)))[0]
        loc_1 = np.where((Y > ((self.cls_num / 2) - 1)))[0]
        Y[loc_1] = 1
        Y[loc_0] = 0
        self.phat = (len(np.where((Y == 1))[0]) / len(Y))
        return Y.tolist()
def test_ByteMaskedArray_NumpyArray():
    """CUDA-jitted element access on a ByteMaskedArray: masked slots must read
    as None (mapped to the 999.0 sentinel), valid slots as their float values."""
    # valid_when=True: a mask byte of 1 marks a valid slot.
    v2a = ak.contents.bytemaskedarray.ByteMaskedArray(ak.index.Index(np.array([1, 0, 1, 0, 1], np.int8)), ak.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])), valid_when=True)
    # NOTE(review): this call (and the one below) looks like a decorator whose
    # leading '@' was lost in reformatting, i.e. '@_cuda.jit(...)' on the
    # function that follows — confirm against the upstream source.
    _cuda.jit(extensions=[ak.numba.cuda])
    def f(out, obj):
        out[0] = len(obj)
        out[1] = (obj[0] if (obj[0] is not None) else 999.0)
        out[2] = (obj[1] if (obj[1] is not None) else 999.0)
        out[3] = (obj[2] if (obj[2] is not None) else 999.0)
        out[4] = (obj[3] if (obj[3] is not None) else 999.0)
        out[5] = (obj[4] if (obj[4] is not None) else 999.0)
    out = np.zeros(6, dtype=np.float64)
    f[(blockspergrid, threadsperblock)](out, ak.highlevel.Array(v2a, backend='cuda'))
    assert (out.tolist() == [5.0, 1.1, 999.0, 3.3, 999.0, 5.5])
    # valid_when=False: a mask byte of 0 marks a valid slot, so this mask
    # selects the same elements as v2a above.
    v2b = ak.contents.bytemaskedarray.ByteMaskedArray(ak.index.Index(np.array([0, 1, 0, 1, 0], np.int8)), ak.contents.numpyarray.NumpyArray(np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6])), valid_when=False)
    _cuda.jit(extensions=[ak.numba.cuda])
    def f(out, obj):
        out[0] = len(obj)
        out[1] = (obj[0] if (obj[0] is not None) else 999.0)
        out[2] = (obj[1] if (obj[1] is not None) else 999.0)
        out[3] = (obj[2] if (obj[2] is not None) else 999.0)
        out[4] = (obj[3] if (obj[3] is not None) else 999.0)
        out[5] = (obj[4] if (obj[4] is not None) else 999.0)
    out = np.zeros(6, dtype=np.float64)
    f[(blockspergrid, threadsperblock)](out, ak.highlevel.Array(v2b, backend='cuda'))
    assert (out.tolist() == [5.0, 1.1, 999.0, 3.3, 999.0, 5.5])
# NOTE(review): the line below is a pytest decorator whose '@pytest.mark'
# prefix appears to have been lost in reformatting (decorators elsewhere in
# this file are similarly truncated); as written it is not valid Python.
.parametrize('csr_container', (CSR_CONTAINERS + [None]))
def test_dtype_preserved(csr_container, global_dtype):
    """BisectingKMeans must keep cluster_centers_ in the input dtype, for both
    dense input and every CSR container variant."""
    rng = np.random.RandomState(0)
    X = rng.rand(10, 2).astype(global_dtype, copy=False)
    if (csr_container is not None):
        # Sparsify: zero out most entries before wrapping in the CSR container.
        X[(X < 0.8)] = 0
        X = csr_container(X)
    km = BisectingKMeans(n_clusters=3, random_state=0)
    km.fit(X)
    assert (km.cluster_centers_.dtype == global_dtype)
def test_set_last_execution_result(test_case_chromosome):
    """Setter/getter round-trip of the chromosome's last execution result.
    (`test_case_chromosome` is a pytest fixture supplied elsewhere.)"""
    result = MagicMock(ExecutionResult)
    test_case_chromosome.set_last_execution_result(result)
    assert (test_case_chromosome.get_last_execution_result() == result)
class PythonComponent(Component):
    """Build-system component for the Python (z3py) bindings.

    Packaging only: it copies the generated `python/z3` directory into the
    distribution layout and contributes nothing to the Makefile itself.
    """

    def __init__(self, name, libz3Component):
        assert isinstance(libz3Component, DLLComponent)
        global PYTHON_ENABLED
        Component.__init__(self, name, None, [])
        self.libz3Component = libz3Component

    def main_component(self):
        # The python bindings are never the main build target.
        return False

    def mk_win_dist(self, build_path, dist_path):
        """Copy the generated python/z3 tree into the Windows dist layout."""
        if not is_python_enabled():
            # Nothing to package when python support was not configured.
            return
        source_dir = os.path.join(build_path, 'python', 'z3')
        target_dir = os.path.join(dist_path, INSTALL_BIN_DIR, 'python', 'z3')
        # Remove any stale copy before copying over the fresh tree.
        if os.path.exists(target_dir):
            shutil.rmtree(target_dir)
        shutil.copytree(source_dir, target_dir)

    def mk_unix_dist(self, build_path, dist_path):
        # The unix layout is identical to the windows one for python bindings.
        self.mk_win_dist(build_path, dist_path)

    def mk_makefile(self, out):
        # Intentionally empty: packaging happens via the dist hooks above.
        return
def move_cache(cache_dir=None, new_cache_dir=None, token=None):
    """Migrate cached files from the legacy flat cache layout into the
    hub-style cache layout (repo/revision structure).

    Files whose hub metadata cannot be fetched, whose etag no longer matches,
    or whose URL cannot be parsed are skipped (stale copies are cleaned up).
    """
    if (new_cache_dir is None):
        new_cache_dir = TRANSFORMERS_CACHE
    if (cache_dir is None):
        # The legacy default cache was a sibling 'transformers' directory.
        old_cache = (Path(TRANSFORMERS_CACHE).parent / 'transformers')
        if os.path.isdir(str(old_cache)):
            cache_dir = str(old_cache)
        else:
            cache_dir = new_cache_dir
    cached_files = get_all_cached_files(cache_dir=cache_dir)
    logger.info(f'Moving {len(cached_files)} files to the new cache system')
    # Memoize hub metadata per URL so each URL is queried at most once.
    hub_metadata = {}
    for file_info in tqdm(cached_files):
        url = file_info.pop('url')
        if (url not in hub_metadata):
            try:
                hub_metadata[url] = get_hf_file_metadata(url, token=token)
            except requests.HTTPError:
                # Unreachable / gone on the hub: skip this file.
                continue
        (etag, commit_hash) = (hub_metadata[url].etag, hub_metadata[url].commit_hash)
        if ((etag is None) or (commit_hash is None)):
            continue
        if (file_info['etag'] != etag):
            # The local copy is outdated: remove it rather than migrate stale data.
            clean_files_for(os.path.join(cache_dir, file_info['file']))
            continue
        url_info = extract_info_from_url(url)
        if (url_info is None):
            # Not a hub-style URL; no place for it in the new layout.
            continue
        repo = os.path.join(new_cache_dir, url_info['repo'])
        move_to_new_cache(file=os.path.join(cache_dir, file_info['file']), repo=repo, filename=url_info['filename'], revision=url_info['revision'], etag=etag, commit_hash=commit_hash)
def barrier_if_distributed() -> None:
    """Synchronize all ranks, but only when torch.distributed is both compiled
    in and initialized with a process group; otherwise do nothing."""
    if not dist.is_available():
        return
    if not dist.is_initialized():
        return
    dist.barrier()
def test_case133():
    """POST subscription payload subdata132 to the broker, expect 201 Created."""
    url = (brokerIp + '/ngsi-ld/v1/subscriptions/')
    # NOTE(review): the Link header value below has unbalanced quotes and looks
    # garbled — presumably a mangled JSON-LD context link of the form
    # '<{{link}}>; rel="..."; type="application/ld+json"'. Left untouched here
    # because it is runtime data; confirm against the NGSI-LD spec.
    headers = {'Content-Type': 'application/json', 'Accept': 'application/ld+json', 'Link': '<{{link}}>; rel=" type="application/ld+json"'}
    r = requests.post(url, data=json.dumps(ld_data.subdata132), headers=headers)
    print(r.content)
    print(r.status_code)
    assert (r.status_code == 201)
def _log_factor_(self, base=None, locals=None):
    """Split the logarithm of this element into (growth, coefficient) pairs,
    verifying that each growth factor lives in a growth group.

    NOTE(review): as written, the first line calls this very method with the
    same arguments — unconditional infinite recursion. Presumably it was meant
    to delegate elsewhere (e.g. a parent-class or helper implementation of
    `_log_factor_`) before the file was reformatted — confirm against the
    original library before use.
    """
    log_factor = self._log_factor_(base=base, locals=locals)
    for (g, c) in log_factor:
        # Factors already belonging to a growth group are acceptable; anything
        # else cannot be represented in this parent and is an error.
        if (hasattr(g, 'parent') and isinstance(g.parent(), GenericGrowthGroup)):
            continue
        from .misc import log_string
        raise ArithmeticError(('Cannot build %s since %s is not in %s.' % (log_string(self, base), g, self.parent())))
    return log_factor
def _tags_to_preslots(tags, tokens, is_start_of_slot, is_end_of_slot):
    """Group tagged tokens into raw slot dicts (character range + slot name).

    `is_start_of_slot` / `is_end_of_slot` are predicates over (tags, index)
    that delimit each slot span.
    """
    slots = []
    span_start = 0
    for idx, tag in enumerate(tags):
        if is_start_of_slot(tags, idx):
            span_start = idx
        if is_end_of_slot(tags, idx):
            slots.append({
                RANGE: {START: tokens[span_start].start, END: tokens[idx].end},
                SLOT_NAME: tag_name_to_slot_name(tag),
            })
            # Reset the span anchor so a back-to-back slot starts here.
            span_start = idx
    return slots
def weight_variable(shape):
    """Return a TF variable of `shape`, initialized from a truncated normal
    distribution with stddev 0.1."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def simplify_mesh(mesh, f_target=10000, agressiveness=7.0):
    """Decimate `mesh` to roughly `f_target` faces via mesh_simplify and
    return a new trimesh.Trimesh (no post-processing).

    Note: `agressiveness` (sic) keeps its original misspelled keyword name
    for caller compatibility.
    """
    new_vertices, new_faces = mesh_simplify(mesh.vertices, mesh.faces, f_target, agressiveness)
    return trimesh.Trimesh(new_vertices, new_faces, process=False)
def get_sgd_weight_predictor(sgd_type: str, pred_mem: str, pred_type: str, optimizer, scheduler=None, nag_with_predictor=False, true_weights_storage=None) -> WeightPredictor:
    """Factory for SGD weight predictors.

    Dispatches on whether any param group uses weight decay:
    with weight decay only ('sgd1', 'clone', 'aggmsnag') is implemented;
    without it, 'msnag' predictors are looked up in the module dispatch tables.
    Raises NotImplementedError for every unsupported combination.
    """
    has_weight_decay = any([(pg['weight_decay'] != 0) for pg in optimizer.param_groups])
    if has_weight_decay:
        if (pred_type == 'msnag'):
            raise NotImplementedError(f'this is constantyly changed to to aggmsnag since it is better, use it instead. For measuring msnag alone - change the code')
        if (pred_type != 'aggmsnag'):
            raise NotImplementedError()
        if (sgd_type == 'sgd1'):
            if (pred_mem == 'clone'):
                return SGDWDClonedWeightPrediction(optimizer, fix_fn=None, scheduler=scheduler, nag_with_predictor=nag_with_predictor, true_weights_storage=true_weights_storage)
            else:
                raise NotImplementedError()
        else:
            raise NotImplementedError()
    else:
        if (pred_type != 'msnag'):
            raise NotImplementedError(pred_type)
        # NOTE(review): both .get(..., None) lookups fall through to a TypeError
        # (calling None) on an unknown sgd_type/pred_mem rather than a clean
        # NotImplementedError — confirm whether that is intended.
        fix_fn_cls = SGD_TYPE_TO_MSNAG_CLASS.get(sgd_type, None)
        fix_fn = fix_fn_cls()
        pred_cls = PRED_MEM_TO_CLASS.get(pred_mem, None)
        return pred_cls(optimizer, fix_fn=fix_fn, scheduler=scheduler, nag_with_predictor=nag_with_predictor, true_weights_storage=true_weights_storage)
def register_types(module):
root_module = module.get_root()
module.add_class('Address', import_from_module='ns.network')
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
module.add_class('AsciiTraceHelper', import_from_module='ns.network')
module.add_class('AsciiTraceHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
module.add_class('AsciiTraceHelperForIpv4', allow_subclassing=True, import_from_module='ns.internet')
module.add_class('AsciiTraceHelperForIpv6', allow_subclassing=True, import_from_module='ns.internet')
module.add_class('AttributeConstructionList', import_from_module='ns.core')
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator', u'ns3::AttributeConstructionList::CIterator')
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator*', u'ns3::AttributeConstructionList::CIterator*')
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator&', u'ns3::AttributeConstructionList::CIterator&')
module.add_class('Buffer', import_from_module='ns.network')
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
module.add_class('ByteTagIterator', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
module.add_class('ByteTagList', import_from_module='ns.network')
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
module.add_class('CallbackBase', import_from_module='ns.core')
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::EventImpl'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::NixVector'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Packet'])
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
module.add_class('EventId', import_from_module='ns.core')
module.add_class('Hasher', import_from_module='ns.core')
module.add_class('Inet6SocketAddress', import_from_module='ns.network')
root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('InetSocketAddress', import_from_module='ns.network')
root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Ipv4Address', import_from_module='ns.network')
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Ipv4AddressHelper', import_from_module='ns.internet')
module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet')
module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet')
module.add_class('Ipv4InterfaceContainer', import_from_module='ns.internet')
typehandlers.add_type_alias(u'std::vector< std::pair< ns3::Ptr< ns3::Ipv4 >, unsigned int > > const_iterator', u'ns3::Ipv4InterfaceContainer::Iterator')
typehandlers.add_type_alias(u'std::vector< std::pair< ns3::Ptr< ns3::Ipv4 >, unsigned int > > const_iterator*', u'ns3::Ipv4InterfaceContainer::Iterator*')
typehandlers.add_type_alias(u'std::vector< std::pair< ns3::Ptr< ns3::Ipv4 >, unsigned int > > const_iterator&', u'ns3::Ipv4InterfaceContainer::Iterator&')
module.add_class('Ipv4Mask', import_from_module='ns.network')
module.add_class('Ipv6Address', import_from_module='ns.network')
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Ipv6AddressHelper', import_from_module='ns.internet')
module.add_class('Ipv6InterfaceAddress', import_from_module='ns.internet')
module.add_enum('State_e', ['TENTATIVE', 'DEPRECATED', 'PREFERRED', 'PERMANENT', 'HOMEADDRESS', 'TENTATIVE_OPTIMISTIC', 'INVALID'], outer_class=root_module['ns3::Ipv6InterfaceAddress'], import_from_module='ns.internet')
module.add_enum('Scope_e', ['HOST', 'LINKLOCAL', 'GLOBAL'], outer_class=root_module['ns3::Ipv6InterfaceAddress'], import_from_module='ns.internet')
module.add_class('Ipv6InterfaceContainer', import_from_module='ns.internet')
typehandlers.add_type_alias(u'std::vector< std::pair< ns3::Ptr< ns3::Ipv6 >, unsigned int > > const_iterator', u'ns3::Ipv6InterfaceContainer::Iterator')
typehandlers.add_type_alias(u'std::vector< std::pair< ns3::Ptr< ns3::Ipv6 >, unsigned int > > const_iterator*', u'ns3::Ipv6InterfaceContainer::Iterator*')
typehandlers.add_type_alias(u'std::vector< std::pair< ns3::Ptr< ns3::Ipv6 >, unsigned int > > const_iterator&', u'ns3::Ipv6InterfaceContainer::Iterator&')
module.add_class('Ipv6Prefix', import_from_module='ns.network')
module.add_class('Mac48Address', import_from_module='ns.network')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )', u'ns3::Mac48Address::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )*', u'ns3::Mac48Address::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )&', u'ns3::Mac48Address::TracedCallback&')
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('Mac8Address', import_from_module='ns.network')
root_module['ns3::Mac8Address'].implicitly_converts_to(root_module['ns3::Address'])
module.add_class('NetDeviceContainer', import_from_module='ns.network')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator', u'ns3::NetDeviceContainer::Iterator')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator*', u'ns3::NetDeviceContainer::Iterator*')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::NetDevice > > const_iterator&', u'ns3::NetDeviceContainer::Iterator&')
module.add_class('NodeContainer', import_from_module='ns.network')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Node > > const_iterator', u'ns3::NodeContainer::Iterator')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Node > > const_iterator*', u'ns3::NodeContainer::Iterator*')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Node > > const_iterator&', u'ns3::NodeContainer::Iterator&')
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
module.add_class('ObjectDeleter', import_from_module='ns.core')
module.add_class('ObjectFactory', import_from_module='ns.core')
module.add_class('PacketMetadata', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
module.add_enum('ItemType', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
module.add_class('PacketTagIterator', import_from_module='ns.network')
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
module.add_class('PacketTagList', import_from_module='ns.network')
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
module.add_class('PcapFile', import_from_module='ns.network')
module.add_class('PcapHelper', import_from_module='ns.network')
module.add_enum('DataLinkType', ['DLT_NULL', 'DLT_EN10MB', 'DLT_PPP', 'DLT_RAW', 'DLT_IEEE802_11', 'DLT_LINUX_SLL', 'DLT_PRISM_HEADER', 'DLT_IEEE802_11_RADIO', 'DLT_IEEE802_15_4', 'DLT_NETLINK'], outer_class=root_module['ns3::PcapHelper'], import_from_module='ns.network')
module.add_class('PcapHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
module.add_class('PcapHelperForIpv4', allow_subclassing=True, import_from_module='ns.internet')
module.add_class('PcapHelperForIpv6', allow_subclassing=True, import_from_module='ns.internet')
module.add_class('PointToPointDumbbellHelper')
module.add_class('PointToPointGridHelper')
module.add_class('PointToPointHelper', import_from_module='ns.point_to_point', parent=[root_module['ns3::PcapHelperForDevice'], root_module['ns3::AsciiTraceHelperForDevice']])
module.add_class('PointToPointStarHelper')
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'], import_from_module='ns.core')
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
module.add_class('TagBuffer', import_from_module='ns.network')
module.add_class('TimeWithUnit', import_from_module='ns.core')
module.add_class('TypeId', import_from_module='ns.core')
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
typehandlers.add_type_alias(u'uint32_t', u'ns3::TypeId::hash_t')
typehandlers.add_type_alias(u'uint32_t*', u'ns3::TypeId::hash_t*')
typehandlers.add_type_alias(u'uint32_t&', u'ns3::TypeId::hash_t&')
module.add_class('empty', import_from_module='ns.core')
module.add_class('int64x64_t', import_from_module='ns.core')
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
module.add_class('InternetStackHelper', import_from_module='ns.internet', parent=[root_module['ns3::PcapHelperForIpv4'], root_module['ns3::PcapHelperForIpv6'], root_module['ns3::AsciiTraceHelperForIpv4'], root_module['ns3::AsciiTraceHelperForIpv6']])
module.add_class('Ipv4Header', import_from_module='ns.internet', parent=root_module['ns3::Header'])
module.add_enum('DscpType', ['DscpDefault', 'DSCP_CS1', 'DSCP_AF11', 'DSCP_AF12', 'DSCP_AF13', 'DSCP_CS2', 'DSCP_AF21', 'DSCP_AF22', 'DSCP_AF23', 'DSCP_CS3', 'DSCP_AF31', 'DSCP_AF32', 'DSCP_AF33', 'DSCP_CS4', 'DSCP_AF41', 'DSCP_AF42', 'DSCP_AF43', 'DSCP_CS5', 'DSCP_EF', 'DSCP_CS6', 'DSCP_CS7'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
module.add_enum('EcnType', ['ECN_NotECT', 'ECN_ECT1', 'ECN_ECT0', 'ECN_CE'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
module.add_class('Ipv6Header', import_from_module='ns.internet', parent=root_module['ns3::Header'])
module.add_enum('DscpType', ['DscpDefault', 'DSCP_CS1', 'DSCP_AF11', 'DSCP_AF12', 'DSCP_AF13', 'DSCP_CS2', 'DSCP_AF21', 'DSCP_AF22', 'DSCP_AF23', 'DSCP_CS3', 'DSCP_AF31', 'DSCP_AF32', 'DSCP_AF33', 'DSCP_CS4', 'DSCP_AF41', 'DSCP_AF42', 'DSCP_AF43', 'DSCP_CS5', 'DSCP_EF', 'DSCP_CS6', 'DSCP_CS7'], outer_class=root_module['ns3::Ipv6Header'], import_from_module='ns.internet')
module.add_enum('NextHeader_e', ['IPV6_EXT_HOP_BY_HOP', 'IPV6_IPV4', 'IPV6_TCP', 'IPV6_UDP', 'IPV6_IPV6', 'IPV6_EXT_ROUTING', 'IPV6_EXT_FRAGMENTATION', 'IPV6_EXT_CONFIDENTIALITY', 'IPV6_EXT_AUTHENTIFICATION', 'IPV6_ICMPV6', 'IPV6_EXT_END', 'IPV6_EXT_DESTINATION', 'IPV6_SCTP', 'IPV6_EXT_MOBILITY', 'IPV6_UDP_LITE'], outer_class=root_module['ns3::Ipv6Header'], import_from_module='ns.internet')
module.add_enum('EcnType', ['ECN_NotECT', 'ECN_ECT1', 'ECN_ECT0', 'ECN_CE'], outer_class=root_module['ns3::Ipv6Header'], import_from_module='ns.internet')
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
module.add_class('PcapFileWrapper', import_from_module='ns.network', parent=root_module['ns3::Object'])
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4MulticastRoute', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object'])
module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
module.add_enum('SocketPriority', ['NS3_PRIO_BESTEFFORT', 'NS3_PRIO_FILLER', 'NS3_PRIO_BULK', 'NS3_PRIO_INTERACTIVE_BULK', 'NS3_PRIO_INTERACTIVE', 'NS3_PRIO_CONTROL'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
module.add_enum('Ipv6MulticastFilterMode', ['INCLUDE', 'EXCLUDE'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
module.add_class('SocketIpTosTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketIpv6HopLimitTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketIpv6TclassTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketPriorityTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
module.add_class('Time', import_from_module='ns.core')
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )', u'ns3::Time::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )*', u'ns3::Time::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )&', u'ns3::Time::TracedCallback&')
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
module.add_class('Ipv4', import_from_module='ns.internet', parent=root_module['ns3::Object'])
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv4L3Protocol', import_from_module='ns.internet', parent=root_module['ns3::Ipv4'])
module.add_enum('DropReason', ['DROP_TTL_EXPIRED', 'DROP_NO_ROUTE', 'DROP_BAD_CHECKSUM', 'DROP_INTERFACE_DOWN', 'DROP_ROUTE_ERROR', 'DROP_FRAGMENT_TIMEOUT'], outer_class=root_module['ns3::Ipv4L3Protocol'], import_from_module='ns.internet')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ipv4Header const &, ns3::Ptr< ns3::Packet const >, uint32_t )', u'ns3::Ipv4L3Protocol::SentTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ipv4Header const &, ns3::Ptr< ns3::Packet const >, uint32_t )*', u'ns3::Ipv4L3Protocol::SentTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ipv4Header const &, ns3::Ptr< ns3::Packet const >, uint32_t )&', u'ns3::Ipv4L3Protocol::SentTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Ptr< ns3::Ipv4 >, uint32_t )', u'ns3::Ipv4L3Protocol::TxRxTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Ptr< ns3::Ipv4 >, uint32_t )*', u'ns3::Ipv4L3Protocol::TxRxTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Ptr< ns3::Ipv4 >, uint32_t )&', u'ns3::Ipv4L3Protocol::TxRxTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ipv4Header const &, ns3::Ptr< ns3::Packet const >, ns3::Ipv4L3Protocol::DropReason, ns3::Ptr< ns3::Ipv4 >, uint32_t )', u'ns3::Ipv4L3Protocol::DropTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ipv4Header const &, ns3::Ptr< ns3::Packet const >, ns3::Ipv4L3Protocol::DropReason, ns3::Ptr< ns3::Ipv4 >, uint32_t )*', u'ns3::Ipv4L3Protocol::DropTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ipv4Header const &, ns3::Ptr< ns3::Packet const >, ns3::Ipv4L3Protocol::DropReason, ns3::Ptr< ns3::Ipv4 >, uint32_t )&', u'ns3::Ipv4L3Protocol::DropTracedCallback&')
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv4MulticastRoute', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
module.add_class('Ipv4Route', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
module.add_class('Ipv4RoutingProtocol', import_from_module='ns.internet', parent=root_module['ns3::Object'])
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Ipv4RoutingProtocol::UnicastForwardCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Ipv4RoutingProtocol::UnicastForwardCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Ipv4RoutingProtocol::UnicastForwardCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Ipv4RoutingProtocol::MulticastForwardCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Ipv4RoutingProtocol::MulticastForwardCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Ipv4RoutingProtocol::MulticastForwardCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Ipv4RoutingProtocol::LocalDeliverCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Ipv4RoutingProtocol::LocalDeliverCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Ipv4RoutingProtocol::LocalDeliverCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Ipv4RoutingProtocol::ErrorCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Ipv4RoutingProtocol::ErrorCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Ipv4RoutingProtocol::ErrorCallback&')
module.add_class('Ipv6', import_from_module='ns.internet', parent=root_module['ns3::Object'])
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Ipv6L3Protocol', import_from_module='ns.internet', parent=root_module['ns3::Ipv6'])
module.add_enum('DropReason', ['DROP_TTL_EXPIRED', 'DROP_NO_ROUTE', 'DROP_INTERFACE_DOWN', 'DROP_ROUTE_ERROR', 'DROP_UNKNOWN_PROTOCOL', 'DROP_UNKNOWN_OPTION', 'DROP_MALFORMED_HEADER', 'DROP_FRAGMENT_TIMEOUT'], outer_class=root_module['ns3::Ipv6L3Protocol'], import_from_module='ns.internet')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ipv6Header const &, ns3::Ptr< ns3::Packet const >, uint32_t )', u'ns3::Ipv6L3Protocol::SentTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ipv6Header const &, ns3::Ptr< ns3::Packet const >, uint32_t )*', u'ns3::Ipv6L3Protocol::SentTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ipv6Header const &, ns3::Ptr< ns3::Packet const >, uint32_t )&', u'ns3::Ipv6L3Protocol::SentTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Ptr< ns3::Ipv6 >, uint32_t )', u'ns3::Ipv6L3Protocol::TxRxTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Ptr< ns3::Ipv6 >, uint32_t )*', u'ns3::Ipv6L3Protocol::TxRxTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Ptr< ns3::Ipv6 >, uint32_t )&', u'ns3::Ipv6L3Protocol::TxRxTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ipv6Header const &, ns3::Ptr< ns3::Packet const >, ns3::Ipv6L3Protocol::DropReason, ns3::Ptr< ns3::Ipv6 >, uint32_t )', u'ns3::Ipv6L3Protocol::DropTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ipv6Header const &, ns3::Ptr< ns3::Packet const >, ns3::Ipv6L3Protocol::DropReason, ns3::Ptr< ns3::Ipv6 >, uint32_t )*', u'ns3::Ipv6L3Protocol::DropTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ipv6Header const &, ns3::Ptr< ns3::Packet const >, ns3::Ipv6L3Protocol::DropReason, ns3::Ptr< ns3::Ipv6 >, uint32_t )&', u'ns3::Ipv6L3Protocol::DropTracedCallback&')
module.add_class('Ipv6PmtuCache', import_from_module='ns.internet', parent=root_module['ns3::Object'])
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
typehandlers.add_type_alias(u'void ( * ) ( )', u'ns3::NetDevice::LinkChangeTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( )*', u'ns3::NetDevice::LinkChangeTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( )&', u'ns3::NetDevice::LinkChangeTracedCallback&')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::NetDevice::ReceiveCallback')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::NetDevice::ReceiveCallback*')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::NetDevice::ReceiveCallback&')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', u'ns3::NetDevice::PromiscReceiveCallback')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::NetDevice::PromiscReceiveCallback*')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::NetDevice::PromiscReceiveCallback&')
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Node::ProtocolHandler')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Node::ProtocolHandler*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Node::ProtocolHandler&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Node::DeviceAdditionListener')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Node::DeviceAdditionListener*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Node::DeviceAdditionListener&')
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )', u'ns3::Packet::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )*', u'ns3::Packet::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )&', u'ns3::Packet::TracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )', u'ns3::Packet::AddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )*', u'ns3::Packet::AddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )&', u'ns3::Packet::AddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )', u'ns3::Packet::TwoAddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )*', u'ns3::Packet::TwoAddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )&', u'ns3::Packet::TwoAddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )', u'ns3::Packet::Mac48AddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )*', u'ns3::Packet::Mac48AddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )&', u'ns3::Packet::Mac48AddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )', u'ns3::Packet::SizeTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )*', u'ns3::Packet::SizeTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )&', u'ns3::Packet::SizeTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )', u'ns3::Packet::SinrTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )*', u'ns3::Packet::SinrTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )&', u'ns3::Packet::SinrTracedCallback&')
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['bool', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'const ns3::Ipv4Header &', 'ns3::Ptr<const ns3::Packet>', 'ns3::Ipv4L3Protocol::DropReason', 'ns3::Ptr<ns3::Ipv4>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'const ns3::Ipv4Header &', 'ns3::Ptr<const ns3::Packet>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'const ns3::Ipv6Header &', 'ns3::Ptr<const ns3::Packet>', 'ns3::Ipv6L3Protocol::DropReason', 'ns3::Ptr<ns3::Ipv6>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'const ns3::Ipv6Header &', 'ns3::Ptr<const ns3::Packet>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::Ptr<ns3::Ipv4>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::Packet>', 'ns3::Ptr<ns3::Ipv6>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
module.add_container('std::vector< bool >', 'bool', container_type=u'vector')
module.add_container('std::vector< ns3::Ipv6Address >', 'ns3::Ipv6Address', container_type=u'vector')
module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type=u'map')
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module) |
class JointTrainAgent(iql.IQLAgent):
    """IQL-based agent whose value, low-level actor, and (optional) high-level
    actor heads live in one shared network and are trained with a single fused
    gradient step.

    Methods follow the flax-struct convention: the first parameter is the
    agent pytree itself (named ``agent`` rather than ``self``) and updates
    return a *new* agent via ``agent.replace(...)`` instead of mutating in
    place, which is what allows the methods to be ``jax.jit``-compiled.
    """
    # Single TrainState holding every sub-module's parameters, including
    # 'networks_value' and its Polyak-averaged copy 'networks_target_value'.
    network: TrainState = None
    def pretrain_update(agent, pretrain_batch, seed=None, value_update=True, actor_update=True, high_actor_update=True):
        # One fused gradient step over the sum of the enabled losses.
        # The *_update flags are jit-static (see the jax.jit call below), so
        # the Python `if`s here select which loss terms are traced at all.
        def loss_fn(network_params):
            # Returns (total_loss, metrics) for differentiation by
            # apply_loss_fn; disabled terms contribute a constant 0.0.
            info = {}
            if value_update:
                (value_loss, value_info) = compute_value_loss(agent, pretrain_batch, network_params)
                for (k, v) in value_info.items():
                    info[f'value/{k}'] = v
            else:
                value_loss = 0.0
            if actor_update:
                (actor_loss, actor_info) = compute_actor_loss(agent, pretrain_batch, network_params)
                for (k, v) in actor_info.items():
                    info[f'actor/{k}'] = v
            else:
                actor_loss = 0.0
            # The high-level (waypoint/subgoal) actor only trains when
            # waypoints are enabled in the config.
            if (high_actor_update and agent.config['use_waypoints']):
                (high_actor_loss, high_actor_info) = compute_high_actor_loss(agent, pretrain_batch, network_params)
                for (k, v) in high_actor_info.items():
                    info[f'high_actor/{k}'] = v
            else:
                high_actor_loss = 0.0
            loss = ((value_loss + actor_loss) + high_actor_loss)
            return (loss, info)
        if value_update:
            # Polyak-average the target value params from the PRE-update
            # online params; the result is spliced into the post-update
            # TrainState below.
            # NOTE(review): jax.tree_map is deprecated in recent JAX releases
            # in favor of jax.tree.map / jax.tree_util.tree_map.
            new_target_params = jax.tree_map((lambda p, tp: ((p * agent.config['target_update_rate']) + (tp * (1 - agent.config['target_update_rate'])))), agent.network.params['networks_value'], agent.network.params['networks_target_value'])
        (new_network, info) = agent.network.apply_loss_fn(loss_fn=loss_fn, has_aux=True)
        if value_update:
            # TrainState params are an immutable FrozenDict: thaw, overwrite
            # the target-value subtree, then refreeze.
            params = unfreeze(new_network.params)
            params['networks_target_value'] = new_target_params
            new_network = new_network.replace(params=freeze(params))
        return (agent.replace(network=new_network), info)
    # The update flags must be static so jit specializes (retraces) per flag
    # combination instead of tracing Python branches on abstract values.
    pretrain_update = jax.jit(pretrain_update, static_argnames=('value_update', 'actor_update', 'high_actor_update'))
    def sample_actions(agent, observations: np.ndarray, goals: np.ndarray, *, low_dim_goals: bool=False, seed: PRNGKey, temperature: float=1.0, discrete: int=0, num_samples: int=None) -> jnp.ndarray:
        # Sample low-level actions from the goal-conditioned actor head.
        # num_samples=None draws a single action per observation; otherwise a
        # leading sample dimension of size num_samples is added.
        dist = agent.network(observations, goals, low_dim_goals=low_dim_goals, temperature=temperature, method='actor')
        if (num_samples is None):
            actions = dist.sample(seed=seed)
        else:
            actions = dist.sample(seed=seed, sample_shape=num_samples)
        if (not discrete):
            # Continuous actions are clipped to the canonical [-1, 1] range.
            actions = jnp.clip(actions, (- 1), 1)
        return actions
    sample_actions = jax.jit(sample_actions, static_argnames=('num_samples', 'low_dim_goals', 'discrete'))
    def sample_high_actions(agent, observations: np.ndarray, goals: np.ndarray, *, seed: PRNGKey, temperature: float=1.0, num_samples: int=None) -> jnp.ndarray:
        # Sample subgoal proposals from the high-level actor head
        # (no clipping: high-level outputs are goal representations,
        # not bounded environment actions).
        dist = agent.network(observations, goals, temperature=temperature, method='high_actor')
        if (num_samples is None):
            actions = dist.sample(seed=seed)
        else:
            actions = dist.sample(seed=seed, sample_shape=num_samples)
        return actions
    sample_high_actions = jax.jit(sample_high_actions, static_argnames=('num_samples',))
    def get_policy_rep(agent, *, targets: np.ndarray, bases: np.ndarray=None) -> jnp.ndarray:
        # Encode target (and optional base) observations into the policy's
        # goal-representation space via the policy goal encoder.
        return agent.network(targets=targets, bases=bases, method='policy_goal_encoder')
class CallStack():
    """A filtered snapshot of the current Python call stack.

    Holds only the frames that belong to the project tree and whose
    receiver (``self``) is a ``torch.nn.Module``, recorded as
    ``SourceLocation`` entries.
    """

    def __init__(self, frames):
        # frames: list of SourceLocation records, innermost call first.
        self.frames = frames

    @staticmethod
    def from_here(project_root, start_from=1):
        """Capture the call stack of the caller, filtered to project frames.

        Fix: declared as a ``@staticmethod`` — the original plain function
        would silently receive the instance as ``project_root`` if invoked
        on a ``CallStack`` instance.

        :param project_root: absolute path prefix; frames from files outside
            this tree are dropped.
        :param start_from: number of innermost frames to skip (1 skips this
            function's own frame).
        :return: a ``CallStack`` of ``SourceLocation`` entries for frames
            whose local ``self`` is a ``torch.nn.Module``.
        """
        stack = inspect.stack()
        context = []
        try:
            for frame_info in stack[start_from:]:
                # Keep only frames from source files inside the project tree.
                if not frame_info.filename.startswith(project_root):
                    continue
                # Keep only method frames whose receiver is a torch module.
                if 'self' not in frame_info.frame.f_locals:
                    continue
                if not isinstance(frame_info.frame.f_locals['self'], torch.nn.Module):
                    continue
                context.append(SourceLocation(
                    file_path=os.path.relpath(frame_info.filename, start=project_root),
                    line_number=frame_info.lineno,
                    module_id=id(frame_info.frame.f_locals['self'])))
            return CallStack(context)
        finally:
            # Drop frame references promptly to break reference cycles, as
            # recommended by the inspect module documentation.
            del stack
def get_embedding(text, model='text-embedding-ada-002'):
    """Return the OpenAI embedding vector for *text*, or None on failure.

    Newlines are flattened to spaces and long inputs are truncated to their
    first 50 whitespace-separated tokens before the API call. The request is
    retried up to 5 times, sleeping 30s between attempts.

    Fixes: the bare ``except:`` (which also swallowed KeyboardInterrupt /
    SystemExit and hid the actual error) is narrowed to ``Exception`` and the
    error is logged; the implicit None fall-through after exhausting retries
    is now explicit.

    :param text: input text to embed.
    :param model: embedding model name.
    :return: list[float] embedding, or None if all attempts failed.
    """
    text = text.replace('\n', ' ')
    if (len(text) > 50):
        # Crude token cap: keep only the first 50 space-separated words.
        text = ' '.join(text.split(' ')[:50])
    for _ in range(5):
        try:
            return openai.Embedding.create(input=[text], model=model)['data'][0]['embedding']
        except Exception as exc:
            # Log the actual failure instead of discarding it silently.
            print(f'Error generating embedding ({exc!r})! Attempting again...')
            time.sleep(30)
    # All retries exhausted: preserve the original contract of returning None.
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.