code stringlengths 281 23.7M |
|---|
def test_boundary_box_parsing() -> None:
    """BoundaryBox.from_text parses a valid bbox and rejects malformed input."""
    parsed = BoundaryBox.from_text('-0.1,-0.1,0.1,0.1')
    assert parsed == BoundaryBox(-0.1, -0.1, 0.1, 0.1)
    # Invalid inputs: swapped x bounds, swapped y bounds, junk text,
    # a letter 'O' in place of zero, and out-of-range coordinates.
    for bad_text in (
        '0.1,-0.1,-0.1,0.1',
        '-0.1,0.1,0.1,-0.1',
        'wrong',
        '-O.1,-0.1,0.1,0.1',
        '-20,-20,20,20',
    ):
        assert BoundaryBox.from_text(bad_text) is None
class OptionSeriesNetworkgraphSonificationTracksMappingRate(Options):
    """Sonification track `rate` mapping options.

    The original copy had two plain ``def``s per option name (the second
    silently shadowing the first) — the ``@property``/setter decorators had
    been stripped.  Restored as getter/setter property pairs.
    """

    @property
    def mapFunction(self):
        # Current configured value; None when unset.
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class OptionPlotoptionsPackedbubbleStatesHoverMarker(Options):
    """Marker options for hovered packedbubble points.

    The original copy had duplicate plain ``def``s per option (stripped
    ``@property``/setter decorators, so each setter shadowed its getter);
    restored as property getter/setter pairs.  Defaults shown in the
    getters are unchanged from the original.
    """

    @property
    def enabled(self):
        return self._config_get(None)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def enabledThreshold(self):
        return self._config_get(2)

    @enabledThreshold.setter
    def enabledThreshold(self, num: float):
        self._config(num, js_type=False)

    @property
    def fillColor(self):
        return self._config_get(None)

    @fillColor.setter
    def fillColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def height(self):
        return self._config_get(None)

    @height.setter
    def height(self, num: float):
        self._config(num, js_type=False)

    @property
    def lineColor(self):
        return self._config_get('#ffffff')

    @lineColor.setter
    def lineColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineWidth(self):
        return self._config_get(0)

    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    @property
    def radius(self):
        return self._config_get(4)

    @radius.setter
    def radius(self, num: float):
        self._config(num, js_type=False)

    @property
    def width(self):
        return self._config_get(None)

    @width.setter
    def width(self, num: float):
        self._config(num, js_type=False)
# The bare `.parametrize(...)` lines were stripped `@pytest.mark.parametrize`
# decorators; restored below (pytest is imported at the top of this file).
@pytest.mark.parametrize('ops', XP_OPS)
@pytest.mark.parametrize('dtype', FLOAT_TYPES)
def test_seq2col_lengths_zero_between(ops, dtype):
    """seq2col/backprop_seq2col with window 1 must handle zero-length
    sequences in `lengths` (zero padding appears at sequence boundaries)."""
    cols_check = ops.asarray2f([[0, 0, 0, 1, 2, 3, 4, 5, 6], [1, 2, 3, 4, 5, 6, 7, 8, 9], [4, 5, 6, 7, 8, 9, 10, 11, 12], [7, 8, 9, 10, 11, 12, 13, 14, 15], [10, 11, 12, 13, 14, 15, 0, 0, 0], [0, 0, 0, 16, 17, 18, 19, 20, 21], [16, 17, 18, 19, 20, 21, 0, 0, 0]], dtype=dtype)
    grad_check = ops.asarray2f([[2, 4, 6], [12, 15, 18], [21, 24, 27], [30, 33, 36], [26, 28, 30], [32, 34, 36], [38, 40, 42]], dtype=dtype)
    ops.xp.testing.assert_allclose(cols_check, ops.seq2col(ops.xp.arange(1.0, 22.0, dtype=dtype).reshape(7, 3), 1, lengths=ops.asarray1i([5, 0, 2])))
    ops.xp.testing.assert_allclose(grad_check, ops.backprop_seq2col(cols_check, 1, lengths=ops.asarray1i([5, 0, 2])))
    # Extra zero-length entries in `lengths` must not change the result.
    ops.xp.testing.assert_allclose(cols_check, ops.seq2col(ops.xp.arange(1.0, 22.0, dtype=dtype).reshape(7, 3), 1, lengths=ops.asarray1i([5, 0, 0, 2])))
    ops.xp.testing.assert_allclose(grad_check, ops.backprop_seq2col(cols_check, 1, lengths=ops.asarray1i([5, 0, 0, 2])))
def test_encoding_new_categories(df_enc):
    """Categories unseen during fit must be encoded as -1 when unseen='encode'."""
    unseen_df = pd.DataFrame({'var_A': ['D'], 'var_B': ['D']})
    enc = OrdinalEncoder(encoding_method='arbitrary', unseen='encode')
    enc.fit(df_enc[['var_A', 'var_B']])
    result = enc.transform(unseen_df)
    # Every cell of the transformed frame is the unseen sentinel.
    assert (result == -1).all(axis=None)
# NOTE(review): this is Exo DSL code, and the bare string tuple below looks
# like a stripped `@instr(...)` decorator (the AVX2 intrinsic template the
# procedure lowers to) — confirm against the original source.
('\n{out_data} = _mm256_blendv_ps ({z_data}, {y_data}, \n_mm256_cmp_ps ({x_data}, {v_data}, _CMP_LT_OQ));\n')
def avx2_select_ps(out: ([f32][8] AVX2), x: ([f32][8] AVX2), v: ([f32][8] AVX2), y: ([f32][8] AVX2), z: ([f32][8] AVX2)):
    # All operands are 8-lane f32 AVX2 vectors and must be contiguous.
    assert (stride(out, 0) == 1)
    assert (stride(x, 0) == 1)
    assert (stride(v, 0) == 1)
    assert (stride(y, 0) == 1)
    assert (stride(z, 0) == 1)
    # Lane-wise select; per the blendv/_CMP_LT_OQ template above this is
    # out[i] = y[i] if x[i] < v[i] else z[i].
    for i in seq(0, 8):
        out[i] = select(x[i], v[i], y[i], z[i]) |
class TestDateRange(TestCase):
    """Exercise date_range() across units, range shapes and week starts.

    Every case pins the epoch to 2017-04-03 22:50:17 and compares the
    returned (start, end) epoch pair against a hand-computed window.
    """

    EPOCH = datetime_to_epoch(datetime(2017, 4, 3, 22, 50, 17))

    def _check(self, unit, range_from, range_to, start_dt, end_dt, **kwargs):
        # Shared assertion: date_range() must return the epoch seconds of
        # the expected window boundaries.
        expected = (datetime_to_epoch(start_dt), datetime_to_epoch(end_dt))
        assert expected == date_range(unit, range_from, range_to, epoch=self.EPOCH, **kwargs)

    def test_bad_unit(self):
        with pytest.raises(ConfigurationError, match='"unit" must be one of'):
            date_range('invalid', 1, 1)

    def test_bad_range(self):
        with pytest.raises(ConfigurationError, match='must be greater than or equal to'):
            date_range('hours', 1, -1)

    def test_hours_single(self):
        self._check('hours', -1, -1, datetime(2017, 4, 3, 21, 0, 0), datetime(2017, 4, 3, 21, 59, 59))

    def test_hours_past_range(self):
        self._check('hours', -3, -1, datetime(2017, 4, 3, 19, 0, 0), datetime(2017, 4, 3, 21, 59, 59))

    def test_hours_future_range(self):
        self._check('hours', 0, 2, datetime(2017, 4, 3, 22, 0, 0), datetime(2017, 4, 4, 0, 59, 59))

    def test_hours_span_range(self):
        self._check('hours', -1, 2, datetime(2017, 4, 3, 21, 0, 0), datetime(2017, 4, 4, 0, 59, 59))

    def test_days_single(self):
        self._check('days', -1, -1, datetime(2017, 4, 2, 0, 0, 0), datetime(2017, 4, 2, 23, 59, 59))

    def test_days_past_range(self):
        self._check('days', -3, -1, datetime(2017, 3, 31, 0, 0, 0), datetime(2017, 4, 2, 23, 59, 59))

    def test_days_future_range(self):
        self._check('days', 0, 2, datetime(2017, 4, 3, 0, 0, 0), datetime(2017, 4, 5, 23, 59, 59))

    def test_days_span_range(self):
        self._check('days', -1, 2, datetime(2017, 4, 2, 0, 0, 0), datetime(2017, 4, 5, 23, 59, 59))

    def test_weeks_single(self):
        self._check('weeks', -1, -1, datetime(2017, 3, 26, 0, 0, 0), datetime(2017, 4, 1, 23, 59, 59))

    def test_weeks_past_range(self):
        self._check('weeks', -3, -1, datetime(2017, 3, 12, 0, 0, 0), datetime(2017, 4, 1, 23, 59, 59))

    def test_weeks_future_range(self):
        self._check('weeks', 0, 2, datetime(2017, 4, 2, 0, 0, 0), datetime(2017, 4, 22, 23, 59, 59))

    def test_weeks_span_range(self):
        self._check('weeks', -1, 2, datetime(2017, 3, 26, 0, 0, 0), datetime(2017, 4, 22, 23, 59, 59))

    def test_weeks_single_iso(self):
        # week_starts_on='monday' shifts the whole window by one day.
        self._check('weeks', -1, -1, datetime(2017, 3, 27, 0, 0, 0), datetime(2017, 4, 2, 23, 59, 59), week_starts_on='monday')

    def test_weeks_past_range_iso(self):
        self._check('weeks', -3, -1, datetime(2017, 3, 13, 0, 0, 0), datetime(2017, 4, 2, 23, 59, 59), week_starts_on='monday')

    def test_weeks_future_range_iso(self):
        self._check('weeks', 0, 2, datetime(2017, 4, 3, 0, 0, 0), datetime(2017, 4, 23, 23, 59, 59), week_starts_on='monday')

    def test_weeks_span_range_iso(self):
        self._check('weeks', -1, 2, datetime(2017, 3, 27, 0, 0, 0), datetime(2017, 4, 23, 23, 59, 59), week_starts_on='monday')

    def test_months_single(self):
        self._check('months', -1, -1, datetime(2017, 3, 1, 0, 0, 0), datetime(2017, 3, 31, 23, 59, 59))

    def test_months_past_range(self):
        self._check('months', -4, -1, datetime(2016, 12, 1, 0, 0, 0), datetime(2017, 3, 31, 23, 59, 59))

    def test_months_future_range(self):
        self._check('months', 7, 10, datetime(2017, 11, 1, 0, 0, 0), datetime(2018, 2, 28, 23, 59, 59))

    def test_months_super_future_range(self):
        self._check('months', 9, 10, datetime(2018, 1, 1, 0, 0, 0), datetime(2018, 2, 28, 23, 59, 59))

    def test_months_span_range(self):
        self._check('months', -1, 2, datetime(2017, 3, 1, 0, 0, 0), datetime(2017, 6, 30, 23, 59, 59))

    def test_years_single(self):
        self._check('years', -1, -1, datetime(2016, 1, 1, 0, 0, 0), datetime(2016, 12, 31, 23, 59, 59))

    def test_years_past_range(self):
        self._check('years', -3, -1, datetime(2014, 1, 1, 0, 0, 0), datetime(2016, 12, 31, 23, 59, 59))

    def test_years_future_range(self):
        self._check('years', 0, 2, datetime(2017, 1, 1, 0, 0, 0), datetime(2019, 12, 31, 23, 59, 59))

    def test_years_span_range(self):
        self._check('years', -1, 2, datetime(2016, 1, 1, 0, 0, 0), datetime(2019, 12, 31, 23, 59, 59))
def test_def_include_file():
    """Go-to-definition at an INCLUDE statement must resolve into the included file."""
    request = write_rpc_request(1, 'initialize', {'rootPath': str(test_dir)})
    request += def_request(test_dir / 'test_inc.f90', 3, 16)
    (errcode, results) = run_request(request)
    assert errcode == 0
    # One expected definition: line 2, char 2, in subdir/test_inc2.f90.
    expected = [[2, 2, str(test_dir / 'subdir' / 'test_inc2.f90')]]
    # results[0] is the initialize response; definitions start at index 1.
    assert len(results) - 1 == len(expected)
    for offset, exp in enumerate(expected, start=1):
        validate_def(results[offset], exp)
def text_callbacks(viz, env, args):
    """Create a text pane behaving like a tiny notepad and return its window.

    Registered key handling: printable keys append, Enter inserts a line
    break, Backspace removes the last character, Delete resets the pane
    to the initial prompt.  `args` is unused.
    """
    prompt = 'This is a write demo notepad. Type below. Delete clears text:<br>'
    win = viz.text(prompt, env=env)

    def on_key(event):
        # Only key presses update the pane; other events are ignored.
        if event['event_type'] != 'KeyPress':
            return
        content = event['pane_data']['content']
        key = event['key']
        if key == 'Enter':
            content += '<br>'
        elif key == 'Backspace':
            content = content[:-1]
        elif key == 'Delete':
            content = prompt
        elif len(key) == 1:
            content += key
        viz.text(content, win=win, env=env)

    viz.register_event_handler(on_key, win)
    return win
def gen_one_75_entry_from_80(db_conn: sqlite3.Connection, table: str, op_type: str, op_keys: str, algo: str = None):
    """Copy cache entries recorded for device '80' into sm75 entries.

    When `algo` is given, exactly one matching source entry is expected and
    inserted for that algo; otherwise every entry found is copied with
    algo=None.  Logs and returns early when no source entries exist.
    """
    db_conn_cur = db_conn.cursor()
    # NOTE(review): `algo=None` is passed to the query even when the caller
    # supplied an algo — presumably the device-80 lookup is intentionally
    # unfiltered; confirm against query_cache()'s contract.
    entries = query_cache(db_conn_cur=db_conn_cur, table=table, op_type=op_type, op_keys=op_keys, query_template=DEFAULT_QUERY_TEMPLATE, algo=None, device='80')
    if not entries:
        _LOGGER.info('Could not find valid entries, skip')
        return
    column_names = get_column_names(db_conn_cur, table)
    if algo is not None:
        if len(entries) != 1:
            # Typo fixed: 'entried' -> 'entries'.
            raise RuntimeError('multiple entries found, which cannot be applied to a single algo')
        insert_sm75_entry(db_conn, table, entries[0], column_names, algo)
        return
    for entry in entries:
        # `algo` is None on this path, matching the original behavior.
        insert_sm75_entry(db_conn, table, entry, column_names, algo)
def get_gtp_int_tile(clock_region, grid):
    """Return the GTP_INT_INTERFACE tile in `clock_region` whose Y index
    satisfies ``y % 50 == 26``, scanning tiles in sorted-name order.

    The clock region of an interface tile is taken from the INT_L/INT_R
    tile immediately to its left (preferred) or right.  Returns None when
    no tile matches.
    """
    for tile_name in sorted(grid.tiles()):
        if not tile_name.startswith('GTP_INT_INTERFACE'):
            continue
        loc = grid.loc_of_tilename(tile_name)
        left_gridinfo = grid.gridinfo_at_loc(GridLoc(loc.grid_x - 1, loc.grid_y))
        right_gridinfo = grid.gridinfo_at_loc(GridLoc(loc.grid_x + 1, loc.grid_y))
        if left_gridinfo.tile_type in ['INT_L', 'INT_R']:
            cmt = left_gridinfo.clock_region
        elif right_gridinfo.tile_type in ['INT_L', 'INT_R']:
            cmt = right_gridinfo.clock_region
        else:
            # Every GTP interface tile is expected to border an INT tile.
            assert False
        # (Removed an unused `gridinfo = grid.gridinfo_at_loc(loc)` local.)
        m = GTP_INT_Y_RE.match(tile_name)
        assert m
        int_y = int(m.group(1))
        if (clock_region == cmt) and (int_y % 50 == 26):
            return tile_name
class EscapeAllExtension(Extension):
    """Markdown extension wiring the escape-all pattern and postprocessor.

    Config keys:
        hardbreak: escaped newlines become hard breaks (default False).
        nbsp: escaped spaces become non-breaking spaces (default False).
    """

    def __init__(self, *args, **kwargs):
        self.config = {
            'hardbreak': [False, 'Turn escaped newlines to hardbreaks - Default: False'],
            'nbsp': [False, 'Turn escaped spaces to non-breaking spaces - Default: False'],
        }
        super(EscapeAllExtension, self).__init__(*args, **kwargs)

    def extendMarkdown(self, md):
        """Register the escape pattern, the unescape postprocessor and,
        when `hardbreak` is enabled, the hard-break inline processor."""
        settings = self.getConfigs()
        use_hardbreak = settings['hardbreak']
        # With hardbreak enabled, newlines are excluded from the escape
        # pattern so they can be handled by the hardbreak processor.
        pattern = ESCAPE_NO_NL_RE if use_hardbreak else ESCAPE_RE
        md.inlinePatterns.register(EscapeAllPattern(pattern, settings['nbsp']), 'escape', 180)
        md.postprocessors.register(EscapeAllPostprocessor(md), 'unescape', 10)
        if use_hardbreak:
            md.inlinePatterns.register(SubstituteTagInlineProcessor(HARDBREAK_RE, 'br'), 'hardbreak', 5.1)
class ThirdPartyAuthTest(FaunaTestCase):
    """Third-party auth tests: access providers and current-identity queries.

    NOTE(review): in this copy of the file the jwks URI string literals were
    truncated and the ``@classmethod`` decorators stripped (the helpers take
    ``cls``).  Both have been reconstructed; the URI placeholder literals
    MUST be confirmed against the original source.
    """

    @classmethod
    def setUpClass(cls):
        super(ThirdPartyAuthTest, cls).setUpClass()
        cls.collection_ref = cls._q(query.create_collection({'name': '3rdpartyauth_test_coll'}))['ref']

    @classmethod
    def _create(cls, n=0, **data):
        # Create a document in the shared test collection with data['n'] = n.
        data['n'] = n
        return cls._q(query.create(cls.collection_ref, {'data': data}))

    @classmethod
    def _q(cls, query_json):
        return cls.client.query(query_json)

    @classmethod
    def _randStr(cls, n=10):
        # Random ASCII-letter string of length n.
        letters = string.ascii_letters
        return ''.join((random.choice(letters) for i in range(n)))

    def _assert_insufficient_permissions(self, q):
        self.assertRaises(PermissionDenied, (lambda: self._q(q)))

    def test_create_access_providers(self):
        providerName = 'provider_'
        issuerName = 'issuer_'
        jwksUri = 'https://xxxx.auth0.com'  # TODO(review): reconstructed literal
        provider = self.admin_client.query(query.create_access_provider({'name': providerName, 'issuer': issuerName, 'jwks_uri': jwksUri}))
        self.assertTrue(self.admin_client.query(query.exists(query.access_provider(providerName))))
        self.assertEqual(provider['name'], providerName)
        self.assertEqual(provider['issuer'], issuerName)
        self.assertEqual(provider['jwks_uri'], jwksUri)
        # Duplicate provider name must be rejected.
        self.assertRaises(BadRequest, (lambda: self.admin_client.query(query.create_access_provider({'name': providerName, 'issuer': issuerName, 'jwks_uri': jwksUri}))))
        # Missing issuer must be rejected.
        self.assertRaises(BadRequest, (lambda: self.admin_client.query(query.create_access_provider({'name': providerName, 'jwks_uri': jwksUri}))))
        jwksUri = 'not-a-valid-uri'  # TODO(review): reconstructed invalid-URI literal
        self.assertRaises(BadRequest, (lambda: self.admin_client.query(query.create_access_provider({'name': providerName, 'issuer': issuerName, 'jwks_uri': jwksUri}))))

    def test_access_provider(self):
        self.assertEqual(self._q(query.access_provider('pvd-name')), Ref('pvd-name', Native.ACCESS_PROVIDERS))

    def test_access_providers(self):
        for i in range(10):
            providerName = ('provider_%d' % i)
            issuerName = ('issuer_%d' % i)
            jwksUri = ('https://xxxx.auth0.com/%d' % i)  # TODO(review): reconstructed literal
            obj = {'name': providerName, 'issuer': issuerName, 'jwks_uri': jwksUri}
            self.admin_client.query(query.create_access_provider(obj))
        self.assertEqual(self.admin_client.query(query.count(query.access_providers())), 10)
        # Non-admin clients may not paginate access providers.
        self._assert_insufficient_permissions(query.paginate(query.access_providers()))

    def test_identity_has_identity(self):
        instance_ref = self.client.query(query.create(self.collection_ref, {'credentials': {'password': 'sekrit'}}))['ref']
        secret = self.client.query(query.login(instance_ref, {'password': 'sekrit'}))['secret']
        instance_client = self.client.new_session_client(secret=secret)
        self.assertTrue(instance_client.query(query.has_current_identity()))
        self.assertEqual(instance_client.query(query.current_identity()), instance_ref)

    def test_has_current_token(self):
        instance_ref = self._q(query.create(self.collection_ref, {'credentials': {'password': 'sekrit'}}))['ref']
        secret = self._q(query.login(instance_ref, {'password': 'sekrit'}))['secret']
        instance_client = self.client.new_session_client(secret=secret)
        self.assertTrue(instance_client.query(query.has_current_token()))
        self.assertFalse(self._q(query.has_current_token()))

    def test_has_current_identity(self):
        instance_ref = self._q(query.create(self.collection_ref, {'credentials': {'password': 'sekrit'}}))['ref']
        secret = self._q(query.login(instance_ref, {'password': 'sekrit'}))['secret']
        instance_client = self.client.new_session_client(secret=secret)
        self.assertTrue(instance_client.query(query.has_current_identity()))
        self.assertFalse(self._q(query.has_current_identity()))

    def test_create_accprov_with_roles(self):
        providerName = 'provider_with_roles'
        issuerName = ('issuer_%s' % self._randStr())
        fullUri = ('https://%s.auth0.com' % self._randStr(4))  # TODO(review): reconstructed literal
        roleOneName = ('role_one_%s' % self._randStr(4))
        roleTwoName = ('role_two_%s' % self._randStr(4))
        self.admin_client.query(query.create_role({'name': roleOneName, 'privileges': [{'resource': query.databases(), 'actions': {'read': True}}]}))
        self.admin_client.query(query.create_role({'name': roleTwoName, 'privileges': [{'resource': query.databases(), 'actions': {'read': True}}]}))
        provider = self.admin_client.query(query.create_access_provider({'name': providerName, 'issuer': issuerName, 'jwks_uri': fullUri, 'roles': [query.role(roleOneName), {'role': query.role(roleTwoName), 'predicate': query.query(query.lambda_('x', True))}]}))
        self.assertEqual(provider['name'], providerName)
        self.assertEqual(provider['issuer'], issuerName)
        self.assertEqual(provider['jwks_uri'], fullUri)
        self.assertTrue(isinstance(provider['roles'], list))
class OptionPlotoptionsTreemapSonificationContexttracksMappingFrequency(Options):
    """Sonification context-track `frequency` mapping options.

    The original copy had duplicate plain ``def``s per option (stripped
    ``@property``/setter decorators); restored as property pairs.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def get_pseudorandom_indices(seed, modulus, count, exclude_multiples_of=0):
    """Expand `seed` into `count` pseudorandom indices below `modulus`.

    The seed is stretched by repeatedly hashing its last 32 bytes with
    blake until 4*count bytes exist; each index is a big-endian 4-byte
    word reduced mod `modulus`.  With exclude_multiples_of nonzero, the
    indices are remapped so that no returned value is a multiple of it.
    """
    assert modulus < 2 ** 24
    stream = seed
    while len(stream) < 4 * count:
        stream += blake(stream[-32:])
    words = [int.from_bytes(stream[i:i + 4], 'big') for i in range(0, count * 4, 4)]
    if exclude_multiples_of == 0:
        return [w % modulus for w in words]
    # Shrink the modulus to the count of non-multiples, then stretch each
    # value back out, skipping the excluded multiples.
    real_modulus = modulus * (exclude_multiples_of - 1) // exclude_multiples_of
    remapped = []
    for w in words:
        x = w % real_modulus
        remapped.append(x + 1 + x // (exclude_multiples_of - 1))
    return remapped
def destroy_service(service_name, with_archive):
    """Delete `service_name` through the services API.

    with_archive: when truthy, request archival as part of the delete.
    Returns the backend's confirmation message on HTTP 200; raises the
    mapped exception for any other status.
    """
    params = {'with_archive': True} if with_archive else {}
    url = '{}/{}'.format(base_services_url, service_name)
    response = get_session().delete(url, params=params)
    if response.status_code == 200:
        return response.json().get('message', '`{}` service has been destroyed successfully'.format(service_name))
    raise get_exception(response)
def test_info():
    # Smoke test for the logging helpers: emit each colour variant, then
    # raise the level to ERROR and emit messages that should be filtered.
    # There are no assertions — this only verifies the calls don't raise
    # (the "should/should not see" output is checked by eye).
    info('This is normal text')
    info_red('This is red text')
    info_green('This is green text')
    info_blue('This is blue text')
    set_log_level(ERROR)
    info('You should not see this')
    log(ERROR, 'You should see this error message')
    log(WARNING, 'You should NOT see this warning') |
def elements_from_clize_docstring(source):
    """Yield element tuples parsed from a clize-style docstring.

    Emits (EL_FREE_TEXT, text, preformatted), (EL_LABEL, text) and
    (EL_PARAM_DESC, argname, text) tuples.  Free text is buffered one
    paragraph at a time so a paragraph ending in ':' can be reinterpreted
    as a label when a parameter description follows it.
    """
    free_text = None  # pending free-text paragraph, flushed lazily
    for p in _split_clize_docstring(source):
        if p.startswith(' '):
            # Indented paragraph: flush any pending free text, then emit
            # this paragraph marked as preformatted (True).
            if (free_text is not None):
                (yield (EL_FREE_TEXT, free_text, False))
                free_text = None
            (yield (EL_FREE_TEXT, p, True))
            continue
        argdoc = CLIZEDOC_ARGUMENT_RE.match(p)
        if argdoc:
            (argname, text) = argdoc.groups()
            if (free_text is not None):
                # A pending paragraph ending in ':' becomes a label for
                # the parameter description (trailing colon dropped).
                if free_text.endswith(':'):
                    (yield (EL_LABEL, free_text[:(- 1)]))
                    free_text = None
            # Otherwise the pending paragraph is plain free text.
            if (free_text is not None):
                (yield (EL_FREE_TEXT, free_text, False))
                free_text = None
            (yield (EL_PARAM_DESC, argname, text))
        else:
            # Plain paragraph: flush the previous one and buffer this one.
            if (free_text is not None):
                (yield (EL_FREE_TEXT, free_text, False))
                free_text = None
            free_text = p
    # Flush whatever is still buffered at end of input.
    if (free_text is not None):
        (yield (EL_FREE_TEXT, free_text, False)) |
def test_serialization_types():
    """Serialized task/workflow interfaces must type the output as
    List[Dict[str, int]] with INTEGER map values.

    NOTE(review): the decorators had been stripped in this copy — the bare
    ``(cache=True, cache_version='1.0.0')`` line was the remnant of the
    ``@task(...)`` call, and the workflow needs ``@workflow``; both restored.
    """
    @task(cache=True, cache_version='1.0.0')
    def squared(value: int) -> typing.List[typing.Dict[str, int]]:
        return [{'squared_value': value ** 2}]

    @workflow
    def compute_square_wf(input_integer: int) -> typing.List[typing.Dict[str, int]]:
        compute_square_result = squared(value=input_integer)
        return compute_square_result

    wf_spec = get_serializable(OrderedDict(), serialization_settings, compute_square_wf)
    assert wf_spec.template.interface.outputs['o0'].type.collection_type.map_value_type.simple == SimpleType.INTEGER
    task_spec = get_serializable(OrderedDict(), serialization_settings, squared)
    assert task_spec.template.interface.outputs['o0'].type.collection_type.map_value_type.simple == SimpleType.INTEGER
class DiskVmMonHistorySerializer(MonHistorySerializer):
    """Monitoring-history serializer for a single VM disk."""

    # 1-based disk ID exposed in the API; converted to a 0-based index in
    # validate() below.
    disk_id = s.IntegerField(required=True, min_value=(DISK_ID_MIN + 1), max_value=(DISK_ID_MAX_BHYVE + 1))

    def validate(self, attrs):
        """Check that disk_id refers to a disk actually defined on the VM.

        Side effect: stores the 0-based index on self.item_id.  Raises
        ValidationError when the VM's active disk list has no such entry.
        """
        disk_id = attrs.get('disk_id')
        assert disk_id
        try:
            # Translate the public 1-based ID to the internal 0-based index
            # and probe the VM's active disk list for it.
            self.item_id = (disk_id - 1)
            self.obj.get_real_disk_id(self.obj.json_active_get_disks()[self.item_id])
        except IndexError:
            raise s.ValidationError(_('Disk ID not defined on VM.'))
        return attrs |
class TorchScriptShim(PyTorchShim):
    """Shim for TorchScript (torch.jit.ScriptModule) models.

    Behaves like PyTorchShim, but (de)serializes the model with
    torch.jit.save/load instead of the regular module path.
    """

    def __init__(self, model: Optional['torch.ScriptModule'], config=None, optimizer: Any=None, mixed_precision: bool=False, grad_scaler: Optional[PyTorchGradScaler]=None, device: Optional['torch.device']=None):
        # Only a ScriptModule (or None, for later deserialization via
        # from_bytes) is accepted.
        if ((model is not None) and (not isinstance(model, torch.jit.ScriptModule))):
            raise ValueError('PyTorchScriptShim must be initialized with ScriptModule or None (for deserialization)')
        super().__init__(model, config, optimizer, mixed_precision, grad_scaler, device)

    def to_bytes(self):
        """Serialize config + TorchScript model into a msgpack byte string."""
        filelike = BytesIO()
        torch.jit.save(self._model, filelike)
        filelike.seek(0)
        model_bytes = filelike.getvalue()
        msg = {'config': self.cfg, 'model': model_bytes}
        return srsly.msgpack_dumps(msg)

    def from_bytes(self, bytes_data):
        """Restore config + model from to_bytes() output; returns self."""
        device = get_torch_default_device()
        msg = srsly.msgpack_loads(bytes_data)
        self.cfg = msg['config']
        filelike = BytesIO(msg['model'])
        filelike.seek(0)
        # MPS cannot be targeted directly by torch.jit.load here, so load
        # on CPU first and move the module to the device afterwards.
        map_location = (torch.device('cpu') if (device.type == 'mps') else device)
        self._model = torch.jit.load(filelike, map_location=map_location)
        self._model.to(device)
        self._grad_scaler.to_(device)
        return self |
def to_pytorch(total_size=None, features=None, targets=None, options=None, targets_options=None, merger=default_merger, targets_merger=default_merger, num_parallel_calls=10, prefetch=1024, shuffle_buffer_size=100, **kwargs):
    """Build a map-style torch Dataset yielding float32 (x, y) pairs.

    total_size: dataset length; inferred from len(features[0]) when None.
    num_parallel_calls / prefetch / shuffle_buffer_size are accepted for
    interface parity but not used by this implementation.
    """
    if total_size is None:
        total_size = len(features[0])
    # (Removed a leftover debug print of total_size.)
    import torch

    (func, func_targets) = to_funcs(features, targets, options, targets_options, merger, targets_merger)

    class ClimetlabTorchDataset(torch.utils.data.Dataset):
        """Delegates item lookup to func / func_targets."""

        def __len__(self):
            return total_size

        def __getitem__(self, i):
            x = func(i).astype(np.float32)
            y = func_targets(i).astype(np.float32)
            return (x, y)

    return ClimetlabTorchDataset()
class Migration(migrations.Migration):
    """Create the reporting_agency_overview table plus its composite index."""

    dependencies = [('reporting', '0001_initial')]
    # One CreateModel (per-agency, per-fiscal-period totals with 23-digit,
    # 2-decimal money columns) followed by an index on
    # (fiscal_year, fiscal_period, toptier_code).
    operations = [migrations.CreateModel(name='ReportingAgencyOverview', fields=[('reporting_agency_overview_id', models.AutoField(primary_key=True, serialize=False)), ('toptier_code', models.TextField()), ('fiscal_year', models.IntegerField()), ('fiscal_period', models.IntegerField()), ('total_dollars_obligated_gtas', models.DecimalField(decimal_places=2, max_digits=23)), ('total_budgetary_resources', models.DecimalField(decimal_places=2, max_digits=23)), ('total_diff_approp_ocpa_obligated_amounts', models.DecimalField(decimal_places=2, max_digits=23))], options={'db_table': 'reporting_agency_overview'}), migrations.AddIndex(model_name='reportingagencyoverview', index=models.Index(fields=['fiscal_year', 'fiscal_period', 'toptier_code'], name='reporting_agency_ovr_group_idx'))] |
class CommaAndSpaces(object):
    # NOTE(review): p_enumItems looks like a PLY parser rule, which normally
    # carries its grammar production in the function docstring; that
    # docstring appears to have been stripped from this copy.  Deliberately
    # NOT inventing one here, since PLY would read it as the grammar.
    def p_enumItems(self, p):
        # Build p[0] as a flat list of items from the matched production;
        # branch on the number of symbols matched.
        n = len(p)
        if (n == 4):
            # Three symbols: extend the existing list with the third symbol
            # (the middle one — presumably a separator — is dropped).
            p[0] = (p[1] + [p[3]])
        elif (n == 2):
            # Single symbol: start a new one-element list.
            p[0] = [p[1]]
        elif (n == 3):
            if (p[2] == ','):
                # Second symbol is a bare trailing comma: keep list as-is.
                p[0] = p[1]
            else:
                # Space-separated: append the second symbol.
                p[0] = (p[1] + [p[2]]) |
class OptionPlotoptionsArearangeSonificationDefaultspeechoptionsMappingPitch(Options):
    """Default speech-track `pitch` mapping options.

    The original copy had duplicate plain ``def``s per option (stripped
    ``@property``/setter decorators); restored as property pairs.  The
    'undefined' string defaults are unchanged from the original.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get('undefined')

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get('undefined')

    @max.setter
    def max(self, text: str):
        self._config(text, js_type=False)

    @property
    def min(self):
        return self._config_get('undefined')

    @min.setter
    def min(self, text: str):
        self._config(text, js_type=False)

    @property
    def within(self):
        return self._config_get('undefined')

    @within.setter
    def within(self, text: str):
        self._config(text, js_type=False)
def mapreduce(key_value_func, iterable, **kwargs):
    """Coconut-runtime implementation of mapreduce()/collectby().

    key_value_func maps each item to a (key, value) pair.  Without a
    reduce_func, values are grouped per key into a defaultdict(list);
    with one, values sharing a key are folded pairwise; reduce_func=False
    makes duplicate keys an error.  collect_in supplies an existing
    mapping to accumulate into (implying reduce_func=False by default).
    """
    collect_in = kwargs.pop('collect_in', None)
    # Default: group into lists, unless collecting into an existing mapping
    # (then duplicates are forbidden unless a reduce_func is given).
    reduce_func = kwargs.pop('reduce_func', (None if (collect_in is None) else False))
    reduce_func_init = kwargs.pop('reduce_func_init', _coconut_sentinel)
    if ((reduce_func_init is not _coconut_sentinel) and (not reduce_func)):
        raise _coconut.TypeError('reduce_func_init requires reduce_func')
    map_using = kwargs.pop('map_using', _coconut.map)
    # Anything left in kwargs at this point is an unknown argument.
    if kwargs:
        raise _coconut.TypeError(('mapreduce()/collectby() got unexpected keyword arguments ' + _coconut.repr(kwargs)))
    collection = (collect_in if (collect_in is not None) else (_coconut.collections.defaultdict(_coconut.list) if (reduce_func is None) else _coconut.dict()))
    for (key, val) in map_using(key_value_func, iterable):
        if (reduce_func is None):
            collection[key].append(val)
        else:
            # Fold with the previous value for this key (or with
            # reduce_func_init when provided); the sentinel marks "absent".
            old_val = collection.get(key, reduce_func_init)
            if (old_val is not _coconut_sentinel):
                if (reduce_func is False):
                    raise _coconut.ValueError((('mapreduce()/collectby() got duplicate key ' + repr(key)) + ' with reduce_func=False'))
                val = reduce_func(old_val, val)
            collection[key] = val
    return collection |
# NOTE(review): the decorators had been stripped in this copy — the bare
# `()` was presumably `@click.command()` and the two option tuples were
# `@click.option(...)` calls; restored (click is imported at file top).
@click.command()
@click.option('--debug', is_flag=True, help='Enable debug mode.')
@click.option('--number', type=click.Choice(['one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen', 'twenty', 'twenty-one', 'twenty-two', 'twenty-three', 'twenty-four', 'twenty-five', 'twenty-six', 'twenty-seven', 'twenty-eight', 'twenty-nine', 'thirty']), show_default=True, help='This click choice has loads of options.')
def cli(debug, number):
    """Print whether debug mode is enabled."""
    print(f"Debug mode is {('on' if debug else 'off')}")
class TrackingZoom(ZoomTool):
    """ZoomTool variant whose wheel zoom keeps 'tracking' data ranges
    tracking (rescaling their tracking amount) instead of pinning them
    to fixed numeric bounds."""

    def normal_mouse_wheel(self, event):
        # Only act when wheel zooming is enabled and the wheel moved.
        if (self.enable_wheel and (event.mouse_wheel != 0)):
            if (event.mouse_wheel > 0):
                # Wheel up: zoom in (scale factor < 1).
                zoom = (2 / (1 + self.zoom_factor))
            elif (event.mouse_wheel < 0):
                # Wheel down: zoom out (scale factor > 1).
                zoom = ((1 + self.zoom_factor) / 2)
            c = self.component
            # Current view bounds and cursor position in data coordinates.
            (low_pt, high_pt) = self._map_coordinate_box((c.x, c.y), (c.x2, c.y2))
            mouse_pos = (c.x_mapper.map_data(event.x), c.y_mapper.map_data(event.y))
            if (self.tool_mode == 'range'):
                # Range mode: zoom only the tool's axis.
                datarange_list = [(self._determine_axis(), self._get_mapper().range)]
            else:
                # Box mode: zoom both axes.
                datarange_list = [(0, c.x_mapper.range), (1, c.y_mapper.range)]
            (orig_low, orig_high) = self._history[0]
            for (ndx, datarange) in datarange_list:
                mouse_val = mouse_pos[ndx]
                # Scale distances from the cursor by `zoom` so the data
                # point under the cursor stays fixed on screen.
                newlow = (mouse_val - (zoom * (mouse_val - low_pt[ndx])))
                newhigh = (mouse_val + (zoom * (high_pt[ndx] - mouse_val)))
                # The initial history entry may hold per-axis tuples or a
                # single pair of scalars.
                if (type(orig_high) in (tuple, list)):
                    (ol, oh) = (orig_low[ndx], orig_high[ndx])
                else:
                    (ol, oh) = (orig_low, orig_high)
                if self._zoom_limit_reached(ol, oh, newlow, newhigh):
                    event.handled = True
                    return
                if (datarange.default_state == 'low_track'):
                    # Range tracks its low end: if the highest datapoint is
                    # currently visible, rescale the tracking amount and let
                    # the range keep auto/track bounds.
                    hi = max([source.get_bounds()[1] for source in datarange.sources])
                    if ((hi >= low_pt[ndx]) and (hi <= high_pt[ndx])):
                        datarange.scale_tracking_amount(zoom)
                        newhigh = 'auto'
                        newlow = 'track'
                elif (datarange.default_state == 'high_track'):
                    # Mirror image of the low_track case.
                    lo = min([source.get_bounds()[0] for source in datarange.sources])
                    if ((lo >= low_pt[ndx]) and (lo <= high_pt[ndx])):
                        datarange.scale_tracking_amount(zoom)
                        newlow = 'auto'
                        newhigh = 'track'
                datarange.set_bounds(newlow, newhigh)
            event.handled = True
            self.component.request_redraw() |
class BeamChainPreviewComponent(AsyncioIsolatedComponent):
    """Component that serves block previews over a pausing beam chain for
    one shard; only enabled when syncing in beam mode."""

    # NOTE(review): _beam_chain is never used in this class — presumably
    # kept for subclasses or debugging; confirm before removing.
    _beam_chain = None
    # Preview shard served by this component; expected to be set by a
    # subclass or externally (no assignment visible here).
    shard_num: int

    def is_enabled(self) -> bool:
        # Enabled only under beam sync mode (compared case-insensitively).
        return (self._boot_info.args.sync_mode.upper() == SYNC_BEAM.upper())

    async def do_run(self, event_bus: EndpointAPI) -> None:
        """Connect to the chain DB and serve block previews until finished."""
        trinity_config = self._boot_info.trinity_config
        app_config = trinity_config.get_app_config(Eth1AppConfig)
        chain_config = app_config.get_chain_config()
        base_db = DBClient.connect(trinity_config.database_ipc_path)
        with base_db:
            loop = asyncio.get_event_loop()
            # urgent=False: preview work runs at background priority.
            beam_chain = make_pausing_beam_chain(chain_config.vm_configuration, chain_config.chain_id, chain_config.consensus_context_class, base_db, event_bus, NoopMetricsRegistry(), loop=loop, urgent=False)
            preview_server = BlockPreviewServer(event_bus, beam_chain, self.shard_num)
            async with background_asyncio_service(preview_server) as manager:
                (await manager.wait_finished()) |
def check_and_upload(package_id: PackageId, runner: CliRunner) -> None:
    """Upload the package unless the registry search already lists it.

    Agents are published, every other package type is pushed; an
    already-registered package just prints an informational message.
    """
    search_args = [*CLI_LOG_OPTION, 'search', (package_id.package_type.value + 's'), '--query', package_id.public_id.name]
    result = runner.invoke(cli, search_args, standalone_mode=False)
    if str(package_id.public_id) in result.output:
        print("The {} '{}' is already in the registry".format(package_id.package_type.value, str(package_id.public_id)))
    elif package_id.package_type == PackageType.AGENT:
        publish_agent(package_id, runner)
    else:
        push_package(package_id, runner)
class CBRNG(Computation):
    """Counter-based RNG computation: fills the `randoms` output array
    from per-generator counters via the sampler's bijection."""

    def __init__(self, randoms_arr, generators_dim, sampler, seed=None):
        # randoms_arr: template for the output; its trailing
        # `generators_dim` axes index independent generators (one counter
        # per generator).
        self._sampler = sampler
        self._keygen = KeyGenerator.create(sampler.bijection, seed=seed, reserve_id_space=True)
        assert (sampler.dtype == randoms_arr.dtype)
        counters_size = randoms_arr.shape[(- generators_dim):]
        self._generators_dim = generators_dim
        self._counters_t = Type(sampler.bijection.counter_dtype, shape=counters_size)
        # counters is 'io' (read and advanced), randoms is output-only.
        Computation.__init__(self, [Parameter('counters', Annotation(self._counters_t, 'io')), Parameter('randoms', Annotation(randoms_arr, 'o'))])

    def create_counters(self):
        """Return a zero-initialized counter array matching this computation."""
        return numpy.zeros(self._counters_t.shape, self._counters_t.dtype)

    def _build_plan(self, plan_factory, _device_params, counters, randoms):
        # One global work-item per counter; `batch` is the number of randoms
        # each generator produces (product of the non-generator axes).
        plan = plan_factory()
        plan.kernel_call(TEMPLATE.get_def('cbrng'), [counters, randoms], kernel_name='kernel_cbrng', global_size=helpers.product(counters.shape), render_kwds=dict(sampler=self._sampler, keygen=self._keygen, batch=helpers.product(randoms.shape[:(- self._generators_dim)]), counters_slices=[self._generators_dim], randoms_slices=[(len(randoms.shape) - self._generators_dim), self._generators_dim]))
        return plan |
class TestStat:
    """Tests for StatTester's stationarity detection on synthetic series.

    NOTE: the parametrize decorators below had been stripped to bare
    ``.parametrize(...)`` lines (a syntax error); restored as
    ``@pytest.mark.parametrize`` based on the residue and the pytest usage
    elsewhere in this class.
    """

    def _make_stationary(self, seed, n_samples):
        # White noise is stationary by construction.
        np.random.seed(seed)
        return np.random.randn(n_samples)

    def _make_nonstationary(self, seed, n_samples):
        # A random walk (cumulative sum of noise) is non-stationary.
        np.random.seed(seed)
        return np.random.randn(n_samples).cumsum()

    @pytest.mark.parametrize('seed', [42])
    @pytest.mark.parametrize('n_samples', [100, 1000, 10000])
    def test_stationary(self, seed, n_samples):
        X = self._make_stationary(seed, n_samples)
        assert StatTester().pvalue(X) < 0.1
        assert StatTester().is_stat(X)

    @pytest.mark.parametrize('seed', [42])
    @pytest.mark.parametrize('n_samples', [100, 1000, 10000])
    def test_nonstationary(self, seed, n_samples):
        X = self._make_nonstationary(seed, n_samples)
        assert StatTester().pvalue(X) > 0.1
        assert not StatTester().is_stat(X)

    def test_method_valueerror(self):
        # An unknown method must raise on both properties and computations.
        tester = StatTester(method='nonexistent')
        with pytest.raises(ValueError):
            _ = tester.null_hypothesis
        with pytest.raises(ValueError):
            _ = tester.pvalue(np.ones(100))
class OptionPlotoptionsPieSonificationContexttracksMappingTremoloDepth(Options):
    """Tremolo-depth mapping options for pie-series sonification context tracks.

    BUG FIX: each getter/setter pair shared a name with no decorators, so
    every getter was silently shadowed by its setter; restored the
    ``@property`` / ``@<name>.setter`` pairs.
    """

    @property
    def mapFunction(self):
        """Mapping function applied to the value (forwarded to the JS config)."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Point property the parameter is mapped to."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Upper bound for the mapped value."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Lower bound for the mapped value."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Range the mapping is computed within (forwarded to the JS config)."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class OptionSeriesTreegraphDataMarkerStatesHover(Options):
    """Hover-state marker options for treegraph series data points.

    BUG FIX: each getter/setter pair shared a name with no decorators, so
    every getter was silently shadowed by its setter; restored the
    ``@property`` / ``@<name>.setter`` pairs (``animation`` is read-only).
    """

    @property
    def animation(self) -> 'OptionSeriesTreegraphDataMarkerStatesHoverAnimation':
        """Sub-options object configuring the hover animation."""
        return self._config_sub_data('animation', OptionSeriesTreegraphDataMarkerStatesHoverAnimation)

    @property
    def enabled(self):
        """Whether the hover state is enabled (defaults to True)."""
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def fillColor(self):
        """Marker fill color on hover."""
        return self._config_get(None)

    @fillColor.setter
    def fillColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def height(self):
        """Marker height on hover."""
        return self._config_get('undefined')

    @height.setter
    def height(self, num: float):
        self._config(num, js_type=False)

    @property
    def heightPlus(self):
        """Height increment relative to the normal-state marker."""
        return self._config_get('undefined')

    @heightPlus.setter
    def heightPlus(self, num: float):
        self._config(num, js_type=False)

    @property
    def lineColor(self):
        """Marker line color on hover."""
        return self._config_get(None)

    @lineColor.setter
    def lineColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineWidth(self):
        """Marker line width on hover."""
        return self._config_get(None)

    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    @property
    def lineWidthPlus(self):
        """Line-width increment relative to the normal-state marker (default 0)."""
        return self._config_get(0)

    @lineWidthPlus.setter
    def lineWidthPlus(self, num: float):
        self._config(num, js_type=False)

    @property
    def width(self):
        """Marker width on hover."""
        return self._config_get('undefined')

    @width.setter
    def width(self, num: float):
        self._config(num, js_type=False)

    @property
    def widthPlus(self):
        """Width increment relative to the normal-state marker."""
        return self._config_get('undefined')

    @widthPlus.setter
    def widthPlus(self, num: float):
        self._config(num, js_type=False)
# NOTE(review): the five lines below look like residue of stripped decorators
# (bare argument tuples) — presumably a click command with one argument, two
# options, and a click-log verbosity option; restore from the original source.
()
('dockerfile_or_tag')
('--use-nvidia-driver/--no-use-nvidia-driver', help='Run test with nvidia docker driver (required for GPU image)', default=False)
('--extra-docker-args', help='Extra arguments pass to docker run command')
_log.simple_verbosity_option(logger)
def test(dockerfile_or_tag, use_nvidia_driver, extra_docker_args):
    """Run the test scripts declared in matrix.yml against a built docker image.

    The single argument is either an image tag (contains ':') or a
    Dockerfile path; exits non-zero on any configuration or test failure.
    """
    # Resolve both the image tag and the Dockerfile path from the argument.
    if (':' in dockerfile_or_tag):
        image_tag = dockerfile_or_tag
        dockerfile_path = gen_dockerfile_path_from_tag(image_tag)
    else:
        image_tag = assert_image_tag_from_dockerfile(logger, dockerfile_or_tag)
        dockerfile_path = dockerfile_or_tag
    matrix_yml_path = find_matrix_from_dockerfile(dockerfile_path)
    project_dir = os.path.dirname(matrix_yml_path)
    if (not os.path.exists(matrix_yml_path)):
        logger.error('matrix.yml not found in project dir: %s', project_dir)
        sys.exit(1)
    extra_args = []
    if (use_nvidia_driver or ('gpu' in image_tag)):
        try:
            # NOTE(review): this assignment is truncated in the source — the
            # right-hand side (presumably an HTTP connection to a local
            # nvidia-docker plugin endpoint) is missing; restore it from the
            # original. As written this is a syntax error.
            conn =
            conn.request('GET', '/v1.0/docker/cli')
            data = conn.getresponse().read()
            conn.close()
            # NOTE(review): on Python 3 `data` would be bytes, so
            # `data.split(' ')` would fail — confirm intended runtime.
            extra_args += data.split(' ')
        except:
            # Fall back to the nvidia container runtime flag.
            extra_args.append('--runtime=nvidia')
    if extra_docker_args:
        extra_args += extra_docker_args.split(' ')
    os.chdir(project_dir)
    with open(matrix_yml_path) as matrix_fobj:
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input — prefer yaml.safe_load.
        matrix = yaml.load(matrix_fobj)
    (target, env) = gen_target_env_from_tag(image_tag)
    if (target not in matrix):
        logger.error('target %s not found in matrix.', target)
        sys.exit(1)
    target_cfg = matrix[target]
    target_cfg_items = gen_target_cfg_items(target_cfg)
    if (not target_cfg_items):
        logger.error('Invalid type (%s) for target configuration.', type(target_cfg))
        sys.exit(1)
    # Locate the configuration entry matching this image's environment.
    env_cfg = None
    for (target_env, target_env_cfg) in gen_target_env_cfg(target_cfg_items):
        if (target_env == env):
            env_cfg = target_env_cfg
            break
    if (not env_cfg):
        logger.error('env %s not found in target %s', env, target)
        sys.exit(1)
    test_script = env_cfg.get('_test')
    if (not test_script):
        # NOTE(review): `dockerfile` is undefined here (NameError if reached) —
        # probably meant `dockerfile_path` or `image_tag`.
        logger.info('No test found for image %s, skipped.', dockerfile)
        sys.exit(0)
    if isinstance(test_script, str):
        test_script = [test_script]
    for script in test_script:
        script = os.path.abspath(script)
        if (not os.path.exists(script)):
            logger.info('Defined test script (%s) not found for image %s.', script, image_tag)
            sys.exit(1)
        logger.info('')
        logger.info('[*] Testing image %s with script %s...', image_tag, script)
        logger.info('')
        # Mount the script's directory into the container and run it there.
        cmds = ['docker', 'run', '--rm', '-v', ('%s:/build_test' % os.path.dirname(script))]
        cmds += extra_args
        cmds += [image_tag, 'bash', '-c', ('cd /build_test && bash %s' % os.path.basename(script))]
        logger.info('running test docker command: %s', cmds)
        check_call(cmds)
# NOTE(review): the line below looks like residue of a stripped decorator
# (keyword arguments with no callable) — likely a Modal/stub GPU-function
# decorator; restore it from the original source. As written this is a
# syntax error.
(image=gpu_image, gpu=True, shared_volumes={str(SHARED): volume}, secret=hf_secret, timeout=(30 * 60))
def extract_speakers_pyannote(path_audio: str) -> list[SpeakerSegment]:
    """Run pyannote speaker diarization on an audio file.

    Returns one SpeakerSegment (speaker label, start, end) per speech turn.
    """
    from pyannote.audio import Pipeline
    # Cache HuggingFace downloads on the shared volume.
    (cache_dir := (SHARED / '.hf')).mkdir(exist_ok=True)
    auth_token = os.environ['HUGGINGFACE_TOKEN']
    # NOTE(review): the model id 'pyannote/speaker-.1.1' appears garbled —
    # presumably a pyannote speaker-diarization model id; confirm against the
    # original source.
    pipeline = Pipeline.from_pretrained('pyannote/speaker-.1.1', use_auth_token=auth_token, cache_dir=cache_dir)
    ret = []
    dia = pipeline(path_audio)
    for (speech_turn, _, speaker) in dia.itertracks(yield_label=True):
        ret.append(SpeakerSegment(speaker, speech_turn.start, speech_turn.end))
    return ret
@pytest.mark.external
@pytest.mark.parametrize('validate_target', ['aws', 'okta', 'bigquery'])
def test_validate_failure(test_config: FidesConfig, validate_target: str, test_client: TestClient) -> None:
    """The /validate endpoint reports failure for known-bad connector configs.

    NOTE: the two marks above had been stripped to bare ``.external`` /
    ``.parametrize(...)`` lines (a syntax error); restored as
    ``@pytest.mark.*`` based on the residue.
    """
    data = {'config': EXTERNAL_FAILURE_CONFIG_BODY[validate_target], 'target': validate_target}
    response = test_client.post(
        test_config.cli.server_url + API_PREFIX + '/validate/',
        headers=test_config.user.auth_header,
        data=dumps(data),
    )
    validate_response = ValidateResponse.parse_raw(response.text)
    assert validate_response.status == 'failure'
    assert validate_response.message == EXPECTED_FAILURE_MESSAGES[validate_target]
    assert response.status_code == 200
class FilenameEndswithTest(unittest.TestCase):
    """Unit tests for the filename_endswith predicate factory."""

    def test_single_string(self):
        # A single suffix matches both bare names and nested paths.
        matcher = filename_endswith('.py')
        for path in ('foo.py', 'foo/foo.py'):
            self.assertTrue(matcher(path))
        for path in ('foo.txt', 'foo/foo.txt'):
            self.assertFalse(matcher(path))

    def test_sequence(self):
        # A sequence of suffixes matches any of them.
        matcher = filename_endswith(['.py', '.pyi'])
        for path in ('foo.py', 'foo/foo.py', 'foo.pyi', 'foo/foo.pyi'):
            self.assertTrue(matcher(path))
        for path in ('foo.txt', 'foo/foo.txt'):
            self.assertFalse(matcher(path))
class Plugin(plugin.PluginProto):
    """RPIEasy plugin for the SHT30 I2C temperature/humidity sensor."""
    PLUGIN_ID = 68
    PLUGIN_NAME = 'Environment - SHT30 temperature sensor (TESTING)'
    PLUGIN_VALUENAME1 = 'Temperature'
    PLUGIN_VALUENAME2 = 'Humidity'

    def __init__(self, taskindex):
        plugin.PluginProto.__init__(self, taskindex)
        self.dtype = rpieGlobals.DEVICE_TYPE_I2C
        self.vtype = rpieGlobals.SENSOR_TYPE_TEMP_HUM
        # Guard flag against re-entrant reads.
        self.readinprogress = 0
        self.valuecount = 2
        self.senddataoption = True
        self.timeroption = True
        self.timeroptional = False
        self.formulaoption = True
        self._nextdataservetime = 0
        self.lastread = 0
        self.i2cbus = None

    def plugin_init(self, enableplugin=None):
        """Initialize the I2C bus and schedule an early first read."""
        plugin.PluginProto.plugin_init(self, enableplugin)
        self.uservar[0] = 0
        if self.enabled:
            try:
                try:
                    i2cl = self.i2c
                except:
                    # No explicit bus configured.
                    i2cl = (- 1)
                self.i2cbus = gpios.HWPorts.i2c_init(i2cl)
                if (i2cl == (- 1)):
                    # Fall back to the already-initialized default bus.
                    self.i2cbus = gpios.HWPorts.i2cbus
                if (self.i2cbus is not None):
                    # Backdate the last-serve time so the first read happens
                    # ~2s from now instead of a full interval away.
                    if (self.interval > 2):
                        nextr = (self.interval - 2)
                    else:
                        nextr = self.interval
                    self._lastdataservetime = (rpieTime.millis() - (nextr * 1000))
                    self.lastread = 0
                else:
                    misc.addLog(rpieGlobals.LOG_LEVEL_ERROR, 'I2C can not be initialized!')
                    self.initialized = False
            except Exception as e:
                misc.addLog(rpieGlobals.LOG_LEVEL_ERROR, str(e))
                self.initialized = False

    def webform_load(self):
        """Render the device settings form (I2C address selector)."""
        choice1 = self.taskdevicepluginconfig[0]
        options = ['0x44', '0x45']
        # Decimal equivalents of the 0x44/0x45 addresses above.
        optionvalues = [68, 69]
        webserver.addFormSelector('I2C address', 'plugin_68_addr', len(options), options, optionvalues, None, int(choice1))
        webserver.addFormNote("Enable <a href='pinout'>I2C bus</a> first, than <a href='i2cscanner'>search for the used address</a>!")
        return True

    def webform_save(self, params):
        """Persist the selected I2C address, defaulting to 0x44 (68)."""
        par = webserver.arg('plugin_68_addr', params)
        if (par == ''):
            par = 68
        self.taskdevicepluginconfig[0] = int(par)
        return True

    def plugin_read(self):
        """Read the sensor and publish both values; return True on success."""
        result = False
        if (self.enabled and self.initialized and (self.readinprogress == 0)):
            self.readinprogress = 1
            try:
                (temp, hum) = self.read_sht30()
            except Exception as e:
                temp = None
                hum = None
                misc.addLog(rpieGlobals.LOG_LEVEL_ERROR, ('SHT30: ' + str(e)))
            if (temp is not None):
                self.set_value(1, temp, False)
                self.set_value(2, hum, False)
                self.plugin_senddata()
                self._lastdataservetime = rpieTime.millis()
                result = True
            self.readinprogress = 0
        return result

    def read_sht30(self):
        """Trigger a single-shot measurement; return (temp, humidity).

        Returns (None, None) on any bus error. Implicitly returns None when
        the plugin is not initialized.
        """
        if self.initialized:
            try:
                bus = self.i2cbus
            except:
                self.i2cbus = None
                return (None, None)
            temp = None
            hum = None
            try:
                # Command 0x2C 0x06: single-shot measurement
                # (44 = 0x2C, 6 = 0x06).
                bus.write_i2c_block_data(int(self.taskdevicepluginconfig[0]), 44, [6])
                # Wait for the conversion to complete.
                time.sleep(0.1)
                data = bus.read_i2c_block_data(int(self.taskdevicepluginconfig[0]), 0, 6)
                # Raw-to-physical conversion; presumably per the SHT30
                # datasheet formulas — confirm against the datasheet.
                temp = (((((data[0] * 256.0) + data[1]) * 175) / 65535.0) - 45)
                hum = ((100 * ((data[3] * 256) + data[4])) / 65535.0)
            except:
                return (None, None)
            return (temp, hum)
def _nonoverlapping_groups(table, bp: int):
    """Yield groups of rows whose intervals lie within *bp* of each other.

    A new group starts whenever a row's start is at least *bp* past the
    running maximum end of the preceding rows.
    """
    running_max_end = table.end.cummax().values[:-1]
    gaps = table.start.values[1:] - running_max_end
    # First row always opens group 0; each large-enough gap bumps the key.
    keys = np.r_[False, gaps > -bp].cumsum()
    keyed_rows = zip(keys, table.itertuples(index=False))
    return (rows for _key, rows in itertools.groupby(keyed_rows, first_of))
def test_associate_asset():
    """Verify AssetStore.associate_asset registration and retrieval rules."""
    store = AssetStore()
    # Associating an unknown asset without source data is an error.
    with raises(TypeError):
        store.associate_asset('module.name1', 'foo.js')
    store.associate_asset('module.name1', 'foo.js', 'xxx')
    assert store.get_asset('foo.js').to_string() == 'xxx'
    # A second module may reuse an existing asset without re-supplying data...
    store.associate_asset('module.name2', 'foo.js')
    # ...but supplying different data for an existing asset is an error.
    with raises(TypeError):
        store.associate_asset('module.name2', 'foo.js', 'zzz')
    store.associate_asset('module.name2', 'bar.js', 'yyy')
    assert store.get_associated_assets('module.name1') == ('foo.js',)
    assert store.get_associated_assets('module.name2') == ('foo.js', 'bar.js')
def extractTorchandkeytranslationWordpressCom(item):
    """Build a release message for 'Torch and key translation' feed items.

    Returns None for previews or items without a volume/chapter, a release
    message for recognized tags, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    release_types = {
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tag, (name, tl_type) in release_types.items():
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class ProphetInputlet(Inputlet):
    """Inputlet for splitting a set of revealed cards into an up-facing
    pile and a down-facing pile (the 'Prophet' interaction).
    """

    def __init__(self, initiator: Any, cards: List[Card]):
        self.initiator = initiator
        self.cards = cards
        self.upcards: List[Card] = []
        self.downcards: List[Card] = []

    def parse(self, data):
        """Validate client data ``[up_indices, down_indices]`` and map the
        indices back to Card objects.

        Falls back to ``[all cards, []]`` on any malformed input.
        """
        try:
            # Expect exactly two lists of ints...
            check_type(([[int, ...]] * 2), data)
            upcards = data[0]
            downcards = data[1]
            # ...which together must be a permutation of all card indices.
            check((sorted((upcards + downcards)) == list(range(len(self.cards)))))
        except CheckFailed:
            return [self.cards, []]
        cards = self.cards
        upcards = [cards[i] for i in upcards]
        downcards = [cards[i] for i in downcards]
        return [upcards, downcards]

    def data(self):
        """Serialize the chosen piles back to index lists for transmission."""
        cards = self.cards
        upcards = self.upcards
        downcards = self.downcards
        if (not (set(cards) == set((upcards + downcards)))):
            # Not (yet) a valid partition: send the default split.
            return [list(range(len(self.cards))), []]
        upcards = [cards.index(c) for c in upcards]
        downcards = [cards.index(c) for c in downcards]
        return [upcards, downcards]

    def set_result(self, upcards, downcards):
        # The two piles must exactly partition the original cards.
        assert (set(self.cards) == set((upcards + downcards)))
        self.upcards = upcards
        self.downcards = downcards

    def set_result_sid(self, upcards, downcards):
        """Like set_result, but accepts card ids resolved through the deck."""
        g: Any = self.game
        upcards = [g.deck.lookup(i) for i in upcards]
        downcards = [g.deck.lookup(i) for i in downcards]
        self.set_result(upcards, downcards)
def enableFlatpak():
    """Create the XDG symlink when only the Flatpak Discord install exists.

    Skips linking when a distro package (/usr/bin/discord) or a manual
    install (/opt/Discord) is present.
    """
    home = os.getenv('HOME')
    has_flatpak = os.path.isdir('%s/.var/app/com.discordapp.Discord' % home)
    has_package = os.path.isfile('/usr/bin/discord')
    has_manual = os.path.isdir('/opt/Discord')
    if has_flatpak and not has_package and not has_manual:
        XDG_Symlink(home)
class WafActiveRulesResponseAllOf(ModelNormal):
    """Generated OpenAPI model for the WAF active-rules response (allOf part).

    NOTE(review): the bare ``_property`` and ``_js_args_to_python_args``
    statements below appear to be residue of stripped decorators (likely
    ``@cached_property`` and ``@convert_js_args_to_python_args`` from the
    generated client's model_utils) — confirm against the original source;
    as written they are no-op name references.
    """
    allowed_values = {}
    validations = {}
    _property
    def additional_properties_type():
        """Return the types accepted for unmodeled (additional) properties."""
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type)
    _nullable = False
    _property
    def openapi_types():
        """Return the attribute-name -> (type,) map used for (de)serialization."""
        lazy_import()
        return {'data': ([WafActiveRuleResponseData],), 'included': (IncludedWithWafActiveRule,)}
    _property
    def discriminator():
        # No polymorphic discriminator on this model.
        return None
    # JSON wire names are identical to the Python attribute names here.
    attribute_map = {'data': 'data', 'included': 'included'}
    read_only_vars = {}
    _composed_schemas = {}
    _js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Deserialization constructor: build an instance from server data."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            # Generated models are keyword-only.
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            # Optionally drop keys that are not part of the schema.
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
        return self
    # Internal attributes that are always present on instances.
    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes'])
    _js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Keyword-only constructor; rejects positional args and read-only keys."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
            # NOTE(review): this read-only guard runs inside the loop, after
            # the attribute has already been set — confirm placement against
            # the original generated code.
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
def get_opensearch(host: str = 'opensearch') -> OpenSearch:
    """Build an OpenSearch client for a local/dev cluster over HTTPS.

    :param host: hostname of the OpenSearch node (default 'opensearch').
    :return: configured OpenSearch client.
    """
    port = 9200
    # Default dev credentials. BUG FIX: `auth` was previously constructed but
    # never passed to the client, so requests went out unauthenticated.
    auth = ('admin', 'admin')
    client = OpenSearch(
        hosts=[{'host': host, 'port': port}],
        http_auth=auth,
        connection_class=RequestsHttpConnection,
        use_ssl=True,
        # Dev-only: accept self-signed certificates and skip hostname checks.
        verify_certs=False,
        ssl_assert_hostname=False,
        ssl_show_warn=False,
    )
    return client
class OptionSeriesPictorialSonificationTracksMappingTremoloSpeed(Options):
    """Tremolo-speed mapping options for pictorial-series sonification tracks.

    BUG FIX: each getter/setter pair shared a name with no decorators, so
    every getter was silently shadowed by its setter; restored the
    ``@property`` / ``@<name>.setter`` pairs.
    """

    @property
    def mapFunction(self):
        """Mapping function applied to the value (forwarded to the JS config)."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Point property the parameter is mapped to."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Upper bound for the mapped value."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Lower bound for the mapped value."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Range the mapping is computed within (forwarded to the JS config)."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def use_class(name=None, pos=unimplemented_parameter, varargs=unimplemented_parameter, named=unimplemented_parameter, varkwargs=unimplemented_parameter, kwargs=None):
    """Build a named parameter converter around ``_use_class``.

    :param name: converter name; when falsy a warning is emitted and the
        converter's repr is used instead.
    :param kwargs: extra keyword arguments forwarded to ``_use_class``
        (a fresh dict per call when omitted).
    :return: the result of ``parameter_converter`` for the built converter.
    """
    # BUG FIX: the default was a shared mutable dict (kwargs={}); use the
    # None sentinel so each call gets its own dict.
    if kwargs is None:
        kwargs = {}
    conv = partial(_use_class, pos, varargs, named, varkwargs, kwargs)
    if not name:
        warnings.warn('Nameless parameter converter. Please specify the name argument when calling use_class', RuntimeWarning, stacklevel=3)
        name = repr(conv)
    return parameter_converter(conv, name=name)
def timestamp(filename, source='auto'):
    """Return a timestamp for *filename* from the requested source.

    'exif' reads EXIF data, 'stat' uses filesystem metadata, and 'auto'
    prefers EXIF but falls back to stat when EXIF cannot be read.

    Raises ValueError for any other source.
    """
    if source == 'exif':
        return exif_timestamp(filename)
    if source == 'stat':
        return stat_timestamp(filename)
    if source == 'auto':
        try:
            return exif_timestamp(filename)
        except exceptions.ICExifReadError:
            # No usable EXIF data: fall back to filesystem metadata.
            return stat_timestamp(filename)
    raise ValueError("source not in ['stat', 'exif', 'auto']")
@pytest.mark.parametrize(
    'initials, completed, any_failed, failures',
    [
        ([True], [False], True, [True]),
        ([False], [False], False, [False]),
        ([False, True], [True, False], True, [False, True]),
        ([False, True], [False, True], False, [False, False]),
        ([False, False], [False, False], False, [False, False]),
        ([False, False], [True, True], False, [False, False]),
        ([True, True], [False, True], True, [True, False]),
        ([False, False], [], True, [True, True]),
    ],
)
def test_failed_realizations(initials, completed, any_failed, failures, base_arguments):
    """Failure masks are derived from the initial vs completed realization masks.

    NOTE: the parametrize decorator above had been stripped to a bare
    ``.parametrize(...)`` line (a syntax error); restored as
    ``@pytest.mark.parametrize`` based on the residue.
    """
    # Skip model validation so the run model can be built from mocks.
    BaseRunModel.validate = MagicMock()
    brm = BaseRunModel(base_arguments, MagicMock(), None, None, None, None)
    brm._initial_realizations_mask = initials
    brm._completed_realizations_mask = completed
    assert brm._create_mask_from_failed_realizations() == failures
    assert brm._count_successful_realizations() == sum(completed)
    assert brm.has_failed_realizations() == any_failed
class RecurrentAgent(Agent):
    """Recurrent policy agent: keeps a per-episode hidden state and picks a
    discrete action (sampled or greedy) from the model's action distribution.
    """

    def __init__(self, model=None, n_actions=None):
        super().__init__()
        self.model = model
        self.n_actions = n_actions

    def update(self, state_dict):
        """Load fresh model weights (e.g. pushed from a learner process)."""
        self.model.load_state_dict(state_dict)

    def __call__(self, state, observation, agent_info=None, history=None):
        """Perform one agent step.

        Returns ``(old_state, agent_output, new_state)`` where agent_output
        holds the chosen action and its probabilities.
        """
        initial_state = observation['initial_state']
        # Batch size of the incoming observation.
        B = observation.n_elems()
        if (agent_info is None):
            # Default to stochastic (sampled) actions for the whole batch.
            agent_info = DictTensor({'stochastic': torch.tensor([True]).repeat(B)})
        agent_initial = self.model.initial_state(B)
        if (state is None):
            state = DictTensor({'agent_state': agent_initial, 'agent_step': torch.zeros(B).long()})
        else:
            # Reset hidden state only for batch elements starting a new episode.
            istate = DictTensor({'agent_state': agent_initial, 'agent_step': torch.zeros(B).long()})
            state = masked_dicttensor(state, istate, initial_state)
        (new_z, action_proba) = self.model(state['agent_state'], observation['frame'])
        dist = torch.distributions.Categorical(action_proba)
        action_sampled = dist.sample()
        # Greedy action = argmax over action probabilities.
        action_max = action_proba.max(1)[1]
        # NOTE(review): `smask` is computed but never used — confirm intent.
        smask = agent_info['stochastic'].float()
        # Per-element choice between greedy and sampled action.
        action = masked_tensor(action_max, action_sampled, agent_info['stochastic'])
        new_state = DictTensor({'agent_state': new_z, 'agent_step': (state['agent_step'] + 1)})
        agent_do = DictTensor({'action': action, 'action_probabilities': action_proba})
        return (state, agent_do, new_state)
def validate_web_app_data(token: str, raw_init_data: str):
    """Check the integrity of Telegram Web App init data via its HMAC hash.

    :param token: bot token used to derive the secret key.
    :param raw_init_data: URL-encoded init data containing a 'hash' field.
    :return: True when the recomputed HMAC matches the supplied hash.
    """
    try:
        fields = dict(parse_qsl(raw_init_data))
    except ValueError:
        return False
    received_hash = fields.pop('hash', None)
    if received_hash is None:
        return False
    # Data-check string: sorted key=value pairs joined with newlines.
    check_string = '\n'.join(f'{key}={value}' for key, value in sorted(fields.items()))
    # Secret key = HMAC-SHA256 of the bot token keyed with "WebAppData".
    secret = hmac.new(key=b'WebAppData', msg=token.encode(), digestmod=sha256)
    computed = hmac.new(secret.digest(), check_string.encode(), sha256).hexdigest()
    return computed == received_hash
class MemoryOpTransformationTestCase(unittest.TestCase):
BATCH_SIZE = 1024
M = 10
N = 128
USE_DYNAMIC_BATCH = False
def __init__(self, *args, **kwargs):
super(MemoryOpTransformationTestCase, self).__init__(*args, **kwargs)
self.test_count = 0
def _prepare_cat_elimination_graph(self, dtype='float16'):
X0 = Tensor(shape=[(IntVar(values=[1, self.BATCH_SIZE], name='input_batch0') if self.USE_DYNAMIC_BATCH else IntImm(value=self.BATCH_SIZE)), IntImm(value=self.M), IntImm(value=self.N)], dtype=dtype, name='input0', is_input=True)
X1 = ops.concatenate()([X0], dim=1)
X2 = ops.concatenate()([X1], dim=2)
X3 = ops.concatenate()([X2, X1], dim=1)
X3._attrs['name'] = 'output0'
X3._attrs['is_output'] = True
return X3
def test_cat_elimination_graph_transformation(self):
OUTPUT = self._prepare_cat_elimination_graph()
graph = transform.toposort(OUTPUT)
transform.name_graph(graph)
transform.mark_param_tensor(graph)
self.assertEqual(len(graph), 4)
graph = transform.transform_memory_ops(graph)
self.assertEqual(len(graph), 2)
(['float16', 'float'])
def test_cat_elimination_e2e(self, dtype):
target = detect_target()
if ((dtype == 'float') and (target.name == 'rocm')):
self.skipTest('float tensors not supported by ROCM')
OUTPUT = self._prepare_cat_elimination_graph(dtype)
module = compile_model(OUTPUT, target, './tmp', f'cat_elimination_{dtype}')
x0_pt = get_random_torch_tensor([self.BATCH_SIZE, self.M, self.N], dtype)
out_pt = torch.cat([x0_pt, x0_pt], dim=1)
out = get_torch_empty_tensor(out_pt.size(), dtype)
module.run_with_tensors([x0_pt], [out])
self.assertTrue(torch.allclose(out_pt, out, atol=0.1, rtol=0.01))
def _prepare_split_cat_elimination_graph(self, dtype='float16'):
X0 = Tensor(shape=[(IntVar(values=[1, self.BATCH_SIZE], name='input_batch0') if self.USE_DYNAMIC_BATCH else IntImm(value=self.BATCH_SIZE)), IntImm(value=self.M), IntImm(value=self.N)], dtype=dtype, name='input0', is_input=True)
[X1, X2] = ops.split()(X0, int((self.M / 2)), dim=1)
X3 = ops.concatenate()([X1, X2], dim=1)
[X4, X5] = ops.split()(X3, int((self.N / 2)), dim=2)
X6 = ops.concatenate()([X4, X5], dim=1)
X6._attrs['name'] = 'output0'
X6._attrs['is_output'] = True
Y0 = Tensor(shape=[(IntVar(values=[1, self.BATCH_SIZE], name='input_batch1') if self.USE_DYNAMIC_BATCH else IntImm(value=self.BATCH_SIZE)), IntImm(value=self.M), IntImm(value=self.N)], dtype=dtype, name='input1', is_input=True)
Y1 = Tensor(shape=[(IntVar(values=[1, self.BATCH_SIZE], name='input_batch2') if self.USE_DYNAMIC_BATCH else IntImm(value=self.BATCH_SIZE)), IntImm(value=self.M), IntImm(value=self.N)], dtype=dtype, name='input2', is_input=True)
[Y2, Y3] = ops.split()(Y0, int((self.M / 2)), dim=1)
Y4 = ops.concatenate()([Y1, Y2, Y3, Y0], dim=1)
Y4._attrs['name'] = 'output1'
Y4._attrs['is_output'] = True
return [X6, Y4]
def test_split_cat_elimination_graph_transformation(self):
OUTPUT = self._prepare_split_cat_elimination_graph()
graph = transform.toposort(OUTPUT)
transform.name_graph(graph)
transform.mark_param_tensor(graph)
self.assertEqual(len(graph), 12)
graph = transform.transform_memory_ops(graph)
self.assertEqual(len(graph), 7)
(['float16', 'float'])
def test_split_cat_elimination_e2e(self, dtype):
target = detect_target()
if ((dtype == 'float') and (target.name == 'rocm')):
self.skipTest('float tensors not supported by ROCM')
OUTPUT = self._prepare_split_cat_elimination_graph(dtype)
module = compile_model(OUTPUT, target, './tmp', f'split_cat_elimination_{dtype}')
x0_pt = get_random_torch_tensor([self.BATCH_SIZE, self.M, self.N], dtype)
(x4_pt, x5_pt) = torch.split(x0_pt, int((self.N / 2)), dim=2)
out_pt0 = torch.cat([x4_pt, x5_pt], dim=1)
y0_pt = get_random_torch_tensor([self.BATCH_SIZE, self.M, self.N], dtype)
y1_pt = get_random_torch_tensor([self.BATCH_SIZE, self.M, self.N], dtype)
out_pt1 = torch.cat([y1_pt, y0_pt, y0_pt], dim=1)
out0 = get_torch_empty_tensor(out_pt0.size(), dtype)
out1 = get_torch_empty_tensor(out_pt1.size(), dtype)
module.run_with_tensors({'input0': x0_pt, 'input1': y0_pt, 'input2': y1_pt}, {'output0': out0, 'output1': out1})
self.assertTrue(torch.allclose(out_pt0, out0, atol=0.1, rtol=0.01))
self.assertTrue(torch.allclose(out_pt1, out1, atol=0.1, rtol=0.01))
def _prepare_cat_cat_elimination_graph(self, dtype='float16'):
X0 = Tensor(shape=[(IntVar(values=[1, self.BATCH_SIZE], name='input_batch0') if self.USE_DYNAMIC_BATCH else IntImm(value=self.BATCH_SIZE)), IntImm(value=int((self.M / 2))), IntImm(value=self.N)], dtype=dtype, name='input0', is_input=True)
X1 = Tensor(shape=[(IntVar(values=[1, self.BATCH_SIZE], name='input_batch1') if self.USE_DYNAMIC_BATCH else IntImm(value=self.BATCH_SIZE)), IntImm(value=int((self.M / 2))), IntImm(value=self.N)], dtype=dtype, name='input1', is_input=True)
X2 = Tensor(shape=[(IntVar(values=[1, self.BATCH_SIZE], name='input_batch2') if self.USE_DYNAMIC_BATCH else IntImm(value=self.BATCH_SIZE)), IntImm(value=self.M), IntImm(value=(self.N + 4))], dtype=dtype, name='input2', is_input=True)
X3 = Tensor(shape=[(IntVar(values=[1, self.BATCH_SIZE], name='input_batch3') if self.USE_DYNAMIC_BATCH else IntImm(value=self.BATCH_SIZE)), IntImm(value=self.M), IntImm(value=(self.N * 2))], dtype=dtype, name='input3', is_input=True)
X5 = ops.concatenate()([X0, X1], dim=1)
X6 = ops.concatenate()([X5, X2], dim=2)
X7 = ops.concatenate()([X3, X6], dim=2)
X8 = ops.concatenate()([X7, X2], dim=2)
X8._attrs['name'] = 'output0'
X8._attrs['is_output'] = True
return [X8]
def test_cat_cat_elimination_graph_transformation(self):
OUTPUT = self._prepare_cat_cat_elimination_graph()
graph = transform.toposort(OUTPUT)
transform.name_graph(graph)
transform.mark_param_tensor(graph)
self.assertEqual(len(graph), 8)
self.assertEqual(len(graph_utils.get_sorted_ops(graph)), 4)
graph = transform.transform_memory_ops(graph)
self.assertEqual(len(graph), 6)
self.assertEqual(len(graph_utils.get_sorted_ops(graph)), 2)
(['float16', 'float'])
def test_cat_cat_elimination_e2e(self, dtype):
target = detect_target()
if ((dtype == 'float') and (target.name == 'rocm')):
self.skipTest('float tensors not supported by ROCM')
OUTPUT = self._prepare_cat_cat_elimination_graph(dtype)
module = compile_model(OUTPUT, target, './tmp', f'cat_cat_elimination_{dtype}')
x0_pt = get_random_torch_tensor([self.BATCH_SIZE, int((self.M / 2)), self.N], dtype)
x1_pt = get_random_torch_tensor([self.BATCH_SIZE, int((self.M / 2)), self.N], dtype)
x2_pt = get_random_torch_tensor([self.BATCH_SIZE, self.M, (self.N + 4)], dtype)
x3_pt = get_random_torch_tensor([self.BATCH_SIZE, self.M, (self.N * 2)], dtype)
x5_pt = torch.cat([x0_pt, x1_pt], dim=1)
out_pt0 = torch.cat([x3_pt, x5_pt, x2_pt, x2_pt], dim=2)
out0 = get_torch_empty_tensor(out_pt0.size(), dtype)
module.run_with_tensors({'input0': x0_pt, 'input1': x1_pt, 'input2': x2_pt, 'input3': x3_pt}, [out0])
self.assertTrue(torch.allclose(out_pt0, out0, atol=0.1, rtol=0.01))
def _prepare_skip_cat_elimination_graph(self, dtype='float16'):
X0 = Tensor(shape=[(IntVar(values=[1, self.BATCH_SIZE], name='input_batch0') if self.USE_DYNAMIC_BATCH else IntImm(value=self.BATCH_SIZE)), IntImm(value=self.M), IntImm(value=self.N)], dtype=dtype, name='input0', is_input=True)
X1 = ops.concatenate()([X0], dim=1)
X2 = ops.concatenate()([X1], dim=2)
X3 = ops.concatenate()([X2, X1], dim=1)
X1._attrs['name'] = 'output0'
X1._attrs['is_output'] = True
X3._attrs['name'] = 'output1'
X3._attrs['is_output'] = True
return (X1, X3)
def test_skip_cat_elimination_graph_transformation(self):
OUTPUT = self._prepare_skip_cat_elimination_graph()
graph = transform.toposort(OUTPUT)
transform.name_graph(graph)
transform.mark_param_tensor(graph)
self.assertEqual(len(graph), 4)
graph = transform.transform_memory_ops(graph)
print(graph)
self.assertEqual(len(graph), 3)
(['float16', 'float'])
def test_skip_cat_elimination_e2e(self, dtype):
target = detect_target()
if ((dtype == 'float') and (target.name == 'rocm')):
self.skipTest('float tensors not supported by ROCM')
OUTPUT = self._prepare_skip_cat_elimination_graph(dtype)
module = compile_model(OUTPUT, target, './tmp', f'skip_cat_elimination_{dtype}')
x0_pt = get_random_torch_tensor([self.BATCH_SIZE, self.M, self.N], dtype)
out0_pt = torch.cat([x0_pt], dim=1)
out1_pt = torch.cat([x0_pt, x0_pt], dim=1)
out0 = get_torch_empty_tensor(out0_pt.size(), dtype)
out1 = get_torch_empty_tensor(out1_pt.size(), dtype)
module.run_with_tensors([x0_pt], [out0, out1])
self.assertTrue(torch.allclose(out0_pt, out0, atol=0.1, rtol=0.01))
self.assertTrue(torch.allclose(out1_pt, out1, atol=0.1, rtol=0.01))
def _prepare_skip_split_cat_elimination_graph(self, dtype='float16'):
X0 = Tensor(shape=[(IntVar(values=[1, self.BATCH_SIZE], name='input_batch0') if self.USE_DYNAMIC_BATCH else IntImm(value=self.BATCH_SIZE)), IntImm(value=self.M), IntImm(value=self.N)], dtype=dtype, name='input0', is_input=True)
[X1, X2] = ops.split()(X0, int((self.M / 2)), dim=1)
X3 = ops.concatenate()([X1, X2], dim=1)
[X4, X5] = ops.split()(X3, int((self.N / 2)), dim=2)
X6 = ops.concatenate()([X4, X5], dim=1)
X3._attrs['name'] = 'output0'
X3._attrs['is_output'] = True
X6._attrs['name'] = 'output1'
X6._attrs['is_output'] = True
return [X3, X6]
def test_skip_split_cat_elimination_graph_transformation(self):
OUTPUT = self._prepare_skip_split_cat_elimination_graph()
graph = transform.toposort(OUTPUT)
transform.name_graph(graph)
transform.mark_param_tensor(graph)
self.assertEqual(len(graph), 7)
graph = transform.transform_memory_ops(graph)
self.assertEqual(len(graph), 5)
(['float16'])
def test_skip_split_cat_elimination_e2e(self, dtype):
target = detect_target()
if ((dtype == 'float') and (target.name == 'rocm')):
self.skipTest('float tensors not supported by ROCM')
OUTPUT = self._prepare_skip_split_cat_elimination_graph(dtype)
module = compile_model(OUTPUT, target, './tmp', f'skip_split_cat_elimination_{dtype}')
x0_pt = get_random_torch_tensor([self.BATCH_SIZE, self.M, self.N], dtype)
out_pt0 = x0_pt
(x4_pt, x5_pt) = torch.split(x0_pt, int((self.N / 2)), dim=2)
out_pt1 = torch.cat([x4_pt, x5_pt], dim=1)
out0 = get_torch_empty_tensor(out_pt0.size(), dtype)
out1 = get_torch_empty_tensor(out_pt1.size(), dtype)
module.run_with_tensors({'input0': x0_pt}, {'output0': out0, 'output1': out1})
self.assertTrue(torch.allclose(out_pt0, out0, atol=0.1, rtol=0.01))
self.assertTrue(torch.allclose(out_pt1, out1, atol=0.1, rtol=0.01))
def _prepare_skip_cat_cat_elimination_graph(self, dtype='float16'):
X0 = Tensor(shape=[(IntVar(values=[1, self.BATCH_SIZE], name='input_batch0') if self.USE_DYNAMIC_BATCH else IntImm(value=self.BATCH_SIZE)), IntImm(value=int((self.M / 2))), IntImm(value=self.N)], dtype=dtype, name='input0', is_input=True)
X1 = Tensor(shape=[(IntVar(values=[1, self.BATCH_SIZE], name='input_batch1') if self.USE_DYNAMIC_BATCH else IntImm(value=self.BATCH_SIZE)), IntImm(value=int((self.M / 2))), IntImm(value=self.N)], dtype=dtype, name='input1', is_input=True)
X2 = Tensor(shape=[(IntVar(values=[1, self.BATCH_SIZE], name='input_batch2') if self.USE_DYNAMIC_BATCH else IntImm(value=self.BATCH_SIZE)), IntImm(value=self.M), IntImm(value=(self.N + 4))], dtype=dtype, name='input2', is_input=True)
X3 = Tensor(shape=[(IntVar(values=[1, self.BATCH_SIZE], name='input_batch3') if self.USE_DYNAMIC_BATCH else IntImm(value=self.BATCH_SIZE)), IntImm(value=self.M), IntImm(value=(self.N * 2))], dtype=dtype, name='input3', is_input=True)
X5 = ops.concatenate()([X0, X1], dim=1)
X6 = ops.concatenate()([X5, X2], dim=2)
X7 = ops.concatenate()([X3, X6], dim=2)
X8 = ops.concatenate()([X7, X2], dim=2)
X6._attrs['name'] = 'output0'
X6._attrs['is_output'] = True
X8._attrs['name'] = 'output1'
X8._attrs['is_output'] = True
return [X6, X8]
def test_skip_cat_cat_elimination_graph_transformation(self):
OUTPUT = self._prepare_skip_cat_cat_elimination_graph()
graph = transform.toposort(OUTPUT)
transform.name_graph(graph)
transform.mark_param_tensor(graph)
self.assertEqual(len(graph), 8)
self.assertEqual(len(graph_utils.get_sorted_ops(graph)), 4)
graph = transform.transform_memory_ops(graph)
self.assertEqual(len(graph), 7)
self.assertEqual(len(graph_utils.get_sorted_ops(graph)), 3)
# NOTE(review): the bare expression below is a no-op; it looks like a
# parameterizing decorator (e.g. `@parameterized.expand([...])`) whose
# `@...(` prefix was lost — confirm against upstream and restore it,
# since the test takes a `dtype` argument.
(['float16', 'float'])
def test_skip_cat_cat_elimination_e2e(self, dtype):
    """Compile the skip-cat-cat graph end to end and compare both outputs
    against eager PyTorch (allclose with atol=0.1, rtol=0.01)."""
    target = detect_target()
    if ((dtype == 'float') and (target.name == 'rocm')):
        self.skipTest('float tensors not supported by ROCM')
    OUTPUT = self._prepare_skip_cat_cat_elimination_graph(dtype)
    module = compile_model(OUTPUT, target, './tmp', f'skip_cat_cat_elimination_{dtype}')
    x0_pt = get_random_torch_tensor([self.BATCH_SIZE, int((self.M / 2)), self.N], dtype)
    x1_pt = get_random_torch_tensor([self.BATCH_SIZE, int((self.M / 2)), self.N], dtype)
    x2_pt = get_random_torch_tensor([self.BATCH_SIZE, self.M, (self.N + 4)], dtype)
    x3_pt = get_random_torch_tensor([self.BATCH_SIZE, self.M, (self.N * 2)], dtype)
    # Eager references: output0 = cat(cat(x0,x1), x2); output1 flattens the
    # nested cats into a single 4-way cat along dim 2.
    x5_pt = torch.cat([x0_pt, x1_pt], dim=1)
    out_pt0 = torch.cat([x5_pt, x2_pt], dim=2)
    out_pt1 = torch.cat([x3_pt, x5_pt, x2_pt, x2_pt], dim=2)
    out0 = get_torch_empty_tensor(out_pt0.size(), dtype)
    out1 = get_torch_empty_tensor(out_pt1.size(), dtype)
    module.run_with_tensors({'input0': x0_pt, 'input1': x1_pt, 'input2': x2_pt, 'input3': x3_pt}, [out0, out1])
    self.assertTrue(torch.allclose(out_pt0, out0, atol=0.1, rtol=0.01))
    self.assertTrue(torch.allclose(out_pt1, out1, atol=0.1, rtol=0.01))
def _test_fuse_strided_cat_cat(self, M0, M1, N, test_name, dtype='float16'):
    """Verify that a concat feeding another concat is fused via strided writes.

    Graph: concat_0=[X0,X1] -> add with X2; concat_2=[X0,concat_0]; both
    reduced and summed into Y. After compilation exactly 5 ops must remain,
    only 1 concatenate may survive, and no dynamic_slice may appear.
    Numerics are validated against eager PyTorch for batch 1 and BATCH_SIZE.
    """
    batch_sizes = [1, self.BATCH_SIZE]
    batch_dim = shape_utils.gen_int_var_min_max(batch_sizes, 'batch_0')
    X0 = Tensor(shape=[batch_dim, IntImm(M0), IntImm(N)], dtype=dtype, name='x0', is_input=True)
    X1 = Tensor(shape=[batch_dim, IntImm(M1), IntImm(N)], dtype=dtype, name='x1', is_input=True)
    M2 = (M0 + M1)
    X2 = Tensor(shape=[batch_dim, IntImm(M2), IntImm(N)], dtype=dtype, name='x2', is_input=True)
    cat_dim = 1
    concat_0 = ops.concatenate()([X0, X1], dim=cat_dim)
    add_1 = ops.elementwise(FuncEnum.ADD)(concat_0, X2)
    concat_2 = ops.concatenate()([X0, concat_0], dim=cat_dim)
    reduce_dim = cat_dim
    reduce_3 = ops.reduce_sum(reduce_dim)(add_1)
    reduce_4 = ops.reduce_sum(reduce_dim)(concat_2)
    Y = ops.elementwise(FuncEnum.ADD)(reduce_3, reduce_4)
    Y._attrs['name'] = 'output0'
    Y._attrs['is_output'] = True
    target = detect_target()
    dll_name = f'test_{self.test_count}.so'
    module = compile_model([Y], target, './tmp', test_name, dll_name=dll_name)
    self.test_count += 1
    sorted_graph = module.debug_sorted_graph
    sorted_ops = graph_utils.get_sorted_ops(sorted_graph)
    self.assertEqual(len(sorted_ops), 5)
    concat_cnt = 0
    for sorted_op in sorted_ops:
        op_type = sorted_op._attrs['op']
        # Fusion must not fall back to explicit slicing.
        self.assertTrue((op_type != 'dynamic_slice'))
        if (op_type == 'concatenate'):
            concat_cnt += 1
    # Only one concatenate should survive; the other is absorbed.
    self.assertEqual(concat_cnt, 1)
    for batch in [1, self.BATCH_SIZE]:
        x0_pt = get_random_torch_tensor([batch, M0, N], dtype)
        x1_pt = get_random_torch_tensor([batch, M1, N], dtype)
        x2_pt = get_random_torch_tensor([batch, M2, N], dtype)
        concat_0_pt = torch.cat([x0_pt, x1_pt], dim=cat_dim)
        add_1_pt = (concat_0_pt + x2_pt)
        concat_2_pt = torch.cat([x0_pt, concat_0_pt], dim=cat_dim)
        reduce_3_pt = torch.sum(add_1_pt, reduce_dim)
        reduce_4_pt = torch.sum(concat_2_pt, reduce_dim)
        y_pt = (reduce_3_pt + reduce_4_pt)
        y = get_torch_empty_tensor(y_pt.size(), dtype)
        inputs = {'x0': x0_pt, 'x1': x1_pt, 'x2': x2_pt}
        outputs = [y]
        module.run_with_tensors(inputs, outputs)
        torch.testing.assert_close(y_pt, y, atol=0.1, rtol=0.1)
def test_fuse_strided_cat_cat(self):
    """Exercise the cat-into-cat fusion for two shape configurations."""
    for m0, m1, n in ((3, 4, 9), (2, 4, 8)):
        self._test_fuse_strided_cat_cat(M0=m0, M1=m1, N=n, test_name='test_fuse_strided_cat_cat')
def _test_fuse_strided_cat_reshape_cat(self, M0, M1, M3, N, test_name, dtype='float16'):
    """Verify concat -> reshape -> concat fusion through a rank-changing view.

    concat_0=[X0,X1] is flattened to (batch, M2*N); the flattened view is both
    added to X2 and re-concatenated with X3. After compilation exactly 5 ops
    must remain, only 1 concatenate may survive, and no dynamic_slice may
    appear. Numerics validated against eager PyTorch for batch 1 / BATCH_SIZE.
    """
    batch_sizes = [1, self.BATCH_SIZE]
    batch_dim = shape_utils.gen_int_var_min_max(batch_sizes, 'batch_0')
    X0 = Tensor(shape=[batch_dim, IntImm(M0), IntImm(N)], dtype=dtype, name='x0', is_input=True)
    X1 = Tensor(shape=[batch_dim, IntImm(M1), IntImm(N)], dtype=dtype, name='x1', is_input=True)
    M2 = (M0 + M1)
    # X2/X3 are rank-2 so they line up with the reshaped (flattened) concat.
    X2 = Tensor(shape=[batch_dim, IntImm((M2 * N))], dtype=dtype, name='x2', is_input=True)
    X3 = Tensor(shape=[batch_dim, IntImm((M3 * N))], dtype=dtype, name='x3', is_input=True)
    cat_dim = 1
    concat_0 = ops.concatenate()([X0, X1], dim=cat_dim)
    reshape_to_shape_1 = [(- 1), (M2 * N)]
    reshape_1 = ops.reshape()(concat_0, reshape_to_shape_1)
    add_2 = ops.elementwise(FuncEnum.ADD)(reshape_1, X2)
    concat_3 = ops.concatenate()([X3, reshape_1], dim=cat_dim)
    reduce_dim = cat_dim
    reduce_4 = ops.reduce_sum(reduce_dim)(add_2)
    reduce_5 = ops.reduce_sum(reduce_dim)(concat_3)
    Y = ops.elementwise(FuncEnum.ADD)(reduce_4, reduce_5)
    Y._attrs['name'] = 'output0'
    Y._attrs['is_output'] = True
    target = detect_target()
    dll_name = f'test_{self.test_count}.so'
    module = compile_model([Y], target, './tmp', test_name, dll_name=dll_name)
    self.test_count += 1
    sorted_graph = module.debug_sorted_graph
    sorted_ops = graph_utils.get_sorted_ops(sorted_graph)
    self.assertEqual(len(sorted_ops), 5)
    concat_cnt = 0
    for sorted_op in sorted_ops:
        op_type = sorted_op._attrs['op']
        # Fusion must not fall back to explicit slicing.
        self.assertTrue((op_type != 'dynamic_slice'))
        if (op_type == 'concatenate'):
            concat_cnt += 1
    self.assertEqual(concat_cnt, 1)
    for batch in [1, self.BATCH_SIZE]:
        x0_pt = get_random_torch_tensor([batch, M0, N], dtype)
        x1_pt = get_random_torch_tensor([batch, M1, N], dtype)
        x2_pt = get_random_torch_tensor([batch, (M2 * N)], dtype)
        x3_pt = get_random_torch_tensor([batch, (M3 * N)], dtype)
        concat_0_pt = torch.cat([x0_pt, x1_pt], dim=cat_dim)
        reshape_1_pt = torch.reshape(concat_0_pt, reshape_to_shape_1)
        add_2_pt = (reshape_1_pt + x2_pt)
        concat_3_pt = torch.cat([x3_pt, reshape_1_pt], dim=cat_dim)
        reduce_4_pt = torch.sum(add_2_pt, reduce_dim)
        reduce_5_pt = torch.sum(concat_3_pt, reduce_dim)
        y_pt = (reduce_4_pt + reduce_5_pt)
        y = get_torch_empty_tensor(y_pt.size(), dtype)
        inputs = {'x0': x0_pt, 'x1': x1_pt, 'x2': x2_pt, 'x3': x3_pt}
        outputs = [y]
        module.run_with_tensors(inputs, outputs)
        torch.testing.assert_close(y_pt, y, atol=0.1, rtol=0.1)
def test_fuse_strided_cat_reshape_cat(self):
    """Single configuration of the cat -> reshape -> cat fusion check."""
    params = dict(M0=2, M1=4, M3=3, N=8, test_name='test_fuse_strided_cat_reshape_cat')
    self._test_fuse_strided_cat_reshape_cat(**params)
def _test_fuse_strided_cat_reshape_cat_2(self, M0, M1, M2, M3, N, test_name, dtype='float16'):
    """Deeper cat/reshape chain: add -> concat -> reshape -> {add, concat ->
    reshape -> {add, concat}} with three reduce_sums folded into one output.

    After compilation exactly 8 ops must remain, only 1 concatenate may
    survive, and no dynamic_slice may appear. Numerics are validated against
    eager PyTorch for batch 1 and BATCH_SIZE.
    """
    assert (M0 == M1), f'expected M0={M0!r} to be equal to M1={M1!r}'
    batch_sizes = [1, self.BATCH_SIZE]
    batch_dim = shape_utils.gen_int_var_min_max(batch_sizes, 'batch_0')
    X0 = Tensor(shape=[batch_dim, IntImm((M0 * N))], dtype=dtype, name='x0', is_input=True)
    X1 = Tensor(shape=[batch_dim, IntImm((M1 * N))], dtype=dtype, name='x1', is_input=True)
    X2 = Tensor(shape=[batch_dim, IntImm((M2 * N))], dtype=dtype, name='x2', is_input=True)
    X3 = Tensor(shape=[batch_dim, IntImm(M3), IntImm(N)], dtype=dtype, name='x3', is_input=True)
    M4 = (M0 + M2)
    X4 = Tensor(shape=[batch_dim, IntImm(M4), IntImm(N)], dtype=dtype, name='x4', is_input=True)
    cat_dim = 1
    add_0 = ops.elementwise(FuncEnum.ADD)(X0, X1)
    concat_1 = ops.concatenate()([add_0, X2], dim=cat_dim)
    reshape_2 = ops.reshape()(concat_1, [(- 1), (M0 + M2), N])
    add_3 = ops.elementwise(FuncEnum.ADD)(reshape_2, X4)
    concat_4 = ops.concatenate()([X3, reshape_2, X3], dim=cat_dim)
    # Flattened length of concat_4 along cat_dim, used for the next reshape
    # and to size the extra input X6.
    reshape_to_shape_5 = (sum([t.shape()[cat_dim].value() for t in [X3, reshape_2, X3]]) * N)
    reshape_5 = ops.reshape()(concat_4, [(- 1), reshape_to_shape_5])
    X6 = Tensor(shape=[batch_dim, IntImm(reshape_to_shape_5)], dtype=dtype, name='x6', is_input=True)
    add_6 = ops.elementwise(FuncEnum.ADD)(reshape_5, X6)
    concat_7 = ops.concatenate()([X0, reshape_5, X0], dim=cat_dim)
    reshape_8 = ops.reshape()(add_3, [(- 1), ((M0 + M2) * N)])
    reduce_dim = cat_dim
    reduce_9 = ops.reduce_sum(reduce_dim)(reshape_8)
    reduce_10 = ops.reduce_sum(reduce_dim)(add_6)
    reduce_11 = ops.reduce_sum(reduce_dim)(concat_7)
    add_12 = ops.elementwise(FuncEnum.ADD)(reduce_9, reduce_10)
    Y = ops.elementwise(FuncEnum.ADD)(add_12, reduce_11)
    Y._attrs['name'] = 'output0'
    Y._attrs['is_output'] = True
    target = detect_target()
    dll_name = f'test_{self.test_count}.so'
    module = compile_model([Y], target, './tmp', test_name, dll_name=dll_name)
    self.test_count += 1
    sorted_graph = module.debug_sorted_graph
    sorted_ops = graph_utils.get_sorted_ops(sorted_graph)
    self.assertEqual(len(sorted_ops), 8)
    concat_cnt = 0
    for sorted_op in sorted_ops:
        op_type = sorted_op._attrs['op']
        # Fusion must not fall back to explicit slicing.
        self.assertTrue((op_type != 'dynamic_slice'))
        if (op_type == 'concatenate'):
            concat_cnt += 1
    self.assertEqual(concat_cnt, 1)
    for batch in [1, self.BATCH_SIZE]:
        x0_pt = get_random_torch_tensor([batch, (M0 * N)], dtype)
        x1_pt = get_random_torch_tensor([batch, (M1 * N)], dtype)
        x2_pt = get_random_torch_tensor([batch, (M2 * N)], dtype)
        x3_pt = get_random_torch_tensor([batch, M3, N], dtype)
        x4_pt = get_random_torch_tensor([batch, M4, N], dtype)
        x6_pt = get_random_torch_tensor([batch, reshape_to_shape_5], dtype)
        add_0_pt = (x0_pt + x1_pt)
        concat_1_pt = torch.cat([add_0_pt, x2_pt], dim=cat_dim)
        reshape_2_pt = torch.reshape(concat_1_pt, [(- 1), (M0 + M2), N])
        add_3_pt = (reshape_2_pt + x4_pt)
        concat_4_pt = torch.cat([x3_pt, reshape_2_pt, x3_pt], dim=cat_dim)
        reshape_5_pt = torch.reshape(concat_4_pt, [(- 1), reshape_to_shape_5])
        add_6_pt = (reshape_5_pt + x6_pt)
        concat_7_pt = torch.cat([x0_pt, reshape_5_pt, x0_pt], dim=cat_dim)
        reshape_8_pt = torch.reshape(add_3_pt, [(- 1), ((M0 + M2) * N)])
        reduce_9_pt = torch.sum(reshape_8_pt, reduce_dim)
        reduce_10_pt = torch.sum(add_6_pt, reduce_dim)
        reduce_11_pt = torch.sum(concat_7_pt, reduce_dim)
        add_12_pt = (reduce_9_pt + reduce_10_pt)
        y_pt = (add_12_pt + reduce_11_pt)
        y = get_torch_empty_tensor(y_pt.size(), dtype)
        inputs = {'x0': x0_pt, 'x1': x1_pt, 'x2': x2_pt, 'x3': x3_pt, 'x4': x4_pt, 'x6': x6_pt}
        outputs = [y]
        module.run_with_tensors(inputs, outputs)
        torch.testing.assert_close(y_pt, y, atol=0.1, rtol=0.1)
def test_fuse_strided_cat_reshape_cat_2(self):
    """Single configuration of the deeper cat/reshape fusion check."""
    params = dict(M0=2, M1=2, M2=2, M3=1, N=2, test_name='test_fuse_strided_cat_reshape_cat_2')
    self._test_fuse_strided_cat_reshape_cat_2(**params)
def _test_fuse_strided_cat_reshape_cat_3(self, M0, M1, M2, M3, N, test_name, dtype='float16'):
    """Same chain as _test_fuse_strided_cat_reshape_cat_2: expects 8 ops,
    1 surviving concatenate, no dynamic_slice; numerics vs eager PyTorch.

    NOTE(review): the body is currently an exact duplicate of
    _test_fuse_strided_cat_reshape_cat_2 (only the name differs) — possibly
    a variation was intended here; confirm against upstream.
    """
    assert (M0 == M1), f'expected M0={M0!r} to be equal to M1={M1!r}'
    batch_sizes = [1, self.BATCH_SIZE]
    batch_dim = shape_utils.gen_int_var_min_max(batch_sizes, 'batch_0')
    X0 = Tensor(shape=[batch_dim, IntImm((M0 * N))], dtype=dtype, name='x0', is_input=True)
    X1 = Tensor(shape=[batch_dim, IntImm((M1 * N))], dtype=dtype, name='x1', is_input=True)
    X2 = Tensor(shape=[batch_dim, IntImm((M2 * N))], dtype=dtype, name='x2', is_input=True)
    X3 = Tensor(shape=[batch_dim, IntImm(M3), IntImm(N)], dtype=dtype, name='x3', is_input=True)
    M4 = (M0 + M2)
    X4 = Tensor(shape=[batch_dim, IntImm(M4), IntImm(N)], dtype=dtype, name='x4', is_input=True)
    cat_dim = 1
    add_0 = ops.elementwise(FuncEnum.ADD)(X0, X1)
    concat_1 = ops.concatenate()([add_0, X2], dim=cat_dim)
    reshape_2 = ops.reshape()(concat_1, [(- 1), (M0 + M2), N])
    add_3 = ops.elementwise(FuncEnum.ADD)(reshape_2, X4)
    concat_4 = ops.concatenate()([X3, reshape_2, X3], dim=cat_dim)
    # Flattened length of concat_4 along cat_dim for the next reshape / X6.
    reshape_to_shape_5 = (sum([t.shape()[cat_dim].value() for t in [X3, reshape_2, X3]]) * N)
    reshape_5 = ops.reshape()(concat_4, [(- 1), reshape_to_shape_5])
    X6 = Tensor(shape=[batch_dim, IntImm(reshape_to_shape_5)], dtype=dtype, name='x6', is_input=True)
    add_6 = ops.elementwise(FuncEnum.ADD)(reshape_5, X6)
    concat_7 = ops.concatenate()([X0, reshape_5, X0], dim=cat_dim)
    reshape_8 = ops.reshape()(add_3, [(- 1), ((M0 + M2) * N)])
    reduce_dim = cat_dim
    reduce_9 = ops.reduce_sum(reduce_dim)(reshape_8)
    reduce_10 = ops.reduce_sum(reduce_dim)(add_6)
    reduce_11 = ops.reduce_sum(reduce_dim)(concat_7)
    add_12 = ops.elementwise(FuncEnum.ADD)(reduce_9, reduce_10)
    Y = ops.elementwise(FuncEnum.ADD)(add_12, reduce_11)
    Y._attrs['name'] = 'output0'
    Y._attrs['is_output'] = True
    target = detect_target()
    dll_name = f'test_{self.test_count}.so'
    module = compile_model([Y], target, './tmp', test_name, dll_name=dll_name)
    self.test_count += 1
    sorted_graph = module.debug_sorted_graph
    sorted_ops = graph_utils.get_sorted_ops(sorted_graph)
    self.assertEqual(len(sorted_ops), 8)
    concat_cnt = 0
    for sorted_op in sorted_ops:
        op_type = sorted_op._attrs['op']
        # Fusion must not fall back to explicit slicing.
        self.assertTrue((op_type != 'dynamic_slice'))
        if (op_type == 'concatenate'):
            concat_cnt += 1
    self.assertEqual(concat_cnt, 1)
    for batch in [1, self.BATCH_SIZE]:
        x0_pt = get_random_torch_tensor([batch, (M0 * N)], dtype)
        x1_pt = get_random_torch_tensor([batch, (M1 * N)], dtype)
        x2_pt = get_random_torch_tensor([batch, (M2 * N)], dtype)
        x3_pt = get_random_torch_tensor([batch, M3, N], dtype)
        x4_pt = get_random_torch_tensor([batch, M4, N], dtype)
        x6_pt = get_random_torch_tensor([batch, reshape_to_shape_5], dtype)
        add_0_pt = (x0_pt + x1_pt)
        concat_1_pt = torch.cat([add_0_pt, x2_pt], dim=cat_dim)
        reshape_2_pt = torch.reshape(concat_1_pt, [(- 1), (M0 + M2), N])
        add_3_pt = (reshape_2_pt + x4_pt)
        concat_4_pt = torch.cat([x3_pt, reshape_2_pt, x3_pt], dim=cat_dim)
        reshape_5_pt = torch.reshape(concat_4_pt, [(- 1), reshape_to_shape_5])
        add_6_pt = (reshape_5_pt + x6_pt)
        concat_7_pt = torch.cat([x0_pt, reshape_5_pt, x0_pt], dim=cat_dim)
        reshape_8_pt = torch.reshape(add_3_pt, [(- 1), ((M0 + M2) * N)])
        reduce_9_pt = torch.sum(reshape_8_pt, reduce_dim)
        reduce_10_pt = torch.sum(add_6_pt, reduce_dim)
        reduce_11_pt = torch.sum(concat_7_pt, reduce_dim)
        add_12_pt = (reduce_9_pt + reduce_10_pt)
        y_pt = (add_12_pt + reduce_11_pt)
        y = get_torch_empty_tensor(y_pt.size(), dtype)
        inputs = {'x0': x0_pt, 'x1': x1_pt, 'x2': x2_pt, 'x3': x3_pt, 'x4': x4_pt, 'x6': x6_pt}
        outputs = [y]
        module.run_with_tensors(inputs, outputs)
        torch.testing.assert_close(y_pt, y, atol=0.1, rtol=0.1)
def test_fuse_strided_cat_reshape_cat_3(self):
    """Single configuration of the third cat/reshape fusion check."""
    params = dict(M0=2, M1=2, M2=2, M3=1, N=2, test_name='test_fuse_strided_cat_reshape_cat_3')
    self._test_fuse_strided_cat_reshape_cat_3(**params)
def _test_non_fusible_strided_cat_cat(self, M0, N, test_name, dtype='float16'):
    """Negative case: concat_0 is consumed by both the add and the output
    concat, so the output concat cannot be fully eliminated.

    Expect 3 ops and 2 concatenates after compilation, with the output
    concat's input_masks == [True, False] (inputs are [concat_0, add_1];
    only the first still needs an explicit copy by the concat kernel).
    Numerics validated against eager PyTorch.
    """
    batch_sizes = [1, self.BATCH_SIZE]
    batch_dim = shape_utils.gen_int_var_min_max(batch_sizes, 'batch_0')
    X0 = Tensor(shape=[batch_dim, IntImm(M0), IntImm(N)], dtype=dtype, name='x0', is_input=True)
    X1 = Tensor(shape=[batch_dim, IntImm(M0), IntImm(N)], dtype=dtype, name='x1', is_input=True)
    X2 = Tensor(shape=[batch_dim, IntImm((M0 + M0)), IntImm(N)], dtype=dtype, name='x2', is_input=True)
    cat_dim = 1
    concat_0 = ops.concatenate()([X0, X1], dim=cat_dim)
    add_1 = ops.elementwise(FuncEnum.ADD)(concat_0, X2)
    Y = ops.concatenate()([concat_0, add_1], dim=cat_dim)
    Y._attrs['name'] = 'output0'
    Y._attrs['is_output'] = True
    target = detect_target()
    dll_name = f'test_{self.test_count}.so'
    module = compile_model([Y], target, './tmp', test_name, dll_name=dll_name)
    self.test_count += 1
    sorted_graph = module.debug_sorted_graph
    sorted_ops = graph_utils.get_sorted_ops(sorted_graph)
    self.assertEqual(len(sorted_ops), 3)
    concat_cnt = 0
    output_cat = None
    for sorted_op in sorted_ops:
        op_type = sorted_op._attrs['op']
        if (op_type == 'concatenate'):
            concat_cnt += 1
            # Remember the concat that produces the graph output Y.
            if (sorted_op._attrs['outputs'][0] == Y):
                output_cat = sorted_op
    self.assertEqual(concat_cnt, 2)
    self.assertEqual(output_cat._attrs['input_masks'], [True, False])
    for batch in [1, self.BATCH_SIZE]:
        x0_pt = get_random_torch_tensor([batch, M0, N], dtype)
        x1_pt = get_random_torch_tensor([batch, M0, N], dtype)
        x2_pt = get_random_torch_tensor([batch, (M0 + M0), N], dtype)
        concat_0_pt = torch.cat([x0_pt, x1_pt], dim=cat_dim)
        add_1_pt = (concat_0_pt + x2_pt)
        y_pt = torch.cat([concat_0_pt, add_1_pt], dim=cat_dim)
        y = get_torch_empty_tensor(y_pt.size(), dtype)
        inputs = {'x0': x0_pt, 'x1': x1_pt, 'x2': x2_pt}
        outputs = [y]
        module.run_with_tensors(inputs, outputs)
        torch.testing.assert_close(y_pt, y, atol=0.1, rtol=0.1)
def test_non_fusible_strided_cat_cat(self):
    """Single configuration of the non-fusible cat-of-cat case."""
    params = dict(M0=2, N=8, test_name='test_non_fusible_strided_cat_cat')
    self._test_non_fusible_strided_cat_cat(**params)
def _test_non_fusible_split_reshape_cat(self, M, test_name, dtype='float16'):
    """split -> unsqueeze -> concat with an elementwise add as third input.

    Expect the compiled graph to keep exactly 2 ops (the split/unsqueeze
    chain cannot be folded into the concat here). Numerics are validated
    against eager PyTorch for batch 1 and BATCH_SIZE.
    """
    batch_sizes = [1, self.BATCH_SIZE]
    batch_dim = shape_utils.gen_int_var_min_max(batch_sizes, 'batch_0')
    # M must split evenly into two halves along dim 1.
    assert ((M % 2) == 0), f'expected M={M!r} % 2 == 0'
    X0 = Tensor(shape=[batch_dim, IntImm(M)], dtype=dtype, name='x0', is_input=True)
    X1 = Tensor(shape=[batch_dim, IntImm(2), IntImm((M // 2))], dtype=dtype, name='x1', is_input=True)
    dim = 1
    (split_0, split_1) = ops.split()(X0, [(M // 2), (M // 2)], dim=dim)
    unsqueeze_2 = ops.unsqueeze(dim=dim)(split_0)
    unsqueeze_3 = ops.unsqueeze(dim=dim)(split_1)
    add_4 = ops.elementwise(FuncEnum.ADD)(X1, X1)
    Y = ops.concatenate()([unsqueeze_2, unsqueeze_3, add_4], dim=dim)
    Y._attrs['name'] = 'output0'
    Y._attrs['is_output'] = True
    target = detect_target()
    module = compile_model(Y, target, './tmp', test_name)
    sorted_graph = module.debug_sorted_graph
    sorted_ops = graph_utils.get_sorted_ops(sorted_graph)
    self.assertEqual(len(sorted_ops), 2)
    for batch in [1, self.BATCH_SIZE]:
        x0_pt = get_random_torch_tensor([batch, M], dtype)
        x1_pt = get_random_torch_tensor([batch, 2, (M // 2)], dtype)
        (split_0_pt, split_1_pt) = torch.split(x0_pt, [(M // 2), (M // 2)], dim=dim)
        unsqueeze_2_pt = torch.unsqueeze(split_0_pt, dim)
        unsqueeze_3_pt = torch.unsqueeze(split_1_pt, dim)
        add_4_pt = (x1_pt + x1_pt)
        y_pt = torch.cat([unsqueeze_2_pt, unsqueeze_3_pt, add_4_pt], dim=dim)
        y = get_torch_empty_tensor(y_pt.size(), dtype)
        inputs = {'x0': x0_pt, 'x1': x1_pt}
        outputs = [y]
        module.run_with_tensors(inputs, outputs)
        torch.testing.assert_close(y_pt, y, atol=0.01, rtol=0.01)
def test_non_fusible_split_reshape_cat(self):
    """Single configuration of the non-fusible split/reshape/cat case."""
    params = dict(M=32, test_name='test_non_fusible_split_reshape_cat')
    self._test_non_fusible_split_reshape_cat(**params)
class OptionSeriesTreemapSonificationTracksMappingNoteduration(Options):
    """Highcharts `series.treemap.sonification.tracks.mapping.noteDuration`
    option wrapper: each value is read/written through the shared Options
    config machinery.

    NOTE(review): every pair of same-named defs below looks like a property
    getter/setter whose `@property` / `@<name>.setter` decorators were lost
    (the same pattern appears throughout this generated file); as written
    the second definition shadows the first — confirm against upstream.
    """
    def mapFunction(self):
        # Getter: no configured default.
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter: stored verbatim (not emitted as a JS expression).
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class OptionPlotoptionsScatterDatalabelsFilter(Options):
    """Highcharts `plotOptions.scatter.dataLabels.filter` option wrapper.

    NOTE(review): the same-named def pairs look like property getter/setter
    pairs whose decorators were lost in extraction (file-wide pattern);
    as written the setter shadows the getter — confirm against upstream.
    """
    def operator(self):
        # Comparison operator used by the filter (no default configured).
        return self._config_get(None)
    def operator(self, value: Any):
        self._config(value, js_type=False)
    def property(self):
        # Point property the filter compares against.
        return self._config_get(None)
    def property(self, text: str):
        self._config(text, js_type=False)
def test_load_titanic():
    """Titanic dataset loads as a 1309x14 DataFrame with the documented columns."""
    frame = load_titanic()
    expected_columns = [
        'pclass', 'survived', 'name', 'sex', 'age', 'sibsp', 'parch',
        'ticket', 'fare', 'cabin', 'embarked', 'boat', 'body', 'home.dest',
    ]
    assert isinstance(frame, DataFrame)
    assert frame.shape == (1309, 14)
    assert list(frame.columns) == expected_columns
def distance_geodetic(point_p, point_q, ellipsoid):
    """Distance between two geodetic points on a reference ellipsoid.

    Each point is (longitude, latitude, height) with angles in degrees;
    the trigonometric terms and prime vertical radii are precomputed here
    and the actual distance formula is delegated to geodetic_distance_core.
    """
    lon_p, lat_p, height_p = point_p[:]
    lon_q, lat_q, height_q = point_q[:]
    lon_p, lat_p = np.radians(lon_p), np.radians(lat_p)
    lon_q, lat_q = np.radians(lon_q), np.radians(lat_q)
    sin_p, cos_p = np.sin(lat_p), np.cos(lat_p)
    sin_q, cos_q = np.sin(lat_q), np.cos(lat_q)
    return geodetic_distance_core(
        cos_p,
        sin_p,
        height_p,
        cos_q,
        sin_q,
        height_q,
        np.cos(lon_q - lon_p),
        ellipsoid.prime_vertical_radius(sin_p),
        ellipsoid.prime_vertical_radius(sin_q),
        ellipsoid.first_eccentricity ** 2,
    )
def partition_files(staging, extension_destinations):
    """Move files out of *staging* into per-extension destination bundles.

    Walks the staging directory; every file whose extension appears in
    extension_destinations is moved into that destination's staging area and
    replaced by a relative symlink (../<depth>/<dest uuid>/<relative path>)
    so the original tree still resolves. Unmatched extensions are left alone.
    """
    root = staging.path().rstrip('/')
    for dirpath, _subdirs, names in os.walk(staging.path()):
        rel_dir = dirpath[len(root) + 1:]
        # Depth of this directory relative to the staging root, used to build
        # the '../' prefix that climbs back out of the staging mountpoint.
        depth = 1 if not rel_dir else rel_dir.count('/') + 2
        for name in names:
            ext = os.path.splitext(name)[1]
            dest = extension_destinations.get(ext, None)
            if dest is None:
                continue
            rel_path = os.path.join(rel_dir, name)
            src = staging.absolute(rel_path)
            dest.staging.move(src, rel_path)
            link_target = os.path.join('../' * depth, dest.uuid, rel_path)
            logging.info('%s %s' % (link_target, src))
            staging.symlink(link_target, rel_path)
class ETSToolkitError(RuntimeError):
    """Raised when an ETS GUI toolkit cannot be imported.

    Attributes:
        toolkit: name of the toolkit that failed to import (or None).
        message: human-readable error message; synthesized from *toolkit*
            when no explicit message is given.
    """

    def __init__(self, message='', toolkit=None, *args):
        if toolkit and not message:
            message = "could not import toolkit '{0}'".format(toolkit)
        self.toolkit = toolkit
        self.message = message
        if message:
            # args becomes (message[, toolkit], *extra) so str()/repr()
            # surface the full context.
            extra = ((toolkit,) + args) if toolkit else args
            self.args = (message,) + extra
class KLDivLossCallback(MetricCallback):
    """Distillation callback: temperature-scaled KL divergence between
    student and teacher logits, restricted to positions selected by an
    attention mask."""
    def __init__(self, input_key: Union[(str, List[str], Dict[(str, str)])]=None, output_key: Union[(str, List[str], Dict[(str, str)])]=None, prefix: str='kl_div_loss', multiplier: float=1.0, temperature: float=1.0, **metric_kwargs):
        """
        Args:
            input_key / output_key: runner keys routed to ``metric_fn``;
                output_key defaults to student logits, teacher logits and
                the attention mask.
            prefix: metric name under which the loss is reported.
            multiplier: scale applied by the parent MetricCallback.
            temperature: softmax temperature; the loss is multiplied by
                temperature**2 in ``metric_fn`` to compensate the scaling.
        """
        if (output_key is None):
            output_key = ['s_logits', 't_logits', 'attention_mask']
        super().__init__(prefix=prefix, input_key=input_key, output_key=output_key, multiplier=multiplier, metric_fn=self.metric_fn, **metric_kwargs)
        self.temperature = temperature
        # 'batchmean' sums over classes and averages over selected rows.
        self._criterion = nn.KLDivLoss(reduction='batchmean')
    def metric_fn(self, s_logits: torch.Tensor, t_logits: torch.Tensor, attention_mask: torch.Tensor):
        """KL(student || teacher) over positions where the mask is set.

        NOTE(review): torch.masked_select requires a bool mask — this assumes
        attention_mask is a bool tensor broadcastable to the logits (e.g.
        (batch, seq) against (batch, seq, vocab)); confirm with the caller.
        """
        mask = attention_mask.unsqueeze((- 1)).expand_as(s_logits)
        s_logits_slct = torch.masked_select(s_logits, mask)
        # Re-pack the flat selection into (n_selected, vocab) rows.
        s_logits_slct = s_logits_slct.view((- 1), s_logits.size((- 1)))
        t_logits_slct = torch.masked_select(t_logits, mask)
        t_logits_slct = t_logits_slct.view((- 1), s_logits.size((- 1)))
        # KLDivLoss expects log-probabilities for the input and plain
        # probabilities for the target; T**2 restores gradient scale.
        loss_kl = (self._criterion(F.log_softmax((s_logits_slct / self.temperature), dim=(- 1)), F.softmax((t_logits_slct / self.temperature), dim=(- 1))) * (self.temperature ** 2))
        return loss_kl
class CuckooVirtualBox(object):
    """Minimal driver for Cuckoo analysis VMs via the VBoxManage CLI.

    Every operation shells out to /usr/bin/VBoxManage; failures are raised
    as plain Exception instances so callers can treat any VirtualBox error
    uniformly.
    """
    # Machine states as reported by `VBoxManage showvminfo --machinereadable`.
    SAVED = 'saved'
    RUNNING = 'running'
    POWEROFF = 'poweroff'
    ABORTED = 'aborted'
    # Local sentinel for "VBoxManage itself failed"; value kept as-is so
    # existing comparisons/logs keep working.
    ERROR = 'machete'

    def __init__(self, headless):
        """headless: start VMs without a GUI ('headless') vs 'gui' mode."""
        self.vbox_manage_path = '/usr/bin/VBoxManage'
        self.status = None  # last state observed by _status()
        if headless:
            self.mode = 'headless'
        else:
            self.mode = 'gui'

    def setSnapshot(self, vm_name, snapshot_name=None):
        """Restore snapshot_name (or the current snapshot) of vm_name.

        Raises if the VM is already running; afterwards waits until the VM
        reports the 'saved' state.
        """
        if (self._status(vm_name) == self.RUNNING):
            raise Exception(('Trying to start an already started vm %s' % vm_name))
        virtualbox_args = [self.vbox_manage_path, 'snapshot', vm_name]
        if (snapshot_name is not None):
            virtualbox_args.extend(['restore', snapshot_name])
        else:
            virtualbox_args.extend(['restorecurrent'])
        try:
            if subprocess.call(virtualbox_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True):
                raise Exception("VBoxManage exited with error restoring the machine's snapshot")
        except OSError as exc:
            raise Exception(('VBoxManage failed restoring the machine: %s' % exc))
        self._waitStatus(vm_name, self.SAVED)

    def renameSnapshot(self, vm_name, new_name, snapshot_name=None):
        """Rename snapshot_name (or the current snapshot) to new_name.

        NOTE(review): the error strings below were copy-pasted from the
        restore path; kept verbatim to avoid changing raised messages.
        """
        virtualbox_args = [self.vbox_manage_path, 'snapshot', vm_name]
        if (snapshot_name is not None):
            virtualbox_args.extend(['edit', snapshot_name])
        else:
            virtualbox_args.extend(['edit', '--current'])
        virtualbox_args.extend(['--name', new_name])
        try:
            if subprocess.call(virtualbox_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True):
                raise Exception("VBoxManage exited with error restoring the machine's snapshot")
        except OSError as exc:
            raise Exception(('VBoxManage failed restoring the machine: %s' % exc))

    def takeSnapshot(self, vm_name, snapshot_name=None):
        """Take a snapshot named snapshot_name; a no-op when no name given."""
        virtualbox_args = [self.vbox_manage_path, 'snapshot', vm_name]
        if (snapshot_name is not None):
            virtualbox_args.extend(['take', snapshot_name])
        try:
            if subprocess.call(virtualbox_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True):
                raise Exception("VBoxManage exited with error restoring the machine's snapshot")
        except OSError as exc:
            raise Exception(('VBoxManage failed restoring the machine: %s' % exc))

    def deleteSnapshot(self, vm_name, snapshot_name=None):
        """Delete snapshot_name; a no-op when no name given."""
        virtualbox_args = [self.vbox_manage_path, 'snapshot', vm_name]
        if (snapshot_name is not None):
            virtualbox_args.extend(['delete', snapshot_name])
        try:
            if subprocess.call(virtualbox_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True):
                raise Exception("VBoxManage exited with error restoring the machine's snapshot")
        except OSError as exc:
            raise Exception(('VBoxManage failed restoring the machine: %s' % exc))

    def start(self, vm_name):
        """Start vm_name in the configured mode and wait for RUNNING."""
        if (self._status(vm_name) == self.RUNNING):
            raise Exception(('Trying to start an already started vm %s' % vm_name))
        try:
            proc = subprocess.Popen([self.vbox_manage_path, 'startvm', vm_name, '--type', self.mode], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
            (_, err) = proc.communicate()
            if err:
                raise OSError(err)
        except OSError as exc:
            raise Exception(('VBoxManage failed starting the machine %s in mode: %s' % (vm_name, exc)))
        self._waitStatus(vm_name, self.RUNNING)

    def stop(self, vm_name):
        """Power off vm_name, killing VBoxManage after ~10s if it hangs."""
        LOG.debug(('Stopping vm %s' % vm_name))
        if (self._status(vm_name) in [self.POWEROFF, self.ABORTED]):
            raise Exception(('Trying to stop an already stopped vm %s' % vm_name))
        try:
            proc = subprocess.Popen([self.vbox_manage_path, 'controlvm', vm_name, 'poweroff'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
            stop_me = 0
            while (proc.poll() is None):
                if (stop_me < 10):
                    time.sleep(1)
                    stop_me += 1
                else:
                    LOG.info(('Stopping vm %s timeouted. Killing' % vm_name))
                    proc.terminate()
            # Only report an error if VBoxManage failed on its own (i.e. we
            # did not terminate it ourselves after the timeout).
            if ((proc.returncode != 0) and (stop_me < 10)):
                LOG.error('VBoxManage exited with error powering off the machine')
        except OSError as exc:
            raise Exception(('VBoxManage failed powering off the machine: %s' % exc))

    def _list(self):
        """Return the names of all registered VMs, skipping inaccessible ones."""
        try:
            proc = subprocess.Popen([self.vbox_manage_path, 'list', 'vms'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
            (output, _) = proc.communicate()
        except OSError as exc:
            raise Exception(('VBoxManage error listing installed machines: %s' % exc))
        # Bug fix: communicate() yields bytes on Python 3 — decode before
        # splitting on a str separator (matches what _status() already does).
        output = output.decode(encoding='utf-8')
        machines = []
        for line in output.split('\n'):
            try:
                vm_name = line.split('"')[1]
                if (vm_name == '<inaccessible>'):
                    LOG.warn('Found an inaccessible virtual machine, please check its state.')
                else:
                    machines.append(vm_name)
            except IndexError:
                # Line without a quoted VM name — ignore.
                continue
        return machines

    def _status(self, vm_name):
        """Return vm_name's lowercase state, or ERROR when VBoxManage fails.

        Also caches the result on self.status. Raises when no state can be
        determined at all.
        """
        LOG.debug(('Getting status for %s' % vm_name))
        status = None
        try:
            proc = subprocess.Popen([self.vbox_manage_path, 'showvminfo', vm_name, '--machinereadable'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
            (output, err) = proc.communicate()
            output = output.decode(encoding='utf-8')
            if (proc.returncode != 0):
                LOG.error('VBoxManage returns error checking status for machine %s: %s', vm_name, err)
                status = self.ERROR
        except OSError as exc:
            LOG.error('VBoxManage failed to check status for machine %s: %s', vm_name, exc)
            status = self.ERROR
        if (not status):
            for line in output.split('\n'):
                state = re.match('VMState="(\\w+)"', line, (re.M | re.I))
                if state:
                    status = state.group(1)
                    LOG.debug(('Machine %s status %s' % (vm_name, status)))
                    status = status.lower()
        if status:
            self.status = status
            return status
        else:
            raise Exception(('Unable to get status for %s' % vm_name))

    def _waitStatus(self, vm_name, state):
        """Poll until vm_name reaches one of the wanted states (~10s budget).

        `state` may be a single state string or a list of acceptable states.
        """
        waitme = 0
        try:
            current = self._status(vm_name)
        except NameError:
            return
        if isinstance(state, str):
            state = [state]
        while (current not in state):
            LOG.debug('Waiting %i cuckooseconds for machine %s to switch to status %s', waitme, vm_name, state)
            if (waitme > 10):
                # Bug fix: the message was previously passed as a tuple
                # argument to Exception instead of being %-formatted.
                raise Exception('Timeout hit while for machine %s to change status' % vm_name)
            time.sleep(1)
            waitme += 1
            # Bug fix: refresh the observed state; previously the return
            # value was discarded, so the loop never saw the VM change state
            # and always hit the timeout.
            current = self._status(vm_name)
class ErrorTransformer(TypeTransformer[FlyteError]):
    """Flytekit type transformer mapping FlyteError <-> the ERROR simple literal."""
    def __init__(self):
        super().__init__(name='FlyteError', t=FlyteError)
    def get_literal_type(self, t: Type[T]) -> LiteralType:
        """FlyteError is always represented by the simple ERROR literal type."""
        return LiteralType(simple=_type_models.SimpleType.ERROR)
    def to_literal(self, ctx: FlyteContext, python_val: FlyteError, python_type: Type[T], expected: LiteralType) -> Literal:
        """Wrap a FlyteError into an Error scalar literal.

        Uses an exact type check (not isinstance), so subclasses are rejected.
        """
        if (type(python_val) != FlyteError):
            raise TypeTransformerFailedError(f"Expected value of type {FlyteError} but got '{python_val}' of type {type(python_val)}")
        return Literal(scalar=Scalar(error=Error(message=python_val.message, failed_node_id=python_val.failed_node_id)))
    def to_python_value(self, ctx: FlyteContext, lv: Literal, expected_python_type: Type[T]) -> T:
        """Rebuild a FlyteError from a literal carrying a scalar error payload."""
        if (not (lv and lv.scalar and (lv.scalar.error is not None))):
            raise TypeTransformerFailedError('Can only convert a generic literal to FlyteError')
        return FlyteError(message=lv.scalar.error.message, failed_node_id=lv.scalar.error.failed_node_id)
    def guess_python_type(self, literal_type: LiteralType) -> Type[FlyteError]:
        """Reverse lookup: an ERROR simple literal maps back to FlyteError."""
        if (literal_type.simple and (literal_type.simple == _type_models.SimpleType.ERROR)):
            return FlyteError
        raise ValueError(f'Transformer {self} cannot reverse {literal_type}')
class OptionPlotoptionsScatterSonificationDefaultspeechoptions(Options):
    """Highcharts `plotOptions.scatter.sonification.defaultSpeechOptions`
    option wrapper.

    NOTE(review): the same-named def pairs look like property getter/setter
    pairs whose decorators were lost in extraction (file-wide pattern);
    as written the setter shadows the getter — confirm against upstream.
    """
    def activeWhen(self) -> 'OptionPlotoptionsScatterSonificationDefaultspeechoptionsActivewhen':
        # Nested sub-options object (lazily created by the config machinery).
        return self._config_sub_data('activeWhen', OptionPlotoptionsScatterSonificationDefaultspeechoptionsActivewhen)
    def language(self):
        # Speech synthesis language; Highcharts default is 'en-US'.
        return self._config_get('en-US')
    def language(self, text: str):
        self._config(text, js_type=False)
    def mapping(self) -> 'OptionPlotoptionsScatterSonificationDefaultspeechoptionsMapping':
        return self._config_sub_data('mapping', OptionPlotoptionsScatterSonificationDefaultspeechoptionsMapping)
    def pointGrouping(self) -> 'OptionPlotoptionsScatterSonificationDefaultspeechoptionsPointgrouping':
        return self._config_sub_data('pointGrouping', OptionPlotoptionsScatterSonificationDefaultspeechoptionsPointgrouping)
    def preferredVoice(self):
        return self._config_get(None)
    def preferredVoice(self, text: str):
        self._config(text, js_type=False)
    def showPlayMarker(self):
        # Whether to show the play marker while sonifying; default True.
        return self._config_get(True)
    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)
    def type(self):
        # Track type; default 'speech'.
        return self._config_get('speech')
    def type(self, text: str):
        self._config(text, js_type=False)
class meter_features(loxi.OFObject):
    """OpenFlow meter_features struct (loxi-generated style object).

    Wire layout (24 bytes, network byte order):
    !L max_meter, !L band_types, !L capabilities, !B max_bands,
    !B max_color, 2 pad bytes, !L features, 4 pad bytes.
    """
    def __init__(self, max_meter=None, band_types=None, capabilities=None, max_bands=None, max_color=None, features=None):
        # Every field defaults to 0 when not supplied (loxi convention).
        if (max_meter != None):
            self.max_meter = max_meter
        else:
            self.max_meter = 0
        if (band_types != None):
            self.band_types = band_types
        else:
            self.band_types = 0
        if (capabilities != None):
            self.capabilities = capabilities
        else:
            self.capabilities = 0
        if (max_bands != None):
            self.max_bands = max_bands
        else:
            self.max_bands = 0
        if (max_color != None):
            self.max_color = max_color
        else:
            self.max_color = 0
        if (features != None):
            self.features = features
        else:
            self.features = 0
        return
    def pack(self):
        """Serialize to the 24-byte wire format.

        Bug fix: padding and the final join previously used text strings,
        which raises TypeError on Python 3 where struct.pack returns bytes;
        the buffer is now built as bytes throughout (also valid on Python 2).
        """
        packed = []
        packed.append(struct.pack('!L', self.max_meter))
        packed.append(struct.pack('!L', self.band_types))
        packed.append(struct.pack('!L', self.capabilities))
        packed.append(struct.pack('!B', self.max_bands))
        packed.append(struct.pack('!B', self.max_color))
        packed.append(b'\x00' * 2)
        packed.append(struct.pack('!L', self.features))
        packed.append(b'\x00' * 4)
        return b''.join(packed)
    def unpack(reader):
        """Deserialize from a loxi buffer reader (used as an unbound factory)."""
        obj = meter_features()
        obj.max_meter = reader.read('!L')[0]
        obj.band_types = reader.read('!L')[0]
        obj.capabilities = reader.read('!L')[0]
        obj.max_bands = reader.read('!B')[0]
        obj.max_color = reader.read('!B')[0]
        reader.skip(2)
        obj.features = reader.read('!L')[0]
        reader.skip(4)
        return obj
    def __eq__(self, other):
        # Field-wise equality; requires the exact same type.
        if (type(self) != type(other)):
            return False
        if (self.max_meter != other.max_meter):
            return False
        if (self.band_types != other.band_types):
            return False
        if (self.capabilities != other.capabilities):
            return False
        if (self.max_bands != other.max_bands):
            return False
        if (self.max_color != other.max_color):
            return False
        if (self.features != other.features):
            return False
        return True
    def pretty_print(self, q):
        """Render a human-readable dump through the loxi pretty-printer."""
        q.text('meter_features {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('max_meter = ')
                q.text(('%#x' % self.max_meter))
                q.text(',')
                q.breakable()
                q.text('band_types = ')
                q.text(('%#x' % self.band_types))
                q.text(',')
                q.breakable()
                q.text('capabilities = ')
                q.text(('%#x' % self.capabilities))
                q.text(',')
                q.breakable()
                q.text('max_bands = ')
                q.text(('%#x' % self.max_bands))
                q.text(',')
                q.breakable()
                q.text('max_color = ')
                q.text(('%#x' % self.max_color))
                q.text(',')
                q.breakable()
                q.text('features = ')
                q.text(('%#x' % self.features))
            q.breakable()
        q.text('}')
class PrivacyNoticeBase():
    """Shared SQLAlchemy column definitions and helpers for privacy-notice
    models.

    NOTE(review): decorators appear to have been stripped from this copy;
    `generate_notice_key` takes `cls` and is presumably a @classmethod —
    confirm against the original file.
    """
    # Persisted columns.
    name = Column(String, nullable=False)
    description = Column(String)
    internal_description = Column(String)
    regions = Column(ARRAY(EnumColumn(PrivacyNoticeRegion, native_enum=False)), index=True, nullable=False)
    consent_mechanism = Column(EnumColumn(ConsentMechanism), nullable=False)
    data_uses = Column(ARRAY(String), nullable=False)
    enforcement_level = Column(EnumColumn(EnforcementLevel), nullable=False)
    disabled = Column(Boolean, nullable=False, default=False)
    has_gpc_flag = Column(Boolean, nullable=False, default=False)
    displayed_in_privacy_center = Column(Boolean, nullable=False, default=False)
    displayed_in_overlay = Column(Boolean, nullable=False, default=False)
    displayed_in_api = Column(Boolean, nullable=False, default=False)
    notice_key = Column(String, nullable=False)
    # Transient (non-column) attributes populated elsewhere at runtime.
    current_preference: Optional[str] = None
    outdated_preference: Optional[str] = None
    current_served: Optional[bool] = None
    outdated_served: Optional[bool] = None

    def applies_to_system(self, system: System) -> bool:
        """True if any of the system's data uses (including parent uses)
        matches one of this notice's data_uses."""
        for system_data_use in System.get_data_uses([system], include_parents=True):
            for privacy_notice_data_use in (self.data_uses or []):
                if (system_data_use == privacy_notice_data_use):
                    return True
        return False

    def generate_notice_key(cls, name: Optional[str]) -> FidesKey:
        """Derive a FidesKey from a human-readable name: lowercase,
        trimmed, whitespace collapsed to underscores.

        Raises if `name` is not a string.
        """
        if (not isinstance(name, str)):
            raise Exception('Privacy notice keys must be generated from a string.')
        notice_key: str = re.sub('\\s+', '_', name.lower().strip())
        return FidesKey(FidesKey.validate(notice_key))

    def dry_update(self, *, data: dict[(str, Any)]) -> FidesBase:
        """Build a new, unsaved PrivacyNotice reflecting `data` applied on
        top of this record, without mutating or persisting this one."""
        cloned_attributes = self.__dict__.copy()
        for (key, val) in data.items():
            cloned_attributes[key] = val
        # Drop SQLAlchemy's instance-state bookkeeping before re-constructing.
        cloned_attributes.pop('_sa_instance_state')
        return PrivacyNotice(**cloned_attributes)
class host_match(object):
    """Reverse-domain-label trie for hostname rule matching.

    Rules are stored label-by-label in reversed order (TLD first) in
    `__rule_tree`; each node carries a reference count plus optional
    rule_info. Labels that collide with the node bookkeeping keys are
    escaped with a '__' prefix.
    """
    __rule_tree = None          # nested dict trie, keyed by (escaped) labels
    __rules = None              # flat set of registered rule strings (dict used as set)
    __internal_keywords = None  # node keys that host labels must not shadow

    def __init__(self):
        self.clear()
        self.__internal_keywords = ['refcnt', 'action', 'rule_info']

    def match(self, host: str):
        """Return (matched, action) for `host`.

        Walks the trie on the reversed labels; on a full-path hit without
        rule_info it retries with an empty label prepended (so a '*'
        wildcard child can match), otherwise falls back to a '*' child at
        the deepest reached node.
        """
        host = host.lower()
        _list = host.split('.')
        _list.reverse()
        o = self.__rule_tree
        is_found = True
        for x in _list:
            if (x in self.__internal_keywords):
                x = ('__%s' % x)  # escape labels that collide with node keys
            if (x not in o):
                is_found = False
                break
            o = o[x]
        if is_found:
            if o['rule_info']:
                return (True, o['rule_info']['action'])
            # Exact path exists but holds no rule: retry with a leading
            # empty label so a wildcard entry can be reached.
            _list.append('')
            _list.reverse()
            return self.match('.'.join(_list))
        if ('*' in o):
            return (True, o['*']['rule_info']['action'])
        return (False, None)

    def add_rule(self, rule_object: tuple):
        """Insert (rule, flags); returns False if the rule already exists."""
        (rule, flags) = rule_object
        rule = rule.lower()
        if (rule in self.__rules):
            return False
        _list = rule.split('.')
        _list.reverse()
        o = self.__rule_tree
        for x in _list:
            if (x in self.__internal_keywords):
                x = ('__%s' % x)
            if (x not in o):
                o[x] = {'refcnt': 0, 'action': None, 'rule_info': None}
            o = o[x]
            # Every node on the path counts one reference per rule.
            o['refcnt'] += 1
        o['rule_info'] = {'action': flags}
        self.__rules[rule] = None
        return True

    def del_rule(self, rule: str):
        """Remove a rule, pruning the first trie node whose refcount
        drops to zero.

        NOTE(review): the not-found-in-tree path returns None rather than
        False, unlike the other early return — confirm intent.
        """
        rule = rule.lower()
        if (rule not in self.__rules):
            return False
        _list = rule.split('.')
        _list.reverse()
        o = self.__rule_tree
        is_found = True
        for x in _list:
            if (x in self.__internal_keywords):
                x = ('__%s' % x)
            if (x not in o):
                is_found = False
                break
            o = o[x]
        if (not is_found):
            return
        o = self.__rule_tree
        for x in _list:
            t = o
            o = o[x]
            o['refcnt'] -= 1
            if (o['refcnt'] == 0):
                # Unreferenced subtree: drop it and stop walking.
                del t[x]
                break
        ''
        del self.__rules[rule]
        return True

    def rule_tree(self):
        return self.__rule_tree

    def rules(self):
        """Return all registered rule strings as a list."""
        rules = []
        for x in self.__rules:
            rules.append(x)
        return rules

    def exists(self, rule: str):
        return (rule in self.__rules)

    def clear(self):
        self.__rule_tree = {}
        self.__rules = {}
class ApiKeyAuthentication(BaseAuthentication):
    """DRF authentication backend driven by the ES-API-KEY request header."""

    def authenticate(self, request):
        """Authenticate via the ES-API-KEY header.

        Returns None when the header is absent (lets other backends run),
        (user, 'api_key') on success, and raises AuthenticationFailed for
        an unknown key.
        """
        api_key = request.META.get('HTTP_ES_API_KEY', None)
        if not api_key:
            return None
        try:
            user = User.objects.get(api_key=api_key)
        except User.DoesNotExist:
            raise AuthenticationFailed('Invalid API key')
        _set_request_dc(request, user)
        return (user, 'api_key')

    def authenticate_header(self, request):
        """Scheme advertised in WWW-Authenticate on 401 responses."""
        return 'api_key'
class WebsocketEcho(ServiceType):
    """Service type that always deploys the websocket echo backend."""
    skip_variant: ClassVar[bool] = True

    def __init__(self, *args, **kwargs) -> None:
        # Force the websocket echo backend manifests regardless of caller input.
        kwargs['service_manifests'] = integration_manifests.load('websocket_echo_backend')
        super().__init__(*args, **kwargs)

    def requirements(self):
        # NOTE(review): the string literal below is corrupted in this copy
        # (unbalanced quote — presumably a truncated "%"-formatted URL);
        # restore from the original source.
        (yield ('url', Query((' % self.path.fqdn), expected=404)))
class FlowStorePipe(Pipe):
    """Pipe that records lifecycle events into shared pipeline storage.

    NOTE(review): linear_storage/parallel_storage look like stripped
    @property accessors in this copy — confirm against the original.
    """

    def linear_storage(self):
        """Accumulator for sequentially-executed pipe events."""
        return current._pipeline_linear_storage

    def parallel_storage(self):
        """Accumulator for concurrently-executed pipe events."""
        return current._pipeline_parallel_storage

    def store_linear(self, status):
        """Record '<ClassName>.<status>' in the linear accumulator."""
        self.linear_storage.append(f'{self.__class__.__name__}.{status}')

    def store_parallel(self, status):
        """Record '<ClassName>.<status>' in the parallel accumulator."""
        self.parallel_storage.append(f'{self.__class__.__name__}.{status}')

    async def on_pipe_success(self):
        self.store_linear('success')

    async def on_pipe_failure(self):
        self.store_linear('failure')
class GymSerializer(Serializer):
    """Serializer converting GymMessage objects to/from protobuf bytes.

    NOTE(review): decorators appear stripped in this copy; encode/decode
    take no `self` and are presumably @staticmethod upstream.
    """

    def encode(msg: Message) -> bytes:
        """Encode a GymMessage into bytes inside a DialogueMessage envelope."""
        msg = cast(GymMessage, msg)
        message_pb = ProtobufMessage()
        dialogue_message_pb = DialogueMessage()
        gym_msg = gym_pb2.GymMessage()
        # Dialogue envelope fields.
        dialogue_message_pb.message_id = msg.message_id
        dialogue_reference = msg.dialogue_reference
        dialogue_message_pb.dialogue_starter_reference = dialogue_reference[0]
        dialogue_message_pb.dialogue_responder_reference = dialogue_reference[1]
        dialogue_message_pb.target = msg.target
        # Pick the oneof branch matching the performative.
        performative_id = msg.performative
        if (performative_id == GymMessage.Performative.ACT):
            performative = gym_pb2.GymMessage.Act_Performative()
            action = msg.action
            AnyObject.encode(performative.action, action)
            step_id = msg.step_id
            performative.step_id = step_id
            gym_msg.act.CopyFrom(performative)
        elif (performative_id == GymMessage.Performative.PERCEPT):
            performative = gym_pb2.GymMessage.Percept_Performative()
            step_id = msg.step_id
            performative.step_id = step_id
            observation = msg.observation
            AnyObject.encode(performative.observation, observation)
            reward = msg.reward
            performative.reward = reward
            done = msg.done
            performative.done = done
            info = msg.info
            AnyObject.encode(performative.info, info)
            gym_msg.percept.CopyFrom(performative)
        elif (performative_id == GymMessage.Performative.STATUS):
            performative = gym_pb2.GymMessage.Status_Performative()
            content = msg.content
            performative.content.update(content)
            gym_msg.status.CopyFrom(performative)
        elif (performative_id == GymMessage.Performative.RESET):
            # RESET carries no payload.
            performative = gym_pb2.GymMessage.Reset_Performative()
            gym_msg.reset.CopyFrom(performative)
        elif (performative_id == GymMessage.Performative.CLOSE):
            # CLOSE carries no payload.
            performative = gym_pb2.GymMessage.Close_Performative()
            gym_msg.close.CopyFrom(performative)
        else:
            raise ValueError('Performative not valid: {}'.format(performative_id))
        dialogue_message_pb.content = gym_msg.SerializeToString()
        message_pb.dialogue_message.CopyFrom(dialogue_message_pb)
        message_bytes = message_pb.SerializeToString()
        return message_bytes

    def decode(obj: bytes) -> Message:
        """Decode bytes produced by encode() back into a GymMessage."""
        message_pb = ProtobufMessage()
        gym_pb = gym_pb2.GymMessage()
        message_pb.ParseFromString(obj)
        # Dialogue envelope fields.
        message_id = message_pb.dialogue_message.message_id
        dialogue_reference = (message_pb.dialogue_message.dialogue_starter_reference, message_pb.dialogue_message.dialogue_responder_reference)
        target = message_pb.dialogue_message.target
        gym_pb.ParseFromString(message_pb.dialogue_message.content)
        performative = gym_pb.WhichOneof('performative')
        performative_id = GymMessage.Performative(str(performative))
        performative_content = {}
        if (performative_id == GymMessage.Performative.ACT):
            pb2_action = gym_pb.act.action
            action = AnyObject.decode(pb2_action)
            performative_content['action'] = action
            step_id = gym_pb.act.step_id
            performative_content['step_id'] = step_id
        elif (performative_id == GymMessage.Performative.PERCEPT):
            step_id = gym_pb.percept.step_id
            performative_content['step_id'] = step_id
            pb2_observation = gym_pb.percept.observation
            observation = AnyObject.decode(pb2_observation)
            performative_content['observation'] = observation
            reward = gym_pb.percept.reward
            performative_content['reward'] = reward
            done = gym_pb.percept.done
            performative_content['done'] = done
            pb2_info = gym_pb.percept.info
            info = AnyObject.decode(pb2_info)
            performative_content['info'] = info
        elif (performative_id == GymMessage.Performative.STATUS):
            content = gym_pb.status.content
            content_dict = dict(content)
            performative_content['content'] = content_dict
        elif (performative_id == GymMessage.Performative.RESET):
            pass
        elif (performative_id == GymMessage.Performative.CLOSE):
            pass
        else:
            raise ValueError('Performative not valid: {}.'.format(performative_id))
        return GymMessage(message_id=message_id, dialogue_reference=dialogue_reference, target=target, performative=performative, **performative_content)
class TestNXActionResubmit(unittest.TestCase):
    """Unit tests for NXActionResubmit init/parse/serialize round-trips.

    Each class attribute pairs the raw wire bytes ('buf') with the decoded
    value ('val'); `buf` is their concatenation in wire order.
    """
    type_ = {'buf': b'\xff\xff', 'val': ofproto.OFPAT_VENDOR}
    len_ = {'buf': b'\x00\x10', 'val': ofproto.NX_ACTION_RESUBMIT_SIZE}
    vendor = {'buf': b'\x00\x00# ', 'val': 8992}
    subtype = {'buf': b'\x00\x01', 'val': 1}
    in_port = {'buf': b'\nL', 'val': 2636}
    table = {'buf': b'R', 'val': 82}
    zfill = (b'\x00' * 3)  # trailing pad bytes
    buf = ((((((type_['buf'] + len_['buf']) + vendor['buf']) + subtype['buf']) + in_port['buf']) + table['buf']) + zfill)
    c = NXActionResubmit(in_port['val'])

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_init(self):
        # Constructor must set the fixed subtype and the given in_port.
        eq_(self.subtype['val'], self.c.subtype)
        eq_(self.in_port['val'], self.c.in_port)

    def test_parser(self):
        # Parsing the raw buffer recovers type, length and in_port.
        res = OFPActionVendor.parser(self.buf, 0)
        eq_(self.type_['val'], res.type)
        eq_(self.len_['val'], res.len)
        eq_(self.in_port['val'], res.in_port)

    def test_serialize(self):
        # Serializing then unpacking with the pack string recovers all fields.
        buf = bytearray()
        self.c.serialize(buf, 0)
        fmt = ofproto.NX_ACTION_RESUBMIT_PACK_STR
        res = struct.unpack(fmt, six.binary_type(buf))
        eq_(self.type_['val'], res[0])
        eq_(self.len_['val'], res[1])
        eq_(self.vendor['val'], res[2])
        eq_(self.subtype['val'], res[3])
        eq_(self.in_port['val'], res[4])
class TestDSLContext(TestDSLBase):
    """Tests for the TestSlide DSL context registration behavior.

    NOTE(review): decorator lines were stripped from this copy — bare
    expressions such as `(name)`, `('top context')` and `_context` are
    remnants of `@context(...)` / sub-context decorators, and the nesting
    below is reconstructed; confirm against the original file.
    """

    def test_can_be_named_from_decorator(self):
        name = 'context name'
        (name)  # remnant of a stripped @context(name) decorator
        def whatever(context):
            pass
        self.assertEqual(str(Context.all_top_level_contexts[0]), name)

    def test_can_be_named_from_function(self):
        # Context name is derived from the function name (underscores -> spaces).
        def Context_name_from_Function(context):
            pass
        self.assertEqual(str(Context.all_top_level_contexts[0]), 'Context name from Function')

    def test_multiple_top_contexts(self):
        def first_context(context):
            pass
        def second_context(context):
            pass
        self.assertEqual(str(Context.all_top_level_contexts[0]), 'first context')
        self.assertEqual(str(Context.all_top_level_contexts[1]), 'second context')

    def test_can_nest_contexts(self):
        context_names = ['top context name', 'first nested context name', 'second nested context name']
        (context_names[0])  # stripped decorator remnant
        def top(context):
            _context(context_names[1])
            def sub1(context):
                _context(context_names[2])
                def sub2(context):
                    pass
        expected_nested_context_names = context_names
        registered_nested_context_names = []
        registered_nested_context_names.append(str(Context.all_top_level_contexts[0]))
        registered_nested_context_names.append(str(Context.all_top_level_contexts[0].children_contexts[0]))
        registered_nested_context_names.append(str(Context.all_top_level_contexts[0].children_contexts[0].children_contexts[0]))
        self.assertEqual(expected_nested_context_names, registered_nested_context_names)

    def test_cant_call_context_function(self):
        def not_callable(context):
            pass
        with self.assertRaisesRegex(BaseException, 'This function should not be called outside test code.'):
            not_callable(None)

    def test_contex_cant_be_async(self):
        with self.assertRaisesRegex(RuntimeError, 'TestSlide DSL context function `async_context` can not be async!'):
            async def async_context(context):
                async def async_example(self):
                    pass

    def test_cant_create_top_contexts_with_same_name(self):
        with self.assertRaisesRegex(RuntimeError, 'A context with the same name is already defined'):
            def top_context(context):
                pass
            ('top context')  # stripped decorator remnant (same name again)
            def whatever(context):
                pass

    def test_can_create_nested_contexts_with_same_name(self):
        # A child may reuse its parent's name.
        def same_name(context):
            _context
            def same_name(context):
                pass
        self.assertEqual(str(Context.all_top_level_contexts[0]), 'same name')
        self.assertEqual(str(Context.all_top_level_contexts[0].children_contexts[0]), 'same name')

    def test_cant_create_nested_contexts_with_same_name(self):
        with self.assertRaisesRegex(RuntimeError, 'A context with the same name is already defined'):
            def top_context(context):
                _context
                def repeated_name(context):
                    pass
                _context('repeated name')
                def whatever(context):
                    pass

    def test_skip_contexts(self):
        def not_skipped(context):
            _context
            def skipped(context):
                _context
                def inherits_skip_setting_from_parent(context):
                    pass
            _context
            def not_skipped(context):
                pass
        def skipped(context):
            _context
            def not_skipped(context):
                _context
                def not_skipped(context):
                    pass
        self.assertFalse(Context.all_top_level_contexts[0].skip)
        self.assertTrue(Context.all_top_level_contexts[0].children_contexts[0].skip)
        self.assertFalse(Context.all_top_level_contexts[0].children_contexts[1].skip)
        self.assertTrue(Context.all_top_level_contexts[0].children_contexts[0].children_contexts[0].skip)
        self.assertTrue(Context.all_top_level_contexts[1].skip)
        self.assertTrue(Context.all_top_level_contexts[1].children_contexts[0].skip)
        self.assertTrue(Context.all_top_level_contexts[1].children_contexts[0].children_contexts[0].skip)

    def test_focus_contexts(self):
        def not_focused(context):
            _context
            def focused(context):
                _context
                def inherits_focus_setting_from_parent(context):
                    pass
            _context
            def not_focused(context):
                pass
        def focused(context):
            _context
            def not_focused(context):
                _context
                def not_focused(context):
                    pass
        self.assertFalse(Context.all_top_level_contexts[0].focus)
        self.assertTrue(Context.all_top_level_contexts[0].children_contexts[0].focus)
        self.assertFalse(Context.all_top_level_contexts[0].children_contexts[1].focus)
        self.assertTrue(Context.all_top_level_contexts[0].children_contexts[0].children_contexts[0].focus)
        self.assertTrue(Context.all_top_level_contexts[1].focus)
        self.assertTrue(Context.all_top_level_contexts[1].children_contexts[0].focus)
        self.assertTrue(Context.all_top_level_contexts[1].children_contexts[0].children_contexts[0].focus)
# NOTE(review): the leading `.parametrize(...)` line is the remnant of a
# stripped `@pytest.mark.parametrize` decorator; as written it is not
# valid Python — restore the decorator from the original file.
.parametrize('test_input, expected', [('"\\uD83D\\uDCA9"', ''), ('"a\\uD83D\\uDCA9b"', 'ab'), ('"\\uD800"', '\ud800'), ('"a\\uD800b"', 'a\ud800b'), ('"\\uDEAD"', '\udead'), ('"a\\uDEADb"', 'a\udeadb'), ('"\\uD83D\\uD83D\\uDCA9"', '\ud83d'), ('"\\uDCA9\\uD83D\\uDCA9"', '\udca9'), ('"\\uD83D\\uDCA9\\uD83D"', '\ud83d'), ('"\\uD83D\\uDCA9\\uDCA9"', '\udca9'), ('"\\uD83D \\uDCA9"', '\ud83d \udca9'), ('"\ud800"', '\ud800'), ('"\udead"', '\udead'), ('"\ud800a\udead"', '\ud800a\udead'), ('"\ud83d\udca9"', '\ud83d\udca9')])
def test_decode_surrogate_characters(test_input, expected):
    """ujson must decode surrogate escape sequences the same way as the
    stdlib json module, for both str and surrogate-passed bytes input."""
    assert (ujson.loads(test_input) == expected)
    assert (ujson.loads(test_input.encode('utf-8', 'surrogatepass')) == expected)
    # Sanity check the expectation against the reference implementation.
    assert (json.loads(test_input) == expected)
# NOTE(review): `(scope='session')` is the remnant of a stripped
# `@pytest.fixture(scope='session')` decorator — restore it from the
# original file.
(scope='session')
def group(pytestconfig, session):
    """Session-scoped fixture returning an fbchat Group, prompting once
    for the group id and caching it across runs."""
    group_id = pytestconfig.cache.get('group_id', None)
    if (not group_id):
        group_id = input("A group you're chatting with's id: ")
        pytestconfig.cache.set('group_id', group_id)
    return fbchat.Group(session=session, id=group_id)
def configure_fulltext_models_mock_with_sample_document(fulltext_models_mock: MockFullTextModels, sample_layout_document: SampleLayoutDocument):
    """Wire each model mock to emit the expected label for the matching
    block of the sample layout document."""
    doc = sample_layout_document
    mocks = fulltext_models_mock
    # (model mock, layout block, label) triples, applied in order.
    label_assignments = [
        (mocks.segmentation_model_mock, doc.header_block, '<header>'),
        (mocks.segmentation_model_mock, doc.body_block, '<body>'),
        (mocks.segmentation_model_mock, doc.ref_ref_block, '<references>'),
        (mocks.header_model_mock, doc.title_block, '<title>'),
        (mocks.header_model_mock, doc.author_block, '<author>'),
        (mocks.header_model_mock, doc.affiliation_block, '<affiliation>'),
        (mocks.affiliation_address_model_mock, doc.institution_block, '<institution>'),
        (mocks.name_header_model_mock, doc.author_surname_block, '<surname>'),
        (mocks.fulltext_model_mock, doc.body_section_title_block, '<section>'),
        (mocks.fulltext_model_mock, doc.body_section_paragraph_block, '<paragraph>'),
        (mocks.fulltext_model_mock, doc.figure_block, '<figure>'),
        (mocks.fulltext_model_mock, doc.table_block, '<table>'),
        (mocks.figure_model_mock, doc.figure_head_block, '<figure_head>'),
        (mocks.table_model_mock, doc.table_head_block, '<figure_head>'),
        (mocks.reference_segmenter_model_mock, doc.ref_label_block, '<label>'),
        (mocks.reference_segmenter_model_mock, doc.ref_text_block, '<reference>'),
        (mocks.citation_model_mock, doc.ref_title_block, '<title>'),
        (mocks.citation_model_mock, doc.ref_author_block, '<author>'),
        (mocks.name_citation_model_mock, doc.ref_author_surname_block, '<surname>'),
    ]
    for model_mock, layout_block, label in label_assignments:
        model_mock.update_label_by_layout_block(layout_block, label)
class ZVmDefineDiskSerializer(_VmDefineDiskSerializer):
    """Disk definition serializer for zone VMs.

    Disk 0 mirrors the zone's quota/zpool and is read-only; only the
    default image differs between LX and native zones.
    """
    image = s.CharField(required=True, default=settings.VMS_DISK_IMAGE_ZONE_DEFAULT, max_length=64)

    def __init__(self, request, vm, *args, **kwargs):
        super(ZVmDefineDiskSerializer, self).__init__(request, vm, *args, **kwargs)
        # Pick the DC-level default image by zone flavor.
        dc_settings = vm.dc.settings
        if vm.ostype == Vm.LINUX_ZONE:
            self.fields['image'].default = dc_settings.VMS_DISK_IMAGE_LX_ZONE_DEFAULT
        else:
            self.fields['image'].default = dc_settings.VMS_DISK_IMAGE_ZONE_DEFAULT
        if self.disk_id > 0:
            # Secondary "disk" is synthesized from the zone quota and is
            # entirely read-only.
            if not self.object:
                self.object = {}
            self.object['boot'] = False
            self.object['image'] = None
            self.object['size'] = vm.json.get('quota', 0) * 1024
            self.object['zpool'] = vm.json.get('zpool', Node.ZPOOL)
            for locked_field in ('image', 'size', 'zpool', 'boot'):
                self.fields[locked_field].read_only = True
        elif self.disk_id is not None:
            # First disk is always the boot disk.
            self.object['boot'] = True
            self.fields['boot'].read_only = True
class OptionSeriesVectorSonificationContexttracksMappingRate(Options):
    """Generated Highcharts option wrapper for the sonification rate mapping.

    NOTE(review): each getter/setter pair shares one name; upstream these
    are @property / @<name>.setter pairs whose decorators were stripped in
    this copy (as written, the second def overrides the first).
    """

    def mapFunction(self):
        """Mapping function name (no documented default)."""
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        """Point property to map to."""
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        """Upper bound of the mapped range."""
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        """Lower bound of the mapped range."""
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        """Scope the mapping is computed within."""
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def get_move_right_index_cells_code(current_pointer, node_index):
    """Emit Brainfuck code that shifts the index cells one position right.

    The emitted program starts with the code that positions the data
    pointer at `node_index` (relative to `current_pointer`), followed by
    a fixed cell-shuffling loop.
    """
    shuffle_sequence = (
        '[-]'      # zero the current cell
        '<'
        '['       # loop while the cell to the left is non-zero
        '>>'
        '[-]+'
        '<'
        '[>+<-]'  # move value one cell right
        '<'
        '-'
        '[>+<-]'
        '>'
        ']'
        '>>'
    )
    return node_index.get_code(current_pointer) + shuffle_sequence
class ReceiveTxBuilder():
    """Fluent builder assembling a TxReceive from request, output and salt."""

    def __init__(self):
        self._request = None
        self._outputs = None
        self._sig_salt = None

    def request(self, request: Request):
        """Set the transfer request; returns self for chaining."""
        self._request = request
        return self

    def output_txo(self, _outputs: Output):
        """Set the output txo; its value must equal the request value."""
        assert (self._request.value == _outputs.v)
        self._outputs = _outputs
        return self

    def sig_salt(self, _sig_salt: int):
        """Set the signature salt; must fit in the SNARK scalar field."""
        assert (_sig_salt < SNARK_SCALAR_FIELD)
        self._sig_salt = _sig_salt
        return self

    def build(self):
        """Produce the TxReceive; outputs and salt must have been set."""
        assert (self._outputs is not None) and (self._sig_salt is not None)
        return TxReceive(self._request, self._outputs, self._sig_salt)
class OptionPlotoptionsDependencywheelDatalabelsFilter(Options):
    """Generated Highcharts wrapper for the data-label filter options.

    NOTE(review): getter/setter pairs share one name; upstream these are
    @property / setter pairs whose decorators were stripped in this copy.
    """

    def operator(self):
        """Comparison operator used by the filter."""
        return self._config_get(None)

    def operator(self, value: Any):
        self._config(value, js_type=False)

    def property(self):
        """Point property the filter compares against."""
        return self._config_get(None)

    def property(self, text: str):
        self._config(text, js_type=False)
class OptionSeriesXrangeStates(Options):
    """Generated Highcharts wrapper exposing the xrange series states
    (hover/inactive/normal/select) as lazily-created sub-options."""

    def hover(self) -> 'OptionSeriesXrangeStatesHover':
        """Options applied when the series is hovered."""
        return self._config_sub_data('hover', OptionSeriesXrangeStatesHover)

    def inactive(self) -> 'OptionSeriesXrangeStatesInactive':
        """Options applied when another series is hovered."""
        return self._config_sub_data('inactive', OptionSeriesXrangeStatesInactive)

    def normal(self) -> 'OptionSeriesXrangeStatesNormal':
        """Options applied in the default state."""
        return self._config_sub_data('normal', OptionSeriesXrangeStatesNormal)

    def select(self) -> 'OptionSeriesXrangeStatesSelect':
        """Options applied when the series is selected."""
        return self._config_sub_data('select', OptionSeriesXrangeStatesSelect)
# NOTE(review): `_meta(...)` is the remnant of a stripped class decorator,
# and the string constants below appear stripped of their original
# (non-ASCII) text — restore from the original file.
_meta(characters.minoriko.AutumnFeast)
class AutumnFeast():
    """UI metadata for Minoriko's Autumn Feast skill."""
    name = ''
    description = ',<style=Card.Name></style>'

    def clickable(self):
        """Skill button is clickable only on our turn, once per turn,
        and only while we hold any card/shown card/equip."""
        me = self.me
        if (not self.my_turn()):
            return False
        if self.limit1_skill_used('autumnfeast_tag'):
            return False
        if (not (me.cards or me.showncards or me.equips)):
            return False
        return True

    def is_action_valid(self, sk, tl):
        """Valid only with exactly two associated cards, both red."""
        cl = sk.associated_cards
        from thb.cards.classes import Card
        if ((len(cl) != 2) or any(((c.color != Card.RED) for c in cl))):
            return (False, '2!')
        return (True, '~')

    def effect_string(self, act):
        return f'{N.char(act.source)}:,!'

    def sound_effect(self, act):
        return 'thb-cv-minoriko_autumnfeast'
def clear_cloudflare():
    """Purge the whole Cloudflare cache for ZONE_ID via the v4 API.

    NOTE(review): the `url` literal below is truncated in this copy
    (unterminated string — presumably the zones API endpoint with a %s
    placeholder for the zone id); restore from the original file.
    """
    url = '
    headers = {'Content-Type': 'application/json', 'Authorization': f"Bearer {os.environ['CF_API_KEY']}"}
    data = {'purge_everything': True}
    result = json.loads(requests.delete(((url % ZONE_ID) + '/purge_cache'), headers=headers, data=json.dumps(data)).text)
    if result['success']:
        print('Cloudflare clearing succeeded')
    else:
        raise ValueError(('Cloudflare clearing failed: %s' % json.dumps(result, indent=2)))
class ManagedKubernetesProcessor(KubernetesProcessor):
    """Kubernetes processor that delegates config/logging/dependencies to
    its ResourceManager.

    NOTE(review): aconf/logger/deps look like stripped @property
    accessors in this copy — confirm against the original.
    """
    manager: ResourceManager

    def __init__(self, manager: ResourceManager):
        self.manager = manager

    def aconf(self) -> Config:
        """The manager's Ambassador configuration."""
        return self.manager.aconf

    def logger(self) -> logging.Logger:
        """The manager's logger."""
        return self.manager.logger

    def deps(self) -> DependencyInjector:
        """A dependency injector scoped to this processor instance."""
        return self.manager.deps.for_instance(self)
def test_signal_method():
    """A decorated workflow method must carry a SignalMethod marker whose
    name is '<ClassName>::<method name>'.

    NOTE(review): `_method()` below is the remnant of a stripped signal
    decorator (e.g. `@signal_method()`); restore from the original file.
    """
    class TestWorkflow():
        _method()
        def the_signal_method(self):
            pass
    assert TestWorkflow.the_signal_method._signal_method
    assert isinstance(TestWorkflow.the_signal_method._signal_method, SignalMethod)
    assert (TestWorkflow.the_signal_method._signal_method.name == 'TestWorkflow::the_signal_method')
class StayView(TemplateView):
    """Booking page for a location, optionally narrowed to a single room.

    NOTE(review): the bare `_csrf` below is the remnant of a stripped
    decorator (likely a csrf-related method decorator) — restore it from
    the original file.
    """
    template_name = 'booking/booking.html'
    _csrf

    def get(self, request, *args, **kwargs):
        """Resolve room/location from the URL kwargs; redirect to the
        location page when it has no rooms with future capacity."""
        room_id = kwargs.get('room_id')
        if room_id:
            self.room = get_qs_or_404(Resource, pk=room_id).select_related('location').first()
            self.location = self.room.location
        else:
            self.room = None
            self.location = get_object_or_404(Location, slug=kwargs.get('location_slug'))
        if (not self.location.rooms_with_future_capacity()):
            msg = 'Sorry! This location does not currently have any listings.'
            messages.add_message(self.request, messages.INFO, msg)
            return HttpResponseRedirect(reverse('location_detail', args=(self.location.slug,)))
        return super().get(request, *args, **kwargs)

    def populate_room(self, context, resource_data, many):
        """Serialize one room (or all rooms, when `many`) into `context`."""
        resource = ResourceSerializer(resource_data, many=many, context={'request': self.request})
        if many:
            context['rooms'] = resource.data
        else:
            context['room'] = resource.data
        return context

    def get_context_data(self, **kwargs):
        """Build the template context including the JSON blob consumed by
        the React booking widget."""
        context = super().get_context_data(**kwargs)
        context['location'] = self.location
        is_admin = self.location.house_admins.all().filter(pk=self.request.user.pk).exists()
        # NOTE(review): `is_authenticated()` as a call is the pre-Django-1.10
        # API; on newer Django it is a property — confirm target version.
        if self.request.user.is_authenticated():
            user_drft_balance = self.request.user.profile.drft_spending_balance()
        else:
            user_drft_balance = 0
        fees = Fee.objects.filter(locationfee__location=self.location)
        react_data = {'is_house_admin': is_admin, 'user_drft_balance': user_drft_balance, 'fees': FeeSerializer(fees, many=True).data}
        resource_data = (self.room if self.room else self.location.rooms_with_future_capacity())
        use_many = (False if self.room else True)
        react_data = self.populate_room(react_data, resource_data, use_many)
        context['react_data'] = json.dumps(react_data, cls=DateEncoder)
        return context
def _split_by(string: str, delim: str) -> Tuple[(str, str)]:
    """Split `string` at the first occurrence of `delim` outside quotes.

    Both halves are stripped of surrounding whitespace. When no unquoted
    delimiter exists, returns (string, '') with `string` untouched.
    """
    assert (len(delim) == 1), 'delimiter must be of size 1'
    active_quote: Optional[str] = None
    for position, current in enumerate(string):
        if active_quote == current:
            # Matching closing quote ends the quoted region.
            active_quote = None
        elif active_quote is not None:
            # Inside quotes: everything (including the delimiter) is literal.
            continue
        elif current in ("'", '"'):
            active_quote = current
        elif current == delim:
            head = string[:position].strip()
            tail = string[position + 1:].strip()
            return (head, tail)
    return (string, '')
# NOTE(review): `_os(*metadata.platforms)` is the remnant of a stripped
# platform-restriction decorator — restore from the original file.
_os(*metadata.platforms)
def main():
    """Emulation scenario: drop a fake winword.exe, set the Office
    AccessVBOM test registry value, launch with suspicious arguments,
    then clean up."""
    winword = 'C:\\Users\\Public\\winword.exe'
    common.copy_file(EXE_FILE, winword)
    key = 'SOFTWARE\\Microsoft\\Office\\Test\\Security'
    value = 'AccessVBOM'
    data = '1'
    # Registry value exists only for the duration of the with-block.
    with common.temporary_reg(common.HKCU, key, value, data):
        pass
    common.execute([winword, '-c', 'echo', '-Embedding', ';powershell'], timeout=5, kill=True)
    common.remove_file(winword)
def getAttributeNames(object, includeMagic=1, includeSingle=1, includeDouble=1):
    """Return a sorted list of attribute-name strings for *object*.

    Parameters mirror the historical wx introspection API:
    includeMagic  -- also query the object's _getAttributeNames() hook
    includeSingle -- keep names starting with a single underscore
    includeDouble -- keep names starting with a double underscore
    (The parameter name `object` shadows the builtin but is kept for
    backward compatibility with keyword callers.)
    """
    attributes = []
    if (not introspect.hasattrAlwaysReturnsTrue(object)):
        # Badly behaved objects claim every attribute exists, so only
        # probe the special attributes for well-behaved ones.
        special_attrs = ['__bases__', '__class__', '__dict__', '__name__',
                         '__closure__', '__code__',
                         '__kwdefaults__',  # fixed: was '___kwdefaults__' (triple-underscore typo)
                         '__doc__', '__globals__']
        attributes += [attr for attr in special_attrs if hasattr(object, attr)]
    if hasattr(object, 'trait_names'):
        try:
            attributes += object.trait_names()
        except TypeError:
            pass
    if includeMagic:
        try:
            attributes += object._getAttributeNames()
        except Exception:  # narrowed from bare except; best-effort hook only
            pass
    attrdict = getAllAttributeNames(object)
    object_dir = dir(object)
    for ((obj_type_name, technique, count), attrlist) in attrdict.items():
        if ((type(object).__name__ == obj_type_name) and (technique == 'dir')):
            # dir() results on the object itself are trustworthy as-is.
            attributes += attrlist
        else:
            # Names found via other techniques are verified with hasattr().
            attributes += [attr for attr in attrlist
                           if ((attr not in object_dir) and hasattr(object, attr))]
    # De-duplicate, drop non-strings, sort case-insensitively.
    attributes = sorted({attr for attr in attributes if isinstance(attr, str)},
                        key=(lambda x: x.upper()))
    # List comprehensions (not filter()) so callers always get a list on
    # Python 3, matching the historical Python 2 behavior.
    if (not includeSingle):
        attributes = [item for item in attributes if ((item[0] != '_') or (item[1] == '_'))]
    if (not includeDouble):
        attributes = [item for item in attributes if (item[:2] != '__')]
    return attributes
class Avar2Test(unittest.TestCase):
    """Round-trip test for an avar version-2 table (var store + idx map):
    binary save/load, XML dump equality, and XML re-parse."""

    def test(self):
        axisTags = ['wght', 'wdth']
        # Minimal fvar with the two test axes.
        fvar = table__f_v_a_r()
        for tag in axisTags:
            axis = Axis()
            axis.axisTag = tag
            fvar.axes.append(axis)
        # Two masters: default, and a corner master with a wdth delta.
        master_locations_normalized = [{}, {'wght': 1, 'wdth': (- 1)}]
        data = [{}, {'wdth': (- 0.8)}]
        model = models.VariationModel(master_locations_normalized, axisTags)
        store_builder = varStore.OnlineVarStoreBuilder(axisTags)
        store_builder.setModel(model)
        varIdxes = {}
        for axis in axisTags:
            # Deltas as 2.14 fixed-point values.
            masters = [fl2fi(m.get(axis, 0), 14) for m in data]
            varIdxes[axis] = store_builder.storeMasters(masters)[1]
        store = store_builder.finish()
        # optimize() remaps variation indices; apply the mapping.
        mapping = store.optimize()
        varIdxes = {axis: mapping[value] for (axis, value) in varIdxes.items()}
        del model, store_builder, mapping
        varIdxMap = otTables.DeltaSetIndexMap()
        varIdxMap.Format = 1
        varIdxMap.mapping = []
        for tag in axisTags:
            varIdxMap.mapping.append(varIdxes[tag])
        # Build the avar table: v1 segment maps plus v2 var store.
        avar = table__a_v_a_r()
        avar.segments['wght'] = {}
        avar.segments['wdth'] = {(- 1.0): (- 1.0), 0.0: 0.0, 0.4: 0.5, 1.0: 1.0}
        avar.majorVersion = 2
        avar.table = otTables.avar()
        avar.table.VarIdxMap = varIdxMap
        avar.table.VarStore = store
        font = TTFont()
        font['fvar'] = fvar
        font['avar'] = avar
        # Binary round-trip.
        b = BytesIO()
        font.save(b)
        b.seek(0)
        font2 = TTFont(b)
        assert (font2['avar'].table.VarStore.VarRegionList.RegionAxisCount == 2)
        assert (font2['avar'].table.VarStore.VarRegionList.RegionCount == 1)
        # XML dumps of original and reloaded tables must be identical.
        xml1 = BytesIO()
        writer = XMLWriter(xml1)
        font['avar'].toXML(writer, font)
        xml2 = BytesIO()
        writer = XMLWriter(xml2)
        font2['avar'].toXML(writer, font2)
        assert (xml1.getvalue() == xml2.getvalue()), (xml1.getvalue(), xml2.getvalue())
        # XML round-trip: re-parse the dump (minus the XML declaration).
        avar = table__a_v_a_r()
        xml = b''.join(xml2.getvalue().splitlines()[1:])
        for (name, attrs, content) in parseXML(xml):
            avar.fromXML(name, attrs, content, ttFont=TTFont())
        assert (avar.table.VarStore.VarRegionList.RegionAxisCount == 2)
        assert (avar.table.VarStore.VarRegionList.RegionCount == 1)
def extractWwwTinytranslationXyz(item):
    """Map a feed item to a release message.

    Returns None for items without a chapter/volume or previews, a built
    release message when a known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # Known tags: (feed tag, series name, translation type).
    known_tags = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for feed_tag, series_name, release_type in known_tags:
        if feed_tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=release_type)
    return False
def operator_in(item, field, value):
    """True when item[field] matches `value` as an IP network or as one of
    the comma-separated literal values.

    When both item[field] and value parse as IP networks the comparison is
    delegated to operator_in_network; otherwise plain membership in the
    comma-split value list is used.
    """
    if (field not in item):
        return False
    try:
        # Validate both sides as IP networks before delegating.
        ipaddress.ip_network(item[field])
        ipaddress.ip_network(value)
        return operator_in_network(item[field], value)
    except ValueError:
        # Not IP data — fall through to literal membership.
        pass
    # `field in item` was already established above, so the redundant
    # re-check from the original is dropped.
    return (item[field] in value.split(','))
class OptionPlotoptionsTilemapSonificationContexttracksMappingPlaydelay(Options):
    """Generated Highcharts wrapper for the sonification play-delay mapping.

    NOTE(review): each getter/setter pair shares one name; upstream these
    are @property / @<name>.setter pairs whose decorators were stripped in
    this copy (as written, the second def overrides the first).
    """

    def mapFunction(self):
        """Mapping function name (no documented default)."""
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        """Point property to map to."""
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        """Upper bound of the mapped range."""
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        """Lower bound of the mapped range."""
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        """Scope the mapping is computed within."""
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class OptionSeriesTreemapSonification(Options):
    """Generated Highcharts wrapper for treemap series sonification options.

    NOTE(review): the duplicated `enabled` defs are a stripped
    @property / setter pair (as written, the setter overrides the getter).
    """

    def contextTracks(self) -> 'OptionSeriesTreemapSonificationContexttracks':
        """Context tracks sub-options."""
        return self._config_sub_data('contextTracks', OptionSeriesTreemapSonificationContexttracks)

    def defaultInstrumentOptions(self) -> 'OptionSeriesTreemapSonificationDefaultinstrumentoptions':
        """Defaults applied to instrument tracks."""
        return self._config_sub_data('defaultInstrumentOptions', OptionSeriesTreemapSonificationDefaultinstrumentoptions)

    def defaultSpeechOptions(self) -> 'OptionSeriesTreemapSonificationDefaultspeechoptions':
        """Defaults applied to speech tracks."""
        return self._config_sub_data('defaultSpeechOptions', OptionSeriesTreemapSonificationDefaultspeechoptions)

    def enabled(self):
        """Whether sonification is enabled (default True)."""
        return self._config_get(True)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def pointGrouping(self) -> 'OptionSeriesTreemapSonificationPointgrouping':
        """Point grouping sub-options."""
        return self._config_sub_data('pointGrouping', OptionSeriesTreemapSonificationPointgrouping)

    def tracks(self) -> 'OptionSeriesTreemapSonificationTracks':
        """Track definitions sub-options."""
        return self._config_sub_data('tracks', OptionSeriesTreemapSonificationTracks)
class Run():
    """Console/file logger with aligned key/value output, single-line
    messages, warnings, and optional progress-bar-aware writes."""

    def __init__(self, log_file_path=None, verbose=True, width=45):
        self.log_file_path = log_file_path     # None disables file logging
        self.info_dict = {}                    # remembers every non-display-only key/value
        self.verbose = verbose                 # gate for stderr output
        self.width = width                     # label column width for alignment
        self.single_line_prefixes = {1: '* ', 2: '    - ', 3: '        > '}

    def log(self, line):
        """Append a timestamped, color-stripped line to the log file."""
        if (not self.log_file_path):
            self.warning('The run object got a logging request, but it was not inherited with a log file path :(')
            return
        with open(self.log_file_path, 'a') as log_file:
            log_file.write(('[%s] %s\n' % (get_date(), CLEAR(line))))

    def write(self, line, quiet=False, overwrite_verbose=False):
        """Log to file (if configured) and echo to stderr unless quiet;
        `overwrite_verbose` forces output even when verbose is off."""
        if self.log_file_path:
            self.log(line)
        if ((self.verbose and (not quiet)) or overwrite_verbose):
            try:
                sys.stderr.write(line)
            except Exception:
                # Fall back to raw bytes for streams that reject str.
                sys.stderr.write(line.encode('utf-8'))

    def info(self, key, value, quiet=False, display_only=False, overwrite_verbose=False, nl_before=0, nl_after=0, lc='cyan', mc='yellow', progress=None):
        """Print an aligned, dotted 'key ....: value' line (and remember it
        in info_dict unless display_only)."""
        if (not display_only):
            self.info_dict[key] = value
        # Normalize the value for display: strings are de-spaced, ints
        # pretty-printed, bools left as-is.
        if isinstance(value, bool):
            pass
        elif isinstance(value, str):
            value = remove_spaces(value)
        elif isinstance(value, int):
            value = pretty_print(value)
        label = key
        info_line = ('%s%s %s: %s\n%s' % (('\n' * nl_before), color_text(label, lc), ('.' * (self.width - len(label))), color_text(str(value), mc), ('\n' * nl_after)))
        if progress:
            # Clear the progress bar, write, then restore it.
            progress.clear()
            # NOTE(review): overwrite_verbose is hard-coded to False here
            # while the non-progress branch honors the parameter — confirm
            # whether this asymmetry is intentional.
            self.write(info_line, overwrite_verbose=False, quiet=quiet)
            progress.update(progress.msg)
        else:
            self.write(info_line, quiet=quiet, overwrite_verbose=overwrite_verbose)

    def info_single(self, message, overwrite_verbose=False, mc='yellow', nl_before=0, nl_after=0, cut_after=80, level=1, progress=None):
        """Print a single bullet-style message at the given indent level,
        optionally wrapped at `cut_after` columns."""
        if isinstance(message, str):
            message = remove_spaces(message)
        if (level not in self.single_line_prefixes):
            raise Exception(f'the `info_single` function does not know how to deal with a level of {level} :/')
        if cut_after:
            message_line = color_text(('%s%s\n' % (self.single_line_prefixes[level], textwrap.fill(str(message), cut_after))), mc)
        else:
            message_line = color_text(('%s%s\n' % (self.single_line_prefixes[level], str(message))), mc)
        message_line = ((('\n' * nl_before) + message_line) + ('\n' * nl_after))
        # NOTE(review): both branches pass overwrite_verbose=False, so the
        # parameter is effectively ignored — confirm intent.
        if progress:
            progress.clear()
            self.write(message_line, overwrite_verbose=False)
            progress.update(progress.msg)
        else:
            self.write(message_line, overwrite_verbose=False)

    def warning(self, message, header='WARNING', lc='red', raw=False, overwrite_verbose=False, nl_before=0, nl_after=0):
        """Print a boxed warning header plus an optional (wrapped) message;
        `raw` skips the 80-column re-wrapping."""
        if isinstance(message, str):
            message = remove_spaces(message)
        message_line = ''
        header_line = color_text(('%s\n%s\n%s\n' % (('\n' * nl_before), header, ('=' * (self.width + 2)))), lc)
        if raw:
            message_line = color_text(('%s\n\n%s' % (message, ('\n' * nl_after))), lc)
        else:
            message_line = color_text(('%s\n\n%s' % (textwrap.fill(str(message), 80), ('\n' * nl_after))), lc)
        self.write(((header_line + message_line) if message else header_line), overwrite_verbose=overwrite_verbose)

    def quit(self):
        """Final log entry before shutdown."""
        if self.log_file_path:
            self.log('Bye.')
def hstack(arrays: Tuple[(ArrayLike, ...)]) -> Array:
    """Stack the given arrays horizontally (column-wise) into one array.

    Mirrors numpy's ``hstack`` semantics as far as this code shows:

    - if any input is a scalar (empty shape), the whole argument tuple is
      flattened into a 1-D array of length ``len(arrays)``;
    - if any input is 1-D, all inputs are raveled and concatenated into one
      1-D array;
    - otherwise each array contributes its columns (axis 1) to the result,
      which keeps the first array's remaining dimensions.

    Raises ``ValueError`` when ``arrays`` is empty.

    NOTE(review): relies on external helpers `shape`, `reshape`, `ravel`,
    `zipl`, and `_hstack_extract` defined elsewhere — presumably `zipl` is a
    length-checked zip and `_hstack_extract` yields row-chunks per array;
    confirm against their definitions.
    """
    columns = 0
    shapes = []
    first = None  # shape of the first array, used to build the output shape
    for a in arrays:
        cs = shape(a)
        if (first is None):
            first = cs
        if (not cs):
            # Scalar input: treat the whole tuple as a flat 1-D vector.
            return cast(Array, reshape(cast(VectorLike, arrays), (len(arrays),)))
        elif (len(cs) == 1):
            # 1-D input: ravel everything and concatenate into one vector.
            m1 = []
            for a1 in arrays:
                m1.extend(ravel(a1))
            return cast(Array, reshape(m1, (len(m1),)))
        # >= 2-D from here on (earlier cases returned), so cs[1] is safe.
        columns += cs[1]
        shapes.append(cs)
    if (first is None):
        raise ValueError("'hstack' requires at least one array")
    m = []
    # Interleave per-row chunks from every array so columns line up row by row.
    for data in zipl(*[_hstack_extract(a, s) for (a, s) in zipl(arrays, shapes)]):
        m.extend(sum(data, []))
    # Output keeps dim 0 and trailing dims of the first array; dim 1 is the
    # summed column count.
    new_shape = ((first[:1] + tuple([columns])) + first[2:])
    return cast(Array, reshape(cast(Array, m), new_shape))
class OptionSeriesBulletSonificationDefaultinstrumentoptionsMappingPitch(Options):
    """Generated option wrapper for
    `series.bullet.sonification.defaultInstrumentOptions.mapping.pitch`.

    NOTE(review): every option appears as a getter/setter pair with the same
    name; as written the second `def` shadows the first, so presumably
    `@property` / `@<name>.setter` decorators were lost when this generated
    file was produced — confirm against the generator's output.
    """
    def mapFunction(self):
        # Getter; no documented default.
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter; stored as a plain (non-JS) value.
        self._config(value, js_type=False)
    def mapTo(self):
        # Getter; defaults to mapping pitch to the point's 'y' value.
        return self._config_get('y')
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        # Getter; highest note, default 'c6'.
        return self._config_get('c6')
    def max(self, text: str):
        self._config(text, js_type=False)
    def min(self):
        # Getter; lowest note, default 'c2'.
        return self._config_get('c2')
    def min(self, text: str):
        self._config(text, js_type=False)
    def scale(self):
        # Getter; no documented default.
        return self._config_get(None)
    def scale(self, value: Any):
        self._config(value, js_type=False)
    def within(self):
        # Getter; values are mapped within the 'yAxis' extremes by default.
        return self._config_get('yAxis')
    def within(self, text: str):
        self._config(text, js_type=False)
class FlagInstance(EnumInstance):
    """A bit-flag enum member supporting |, &, ^ and ~ composition.

    Combined values that don't match a single named member are rendered as a
    '|'-joined list of the members they decompose into.
    """
    def __bool__(self):
        # Truthy iff any bit is set.
        return bool(self.value)
    # Python 2 spelling of __bool__.
    __nonzero__ = __bool__
    def __or__(self, other):
        # Accept either another flag instance or a raw integer.
        if hasattr(other, 'value'):
            other = other.value
        return self.__class__(self.enum, (self.value | other))
    def __and__(self, other):
        if hasattr(other, 'value'):
            other = other.value
        return self.__class__(self.enum, (self.value & other))
    def __xor__(self, other):
        if hasattr(other, 'value'):
            other = other.value
        return self.__class__(self.enum, (self.value ^ other))
    # Reflected operators share the same implementations (| & ^ are symmetric).
    __ror__ = __or__
    __rand__ = __and__
    __rxor__ = __xor__
    def __invert__(self):
        return self.__class__(self.enum, (~ self.value))
    def __str__(self):
        # NOTE(review): `self.name` here is a bound method (see `name` below),
        # so `is not None` is always true as written — presumably `name` was a
        # @property before decorators were lost; confirm upstream.
        if (self.name is not None):
            return '{}.{}'.format(self.enum.name, self.name)
        (members, _) = self.decompose()
        return '{}.{}'.format(self.enum.name, '|'.join([str((name or value)) for (name, value) in members]))
    def __repr__(self):
        # Same `self.name` caveat as __str__ above.
        if (self.name is not None):
            return '<{}.{}: {}>'.format(self.enum.name, self.name, self.value)
        (members, _) = self.decompose()
        return '<{}.{}: {}>'.format(self.enum.name, '|'.join([str((name or value)) for (name, value) in members]), self.value)
    def name(self):
        # Reverse-lookup of this value's name in the enum; None if unnamed.
        return self.enum.reverse.get(self.value, None)
    def decompose(self):
        """Split this value into (members, not_covered).

        `members` is a list of (name, value) pairs whose bits are fully
        contained in this value; `not_covered` holds any leftover bits that no
        named member accounts for.
        """
        members = []
        not_covered = self.value
        for (name, value) in self.enum.values.items():
            # Keep members whose bits are a subset of ours (skip zero values).
            if (value and ((value & self.value) == value)):
                members.append((name, value))
                not_covered &= (~ value)
        if (not members):
            members.append((None, self.value))
        # NOTE(review): sorting by name will raise on Python 3 if a (None, ...)
        # entry is mixed with named entries — appears unreachable here since
        # (None, ...) is only added when `members` is otherwise empty.
        members.sort(key=(lambda m: m[0]), reverse=True)
        # Drop a redundant leading member that alone equals the full value.
        if ((len(members) > 1) and (members[0][1] == self.value)):
            members.pop(0)
        return (members, not_covered)
def get_required_transaction(w3: 'Web3', transaction_hash: _Hash32) -> TxData:
    """Look up a transaction by hash, insisting that it exists.

    Returns the transaction data for `transaction_hash`, raising `ValueError`
    when the node has no such transaction.
    """
    transaction = w3.eth.get_transaction(transaction_hash)
    if transaction:
        return transaction
    raise ValueError(f'Supplied transaction with hash {transaction_hash!r} does not exist')
class TLS(AmbassadorTest):
    """KAT test: TLS termination via a Host whose insecure policy is Reject.

    Installs a TLS secret plus a Host that serves it, maps /tls-target/ to a
    plain HTTP backend, and queries it over HTTPS (insecure=True because the
    test cert is self-signed for 'localhost').
    """
    target: ServiceType

    def init(self):
        self.xfail = 'FIXME: IHA'
        self.target = HTTP()

    def manifests(self) -> str:
        # FIX: the YAML below had its leading indentation stripped (e.g.
        # `name:` flush-left under `metadata:`), which is not valid YAML;
        # conventional two-space nesting restored.
        return (f'''
---
apiVersion: v1
kind: Secret
metadata:
  name: test-tls-secret
  labels:
    kat-ambassador-id: tls
type: kubernetes.io/tls
data:
  tls.crt: {TLSCerts['localhost'].k8s_crt}
  tls.key: {TLSCerts['localhost'].k8s_key}
---
apiVersion: v1
kind: Secret
metadata:
  name: ambassador-certs
  labels:
    kat-ambassador-id: tls
type: kubernetes.io/tls
data:
  tls.crt: {TLSCerts['localhost'].k8s_crt}
  tls.key: {TLSCerts['localhost'].k8s_key}
---
apiVersion: getambassador.io/v3alpha1
kind: Host
metadata:
  name: tls-host
  labels:
    kat-ambassador-id: tls
spec:
  ambassador_id: [tls]
  tlsSecret:
    name: test-tls-secret
  requestPolicy:
    insecure:
      action: Reject
''' + super().manifests())

    def config(self) -> Generator[(Union[(str, Tuple[(Node, str)])], None, None)]:
        # Route /tls-target/ to the plain-HTTP backend service.
        (yield (self.target, self.format('\n---\napiVersion: getambassador.io/v3alpha1\nkind: Mapping\nname: tls_target_mapping\nprefix: /tls-target/\nservice: {self.target.path.fqdn}\n')))

    def scheme(self) -> str:
        # FIX: this literal was truncated to a bare quote (`return '`), a
        # syntax error. 'https' restored: the test terminates TLS and its
        # queries use insecure=True for the self-signed cert.
        return 'https'

    def queries(self):
        (yield Query(self.url('tls-target/'), insecure=True))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.