# NOTE: dataset-export residue (markdown table header) — not part of the code.
def fft(field, domain, poly):
    """Additive FFT: evaluate *poly* at every point of *domain*.

    Recursively splits the polynomial into even/odd parts (via ``cast``),
    evaluates both on a half-size domain, and recombines.  Small domains fall
    back to the direct transform.
    """
    # For tiny domains the O(n^2) direct evaluation beats the recursion.
    if len(domain) <= 8:
        return _simple_ft(field, domain, poly)
    offset = domain[1]
    evens, odds = cast(field, poly, offset)
    # Half-size domain: map every other point x to x * (offset ^ x).
    sub_domain = [field.mul(pt, offset ^ pt) for pt in domain[::2]]
    evens_at = fft(field, sub_domain, evens)
    odds_at = fft(field, sub_domain, odds)
    out = []
    for idx, (ev, od) in enumerate(zip(evens_at, odds_at)):
        # Each half-domain value yields two outputs, one per adjacent point.
        out.append(ev ^ field.mul(domain[2 * idx], od))
        out.append(ev ^ field.mul(domain[2 * idx + 1], od))
    return out
def infer_device_type(df: pd.DataFrame) -> DeviceType:
    """Guess whether *df* holds GPU or CPU trace rows.

    Heuristic: GPU rows carry positive ``stream`` ids (or zero pid/tid when
    those columns exist); CPU rows use ``stream == -1``.
    """
    cols = set(df.columns)
    if 'stream' in cols and not {'pid', 'tid'}.issubset(cols):
        # Only the stream column is usable.
        if (df.stream.unique() > 0).all():
            return DeviceType.GPU
        if (df.stream.unique() == -1).all():
            return DeviceType.CPU
    elif {'stream', 'pid', 'tid'}.issubset(cols):
        gpu_like = (
            (df.stream.unique() > 0).all()
            or (df.pid.unique() == 0).all()
            or (df.tid.unique() == 0).all()
        )
        if gpu_like:
            return DeviceType.GPU
        if (df.stream.unique() == -1).all():
            return DeviceType.CPU
    return DeviceType.UNKNOWN
class SliceTemplate(models.Model):
    # Network-slice template: ties generic templates and NFVO plugins to the
    # slice-subnet instances created from them.
    # Client-side generated primary key; hidden from admin forms.
    templateId = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Optional free-text description.
    description = models.TextField(null=True, blank=True)
    # NFVO drivers this template can be deployed through.
    nfvoType = models.ManyToManyField(ServiceMappingPluginModel)
    genericTemplates = models.ManyToManyField(GenericTemplate, related_name='templates')
    # NOTE(review): related_name 'instsances' looks like a typo for
    # 'instances', but renaming it changes the reverse accessor and requires
    # a migration — confirm before fixing.
    instanceId = models.ManyToManyField(NetworkSliceSubnet, blank=True, related_name='instsances')
class TestDevice(object):
    """Tests for the Aerox 9 Wireless driver against a fake HID device.

    Each test calls the high-level Mouse API, then reads back the raw bytes
    written to the fake HID device and compares them with the expected HID
    report.  Responses are fixed 64-byte reports.

    Fix: the ``.parametrize(...)`` lines had lost their ``@pytest.mark``
    prefix and the ``mouse`` fixture its ``@pytest.fixture`` decorator,
    leaving the file syntactically invalid; the decorators are restored.
    """

    @pytest.fixture
    def mouse(self):
        # `mouse` here is the module; the fixture shadows it only in tests.
        settings = mouse_settings.FakeMouseSettings(4152, 47789, aerox9_wireless_wireless.profile)
        return mouse.Mouse(usbhid.FakeDevice(), aerox9_wireless_wireless.profile, settings)

    @pytest.mark.parametrize('value,expected_hid_report', [(100, b'\x02\x00m\x01\x00\x00'), (200, b'\x02\x00m\x01\x00\x01'), (300, b'\x02\x00m\x01\x00\x02'), (18000, b'\x02\x00m\x01\x00\xd6'), ('200,400', b'\x02\x00m\x02\x00\x01\x03'), ('200,400,800,1600', b'\x02\x00m\x04\x00\x01\x03\x08\x11')])
    def test_set_sensitivity(self, mouse, value, expected_hid_report):
        response = mouse.set_sensitivity(value)
        mouse._hid_device.bytes.seek(0)
        hid_report = mouse._hid_device.bytes.read()
        assert (hid_report == expected_hid_report)
        assert (len(response) == 64)

    @pytest.mark.parametrize('value,expected_hid_report', [(125, b'\x02\x00k\x03'), (250, b'\x02\x00k\x02'), (500, b'\x02\x00k\x01'), (1000, b'\x02\x00k\x00')])
    def test_set_polling_rate(self, mouse, value, expected_hid_report):
        response = mouse.set_polling_rate(value)
        mouse._hid_device.bytes.seek(0)
        hid_report = mouse._hid_device.bytes.read()
        assert (hid_report == expected_hid_report)
        assert (len(response) == 64)

    @pytest.mark.parametrize('value,expected_hid_report', [('#ABCDEF', b'\x02\x00a\x01\x00\xab\xcd\xef'), ('red', b'\x02\x00a\x01\x00\xff\x00\x00')])
    def test_set_z1_color(self, mouse, value, expected_hid_report):
        response = mouse.set_z1_color(value)
        mouse._hid_device.bytes.seek(0)
        hid_report = mouse._hid_device.bytes.read()
        assert (hid_report == expected_hid_report)
        assert (len(response) == 64)

    @pytest.mark.parametrize('value,expected_hid_report', [('#ABCDEF', b'\x02\x00a\x01\x01\xab\xcd\xef'), ('red', b'\x02\x00a\x01\x01\xff\x00\x00')])
    def test_set_z2_color(self, mouse, value, expected_hid_report):
        response = mouse.set_z2_color(value)
        mouse._hid_device.bytes.seek(0)
        hid_report = mouse._hid_device.bytes.read()
        assert (hid_report == expected_hid_report)
        assert (len(response) == 64)

    @pytest.mark.parametrize('value,expected_hid_report', [('#ABCDEF', b'\x02\x00a\x01\x02\xab\xcd\xef'), ('red', b'\x02\x00a\x01\x02\xff\x00\x00')])
    def test_set_z3_color(self, mouse, value, expected_hid_report):
        response = mouse.set_z3_color(value)
        mouse._hid_device.bytes.seek(0)
        hid_report = mouse._hid_device.bytes.read()
        assert (hid_report == expected_hid_report)
        assert (len(response) == 64)

    @pytest.mark.parametrize('value,expected_hid_report', [('red', b'\x02\x00f\x01\x00\xff\x00\x00'), ('#ff1802', b'\x02\x00f\x01\x00\xff\x18\x02'), ('disable', b'\x02\x00f\x00\x00\x00\x00\x00'), ('off', b'\x02\x00f\x00\x00\x00\x00\x00')])
    def test_set_reactive_color(self, mouse, value, expected_hid_report):
        response = mouse.set_reactive_color(value)
        mouse._hid_device.bytes.seek(0)
        hid_report = mouse._hid_device.bytes.read()
        assert (hid_report == expected_hid_report)
        assert (len(response) == 64)

    @pytest.mark.parametrize('value,expected_hid_report', [(0, b'\x02\x00i\x00\x00\x00'), (1, b'\x02\x00i`\xea\x00'), (5, b'\x02\x00i\xe0\x93\x04'), (20, b'\x02\x00i\x80O\x12')])
    def test_set_sleep_timer(self, mouse, value, expected_hid_report):
        response = mouse.set_sleep_timer(value)
        mouse._hid_device.bytes.seek(0)
        hid_report = mouse._hid_device.bytes.read()
        assert (hid_report == expected_hid_report)
        assert (len(response) == 64)

    @pytest.mark.parametrize('value,expected_hid_report', [(0, b'\x02\x00c\x0f\x01\x00\x00\x00\x00\x00'), (30, b'\x02\x00c\x0f\x01\x00\x000u\x00'), (60, b'\x02\x00c\x0f\x01\x00\x00`\xea\x00'), (300, b'\x02\x00c\x0f\x01\x00\x00\xe0\x93\x04'), (1200, b'\x02\x00c\x0f\x01\x00\x00\x80O\x12')])
    def test_set_dim_timer(self, mouse, value, expected_hid_report):
        response = mouse.set_dim_timer(value)
        mouse._hid_device.bytes.seek(0)
        hid_report = mouse._hid_device.bytes.read()
        assert (hid_report == expected_hid_report)
        assert (len(response) == 64)

    def test_set_rainbow_effect(self, mouse):
        response = mouse.set_rainbow_effect()
        mouse._hid_device.bytes.seek(0)
        hid_report = mouse._hid_device.bytes.read()
        assert (hid_report == b'\x02\x00b\xff')
        assert (len(response) == 64)

    @pytest.mark.parametrize('value,expected_hid_report', [('off', b'\x02\x00g\x00\x00'), ('reactive', b'\x02\x00g\x00\x01'), ('rainbow', b'\x02\x00g\x01\x00'), ('reactive-rainbow', b'\x02\x00g\x01\x01')])
    def test_set_default_lighting(self, mouse, value, expected_hid_report):
        mouse.set_default_lighting(value)
        mouse._hid_device.bytes.seek(0)
        hid_report = mouse._hid_device.bytes.read()
        assert (hid_report == expected_hid_report)

    def test_battery_level(self, mouse):
        battery_info = mouse.battery
        mouse._hid_device.bytes.seek(0)
        hid_report = mouse._hid_device.bytes.read()
        assert (hid_report == b'\x02\x00\xd2')
        assert ('is_charging' in battery_info)
        assert ('level' in battery_info)

    def test_save(self, mouse):
        response = mouse.save()
        mouse._hid_device.bytes.seek(0)
        hid_report = mouse._hid_device.bytes.read()
        assert (hid_report == b'\x02\x00Q\x00')
        assert (len(response) == 64)
def main(numpy: bool=False, pytorch: bool=False, generic: bool=False, gpu_id: int=(- 1)):
    """Benchmark the configured model on each requested backend.

    For every enabled backend (pytorch / numpy / generic) this builds the
    model from the module-level CONFIG string, runs a forward-only pass and a
    forward+backward pass over dummy data, and prints wall-clock timings.
    gpu_id >= 0 pins execution to that GPU; -1 stays on CPU.
    """
    global CONFIG
    # Deterministic runs so timings are comparable across backends.
    fix_random_seed(0)
    if (gpu_id >= 0):
        require_gpu(gpu_id)
        print('Set GPU', gpu_id)
    backends = {'pytorch': pytorch, 'numpy': numpy, 'generic': generic}
    for (name, use_backend) in backends.items():
        if (not use_backend):
            print(f'Skipping {name}')
            continue
        set_backend(name, gpu_id)
        print('Getting data')
        C = registry.resolve(Config().from_str(CONFIG))
        model = C['model']
        (X, Y) = get_dummy_data(**C['data'])
        print('Copy to device')
        X = [model.ops.asarray(x) for x in X]
        Y = [model.ops.asarray(y) for y in Y]
        print('Begin init', len(X))
        # Shape inference only needs a small sample.
        model.initialize(X=X[:5])
        print('Pre-batch')
        n_words = sum((len(x) for x in X))
        batches = model.ops.multibatch(16, X, Y)
        # Pre-compute the first layer's output and drop that layer, so the
        # timed loops measure only the remaining layers.
        batches = [(model.layers[0].predict(x), y) for (x, y) in batches]
        model.layers.pop(0)
        print('Start')
        start_time = timer()
        total = run_forward(model, [x for (x, y) in batches])
        end_time = timer()
        print(name, n_words, total, (end_time - start_time))
        start_time = timer()
        total = run_forward_backward(model, batches)
        end_time = timer()
        print(name, n_words, total, (end_time - start_time))
def extractCakesnorterBlogspotCom(item):
    """Turn a cakesnorter.blogspot.com feed item into a release message.

    Returns None for previews or items without a volume/chapter, a built
    release message when a known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # (feed tag, series name, translation type)
    tagmap = [('back to before i married the tyrant', 'Back to Before I Married the Tyrant [Rebirth]', 'translated'), ('bbmt', 'Back to Before I Married the Tyrant [Rebirth]', 'translated'), ('rcbw', 'Rebirth of the Campus Bussiness Woman', 'translated'), ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for tagname, series, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def read_relation_as_df(adapter: RedshiftAdapter, relation: BaseRelation) -> pd.DataFrame:
    """Load the full contents of *relation* into a pandas DataFrame via awswrangler."""
    assert (adapter.type() == 'redshift')
    query = f'SELECT * FROM {relation}'
    with new_connection(adapter, 'fal-redshift:read_relation_as_df') as conn:
        return wr.redshift.read_sql_query(query, con=conn.handle)
class FBPrintInternals(fb.FBCommand):
    """Chisel command `pinternals`: show an object's ivars by casting it to
    its concrete class and dereferencing it as a pointer."""

    def name(self):
        return 'pinternals'

    def description(self):
        return 'Show the internals of an object by dereferencing it as a pointer.'

    def args(self):
        return [fb.FBCommandArgument(arg='object', type='id', help='Object expression to be evaluated.')]

    def options(self):
        return [fb.FBCommandArgument(arg='appleWay', short='-a', long='--apple', boolean=True, default=False, help='Print ivars the apple way')]

    def run(self, arguments, options):
        # Renamed local from `object` (shadowed the builtin).
        obj = fb.evaluateObjectExpression(arguments[0])
        if options.appleWay:
            # Fixed: the respondsToSelector test was missing `@selector(...)`,
            # making the Objective-C expression invalid so the branch could
            # never succeed.
            if fb.evaluateBooleanExpression('[{} respondsToSelector:@selector(_ivarDescription)]'.format(obj)):
                command = 'po [{} _ivarDescription]'.format(obj)
            else:
                print('Sorry, but it seems Apple dumped the _ivarDescription method')
                return
        else:
            # Fall back to casting the pointer to its runtime class and
            # printing the dereferenced struct.
            objectClass = fb.evaluateExpressionValue((('(id)[(id)(' + obj) + ') class]')).GetObjectDescription()
            command = 'p *(({} *)((id){}))'.format(objectClass, obj)
        lldb.debugger.HandleCommand(command)
def test_split_full_desktop_to_screens(monkeypatch):
    """Splitting a 300x120 desktop capture across three mocked 100x100
    screens yields one solid-colored 100x100 image per screen."""
    class MockedPrimaryScreen():
        # Virtual desktop spanning all three mocked screens.
        def virtualGeometry(self) -> QtCore.QRect:
            return QtCore.QRect(0, 0, 300, 120)
    class MockedScreen():
        def __init__(self, left, top, width, height):
            self._geometry = QtCore.QRect(left, top, width, height)
        def geometry(self):
            return self._geometry
    def mocked_screens() -> list:
        # Three side-by-side screens with increasing vertical offsets.
        return [MockedScreen(0, 0, 100, 100), MockedScreen(100, 10, 100, 100), MockedScreen(200, 20, 100, 100)]
    def convert_to_pixels(image):
        # Flatten the raw image buffer into 4-byte tuples per pixel.
        # NOTE(review): the byte order of Format_RGB32 raw data is what the
        # expected tuples below encode — confirm it is stable across platforms.
        image = image.convertToFormat(QtGui.QImage.Format.Format_RGB32)
        ptr = image.constBits()
        values = list(ptr)
        return [tuple(values[i:(i + 4)]) for i in range(0, len(values), 4)]
    monkeypatch.setattr(QtWidgets.QApplication, 'primaryScreen', MockedPrimaryScreen)
    monkeypatch.setattr(QtWidgets.QApplication, 'screens', mocked_screens)
    # Fixture image lives next to this test file.
    img_path = (Path(__file__).parent / 'split_full_desktop_to_screens.png')
    image = QtGui.QImage()
    image.load(str(img_path.resolve()))
    split_images = utils.split_full_desktop_to_screens(image)
    assert (len(split_images) == len(mocked_screens()))
    assert ({split_images[i].size().toTuple() for i in range(3)} == {(100, 100)})
    # Each split image must be uniformly the color painted for that screen.
    assert (set(convert_to_pixels(split_images[0])) == {(0, 0, 255, 255)})
    assert (set(convert_to_pixels(split_images[1])) == {(0, 255, 0, 255)})
    assert (set(convert_to_pixels(split_images[2])) == {(255, 0, 0, 255)})
class Queen(Piece):
    # Queen piece: slides along ranks, files and diagonals.
    def __init__(self, x, y, c):
        super().__init__(x, y, c)
        # NOTE(review): letter is the empty string — presumably the queen is
        # rendered from a sprite rather than a letter; confirm ('Q' otherwise).
        self.set_letter('')
    def drag(self, new_p, pieces):
        # Snap the drag to the nearest queen axis and slide along it.
        if self.grabbed:
            # Axis candidates cover one sign per direction; select_path
            # presumably returns a signed distance to handle the other sign —
            # TODO confirm.
            (path, dist) = self.select_path((self.start_x, self.start_y), [[1, 1], [(- 1), 1], [1, 0], [0, 1]], new_p)
            path_len = math.sqrt(((path[0] ** 2) + (path[1] ** 2)))
            # Normalize the axis vector, scale by drag distance, then slide
            # (slide handles collision with other pieces).
            self.slide(((path[0] * dist) / path_len), ((path[1] * dist) / path_len), pieces)
    def draw_paths(self, pieces):
        # Highlight every reachable line from the start square.
        if self.targeted:
            return
        # Probe with a throwaway clone so the real piece is not moved.
        fake_piece = Queen(self.start_x, self.start_y, self.color)
        # All eight queen directions, scaled well past the board edge.
        directions = [[10, 10], [(- 10), (- 10)], [10, (- 10)], [(- 10), 10], [0, 10], [0, (- 10)], [10, 0], [(- 10), 0]]
        end_positions = []
        for d in directions:
            # fake=True: collision-limited endpoint only, no capture effects.
            fake_piece.slide(d[0], d[1], [p for p in pieces if (p != self)], fake=True)
            end_positions.append((fake_piece.x, fake_piece.y))
        # Reset the probe back to the start square.
        fake_piece.slide(0, 0, [p for p in pieces if (p != self)], fake=True)
        for end_pos in end_positions:
            draw_line_round_corners_polygon(see_through, to_screen_coords((self.start_x, self.start_y)), to_screen_coords(end_pos), RED_HIGHLIGHT, (((self.radius * 2) * 640) / 8))
def _delete_symlinks(bench_path):
    """Remove this bench's systemd unit files and reload the systemd daemon."""
    bench_dir = os.path.abspath(bench_path)
    systemd_dir = os.path.join('/', 'etc', 'systemd', 'system')
    for unit in get_unit_files(bench_dir):
        # Unit names may come back as name/extension parts; join them flat.
        exec_cmd(f"sudo rm {systemd_dir}/{''.join(unit)}")
    exec_cmd('sudo systemctl daemon-reload')
@pytest.mark.xfail(raises=ImageComparisonFailure, reason='Matplotlib plots for reasons a different image size.')
@pytest.mark.skipif((LOW_MEMORY > memory), reason='Travis has too less memory to run it.')
def test_hicPlotMatrix_h5_region1():
    """Plot a chrX region from an .h5 matrix and compare with the reference image.

    Fix: both decorators had lost their ``@pytest.mark`` prefix, leaving the
    file syntactically invalid; they are restored here.
    """
    # delete=False so the file survives for compare_images; removed manually below.
    outfile = NamedTemporaryFile(suffix='.png', prefix='hicexplorer_test_cool', delete=False)
    args = '--matrix {0}/Li_et_al_2015.h5 --region X:3000000-3500000 --outFileName {1} '.format(ROOT, outfile.name).split()
    compute(hicexplorer.hicPlotMatrix.main, args, 5)
    res = compare_images(((ROOT + 'hicPlotMatrix') + '/Li_chrX30-35_cool.png'), outfile.name, tol=tolerance)
    assert (res is None), res
    if REMOVE_OUTPUT:
        os.remove(outfile.name)
def test_module_attribute_wiring_with_invalid_marker(container: Container):
    """Wiring a module with an invalid attribute marker must raise."""
    from samples.wiring import module_invalid_attr_injection

    expected = 'Unknown type of marker {0}'.format(module_invalid_attr_injection.service)
    with raises(Exception, match=expected):
        container.wire(modules=[module_invalid_attr_injection])
class Actuarial():
    """Life table with commutation functions.

    Builds per-mille mortality ``qx``, survivors ``lx``, deaths ``dx``,
    expectation ``ex`` and the commutation columns Dx/Nx/Cx/Mx either from a
    mortality table ``nt`` (``[starting_age, qx values...]``), from ``lx``,
    or from ``qx``.  ``i`` is the technical interest rate and ``perc`` scales
    mortality as a percentage.
    """

    def __init__(self, lx=None, qx=None, nt=None, i=None, perc=100):
        # Fixed: defaults were shared mutable lists (`lx=[], qx=[]`).  The
        # qx-from-lx branch below appends into `self.qx`, which mutated the
        # shared default and corrupted every later instance.  `None`
        # sentinels keep the call interface backward-compatible.
        self.lx = [] if lx is None else lx
        self.qx = [] if qx is None else qx
        self.dx = []
        self.ex = []
        self.w = 0
        self.i = i
        self.q = 0
        self.perc = perc
        self.nt = nt
        self.Dx = []
        self.Nx = []
        self.Cx = []
        self.Mx = []
        self.nEx = []
        if nt:
            # Build qx from the mortality table: nt[0] leading zero entries,
            # then qx values scaled by perc (capped once 1000 per-mille hit).
            mt = nt
            init = mt[0]
            self.qx = ([0.0] * init)
            end_val = 0
            for val in mt[1:]:
                if (end_val < 1000.0):
                    end_val = ((val * perc) / 100)
                self.qx.append(end_val)
            if (perc != 100):
                # Scaled tables may not reach certain death; force it.
                self.qx.append(1000)
        if (self.lx == []):
            # Derive survivors from qx with a radix of 100000.
            self.lx = [100000.0]
            for val in self.qx:
                self.lx.append((self.lx[(- 1)] * (1 - (val / 1000))))
        if (self.lx[(- 1)] != 0.0):
            self.lx.append(0.0)
        if (self.w == 0):
            # Limiting age: last age with survivors.
            self.w = (self.lx.index(0) - 1)
        if (self.qx == []):
            # Derive per-mille mortality from the survivor column.
            l_x = self.lx[0]
            for l_x1 in self.lx[1:]:
                self.qx.append((((l_x - l_x1) * 1000) / l_x))
                l_x = l_x1
        if (self.dx == []):
            # Deaths per age; clamp the lookup index at the first zero entry.
            dx_0 = (- 1)
            end_x_lx = self.lx.index(0)
            for lx0 in self.lx:
                dx_0 += 1
                lx1 = min((dx_0 + 1), end_x_lx)
                self.dx.append((lx0 - self.lx[lx1]))
        if (self.ex == []):
            # Curtate expectation of life plus the usual half-year correction.
            for g in range(0, len(self.lx[:(- 1)])):
                lx_g = self.lx[g]
                self.ex.append((0.5 + (sum(self.lx[(g + 1):(- 1)]) / lx_g)))
        if (self.Dx == []):
            # Dx = v^x * lx with v = 1/(1+i).
            age = (- 1)
            for j in self.lx:
                age += 1
                self.Dx.append((((1 / (1 + i)) ** age) * j))
        if (self.Nx == []):
            for k in range(0, len(self.Dx)):
                self.Nx.append(sum(self.Dx[k:(- 1)]))
        if (self.Cx == []):
            # Cx = v^(x+1) * dx.
            age = (- 1)
            for l in self.dx:
                age += 1
                C_x = (((1 / (1 + i)) ** (age + 1)) * l)
                self.Cx.append(C_x)
        if (self.Mx == []):
            for m in range(0, len(self.Cx)):
                self.Mx.append(sum(self.Cx[m:(- 1)]))

    def view(self, start=0, end=10, var='lx'):
        """Print rows [start, end] of the chosen column plus its total length."""
        column = {'qx': self.qx, 'lx': self.lx, 'dx': self.dx, 'ex': self.ex, 'nt': self.nt, 'Dx': self.Dx, 'Nx': self.Nx, 'Cx': self.Cx, 'Mx': self.Mx, 'nEx': self.nEx}
        table_str = ''
        index = start
        # 'nt' is indexed by position, every other column by age x.
        if (var == 'nt'):
            subs = 'index'
        else:
            subs = 'x'
        for i in column[var][start:(end + 1)]:
            table_str += '[{}={}] {}={}\n'.format(subs, index, var, i)
            index += 1
        print((table_str + 'Total number of rows for {} = {}'.format(var, len(column[var]))))
class DeviceCodeAuthenticator(Authenticator):
    """OAuth2 Device Code flow authenticator.

    Fetches a device code, asks the user to open the verification URL, then
    polls the token endpoint until authentication completes.
    """

    def __init__(self, endpoint: str, cfg_store: ClientConfigStore, header_key: typing.Optional[str]=None, audience: typing.Optional[str]=None, scopes: typing.Optional[typing.List[str]]=None, http_proxy_url: typing.Optional[str]=None, verify: typing.Optional[typing.Union[(bool, str)]]=None, session: typing.Optional[requests.Session]=None):
        # NOTE(review): the original signature had a nameless
        # `typing.Optional[str]=None` parameter and refresh_credentials used a
        # mangled `self._ self._verify` argument pair.  The missing name is
        # restored as `http_proxy_url`, matching the positional slot
        # token_client.get_device_code expects — confirm against its signature.
        cfg = cfg_store.get_client_config()
        # Explicit arguments win; fall back to the stored client config.
        self._audience = (audience or cfg.audience)
        self._client_id = cfg.client_id
        self._device_auth_endpoint = cfg.device_authorization_endpoint
        self._scopes = (scopes or cfg.scopes)
        self._token_endpoint = cfg.token_endpoint
        self._http_proxy_url = http_proxy_url
        if (self._device_auth_endpoint is None):
            raise AuthenticationError('Device Authentication is not available on the Flyte backend / authentication server')
        self._session = (session or requests.Session())
        super().__init__(endpoint=endpoint, header_key=(header_key or cfg.header_key), credentials=KeyringStore.retrieve(endpoint), verify=verify)

    def refresh_credentials(self):
        """Run the device-code flow and persist fresh credentials in the keyring."""
        resp = token_client.get_device_code(self._device_auth_endpoint, self._client_id, self._audience, self._scopes, self._http_proxy_url, self._verify, self._session)
        text = f"To Authenticate, navigate in a browser to the following URL: {click.style(resp.verification_uri, fg='blue', underline=True)} and enter code: {click.style(resp.user_code, fg='blue')}"
        click.secho(text)
        try:
            (token, expires_in) = token_client.poll_token_endpoint(resp, self._token_endpoint, client_id=self._client_id, audience=self._audience, scopes=self._scopes, verify=self._verify)
            # Persist so later sessions skip the browser round-trip.
            self._creds = Credentials(access_token=token, expires_in=expires_in, for_endpoint=self._endpoint)
            KeyringStore.store(self._creds)
        except Exception:
            # Any polling failure invalidates whatever was stored for this endpoint.
            KeyringStore.delete(self._endpoint)
            raise
class AlertsFetcher(FetcherClient):
    """Reads pending alerts from the elementary schema via dbt operations and
    marks them as sent or skipped."""

    def __init__(self, dbt_runner: BaseDbtRunner, config: Config, elementary_database_and_schema: str):
        super().__init__(dbt_runner)
        self.config = config
        self.elementary_database_and_schema = elementary_database_and_schema

    def skip_alerts(self, alerts_to_skip: Union[(List[PendingTestAlertSchema], List[PendingModelAlertSchema], List[PendingSourceFreshnessAlertSchema])], resource_type: ResourceType):
        """Mark the given alerts as skipped, in id chunks to bound query size."""
        table_name = self._resource_type_to_table(resource_type)
        alert_ids = [alert.id for alert in alerts_to_skip]
        alert_ids_chunks = self._split_list_to_chunks(alert_ids)
        logger.info(f'Update skipped alerts at "{table_name}"')
        for alert_ids_chunk in alert_ids_chunks:
            self.dbt_runner.run(select='elementary_cli.update_alerts.update_skipped_alerts', vars={'alert_ids': alert_ids_chunk, 'table_name': table_name}, quiet=True)

    def query_pending_test_alerts(self, days_back: int, disable_samples: bool=False) -> List[PendingTestAlertSchema]:
        logger.info('Querying test alerts.')
        pending_test_alerts_results = self.dbt_runner.run_operation(macro_name='elementary_cli.get_pending_test_alerts', macro_args={'days_back': days_back, 'disable_samples': disable_samples})
        return [PendingTestAlertSchema(**result) for result in json.loads(pending_test_alerts_results[0])]

    def query_pending_model_alerts(self, days_back: int) -> List[PendingModelAlertSchema]:
        logger.info('Querying model alerts.')
        pending_model_alerts_results = self.dbt_runner.run_operation(macro_name='elementary_cli.get_pending_model_alerts', macro_args={'days_back': days_back})
        return [PendingModelAlertSchema(**result) for result in json.loads(pending_model_alerts_results[0])]

    def query_pending_source_freshness_alerts(self, days_back: int) -> List[PendingSourceFreshnessAlertSchema]:
        logger.info('Querying source freshness alerts.')
        pending_source_freshness_alerts_results = self.dbt_runner.run_operation(macro_name='elementary_cli.get_pending_source_freshness_alerts', macro_args={'days_back': days_back})
        return [PendingSourceFreshnessAlertSchema(**result) for result in json.loads(pending_source_freshness_alerts_results[0])]

    def query_last_test_alert_times(self, days_back: int) -> Dict[(str, str)]:
        logger.info('Querying test alerts last sent times.')
        response = self.dbt_runner.run_operation(macro_name='elementary_cli.get_last_test_alert_sent_times', macro_args={'days_back': days_back})
        return json.loads(response[0])

    def query_last_model_alert_times(self, days_back: int) -> Dict[(str, str)]:
        logger.info('Querying model alerts last sent times.')
        response = self.dbt_runner.run_operation(macro_name='elementary_cli.get_last_model_alert_sent_times', macro_args={'days_back': days_back})
        return json.loads(response[0])

    def query_last_source_freshness_alert_times(self, days_back: int) -> Dict[(str, str)]:
        logger.info('Querying source freshness alerts last sent times.')
        response = self.dbt_runner.run_operation(macro_name='elementary_cli.get_last_source_freshness_alert_sent_times', macro_args={'days_back': days_back})
        return json.loads(response[0])

    def update_sent_alerts(self, alert_ids: List[str], resource_type: ResourceType) -> None:
        """Stamp the given alert ids as sent (now, UTC), in id chunks."""
        table_name = self._resource_type_to_table(resource_type)
        alert_ids_chunks = self._split_list_to_chunks(alert_ids)
        logger.info(f'Update sent alerts at "{table_name}"')
        for alert_ids_chunk in alert_ids_chunks:
            self.dbt_runner.run(select='elementary_cli.update_alerts.update_sent_alerts', vars={'alert_ids': alert_ids_chunk, 'sent_at': get_now_utc_str(), 'table_name': table_name}, quiet=True)

    # Fixed: defined without `self` but called as `self._split_list_to_chunks(...)`,
    # which bound the instance to `items`.  `@staticmethod` restores the intent.
    @staticmethod
    def _split_list_to_chunks(items: list, chunk_size: int=50) -> List[List]:
        """Split *items* into consecutive chunks of at most *chunk_size*."""
        chunk_list = []
        for i in range(0, len(items), chunk_size):
            chunk_list.append(items[i:(i + chunk_size)])
        return chunk_list

    # Fixed: same missing-`@staticmethod` defect as above.
    @staticmethod
    def _resource_type_to_table(resource_type: ResourceType) -> str:
        """Map a resource type to its alerts table name."""
        if (resource_type == ResourceType.TEST):
            return 'alerts'
        elif (resource_type == ResourceType.MODEL):
            return 'alerts_models'
        else:
            return 'alerts_source_freshness'
class OptionSeriesOrganizationSonificationTracksMappingLowpassFrequency(Options):
    """Accessors for the track-mapping lowpass-frequency options.

    Fix: each getter/setter pair was defined as two plain methods with the
    same name, so the setter silently shadowed the getter.  The
    ``@property`` / ``@<name>.setter`` decorators are restored — confirm this
    matches the generator's original pattern.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class CssUIMenuActive(CssStyle.Style):
    # jQuery-UI active menu item: themed border and background color.
    classname = 'ui-state-active'
    def customize(self):
        # Base state uses !important to override jQuery-UI's own stylesheet;
        # the hover rule deliberately does not.
        self.css({'border': BORDER_1PX_EXPR.format(self.page.theme.notch()), 'background-color': self.page.theme.notch()}, important=True)
        self.hover.css({'border': BORDER_1PX_EXPR.format(self.page.theme.notch()), 'background-color': self.page.theme.notch()})
class FunctionImageData(ImageData):
    """ImageData whose array is computed on demand by ``func`` over the
    current bounds of ``data_range``."""

    # Callable (x_low, x_high, y_low, y_high) -> 2D array.
    func = Callable
    # The 2D data range whose updates trigger recomputation.
    data_range = Instance(DataRange2D)

    def __init__(self, **kw):
        super().__init__(**kw)
        self.recalculate()

    # Fix: the decorator had been reduced to the bare string
    # ('data_range.updated'); @on_trait_change is the Traits mechanism that
    # re-runs this method whenever the range fires its `updated` event.
    @on_trait_change('data_range.updated')
    def recalculate(self, event=None):
        if ((self.func is not None) and (self.data_range is not None)):
            newarray = self.func(self.data_range.x_range.low, self.data_range.x_range.high, self.data_range.y_range.low, self.data_range.y_range.high)
            ImageData.set_data(self, newarray)
        else:
            # Not fully configured yet: fall back to an empty array.
            self._data = array([], dtype=float)

    def set_data(self, *args, **kw):
        # Data is derived from `func`; direct assignment is a usage error.
        raise RuntimeError('Cannot set numerical data on a FunctionImageData')

    def set_mask(self, mask):
        raise NotImplementedError

    def remove_mask(self):
        raise NotImplementedError
def extractPickupnovelsCom(item):
    """Turn a pickupnovels.com feed item into a release message.

    Returns None for previews or items without volume/chapter info, a built
    release message when a known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # (feed tag, series name, translation type)
    tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for tagname, series, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class SyncMimeLiteServer(SyncServer):
    """MimeLite synchronous FL server.

    Broadcasts the example-weighted full-dataset gradient average (plus the
    server optimizer state) to clients, advances the server optimizer's
    statistics without moving the weights, then applies the aggregated client
    update with a plain FedAvg-with-LR step.
    """

    def __init__(self, *, global_model: IFLModel, channel: Optional[IFLChannel]=None, **kwargs) -> None:
        init_self_cfg(self, component_class=__class__, config_class=SyncMimeLiteServerConfig, **kwargs)
        self._global_model = global_model
        self._aggregator = Aggregator(module=global_model.fl_get_module(), aggregation_type=self.cfg.aggregation_type, only_federated_params=self.cfg.only_federated_params)
        self._active_user_selector = instantiate(self.cfg.active_user_selector)
        self._channel: IFLChannel = (channel or IdentityChannel())
        # Optimizer whose statistics (e.g. momentum) are broadcast; `step` is
        # careful to keep it from actually moving the model weights.
        self._state_optimizer = instantiate(config=self.cfg.server_optimizer, model=global_model.fl_get_module())
        # The optimizer that applies the aggregated update to the weights.
        self._optimizer = instantiate(config=FedAvgWithLROptimizerConfig(lr=self.cfg.server_optimizer.lr), model=global_model.fl_get_module())
        # Scratch module holding the running gradient average.
        self._grad_average = FLModelParamUtils.clone(global_model.fl_get_module())

    # Fix: defined with `cls` but had no decorator, so an instance call would
    # have bound the instance to `cls`; @classmethod restores the intent.
    @classmethod
    def _set_defaults_in_cfg(cls, cfg):
        if OmegaConf.is_missing(cfg.active_user_selector, '_target_'):
            cfg.active_user_selector = UniformlyRandomActiveUserSelectorConfig()
        if OmegaConf.is_missing(cfg.server_optimizer, '_target_'):
            cfg.server_optimizer = FedAvgWithLROptimizerConfig(lr=1.0)

    def broadcast_message_to_clients(self, clients: Iterable[Client], global_round_num: Optional[int]=0) -> Message:
        """Average the clients' full-dataset gradients (weighted by example
        count) into ``_grad_average`` and package it with the model and the
        server optimizer state."""
        self._grad_average.zero_grad()
        num_examples = 0.0
        for client in clients:
            (grad, weight) = client.full_dataset_gradient(self.global_model)
            FLModelParamUtils.multiply_gradient_by_weight(grad, weight, grad)
            FLModelParamUtils.add_gradients(self._grad_average, grad, self._grad_average)
            num_examples += weight
            # Free the per-client gradient copy promptly.
            del grad
        assert (num_examples > 0), 'All selected clients in the current round have no data'
        FLModelParamUtils.multiply_gradient_by_weight(self._grad_average, (1.0 / num_examples), self._grad_average)
        return Message(model=self.global_model, server_opt_state=copy.deepcopy(self._state_optimizer.state_dict()['state']))

    def step(self):
        """Advance optimizer statistics with the averaged gradient (weights
        restored afterwards), then apply the aggregated client update."""
        # Snapshot weights: _state_optimizer.step() must only update its
        # internal statistics, not the model parameters.
        model_state_dict = copy.deepcopy(self._global_model.fl_get_module().state_dict())
        FLModelParamUtils.copy_gradients(self._grad_average, self._global_model.fl_get_module())
        self._state_optimizer.step()
        self._global_model.fl_get_module().load_state_dict(model_state_dict)
        del model_state_dict
        aggregated_model = self._aggregator.aggregate()
        FLModelParamUtils.set_gradient(model=self._global_model.fl_get_module(), reference_gradient=aggregated_model)
        self._optimizer.step()
def close_all(map=None, ignore_all=False):
    """Close every dispatcher in *map* (default: the global socket_map).

    EBADF is always swallowed (the descriptor is already gone).  Other
    OSErrors and unexpected exceptions propagate unless *ignore_all* is set;
    exceptions in _reraised_exceptions always propagate.  The map is cleared
    afterwards.
    """
    if (map is None):
        map = socket_map
    # Iterate over a snapshot: x.close() removes entries from the map.
    for x in list(map.values()):
        try:
            x.close()
        except OSError as x:
            if (x.errno == EBADF):
                pass
            elif (not ignore_all):
                raise
        except _reraised_exceptions:
            # e.g. KeyboardInterrupt/SystemExit — never suppressed.
            raise
        except:
            if (not ignore_all):
                raise
    map.clear()
class EmulateEfuseController(EmulateEfuseControllerBase):
    """In-memory emulation of the ESP32-P4 eFuse controller for esptool tests."""
    CHIP_NAME = 'ESP32-P4'
    mem = None
    debug = False
    def __init__(self, efuse_file=None, debug=False):
        self.Blocks = EfuseDefineBlocks
        self.Fields = EfuseDefineFields()
        self.REGS = EfuseDefineRegisters
        super(EmulateEfuseController, self).__init__(efuse_file, debug)
        # Start with the command register idle.
        self.write_reg(self.REGS.EFUSE_CMD_REG, 0)
    ' esptool method start >>'
    def get_major_chip_version(self):
        return 0
    def get_minor_chip_version(self):
        return 0
    def get_crystal_freq(self):
        # MHz
        return 40
    def get_security_info(self):
        return {'flags': 0, 'flash_crypt_cnt': 0, 'key_purposes': 0, 'chip_id': 0, 'api_version': 0}
    ' << esptool method end '
    def handle_writing_event(self, addr, value):
        # Emulate a register write: a PGM command commits the write regs into
        # the read regs; a READ command persists the emulated state to file.
        if (addr == self.REGS.EFUSE_CMD_REG):
            if (value & self.REGS.EFUSE_PGM_CMD):
                # Bits [5:2] of the command value select the updated block.
                self.copy_blocks_wr_regs_to_rd_regs(updated_block=((value >> 2) & 15))
                self.clean_blocks_wr_regs()
                self.check_rd_protection_area()
                self.write_reg(addr, 0)
                self.write_reg(self.REGS.EFUSE_CMD_REG, 0)
            elif (value == self.REGS.EFUSE_READ_CMD):
                self.write_reg(addr, 0)
                self.write_reg(self.REGS.EFUSE_CMD_REG, 0)
                self.save_to_file()
    def get_bitlen_of_block(self, blk, wr=False):
        # Write registers are always 8 words; block 0 reads its full length,
        # other blocks additionally carry 3 words of RS parity when writing.
        if (blk.id == 0):
            if wr:
                return (32 * 8)
            else:
                return (32 * blk.len)
        elif wr:
            rs_coding = (32 * 3)
            return ((32 * 8) + rs_coding)
        else:
            return (32 * blk.len)
    def handle_coding_scheme(self, blk, data):
        # Blocks other than 0 are Reed-Solomon protected: verify the 12
        # parity bytes, then strip parity and any padding past blk.len.
        if (blk.id != 0):
            coded_bytes = 12
            data.pos = (coded_bytes * 8)
            plain_data = data.readlist('32*uint:8')[::(- 1)]
            # Recompute RS parity over the payload and compare with what was
            # actually written (32 payload + 12 parity = 44 bytes).
            rs = reedsolo.RSCodec(coded_bytes)
            calc_encoded_data = list(rs.encode([x for x in plain_data]))
            data.pos = 0
            if (calc_encoded_data != data.readlist('44*uint:8')[::(- 1)]):
                raise FatalError('Error in coding scheme data')
            data = data[(coded_bytes * 8):]
        if (blk.len < 8):
            data = data[((8 - blk.len) * 32):]
        return data
@view_config(context=HTTPForbidden, accept='text/html')
@view_config(context=HTTPUnauthorized, accept='text/html')
@view_config(context=Exception, accept='text/html')
def exception_html_view(exc, request):
    """Render an uncaught exception as an HTML error page.

    NOTE(review): the decorators had been truncated to bare ``_config(...)``
    calls; ``@view_config`` (Pyramid) is restored here — confirm the project
    did not use a variant such as ``exception_view_config``.
    """
    errors = getattr(request, 'errors', [])
    status = getattr(exc, 'status_code', 500)
    # Expected client errors are logged quietly; anything else gets a traceback.
    if (status not in (404, 403, 401)):
        log.exception('Error caught. Handling HTML response.')
    else:
        log.warning(str(exc))
    if (not len(errors)):
        # Build a cornice error container so the handler has something to render.
        errors = cornice.errors.Errors(status=status)
        errors.add('body', description=str(exc))
        request.errors = errors
    return bodhi.server.services.errors.html_handler(request)
class TestRiskScoreMismatch(BaseRuleTest):
    """Each rule's risk_score must fall inside the band implied by its severity."""

    def test_rule_risk_score_severity_mismatch(self):
        # Inclusive risk-score band per severity level.
        bands = {'critical': (74, 100), 'high': (48, 73), 'medium': (22, 47), 'low': (0, 21)}
        mismatches = []
        for rule in self.all_rules:
            data = rule.contents.data
            low, high = bands[data.severity]
            if not low <= data.risk_score <= high:
                mismatches.append(f'{self.rule_str(rule)} Severity: {data.severity}, Risk Score: {data.risk_score}')
        if mismatches:
            message = 'The following rules have mismatches between Severity and Risk Score field values:\n'
            message += '\n'.join(mismatches)
            self.fail(message)
class FileUploadInput(object):
    """Renders a file `<input>`, with the current filename and a delete
    checkbox when the field already holds valid data."""

    empty_template = '<input %(file)s>'
    input_type = 'file'
    data_template = '<div> <input %(text)s> <input type="checkbox" name="%(marker)s">Delete</input></div><input %(file)s>'

    def __call__(self, field, **kwargs):
        kwargs.setdefault('id', field.id)
        kwargs.setdefault('name', field.name)
        # Show the "existing data" template only when there is data and no errors.
        if field.data and not field.errors:
            template = self.data_template
        else:
            template = self.empty_template
        # An uploaded FileStorage displays its filename; anything else its value.
        if field.data and isinstance(field.data, FileStorage):
            value = field.data.filename
        else:
            value = field.data or ''
        params = {
            'text': html_params(type='text', readonly='readonly', value=value, name=field.name),
            'file': html_params(type='file', value=value, **kwargs),
            'marker': ('_%s-delete' % field.name),
        }
        return Markup(template % params)
def test_arrow():
    """Arrow<->Parquet handlers share a python type and register a decoder."""
    enc = basic_dfs.ArrowToParquetEncodingHandler()
    dec = basic_dfs.ParquetToArrowDecodingHandler()
    assert enc.protocol is None
    assert dec.protocol is None
    assert enc.python_type is dec.python_type
    # The decoder must be registered for the fsspec protocol + parquet format.
    registered = StructuredDatasetTransformerEngine.DECODERS[enc.python_type]['fsspec']['parquet']
    assert registered is not None
class Common():
def test_todims(self):
self.assertEqual(self.seq.todims, self.checktodims)
for trans in self.seq:
self.assertEqual(trans[0].todims, self.checktodims)
def test_fromdims(self):
self.assertEqual(self.seq.fromdims, self.checkfromdims)
for trans in self.seq:
self.assertEqual(trans[(- 1)].fromdims, self.checkfromdims)
def test_len(self):
self.assertEqual(len(self.seq), len(self.check))
def test_getitem_scalar_pos(self):
for i in range(len(self.check)):
self.assertEqual(self.seq[i], self.check[i])
def test_getitem_scalar_neg(self):
for i in range((- len(self.check)), 0):
self.assertEqual(self.seq[i], self.check[i])
def test_getitem_scalar_invalid(self):
for i in [len(self.check), ((- len(self.check)) - 1)]:
with self.assertRaises(IndexError):
self.seq[i]
def test_getitem_slice(self):
for i in range(len(self.check)):
for j in range(i, (len(self.check) + 1)):
with self.subTest('{}:{}'.format(i, j)):
self.assertEqual(tuple(self.seq[i:j]), tuple(self.check[i:j]))
def test_getitem_intarray(self):
for mask in itertools.product(*([[False, True]] * len(self.check))):
mask = numpy.array(mask, dtype=bool)
(indices,) = numpy.where(mask)
with self.subTest(tuple(indices)):
self.assertEqual(tuple(self.seq[indices]), tuple((self.check[i] for i in indices)))
def test_getitem_intarray_outofbounds(self):
for i in [(- 1), len(self.check)]:
with self.assertRaises(IndexError):
self.seq[numpy.array([i], dtype=int)]
def test_getitem_intarray_invalidndim(self):
with self.assertRaises(IndexError):
self.seq[numpy.array([[0]], dtype=int)]
def test_getitem_boolarray(self):
for mask in itertools.product(*([[False, True]] * len(self.check))):
mask = numpy.array(mask, dtype=bool)
(indices,) = numpy.where(mask)
with self.subTest(tuple(indices)):
self.assertEqual(tuple(self.seq[mask]), tuple((self.check[i] for i in indices)))
def test_getitem_boolarray_invalidshape(self):
with self.assertRaises(IndexError):
self.seq[numpy.array(([True] * (len(self.check) + 1)), dtype=bool)]
with self.assertRaises(IndexError):
self.seq[numpy.array([([True] * len(self.check))], dtype=bool)]
def test_iter(self):
self.assertEqual(tuple(self.seq), tuple(self.check))
def test_add(self):
self.assertEqual(tuple((self.seq + nutils.transformseq.EmptyTransforms(self.checktodims, self.checkfromdims))), tuple(self.check))
self.assertEqual(tuple((self.seq + self.seq)), (tuple(self.check) + tuple(self.check)))
def test_index_with_tail(self):
    """index_with_tail splits a transform chain into (item index, remaining tail)."""
    assert (len(self.check) == len(self.checkrefs))
    for (i, (trans, ref)) in enumerate(zip(self.check, self.checkrefs)):
        # An exact match has an empty tail.
        self.assertEqual(self.seq.index_with_tail(trans), (i, ()))
        for ctrans in ref.child_transforms:
            # A child transform appended to the chain comes back as the tail.
            self.assertEqual(self.seq.index_with_tail((trans + (ctrans,))), (i, (ctrans,)))
        if (self.checkfromdims > 0):
            for etrans in ref.edge_transforms:
                # Edge transforms must be found regardless of canonical reordering.
                for shuffle in ((lambda t: t), nutils.transform.canonical):
                    self.assertEqual(self.seq.index_with_tail(shuffle((trans + (etrans,)))), (i, (etrans,)))
def test_index_with_tail_missing(self):
    """Transforms outside the sequence raise ValueError."""
    for missing in self.checkmissing:
        with self.assertRaises(ValueError):
            self.seq.index_with_tail(missing)
def test_index(self):
    """index returns each reference transform's position."""
    for position, trans in enumerate(self.check):
        self.assertEqual(self.seq.index(trans), position)
def test_index_missing(self):
    """index raises ValueError for absent transforms, including extended child chains."""
    for trans in self.checkmissing:
        with self.assertRaises(ValueError):
            self.seq.index(trans)
    assert (len(self.check) == len(self.checkrefs))
    for (trans, ref) in zip(self.check, self.checkrefs):
        for ctrans in ref.child_transforms:
            # Unlike index_with_tail, plain index must not match chains with a tail.
            with self.assertRaises(ValueError):
                self.seq.index((trans + (ctrans,)))
def test_contains_with_tail(self):
    """contains_with_tail accepts chains that extend a contained transform."""
    assert (len(self.check) == len(self.checkrefs))
    for (i, (trans, ref)) in enumerate(zip(self.check, self.checkrefs)):
        # NOTE(review): this line checks index_with_tail rather than
        # contains_with_tail — presumably a sanity anchor; confirm intent.
        self.assertEqual(self.seq.index_with_tail(trans), (i, ()))
        for ctrans in ref.child_transforms:
            self.assertTrue(self.seq.contains_with_tail((trans + (ctrans,))))
        if (self.checkfromdims > 0):
            for etrans in ref.edge_transforms:
                for shuffle in ((lambda t: t), nutils.transform.canonical):
                    self.assertTrue(self.seq.contains_with_tail(shuffle((trans + (etrans,)))))
def test_contains_with_tail_missing(self):
    """Chains not rooted in the sequence are reported as absent."""
    for missing in self.checkmissing:
        self.assertFalse(self.seq.contains_with_tail(missing))
def test_contains(self):
    """Every reference transform must be reported as contained in the sequence."""
    # The enumerate index was unused; iterate the transforms directly.
    for trans in self.check:
        self.assertTrue(self.seq.contains(trans))
def test_contains_missing(self):
    """contains rejects absent transforms and extended child chains."""
    for trans in self.checkmissing:
        self.assertFalse(self.seq.contains(trans))
    assert (len(self.check) == len(self.checkrefs))
    for (trans, ref) in zip(self.check, self.checkrefs):
        for ctrans in ref.child_transforms:
            self.assertFalse(self.seq.contains((trans + (ctrans,))))
def test_refined(self):
    """refined() enumerates the child transforms of every item, in item order."""
    refined = self.seq.refined(self.checkrefs)
    assert (len(self.check) == len(self.checkrefs))
    # Expected order: all children of item 0, then all children of item 1, ...
    ctransforms = ((trans + (ctrans,)) for (trans, ref) in zip(self.check, self.checkrefs) for ctrans in ref.child_transforms)
    for (i, trans) in enumerate(ctransforms):
        self.assertEqual(refined.index(trans), i)
class OptionPlotoptionsTreemapSonificationDefaultinstrumentoptionsMappingTime(Options):
    """Auto-generated option wrapper for
    `plotOptions.treemap.sonification.defaultInstrumentOptions.mapping.time`.

    NOTE(review): each name below is defined twice (getter-style then
    setter-style); upstream these pairs are presumably decorated with
    `@property` / `@<name>.setter`, which appears to have been stripped
    from this file — confirm against the code generator.
    """

    def mapFunction(self):
        # Getter: returns the configured value (no default).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter: store the raw value.
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def downgrade():
    """Recreate the dropped `format` table (reverse of the corresponding upgrade)."""
    columns = (
        sa.Column('id', sa.INTEGER(), nullable=False),
        sa.Column('name', sa.VARCHAR(), autoincrement=False, nullable=True),
        sa.Column('label_en', sa.VARCHAR(), autoincrement=False, nullable=False),
        sa.Column('event_id', sa.INTEGER(), autoincrement=False, nullable=False),
    )
    op.create_table('format', *columns, sa.PrimaryKeyConstraint('id', name=u'format_pkey'))
class OptionSonificationGlobaltracksMappingGapbetweennotes(Options):
    """Auto-generated option wrapper for
    `sonification.globalTracks.mapping.gapBetweenNotes`.

    NOTE(review): each name below is defined twice (getter-style then
    setter-style); upstream these pairs are presumably `@property` /
    `@<name>.setter` pairs whose decorators were stripped — confirm
    against the code generator.
    """

    def mapFunction(self):
        # Getter: returns the configured value (no default).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter: store the raw value.
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def value(self):
        return self._config_get(None)

    def value(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def create_server_datasets(test_config: FidesConfig, datasets: List[Dataset]) -> None:
    """Replace each dataset on the server: delete any existing copy, then create it."""
    server_url = test_config.cli.server_url
    auth_header = test_config.user.auth_header
    for dataset in datasets:
        api.delete(url=server_url, resource_type='dataset', resource_id=dataset.fides_key, headers=auth_header)
        api.create(url=server_url, resource_type='dataset', json_resource=dataset.json(exclude_none=True), headers=auth_header)
class TestBootloaderHeaderRewriteCases(EsptoolTestCase):
    """write_flash must rewrite bootloader image headers only when the image magic is valid."""

    # NOTE(review): decorators were mangled to bare `.skipif` / `.quick_test`
    # expressions in this file; restored as pytest marks — confirm upstream.
    @pytest.mark.skipif(
        arg_chip not in ['esp8266', 'esp32', 'esp32c3'],
        reason="Don't run on every chip, so other bootloader images are not needed",
    )
    @pytest.mark.quick_test
    def test_flash_header_rewrite(self):
        # ESP32/ESP32-S2 keep the bootloader at 0x1000; other chips at 0x0.
        bl_offset = 4096 if arg_chip in ('esp32', 'esp32s2') else 0
        bl_image = f'images/bootloader_{arg_chip}.bin'
        output = self.run_esptool(f'write_flash -fm dout -ff 20m {bl_offset:#x} {bl_image}')
        if arg_chip in ['esp8266', 'esp32']:
            # These chips support the dout/20m combination, so the header is rewritten.
            # (These membership checks were bare no-op expressions; asserts restored.)
            assert 'Flash params set to' in output
        else:
            assert 'Flash params set to' not in output
            assert 'not changing the flash mode setting' in output
            assert 'not changing the flash frequency setting' in output

    def test_flash_header_no_magic_no_rewrite(self):
        # Images without the 0xE9 magic byte must be flashed byte-for-byte unmodified.
        bl_offset = 4096 if arg_chip in ('esp32', 'esp32s2') else 0
        for image in ['images/one_kb.bin', 'images/one_kb_all_ef.bin']:
            output = self.run_esptool(f'write_flash -fm dout -ff 20m {bl_offset:#x} {image}')
            assert 'not changing any flash settings' in output
            self.verify_readback(bl_offset, 1024, image)
# NOTE(review): the bare string below looks like a stripped registry decorator
# (e.g. `@registry.reg('cuda.bmm_rrr_permute.gen_function')`) — confirm upstream.
('cuda.bmm_rrr_permute.gen_function')
def gen_function(func_attrs, exec_cond_template, dim_info_dict):
    """Generate CUDA source for a bmm_rrr + permute operator."""
    # Default GEMM problem description, honouring an optional 'alpha' scale factor.
    default_mm_info = bmm_common.get_default_problem_info(PROBLEM_ARGS, alpha_value=func_attrs.get('alpha', 1))
    (problem_args, _, input_addr_calculator, output_addr_calculator) = bmm_common.make_function_strided_args(func_attrs, dim_info_dict, default_mm_info, is_permute=True)
    return bmm_permute_common.gen_function(func_attrs, exec_cond_template, problem_args, dim_info_dict, input_addr_calculator, output_addr_calculator, extra_code=common_permute.EXTRA_CODE.render())
# NOTE(review): decorator was mangled to a bare `.parametrize(...)` expression
# (a syntax error); restored as a pytest mark — confirm upstream.
@pytest.mark.parametrize(
    'test_dataset, conditions, result',
    (
        (pd.DataFrame({'target': [0, 0, 0, 1]}), {'eq': 0}, True),
        (pd.DataFrame({'target': [0, 0, None, 1], 'numeric': [None, None, None, 1]}), {'lt': 3}, False),
    ),
)
def test_data_integrity_test_number_of_missing_values_no_errors(test_dataset: pd.DataFrame, conditions: dict, result: bool) -> None:
    """TestNumberOfMissingValues passes/fails according to the given condition."""
    suite = TestSuite(tests=[TestNumberOfMissingValues(**conditions)])
    suite.run(current_data=test_dataset, reference_data=None, column_mapping=ColumnMapping())
    assert bool(suite) is result
    # Rendering and serialisation must not raise.
    assert suite.show()
    assert suite.json()
class ResolveAnchorIds(Transform):
    """Resolve `[](#anchor)` style references against heading slugs and explicit targets."""

    # Run late, just before the builder's own reference resolution.
    default_priority = 879

    def apply(self, **kwargs: t.Any) -> None:
        """Rewrite `id_link` references to refids, pending xrefs, or warn if unresolved."""
        # Heading slugs computed earlier by the MyST parser, if any.
        slugs: dict[str, tuple[int, str, str]] = getattr(self.document, 'myst_slugs', {})
        # Gather explicit targets: name -> (labelid, implicit title or None).
        explicit: dict[str, tuple[str, None | str]] = {}
        for name, is_explicit in self.document.nametypes.items():
            if not is_explicit:
                continue
            labelid = self.document.nameids[name]
            # BUG FIX: this None-check was duplicated; one copy removed.
            if labelid is None:
                continue
            node = self.document.ids[labelid]
            if isinstance(node, nodes.target) and ('refid' in node):
                # Indirect target: follow the refid to the real node.
                node = self.document.ids.get(node['refid'])
            labelid = node['names'][0]
            if (node.tagname == 'footnote') or ('refuri' in node) or node.tagname.startswith('desc_'):
                # Footnotes, external targets and domain objects are handled elsewhere.
                continue
            # Try to derive an implicit link title from the target node.
            implicit_title = None
            if node.tagname == 'rubric':
                implicit_title = clean_astext(node)
            if implicit_title is None:
                for subnode in node:
                    if isinstance(subnode, (nodes.caption, nodes.title)):
                        implicit_title = clean_astext(subnode)
                        break
            if implicit_title is None:
                if isinstance(node, (nodes.definition_list, nodes.field_list)) and node.children:
                    node = node[0]
                if isinstance(node, (nodes.field, nodes.definition_list_item)) and node.children:
                    node = node[0]
                if isinstance(node, (nodes.term, nodes.field_name)):
                    implicit_title = clean_astext(node)
            explicit[name] = (labelid, implicit_title)
        for refnode in findall(self.document)(nodes.reference):
            if not refnode.get('id_link'):
                continue
            target = refnode['refuri'][1:]
            del refnode['refuri']
            # 1) explicit targets in this document
            if target in explicit:
                ref_id, implicit_title = explicit[target]
                refnode['refid'] = ref_id
                if (not refnode.children) and implicit_title:
                    refnode += nodes.inline(implicit_title, implicit_title, classes=['std', 'std-ref'])
                elif not refnode.children:
                    refnode += nodes.inline('#' + target, '#' + target, classes=['std', 'std-ref'])
                continue
            # 2) auto-generated heading slugs
            if target in slugs:
                _, sect_id, implicit_title = slugs[target]
                refnode['refid'] = sect_id
                if (not refnode.children) and implicit_title:
                    refnode += nodes.inline(implicit_title, implicit_title, classes=['std', 'std-ref'])
                continue
            # 3) inside Sphinx: defer resolution via a pending_xref
            if hasattr(self.document.settings, 'env'):
                from sphinx import addnodes
                pending = addnodes.pending_xref(refdoc=self.document.settings.env.docname, refdomain=None, reftype='myst', reftarget=target, refexplicit=bool(refnode.children))
                inner_node = nodes.inline('', '', classes=['xref', 'myst'] + refnode['classes'])
                for attr in ('ids', 'names', 'dupnames'):
                    inner_node[attr] = refnode[attr]
                inner_node += refnode.children
                pending += inner_node
                refnode.parent.replace(refnode, pending)
                continue
            # 4) unresolved: warn and keep a best-effort local link
            create_warning(self.document, f"'myst' reference target not found: {target!r}", MystWarnings.XREF_MISSING, line=refnode.line, append_to=refnode)
            refnode['refid'] = normalizeLink(target)
            if not refnode.children:
                refnode += nodes.inline('#' + target, '#' + target, classes=['std', 'std-ref'])
def test_process():
    """Smoke-test the Monitor job handlers against a mocked SDN connector and endpoints."""
    from threading import Thread

    class MockMonitor(Monitor):
        # Bypass Monitor.__init__: build just enough state for the handlers below.
        def __init__(self):
            self.sdnc = get_sdn_connect(logger)
            self.logger = self.sdnc.logger
            self.config = self.sdnc.config
            # Exercise both an unknown and the 'faucet' controller type.
            self.sdnc.config['TYPE'] = 'None'
            self.sdnc.get_sdn_context()
            self.sdnc.config['TYPE'] = 'faucet'
            self.sdnc.get_sdn_context()
            self.job_queue = queue.Queue()
            self.prom = prom
            # Endpoint 1: already operating.
            endpoint = endpoint_factory('foo')
            endpoint.endpoint_data = {'active': 0, 'ipv4_subnet': '12.12.12.12/24', 'ipv6_subnet': '', 'ipv4_rdns': '', 'ipv6_rdns': '', 'controller_type': 'faucet', 'controller': '', 'name': '', 'ipv4': '12.12.12.12', 'ipv6': '', 'ether_vendor': 'foo', 'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
            endpoint.metadata = {'mac_addresses': {'00:00:00:00:00:00': {'classification': {'labels': ['developer workstation', 'foo', 'bar'], 'confidences': [0.8, 0.2, 0.0]}}}, 'ipv4_addresses': {'12.12.12.12': {'os': 'windows'}}, 'ipv6_addresses': {'1212::1': {'os': 'windows'}}}
            endpoint.operate()
            self.sdnc.endpoints[endpoint.name] = endpoint
            # Endpoint 2: queued to operate.
            endpoint = endpoint_factory('foo2')
            endpoint.endpoint_data = {'active': 0, 'ipv4_subnet': '12.12.12.12/24', 'ipv6_subnet': '', 'ipv4_rdns': '', 'ipv6_rdns': '', 'controller_type': 'faucet', 'controller': '', 'name': '', 'ipv4': '12.12.12.12', 'ipv6': '', 'ether_vendor': 'foo', 'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
            endpoint.metadata = {'mac_addresses': {'00:00:00:00:00:00': {'classification': {'labels': ['developer workstation', 'foo', 'bar'], 'confidences': [0.8, 0.2, 0.0]}}}, 'ipv4_addresses': {'12.12.12.12': {'os': 'windows'}}, 'ipv6_addresses': {'1212::1': {'os': 'windows'}}}
            endpoint.queue_next('operate')
            self.sdnc.endpoints[endpoint.name] = endpoint
            # Endpoint 3: idle.
            endpoint = endpoint_factory('foo3')
            endpoint.endpoint_data = {'active': 0, 'ipv4_subnet': '12.12.12.12/24', 'ipv6_subnet': '', 'ipv4_rdns': '', 'ipv6_rdns': '', 'controller_type': 'faucet', 'controller': '', 'name': '', 'ipv4': '12.12.12.12', 'ipv6': '', 'ether_vendor': 'foo', 'tenant': 'foo', 'mac': '00:00:00:00:00:00', 'segment': 'foo', 'port': '1'}
            endpoint.metadata = {'mac_addresses': {'00:00:00:00:00:00': {'classification': {'labels': ['developer workstation', 'foo', 'bar'], 'confidences': [0.8, 0.2, 0.0]}}}, 'ipv4_addresses': {'12.12.12.12': {'os': 'windows'}}, 'ipv6_addresses': {'1212::1': {'os': 'windows'}}}
            self.sdnc.endpoints[endpoint.name] = endpoint
            self.results = 0

        # Return a single fake queue item on the first call, then nothing.
        def get_q_item(self, q, timeout=1):
            if (not self.results):
                self.results += 1
                return (True, ('foo', {'data': {}}))
            return (False, None)

        def format_rabbit_message(self, item, faucet_event, remove_list):
            return ({'data': {}}, False)

    mock_monitor = MockMonitor()
    assert mock_monitor.sdnc.investigation_budget()
    assert mock_monitor.sdnc.coprocessing_budget()
    # Run every job handler once with a live connector...
    handlers = [mock_monitor.job_update_metrics, mock_monitor.job_reinvestigation_timeout, mock_monitor.job_recoprocess, mock_monitor.schedule_mirroring, mock_monitor.schedule_coprocessing]
    for handler in handlers:
        handler()

    # Stop the monitor loop after a short delay.
    def thread1():
        time.sleep(5)
        mock_monitor.running = False

    t1 = Thread(target=thread1)
    t1.start()
    t1.join()
    # ...and once more with the inner connector removed, to hit the fallback paths.
    mock_monitor.sdnc.sdnc = None
    for handler in handlers:
        handler()
# NOTE(review): the bare names `_log_on_failure_all` / `_log_on_failure` look like
# stripped decorators (log-on-failure helpers); confirm against the original module.
_log_on_failure_all
class BaseTestLibp2pClientSamePeer():
    """Base class: one libp2p delegate/mailbox node with two mailbox clients attached."""

    _log_on_failure
    def setup_class(cls):
        # Work inside a throwaway temp directory.
        cls.cwd = os.getcwd()
        cls.t = tempfile.mkdtemp()
        os.chdir(cls.t)
        MockDefaultMessageProtocol = Mock()
        MockDefaultMessageProtocol.protocol_id = DefaultMessage.protocol_id
        MockDefaultMessageProtocol.protocol_specification_id = DefaultMessage.protocol_specification_id
        cls.log_files = []
        # Start the node first; clients connect to its public key.
        temp_dir = os.path.join(cls.t, 'temp_dir_node')
        os.mkdir(temp_dir)
        cls.connection_node = _make_libp2p_connection(data_dir=temp_dir, port=(DEFAULT_PORT + 1), delegate=True, mailbox=True)
        cls.multiplexer_node = Multiplexer([cls.connection_node], protocols=[MockDefaultMessageProtocol])
        cls.log_files.append(cls.connection_node.node.log_file)
        cls.multiplexer_node.connect()
        try:
            # Client 1: FetchAI ledger identity.
            temp_dir_client_1 = os.path.join(cls.t, 'temp_dir_client_1')
            os.mkdir(temp_dir_client_1)
            cls.connection_client_1 = _make_libp2p_mailbox_connection(data_dir=temp_dir_client_1, peer_public_key=cls.connection_node.node.pub, ledger_api_id=FetchAICrypto.identifier)
            cls.multiplexer_client_1 = Multiplexer([cls.connection_client_1], protocols=[MockDefaultMessageProtocol])
            cls.multiplexer_client_1.connect()
            # Client 2: Ethereum ledger identity.
            temp_dir_client_2 = os.path.join(cls.t, 'temp_dir_client_2')
            os.mkdir(temp_dir_client_2)
            cls.connection_client_2 = _make_libp2p_mailbox_connection(data_dir=temp_dir_client_2, peer_public_key=cls.connection_node.node.pub, ledger_api_id=EthereumCrypto.identifier)
            cls.multiplexer_client_2 = Multiplexer([cls.connection_client_2], protocols=[MockDefaultMessageProtocol])
            cls.multiplexer_client_2.connect()
            # Wait until everything reports connected before running tests.
            wait_for_condition((lambda : cls.multiplexer_client_2.is_connected), 20)
            wait_for_condition((lambda : cls.multiplexer_client_1.is_connected), 20)
            wait_for_condition((lambda : cls.connection_client_2.is_connected), 20)
            wait_for_condition((lambda : cls.connection_client_1.is_connected), 20)
            wait_for_condition((lambda : cls.connection_node.is_connected), 20)
        except Exception:
            # Don't leak the node process if client setup fails.
            cls.multiplexer_node.disconnect()
            raise

    def teardown_class(cls):
        # Disconnect clients before the node, then clean up the temp dir.
        cls.multiplexer_client_1.disconnect()
        cls.multiplexer_client_2.disconnect()
        cls.multiplexer_node.disconnect()
        os.chdir(cls.cwd)
        print(open(cls.connection_node.node.log_file, 'r').read())
        try:
            shutil.rmtree(cls.t)
        except (OSError, IOError):
            pass

    def _make_envelope(self, sender_address: str, receiver_address: str, message_id: int=1, target: int=0):
        """Build a BYTES DefaultMessage envelope from sender to receiver."""
        msg = DefaultMessage(dialogue_reference=('', ''), message_id=message_id, target=target, performative=DefaultMessage.Performative.BYTES, content=b'hello')
        envelope = Envelope(to=receiver_address, sender=sender_address, protocol_specification_id=DefaultMessage.protocol_specification_id, message=DefaultSerializer().encode(msg))
        return envelope
# NOTE(review): decorator was mangled to a bare `.skipif(...)` expression
# (a syntax error); restored as a pytest mark — confirm upstream.
@pytest.mark.skipif(
    pytz is None,
    reason='As Django 4.0 has deprecated pytz, this test should eventually be able to get removed.',
)
class TestPytzNaiveDayLightSavingTimeTimeZoneDateTimeField(FieldValues):
    """Datetimes falling in a DST gap must be rejected for a pytz timezone."""
    valid_inputs = {}
    invalid_inputs = {'2017-03-12T02:30:00': ['Invalid datetime for the timezone "America/New_York".'], '2017-11-05T01:30:00': ['Invalid datetime for the timezone "America/New_York".']}
    outputs = {}
    if pytz:
        class MockTimezone(pytz.BaseTzInfo):
            # Always raise, simulating a nonexistent/ambiguous local time.
            # (Original def lacked `self`, relying on implicit binding; fixed.)
            def localize(self, value, is_dst=False):
                raise pytz.InvalidTimeError()

            def __str__(self):
                return 'America/New_York'
        field = serializers.DateTimeField(default_timezone=MockTimezone())
class Weight(object):
    """A weight (measure) defined either by a callable PDF or by data samples.

    Parameters
    ----------
    weight_function : callable or array-like
        A function f(x), or raw data that is smoothed into a Gaussian KDE.
    support : list, optional
        [lower, upper] bounds; required for callables, inferred from data.
    pdf : bool
        True if the function already integrates to one over the support.
    mean, variance : float, optional
        Known moments; computed by adaptive quadrature when omitted.
    """

    def __init__(self, weight_function, support=None, pdf=False, mean=None, variance=None):
        self.weight_function = weight_function
        self.flag = 'function'
        # Anything that is not a plain Python function is treated as data
        # and replaced by a Gaussian kernel density estimate.
        tmp = (lambda: 0)
        if not isinstance(self.weight_function, type(tmp)):
            self.weight_function = stats.gaussian_kde(weight_function, bw_method='silverman')
            self.flag = 'data'
        self.pdf = pdf
        if (self.flag == 'data') and (support is None):
            support = [np.min(weight_function), np.max(weight_function)]
        self.support = support
        self.lower = self.support[0]
        self.upper = self.support[1]
        # BUG FIX: these previously did `raise (ValueError, 'msg')`, which raises
        # a tuple and is a TypeError under Python 3; raise proper instances.
        if self.upper <= self.lower:
            raise ValueError('The lower bound must be less than the upper bound in the support.')
        if self.lower == (-np.inf):
            raise ValueError('The lower bound cannot be negative infinity.')
        if self.upper == np.inf:
            raise ValueError('The upper bound cannot be infinity.')
        self._verify_probability_density()
        self.x_range_for_pdf = np.linspace(self.lower, self.upper, RECURRENCE_PDF_SAMPLES)
        self.mean = mean
        self.variance = variance
        self.data = self.get_pdf()
        if self.mean is None:
            self._set_mean()
        if self.variance is None:
            self._set_variance()

    def _evaluate_pdf(self, x):
        """Evaluate the raw (unnormalised) weight function at each point of x."""
        x = np.array(x)
        pdf_values = np.zeros(x.shape[0])
        for i in range(0, x.shape[0]):
            pdf_values[i] = self.weight_function(x[i])
        return pdf_values

    def get_pdf(self, points=None):
        """Return the normalised PDF at `points` (default: the cached grid)."""
        if points is None:
            return self._evaluate_pdf(self.x_range_for_pdf) * self.integration_constant
        else:
            return self._evaluate_pdf(points) * self.integration_constant

    def _verify_probability_density(self):
        """Compute the normalisation constant unless the density already integrates to one."""
        (integral, _) = self._iterative_quadrature_computation(self.weight_function)
        if (np.abs(integral - 1.0) >= 0.01) or (self.pdf is False):
            self.integration_constant = 1.0 / integral
        elif (np.abs(integral - 1.0) < 0.01) or (self.pdf is True):
            self.integration_constant = 1.0

    def _get_quadrature_points_and_weights(self, order):
        # Uniform rule over [lower, upper]; weights rescaled to the interval length.
        param = Parameter(distribution='uniform', lower=self.lower, upper=self.upper, order=order)
        basis = Basis('univariate')
        poly = Poly(method='numerical-integration', parameters=param, basis=basis)
        (points, weights) = poly.get_points_and_weights()
        return (points, weights * (self.upper - self.lower))

    def _set_mean(self):
        mean_integrand = lambda x: (x * self.weight_function(x)) * self.integration_constant
        (self.mean, self._mean_quadrature_order) = self._iterative_quadrature_computation(mean_integrand)

    def _iterative_quadrature_computation(self, integrand, quadrature_order_output=True):
        """Raise the quadrature order until the integral changes by less than 1e-6."""
        quadrature_error = 500.0
        quadrature_order = 0
        integral_before = 10.0
        while quadrature_error >= 1e-06:
            quadrature_order += QUADRATURE_ORDER_INCREMENT
            (pts, wts) = self._get_quadrature_points_and_weights(quadrature_order)
            integral = float(np.dot(wts, evaluate_model(pts, integrand)))
            quadrature_error = np.abs(integral - integral_before)
            integral_before = integral
            if quadrature_order >= ORDER_LIMIT:
                # BUG FIX: was `raise (RuntimeError, msg)` — invalid in Python 3.
                raise RuntimeError((((('Even with ' + str((ORDER_LIMIT + 1))) + ' points, an error in the mean of ') + str(0.0001)) + 'cannot be obtained.'))
        if quadrature_order_output is True:
            return (integral, quadrature_order)
        else:
            return integral

    def _set_variance(self):
        variance_integrand = lambda x: (((x - self.mean) ** 2) * self.weight_function(x)) * self.integration_constant
        (self.variance, self._variance_quadrature_order) = self._iterative_quadrature_computation(variance_integrand)
class OptionPlotoptionsHeatmapMarkerStates(Options):
    """Auto-generated wrapper for `plotOptions.heatmap.marker.states` sub-options."""

    def hover(self) -> 'OptionPlotoptionsHeatmapMarkerStatesHover':
        # Lazily-created `hover` sub-configuration.
        return self._config_sub_data('hover', OptionPlotoptionsHeatmapMarkerStatesHover)

    def normal(self) -> 'OptionPlotoptionsHeatmapMarkerStatesNormal':
        return self._config_sub_data('normal', OptionPlotoptionsHeatmapMarkerStatesNormal)

    def select(self) -> 'OptionPlotoptionsHeatmapMarkerStatesSelect':
        return self._config_sub_data('select', OptionPlotoptionsHeatmapMarkerStatesSelect)
# NOTE(review): the bare string below looks like a stripped build-step
# registration decorator; left in place pending confirmation.
('Build libcares - {arch}')
def build_libcares(version: str, arch: str = 'linux-x86_64'):
    """Clone, build and install a static c-ares library for `arch` at `version`."""
    libcares = ProjectPaths('c-ares', arch)
    pkgconfig.add(libcares)
    if libcares.is_installed():
        return
    # BUG FIX: the clone call had an unterminated string literal (the URL was
    # corrupted); the upstream c-ares repository is assumed here — confirm.
    libcares.repo.exists() or git.clone('https://github.com/c-ares/c-ares.git', libcares.repo)
    libcares.clean()
    with chdir(libcares.repo):
        git.checkout(version)
        # Regenerate the autotools build system before configuring.
        sh.aclocal()
        sh.autoheader()
        sh.libtoolize()
        sh.automake('--add-missing')
        sh.autoconf()
        configure(f'--prefix={libcares.install}', '--disable-shared', '--enable-static')
        make()
        make.install()
    libcares.set_installed()
class FindItemLocallyTestCase(TestCase):
    """Error-path tests for `find_item_locally`.

    NOTE(review): the patch decorators were mangled to bare tuples (syntax
    errors); restored as `@mock.patch(...)` — confirm the patch targets upstream.
    """

    @mock.patch('aea.cli.utils.package_utils.Path.exists', return_value=True)
    @mock.patch('aea.cli.utils.package_utils.ConfigLoader.from_configuration_type', _raise_validation_error)
    def test_find_item_locally_bad_config(self, *mocks):
        # An invalid configuration file must surface as a ClickException.
        public_id = PublicIdMock.from_str('fetchai/echo:0.20.6')
        with self.assertRaises(ClickException) as cm:
            find_item_locally(ContextMock(), 'skill', public_id)
        self.assertIn('configuration file not valid', cm.exception.message)

    @mock.patch('aea.cli.utils.package_utils.Path.exists', return_value=True)
    @mock.patch('aea.cli.utils.package_utils.open_file', mock.mock_open())
    @mock.patch('aea.cli.utils.package_utils.ConfigLoader.from_configuration_type', return_value=ConfigLoaderMock())
    def test_find_item_locally_cant_find(self, from_conftype_mock, *mocks):
        # A valid config whose author/version do not match must also fail.
        public_id = PublicIdMock.from_str('fetchai/echo:0.20.6')
        with self.assertRaises(ClickException) as cm:
            find_item_locally(ContextMock(), 'skill', public_id)
        self.assertEqual(cm.exception.message, 'Cannot find skill with author and version specified.')
class ResetPointInfo(betterproto.Message):
    """Proto message describing a workflow reset point.

    NOTE(review): betterproto messages are normally declared with a
    `@dataclass(...)` decorator, which appears stripped here — confirm.
    """
    # Checksum of the worker binary that created this reset point.
    binary_checksum: str = betterproto.string_field(1)
    # Run in which the reset point was recorded.
    run_id: str = betterproto.string_field(2)
    first_workflow_task_completed_id: int = betterproto.int64_field(3)
    create_time: datetime = betterproto.message_field(4)
    # After this time the point can no longer be reset to.
    expire_time: datetime = betterproto.message_field(5)
    resettable: bool = betterproto.bool_field(6)
class MahjongDrugHandler(THBEventHandler):
    """Fires the MahjongDrug follow-up action after a HealCard-based Heal resolves."""
    interested = ['action_after']

    def handle(self, evt_type, act):
        # Only react once a Heal action has fully resolved.
        if evt_type != 'action_after' or not isinstance(act, Heal):
            return act
        tgt = act.target
        if not tgt.has_skill(MahjongDrug):
            return act
        card = getattr(act, 'associated_card', None)
        if (not card) or (not card.is_card(HealCard)):
            return act
        g = self.game
        # Let the target choose whether to trigger the skill.
        if g.user_input([tgt], ChooseOptionInputlet(self, (False, True))):
            g.process_action(MahjongDrugAction(tgt, tgt))
        return act
class NetasciiReader():
    """Wrap a byte reader, translating output to netascii: LF -> CR LF, CR -> CR NUL."""

    def __init__(self, reader):
        self._reader = reader
        self._buffer = bytearray()  # translated bytes beyond what the caller consumed
        self._slurp = None          # full translated copy, populated by size()
        self._size = None

    def read(self, size):
        """Return up to `size` translated bytes (possibly fewer at end of stream)."""
        if self._slurp is not None:
            return self._slurp.read(size)
        data, buffer_size = bytearray(), 0
        if self._buffer:
            buffer_size = len(self._buffer)
            data.extend(self._buffer)
        # BUG FIX: when the carry-over buffer was larger than `size`, the old
        # code requested a negative count, which file-like objects treat as
        # "read everything" — clamp the request at zero.
        for char in self._reader.read(max(0, size - buffer_size)):
            if char == ord('\n'):
                data.extend([ord('\r'), ord('\n')])
            elif char == ord('\r'):
                data.extend([ord('\r'), 0])
            else:
                data.append(char)
        # Keep any translated surplus for the next call.
        self._buffer = bytearray(data[size:])
        return data[:size]

    def close(self):
        self._reader.close()

    def size(self):
        """Return the total translated size, caching the translated data for later reads."""
        if self._size is not None:
            return self._size
        slurp, size = io.BytesIO(), 0
        while True:
            data = self.read(512)
            if not data:
                break
            size += slurp.write(data)
        self._slurp, self._size = slurp, size
        self._slurp.seek(0)
        return size
def test_text_store__raise_unsupported_version(tmp_path) -> None:
    """_raise_unsupported_version must explain each unsupported seed-version case."""
    store = TextStore(os.path.join(tmp_path, 'database'))
    try:
        with pytest.raises(Exception) as excinfo:
            store._raise_unsupported_version(5)
        assert 'To open this wallet' in excinfo.value.args[0]
        with pytest.raises(Exception) as excinfo:
            store._raise_unsupported_version(6)
        assert 'It does not contain any keys' in excinfo.value.args[0]
        # Once keys are present, version 6 produces different guidance.
        store.put('master_public_keys', 1)
        with pytest.raises(Exception) as excinfo:
            store._raise_unsupported_version(6)
        assert 'Please open this file' in excinfo.value.args[0]
    finally:
        store.close()
# NOTE(review): the bare `()` above the original def looks like a stripped
# `@pytest.fixture()` decorator; restored here — confirm against the module.
@pytest.fixture()
def setup_to_pass():
    """Disable core dumps of setuid binaries; remove the config files on teardown."""
    shellexec('echo -e "*\thard\tcore\t0" > /etc/security/limits.d/pytest.conf')
    shellexec('echo 0 > /proc/sys/fs/suid_dumpable')
    shellexec('echo -e "fs.suid_dumpable = 0" > /etc/sysctl.d/pytest.conf')
    yield None
    # Teardown: drop the temporary configuration files.
    os.remove('/etc/security/limits.d/pytest.conf')
    os.remove('/etc/sysctl.d/pytest.conf')
# NOTE(review): decorator was mangled to a bare `.skipcomplex` (a syntax
# error); restored as the firedrake `skipcomplex` pytest mark — confirm.
@pytest.mark.skipcomplex
def test_step_function_loop(mesh, iterations=100):
    """Advect a step function with SSPRK3 + vertex-based limiting; values must stay in [0, 1]."""
    v = space(mesh)
    m = VectorFunctionSpace(mesh, 'CG', 1)
    # Constant unit velocity matched to the mesh's geometric dimension.
    if m.shape == (1,):
        u0 = as_vector([1])
    else:
        u0 = as_vector([1, 0])
    u = Function(m).interpolate(u0)
    dt = 1.0 / iterations
    phi = TestFunction(v)
    D = TrialFunction(v)
    n = FacetNormal(mesh)
    # Upwind flux coefficient.
    un = 0.5 * (dot(u, n) + abs(dot(u, n)))
    a_mass = inner(D, phi) * dx
    a_int = inner((-u) * D, grad(phi)) * dx
    a_flux = inner((un('+') * D('+')) - (un('-') * D('-')), jump(phi)) * dS
    arhs = a_mass - (dt * (a_int + a_flux))
    dD1 = Function(v)
    D1 = Function(v)
    x = SpatialCoordinate(mesh)
    # Initial condition: unit step at x = 0.5.
    D0 = conditional(real(x[0]) < 0.5, 1.0, 0.0)
    D = Function(v).interpolate(D0)
    D1.assign(D)
    t = 0.0
    T = iterations * dt
    problem = LinearVariationalProblem(a_mass, action(arhs, D1), dD1)
    solver = LinearVariationalSolver(problem, solver_parameters={'ksp_type': 'cg'})
    limiter = VertexBasedLimiter(v)
    limiter.apply(D)
    # Three-stage SSPRK3 time stepping, limiting after each stage.
    while t < (T - (dt / 2)):
        D1.assign(D)
        limiter.apply(D1)
        solver.solve()
        D1.assign(dD1)
        limiter.apply(D1)
        solver.solve()
        D1.assign((0.75 * D) + (0.25 * dD1))
        limiter.apply(D1)
        solver.solve()
        D.assign(((1.0 / 3.0) * D) + ((2.0 / 3.0) * dD1))
        limiter.apply(D1)
        t += dt
    # NOTE(review): these bounds are checked on the constant velocity `u` and
    # pass trivially; presumably the limited field `D`/`D1` was intended —
    # confirm before changing the assertion target.
    assert (np.max(u.dat.data_ro) <= 1.0), 'Failed by exceeding max values'
    assert (np.min(u.dat.data_ro) >= 0.0), 'Failed by exceeding min values'
# NOTE(review): the route decorator was mangled to a bare string; restored
# assuming `APP` is the Flask application object referenced below — confirm.
@APP.route('/charge_control')
def get_charge_control():
    """Read or update a vehicle's charge-control settings, keyed by VIN query arg."""
    logger.info(request)
    vin = request.args['vin']
    charge_control = APP.chc.get(vin)
    if charge_control is None:
        return jsonify('error: VIN not in list')
    # Optional updates: stop time (hour+minute together) and percentage threshold.
    if ('hour' in request.args) and ('minute' in request.args):
        charge_control.set_stop_hour([int(request.args['hour']), int(request.args['minute'])])
    if 'percentage' in request.args:
        charge_control.percentage_threshold = int(request.args['percentage'])
    APP.chc.save_config()
    return jsonify(charge_control.get_dict())
class TestFINDPEAKS(unittest.TestCase):
    """End-to-end checks for the findpeaks API (2D, random-field and 1D inputs)."""

    def test_fit(self):
        import numpy as np
        import matplotlib.pyplot as plt
        from findpeaks import findpeaks
        # 2D topology detection, peaks only.
        fp = findpeaks(method='topology', whitelist=['peak'])
        X = fp.import_example('2dpeaks')
        results = fp.fit(X)
        assert (fp.type == 'peaks2d')
        assert ([*results.keys()] == ['Xraw', 'Xproc', 'Xdetect', 'Xranked', 'persistence', 'groups0'])
        assert ([*fp.args] == ['limit', 'scale', 'denoise', 'togray', 'imsize', 'figsize', 'type'])
        assert (results['Xraw'].shape == results['Xdetect'].shape)
        assert (results['Xproc'].shape == results['Xdetect'].shape)
        fp.plot(figsize=(25, 15), figure_order='horizontal')
        assert (len(results['Xdetect'][(results['Xdetect'] != 0)]) > 18)
        assert (len(results['Xranked'][(results['Xranked'] != 0)]) > 18)
        # Same detection with an explicit persistence limit of 0.
        fp = findpeaks(method='topology', limit=0, whitelist=['peak'])
        X = fp.import_example('2dpeaks')
        results = fp.fit(X)
        fp.plot(figsize=(25, 15), figure_order='horizontal')
        assert (len(results['Xdetect'][(results['Xdetect'] != 0)]) > 18)
        assert (len(results['Xranked'][(results['Xranked'] != 0)]) > 18)
        # 2D mask-based detection.
        fp = findpeaks(method='mask', verbose=3)
        X = fp.import_example('2dpeaks')
        results = fp.fit(X)
        assert (fp.type == 'peaks2d')
        assert ([*results.keys()] == ['Xraw', 'Xproc', 'Xdetect', 'Xranked'])
        assert ([*fp.args] == ['limit', 'scale', 'denoise', 'togray', 'imsize', 'figsize', 'type'])
        fp.plot(figsize=(25, 15), figure_order='horizontal')
        assert (np.sum(results['Xdetect']) == 20)
        assert (results['Xraw'].shape == results['Xdetect'].shape)
        assert (results['Xproc'].shape == results['Xdetect'].shape)
        # Smoothed random field: counts of significant peaks/valleys are seeded (rng 42).
        import numpy as np
        from scipy.ndimage import gaussian_filter
        from findpeaks import findpeaks
        rng = np.random.default_rng(42)
        x = rng.normal(size=(50, 50))
        x = gaussian_filter(x, sigma=10.0)
        fp = findpeaks(method='topology', whitelist=['peak', 'valley'], denoise=None, verbose=3)
        results = fp.fit(x)
        fp.plot(figsize=(25, 15), figure_order='horizontal')
        fp.plot_persistence()
        fp.plot_mesh()
        Iloc = (results['persistence']['score'] > 1)
        assert (results['persistence']['peak'][Iloc].sum() == 3)
        assert (results['persistence']['valley'][Iloc].sum() == 4)
        # Whitelist as a bare string: only peaks.
        fp = findpeaks(method='topology', whitelist='peak', denoise=None, verbose=3)
        fp.plot()
        results = fp.fit(x)
        Iloc = (results['persistence']['score'] > 1)
        assert (results['persistence']['peak'][Iloc].shape[0] == results['persistence']['peak'][Iloc].sum())
        # Whitelist as a bare string: only valleys.
        fp = findpeaks(method='topology', whitelist='valley', denoise=None, verbose=3)
        results = fp.fit(x)
        Iloc = (results['persistence']['score'] > 1)
        assert (results['persistence']['valley'].shape[0] == results['persistence']['valley'].sum())
        # 1D topology detection.
        fp = findpeaks(method='topology')
        X = fp.import_example('1dpeaks')
        results = fp.fit(X)
        assert (fp.type == 'peaks1d')
        assert ([*results.keys()] == ['persistence', 'Xdetect', 'Xranked', 'groups0', 'df'])
        assert ([*fp.args] == ['method', 'params', 'lookahead', 'interpolate', 'figsize', 'type'])
        assert (len(X) == len(results['Xdetect']))
        assert (len(X) == len(results['Xranked']))
        assert (len(X) == results['df'].shape[0])
        assert np.all(np.isin(results['df'].columns, ['x', 'y', 'labx', 'rank', 'score', 'valley', 'peak']))
        assert np.all(np.isin(results['persistence'].columns, ['x', 'y', 'birth_level', 'death_level', 'score']))
        assert (results['persistence'].shape[0] == 7)
        # With a limit, detected and ranked counts must match.
        X = fp.import_example('1dpeaks')
        fp = findpeaks(method='topology', limit=0.02)
        results = fp.fit(X)
        assert (len(results['Xdetect'][(results['Xdetect'] != 0)]) == len(results['Xranked'][(results['Xranked'] != 0)]))
        # 1D peakdetect.
        fp = findpeaks(method='peakdetect', lookahead=1, verbose=3)
        X = fp.import_example('1dpeaks')
        results = fp.fit(X)
        assert (fp.type == 'peaks1d')
        assert ([*results.keys()] == ['df'])
        assert ([*fp.args] == ['method', 'params', 'lookahead', 'interpolate', 'figsize', 'type'])
        assert (len(X) == results['df'].shape[0])
        assert np.all(np.isin(results['df'].columns, ['x', 'y', 'labx', 'valley', 'peak', 'rank', 'score']))
        assert (results['df']['peak'].sum() == 2)
        assert (results['df']['valley'].sum() == 4)
        # Sweep method/interpolate/lookahead combinations; fit must always succeed.
        X = [10, 11, 9, 23, 21, 11, 45, 20, 11, 12]
        methods = ['topology', 'peakdetect', None]
        interpolates = [None, 1, 10, 1000]
        lookaheads = [None, 0, 1, 10, 100]
        for method in methods:
            for interpolate in interpolates:
                for lookahead in lookaheads:
                    fp = findpeaks(lookahead=lookahead, interpolate=interpolate, method=method, verbose=0)
                    assert fp.fit(X)

    def test_denoising(self):
        """Exercise every denoising filter and the denoise/method parameter sweep."""
        from findpeaks import findpeaks
        fp = findpeaks()
        img = fp.import_example('2dpeaks_image')
        import findpeaks
        # Filter tuning constants.
        winsize = 15
        k_value1 = 2.0
        k_value2 = 1.0
        cu_value = 0.25
        cu_lee_enhanced = 0.523
        cmax_value = 1.73
        # Normalise the image before filtering.
        img = findpeaks.stats.resize(img, size=(300, 300))
        img = findpeaks.stats.togray(img)
        img = findpeaks.stats.scale(img)
        # Each filter must run without raising.
        img_fastnl = findpeaks.stats.denoise(img.copy(), method='fastnl', window=winsize)
        img_bilateral = findpeaks.stats.denoise(img.copy(), method='bilateral', window=winsize)
        image_frost = findpeaks.stats.frost_filter(img.copy(), damping_factor=k_value1, win_size=winsize)
        image_kuan = findpeaks.stats.kuan_filter(img.copy(), win_size=winsize, cu=cu_value)
        image_lee = findpeaks.stats.lee_filter(img.copy(), win_size=winsize, cu=cu_value)
        image_lee_enhanced = findpeaks.stats.lee_enhanced_filter(img.copy(), win_size=winsize, k=k_value2, cu=cu_lee_enhanced, cmax=cmax_value)
        image_lee_sigma = findpeaks.stats.lee_sigma_filter(img.copy())
        image_mean = findpeaks.stats.mean_filter(img.copy(), win_size=winsize)
        image_median = findpeaks.stats.median_filter(img.copy(), win_size=winsize)
        # Sweep denoise filter / window / cu combinations; fit must always succeed.
        from findpeaks import findpeaks
        methods = ['caerus', 'mask', 'topology', None]
        filters = ['lee', 'lee_enhanced', 'lee_sigma', 'kuan', 'fastnl', 'bilateral', 'frost', 'median', 'mean', None]
        windows = [None, 3, 63]
        cus = [None, 0, 0.75]
        img = fp.import_example('2dpeaks')
        for getfilter in filters:
            for window in windows:
                for cu in cus:
                    fp = findpeaks(method='topology', scale=True, denoise=getfilter, params={'window': window, 'cu': cu}, togray=True, imsize=None, verbose=3)
                    assert fp.fit(img)
class BasePayload(BaseHandler):
def __init__(self, s3_config: Optional[_T]=None):
    """Initialize the payload handler, forwarding the optional S3 configuration to the base handler."""
    super().__init__(s3_config=s3_config)
def _update_payload(base_payload, input_classes, ignore_classes, payload):
def payload(self, input_classes, ignore_classes, path, cmd_args, deps):
    """Build the merged configuration payload for *path* and apply command-line overrides.

    Delegates the recursive file read to ``_payload`` (with this file as the
    root) and then layers CLI argument values on top via ``_handle_overrides``.
    """
    merged = self._payload(input_classes, ignore_classes, path, deps, root=True)
    return self._handle_overrides(merged, ignore_classes, cmd_args)
def _payload(self, input_classes, ignore_classes, path, deps, root=False):
    """Load one config file (recursively resolving its includes) into a dict.

    Returns an empty dict when *path* is None. *deps* tracks every file read so
    duplicate and cyclical includes can be rejected by ``_handle_dependencies``.
    """
    payload = {}
    if (path is not None):
        # Pick the loader by file extension (e.g. .yaml/.json/.toml).
        config_extension = Path(path).suffix.lower()
        self._check_extension(file_extension=config_extension)
        base_payload = self._supported_extensions.get(config_extension)().load(path, s3_config=self._s3_config)
        # An empty file loads as None; normalise to an empty mapping.
        base_payload = ({} if (base_payload is None) else base_payload)
        # Record this read and fail on duplicate/cyclical dependencies.
        deps = self._handle_dependencies(deps, path, root)
        # A 'config' key lists further files to include before this one's values.
        if ('config' in base_payload):
            payload = self._handle_includes(base_payload, config_extension, input_classes, ignore_classes, path, payload, deps)
        payload = self._update_payload(base_payload, input_classes, ignore_classes, payload)
    return payload
def _handle_dependencies(deps, path, root):
if (root and (path in deps.get('paths'))):
raise ValueError(f'Duplicate Read -- Config file {path} has already been encountered. Please remove duplicate reads of config files.')
elif ((path in deps.get('paths')) or (path in deps.get('rel_paths'))):
raise ValueError(f'Cyclical Dependency -- Config file {path} has already been encountered. Please remove cyclical dependencies between config files.')
else:
deps.get('paths').append(path)
deps.get('rel_paths').append(os.path.basename(path))
if root:
deps.get('roots').append(path)
return deps
def _handle_includes(self, base_payload, config_extension, input_classes, ignore_classes, path: Path, payload, deps):
    """Resolve and merge every file listed under the 'config' key of *base_payload*.

    Each include is tried as, in order: an S3 URI, a path resolvable from the
    current working directory, then a path relative to the including file.

    Raises:
        RuntimeError: if an include cannot be located by any of the strategies.
    """
    included_params = {}
    for inc_path in base_payload['config']:
        if check_path_s3(inc_path):
            use_path = inc_path
        elif os.path.exists(inc_path):
            use_path = inc_path
        # BUG FIX: the original condition was `elif os.path.join(...)`, which is
        # always a non-empty (truthy) string, so it matched unconditionally and
        # the RuntimeError below was unreachable. Test for existence of the
        # include relative to the including file's directory instead.
        elif os.path.exists(os.path.join(os.path.dirname(path), inc_path)):
            use_path = (path.parent / inc_path)
        else:
            raise RuntimeError(f'Could not find included {config_extension} file {inc_path} or is not an S3 URI!')
        included_params.update(self._payload(input_classes, ignore_classes, use_path, deps))
    payload.update(included_params)
    return payload
def _handle_overrides(self, payload, ignore_classes, args):
    """Layer non-None CLI argument values on top of *payload*.

    Entries belonging to ignored classes are pruned first; the reserved
    'config' and 'help' keys are never treated as overrides.
    """
    skip_keys = ['config', 'help']
    for key, value in self._prune_args(args, ignore_classes).items():
        if key in skip_keys or value is None:
            continue
        payload = self._handle_payload_override(payload, key, value)
    return payload
def _prune_args(args, ignore_classes):
ignored_stems = [val.__name__ for val in ignore_classes]
return {k: v for (k, v) in vars(args).items() if (k.split('.')[0] not in ignored_stems)}
def _handle_payload_override(payload, key, value): |
class AddrFilenamePairAction(argparse.Action):
    """Argparse action that consumes alternating (address, filename) pairs.

    Parses each address as an integer (base auto-detected via ``int(x, 0)``),
    opens each file for binary reading, converts Intel-HEX inputs to raw
    binary, and rejects pairs whose flash sector ranges overlap.
    """

    def __init__(self, option_strings, dest, nargs='+', **kwargs):
        super(AddrFilenamePairAction, self).__init__(option_strings, dest, nargs, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        """Validate the pair list and store it on *namespace* as [(address, file), ...]."""
        pairs = []
        for i in range(0, len(values), 2):
            try:
                # Base 0 lets users write decimal, 0x-hex or 0o-octal addresses.
                address = int(values[i], 0)
            except ValueError:
                raise argparse.ArgumentError(self, ('Address "%s" must be a number' % values[i]))
            try:
                argfile = open(values[(i + 1)], 'rb')
            except IOError as e:
                raise argparse.ArgumentError(self, e)
            except IndexError:
                # Odd number of values: an address arrived without a filename.
                raise argparse.ArgumentError(self, 'Must be pairs of an address and the binary filename to write there')
            # Transparently convert Intel HEX images to raw binary at the target address.
            argfile = intel_hex_to_bin(argfile, address)
            pairs.append((address, argfile))
        # Sort by address and verify that the flash sectors of consecutive
        # images do not overlap.
        end = 0
        for (address, argfile) in sorted(pairs, key=(lambda x: x[0])):
            # Determine the file size by seeking to its end.
            argfile.seek(0, 2)
            size = argfile.tell()
            argfile.seek(0)
            # Round the start down and the end up to flash sector boundaries
            # (sector_end is inclusive).
            sector_start = (address & (~ (ESPLoader.FLASH_SECTOR_SIZE - 1)))
            sector_end = (((((address + size) + ESPLoader.FLASH_SECTOR_SIZE) - 1) & (~ (ESPLoader.FLASH_SECTOR_SIZE - 1))) - 1)
            if (sector_start < end):
                message = ('Detected overlap at address: 0x%x for file: %s' % (address, argfile.name))
                raise argparse.ArgumentError(self, message)
            end = sector_end
        setattr(namespace, self.dest, pairs)
def test_seqcap_align_mafft_untrim(o_dir, e_dir, request):
    """Run phyluce's seqcap aligner (mafft, --no-trim) and diff its outputs against fixtures.

    Args:
        o_dir: temporary output directory fixture.
        e_dir: directory holding the expected-output fixtures.
        request: pytest request fixture (used for the repo root dir).
    """
    program = 'bin/align/phyluce_align_seqcap_align'
    output = os.path.join(o_dir, 'mafft')
    cmd = [os.path.join(request.config.rootdir, program), '--input', os.path.join(e_dir, 'taxon-set.incomplete.fasta'), '--output', output, '--taxa', '4', '--aligner', 'mafft', '--output-format', 'nexus', '--no-trim', '--cores', '1']
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = proc.communicate()
    # FIX: the original passed `print(...)` as the assert message, which always
    # evaluates to None; use the decoded stderr directly so failures are readable.
    assert (proc.returncode == 0), stderr.decode('utf-8')
    output_files = glob.glob(os.path.join(output, '*'))
    assert output_files, 'There are no output files'
    for output_file in output_files:
        name = os.path.basename(output_file)
        expected_file = os.path.join(e_dir, 'mafft-no-trim', name)
        # FIX: close file handles deterministically (the original leaked two
        # open files per alignment).
        with open(output_file) as observed_fh:
            observed = observed_fh.read()
        with open(expected_file) as expected_fh:
            expected = expected_fh.read()
        assert (observed == expected)
def test_that_sort_orders_by_line_number_after_filename():
    """sorted() must order ErrorInfo primarily by filename, then by line number."""
    shuffled = [
        ErrorInfo('', filename='a', line=1),
        ErrorInfo('', filename='b', line=1),
        ErrorInfo('', filename='a', line=2),
        ErrorInfo('', filename='b', line=2),
    ]
    expected = [
        ErrorInfo('', filename='a', line=1),
        ErrorInfo('', filename='a', line=2),
        ErrorInfo('', filename='b', line=1),
        ErrorInfo('', filename='b', line=2),
    ]
    assert sorted(shuffled) == expected
def get_file_locations():
    """Scan ``<cwd>/graph_data/<env>/`` trees and collect per-environment data paths.

    Returns:
        dict mapping each environment directory name to a dict with keys
        'seeds' (path to its seeds.txt), 'stable_baselines' and
        'ppo_for_beginners' (lists of files in the matching directories),
        populated only for the entries actually found on disk.
    """
    data_path = os.path.join(os.getcwd(), 'graph_data')
    envs = [env for env in os.listdir(data_path) if os.path.isdir(os.path.join(data_path, env))]
    paths = {}
    for env in envs:
        env_data = {}
        for (directory, _, filenames) in os.walk(os.path.join(data_path, env)):
            if ('seeds.txt' in filenames):
                # BUG FIX: previously used filenames[0], which is only correct
                # when seeds.txt happens to be first (or the only file) in the
                # directory listing; point at seeds.txt explicitly.
                env_data['seeds'] = os.path.join(directory, 'seeds.txt')
            elif ('stable_baselines' in directory):
                env_data['stable_baselines'] = [os.path.join(directory, filename) for filename in filenames]
            elif ('ppo_for_beginners' in directory):
                env_data['ppo_for_beginners'] = [os.path.join(directory, filename) for filename in filenames]
        paths[env] = env_data
    return paths
class DictionaryItemResponse(ModelComposed):
    """Generated OpenAPI composed model for a dictionary-item API response.

    NOTE(review): the bare `_property` and `_js_args_to_python_args` lines below
    appear to be decorators whose `@cached_` / `@convert_` prefixes were lost
    during extraction — confirm against the code generator's output.
    """

    # Enum constraints per property (none for this model).
    allowed_values = {}
    # Extra validation rules per property (none for this model).
    validations = {}

    _property
    def additional_properties_type():
        """Types accepted for properties not declared in the spec."""
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type)

    _nullable = False

    _property
    def openapi_types():
        """Map of property name -> tuple of accepted types (lazy to avoid import cycles)."""
        lazy_import()
        return {'item_key': (str,), 'item_value': (str,), 'created_at': (datetime, none_type), 'deleted_at': (datetime, none_type), 'updated_at': (datetime, none_type), 'dictionary_id': (str,), 'service_id': (str,)}

    _property
    def discriminator():
        # No oneOf discriminator for this model.
        return None

    # JSON key -> python attribute mapping (identical here).
    attribute_map = {'item_key': 'item_key', 'item_value': 'item_value', 'created_at': 'created_at', 'deleted_at': 'deleted_at', 'updated_at': 'updated_at', 'dictionary_id': 'dictionary_id', 'service_id': 'service_id'}
    # Server-assigned properties that clients may not set directly.
    read_only_vars = {'created_at', 'deleted_at', 'updated_at', 'dictionary_id', 'service_id'}

    _js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Construct an instance from deserialized API data (read-only vars allowed)."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        constant_args = {'_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes}
        # Distribute kwargs across the composed (allOf) component models.
        composed_info = validate_get_composed_info(constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for (var_name, var_value) in kwargs.items():
            # Silently drop unknown keys when the configuration asks for it.
            if ((var_name in discarded_args) and (self._configuration is not None) and self._configuration.discard_unknown_keys and self._additional_properties_model_instances):
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal attributes that must always exist on an instance.
    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', '_composed_instances', '_var_name_to_model_instances', '_additional_properties_model_instances'])

    _js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Construct an instance from user input (read-only vars rejected)."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        constant_args = {'_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes}
        composed_info = validate_get_composed_info(constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for (var_name, var_value) in kwargs.items():
            if ((var_name in discarded_args) and (self._configuration is not None) and self._configuration.discard_unknown_keys and self._additional_properties_model_instances):
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, direct construction rejects read-only vars.
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')

    _property
    def _composed_schemas():
        """allOf/anyOf/oneOf composition of this model (lazy to avoid import cycles)."""
        lazy_import()
        return {'anyOf': [], 'allOf': [DictionaryItem, DictionaryItemResponseAllOf, Timestamps], 'oneOf': []}
class Keyboard_locks(IntervalModule):
    """i3pystatus module showing the Caps/Num/Scroll lock LED states via ``xset q``."""

    # Poll interval in seconds.
    interval = 1
    settings = (('format', 'Format string'), ('caps_on', 'String to show in {caps} when CAPS LOCK is on'), ('caps_off', 'String to show in {caps} when CAPS LOCK is off'), ('num_on', 'String to show in {num} when NUM LOCK is on'), ('num_off', 'String to show in {num} when NUM LOCK is off'), ('scroll_on', 'String to show in {scroll} when SCROLL LOCK is on'), ('scroll_off', 'String to show in {scroll} when SCROLL LOCK is off'), 'color')
    # Defaults for the display strings and output color.
    format = '{caps} {num} {scroll}'
    caps_on = 'CAP'
    caps_off = '___'
    num_on = 'NUM'
    num_off = '___'
    scroll_on = 'SCR'
    scroll_off = '___'
    color = '#FFFFFF'
    data = {}

    def get_status(self):
        """Return (caps, num, scroll) booleans parsed from ``xset q`` output.

        NOTE(review): ``str()`` on the bytes output yields a "b'...'" repr
        rather than a decoded string; the substring parsing below still works
        on that repr, but decoding explicitly would be cleaner — confirm.
        """
        xset = str(subprocess.check_output(['xset', 'q']))
        # Grab the 8 characters following each label; they contain 'on'/'off'.
        cap = xset.split('Caps Lock:')[1][0:8]
        num = xset.split('Num Lock:')[1][0:8]
        scr = xset.split('Scroll Lock:')[1][0:8]
        return (('on' in cap), ('on' in num), ('on' in scr))

    def run(self):
        """Refresh ``self.output`` with the formatted lock-state string."""
        (cap, num, scr) = self.get_status()
        self.data['caps'] = (self.caps_on if cap else self.caps_off)
        self.data['num'] = (self.num_on if num else self.num_off)
        self.data['scroll'] = (self.scroll_on if scr else self.scroll_off)
        output_format = self.format
        self.output = {'full_text': output_format.format(**self.data), 'color': self.color}
def _check_type(arg_name: str, arg_values: Any, expected_type: Union[(Type[Any], Tuple[(Type[Any], ...)])], element_type: Optional[Union[(Type[Any], Tuple[(Type[Any], ...)])]]=None) -> None:
if isinstance(expected_type, tuple):
class_names = [cls.__name__ for cls in expected_type]
expected_type_string = ', '.join(class_names)
else:
expected_type_string = expected_type.__name__
element_type_string = None
if (element_type is not None):
if isinstance(element_type, tuple):
class_names = [cls.__name__ for cls in element_type]
element_type_string = ', '.join(class_names)
else:
element_type_string = element_type.__name__
validation_failed = False
if (not isinstance(arg_values, expected_type)):
validation_failed = True
if (element_type is not None):
if (isinstance(arg_values, pd.DataFrame) and (not all((arg_values.dtypes == element_type)))):
validation_failed = True
if isinstance(arg_values, np.ndarray):
if ((arg_values.ndim == 2) and (not (arg_values.dtype == element_type))):
validation_failed = True
elif ((arg_values.ndim == 1) and (not all((isinstance(val, element_type) for val in arg_values)))):
validation_failed = True
elif (isinstance(arg_values, List) and (not all((isinstance(val, element_type) for val in arg_values)))):
validation_failed = True
if validation_failed:
error_msg = f'Error: {arg_name} is expected to be {expected_type_string}'
if element_type_string:
error_msg += f" with dtype '{element_type_string}'"
raise TypeError(error_msg) |
class OptionPlotoptionsColumnpyramidSonificationContexttracksActivewhen(Options):
    """Generated Highcharts option wrapper for columnpyramid sonification
    context-track ``activeWhen`` settings.

    NOTE(review): each getter/setter pair below shares a name; in the upstream
    generated source these are `@property` / `@<name>.setter` pairs whose
    decorators appear to have been stripped during extraction — confirm
    against the generator output.
    """

    def crossingDown(self):
        # Getter: track becomes active when the prop crosses below this value.
        return self._config_get(None)

    def crossingDown(self, num: float):
        self._config(num, js_type=False)

    def crossingUp(self):
        # Getter: track becomes active when the prop crosses above this value.
        return self._config_get(None)

    def crossingUp(self, num: float):
        self._config(num, js_type=False)

    def max(self):
        # Getter: upper bound of the active range.
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        # Getter: lower bound of the active range.
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def prop(self):
        # Getter: name of the point property the conditions apply to.
        return self._config_get(None)

    def prop(self, text: str):
        self._config(text, js_type=False)
def test_custom_token(sample_tenant, api_key):
    """End-to-end: a tenant-scoped custom token signs in and yields claims with the uid and tenant."""
    tenant_client = tenant_mgt.auth_for_tenant(sample_tenant.tenant_id)
    token = tenant_client.create_custom_token('user1')
    id_token = _sign_in(token, sample_tenant.tenant_id, api_key)
    decoded = tenant_client.verify_id_token(id_token)
    assert decoded['uid'] == 'user1'
    assert decoded['firebase']['tenant'] == sample_tenant.tenant_id
def _get_name_and_record_counts_from_union(schema: List[Schema]) -> Tuple[(int, int)]:
    """Count named types and record-like types within a union schema.

    Records and unresolved (non-primitive) references count toward both
    totals; enums and fixeds count as named only.

    Returns:
        (named_type_count, record_type_count)
    """
    named_count = 0
    record_count = 0
    for candidate in schema:
        kind = extract_record_type(candidate)
        if kind == 'record':
            record_count += 1
            named_count += 1
        elif kind in ('enum', 'fixed'):
            named_count += 1
        elif kind not in AVRO_TYPES:
            # A reference to a named schema defined elsewhere: treat as both.
            named_count += 1
            record_count += 1
    return (named_count, record_count)
def invalidate_aws_cloudfront_data(opts, owner, project, chroot):
    """Ask AWS CloudFront to drop cached results for one owner/project/chroot tree.

    No-op when no distribution ID is configured in *opts*.
    """
    distribution = opts.aws_cloudfront_distribution
    if not distribution:
        return
    url_prefix = urlparse(opts.results_baseurl).path
    path_pattern = '/'.join([url_prefix, owner, project, chroot]) + '/*'
    log.info('Invalidating CDN cache %s', path_pattern)
    run_cmd(
        ['aws', 'cloudfront', 'create-invalidation',
         '--distribution-id', distribution,
         '--paths', path_pattern],
        logger=log,
    )
def _setup_simple_unet_3d() -> Tuple[(SimpleUnet3D, torch.Tensor, torch.Tensor)]:
    """Build a SimpleUnet3D on the target device plus dummy inputs for smoke testing.

    Returns:
        (model, noise_batch, timestep_batch) where the noise batch has shape
        (1, in_channels, 32, 32, 32) and timesteps are random longs in [0, 1000).
    """
    model = SimpleUnet3D().to(device)
    batch = 1
    side = 32
    noise = torch.randn(batch, model.in_channels, side, side, side, dtype=torch.float32, device=device)
    timesteps = torch.randint(0, 1000, (batch,), dtype=torch.long, device=device)
    return (model, noise, timesteps)
# NOTE(review): the bare `()` and option tuples below appear to be
# `@click.command()` / `@click.option(...)` decorators whose `@click.option`
# prefixes were stripped during extraction — confirm against the CLI module.
()
('--strategy_interval', default=20, help='Update the current strategy whenever the iteration % strategy_interval == 0.')
('--n_iterations', default=1500, help='The total number of iterations we should train the model for.')
('--lcfr_threshold', default=400, help="A threshold for linear CFR which means don't apply discounting before this iteration.")
('--discount_interval', default=400, help='Discount the current regret and strategy whenever iteration % discount_interval == 0.')
('--prune_threshold', default=400, help='When a uniform random number is less than 95%, and the iteration > prune_threshold, use CFR with pruning.')
('--c', default=(- 20000), help="Pruning threshold for regret, which means when we are using CFR with pruning and have a state with a regret of less than `c`, then we'll elect to not recusrively visit it and it's child nodes.")
('--n_players', default=3, help='The number of players in the game.')
('--dump_iteration', default=20, help='When the iteration % dump_iteration == 0, we will compute a new strategy and write that to the accumlated strategy, which gets normalised at a later time.')
('--update_threshold', default=400, help='When the iteration is greater than update_threshold we can start updating the strategy.')
('--lut_path', default='.', help='The path to the files for clustering the infosets.')
('--pickle_dir', default=False, help='Whether or not the lut files are pickle files. This lookup method is deprecated.')
('--single_process/--multi_process', default=False, help="Either use or don't use multiple processes.")
('--sync_update_strategy/--async_update_strategy', default=False, help="Do or don't synchronise update_strategy.")
('--sync_cfr/--async_cfr', default=False, help="Do or don't synchronuse CFR.")
('--sync_discount/--async_discount', default=False, help="Do or don't synchronise the discounting.")
('--sync_serialise/--async_serialise', default=False, help="Do or don't synchronise the serialisation.")
('--nickname', default='', help='The nickname of the study.')
def start(strategy_interval: int, n_iterations: int, lcfr_threshold: int, discount_interval: int, prune_threshold: int, c: int, n_players: int, dump_iteration: int, update_threshold: int, lut_path: str, pickle_dir: bool, single_process: bool, sync_update_strategy: bool, sync_cfr: bool, sync_discount: bool, sync_serialise: bool, nickname: str):
    """Launch CFR training, either in-process or via the multiprocess Server.

    Persists the effective configuration to ``<save_path>/config.yaml`` first.
    """
    # Snapshot all CLI parameters as the run configuration. This relies on
    # being the first statement: locals() here contains exactly the arguments.
    config: Dict[(str, int)] = {**locals()}
    save_path: Path = utils.io.create_dir(nickname)
    with open((save_path / 'config.yaml'), 'w') as steam:
        yaml.dump(config, steam)
    if single_process:
        log.info('Only one process specified so using poker_ai.ai.singleprocess.simple_search for the optimisation.')
        simple_search(config=config, save_path=save_path, lut_path=lut_path, pickle_dir=pickle_dir, strategy_interval=strategy_interval, n_iterations=n_iterations, lcfr_threshold=lcfr_threshold, discount_interval=discount_interval, prune_threshold=prune_threshold, c=c, n_players=n_players, dump_iteration=dump_iteration, update_threshold=update_threshold)
    else:
        log.info('Mulitple processes specifed so using poker_ai.ai.multiprocess.server.Server for the optimisation.')
        server = Server(strategy_interval=strategy_interval, n_iterations=n_iterations, lcfr_threshold=lcfr_threshold, discount_interval=discount_interval, prune_threshold=prune_threshold, c=c, n_players=n_players, dump_iteration=dump_iteration, update_threshold=update_threshold, save_path=save_path, lut_path=lut_path, pickle_dir=pickle_dir, sync_update_strategy=sync_update_strategy, sync_cfr=sync_cfr, sync_discount=sync_discount, sync_serialise=sync_serialise)
        # Run the server with whatever crash/interrupt handling _safe_search provides.
        _safe_search(server)
# NOTE(review): the tuple below appears to be a FastAPI route decorator
# (e.g. `@router.get(...)`) whose prefix was stripped during extraction — confirm.
('/experiments/{experiment_id}/observations', response_model=List[js.ObservationOut])
def get_observations(*, res: LibresFacade=DEFAULT_LIBRESFACADE, experiment_id: UUID) -> List[js.ObservationOut]:
    """Return all observations for an experiment as ObservationOut records.

    NOTE(review): every record gets the placeholder id UUID(int=0) and empty
    userData, and *experiment_id* is not passed to create_observations —
    presumably intentional for this endpoint, confirm.
    """
    return [js.ObservationOut(id=UUID(int=0), userData=[], errors=obs['errors'], values=obs['values'], x_axis=obs['x_axis'], name=obs['name']) for obs in create_observations(res)]
# NOTE(review): the bare `_command` below appears to be a stripped CLI
# decorator (e.g. `@app.command`) — confirm against the CLI framework in use.
_command
def add_data(dm: DataManager, key: str, video: str=None, masks: str=None, mask_name: str=None, v2i_step: int=1, v2i_limit: int=200, v2i_skip: str=None, run_extra: bool=True):
    """Register a dataset under *key*, importing images/masks from a video if needed.

    Optionally runs the extra preprocessing passes (flow, segmentation,
    homography) after the dataset is loaded.
    """
    (key, ds_cat, ds_name) = validate_key(key)
    if (key not in dm.datasets):
        dm.datasets[key] = Dataset(ds_cat, ds_name)
    avail = dm.datasets[key].local
    if (not avail.images):
        # No images yet: a source video is mandatory to extract frames from.
        assert (video is not None), 'Images not found, provide with --video, or manually copy from elsewhere'
        copy_images(dm, key, 'rgb_1x', video, v2i_step, v2i_limit, v2i_skip, False)
    elif (video is not None):
        logger.warning('Image folder exists, provided video is ignored')
    # Normalise the comma-separated mask source list.
    if (masks is None):
        masks = []
    else:
        masks = masks.split(',')
    if (len(masks) > 0):
        mask_name = (mask_name or 'mask')
        if (mask_name not in avail.masks):
            # One numbered mask sub-folder per provided source.
            for (i, src) in enumerate(masks):
                copy_images(dm, key, f'masks/{mask_name}/{i:02d}', src, v2i_step, v2i_limit, v2i_skip, True)
        else:
            logger.warning(f'Mask folder ({mask_name}) exists, provided masks are ignored')
    dm.load_local_dataset(dm.get_local_data_path(key))
    if run_extra:
        run_flow(dm, key)
        run_segmentation(dm, key)
        # [sic] "homogrpahy" — external helper name, kept as-is.
        run_homogrpahy(dm, key)
def convert(config_path, input_path, output_path):
    """Convert a Diff-SVC checkpoint into a DiffSingerLightning checkpoint.

    Validates that the target config matches the source checkpoint (residual
    channels, spec min/max shape, speaker count), remaps the diffusion,
    pitch-encoder and speaker-encoder weights, and saves the result.
    """
    config = Config.fromfile(config_path)
    model = DiffSingerLightning(config)
    logger.info('Loading Diff-SVC checkpoint...')
    diff_svc_state_dict = torch.load(input_path, map_location='cpu')['state_dict']
    # --- Sanity check: denoiser residual channel count must match the config.
    residual_channels = diff_svc_state_dict['model.denoise_fn.input_projection.weight'].shape[0]
    if (residual_channels != config.model.diffusion.denoiser.residual_channels):
        logger.error(f'Residual channels mismatch: {residual_channels} vs {config.model.diffusion.denoiser.residual_channels}. Please update the `model.diffusion.denoiser.residual_channels` to {residual_channels} in the config file.')
        return
    logger.info(f'Residual channels: {residual_channels}')
    # --- Sanity check: spec_min/spec_max vectors must agree with the config.
    spec_min = diff_svc_state_dict['model.spec_min'].shape[(- 1)]
    spec_max = diff_svc_state_dict['model.spec_max'].shape[(- 1)]
    config_spec_min = model.model.diffusion.spec_min.shape[(- 1)]
    if (not (spec_min == spec_max == config_spec_min)):
        logger.error(f'Spec min and max shape mismatch: {spec_min} vs {spec_max} vs {config_spec_min}. Please update the `model.diffusion.spec_min` and `model.diffusion.spec_max` to [0] * {spec_min} in the config file.')
        return
    logger.info(f'Spec min and max shape: {spec_min}')
    # --- Remap diffusion/denoiser weights: translate each target key into the
    # Diff-SVC naming scheme and pop the matching source tensor.
    fish_denoiser_keys = list(model.model.diffusion.state_dict().keys())
    diffusion_state_dict = {}
    for i in fish_denoiser_keys:
        fixed = ('model.' + i.replace('.conv.', '.').replace('.linear.', '.').replace('.conv_layer.', '.dilated_conv.'))
        if ('_noise_predictor' in fixed):
            # Noise-predictor weights have no Diff-SVC counterpart; skipped here
            # and tolerated as missing keys in the strict=False load below.
            continue
        diffusion_state_dict[i] = diff_svc_state_dict.pop(fixed)
    # Drop top-level scalar entries (keys shaped 'model.<name>').
    for i in list(diff_svc_state_dict.keys()):
        x = i.split('.')
        if ((x[0] == 'model') and (len(x) == 2)):
            diff_svc_state_dict.pop(i)
    # Everything left should belong to the fs2 front-end; otherwise keys were missed.
    if any(((not k.startswith('model.fs2')) for k in diff_svc_state_dict.keys())):
        logger.error(f'Keys not mapped: {diff_svc_state_dict.keys()}')
        return
    result = model.model.diffusion.load_state_dict(diffusion_state_dict, strict=False)
    assert (all((('_noise_predictor' in k) for k in result.missing_keys)) and (not result.unexpected_keys))
    logger.info('Diffusion and Denoiser are converted.')
    # --- Pitch encoder: a single embedding table.
    pitch_encoder_state_dict = {'embedding.weight': diff_svc_state_dict.pop('model.fs2.pitch_embed.weight')}
    model.model.pitch_encoder.load_state_dict(pitch_encoder_state_dict, strict=True)
    logger.info('Pitch Encoder is converted.')
    # --- Speaker encoder: copy if present, otherwise zero-init the embedding.
    if ('model.fs2.spk_embed_proj.weight' in diff_svc_state_dict):
        speaker_encoder_state_dict = {'embedding.weight': diff_svc_state_dict.pop('model.fs2.spk_embed_proj.weight')}
        num_speakers = model.model.speaker_encoder.embedding.weight.shape[0]
        diff_svc_num_speakers = speaker_encoder_state_dict['embedding.weight'].shape[0]
        if (diff_svc_num_speakers != num_speakers):
            logger.error(f'Speaker number mismatch: {diff_svc_num_speakers} vs {num_speakers}. Please update the speaker_encoder.input_size to {diff_svc_num_speakers} in the config file.')
            return
        model.model.speaker_encoder.load_state_dict(speaker_encoder_state_dict, strict=True)
        logger.info('Speaker Encoder is converted.')
    else:
        logger.info('Speaker Encoder not found in the checkpoint, set to zero.')
        model.model.speaker_encoder.embedding.weight.data.zero_()
    torch.save(model.state_dict(), output_path)
    logger.info('All components are converted.')
    logger.info(f'Saved to {output_path}')
class TestMarkdown():
    """Tests for the Markdown register-map generator."""

    def test_md(self, tmpdir):
        """Generate a Markdown register map into *tmpdir* and check key sections exist."""
        out_path = str(tmpdir.join('regs.md'))
        print('md_path:', out_path)
        register_map = utils.create_template()
        generators.Markdown(register_map, out_path).generate()
        with open(out_path, 'r') as fh:
            content = ''.join(fh.readlines())
        assert '## Register map' in content
        assert 'Back to [Register map](#register-map-summary).' in content
# NOTE(review): the bare name below appears to be a stripped
# `@python_2_unicode_compatible` decorator — confirm against the original source.
_2_unicode_compatible
class MediaType(object):
    """Parsed HTTP media type (e.g. 'text/html; q=0.8') with matching and precedence helpers."""

    def __init__(self, media_type_str):
        if (media_type_str is None):
            # Treat a missing header as an empty media type.
            media_type_str = ''
        self.orig = media_type_str
        (self.full_type, self.params) = parse_header(media_type_str.encode(HTTP_HEADER_ENCODING))
        (self.main_type, sep, self.sub_type) = self.full_type.partition('/')

    def match(self, other):
        """Return True if *other* is acceptable under this media type.

        '*' acts as a wildcard in main/sub type; all non-'q' parameters must
        agree between the two types.
        """
        for key in self.params.keys():
            if ((key != 'q') and (other.params.get(key, None) != self.params.get(key, None))):
                return False
        if ((self.sub_type != '*') and (other.sub_type != '*') and (other.sub_type != self.sub_type)):
            return False
        if ((self.main_type != '*') and (other.main_type != '*') and (other.main_type != self.main_type)):
            return False
        return True

    def precedence(self):
        """Return specificity 0-3: */* < type/* < type/subtype < type/subtype;param."""
        if (self.main_type == '*'):
            return 0
        elif (self.sub_type == '*'):
            return 1
        elif ((not self.params) or (list(self.params.keys()) == ['q'])):
            return 2
        return 3

    def __str__(self):
        ret = ('%s/%s' % (self.main_type, self.sub_type))
        for (key, val) in self.params.items():
            ret += ('; %s=%s' % (key, val))
        return ret
def infer_fids_by_tr_outputs(output_filename=None):
    """Collect (fid, name) pairs from all TR output fields and write them to Excel.

    Field names are normalised (prefix/suffix munging, upper-cased), duplicates
    removed, and the result sorted by fid before being written without
    header or index.
    """
    if (output_filename is None):
        output_filename = 'fid.xlsx'
    infos = KiwoomOpenApiPlusTrInfo.infos_from_data_dir()
    fields = []
    for info in infos:
        for field in info.single_outputs:
            fields.append(field)
        for field in info.multi_outputs:
            fields.append(field)
    # Keep only fields with a real fid.
    pairs = [[field.fid, field.name] for field in fields if (field.fid != (- 1))]
    import string
    for pair in pairs:
        # NOTE(review): drops TWO leading characters when the name starts with
        # '_' — presumably a known two-character prefix, confirm the intent.
        if pair[1].startswith('_'):
            pair[1] = pair[1][2:]
        # NOTE(review): strips a trailing 'n'/'s' when the name does not start
        # with an ASCII letter — likely a pluralisation/format suffix, confirm.
        if ((not (pair[1][0] in string.ascii_letters)) and (pair[1][(- 1)] in 'ns')):
            pair[1] = pair[1][:(- 1)]
        pair[1] = pair[1].upper()
    # De-duplicate and order by fid.
    pairs = [tuple(pair) for pair in pairs]
    pairs = list(set(pairs))
    pairs = sorted(pairs, key=(lambda item: item[0]))
    import pandas as pd
    df = pd.DataFrame.from_records(pairs)
    df.to_excel(output_filename, header=False, index=False)
def run_tests(package, mask, verbosity, search_leaks):
    """Discover and run the package's test cases.

    Optionally re-runs each case under refcount tracking to hunt for leaks.
    Returns True when any test errored or failed.
    """
    skipped, testcases = get_tests(package, mask, verbosity)
    runner = TestRunner(verbosity=verbosity)
    combined = unittest.TestSuite(unittest.makeSuite(case) for case in testcases)
    result = runner.run(combined, skipped)
    if search_leaks:
        leak_runner = BasicTestRunner()
        for case in testcases:
            test_with_refcounts(leak_runner, verbosity, case)
    return bool(result.errors) or bool(result.failures)
def _get_results_for_tar_file(file_info: tarfile.TarInfo) -> FileMetadata:
    """Convert a TarInfo entry into a FileMetadata record.

    Normalises a leading './' off the archive path, renders the mode string,
    derives the special permission bits from it, and base64-encodes the path
    as a stable key.
    """
    archive_path = file_info.name
    if archive_path[:2] == './':
        archive_path = archive_path[2:]
    mode_str = _get_tar_file_mode_str(file_info)
    return FileMetadata(
        mode=mode_str,
        name=Path(archive_path).name,
        path=archive_path,
        user=file_info.uname,
        group=file_info.gname,
        uid=file_info.uid,
        gid=file_info.gid,
        modification_time=file_info.mtime,
        suid_bit=_file_mode_contains_bit(mode_str, SUID_BIT),
        sgid_bit=_file_mode_contains_bit(mode_str, SGID_BIT),
        sticky_bit=_file_mode_contains_bit(mode_str, STICKY_BIT),
        key=b64encode(archive_path.encode()).decode(),
    )
class OptionPlotoptionsVennSonificationContexttracksMappingTime(Options):
    """Generated Highcharts option wrapper for venn sonification context-track
    time-mapping settings.

    NOTE(review): each getter/setter pair below shares a name; in the upstream
    generated source these are `@property` / `@<name>.setter` pairs whose
    decorators appear to have been stripped during extraction — confirm
    against the generator output.
    """

    def mapFunction(self):
        # Getter: mapping curve ('linear'/'logarithmic' in Highcharts docs).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        # Getter: data property the mapping reads from.
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        # Getter: upper bound of the mapped output range.
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        # Getter: lower bound of the mapped output range.
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        # Getter: what the mapping range is relative to.
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class Block():
    """A chain node tracking its depth (score) and an inherited transaction state."""

    def __init__(self, parent, txstate):
        self.parent = parent
        if parent is None:
            # Genesis block: depth 1, takes the provided state as-is.
            self.score = 1
            self.txstate = txstate
        else:
            self.score = parent.score + 1
            # Once an ancestor carries a non-None txstate it propagates down
            # unchanged; otherwise this block's own state is used.
            self.txstate = txstate if parent.txstate is None else parent.txstate
class FaucetTaggedMirrorTest(FaucetTaggedTest):
    """Faucet integration test: port mirroring on a tagged VLAN.

    Port 3 mirrors port 1; the test verifies mirrored unicast/broadcast
    traffic, iperf throughput, and that VLAN flows survive removing the
    mirror via a warm-started config change.
    """

    CONFIG_GLOBAL = '\nvlans:\n 100:\n description: "tagged"\n'
    CONFIG = '\n interfaces:\n %(port_1)d:\n tagged_vlans: [100]\n %(port_2)d:\n tagged_vlans: [100]\n %(port_3)d:\n # port 3 will mirror port 1\n mirror: %(port_1)d\n %(port_4)d:\n tagged_vlans: [100]\n'

    def test_tagged(self):
        """Exercise mirroring, then remove the mirror and re-check VLAN flows."""
        (first_host, second_host, mirror_host) = self.hosts_name_ordered()[0:3]
        self.flap_all_switch_ports()
        # Mirrored unicast and broadcast pings must reach the mirror host.
        self.verify_ping_mirrored(first_host, second_host, mirror_host)
        self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
        first_host_ip = ipaddress.ip_address(first_host.IP())
        second_host_ip = ipaddress.ip_address(second_host.IP())
        self.verify_iperf_min(((first_host, self.port_map['port_1']), (second_host, self.port_map['port_2'])), MIN_MBPS, first_host_ip, second_host_ip, sync_counters_func=(lambda : self.one_ipv4_ping(first_host, second_host_ip)))
        tagged_ports = (self.port_map['port_1'], self.port_map['port_2'], self.port_map['port_4'])
        # Each tagged port must have a VLAN-table flow forwarding to eth_src.
        for port in tagged_ports:
            self.wait_until_matching_flow({'vlan_vid': 100, 'in_port': port}, table_id=self._VLAN_TABLE, actions=[('GOTO_TABLE:%u' % self._ETH_SRC_TABLE)])
        # Remove the mirror with a warm restart and confirm flows are intact.
        self.change_port_config(self.port_map['port_3'], 'mirror', None, restart=True, cold_start=False)
        for port in tagged_ports:
            self.wait_until_matching_flow({'vlan_vid': 100, 'in_port': port}, table_id=self._VLAN_TABLE, actions=[('GOTO_TABLE:%u' % self._ETH_SRC_TABLE)])
class OptionSeriesStreamgraphSonificationDefaultinstrumentoptionsMappingFrequency(Options):
    """Generated Highcharts option wrapper for streamgraph sonification
    default-instrument frequency-mapping settings.

    NOTE(review): each getter/setter pair below shares a name; in the upstream
    generated source these are `@property` / `@<name>.setter` pairs whose
    decorators appear to have been stripped during extraction — confirm
    against the generator output.
    """

    def mapFunction(self):
        # Getter: mapping curve ('linear'/'logarithmic' in Highcharts docs).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        # Getter: data property the mapping reads from.
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        # Getter: upper bound of the mapped frequency range.
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        # Getter: lower bound of the mapped frequency range.
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        # Getter: what the mapping range is relative to.
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class ScheduleAByStateRecipientTotals(BaseModel):
    """Read model over the Schedule A by-state recipient totals materialized view."""

    __tablename__ = 'ofec_sched_a_aggregate_state_recipient_totals_mv'
    # Aggregated contribution total for the state/cycle/committee-type bucket.
    total = db.Column(db.Numeric(30, 2), index=True, doc='The calculated total.')
    # Number of underlying Schedule A records in the bucket.
    count = db.Column(db.Integer, index=True, doc='Number of records making up the total.')
    cycle = db.Column(db.Integer, index=True, doc=docs.CYCLE)
    # Two-letter state code and its full display name.
    state = db.Column(db.String, index=True, doc=docs.STATE_GENERIC)
    state_full = db.Column(db.String, index=True, doc=docs.STATE_GENERIC)
    # Committee type code and its full display name.
    committee_type = db.Column(db.String, index=True, doc=docs.COMMITTEE_TYPE)
    committee_type_full = db.Column(db.String, index=True, doc=docs.COMMITTEE_TYPE)
class Piece():
def __init__(self, x_pos, y_pos, color):
    """Create a piece at board position (x_pos, y_pos) with the given RGB color.

    Board coordinates are in units of an 8x8 grid; pixel positions are
    derived at draw time from the module-level `width`/`height` globals.
    """
    diameter = 0.7
    self.x = x_pos
    self.y = y_pos
    self.radius = (diameter / 2)
    self.grabbed = False
    self.targeted = False
    self.color = color
    # Remember where a move started so it can be cancelled.
    self.start_x = self.x
    self.start_y = self.y
    text_scale = 0.85
    self.letter = 'X'
    # Font size scales with the piece diameter; the 640 factor presumably
    # matches the board's pixel size — TODO confirm against `width`/`height`.
    self.font = pygame.font.SysFont('segoeuisymbol', int((((diameter / 8) * 640) * text_scale)))
    self.text = self.font.render(self.letter, True, (255, 255, 255))
    self.direction = False
    self.targeted = False
    self.turn = 0
def set_letter(self, letter):
    """Change the piece's letter and re-render it in the state-appropriate colour.

    Grabbed pieces render green; otherwise the letter uses the inverse of
    the piece colour for contrast.
    """
    self.letter = letter
    if self.grabbed:
        colour = (0, 255, 0)
    else:
        colour = ((255 - self.color[0]), (255 - self.color[1]), (255 - self.color[2]))
    self.text = self.font.render(self.letter, True, colour)
def can_promote(self):
    """Base pieces never promote; promotable subclasses override this."""
    return False
def draw_paths(self, pieces):
    """Draw this piece's legal movement paths; no-op in the base class."""
    pass
def target(self):
    """Mark this piece as a capture target and render its letter in red."""
    self.targeted = True
    self.text = self.font.render(self.letter, True, (255, 0, 0))
def untarget(self):
    """Clear target status and restore the normal letter rendering."""
    self.targeted = False
    self.set_letter(self.letter)
def draw(self):
    """Render the piece circle and its centred letter.

    Uses the module-level `width`, `height`, `screen` and `draw_circle`
    globals; board y grows upward so the pixel y is flipped.
    """
    x = int(((self.x / 8) * width))
    y = (height - int(((self.y / 8) * height)))
    draw_circle(screen, x, y, int(((self.radius / 8) * width)), self.color)
    # Centre the letter on the circle (nudged up 2px).
    screen.blit(self.text, ((x - (self.text.get_width() // 2)), ((y - 2) - (self.text.get_height() // 2))))
def try_grab(self, pos):
if (dist(pos, (self.x, self.y)) < self.radius):
self.grabbed = True
self.text = self.font.render(self.letter, True, (0, 255, 0))
def cancel(self, pieces):
if self.grabbed:
self.grabbed = False
for piece in pieces:
if piece.targeted:
piece.untarget()
self.direction = False
self.text = self.font.render(self.letter, True, ((255 - self.color[0]), (255 - self.color[1]), (255 - self.color[2])))
self.x = self.start_x
self.y = self.start_y
def confirm(self, pieces):
global whites_turn
if self.grabbed:
self.grabbed = False
for piece in pieces:
if piece.targeted:
piece.x = 100
piece.start_x = 100
self.direction = False
self.text = self.font.render(self.letter, True, ((255 - self.color[0]), (255 - self.color[1]), (255 - self.color[2])))
self.start_x = self.x
self.start_y = self.y
self.turn += 1
whites_turn = (not whites_turn)
def ungrab(self, pieces):
if self.grabbed:
if ((abs((self.x - self.start_x)) < (1 / 1000)) and (abs((self.y - self.start_y)) < (1 / 1000))):
self.cancel(pieces)
return
font = pygame.font.SysFont('oldenglishtext', int(80))
confirm_text = font.render('Confirm?', True, (0, 0, 0))
screen.blit(confirm_text, (((width // 2) - (confirm_text.get_width() // 2)), ((height // 2) - (confirm_text.get_height() // 2))))
pygame.display.flip()
while (not done):
for event in pygame.event.get():
if (event.type == pygame.QUIT):
pygame.quit()
quit()
elif (event.type == pygame.KEYDOWN):
if (event.key == pygame.K_RETURN):
self.confirm(pieces)
return
elif (event.key == pygame.K_ESCAPE):
self.cancel(pieces)
return
def overlaps(self, piece):
return (dist((self.x, self.y), (piece.x, piece.y)) < (self.radius * 2))
def slide(self, dx, dy, pieces, capture=True, fake=False):
all_pieces = pieces
if capture:
pieces = [p for p in pieces if (((((p.x - self.start_x) * dx) + ((p.y - self.start_y) * dy)) > 0) and (p != self) and (p.color == self.color))]
if fake:
pieces = [p for p in pieces if (((((p.x - self.start_x) * dx) + ((p.y - self.start_y) * dy)) > 0) and (p != self) and (p.color == self.color) and (p.targeted == False))]
else:
pieces = [p for p in pieces if (((((p.x - self.start_x) * dx) + ((p.y - self.start_y) * dy)) > 0) and (p != self))]
angle = math.atan2(dy, dx)
if ((0 <= self.start_x <= 8) and (0 <= self.start_y <= 8)):
if (abs(dx) > 0):
if (((self.start_x + dx) + self.radius) > 8):
ratio = (dy / dx)
dx = ((8 - self.start_x) - self.radius)
dy = (ratio * ((8 - self.start_x) - self.radius))
if (((self.start_x + dx) - self.radius) < 0):
ratio = (dy / dx)
dx = ((- self.start_x) + self.radius)
dy = (ratio * ((- self.start_x) + self.radius))
if (abs(dy) > 0):
if (((self.start_y + dy) + self.radius) > 8):
ratio = (dx / dy)
dy = ((8 - self.start_y) - self.radius)
dx = (ratio * ((8 - self.start_y) - self.radius))
if (((self.start_y + dy) - self.radius) < 0):
ratio = (dx / dy)
dy = ((- self.start_y) + self.radius)
dx = (ratio * ((- self.start_y) + self.radius))
first_block = False
block_dist =
block_perp_dist =
full_dist = math.sqrt(((dx ** 2) + (dy ** 2)))
new_dist = full_dist
for piece in pieces:
h = abs(((math.cos(angle) * (self.y - piece.y)) - (math.sin(angle) * (self.x - piece.x))))
if (h < (piece.radius * 2)):
proj_dist = math.sqrt(((dist((self.start_x, self.start_y), (piece.x, piece.y)) ** 2) - (h ** 2)))
if (proj_dist < block_dist):
block_dist = proj_dist
block_perp_dist = h
first_block = piece
hit_first_block = False
if first_block:
distance = dist((first_block.x, first_block.y), ((self.start_x + dx), (self.start_y + dy)))
if (math.sqrt(((dx ** 2) + (dy ** 2))) > block_dist):
hit_first_block = True
new_dist = (block_dist - math.sqrt(((4 * (self.radius ** 2)) - (block_perp_dist ** 2))))
if (abs(full_dist) > 0):
self.x = (self.start_x + ((dx * new_dist) / full_dist))
self.y = (self.start_y + ((dy * new_dist) / full_dist))
new_new_dist = new_dist
first_hit_piece = False
for piece in pieces:
if self.overlaps(piece):
block_perp_dist = abs(((math.cos(angle) * (self.y - piece.y)) - (math.sin(angle) * (self.x - piece.x))))
block_dist = math.sqrt(((dist((self.start_x, self.start_y), (piece.x, piece.y)) ** 2) - (block_perp_dist ** 2)))
new_new_dist = (block_dist - math.sqrt(((4 * (self.radius ** 2)) - (block_perp_dist ** 2))))
if (new_new_dist < new_dist):
new_dist = new_new_dist
first_hit_piece = piece
if (abs(full_dist) > 0):
self.x = (self.start_x + ((dx * new_dist) / full_dist))
self.y = (self.start_y + ((dy * new_dist) / full_dist))
else:
self.x = self.start_x
self.y = self.start_y
if capture:
self.slide_attack((self.x - self.start_x), (self.y - self.start_y), all_pieces, fake=fake)
def slide_attack(self, dx, dy, pieces, fake=False):
angle = math.atan2(dy, dx)
all_pieces = pieces
pieces = [p for p in pieces if (((((p.x - self.start_x) * dx) + ((p.y - self.start_y) * dy)) > 0) and (p != self) and (p.color != self.color))]
first_piece_hit = False
first_hit_dist =
perp_dist =
full_dist = math.sqrt(((dx ** 2) + (dy ** 2)))
new_dist = full_dist
for piece in pieces:
h = abs(((math.cos(angle) * (self.y - piece.y)) - (math.sin(angle) * (self.x - piece.x))))
if (h < (piece.radius * 2)):
d = dist((piece.x, piece.y), (self.start_x, self.start_y))
hit_dist = (math.sqrt(((d ** 2) - (h ** 2))) - math.sqrt(((4 * (piece.radius ** 2)) - (h ** 2))))
if (hit_dist < first_hit_dist):
first_hit_dist = hit_dist
perp_dist = h
first_piece_hit = piece
if (not fake):
for piece in all_pieces:
piece.untarget()
if first_piece_hit:
if self.overlaps(first_piece_hit):
if (not fake):
first_piece_hit.target()
elif (dist((self.x, self.y), (self.start_x, self.start_y)) > (first_hit_dist + (2 * math.sqrt(((4 * (piece.radius ** 2)) - (perp_dist ** 2)))))):
new_dist = (first_hit_dist + (2 * math.sqrt(((4 * (piece.radius ** 2)) - (perp_dist ** 2)))))
if (not fake):
first_piece_hit.target()
if (abs(full_dist) > 0):
self.x = (self.start_x + ((dx * new_dist) / full_dist))
self.y = (self.start_y + ((dy * new_dist) / full_dist))
if (not fake):
for piece in pieces:
if self.overlaps(piece):
piece.target()
def select_path(self, start, paths, point):
min_h = 9999999
min_path = None
for path in paths:
h = (abs(((path[0] * (start[1] - point[1])) - ((start[0] - point[0]) * path[1]))) / math.sqrt(((path[0] ** 2) + (path[1] ** 2))))
if (h < min_h):
min_h = h
min_path = path
dot_prod = ((path[0] * (point[0] - start[0])) + (path[1] * (point[1] - start[1])))
if (dot_prod == 0):
min_l = 0
else:
min_l = ((math.sqrt(((dist(point, start) ** 2) - (h ** 2))) * dot_prod) / abs(dot_prod))
return (min_path, min_l)
def draw_moves(self, pieces):
pass |
class OptionSeriesBellcurveSonificationTracksMappingLowpassResonance(Options):
    """Highcharts sonification lowpass-resonance mapping options for
    bellcurve series tracks.

    Each option forwards to the shared ``_config``/``_config_get``
    machinery; getters return the stored value (default None).

    NOTE(review): each getter/setter pair shares one ``def`` name — the
    original source presumably used @property/@x.setter decorators that
    were stripped from this copy; confirm.
    """
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class BaseEventStorage(ABC):
    """Abstract interface for event storage backends.

    NOTE(review): bodies are plain ``pass`` and no @abstractmethod
    decorators are visible; decorators may have been stripped from this
    copy of the source — confirm.
    """
    def add_event(self, event: Event, uuid: str):
        """Persist ``event`` under the given ``uuid``."""
        pass
    def list_events(self, key: str=None, namespace: str=None, sender: str=None, begin_offset: int=None, end_offset: int=None):
        """Return events matching the given filters/offset window."""
        pass
    def count_events(self, key: str=None, namespace: str=None, sender: str=None, begin_offset: int=None, end_offset: int=None):
        """Return the number of events matching the given filters."""
        pass
    def clean_up(self):
        """Release backend resources / purge stored events."""
        pass
    def register_client(self, namespace: str=None, sender: str=None) -> int:
        """Register a consumer; returns an integer client id."""
        pass
    def delete_client(self, client_id):
        """Remove a previously registered client."""
        pass
    def is_client_exists(self, client_id) -> bool:
        """Return True when ``client_id`` is registered."""
        pass
    def get_event_by_uuid(self, uuid: str):
        """Fetch a single event by its uuid."""
        pass
    def timestamp_to_event_offset(self, timestamp: int) -> int:
        """Map a timestamp to an event offset."""
        pass
class TestFetchProduction(unittest.TestCase):
    """Tests for the PA production parser against recorded HTML fixtures."""

    def setUp(self):
        # Route all session traffic through a mock adapter so no real HTTP
        # requests are made.
        self.adapter = requests_mock.Adapter()
        self.session = requests.Session()
        # BUG FIX(review): the original line was truncated to
        # ``self.session.mount(' self.adapter)`` — the mount-prefix string
        # was lost, leaving a syntax error.  'https://' is the conventional
        # prefix; confirm against the scheme of PA.PRODUCTION_URL.
        self.session.mount('https://', self.adapter)

    # NOTE(review): ``_time(...)`` appeared as a bare statement in the
    # source; it is evidently a time-freezing decorator (e.g. freezegun's
    # freeze_time) whose ``@`` was stripped — restored as a decorator.
    @_time('2021-12-30 09:58:40', tz_offset=(- 5))
    def test_nominal_response_uses_timestamp_from_page(self):
        # The parser must use the timestamp embedded in the page, not "now".
        self.adapter.register_uri(requests_mock.GET, PA.PRODUCTION_URL, text=resources.files('parsers.test.mocks').joinpath('PA_nominal_generation.html').read_text(), status_code=200)
        result = PA.fetch_production(session=self.session)
        self.assertEqual(result['datetime'], datetime(2021, 12, 30, 9, 58, 37, tzinfo=PA.TIMEZONE))

    @_time('2021-12-30 09:57:47', tz_offset=(- 5))
    def test_nominal_response_maps_to_electricitymap_fuels(self):
        # Every source on the page must map onto an electricityMap fuel key.
        self.adapter.register_uri(requests_mock.GET, PA.PRODUCTION_URL, text=resources.files('parsers.test.mocks').joinpath('PA_nominal_generation.html').read_text(), status_code=200)
        result = PA.fetch_production(session=self.session)
        self.assertEqual(result['production'], {'biomass': 2.75, 'coal': 149.6, 'gas': 355.88, 'geothermal': 0.0, 'hydro': 421.84, 'nuclear': 0.0, 'oil': 238., 'solar': 262.76, 'unknown': 0.0, 'wind': 115.4})
def test_job_override_dirname(tmpdir: Path) -> None:
    """Multirun with hydra.job override-dirname config must create one sweep
    directory per learning_rate value, keyed by the overridden params."""
    script = 'examples/configure_hydra/job_override_dirname/my_app.py'
    overrides = [
        'hydra.sweep.dir=' + str(tmpdir),
        'hydra.job.chdir=True',
        'learning_rate=0.1,0.01',
        'batch_size=32',
        'seed=999',
        '-m',
    ]
    run_python_script([script] + overrides)
    # One output directory per sweep point, named from the overrides.
    for lr in ('0.01', '0.1'):
        assert Path(tmpdir / 'batch_size=32,learning_rate={}/seed=999/'.format(lr)).is_dir()
class Uncle(TypedDict):
    """Typed shape of an uncle (ommer) block as returned over JSON-RPC.

    Field types mirror the raw payload: quantities arrive as hex strings
    (``HexStr``), hashes as ``HexBytes``.
    """
    author: ChecksumAddress
    difficulty: HexStr
    extraData: HexStr
    gasLimit: HexStr
    gasUsed: HexStr
    hash: HexBytes
    logsBloom: HexStr
    miner: HexBytes
    mixHash: HexBytes
    nonce: HexStr
    number: HexStr
    parentHash: HexBytes
    receiptsRoot: HexBytes
    sealFields: Sequence[HexStr]
    sha3Uncles: HexBytes
    size: int
    stateRoot: HexBytes
    timestamp: Timestamp
    totalDifficulty: HexStr
    transactions: Sequence[HexBytes]
    transactionsRoot: HexBytes
    uncles: Sequence[HexBytes]
def get_macrocycle_atom_types(pdbqt_string):
    """Collect macrocycle closure atom types from a PDBQT string.

    AutoDock's macrocycle handling retypes the closure carbons as
    ``CG0``..``CG9`` and adds pseudo atoms typed ``G0``..``G9``.  This scans
    every ATOM/HETATM record and gathers the atom types belonging to either
    group.

    Args:
        pdbqt_string: full text of a PDBQT file.

    Returns:
        tuple(list, list): (carbon atom types, pseudo atom types), one
        entry per matching record (duplicates preserved, file order).
    """
    # Frozen sets give O(1) membership tests; generated instead of spelled out.
    macrocycle_carbon = frozenset('CG%d' % i for i in range(10))
    macrocycle_pseudo = frozenset('G%d' % i for i in range(10))
    cg_atoms = []
    g_atoms = []
    for line in pdbqt_string.split('\n'):
        if line.startswith(('ATOM', 'HETATM')):
            # PDBQT stores the AutoDock atom type from column 78 onward.
            atom_type = line[77:].strip()
            if (atom_type in macrocycle_carbon):
                cg_atoms.append(atom_type)
            elif (atom_type in macrocycle_pseudo):
                g_atoms.append(atom_type)
    return (cg_atoms, g_atoms)
class TestRequestOptions():
    """Unit tests for the RequestOptions settings object."""

    def test_option_defaults(self):
        # Fresh options keep blank query-string values and leave every
        # parsing/normalization toggle off.
        options = RequestOptions()
        assert options.keep_blank_qs_values
        assert (not options.auto_parse_form_urlencoded)
        assert (not options.auto_parse_qs_csv)
        assert (not options.strip_url_path_trailing_slash)

    # BUG FIX(review): the source line began with a bare ``.parametrize(``
    # — a syntax error; the ``@pytest.mark`` prefix was evidently stripped
    # and is restored here.
    @pytest.mark.parametrize('option_name', ['keep_blank_qs_values', 'auto_parse_form_urlencoded', 'auto_parse_qs_csv', 'strip_url_path_trailing_slash'])
    def test_options_toggle(self, option_name):
        # Every option must be independently settable to True and False.
        options = RequestOptions()
        setattr(options, option_name, True)
        assert getattr(options, option_name)
        setattr(options, option_name, False)
        assert (not getattr(options, option_name))

    def test_incorrect_options(self):
        # Assigning an unknown attribute must raise (guards against typos).
        options = RequestOptions()

        def _assign_invalid():
            options.invalid_option_and_attribute = True
        with pytest.raises(AttributeError):
            _assign_invalid()
class Backtest():
    """Runs a backtest task: executes user code templates in-process and
    streams progress/status frames back over ``session`` (a socket).

    NOTE(review): executes user-supplied code via ``exec`` — acceptable only
    if the input is already trusted/sandboxed upstream; confirm.
    """
    def __init__(self, task, session):
        self.session = session  # control/status socket, may be None
        self.task = task
        self.gApis = {}  # globals namespace shared with the executed code
        self.tpls = task['Code']  # list of (source, [(name, value), ...]) templates
        del task['Code']  # keep raw code out of the task payload passed to VCtx
        self.ctx = VCtx(task=self.task, gApis=self.gApis, progressCallback=self.progressCallback)
    def progressCallback(self, st):
        # Frame format: !II header = (TaskStatus, payload length) + payload.
        if (self.session is None):
            return
        self.session.sendall((struct.pack('!II', json_loads(st)['TaskStatus'], len(st)) + st))
    def waitStop(self, ctx):
        # Block reading the control socket; hard-exit on a 'stop' frame.
        if (self.session is None):
            return
        try:
            buf = b''
            ack = 0  # expected payload length; 0 until the 4-byte header arrives
            self.session.settimeout(None)
            while True:
                if (ack > 0):
                    if ((len(buf) - 4) >= ack):
                        if (buf[4:(4 + ack)] == b'stop'):
                            ctx.Join()
                            self.session.close()
                            os._exit(2)
                        break
                elif (len(buf) >= 4):
                    (ack,) = struct.unpack('!I', buf[:4])
                    continue
                buf += self.session.recv(((ack - (len(buf) - 4)) if (ack > 0) else 4))
        except:
            # Socket errors simply end the watcher thread.
            pass
    def exit_handler(self, signum, frame):
        # SIGINT: finish the context cleanly, then hard-exit.
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        self.ctx.Join()
        self.session.shutdown(socket.SHUT_RDWR)
        os._exit(0)
    def Run(self):
        """Execute every code template in order inside a shared namespace."""
        signal.signal(signal.SIGINT, self.exit_handler)
        if (self.session and (platform.system() == 'Windows')):
            # Windows: no reliable signal delivery, poll the socket instead.
            t = threading.Thread(target=self.waitStop, args=(self.ctx,))
            t.setDaemon(True)
            t.start()
        try:
            initPlot = False
            tplsLen = len(self.tpls)
            for i in xrange(0, tplsLen):
                tpl = self.tpls[i]
                vv = copy.copy(self.gApis)
                for pair in tpl[1]:
                    vv[pair[0]] = pair[1]
                # Run the template's init() if it defines one.
                code = (tpl[0] + "\n\nif 'init' in locals() and callable(init):\n init()\n")
                if (i == (tplsLen - 1)):
                    # Only the last template drives main() / optional onexit().
                    code += "\nmain()\nif 'onexit' in globals():\n onexit()"
                if ((not initPlot) and ('matplotlib' in code)):
                    # Force the non-interactive Agg backend before first use.
                    initPlot = True
                    try:
                        __import__('matplotlib').use('Agg')
                    except:
                        pass
                exec(code.replace('\r\n', '\n'), vv)
        except (EOFError, SystemExit):
            pass
        except:
            # Report only traceback frames belonging to the user code
            # (compiled from '<string>'), not our own machinery.
            (etype, value, tb) = sys.exc_info()
            arr = [x for x in traceback.extract_tb(tb) if (x[0] == '<string>')]
            if arr:
                tList = ['Traceback (most recent call last):\n']
                tList = (tList + traceback.format_list(arr))
            else:
                tList = []
            tList = (tList + traceback.format_exception_only(etype, value))
            self.ctx.g_LogError(''.join(tList))
        self.ctx.Join()
        self.session.shutdown(socket.SHUT_RDWR)
class TaskDataOutputMetaData(TaskOutputMetaData):
    """Metadata describing a DataFrame produced by a task output.

    Wraps a sample of the data plus dtype / record-count / describe() /
    info() summaries so the output can be serialized for display.
    """

    def __init__(self, sample_df, o_sequence, o_label, dtypes, number_of_records, df_description, df_info, explanation=''):
        super(TaskDataOutputMetaData, self).__init__(o_sequence, o_label, elmdpenum.TaskOutputMetaDataTypes.DATA_OUTPUT.value)
        self.sample_df = sample_df  # DataFrame holding sample rows
        self.explanation = explanation  # optional free-text note
        self.dtypes = dtypes  # mapping: column name -> dtype
        self.number_of_records = number_of_records
        self.df_description = df_description  # result of df.describe()
        self.df_info = df_info  # captured df.info() output

    def to_dict(self):
        """Extend the base metadata dict with data-specific summaries."""
        meta_data = super(TaskDataOutputMetaData, self).to_dict()
        meta_data['number_of_records'] = self.number_of_records
        meta_data['dtypes'] = self._dtypes_to_dict_parser(self.dtypes)
        meta_data['dtypes_list'] = self._dtypes_to_list_parser(self.dtypes)
        meta_data['df_description'] = self.df_description.to_dict()
        meta_data['sample_data'] = self.sample_df.to_dict(orient='records')
        meta_data['df_info'] = self.df_info
        meta_data['explanation'] = self.explanation
        return meta_data

    def to_json(self):
        """Serialize to_dict() as a JSON string."""
        return json.dumps(self.to_dict())

    # BUG FIX: both parsers were declared without ``self`` yet called via
    # ``self._dtypes_to_*_parser(self.dtypes)`` above, which raised
    # TypeError (self bound to ``dtypes``).  They are pure helpers, so they
    # are marked @staticmethod.
    @staticmethod
    def _dtypes_to_dict_parser(dtypes):
        # {column name: dtype} mapping.
        return {name: dtype for (name, dtype) in dtypes.items()}

    @staticmethod
    def _dtypes_to_list_parser(dtypes):
        # List of {col_name, col_dtype} records, preserving column order.
        return [dict(col_name=name, col_dtype=dtype) for (name, dtype) in dtypes.items()]
def validate_pce(region: str, key_id: str, key_data: str, pce_id: str, role: MPCRoles, skip_steps: List[ValidationStepNames], run_steps: List[ValidationStepNames]) -> ValidatorResult:
    """Validate the network/compute setup of an AWS PCE.

    First rejects PCEs with duplicate-tagged resources (hard error), then
    loads the PCE and runs the validation suite for ``role``.

    Returns:
        ValidatorResult.ERROR when duplicates exist or any validation step
        reports an error-level failure; SUCCESS otherwise (results that are
        not error-level do not fail the run).
    """
    duplicate_resource_checker = DuplicatePCEResourcesChecker(region, key_id, key_data, None)
    duplicate_resources = duplicate_resource_checker.check_pce(pce_id)
    if duplicate_resources:
        logging.error(f'Failed to load PCE due to duplicate resources tagged under same pce id. Only one each of these resources can be tagged with the pce:pce-id ({pce_id}), and the others are mistagged. Look at other properties of these resources (like id) for a hint to the pce:pce-id the resource may correctly belong to. Details follow:')
        for duplicate_resource in duplicate_resources:
            logging.error(f'Multiple {duplicate_resource.resource_name_plural} tagged with pce:pce-id ({pce_id}): {duplicate_resource.duplicate_resource_ids}')
        return ValidatorResult.ERROR
    pce_service = AWSPCEService(region, key_id, key_data, None)
    logging.info(f'Loading the PCE {pce_id}...')
    pce = pce_service.get_pce(pce_id)
    logging.info(f'PCE loaded: {pce}')
    arn = get_arn(region, key_id, key_data)
    logging.info(f'ARN: {arn}')
    validator = ValidationSuite(region, key_id, key_data, None, role)
    failed_results = validator.validate_network_and_compute(pce, skip_steps, run_steps)
    if failed_results:
        logging.error(f'''Validation failed for PCE {pce_id}:
{ValidationSuite.summarize_errors(failed_results)}''')
        # Only error-level results fail the run; warnings fall through.
        if ValidationSuite.contains_error_result(failed_results):
            return ValidatorResult.ERROR
    else:
        logging.info(OVERALL_SUCCESS_MESSAGE)
    return ValidatorResult.SUCCESS
class OptionAnnotationsShapes(Options):
    """Highcharts annotation-shape options.

    Each option forwards to the shared ``_config``/``_config_get``
    machinery; the literal passed to ``_config_get`` is the Highcharts
    default reported by the getter.

    NOTE(review): each getter/setter pair shares one ``def`` name — the
    original source presumably used @property/@x.setter decorators that
    were stripped from this copy; confirm.
    """
    def dashStyle(self):
        return self._config_get(None)
    def dashStyle(self, text: str):
        self._config(text, js_type=False)
    def fill(self):
        return self._config_get('rgba(0, 0, 0, 0.75)')
    def fill(self, text: str):
        self._config(text, js_type=False)
    def height(self):
        return self._config_get(None)
    def height(self, num: float):
        self._config(num, js_type=False)
    def markerEnd(self):
        return self._config_get(None)
    def markerEnd(self, text: str):
        self._config(text, js_type=False)
    def markerStart(self):
        return self._config_get(None)
    def markerStart(self, text: str):
        self._config(text, js_type=False)
    def point(self):
        return self._config_get(None)
    def point(self, text: str):
        self._config(text, js_type=False)
    def points(self):
        return self._config_get(None)
    def points(self, value: Any):
        self._config(value, js_type=False)
    def r(self):
        return self._config_get('0')
    def r(self, text: str):
        # js_type=True: the value is emitted as raw JavaScript, not a string.
        self._config(text, js_type=True)
    def ry(self):
        return self._config_get(None)
    def ry(self, num: float):
        self._config(num, js_type=False)
    def snap(self):
        return self._config_get(2)
    def snap(self, num: float):
        self._config(num, js_type=False)
    def src(self):
        return self._config_get(None)
    def src(self, text: str):
        self._config(text, js_type=False)
    def stroke(self):
        return self._config_get('rgba(0, 0, 0, 0.75)')
    def stroke(self, text: str):
        self._config(text, js_type=False)
    def strokeWidth(self):
        return self._config_get(1)
    def strokeWidth(self, num: float):
        self._config(num, js_type=False)
    def type(self):
        return self._config_get('rect')
    def type(self, text: str):
        self._config(text, js_type=False)
    def width(self):
        return self._config_get(None)
    def width(self, num: float):
        self._config(num, js_type=False)
    def xAxis(self):
        return self._config_get(None)
    def xAxis(self, num: float):
        self._config(num, js_type=False)
    def yAxis(self):
        return self._config_get(None)
    def yAxis(self, num: float):
        self._config(num, js_type=False)
class OperatorNode(RegistryNode):
    """Registry node describing a single operator.

    Resolves the operator's constructor properties from (in priority order)
    fixed args, user-provided DAG properties, resource-provided args,
    default task args, and global defaults; validates them against the
    operator schema and applies any configured property preprocessors.

    NOTE(review): ``operator_class``, ``operator_class_module``,
    ``resolved_properties``, ``operator_args`` and ``imports`` are accessed
    as attributes (e.g. ``self.resolved_properties`` inside
    ``operator_args``), so they were presumably ``@property``-decorated in
    the original source; the decorators appear to have been stripped from
    this copy — confirm before relying on call syntax.
    """
    type = NodeTypes.OPERATOR
    def operator_class(self):
        # Name of the operator class to instantiate.
        return self.config['operator_class']
    def operator_class_module(self):
        # Module that provides ``operator_class``.
        return self.config['operator_class_module']
    def __init__(self, config, item):
        super(OperatorNode, self).__init__(config=config, item=item)
        self._resolved_properties = None   # set by resolve_properties()
        self._preprocessor_imports = None  # imports contributed by preprocessors
        self._default_task_args = None     # DAG-level default task args
    def set_default_task_args(self, args):
        self._default_task_args = (args or {})
    def resolved_properties(self):
        """Resolved property set; resolve_properties() must have run first."""
        if (not self._resolved_properties):
            raise Exception('Cannot retrieve resolved properties for operator {}: resolve_properties() has not been called yet!'.format(self))
        return self._resolved_properties
    def operator_args(self):
        """Constructor kwargs for the operator, minus values that merely
        repeat the DAG's default_task_args."""
        resolved = self.resolved_properties
        result = resolved.values.copy()
        for property_name in resolved.sources.default_task_args:
            if (self._default_task_args[property_name] == resolved.values[property_name]):
                result.pop(property_name)
        return result
    def imports(self):
        """Imports needed to instantiate this operator: the config's
        ``imports`` section, the operator class itself, and anything the
        preprocessors contribute."""
        if (not self._resolved_properties):
            raise Exception('Cannot retrieve imports for operator {}: resolve_properties() has not been called yet!'.format(self))
        loaded = ImportSchema().load(self.config.get('imports', {}))
        assert (not loaded.errors), 'Internal error: processing `imports` config {} for operator `{}`'.format(self.config.get('imports', {}), self.name)
        result = loaded.data
        if self.operator_class:
            result.setdefault('objects', [])
            result['objects'].append({'module': self.operator_class_module, 'objects': [self.operator_class]})
        for item in self._preprocessor_imports.values():
            if ('objects' in item):
                result.setdefault('objects', [])
                result['objects'] += item['objects']
            if ('modules' in item):
                result.setdefault('modules', [])
                result['modules'] += item['modules']
        return result
    def resolve_properties(self, execution_context, default_task_args=None, base_operator_loader=None, preprocessor_loader=None):
        """Compute, validate and preprocess this operator's properties.

        Idempotent: calling again must reproduce the same values, otherwise
        an exception is raised (repeated references with different resource
        contexts are not supported).
        """
        schema = self.get_schema(base_operator_loader)
        schema_properties = frozenset(schema.get('properties', {}).keys())
        self.set_default_task_args(default_task_args)
        (sources, property_values) = self._get_property_sources_and_values(schema_properties, execution_context)
        validated = validator.validate_and_fill_defaults(item=property_values, schema=schema)
        # NOTE(review): this adds keys *present* in property_values to the
        # schema source; keys filled by schema defaults are the ones absent
        # from property_values — verify the condition isn't inverted.
        for key in validated:
            if (key not in property_values):
                continue
            sources.schema.add(key)
        logger.debug('%s: validated partitioned properties: %s', self.name, sources)
        preprocessors = self._load_preprocessors(base_operator_loader, preprocessor_loader)
        self._preprocessor_imports = {pp_name: pp.imports() for (pp_name, pp) in six.iteritems(preprocessors)}
        preprocessed_values = self._apply_preprocessors(args=validated, preprocessors=preprocessors)
        if self._resolved_properties:
            if (preprocessed_values != self._resolved_properties.values):
                raise Exception('resolve_properties() was already called for operator {}, and different values were computed this time! Found: {}, expected: {}. This was probably caused by repeated references to a sub-dag or generator using different resource contexts. This is not presently supported!'.format(self, preprocessed_values, self._resolved_properties.values))
            else:
                logger.warning('resolve_properties() was already called for operator %s, but no differences in the computed properties were found.', self)
        self._resolved_properties = ResolvedProperties(sources=sources, values=preprocessed_values)
        return self._resolved_properties
    def _get_property_sources_and_values(self, schema_properties, execution_context):
        """Partition property values by origin, applying the precedence
        fixed_args > user properties > resources > default_task_args >
        global defaults; properties unknown to the schema are passed
        through from the user's properties."""
        sources = PropertySources(dag=set(), default_task_args=set(), resources=set(), schema=set(), global_defaults=set(), fixed_args=set(), unknown_to_schema=set())
        property_values = {}
        resource_args = self._get_resource_args(execution_context)
        global_defaults = self._get_global_defaults(execution_context)
        fixed_args = self._get_fixed_args()
        invalid_properties = [property_name for property_name in fixed_args if (property_name in self.properties)]
        if invalid_properties:
            raise Exception('Illegal properties `{}` provided for operator `{}`: these properties are assigned fixed values by boundary-layer that cannot be overridden'.format('` ,`'.join(invalid_properties), self))
        for property_name in schema_properties:
            if (property_name in fixed_args):
                value = fixed_args[property_name]
                logger.debug('%s: Inserting value `%s` for argument `%s` from fixed_args', self.name, value, property_name)
                property_values[property_name] = value
                sources.fixed_args.add(property_name)
                continue
            if (property_name in self.properties):
                logger.debug('%s: Property `%s` found in user-props', self.name, property_name)
                property_values[property_name] = self.properties[property_name]
                sources.dag.add(property_name)
                continue
            resource_hits = resource_args.get(property_name, {})
            if (len(resource_hits) > 1):
                # NOTE(review): message contains a doubled word ("limit limit").
                raise ValueError('Error in operator {}: Multiple available resources provide the argument {}: {}. Please specify a value or limit limit the resource scope'.format(self.name, property_name, resource_hits))
            if (len(resource_hits) == 1):
                (resource_name, value) = resource_hits.popitem()
                logger.debug('%s: Inserting value `%s` for argument `%s` from resource `%s`', self.name, value, property_name, resource_name)
                property_values[property_name] = value
                sources.resources.add(property_name)
                continue
            if (property_name in self._default_task_args):
                value = self._default_task_args[property_name]
                logger.debug('%s: Inserting value `%s` for argument `%s` from default_task_args', self.name, value, property_name)
                property_values[property_name] = value
                sources.default_task_args.add(property_name)
                continue
            if (property_name in global_defaults):
                value = global_defaults[property_name]
                logger.debug('%s: Inserting value `%s` for argument `%s` from global defaults', self.name, value, property_name)
                property_values[property_name] = value
                sources.global_defaults.add(property_name)
                continue
            logger.debug('%s: No resources or defaults available for property `%s`', self.name, property_name)
        unknown_to_schema = [property_name for property_name in self.properties if (property_name not in schema_properties)]
        for property_name in unknown_to_schema:
            value = self.properties[property_name]
            logger.debug('%s: Inserting value `%s` for user-property `%s` which is not part of the schema for this operator', self.name, value, property_name)
            property_values[property_name] = value
            sources.unknown_to_schema.add(property_name)
        return (sources, property_values)
    def _apply_preprocessors(self, args, preprocessors):
        """Run each property's preprocessor over its raw value."""
        result = args.copy()
        for (property_name, preprocessor) in six.iteritems(preprocessors):
            if (property_name not in args):
                continue
            processed_value = preprocessor.process_arg(args[property_name], node=self, raw_args=args)
            logger.debug('Property `%s` raw value: `%s`, processed value: `%s`', property_name, args[property_name], processed_value)
            result[property_name] = processed_value
        return result
    def _get_resource_args(self, execution_context):
        """Map property name -> {resource name: value} from all resources
        in scope for this node."""
        resources_available = self._get_resources_available(execution_context)
        result = {}
        for (resource_name, resource) in six.iteritems(resources_available):
            for (property_name, value) in six.iteritems(resource.get_provided_args()):
                result.setdefault(property_name, {})
                result[property_name][resource_name] = value
        return result
    def _get_resources_available(self, execution_context):
        # Only resources this node requires AND the context provides.
        keys_available = (self.requires_resources & frozenset(execution_context.resources))
        return {key: execution_context.resources[key] for key in keys_available}
    def _load_preprocessors(self, base_loader, preprocessor_loader):
        """Collect preprocessor configs across the operator hierarchy and
        instantiate them; returns {property name: preprocessor}."""
        def aggregator(previous_result, node):
            return (previous_result + node.config.get('property_preprocessors', []))
        preprocessor_configs = self._aggregate_over_hierarchy(base_loader=base_loader, initial_value=self.config.get('property_preprocessors', []), aggregator=aggregator)
        if (not preprocessor_configs):
            return {}
        if (not preprocessor_loader):
            raise MissingPreprocessorException('Node {} of type {} requires preprocessors {}, but no preprocessor_loader is available!'.format(self, self.type, [config['type'] for config in preprocessor_configs]))
        result = {}
        for preprocessor_conf in preprocessor_configs:
            preprocessor = preprocessor_loader(preprocessor_conf)
            for property_name in preprocessor_conf['apply_to_properties']:
                result[property_name] = preprocessor
        return result
    def _get_fixed_args(self):
        # `dag` is always injected by the framework and may not be overridden.
        return {'dag': '<<dag>>'}
    def _get_global_defaults(self, execution_context):
        return {'task_id': self._build_task_id(execution_context)}
    def _build_task_id(self, execution_context):
        """Derive the task id; generator-referenced nodes get a per-item
        suffix chosen by the generator's auto_task_id_mode."""
        base_name = util.sanitize_operator_name(self.name)
        if ((not execution_context.referrer) or (execution_context.referrer.type != NodeTypes.GENERATOR)):
            return base_name
        suffix_mode = execution_context.referrer.item.get('auto_task_id_mode')
        if ((not suffix_mode) or (suffix_mode == 'item_name')):
            return (base_name + '-<<item_name>>')
        elif (suffix_mode == 'index'):
            return (base_name + '-<<str(index)>>')
        raise Exception('Unknown suffix_mode `{}` for generator `{}` found while processing node `{}`'.format(suffix_mode, execution_context.referrer.name, self.name))
class SPIFlash():
    """Thin driver for a SPI flash device behind ``spiflash_spi_*`` CSRs.

    Every operation is a single bit-banged transfer: write the MOSI word,
    start a transfer of ``length`` bits, spin on the DONE flag, then read
    back MISO.  Masks on the return values select only the data bits that
    follow the 8-bit command.
    """

    def __init__(self, regs):
        self.regs = regs  # CSR accessor exposing the spiflash registers

    def spi_xfer(self, length, mosi):
        """Run one ``length``-bit transfer; return the raw MISO word."""
        self.regs.spiflash_spi_mosi.write(mosi)
        self.regs.spiflash_spi_control.write(((length * CTRL_LENGTH) | CTRL_START))
        while (not (self.regs.spiflash_spi_status.read() & STATUS_DONE)):
            pass  # busy-wait; transfers are short
        return self.regs.spiflash_spi_miso.read()

    def read_id(self):
        """Read the device ID (8-bit command + 24 data bits).

        NOTE(review): the mask literal was missing in the source ("& )");
        restored as 0xFFFFFF to match the 24 data bits of a 32-bit transfer
        (same pattern as the 16-bit & 255 and 24-bit & 65535 methods) —
        confirm against the flash datasheet.
        """
        return (self.spi_xfer(32, format_mosi(cmd=READ_ID)) & 0xFFFFFF)

    def write_enable(self):
        """Set the write-enable latch (WREN)."""
        self.spi_xfer(8, format_mosi(cmd=WREN))

    def write_disable(self):
        """Clear the write-enable latch (WRDI)."""
        self.spi_xfer(8, format_mosi(cmd=WRDI))

    def read_status(self):
        """Read the 8-bit status register (RDSR)."""
        return (self.spi_xfer(16, format_mosi(cmd=RDSR)) & 255)

    def write_status(self, value):
        """Write the status register (WRSR)."""
        self.spi_xfer(16, format_mosi(cmd=WRSR, data=value))

    def erase_sector(self, addr):
        """Erase the sector containing ``addr`` (SE)."""
        self.spi_xfer(32, format_mosi(cmd=SE, addr=addr))

    def read_sector_lock(self, addr):
        """Read the sector lock byte at ``addr``."""
        return (self.spi_xfer(40, format_mosi(cmd=RDSR, addr=addr)) & 255)

    def write_sector_lock(self, addr, byte):
        """Write the sector lock byte at ``addr``."""
        self.spi_xfer(40, format_mosi(cmd=WRSR, addr=addr, data=byte))

    def read(self, addr):
        """Read one data byte at ``addr`` (READ)."""
        return (self.spi_xfer(40, format_mosi(cmd=READ, addr=addr)) & 255)

    def write(self, addr, byte):
        """Program one data byte at ``addr`` (PP)."""
        self.spi_xfer(40, format_mosi(cmd=PP, addr=addr, data=byte))

    def read_nvcr(self):
        """Read the 16-bit non-volatile configuration register (RDNVCR)."""
        return (self.spi_xfer(24, format_mosi(cmd=RDNVCR)) & 65535)

    def write_nvcr(self, data):
        """Write the non-volatile configuration register (WRNVCR)."""
        self.spi_xfer(24, format_mosi(cmd=WRNVCR, data=data))
class EncodeTextLoader(BaseLoader):
    """Load a text file into a single Document, auto-detecting its encoding
    with chardet and falling back to UTF-8 when detection fails."""

    def __init__(self, file_path: str, encoding: Optional[str]=None):
        # ``encoding`` is stored for callers but load() always runs detection.
        self.file_path = file_path
        self.encoding = encoding

    def load(self) -> List[Document]:
        """Read the file, decode it, and wrap it in one Document."""
        with open(self.file_path, 'rb') as handle:
            raw = handle.read()
        detected = chardet.detect(raw)['encoding']
        # chardet returns None for undetectable input; default to UTF-8 then.
        text = raw.decode('utf-8' if detected is None else detected)
        return [Document(page_content=text, metadata={'source': self.file_path})]
def test_build_matrix_cooler_multiple():
    """hicBuildMatrix with several bin sizes must reproduce the reference
    multi-resolution .mcool matrix and the QC report."""
    outfile = NamedTemporaryFile(suffix='.mcool', delete=False)
    outfile.close()
    outfile_bam = NamedTemporaryFile(suffix='.bam', delete=False)
    # BUG FIX: the original closed ``outfile`` a second time here, leaving
    # the BAM tempfile handle open.
    outfile_bam.close()
    qc_folder = mkdtemp(prefix='testQC_')
    args = '-s {} {} --outFileName {} -bs 5000 10000 20000 -b {} --QCfolder {} --threads 4 --restrictionSequence GATC --danglingSequence GATC -rs {}'.format(sam_R1, sam_R2, outfile.name, outfile_bam.name, qc_folder, dpnii_file).split()
    compute(hicBuildMatrix.main, args, 5)
    # Compare every resolution against the stored reference matrix.
    for resolution in (5000, 10000, 20000):
        reference = hm.hiCMatrix((ROOT + 'hicBuildMatrix/multi_small_test_matrix.mcool::/resolutions/{}'.format(resolution)))
        produced = hm.hiCMatrix((outfile.name + '::/resolutions/{}'.format(resolution)))
        nt.assert_equal(reference.matrix.data, produced.matrix.data)
        nt.assert_equal(len(produced.cut_intervals), len(reference.cut_intervals))
        # Compare only (chrom, start, end); the 4th tuple element (coverage)
        # is intentionally excluded, as in the original per-resolution loops.
        nt.assert_equal([x[:3] for x in produced.cut_intervals], [x[:3] for x in reference.cut_intervals])
    assert are_files_equal((ROOT + 'QC/QC.log'), (qc_folder + '/QC.log'))
    assert (set(os.listdir((ROOT + 'QC/'))) == set(os.listdir(qc_folder)))
    os.unlink(outfile.name)
    shutil.rmtree(qc_folder)
class VersionTester(unittest.TestCase):
    """Tests for the ``stalker.models.version.Version`` class."""

    def setUp(self):
        """Build a minimal project/sequence/shot/task hierarchy plus one Version."""
        # BUGFIX: the original called ``super(self.__class__, self).setUp()``.
        # ``self.__class__`` recurses infinitely if this class is ever
        # subclassed; use the explicit class, matching tearDown below.
        super(VersionTester, self).setUp()
        self.patcher = PlatformPatcher()
        from stalker import Status, StatusList
        # Five generic statuses shared by every status list below.
        self.test_status1 = Status(name='Status1', code='STS1')
        self.test_status2 = Status(name='Status2', code='STS2')
        self.test_status3 = Status(name='Status3', code='STS3')
        self.test_status4 = Status(name='Status4', code='STS4')
        self.test_status5 = Status(name='Status5', code='STS5')
        self.test_task_status_list = StatusList(name='Task Status List', statuses=[self.test_status1, self.test_status2, self.test_status3, self.test_status4, self.test_status5], target_entity_type='Task')
        self.test_asset_status_list = StatusList(name='Asset Status List', statuses=[self.test_status1, self.test_status2, self.test_status3, self.test_status4, self.test_status5], target_entity_type='Asset')
        self.test_shot_status_list = StatusList(name='Shot Status List', statuses=[self.test_status1, self.test_status2, self.test_status3, self.test_status4, self.test_status5], target_entity_type='Shot')
        self.test_sequence_status_list = StatusList(name='Sequence Status List', statuses=[self.test_status1, self.test_status2, self.test_status3, self.test_status4, self.test_status5], target_entity_type='Sequence')
        self.test_project_status_list = StatusList(name='Project Status List', statuses=[self.test_status1, self.test_status2, self.test_status3, self.test_status4, self.test_status5], target_entity_type='Project')
        from stalker import Repository, Type
        self.test_repo = Repository(name='Test Repository', code='TR', linux_path='/mnt/T/', windows_path='T:/', osx_path='/Volumes/T/')
        self.test_project_type = Type(name='Test', code='test', target_entity_type='Project')
        from stalker import Structure
        self.test_structure = Structure(name='Test Project Structure')
        from stalker import Project
        self.test_project = Project(name='Test Project', code='tp', type=self.test_project_type, status_list=self.test_project_status_list, repositories=[self.test_repo], structure=self.test_structure)
        from stalker import Sequence
        self.test_sequence = Sequence(name='Test Sequence', code='SEQ1', project=self.test_project, status_list=self.test_sequence_status_list)
        from stalker import Shot
        self.test_shot1 = Shot(name='SH001', code='SH001', project=self.test_project, sequences=[self.test_sequence], status_list=self.test_shot_status_list)
        from stalker import Task
        self.test_task1 = Task(name='Task1', parent=self.test_shot1, status_list=self.test_task_status_list)
        from stalker import Link
        self.test_input_link1 = Link(name='Input Link 1', full_path='/mnt/M/JOBs/TestProj/Seqs/TestSeq/Shots/SH001/FX/Outputs/SH001_beauty_v001.###.exr')
        self.test_input_link2 = Link(name='Input Link 2', full_path='/mnt/M/JOBs/TestProj/Seqs/TestSeq/Shots/SH001/FX/Outputs/SH001_occ_v001.###.exr')
        self.test_output_link1 = Link(name='Output Link 1', full_path='/mnt/M/JOBs/TestProj/Seqs/TestSeq/Shots/SH001/FX/Outputs/SH001_beauty_v001.###.exr')
        self.test_output_link2 = Link(name='Output Link 2', full_path='/mnt/M/JOBs/TestProj/Seqs/TestSeq/Shots/SH001/FX/Outputs/SH001_occ_v001.###.exr')
        # Default keyword arguments used by most Version constructions in the tests.
        self.kwargs = {'take_name': 'TestTake', 'inputs': [self.test_input_link1, self.test_input_link2], 'outputs': [self.test_output_link1, self.test_output_link2], 'task': self.test_task1, 'created_with': 'Houdini'}
        # (input, expected-formatted-output) pairs for take_name formatting tests.
        self.take_name_test_values = [('Take Name', 'Take_Name'), ('TakeName', 'TakeName'), ('take name', 'take_name'), (' take_name', 'take_name'), ('take_name ', 'take_name'), (' take name ', 'take_name'), ('TakeName', 'TakeName'), ('Take___Name', 'Take___Name'), ('', '')]
        from stalker import Version
        self.test_version = Version(**self.kwargs)
        self.test_version.is_published = False

    def tearDown(self):
        """Undo the platform patch and run the parent teardown."""
        self.patcher.restore()
        super(VersionTester, self).tearDown()

    def test_children_attribute_will_not_allow_circular_dependencies(self):
        """Appending a Version's parent to its children raises CircularDependencyError."""
        from stalker import Version
        self.kwargs['parent'] = None
        new_version1 = Version(**self.kwargs)
        new_version2 = Version(**self.kwargs)
        new_version1.parent = new_version2
        from stalker.exceptions import CircularDependencyError
        with pytest.raises(CircularDependencyError) as cm:
            new_version1.children.append(new_version2)
        assert (str(cm.value) == '<tp_SH001_Task1_TestTake_v003 (Version)> (Version) and <tp_SH001_Task1_TestTake_v002 (Version)> (Version) creates a circular dependency in their "children" attribute')

    def test_children_attribute_will_not_allow_deeper_circular_dependencies(self):
        """Circular dependency detection also works through a grandparent chain."""
        self.kwargs['parent'] = None
        from stalker import Version
        new_version1 = Version(**self.kwargs)
        new_version2 = Version(**self.kwargs)
        new_version3 = Version(**self.kwargs)
        new_version1.parent = new_version2
        new_version2.parent = new_version3
        from stalker.exceptions import CircularDependencyError
        with pytest.raises(CircularDependencyError) as cm:
            new_version1.children.append(new_version3)
        assert (str(cm.value) == '<tp_SH001_Task1_TestTake_v004 (Version)> (Version) and <tp_SH001_Task1_TestTake_v002 (Version)> (Version) creates a circular dependency in their "children" attribute')
class OptionSeriesCylinderSonificationTracksMappingTremoloSpeed(Options):
    """Generated wrapper for the Highcharts
    ``series.cylinder.sonification.tracks.mapping.tremoloSpeed`` options.

    BUGFIX: the original defined each getter/setter pair as two plain methods
    with the same name, so the second ``def`` silently replaced the first and
    every getter was unreachable.  The ``@property`` / ``@<name>.setter``
    decorators used by this family of generated option classes are restored.
    """

    @property
    def mapFunction(self):
        # Returns the configured value, or None when unset.
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def gemini_generate_stream(model: ProxyModel, tokenizer, params, device, context_len=2048):
    """Stream a chat completion from Google Gemini through the proxy model.

    Args:
        model: proxy model wrapper holding the connection parameters.
        tokenizer: unused; kept for signature compatibility with other backends.
        params: request parameters; ``params['messages']`` holds the chat history.
        device: unused; kept for signature compatibility with other backends.
        context_len: unused; kept for signature compatibility with other backends.

    Yields:
        str: the accumulated response text after each received stream chunk.
    """
    # BUGFIX: the original body started with an unterminated docstring
    # ("'Zhipu ai, see:") and declared ``global history`` without ever using it;
    # both are removed here.
    model_params = model.get_params()
    print(f'Model: {model}, model_params: {model_params}')
    proxy_api_key = model_params.proxy_api_key
    # BUGFIX: the original evaluated ``GEMINI_DEFAULT_MODEL or
    # model_params.proxyllm_backend``, which always picked the default model
    # and silently ignored any configured backend.  Prefer the configuration.
    proxyllm_backend = (model_params.proxyllm_backend or GEMINI_DEFAULT_MODEL)
    generation_config = {'temperature': 0.7, 'top_p': 1, 'top_k': 1, 'max_output_tokens': 2048}
    safety_settings = [{'category': 'HARM_CATEGORY_HARASSMENT', 'threshold': 'BLOCK_MEDIUM_AND_ABOVE'}, {'category': 'HARM_CATEGORY_HATE_SPEECH', 'threshold': 'BLOCK_MEDIUM_AND_ABOVE'}, {'category': 'HARM_CATEGORY_SEXUALLY_EXPLICIT', 'threshold': 'BLOCK_MEDIUM_AND_ABOVE'}, {'category': 'HARM_CATEGORY_DANGEROUS_CONTENT', 'threshold': 'BLOCK_MEDIUM_AND_ABOVE'}]
    import google.generativeai as genai
    if model_params.proxy_api_base:
        # Route through a custom endpoint (e.g. a regional or proxied API base).
        from google.api_core import client_options
        client_opts = client_options.ClientOptions(api_endpoint=model_params.proxy_api_base)
        genai.configure(api_key=proxy_api_key, transport='rest', client_options=client_opts)
    else:
        genai.configure(api_key=proxy_api_key)
    model = genai.GenerativeModel(model_name=proxyllm_backend, generation_config=generation_config, safety_settings=safety_settings)
    messages: List[ModelMessage] = params['messages']
    (user_prompt, gemini_hist) = _transform_to_gemini_messages(messages)
    chat = model.start_chat(history=gemini_hist)
    response = chat.send_message(user_prompt, stream=True)
    text = ''
    for chunk in response:
        # Accumulate and yield the full text so far, mirroring how other
        # backends in this module stream partial completions.
        text += chunk.text
        print(text)
        yield text
class Pawn(Piece):
    # Chess pawn piece: moves straight forward (two squares on its first
    # turn), captures diagonally.  Positions are continuous screen-board
    # coordinates, so movement is expressed as constrained "slides" along
    # direction vectors rather than discrete square hops.
    def __init__(self, x, y, d):
        # d is the piece colour (white/black) passed through to Piece.
        super().__init__(x, y, d)
        # Pawns carry no letter in algebraic-style display.
        self.set_letter('')
    def draw_moves(self, pieces):
        """Highlight every destination this pawn can move to.

        Uses a throwaway clone to probe moves: each slide leaves the clone at
        the furthest legal point, whose position is recorded; a follow-up
        slide(0, 0, ...) then presumably snaps the clone back to its start —
        TODO confirm against Piece.slide.
        """
        fake_piece = Pawn(self.start_x, self.start_y, self.color)
        end_positions = []
        forward_dist = 1
        # First move of the game allows a two-square advance.
        # NOTE(review): assumes Piece tracks self.turn as a move counter — confirm.
        if (self.turn == 0):
            forward_dist = 2
        if (self.color == white):
            # White moves up the board; diagonal capture directions.
            directions = [[1, 1], [(- 1), 1]]
            # Forward probe: capture=False because pawns cannot capture straight ahead.
            fake_piece.slide(0, forward_dist, [p for p in pieces if (p != self)], capture=False)
            end_positions.append((fake_piece.x, fake_piece.y))
            fake_piece.slide(0, 0, [p for p in pieces if (p != self)], capture=False)
        else:
            # Black mirrors white: moves down the board.
            directions = [[(- 1), (- 1)], [1, (- 1)]]
            fake_piece.slide(0, (- forward_dist), [p for p in pieces if (p != self)], capture=False)
            end_positions.append((fake_piece.x, fake_piece.y))
            fake_piece.slide(0, 0, [p for p in pieces if (p != self)], capture=False)
        # Diagonal capture probes (fake=True: simulate without capturing).
        for d in directions:
            fake_piece.slide(d[0], d[1], [p for p in pieces if (p != self)], fake=True)
            end_positions.append((fake_piece.x, fake_piece.y))
            fake_piece.slide(0, 0, [p for p in pieces if (p != self)], fake=True)
        # Draw a green highlight lane from the start square to each destination.
        for end_pos in end_positions:
            draw_line_round_corners_polygon(see_through2, to_screen_coords((self.start_x, self.start_y)), to_screen_coords(end_pos), GREEN_HIGHLIGHT, (((self.radius * 2) * 640) / 8))
    def drag(self, new_p, pieces):
        """Move the grabbed pawn towards cursor position new_p.

        The cursor offset is projected onto the closest legal direction
        (two diagonals or straight ahead) and the resulting slide distance is
        clamped to the pawn's legal range along each axis.
        """
        if self.grabbed:
            if (self.color == white):
                (path, dist) = self.select_path((self.start_x, self.start_y), [[1, 1], [(- 1), 1], [0, 1]], new_p)
                path_len = math.sqrt(((path[0] ** 2) + (path[1] ** 2)))
                self.direction = path
                if (self.direction == [1, 1]):
                    # Diagonal right capture: clamp both axes to one square.
                    self.slide(clamp(((path[0] * dist) / path_len), 0, 1), clamp(((path[1] * dist) / path_len), 0, 1), pieces)
                elif (self.direction == [(- 1), 1]):
                    # Diagonal left capture.
                    self.slide(clamp(((path[0] * dist) / path_len), (- 1), 0), clamp(((path[1] * dist) / path_len), 0, 1), pieces)
                else:
                    # Straight advance: up to two squares on the first turn.
                    max_move = 1
                    if (self.turn == 0):
                        max_move = 2
                    self.slide(0, clamp(((path[1] * dist) / path_len), 0, max_move), pieces, capture=False)
            else:
                # Black: same logic with the y-axis mirrored.
                (path, dist) = self.select_path((self.start_x, self.start_y), [[1, (- 1)], [(- 1), (- 1)], [0, (- 1)]], new_p)
                path_len = math.sqrt(((path[0] ** 2) + (path[1] ** 2)))
                self.direction = path
                if (self.direction == [1, (- 1)]):
                    self.slide(clamp(((path[0] * dist) / path_len), 0, 1), clamp(((path[1] * dist) / path_len), (- 1), 0), pieces)
                elif (self.direction == [(- 1), (- 1)]):
                    self.slide(clamp(((path[0] * dist) / path_len), (- 1), 0), clamp(((path[1] * dist) / path_len), (- 1), 0), pieces)
                else:
                    max_move = 1
                    if (self.turn == 0):
                        max_move = 2
                    self.slide(0, clamp(((path[1] * dist) / path_len), (- max_move), 0), pieces, capture=False)
    def can_promote(self):
        # True when the pawn's body crosses the far rank.
        # NOTE(review): returns None (falsy) rather than False when not
        # promotable — callers appear to rely only on truthiness.
        if (self.color == white):
            if ((self.y - self.radius) > 7):
                return True
        if (self.color == black):
            if ((self.y + self.radius) < 1):
                return True
    def ungrab(self, pieces):
        """Release the pawn; cancel an illegal diagonal move with no capture."""
        if self.grabbed:
            attacked = False
            for piece in pieces:
                if piece.targeted:
                    attacked = True
            if self.direction:
                # A diagonal move (direction[0] != 0) is only legal if it
                # actually attacked a piece; otherwise snap back.
                if ((not attacked) and (self.direction[0] != 0)):
                    self.cancel(pieces)
                    self.cancel(pieces)
                    return
            super().ungrab(pieces)
    def draw_paths(self, pieces):
        """Draw red threat lanes for the pawn's two capture diagonals."""
        if self.targeted:
            # A targeted (about to be captured) pawn exerts no threat display.
            return
        fake_piece = Pawn(self.start_x, self.start_y, self.color)
        if (self.color == white):
            directions = [[1, 1], [(- 1), 1]]
        else:
            directions = [[(- 1), (- 1)], [1, (- 1)]]
        end_positions = []
        for d in directions:
            fake_piece.slide(d[0], d[1], [p for p in pieces if (p != self)], fake=True)
            end_positions.append((fake_piece.x, fake_piece.y))
            fake_piece.slide(0, 0, [p for p in pieces if (p != self)], fake=True)
        for end_pos in end_positions:
            draw_line_round_corners_polygon(see_through, to_screen_coords((self.start_x, self.start_y)), to_screen_coords(end_pos), RED_HIGHLIGHT, (((self.radius * 2) * 640) / 8))
class RowLimitedIDVDownloadViewSet(BaseDownloadViewSet):
    """Download endpoint for IDV data, constrained by row count."""

    endpoint_doc = 'usaspending_api/api_contracts/contracts/v2/download/idv.md'

    def post(self, request):
        """Tag the request as row-count constrained, then delegate to the base handler."""
        request.data['constraint_type'] = 'row_count'
        return super().post(request, validator_type=IdvDownloadValidator)
class Dialogues():
    """Registry of all dialogues an agent participates in for one protocol.

    BUGFIX: the decorators on this class had been stripped, which broke it at
    runtime: ``self.self_address`` is read as a value throughout (e.g. in
    ``_is_message_by_self`` and ``create``) so the accessor methods must be
    properties; ``new_self_initiated_dialogue_reference`` takes ``cls`` and
    must be a classmethod; ``_generate_dialogue_nonce`` takes no ``self`` yet
    is called on the instance, so it must be a staticmethod.  All decorators
    are restored here; no other logic is changed.
    """

    # Whether dialogues that reach a terminal state are kept in storage.
    _keep_terminal_state_dialogues = False

    def __init__(self, self_address: Address, end_states: FrozenSet[Dialogue.EndState], message_class: Type[Message], dialogue_class: Type[Dialogue], role_from_first_message: Callable[[Message, Address], Dialogue.Role], keep_terminal_state_dialogues: Optional[bool]=None) -> None:
        """Initialize the dialogues registry.

        :param self_address: the address of the agent owning these dialogues.
        :param end_states: the terminal states tracked by the dialogue stats.
        :param message_class: the Message subclass used by this protocol.
        :param dialogue_class: the Dialogue subclass used by this protocol.
        :param role_from_first_message: derives this agent's role from the
            first message of a dialogue; its signature is validated below.
        :param keep_terminal_state_dialogues: overrides the class default if given.
        """
        self._dialogues_storage = PersistDialoguesStorageWithOffloading(self)
        self._self_address = self_address
        self._dialogue_stats = DialogueStats(end_states)
        if (keep_terminal_state_dialogues is not None):
            self._keep_terminal_state_dialogues = keep_terminal_state_dialogues
        enforce(issubclass(message_class, Message), 'message_class is not a subclass of Message.')
        self._message_class = message_class
        enforce(issubclass(dialogue_class, Dialogue), 'dialogue_class is not a subclass of Dialogue.')
        self._dialogue_class = dialogue_class
        # Validate the role_from_first_message callback signature so that
        # misconfigured protocols fail fast at construction time.
        sig = signature(role_from_first_message)
        parameter_length = len(sig.parameters.keys())
        enforce((parameter_length == 2), 'Invalid number of parameters for role_from_first_message. Expected 2. Found {}.'.format(parameter_length))
        parameter_1_type = list(sig.parameters.values())[0].annotation
        enforce((parameter_1_type == Message), "Invalid type for the first parameter of role_from_first_message. Expected 'Message'. Found {}.".format(parameter_1_type))
        parameter_2_type = list(sig.parameters.values())[1].annotation
        enforce((parameter_2_type == Address), "Invalid type for the second parameter of role_from_first_message. Expected 'Address'. Found {}.".format(parameter_2_type))
        return_type = sig.return_annotation
        enforce((return_type == Dialogue.Role), "Invalid return type for role_from_first_message. Expected 'Dialogue.Role'. Found {}.".format(return_type))
        self._role_from_first_message = role_from_first_message

    @property
    def is_keep_dialogues_in_terminal_state(self) -> bool:
        """Return True if dialogues in a terminal state are kept in storage."""
        return self._keep_terminal_state_dialogues

    @property
    def self_address(self) -> Address:
        """Return the address of the agent owning these dialogues."""
        enforce((self._self_address != ''), 'self_address is not set.')
        return self._self_address

    @property
    def dialogue_stats(self) -> DialogueStats:
        """Return the dialogue statistics tracker."""
        return self._dialogue_stats

    @property
    def message_class(self) -> Type[Message]:
        """Return the message class used by this protocol."""
        return self._message_class

    @property
    def dialogue_class(self) -> Type[Dialogue]:
        """Return the dialogue class used by this protocol."""
        return self._dialogue_class

    def get_dialogues_with_counterparty(self, counterparty: Address) -> List[Dialogue]:
        """Return all dialogues held with the given counterparty."""
        return self._dialogues_storage.get_dialogues_with_counterparty(counterparty)

    def _is_message_by_self(self, message: Message) -> bool:
        """Return True if this agent sent the message."""
        return (message.sender == self.self_address)

    def _is_message_by_other(self, message: Message) -> bool:
        """Return True if the counterparty sent the message."""
        return (not self._is_message_by_self(message))

    def _counterparty_from_message(self, message: Message) -> Address:
        """Return the counterparty's address for the given message."""
        counterparty = (message.to if self._is_message_by_self(message) else message.sender)
        return counterparty

    @classmethod
    def new_self_initiated_dialogue_reference(cls) -> Tuple[str, str]:
        """Return a fresh (nonce, unassigned) reference for a self-initiated dialogue."""
        return (cls._generate_dialogue_nonce(), Dialogue.UNASSIGNED_DIALOGUE_REFERENCE)

    def create(self, counterparty: Address, performative: Message.Performative, **kwargs: Any) -> Tuple[Message, Dialogue]:
        """Create a new dialogue and its opening message.

        :return: the initial message and the dialogue it opened.
        """
        initial_message = self._message_class(dialogue_reference=self.new_self_initiated_dialogue_reference(), message_id=Dialogue.STARTING_MESSAGE_ID, target=Dialogue.STARTING_TARGET, performative=performative, **kwargs)
        initial_message.sender = self.self_address
        initial_message.to = counterparty
        dialogue = self._create_dialogue(counterparty, initial_message)
        return (initial_message, dialogue)

    def create_with_message(self, counterparty: Address, initial_message: Message) -> Dialogue:
        """Create a new dialogue from a pre-built (unsent) initial message."""
        enforce((not initial_message.has_sender), "The message's 'sender' field is already set {}".format(initial_message))
        enforce((not initial_message.has_to), "The message's 'to' field is already set {}".format(initial_message))
        initial_message.sender = self.self_address
        initial_message.to = counterparty
        dialogue = self._create_dialogue(counterparty, initial_message)
        return dialogue

    def _create_dialogue(self, counterparty: Address, initial_message: Message) -> Dialogue:
        """Create the dialogue for an initial message, rolling back on failure."""
        dialogue = self._create_self_initiated(dialogue_opponent_addr=counterparty, dialogue_reference=initial_message.dialogue_reference, role=self._role_from_first_message(initial_message, self.self_address))
        try:
            dialogue._update(initial_message)
        except InvalidDialogueMessage as e:
            # Remove the half-constructed dialogue so storage stays consistent.
            self._dialogues_storage.remove(dialogue.dialogue_label)
            raise ValueError(f'Cannot create a dialogue with the specified performative and contents. {e}') from e
        return dialogue

    def update(self, message: Message) -> Optional[Dialogue]:
        """Route an incoming message into the matching (possibly new) dialogue.

        :return: the updated dialogue, or None if the message could not be routed.
        """
        enforce((message.has_sender and self._is_message_by_other(message)), "Invalid 'update' usage. Update must only be used with a message by another agent.")
        enforce(message.has_to, "The message's 'to' field is not set {}".format(message))
        enforce((message.to == self.self_address), f"Message to and dialogue self address do not match. Got 'to={message.to}' expected 'to={self.self_address}'.")
        dialogue_reference = message.dialogue_reference
        # Classify the incoming reference: fully unassigned (invalid), the
        # first message of a new dialogue, or a follow-up on an incomplete label.
        is_invalid_label = ((dialogue_reference[0] == Dialogue.UNASSIGNED_DIALOGUE_REFERENCE) and (dialogue_reference[1] == Dialogue.UNASSIGNED_DIALOGUE_REFERENCE))
        is_new_dialogue = ((dialogue_reference[0] != Dialogue.UNASSIGNED_DIALOGUE_REFERENCE) and (dialogue_reference[1] == Dialogue.UNASSIGNED_DIALOGUE_REFERENCE) and (message.message_id == Dialogue.STARTING_MESSAGE_ID))
        is_incomplete_label_and_non_initial_msg = ((dialogue_reference[0] != Dialogue.UNASSIGNED_DIALOGUE_REFERENCE) and (dialogue_reference[1] == Dialogue.UNASSIGNED_DIALOGUE_REFERENCE) and (message.message_id not in (Dialogue.STARTING_MESSAGE_ID, Dialogue.STARTING_TARGET)))
        if is_invalid_label:
            dialogue = None
        elif is_new_dialogue:
            dialogue = self._create_opponent_initiated(dialogue_opponent_addr=message.sender, dialogue_reference=dialogue_reference, role=self._role_from_first_message(message, self.self_address))
        elif is_incomplete_label_and_non_initial_msg:
            dialogue = self.get_dialogue(message)
        else:
            self._complete_dialogue_reference(message)
            dialogue = self.get_dialogue(message)
        if (dialogue is not None):
            try:
                dialogue._update(message)
                result = dialogue
            except InvalidDialogueMessage:
                result = None
                if is_new_dialogue:
                    # The freshly-created dialogue never got a valid first
                    # message; discard it.
                    self._dialogues_storage.remove(dialogue.dialogue_label)
        else:
            result = None
        return result

    def _complete_dialogue_reference(self, message: Message) -> None:
        """Upgrade an incomplete dialogue label with the responder's reference."""
        complete_dialogue_reference = message.dialogue_reference
        enforce((Dialogue.UNASSIGNED_DIALOGUE_REFERENCE not in (complete_dialogue_reference[0], complete_dialogue_reference[1])), 'Only complete dialogue references allowed.')
        incomplete_dialogue_reference = (complete_dialogue_reference[0], Dialogue.UNASSIGNED_DIALOGUE_REFERENCE)
        incomplete_dialogue_label = DialogueLabel(incomplete_dialogue_reference, message.sender, self.self_address)
        if (self._dialogues_storage.is_dialogue_present(incomplete_dialogue_label) and (not self._dialogues_storage.is_in_incomplete(incomplete_dialogue_label))):
            dialogue = self._dialogues_storage.get(incomplete_dialogue_label)
            if (not dialogue):
                raise ValueError('no dialogue found')
            # Re-key the stored dialogue under the now-complete label.
            self._dialogues_storage.remove(incomplete_dialogue_label)
            final_dialogue_label = DialogueLabel(complete_dialogue_reference, incomplete_dialogue_label.dialogue_opponent_addr, incomplete_dialogue_label.dialogue_starter_addr)
            dialogue._update_dialogue_label(final_dialogue_label)
            self._dialogues_storage.add(dialogue)
            self._dialogues_storage.set_incomplete_dialogue(incomplete_dialogue_label, final_dialogue_label)

    def get_dialogue(self, message: Message) -> Optional[Dialogue]:
        """Return the dialogue this message belongs to, if any.

        Tries both the self-initiated and opponent-initiated label variants.
        """
        self_initiated_dialogue_label = DialogueLabel(message.dialogue_reference, self._counterparty_from_message(message), self.self_address)
        other_initiated_dialogue_label = DialogueLabel(message.dialogue_reference, self._counterparty_from_message(message), self._counterparty_from_message(message))
        self_initiated_dialogue_label = self._get_latest_label(self_initiated_dialogue_label)
        other_initiated_dialogue_label = self._get_latest_label(other_initiated_dialogue_label)
        self_initiated_dialogue = self.get_dialogue_from_label(self_initiated_dialogue_label)
        other_initiated_dialogue = self.get_dialogue_from_label(other_initiated_dialogue_label)
        result = (self_initiated_dialogue or other_initiated_dialogue)
        return result

    def _get_latest_label(self, dialogue_label: DialogueLabel) -> DialogueLabel:
        """Return the completed form of the label if one exists, else the label itself."""
        return self._dialogues_storage.get_latest_label(dialogue_label)

    def get_dialogue_from_label(self, dialogue_label: DialogueLabel) -> Optional[Dialogue]:
        """Return the dialogue stored under the given label, or None."""
        return self._dialogues_storage.get(dialogue_label)

    def _create_self_initiated(self, dialogue_opponent_addr: Address, dialogue_reference: Tuple[str, str], role: Dialogue.Role) -> Dialogue:
        """Create a dialogue initiated by this agent (responder reference unassigned)."""
        enforce(((dialogue_reference[0] != Dialogue.UNASSIGNED_DIALOGUE_REFERENCE) and (dialogue_reference[1] == Dialogue.UNASSIGNED_DIALOGUE_REFERENCE)), 'Cannot initiate dialogue with preassigned dialogue_responder_reference!')
        incomplete_dialogue_label = DialogueLabel(dialogue_reference, dialogue_opponent_addr, self.self_address)
        dialogue = self._create(incomplete_dialogue_label, role)
        return dialogue

    def _create_opponent_initiated(self, dialogue_opponent_addr: Address, dialogue_reference: Tuple[str, str], role: Dialogue.Role) -> Dialogue:
        """Create a dialogue initiated by the opponent, assigning our responder nonce."""
        enforce(((dialogue_reference[0] != Dialogue.UNASSIGNED_DIALOGUE_REFERENCE) and (dialogue_reference[1] == Dialogue.UNASSIGNED_DIALOGUE_REFERENCE)), 'Cannot initiate dialogue with preassigned dialogue_responder_reference!')
        incomplete_dialogue_label = DialogueLabel(dialogue_reference, dialogue_opponent_addr, dialogue_opponent_addr)
        new_dialogue_reference = (dialogue_reference[0], self._generate_dialogue_nonce())
        complete_dialogue_label = DialogueLabel(new_dialogue_reference, dialogue_opponent_addr, dialogue_opponent_addr)
        dialogue = self._create(incomplete_dialogue_label, role, complete_dialogue_label)
        return dialogue

    def _create(self, incomplete_dialogue_label: DialogueLabel, role: Dialogue.Role, complete_dialogue_label: Optional[DialogueLabel]=None) -> Dialogue:
        """Instantiate and store a dialogue under the appropriate label."""
        enforce((not self._dialogues_storage.is_in_incomplete(incomplete_dialogue_label)), 'Incomplete dialogue label already present.')
        if (complete_dialogue_label is None):
            dialogue_label = incomplete_dialogue_label
        else:
            self._dialogues_storage.set_incomplete_dialogue(incomplete_dialogue_label, complete_dialogue_label)
            dialogue_label = complete_dialogue_label
        enforce((not self._dialogues_storage.is_dialogue_present(dialogue_label)), 'Dialogue label already present in dialogues.')
        dialogue = self._dialogue_class(dialogue_label=dialogue_label, message_class=self._message_class, self_address=self.self_address, role=role)
        self._dialogues_storage.add(dialogue)
        return dialogue

    @staticmethod
    def _generate_dialogue_nonce() -> str:
        """Generate a cryptographically random nonce for a dialogue reference."""
        return secrets.token_hex(DialogueLabel.NONCE_BYTES_NB)

    def setup(self) -> None:
        """Set up the dialogue storage (cooperative with mixin super-classes)."""
        self._dialogues_storage.setup()
        super_obj = super()
        if hasattr(super_obj, 'setup'):
            super_obj.setup()

    def teardown(self) -> None:
        """Tear down the dialogue storage (cooperative with mixin super-classes)."""
        self._dialogues_storage.teardown()
        super_obj = super()
        if hasattr(super_obj, 'teardown'):
            super_obj.teardown()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.