code stringlengths 281 23.7M |
|---|
class NBytes_List():
    """A list of fixed-width NBytes fields decoded from a byte stream."""

    def __init__(self, name, nbytes=2):
        self.name = name        # label used when printing
        self.length = nbytes    # width in bytes of each decoded element
        self.lonbytes = []      # decoded NBytes instances, in stream order

    def decode(self, data):
        """Consume *data* in fixed-width chunks; return the leftover bytes."""
        while data:
            item = NBytes('', self.length)
            data = item.decode(data)
            self.lonbytes.append(item)
        return data

    def show(self, depth=0):
        """Print the list name, then each element one indent level deeper."""
        print('{}{}:'.format((PRINT_INDENT * depth), self.name))
        for item in self.lonbytes:
            item.show(depth + 1)

    def __len__(self):
        # NOTE(review): element count plus per-element width is an odd
        # length definition -- preserved as-is, confirm against callers.
        return len(self.lonbytes) + self.length

    def __contains__(self, b):
        return any(b == item for item in self.lonbytes)
def test_returning_a_pathlib_path(local_dummy_file):
    """Tasks/workflows annotated as FlyteFile or os.PathLike may return a
    plain pathlib.Path; the framework is expected to coerce it to FlyteFile.
    """
    # NOTE(review): t1/wf1/t2/wf2 look like they originally carried Flyte
    # @task / @workflow decorators that are missing here -- confirm against
    # the original test module.
    def t1() -> FlyteFile:
        return pathlib.Path(local_dummy_file)
    def wf1() -> FlyteFile:
        return t1()
    wf_out = wf1()
    assert isinstance(wf_out, FlyteFile)
    with open(wf_out, 'r') as fh:
        assert (fh.read() == 'Hello world')
    # Reading through open() should have materialised the file locally.
    assert wf_out._downloaded
    os.remove(wf_out.path)
    p = wf_out.download()
    # NOTE(review): after removing the local copy, download() returns the
    # same path but the file is asserted NOT to exist -- appears to test
    # that a second download is a no-op; confirm intent.
    assert (not os.path.exists(wf_out.path))
    assert (p == wf_out.path)
    def t2() -> os.PathLike:
        return pathlib.Path(local_dummy_file)
    def wf2() -> os.PathLike:
        return t2()
    wf_out = wf2()
    # Even with an os.PathLike annotation the workflow output is a FlyteFile.
    assert isinstance(wf_out, FlyteFile)
    with open(wf_out, 'r') as fh:
        assert (fh.read() == 'Hello world')
def get_network_from_param_list(param_list, new_network):
    """Load the tensors in ``param_list`` into ``new_network`` in place.

    ``param_list`` must be ordered exactly like ``new_network.state_dict()``
    and contain one tensor per parameter (checked by assertion).

    Returns the mutated ``new_network`` for convenience.
    """
    assert (len(list(new_network.parameters())) == len(param_list))
    model_state_dict = new_network.state_dict()
    # enumerate over the state-dict keys replaces the manual counter of the
    # original; an out-of-range index still raises IndexError as before.
    for layer_idx, key in enumerate(model_state_dict):
        model_state_dict[key] = param_list[layer_idx]
    new_network.load_state_dict(model_state_dict)
    return new_network
# NOTE(review): the bare '.django_db' below looks like a stripped
# '@pytest.mark.django_db' decorator -- confirm against the original module.
.django_db
def test_federal_accounts_endpoint_keyword_filter_agency_name(client, fixture_data):
    """A partial keyword ('burea') should match the managing agency
    'The Bureau' and return exactly one federal account."""
    resp = client.post('/api/v2/federal_accounts/', content_type='application/json', data=json.dumps({'filters': {'fy': '2017'}, 'keyword': 'burea'}))
    response_data = resp.json()
    assert (len(response_data['results']) == 1)
    assert (response_data['results'][0]['managing_agency'] == 'The Bureau')
def _is_chinese_char_bert(cp: int) -> bool:
if (((cp >= 19968) and (cp <= 40959)) or ((cp >= 13312) and (cp <= 19903)) or ((cp >= 131072) and (cp <= 173791)) or ((cp >= 173824) and (cp <= 177983)) or ((cp >= 177984) and (cp <= 178207)) or ((cp >= 178208) and (cp <= 183983)) or ((cp >= 63744) and (cp <= 64255)) or ((cp >= 194560) and (cp <= 195103))):
return True
return False |
def negative_sharpe_ratio(weights: ARRAY_OR_SERIES[FLOAT], mean_returns: ARRAY_OR_SERIES[FLOAT], cov_matrix: ARRAY_OR_DATAFRAME[FLOAT], risk_free_rate: FLOAT) -> FLOAT:
    """Return the negated annualised Sharpe ratio of the portfolio, suitable
    as a minimisation objective for optimisers."""
    type_validation(weights=weights, means=mean_returns, cov_matrix=cov_matrix, risk_free_rate=risk_free_rate)
    quantities = annualised_portfolio_quantities(weights, mean_returns, cov_matrix, risk_free_rate=risk_free_rate)
    # Index 2 of the returned tuple is the Sharpe ratio.
    return -quantities[2]
class Spool(Boxes):
    """A spool: two flange discs, a polygonal center drum built from
    rectangular strips, and optional reinforcement ribs with inner discs."""

    ui_group = 'Misc'  # category shown in the boxes.py UI

    def __init__(self) -> None:
        Boxes.__init__(self)
        self.addSettingsArgs(edges.FingerJointSettings)
        self.buildArgParser(h=100)
        self.argparser.add_argument('--outer_diameter', action='store', type=float, default=200.0, help='diameter of the flanges')
        self.argparser.add_argument('--inner_diameter', action='store', type=float, default=80.0, help='diameter of the center part')
        self.argparser.add_argument('--axle_diameter', action='store', type=float, default=40.0, help='diameter of the axle hole (axle not part of drawing)')
        self.argparser.add_argument('--sides', action='store', type=int, default=8, help='number of pieces for the center part')
        self.argparser.add_argument('--reinforcements', action='store', type=int, default=8, help='number of reinforcement ribs per side')
        self.argparser.add_argument('--reinforcement_height', action='store', type=float, default=0.0, help='height of reinforcement ribs on the flanges')

    def sideCB(self):
        """Flange disc callback: axle hole, one row of finger holes per
        drum strip, and (optionally) finger holes for the reinforcement ribs."""
        self.hole(0, 0, d=self.axle_diameter)
        # r = circumradius, h = inradius, side = edge length of the polygon
        # forming the drum cross-section.
        (r, h, side) = self.regularPolygon(self.sides, radius=(self.inner_diameter / 2))
        t = self.thickness
        for i in range(self.sides):
            self.fingerHolesAt(((- side) / 2), (h + (0.5 * self.thickness)), side, 0)
            self.moveTo(0, 0, (360 / self.sides))
        if self.reinforcement_height:
            for i in range(self.reinforcements):
                # Two hole rows per rib: axle hole -> polygon inradius, and
                # just outside the polygon -> flange rim.
                self.fingerHolesAt((self.axle_diameter / 2), 0, (h - (self.axle_diameter / 2)), 0)
                self.fingerHolesAt((r + t), 0, (((self.outer_diameter / 2) - r) - t), 0)
                self.moveTo(0, 0, (360 / self.reinforcements))

    def reinforcementCB(self):
        """Inner disc callback: radial finger-hole rows for the rib joints."""
        for i in range(self.reinforcements):
            self.fingerHolesAt((self.axle_diameter / 2), 0, (((self.inner_diameter - self.axle_diameter) / 2) + self.thickness), 0)
            self.moveTo(0, 0, (360 / self.reinforcements))

    def render(self):
        """Draw all parts: 2 flanges, the drum strips, optional rib walls,
        and 2 inner discs."""
        t = self.thickness
        (r, h, side) = self.regularPolygon(self.sides, radius=(self.inner_diameter / 2))
        for i in range(2):
            self.parts.disc(self.outer_diameter, callback=self.sideCB, move='right')
        for i in range(self.sides):
            self.rectangularWall(side, self.h, 'fefe', move='right')
        if self.reinforcement_height:
            for i in range((self.reinforcements * 2)):
                # Bottom edge of each rib combines: plain span over the rim
                # gap, finger joint over the drum, then the axle section.
                edge = edges.CompoundEdge(self, 'fef', [(((self.outer_diameter / 2) - r) - t), ((r - h) + t), (h - (self.axle_diameter / 2))])
                self.trapezoidWall((self.reinforcement_height - t), ((self.outer_diameter - self.axle_diameter) / 2), (((self.inner_diameter - self.axle_diameter) / 2) + t), ['e', 'f', 'e', edge], move='right')
        for i in range(2):
            self.parts.disc((self.inner_diameter + (2 * t)), hole=self.axle_diameter, callback=self.reinforcementCB, move='right')
def clean_if_exist(path, files):
    """Delete each name in *files* (resolved relative to *path*) if it
    exists; directories are removed recursively."""
    base = os.path.abspath(path)
    for name in files:
        target = os.path.join(base, name)
        if not os.path.exists(target):
            continue
        if os.path.isdir(target):
            log.info('removing folder %s', target)
            shutil.rmtree(target)
        else:
            log.info('removing file %s', target)
            os.remove(target)
def setup(app):
    """Register the control-script REST endpoints on *app*'s router."""
    routes = (
        ('POST', '/fledge/control/script/{script_name}/schedule', add_schedule_and_configuration),
        ('POST', '/fledge/control/script', add),
        ('GET', '/fledge/control/script', get_all),
        ('GET', '/fledge/control/script/{script_name}', get_by_name),
        ('PUT', '/fledge/control/script/{script_name}', update),
        ('DELETE', '/fledge/control/script/{script_name}', delete),
    )
    for method, uri, handler in routes:
        app.router.add_route(method, uri, handler)
class Billboard():
    """Factory for Billboard.js chart components.

    Every chart method normalises the input records via
    ``self.page.data.bb.y`` into ``{'labels', 'series', 'datasets'}`` and
    returns a component from ``graph.GraphBillboard``.
    """

    def __init__(self, ui):
        self.page = ui.page
        self.chartFamily = 'BB'  # identifier for this charting backend

    def plot(self, record=None, y=None, x=None, kind='line', profile=None, width=(100, '%'), height=(330, 'px'), options=None, html_code=None, **kwargs):
        """Generic entry point: dispatch to the chart method named *kind*."""
        if ((y is not None) and (not isinstance(y, list))):
            y = [y]
        return getattr(self, kind)(record=record, y_columns=y, x_axis=x, profile=profile, width=width, height=height, options=options, html_code=html_code)

    def line(self, record=None, y_columns=None, x_axis=None, profile=None, width=(100, '%'), height=(330, 'px'), options=None, html_code=None):
        """Build a line chart with compact x-axis ticks."""
        options = (options or {})
        options.update({'y_columns': (y_columns or []), 'x_column': x_axis})
        data = self.page.data.bb.y((record or []), y_columns, x_axis)
        line_chart = graph.GraphBillboard.ChartLine(self.page, width, height, html_code, options, profile)
        if data['labels']:
            line_chart.labels(data['labels'])
        line_chart.colors(self.page.theme.charts)
        line_chart.options.axis.x.tick.count = 5
        line_chart.options.axis.x.tick.rotate = 0
        line_chart.options.axis.x.tick.multiline = False
        for (i, d) in enumerate(data['datasets']):
            line_chart.add_dataset(d, data['series'][i])
        return line_chart

    def line_range(self, record=None, y_columns=None, x_axis=None, range=5, profile=None, width=(100, '%'), height=(330, 'px'), options=None, html_code=None):
        """Build an area-line-range chart.

        NOTE: the ``range`` parameter shadows the builtin and is currently
        unused; it is kept for API compatibility.
        """
        options = (options or {})
        options.update({'y_columns': (y_columns or []), 'x_column': x_axis})
        data = self.page.data.bb.y((record or []), y_columns, x_axis)
        line_chart = graph.GraphBillboard.ChartLine(self.page, width, height, html_code, options, profile)
        line_chart.options.type = 'area-line-range'
        line_chart.colors(self.page.theme.charts)
        if data['labels']:
            line_chart.labels(data['labels'])
        for (i, d) in enumerate(data['datasets']):
            line_chart.add_dataset(d, data['series'][i])
        return line_chart

    def bubble(self, record=None, y_columns=None, x_axis=None, profile=None, width=(100, '%'), height=(330, 'px'), options=None, html_code=None):
        """Build a bubble chart."""
        options = (options or {})
        options.update({'y_columns': (y_columns or []), 'x_column': x_axis})
        data = self.page.data.bb.y((record or []), y_columns, x_axis)
        bubble_chart = graph.GraphBillboard.ChartBubble(self.page, width, height, html_code, options, profile)
        if data['labels']:
            bubble_chart.labels(data['labels'])
        bubble_chart.colors(self.page.theme.charts)
        for (i, d) in enumerate(data['datasets']):
            bubble_chart.add_dataset(d, data['series'][i])
        return bubble_chart

    def radar(self, record=None, y_columns=None, x_axis=None, profile=None, width=(100, '%'), height=(330, 'px'), options=None, html_code=None):
        """Build a radar chart."""
        options = (options or {})
        options.update({'y_columns': (y_columns or []), 'x_column': x_axis})
        data = self.page.data.bb.y((record or []), y_columns, x_axis)
        radar_chart = graph.GraphBillboard.ChartRadar(self.page, width, height, html_code, options, profile)
        if data['labels']:
            radar_chart.labels(data['labels'])
        radar_chart.colors(self.page.theme.charts)
        for (i, d) in enumerate(data['datasets']):
            radar_chart.add_dataset(d, data['series'][i])
        return radar_chart

    def spline(self, record=None, y_columns=None, x_axis=None, profile=None, width=(100, '%'), height=(330, 'px'), options=None, html_code=None):
        """Build a spline (smoothed line) chart."""
        options = (options or {})
        options.update({'y_columns': (y_columns or []), 'x_column': x_axis})
        data = self.page.data.bb.y((record or []), y_columns, x_axis)
        line_chart = graph.GraphBillboard.ChartSpline(self.page, width, height, html_code, options, profile)
        if data['labels']:
            line_chart.labels(data['labels'])
        line_chart.colors(self.page.theme.charts)
        for (i, d) in enumerate(data['datasets']):
            line_chart.add_dataset(d, data['series'][i])
        return line_chart

    def step(self, record=None, y_columns=None, x_axis=None, profile=None, width=(100, '%'), height=(330, 'px'), options=None, html_code=None):
        """Build a step chart (a spline chart with type 'step')."""
        options = (options or {})
        options.update({'y_columns': (y_columns or []), 'x_column': x_axis})
        data = self.page.data.bb.y((record or []), y_columns, x_axis)
        line_chart = graph.GraphBillboard.ChartSpline(self.page, width, height, html_code, options, profile)
        line_chart.options.type = 'step'
        if data['labels']:
            line_chart.labels(data['labels'])
        line_chart.colors(self.page.theme.charts)
        for (i, d) in enumerate(data['datasets']):
            line_chart.add_dataset(d, data['series'][i])
        return line_chart

    def area(self, record=None, y_columns=None, x_axis=None, profile=None, width=(100, '%'), height=(330, 'px'), options=None, html_code=None):
        """Build an area chart."""
        options = (options or {})
        options.update({'y_columns': (y_columns or []), 'x_column': x_axis})
        data = self.page.data.bb.y((record or []), y_columns, x_axis)
        line_chart = graph.GraphBillboard.ChartArea(self.page, width, height, html_code, options, profile)
        if data['labels']:
            line_chart.labels(data['labels'])
        line_chart.colors(self.page.theme.charts)
        for (i, d) in enumerate(data['datasets']):
            line_chart.add_dataset(d, data['series'][i])
        return line_chart

    def area_step(self, record=None, y_columns=None, x_axis=None, profile=None, width=(100, '%'), height=(330, 'px'), options=None, html_code=None):
        """Build an area chart with stepped interpolation ('area-step')."""
        options = (options or {})
        options.update({'y_columns': (y_columns or []), 'x_column': x_axis})
        data = self.page.data.bb.y((record or []), y_columns, x_axis)
        line_chart = graph.GraphBillboard.ChartArea(self.page, width, height, html_code, options, profile)
        line_chart.options.type = 'area-step'
        if data['labels']:
            line_chart.labels(data['labels'])
        line_chart.colors(self.page.theme.charts)
        for (i, d) in enumerate(data['datasets']):
            line_chart.add_dataset(d, data['series'][i])
        return line_chart

    def timeseries(self, record=None, y_columns=None, x_axis=None, profile=None, options=None, width=(100, '%'), height=(330, 'px'), html_code=None):
        """Build a line chart whose x axis is a '%Y-%m-%d' time series."""
        line = self.line(record, y_columns, x_axis, profile, width, height, options, html_code)
        line.options.axis.x.type = 'timeseries'
        line.options.axis.x.tick.format = '%Y-%m-%d'
        return line

    def bar(self, record=None, y_columns=None, x_axis=None, profile=None, width=(100, '%'), height=(330, 'px'), options=None, html_code=None):
        """Build a vertical bar chart with compact x-axis ticks."""
        options = (options or {})
        options.update({'y_columns': (y_columns or []), 'x_column': x_axis})
        data = self.page.data.bb.y((record or []), y_columns, x_axis)
        line_chart = graph.GraphBillboard.ChartBar(self.page, width, height, html_code, options, profile)
        if data['labels']:
            line_chart.labels(data['labels'])
        line_chart.colors(self.page.theme.charts)
        line_chart.options.axis.x.tick.count = 5
        line_chart.options.axis.x.tick.rotate = 0
        line_chart.options.axis.x.tick.multiline = False
        for (i, d) in enumerate(data['datasets']):
            line_chart.add_dataset(d, data['series'][i])
        return line_chart

    def stacked(self, record=None, y_columns=None, x_axis=None, profile=None, width=(100, '%'), height=(330, 'px'), options=None, html_code=None):
        """Build a stacked bar chart (all series in one stack group)."""
        options = (options or {})
        options.update({'y_columns': (y_columns or []), 'x_column': x_axis})
        data = self.page.data.bb.y((record or []), y_columns, x_axis)
        line_chart = graph.GraphBillboard.ChartBar(self.page, width, height, html_code, options, profile)
        if data['labels']:
            line_chart.labels(data['labels'])
        line_chart.colors(self.page.theme.charts)
        line_chart.options.data.groups = [data['series']]
        for (i, d) in enumerate(data['datasets']):
            line_chart.add_dataset(d, data['series'][i])
        return line_chart

    def hbar(self, record=None, y_columns=None, x_axis=None, profile=None, width=(100, '%'), height=(330, 'px'), options=None, html_code=None):
        """Build a horizontal bar chart (a rotated bar chart)."""
        h_bar = self.bar(record, y_columns, x_axis, profile, width, height, options, html_code)
        h_bar.options.axis.rotated = True
        return h_bar

    def scatter(self, record=None, y_columns=None, x_axis=None, profile=None, width=(100, '%'), height=(330, 'px'), options=None, html_code=None):
        """Build a scatter chart; aggregation defaults to 'distinct'."""
        options = (options or {})
        options.update({'y_columns': (y_columns or []), 'x_column': x_axis})
        data = self.page.data.bb.y((record or []), y_columns, x_axis, options={'agg': options.get('agg', 'distinct')})
        line_chart = graph.GraphBillboard.ChartScatter(self.page, width, height, html_code, options, profile)
        if data['labels']:
            line_chart.labels(data['labels'])
        line_chart.colors(self.page.theme.charts)
        for (i, d) in enumerate(data['datasets']):
            line_chart.add_dataset(d, data['series'][i])
        return line_chart

    def pie(self, record=None, y_columns=None, x_axis=None, profile=None, width=(100, '%'), height=(330, 'px'), options=None, html_code=None):
        """Build a pie chart."""
        options = (options or {})
        options.update({'y_columns': (y_columns or []), 'x_column': x_axis})
        data = self.page.data.bb.y((record or []), y_columns, x_axis)
        pie_chart = graph.GraphBillboard.ChartPie(self.page, width, height, html_code, options, profile)
        if data['labels']:
            pie_chart.labels(data['labels'])
        pie_chart.colors(self.page.theme.charts)
        for (i, d) in enumerate(data['datasets']):
            # Fix: pass the i-th series name like every other chart method
            # (previously the whole series list was passed).
            pie_chart.add_dataset(d, data['series'][i])
        return pie_chart

    def donut(self, record=None, y_columns=None, x_axis=None, profile=None, width=(100, '%'), height=(330, 'px'), options=None, html_code=None):
        """Build a donut chart."""
        options = (options or {})
        options.update({'y_columns': (y_columns or []), 'x_column': x_axis})
        data = self.page.data.bb.y((record or []), y_columns, x_axis)
        pie_chart = graph.GraphBillboard.ChartDonut(self.page, width, height, html_code, options, profile)
        if data['labels']:
            pie_chart.labels(data['labels'])
        pie_chart.colors(self.page.theme.charts)
        for (i, d) in enumerate(data['datasets']):
            pie_chart.add_dataset(d, data['series'][i])
        return pie_chart

    def gauge(self, value: int=0, text: str='', profile=None, options=None, width=(100, '%'), height=(330, 'px'), html_code=None):
        """Build a gauge chart showing a single *value* with label *text*."""
        g_chart = graph.GraphBillboard.ChartGauge(self.page, width, height, html_code, options, profile)
        g_chart.colors(self.page.theme.charts)
        g_chart.add_dataset(value, text)
        return g_chart
def _find_graph_differences(previous_graph: Optional[GraphRepr], current_graph: GraphRepr, previous_results: Dict[(str, Optional[List[Row]])], previous_erasure_results: Dict[(str, int)]) -> Optional[GraphDiff]:
    """Diff a previous graph representation against the current one.

    Returns a GraphDiff summarising added/removed collections and edges,
    plus which collections were already processed (keys of the previous
    access/erasure result dicts).  Returns None when there is no previous
    graph to compare against.
    """
    if (not previous_graph):
        return None
    def all_edges(graph: GraphRepr) -> Set[str]:
        # Flatten every edge list in the nested
        # {collection: {field: [edges]}} mapping into one set.
        edge_list: List[str] = []
        for (_, dependent_collections) in graph.items():
            for (_, edges) in dependent_collections.items():
                if edges:
                    edge_list.extend(edges)
        return set(edge_list)
    # Collections are the top-level keys, minus synthetic ones.
    current_collections: Set[str] = (set(list(current_graph.keys())) - artificial_collections)
    current_edges: Set[str] = all_edges(current_graph)
    previous_collections: Set[str] = (set(list(previous_graph.keys())) - artificial_collections)
    previous_edges: Set[str] = all_edges(previous_graph)
    added_collections: List[str] = list((current_collections - previous_collections))
    added_edges: List[str] = list((current_edges - previous_edges))
    removed_collections: List[str] = list((previous_collections - current_collections))
    removed_edges: List[str] = list((previous_edges - current_edges))
    already_processed_access_collections = list(previous_results.keys())
    skipped_added_edges: List[str] = get_skipped_added_edges(already_processed_access_collections, current_graph, added_edges)
    already_processed_erasure_collections = list(previous_erasure_results.keys())
    # Everything is sorted so the resulting diff is deterministic.
    return GraphDiff(previous_collections=list(sorted(previous_collections)), current_collections=list(sorted(current_collections)), added_collections=sorted(added_collections), removed_collections=sorted(removed_collections), added_edges=sorted(added_edges), removed_edges=sorted(removed_edges), already_processed_access_collections=sorted(already_processed_access_collections), already_processed_erasure_collections=sorted(already_processed_erasure_collections), skipped_added_edges=sorted(skipped_added_edges))
def refine_hit(args):
    """Worker function: write one FASTA record to a temp file, run
    get_best_hit against it, and return [seqname] + best-hit fields.

    *args* is a (seqname, seq, group_fasta, excluded_taxa, tempdir) tuple
    so the function can be mapped over a pool.
    """
    seqname, seq, group_fasta, excluded_taxa, tempdir = args
    with NamedTemporaryFile(delete=True, dir=tempdir, mode='w+') as handle:
        handle.write(f'>{seqname}\n{seq}')
        # Flush so the subprocess/reader invoked by get_best_hit sees the
        # record before the file object is closed.
        handle.flush()
        best_hit = get_best_hit(handle.name, group_fasta, excluded_taxa, tempdir)
    return [seqname] + best_hit
def upgrade():
    """Alembic migration: rename the *_time columns to *_at on the version
    tables and recreate the sessions_version indexes under their corrected
    (pluralised 'sessions') names."""
    op.alter_column('sessions_version', 'end_time', new_column_name='ends_at')
    op.alter_column('sessions_version', 'start_time', new_column_name='starts_at')
    op.alter_column('events_version', 'end_time', new_column_name='ends_at')
    op.alter_column('events_version', 'start_time', new_column_name='starts_at')
    # Create the correctly named indexes first, then drop the old ones that
    # used the singular 'session_version' prefix.
    op.create_index(op.f('ix_sessions_version_operation_type'), 'sessions_version', ['operation_type'], unique=False)
    op.create_index(op.f('ix_sessions_version_transaction_id'), 'sessions_version', ['transaction_id'], unique=False)
    op.drop_index('ix_session_version_operation_type', table_name='sessions_version')
    op.drop_index('ix_session_version_transaction_id', table_name='sessions_version')
def _print_cursor_stmt(cur: Node, target: Cursor, env: PrintEnv, indent: str) -> list[str]:
    """Render the statement at cursor *cur* as a list of source lines.

    If/For statements are printed recursively with their blocks; any other
    statement kind falls through to the generic printer.  When *cur* equals
    *target*, the first emitted line gets a '# <-- NODE' marker appended.
    """
    stmt = cur._node
    if isinstance(stmt, LoopIR.If):
        cond = _print_expr(stmt.cond, env)
        lines = [f'{indent}if {cond}:']
        # Nested blocks print under a pushed (child) name environment with
        # extra indentation.
        lines.extend(_print_cursor_block(cur.body(), target, env.push(), (indent + ' ')))
        if stmt.orelse:
            lines.append(f'{indent}else:')
            lines.extend(_print_cursor_block(cur.orelse(), target, env.push(), (indent + ' ')))
    elif isinstance(stmt, LoopIR.For):
        lo = _print_expr(stmt.lo, env)
        hi = _print_expr(stmt.hi, env)
        body_env = env.push()
        # Loop mode selects the surface syntax: par(...) vs seq(...).
        loop_type = ('par' if isinstance(stmt.loop_mode, LoopIR.Par) else 'seq')
        lines = [f'{indent}for {body_env.get_name(stmt.iter)} in {loop_type}({lo}, {hi}):', *_print_cursor_block(cur.body(), target, body_env, (indent + ' '))]
    else:
        lines = _print_stmt(stmt, env, indent)
    if (cur == target):
        lines[0] = f'{lines[0]} # <-- NODE'
    return lines
# NOTE(review): the next line looks like a stripped decorator, e.g.
# '@second_order_decorator(jwt_required)' -- confirm against the original.
_order_decorator(jwt_required)
def is_registrar(f):
    """Decorator: allow staff users, or users who are registrars of (or
    have access to) the event in kwargs['event_id']; otherwise raise
    ForbiddenError."""
    # NOTE(review): the bare '(f)' below is presumably a stripped
    # '@wraps(f)' decorator for decorated_function.
    (f)
    def decorated_function(*args, **kwargs):
        user = current_user
        if user.is_staff:
            return f(*args, **kwargs)
        if (('event_id' in kwargs) and (user.is_registrar(kwargs['event_id']) or user.has_event_access(kwargs['event_id']))):
            return f(*args, **kwargs)
        raise ForbiddenError({'source': ''}, 'Registrar Access is Required.')
    return decorated_function
class OptionSeriesColumnpyramidSonificationTracksMappingPitch(Options):
    """Option wrapper for columnpyramid series sonification pitch mapping.

    NOTE(review): every option appears twice -- a getter returning
    ``_config_get`` and a setter calling ``_config``.  These are normally
    @property / @<name>.setter pairs in the generated source; the
    decorators look stripped here, so as written the later definition
    shadows the earlier one.  Confirm against the generated original.
    """
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        # Default mapping source is the point's 'y' value.
        return self._config_get('y')
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        # Default maximum pitch: note c6.
        return self._config_get('c6')
    def max(self, text: str):
        self._config(text, js_type=False)
    def min(self):
        # Default minimum pitch: note c2.
        return self._config_get('c2')
    def min(self, text: str):
        self._config(text, js_type=False)
    def scale(self):
        return self._config_get(None)
    def scale(self, value: Any):
        self._config(value, js_type=False)
    def within(self):
        # Default: pitch mapped within the 'yAxis' extremes.
        return self._config_get('yAxis')
    def within(self, text: str):
        self._config(text, js_type=False)
class OptionSeriesColumnStates(Options):
    """Accessors for column-series state sub-options (hover, inactive,
    normal, select).

    NOTE(review): these are likely @property getters in the generated
    original; the decorators look stripped here.
    """
    def hover(self) -> 'OptionSeriesColumnStatesHover':
        return self._config_sub_data('hover', OptionSeriesColumnStatesHover)
    def inactive(self) -> 'OptionSeriesColumnStatesInactive':
        return self._config_sub_data('inactive', OptionSeriesColumnStatesInactive)
    def normal(self) -> 'OptionSeriesColumnStatesNormal':
        return self._config_sub_data('normal', OptionSeriesColumnStatesNormal)
    def select(self) -> 'OptionSeriesColumnStatesSelect':
        return self._config_sub_data('select', OptionSeriesColumnStatesSelect)
class OptionSeriesBubbleStatesSelectMarker(Options):
    """Marker options for the 'select' state of bubble series.

    NOTE(review): every option appears twice -- getter then setter.  These
    are normally @property / @<name>.setter pairs in the generated source;
    the decorators look stripped here, so the later definition shadows the
    earlier one.  Confirm against the generated original.
    """
    def enabled(self):
        return self._config_get(None)
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    def enabledThreshold(self):
        # Default: markers auto-enable when points are >2 radii apart.
        return self._config_get(2)
    def enabledThreshold(self, num: float):
        self._config(num, js_type=False)
    def fillColor(self):
        return self._config_get(None)
    def fillColor(self, text: str):
        self._config(text, js_type=False)
    def height(self):
        return self._config_get(None)
    def height(self, num: float):
        self._config(num, js_type=False)
    def lineColor(self):
        # Default marker outline colour is white.
        return self._config_get('#ffffff')
    def lineColor(self, text: str):
        self._config(text, js_type=False)
    def lineWidth(self):
        return self._config_get(0)
    def lineWidth(self, num: float):
        self._config(num, js_type=False)
    def radius(self):
        return self._config_get(4)
    def radius(self, num: float):
        self._config(num, js_type=False)
    def width(self):
        return self._config_get(None)
    def width(self, num: float):
        self._config(num, js_type=False)
# NOTE(review): the three lines below look like stripped pytest decorators
# (@pytest.mark.scheduler, @pytest.mark.integration_test,
# @pytest.mark.parametrize(...)) -- confirm against the original module.
.scheduler
.integration_test
.parametrize('extra_config, extra_poly_eval, cmd_line_arguments,num_successful,num_iters,progress,assert_present_in_snapshot, expected_state', [pytest.param('MAX_RUNTIME 5', ' import time; time.sleep(1000)', [ENSEMBLE_EXPERIMENT_MODE, '--realizations', '0,1', 'poly_example/poly.ert'], 0, 1, 1.0, [('.*', 'reals.*.forward_models.*.status', FORWARD_MODEL_STATE_FAILURE), ('.*', 'reals.*.forward_models.*.error', 'The run is cancelled due to reaching MAX_RUNTIME')], ([RealizationStorageState.LOAD_FAILURE] * 2), id='ee_poly_experiment_cancelled_by_max_runtime'), pytest.param('', '', [ENSEMBLE_EXPERIMENT_MODE, '--realizations', '0,1', 'poly_example/poly.ert'], 2, 1, 1.0, [('.*', 'reals.*.forward_models.*.status', FORWARD_MODEL_STATE_FINISHED)], ([RealizationStorageState.HAS_DATA] * 2), id='ee_poly_experiment'), pytest.param('', '', [ENSEMBLE_SMOOTHER_MODE, '--target-case', 'poly_runpath_file', '--realizations', '0,1', 'poly_example/poly.ert'], 2, 2, 1.0, [('.*', 'reals.*.forward_models.*.status', FORWARD_MODEL_STATE_FINISHED)], ([RealizationStorageState.HAS_DATA] * 2), id='ee_poly_smoother'), pytest.param('', ' import os\n if os.getcwd().split("/")[-2].split("-")[1] == "0": sys.exit(1)', [ENSEMBLE_SMOOTHER_MODE, '--target-case', 'poly_runpath_file', '--realizations', '0,1', 'poly_example/poly.ert'], 1, 1, 0.5, [('0', "reals.'0'.forward_models.'0'.status", FORWARD_MODEL_STATE_FAILURE), ('0', "reals.'0'.forward_models.'1'.status", FORWARD_MODEL_STATE_START), ('.*', "reals.'1'.forward_models.*.status", FORWARD_MODEL_STATE_FINISHED)], [RealizationStorageState.LOAD_FAILURE, RealizationStorageState.HAS_DATA], id='ee_failing_poly_smoother')])
def test_tracking(extra_config, extra_poly_eval, cmd_line_arguments, num_successful, num_iters, progress, assert_present_in_snapshot, expected_state, tmpdir, source_root, storage, try_queue_and_scheduler, monkeypatch):
    """Integration test: run an ert experiment/smoother in a background
    thread, collect EvaluatorTracker events into per-iteration snapshots,
    and assert progress, snapshot contents and final realization states."""
    experiment_folder = 'poly_example'
    shutil.copytree(os.path.join(source_root, 'test-data', f'{experiment_folder}'), os.path.join(str(tmpdir), f'{experiment_folder}'))
    # A second copy of the poly_eval job plus any per-case extra config is
    # appended to the ert config file.
    config_lines = ['INSTALL_JOB poly_eval2 POLY_EVAL\nSIMULATION_JOB poly_eval2\n', extra_config]
    with tmpdir.as_cwd():
        with open(f'{experiment_folder}/poly.ert', 'a', encoding='utf-8') as fh:
            fh.writelines(config_lines)
        # Inject the per-case code (sleep / conditional failure) just
        # before the 'coeffs' line of the evaluator script.
        with fileinput.input(f'{experiment_folder}/poly_eval.py', inplace=True) as fin:
            for line in fin:
                if line.strip().startswith('coeffs'):
                    print(extra_poly_eval)
                print(line, end='')
        parser = ArgumentParser(prog='test_main')
        parsed = ert_parser(parser, cmd_line_arguments)
        FeatureToggling.update_from_args(parsed)
        ert_config = ErtConfig.from_file(parsed.config)
        os.chdir(ert_config.config_path)
        experiment_id = storage.create_experiment(parameters=ert_config.ensemble_config.parameter_configuration, responses=ert_config.ensemble_config.response_configuration, observations=ert_config.observations)
        model = create_model(ert_config, storage, parsed, experiment_id)
        evaluator_server_config = EvaluatorServerConfig(custom_port_range=range(1024, 65535), custom_host='127.0.0.1', use_token=False, generate_cert=False)
        # Simulations run in a daemon-style thread; the tracker below
        # consumes their events on this thread.
        thread = threading.Thread(name='ert_cli_simulation_thread', target=model.start_simulations_thread, args=(evaluator_server_config,))
        thread.start()
        tracker = EvaluatorTracker(model, ee_con_info=evaluator_server_config.get_connection_info())
        snapshots = {}
        for event in tracker.track():
            if isinstance(event, FullSnapshotEvent):
                snapshots[event.iteration] = event.snapshot
            if (isinstance(event, SnapshotUpdateEvent) and (event.partial_snapshot is not None)):
                # Partial updates are merged into the iteration's snapshot.
                snapshots[event.iteration].merge(event.partial_snapshot.data())
            if isinstance(event, EndEvent):
                pass
        assert (tracker._progress() == progress)
        assert (len(snapshots) == num_iters)
        for snapshot in snapshots.values():
            successful_reals = list(filter((lambda item: (item[1].status == REALIZATION_STATE_FINISHED)), snapshot.reals.items()))
            assert (len(successful_reals) == num_successful)
        for (iter_expression, snapshot_expression, expected) in assert_present_in_snapshot:
            for (i, snapshot) in snapshots.items():
                if re.match(iter_expression, str(i)):
                    check_expression(snapshot.to_dict(), snapshot_expression, expected, f'''Snapshot {i} did not match:
''')
        thread.join()
        state_map = storage.get_ensemble_by_name('default').state_map
        assert (state_map[:2] == expected_state)
        FeatureToggling.reset()
def get_label_arrays(fnames, column_num=None):
    """Read a header-prefixed CSV and bucket file basenames by label.

    Args:
        fnames: path to a CSV whose first line is a header.  Column 0
            holds a filename (extension is stripped) and ``column_num``
            holds an integer label 0, 1 or 2.
        column_num: index of the label column; required.

    Returns:
        (low_array, medium_array, high_array, fname2label_dict): lists of
        basenames labelled 0, 1 and 2 respectively, plus a basename->label
        dict.  Rows whose label is not an integer are skipped.

    Exits the process when column_num is missing or a label is outside
    {0, 1, 2} (legacy CLI behaviour, kept intact).
    """
    if column_num is None:
        print('You need to call this function with a column number for me to read from')
        sys.exit()
    low_array = []
    medium_array = []
    high_array = []
    fname2label_dict = {}
    with open(fnames) as f:
        next(f, None)  # skip the header row
        for line in f:
            line = line.split('\n')[0]
            fname = line.split(',')[0].split('.')[0]
            try:
                feature = int(line.split(',')[column_num])
            except ValueError:
                # Non-integer label (e.g. blank or text): skip the row.
                continue
            if (feature == 0):
                low_array.append(fname)
            elif (feature == 1):
                medium_array.append(fname)
            elif (feature == 2):
                high_array.append(fname)
            else:
                print('Houston, we have got problems. I think the feature is ', feature)
                sys.exit()
            fname2label_dict[fname] = feature
    return (low_array, medium_array, high_array, fname2label_dict)
# NOTE(review): the two parenthesised strings below look like stripped
# @mock.patch(...) decorators -- confirm against the original module.
('aea.cli.upgrade.get_latest_version_available_in_registry')
('click.echo')
class TestUpgradeProjectWithoutNewerVersion(BaseTestUpgradeProject):
    """'aea upgrade' when the registry has no newer version: the command
    should succeed, inform the user, and leave the project unchanged."""
    def test_run(self, mock_click_echo, mock_get_latest_version):
        """Upgrade exits 0, echoes the 'Continuing...' message, and the
        agent directory matches the expected baseline."""
        fake_old_public_id = self.OLD_AGENT_PUBLIC_ID
        mock_get_latest_version.return_value = fake_old_public_id
        result = self.run_cli_command('upgrade', '--remote', cwd=self._get_cwd())
        assert (result.exit_code == 0)
        version_str = str(self.OLD_AGENT_PUBLIC_ID.version)
        mock_click_echo.assert_any_call(f"Latest version found is '{version_str}' which is smaller or equal than current version '{version_str}'. Continuing...")
        # Directory comparison ignores the agent config, README and the
        # default junk names (e.g. __pycache__).
        ignore = ([DEFAULT_AEA_CONFIG_FILE, DEFAULT_README_FILE] + filecmp.DEFAULT_IGNORES)
        dircmp = filecmp.dircmp(self.current_agent_context, self.EXPECTED, ignore=ignore)
        (_left_only, _right_only, diff) = dircmp_recursive(dircmp)
        assert (diff == set())
class TestNull(util.ColorAsserts, unittest.TestCase):
    """HCT color space: hue should be NaN (undefined) for null/none inputs
    and for achromatic (gray) colors."""
    def test_null_input(self):
        """An explicitly NaN hue channel stays NaN."""
        c = Color('hct', [NaN, 20, 30], 1)
        self.assertTrue(c.is_nan('hue'))
    def test_none_input(self):
        """'none' in the CSS-style color() syntax parses to a NaN hue."""
        c = Color('color(--hct none 20 30 / 1)')
        self.assertTrue(c.is_nan('hue'))
    def test_null_normalization_min_chroma(self):
        """Normalizing achromatic named colors yields an undefined hue."""
        c = Color(Color('white').convert('hct').to_string()).normalize()
        self.assertTrue(c.is_nan('hue'))
        c = Color(Color('gray').convert('hct').to_string()).normalize()
        self.assertTrue(c.is_nan('hue'))
        c = Color(Color('darkgray').convert('hct').to_string()).normalize()
        self.assertTrue(c.is_nan('hue'))
    def test_achromatic_hue(self):
        """Every gray (r == g == b) in several RGB spaces converts to HCT
        with an undefined hue."""
        for space in ('srgb', 'display-p3', 'rec2020', 'a98-rgb', 'prophoto-rgb'):
            for x in range(0, 256):
                color = Color('color({space} {num:f} {num:f} {num:f})'.format(space=space, num=(x / 255)))
                color2 = color.convert('hct')
                self.assertTrue(color2.is_nan('hue'))
# NOTE(review): the bare string below looks like a stripped route
# decorator, e.g. @app.route('/get_vehicleinfo/<string:vin>') -- confirm
# against the original module.
('/get_vehicleinfo/<string:vin>')
def get_vehicle_info(vin):
    """Return the vehicle info for *vin* as a JSON response; pass
    ?from_cache=1 to serve the cached copy instead of refreshing."""
    from_cache = (int(request.args.get('from_cache', 0)) == 1)
    # default=str lets non-JSON-serialisable values (e.g. datetimes) be
    # stringified rather than raising.
    response = app.response_class(response=json.dumps(APP.myp.get_vehicle_info(vin, from_cache).to_dict(), default=str), status=200, mimetype='application/json')
    return response
def print_default_benchmark_total_line(stat: DefaultStat) -> None:
    """Log the 'Total' and 'Avg' summary rows of the benchmark table,
    closed off by a double underline.

    Column widths must match the table header printed elsewhere; '-' marks
    cells that have no meaningful total.
    """
    logging.info(SINGLE_UNDERLINE)
    logging.info(bold_white(f"|{'Total':^19}|{stat.total_seconds:^16.3f}|{stat.total_tx:^16}|{'-':^16}|{stat.total_blocks:^16}|{'-':^20}|{stat.total_gas:^16,}|{'-':^16}|"))
    logging.info(bold_white(f"|{'Avg':^19}|{stat.avg_total_seconds:^16.3f}|{stat.avg_total_tx:^16.0f}|{stat.tx_per_second:^16.3f}|{stat.avg_total_blocks:^16.0f}|{stat.blocks_per_second:^20.3f}|{stat.avg_total_gas:^16,.0f}|{stat.gas_per_second:^16,.3f}|"))
    logging.info((DOUBLE_UNDERLINE + '\n'))
class IShellLinkA(IUnknown):
_iid_ = GUID('{000214EE-0000-0000-C000-}')
_methods_ = [COMMETHOD([], HRESULT, 'GetPath', (['in', 'out'], c_char_p, 'pszFile'), (['in'], c_int, 'cchMaxPath'), (['in', 'out'], POINTER(WIN32_FIND_DATAA), 'pfd'), (['in'], DWORD, 'fFlags')), COMMETHOD([], HRESULT, 'GetIDList', (['retval', 'out'], POINTER(LPITEMIDLIST), 'ppidl')), COMMETHOD([], HRESULT, 'SetIDList', (['in'], LPCITEMIDLIST, 'pidl')), COMMETHOD([], HRESULT, 'GetDescription', (['in', 'out'], c_char_p, 'pszName'), (['in'], c_int, 'cchMaxName')), COMMETHOD([], HRESULT, 'SetDescription', (['in'], c_char_p, 'pszName')), COMMETHOD([], HRESULT, 'GetWorkingDirectory', (['in', 'out'], c_char_p, 'pszDir'), (['in'], c_int, 'cchMaxPath')), COMMETHOD([], HRESULT, 'SetWorkingDirectory', (['in'], c_char_p, 'pszDir')), COMMETHOD([], HRESULT, 'GetArguments', (['in', 'out'], c_char_p, 'pszArgs'), (['in'], c_int, 'cchMaxPath')), COMMETHOD([], HRESULT, 'SetArguments', (['in'], c_char_p, 'pszArgs')), COMMETHOD(['propget'], HRESULT, 'Hotkey', (['retval', 'out'], POINTER(c_short), 'pwHotkey')), COMMETHOD(['propput'], HRESULT, 'Hotkey', (['in'], c_short, 'pwHotkey')), COMMETHOD(['propget'], HRESULT, 'ShowCmd', (['retval', 'out'], POINTER(c_int), 'piShowCmd')), COMMETHOD(['propput'], HRESULT, 'ShowCmd', (['in'], c_int, 'piShowCmd')), COMMETHOD([], HRESULT, 'GetIconLocation', (['in', 'out'], c_char_p, 'pszIconPath'), (['in'], c_int, 'cchIconPath'), (['in', 'out'], POINTER(c_int), 'piIcon')), COMMETHOD([], HRESULT, 'SetIconLocation', (['in'], c_char_p, 'pszIconPath'), (['in'], c_int, 'iIcon')), COMMETHOD([], HRESULT, 'SetRelativePath', (['in'], c_char_p, 'pszPathRel'), (['in'], DWORD, 'dwReserved')), COMMETHOD([], HRESULT, 'Resolve', (['in'], c_int, 'hwnd'), (['in'], DWORD, 'fFlags')), COMMETHOD([], HRESULT, 'SetPath', (['in'], c_char_p, 'pszFile'))]
def GetPath(self, flags=SLGP_SHORTPATH):
buf = create_string_buffer(MAX_PATH)
self.__com_GetPath(buf, MAX_PATH, None, flags)
return buf.value
def GetDescription(self):
buf = create_string_buffer(1024)
self.__com_GetDescription(buf, 1024)
return buf.value
def GetWorkingDirectory(self):
    """Return the shortcut's working directory as bytes."""
    workdir = create_string_buffer(MAX_PATH)
    self.__com_GetWorkingDirectory(workdir, MAX_PATH)
    return workdir.value
def GetArguments(self):
    """Return the shortcut's command-line arguments as bytes."""
    arguments = create_string_buffer(1024)
    self.__com_GetArguments(arguments, 1024)
    return arguments.value
def GetIconLocation(self):
    """Return (icon_path_bytes, icon_index) for the shortcut's icon."""
    icon_index = c_int()
    icon_path = create_string_buffer(MAX_PATH)
    self.__com_GetIconLocation(icon_path, MAX_PATH, byref(icon_index))
    return (icon_path.value, icon_index.value)
class StringCounter():
    """Tallies how many times each string has been added; None is ignored."""

    def __init__(self):
        # string -> occurrence count
        self._counter = {}

    def add(self, string):
        """Record one occurrence of *string* (no-op when string is None)."""
        if string is None:
            return
        self._counter[string] = self._counter.get(string, 0) + 1

    def count(self, string):
        """Return the number of times *string* was added (0 if never)."""
        return self._counter.get(string, 0)

    def __str__(self):
        # "key=count" pairs in insertion order, comma-separated.
        return ', '.join('{}={}'.format(key, value) for key, value in self._counter.items())
def generate_sample_data(output: IO[Any]):
    """Write ten simple and ten nested protobuf messages to an MCAP stream.

    Messages are timestamped 1000..10000 ns; the stream is rewound to the
    start afterwards so callers can read the data back immediately.
    """
    with Writer(output) as writer:
        for seq in range(1, 11):
            stamp = seq * 1000
            writer.write_message(
                topic='/simple_message',
                message=SimpleMessage(data=f'Hello MCAP protobuf world #{seq}!'),
                log_time=stamp,
                publish_time=stamp,
            )
            nested = ComplexMessage(
                intermediate1=IntermediateMessage1(simple=SimpleMessage(data=f'Field A {seq}')),
                intermediate2=IntermediateMessage2(simple=SimpleMessage(data=f'Field B {seq}')),
            )
            writer.write_message(
                topic='/complex_message',
                message=nested,
                log_time=stamp,
                publish_time=stamp,
            )
    output.seek(0)
class OptionSeriesScatter3dSonificationTracksMappingPitch(Options):
    """Accessors for series.scatter3d.sonification.tracks.mapping.pitch.

    NOTE(review): each option appears twice (getter then setter) under the
    same name — the @property / @<name>.setter decorators look stripped
    from this listing; as written the second def shadows the first.
    Confirm against the generated original. Getter arguments to
    _config_get reflect the documented Highcharts defaults.
    """
    def mapFunction(self):
        # No default mapping function.
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        # Pitch maps to the 'y' value by default.
        return self._config_get('y')
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        # Highest note: C6 by default.
        return self._config_get('c6')
    def max(self, text: str):
        self._config(text, js_type=False)
    def min(self):
        # Lowest note: C2 by default.
        return self._config_get('c2')
    def min(self, text: str):
        self._config(text, js_type=False)
    def scale(self):
        return self._config_get(None)
    def scale(self, value: Any):
        self._config(value, js_type=False)
    def within(self):
        # Values are mapped within the yAxis range by default.
        return self._config_get('yAxis')
    def within(self, text: str):
        self._config(text, js_type=False)
def main():
    """Entry point of the log_fortianalyzer3_override_filter Ansible module.

    Builds the argument spec from the versioned schema, connects to FortiOS
    over the httpapi socket, checks playbook/device schema compatibility,
    applies the configuration and reports via exit_json/fail_json.
    """
    module_spec = schema_to_module_spec(versioned_schema)
    # Complex (non-table) endpoint: no member key.
    mkeyname = None
    fields = {'access_token': {'required': False, 'type': 'str', 'no_log': True}, 'enable_log': {'required': False, 'type': 'bool', 'default': False}, 'vdom': {'required': False, 'type': 'str', 'default': 'root'}, 'member_path': {'required': False, 'type': 'str'}, 'member_state': {'type': 'str', 'required': False, 'choices': ['present', 'absent']}, 'log_fortianalyzer3_override_filter': {'required': False, 'type': 'dict', 'default': None, 'options': {}}}
    # Copy every schema-derived option into the nested option spec; the
    # mkey (if any) would be marked required.
    for attribute_name in module_spec['options']:
        fields['log_fortianalyzer3_override_filter']['options'][attribute_name] = module_spec['options'][attribute_name]
        if (mkeyname and (mkeyname == attribute_name)):
            fields['log_fortianalyzer3_override_filter']['options'][attribute_name]['required'] = True
    module = AnsibleModule(argument_spec=fields, supports_check_mode=False)
    check_legacy_fortiosapi(module)
    is_error = False
    has_changed = False
    result = None
    diff = None
    versions_check_result = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        if ('access_token' in module.params):
            connection.set_option('access_token', module.params['access_token'])
        if ('enable_log' in module.params):
            connection.set_option('enable_log', module.params['enable_log'])
        else:
            connection.set_option('enable_log', False)
        fos = FortiOSHandler(connection, module, mkeyname)
        # Compare the playbook options against the connected device's schema
        # version; mismatches produce a warning (or enrich a failure below).
        versions_check_result = check_schema_versioning(fos, versioned_schema, 'log_fortianalyzer3_override_filter')
        (is_error, has_changed, result, diff) = fortios_log_fortianalyzer3(module.params, fos)
    else:
        module.fail_json(**FAIL_SOCKET_MSG)
    if (versions_check_result and (versions_check_result['matched'] is False)):
        module.warn('Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv')
    if (not is_error):
        if (versions_check_result and (versions_check_result['matched'] is False)):
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result, diff=diff)
        else:
            module.exit_json(changed=has_changed, meta=result, diff=diff)
    elif (versions_check_result and (versions_check_result['matched'] is False)):
        module.fail_json(msg='Error in repo', version_check_warning=versions_check_result, meta=result)
    else:
        module.fail_json(msg='Error in repo', meta=result)
class BFBase(torch.nn.Module):
    """Base class for mask-based beamformers.

    Holds the mask-estimation network together with a reference-microphone
    index and a numerical-stability epsilon used by subclasses.
    """

    def __init__(self, mask_model: torch.nn.Module, ref_mic: Optional[int]=0, eps: Optional[float]=1e-05):
        super().__init__()
        self.mask_model = mask_model
        self._ref_mic = ref_mic
        self._eps = eps
        # Dummy parameter so the module always exposes at least one
        # parameter — presumably for device/dtype queries; confirm intent.
        self.fake = torch.nn.Parameter(torch.zeros(1))

    def _set_params(self, **kwargs):
        """Substitute attribute values for None-valued kwargs; return the values."""
        resolved = {
            name: getattr(self, name) if value is None else value
            for name, value in kwargs.items()
        }
        return resolved.values()

    def ref_mic(self):
        # NOTE(review): reads like a property whose decorator was stripped.
        return self._ref_mic

    def eps(self):
        # NOTE(review): reads like a property whose decorator was stripped.
        return self._eps
def extractClairyclairetranslationWordpressCom(item):
    """Parse a release item from clairyclairetranslation.wordpress.com.

    Returns None for posts without volume/chapter info or previews,
    a release message for posts carrying a recognized tag, and False
    when no tag matched.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class init_cond(object):
    """Circular initial condition centred at (xc, yc) = (0.5, 0.75).

    The profile is ``scaling * (R(theta) - dist)**r`` where ``R`` is the
    (optionally cosine-perturbed) radius; with the perturbation amplitude
    C = 0 the level set is a plain circle of radius 0.15. The exponent
    ``r`` is forced to be odd so the profile changes sign across the
    interface.
    """

    def __init__(self, L, scaling=0.75, r=1):
        # NOTE(review): L is accepted but unused here — kept for signature
        # compatibility with sibling initial-condition classes.
        self.radius = 0.15
        self.xc = 0.5
        self.yc = 0.75
        self.scaling = scaling
        # Force the exponent to be odd.
        if (r % 2) == 0:
            r += 1
        self.r = r

    def uOfXT(self, x, t):
        """Evaluate the initial condition at point ``x`` (``t`` is unused)."""
        # Fix: removed an unused `import numpy as np` — only math is needed.
        theta = math.atan2(x[1] - self.yc, x[0] - self.xc)
        # Perturbation amplitude and wavenumber; C = 0.0 disables the
        # cosine term, leaving a circular level set.
        C = 0.0
        k = 10
        return self.scaling * ((self.radius
                                + (C * self.radius) * math.cos(k * theta)
                                - math.sqrt((x[0] - self.xc) ** 2 + (x[1] - self.yc) ** 2)) ** self.r)
def _try_add_key(ctx: Context, type_: str, filepath: str, connection: bool=False) -> None:
    """Register a private-key path on the agent configuration and persist it.

    :param ctx: CLI context holding the loaded agent configuration.
    :param type_: crypto/ledger identifier of the key.
    :param filepath: path to the private-key file.
    :param connection: register under connection keys instead of agent keys.
    :raises click.ClickException: when the key cannot be added.
    """
    key_paths = (
        ctx.agent_config.connection_private_key_paths
        if connection
        else ctx.agent_config.private_key_paths
    )
    try:
        key_paths.create(type_, filepath)
    except ValueError as e:
        raise click.ClickException(str(e))
    # Persist the updated configuration back to the agent's config file.
    with open_file(os.path.join(ctx.cwd, DEFAULT_AEA_CONFIG_FILE), 'w') as fp:
        ctx.agent_loader.dump(ctx.agent_config, fp)
class AddableJoinOp(JoinOp):
    """Join operation whose set of source events can grow dynamically.

    Sources are kept in a deque, and an optional parent event is tracked:
    the op only completes once every source is done AND the parent (if any)
    is done, so a still-active parent can keep contributing new sources.
    """
    __slots__ = ('_parent',)
    # Parent event that may still add sources; None when absent or done.
    _parent: Optional[Event]
    def __init__(self, *sources: Event):
        JoinOp.__init__(self)
        self._sources = deque()
        self._parent = None
        self._set_sources(*sources)
    def _set_sources(self, *sources):
        # Normalize each argument to an Event before registering it.
        for source in sources:
            source = Event.create(source)
            self.add_source(source)
    def add_source(self, source):
        # Subclasses decide how a new source is wired into the join.
        raise NotImplementedError
    def set_parent(self, parent: Event):
        """Track *parent* and subscribe to its done notification."""
        self._parent = parent
        if parent.done_event:
            parent.done_event += self._on_parent_done
    def on_source_done(self, source):
        """Drop a finished source; finish when nothing can emit anymore."""
        self._disconnect_from(source)
        self._sources.remove(source)
        if ((not self._sources) and (self._parent is None)):
            self.set_done()
    def _on_parent_done(self, parent):
        # Unsubscribe from the parent (Event supports in-place `-=`).
        parent -= self._on_parent_done
        self._parent = None
        if (not self._sources):
            self.set_done()
def bps(bandwidth):
    """Convert a bandwidth string such as '10Mbps' to bits per second.

    Accepts Gbps, Mbps, Kbps and bps suffixes (case-insensitive), with
    optional whitespace between value and unit. Scale factors are decimal
    (SI), not binary.

    :param bandwidth: e.g. '10Mbps', '1 Gbps'.
    :returns: integer bits per second.
    :raises ValueError: if the string is malformed or the unit is unknown.
    """
    match = re.match('(\\d+)\\s*(\\w+)', bandwidth)
    if (not match):
        raise ValueError('bandwidth not in form of 10Mbps')
    (bw, units) = match.groups()
    bw = int(bw)
    units = units.lower()
    if (units == 'gbps'):
        # Fix: the multiplier was missing here (`bw * ` was a syntax error).
        return (bw * 1000000000)
    if (units == 'mbps'):
        return (bw * 1000000)
    if (units == 'kbps'):
        return (bw * 1000)
    if (units == 'bps'):
        return bw
    raise ValueError('unsupported unit, must be Gbps, Mbps, Kbps, Bps')
# NOTE(review): decorator line appears truncated in this listing —
# presumably `@pytest.fixture(scope='function')`; confirm upstream.
(scope='function')
def slack_enterprise_user_id(slack_enterprise_test_client: SlackTestClient, slack_enterprise_identity_email) -> Generator:
    """Resolve the enterprise test user's Slack id from their email.

    Returns the user id on success; implicitly returns None when the lookup
    fails or yields no user. NOTE(review): despite the Generator annotation
    this returns rather than yields — verify the intended fixture style.
    """
    response = slack_enterprise_test_client.get_user_from_email(email=slack_enterprise_identity_email)
    if (response.ok and response.json()['user']):
        return response.json()['user']['id']
def compare_results(mdir, relaxed=False, artifacts_dir=None):
    """Compare produced test artifacts against the expected results.

    :param mdir: directory containing the 'result_should' reference tree.
    :param relaxed: when True, compare certain files after sorting.
    :param artifacts_dir: optional subdirectory under the test directory.
    :returns: (missing_files, additional_files, diffs) where diffs maps a
        produced file name to a description of the difference.
    """
    base = os.environ['rmtoo_test_dir']
    artifacts_path = base if artifacts_dir is None else os.path.join(base, artifacts_dir)
    files_is = find(artifacts_path)
    files_should = find(os.path.join(mdir, 'result_should'))
    additional_files, missing_files, files_mapping = handle_alternative_result_files(files_is, files_should)
    diffs = {}
    for df_is, df_should in files_mapping.items():
        if df_is.endswith('.xml'):
            if not compare_xml(mdir, df_is, df_should):
                diffs[df_is] = 'XML files differ'
            continue
        # Some order-unstable outputs are only diffed after sorting when
        # relaxed mode is requested.
        sorted_diff = relaxed and df_is in ['stderr', 'makefile_deps', 'req-graph1.dot', 'reqsprios.tex']
        ud = unified_diff(mdir, df_is, df_should, sorted_diff, artifacts_dir)
        if ud is not None:
            diffs[df_is] = ud
    return (missing_files, additional_files, diffs)
# NOTE(review): marker line appears truncated — presumably
# `@pytest.mark.django_db`; confirm against the original file.
.django_db
def test_object_budget_match(client):
    """The same obligations must be reported whether spending is broken
    down by budget function or by object class."""
    models = copy.deepcopy(GLOBAL_MOCK_DICT)
    for entry in models:
        baker.make(entry.pop('model'), **entry)
    # Extra row with a different object class but the same submission/account.
    baker.make(FinancialAccountsByProgramActivityObjectClass, **{'financial_accounts_by_program_activity_object_class_id': (- 4), 'submission_id': (- 1), 'treasury_account_id': (- 1), 'obligations_incurred_by_program_object_class_cpe': (- 5), 'object_class_id': 1})
    json_request = {'type': 'budget_function', 'filters': {'fy': '1600', 'quarter': '1'}}
    response = client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=json.dumps(json_request))
    assert (response.status_code == status.HTTP_200_OK)
    json_response_1 = response.json()
    json_request = {'type': 'object_class', 'filters': {'fy': '1600', 'quarter': '1'}}
    response = client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=json.dumps(json_request))
    json_response_2 = response.json()
    # Both breakdowns aggregate the same underlying rows.
    assert (json_response_1['results'][0]['amount'] == json_response_2['results'][0]['amount'])
class ConflictIterator(object):
    """Iterates over the conflicts recorded in a git index.

    Yields (ancestor, ours, theirs) triples of IndexEntry objects for each
    conflicted path.
    """

    def __init__(self, index):
        citer = ffi.new('git_index_conflict_iterator **')
        err = C.git_index_conflict_iterator_new(citer, index._index)
        check_error(err)
        # Keep the index alive for as long as this iterator exists.
        self._index = index
        self._iter = citer[0]

    def __del__(self):
        # Release the underlying libgit2 iterator.
        C.git_index_conflict_iterator_free(self._iter)

    def __iter__(self):
        # Fix: iterators must also be iterable so the object can be used
        # directly in a for-loop; previously only __next__ was defined.
        return self

    def next(self):
        # Python 2 style alias for __next__.
        return self.__next__()

    def __next__(self):
        cancestor = ffi.new('git_index_entry **')
        cours = ffi.new('git_index_entry **')
        ctheirs = ffi.new('git_index_entry **')
        err = C.git_index_conflict_next(cancestor, cours, ctheirs, self._iter)
        if (err == C.GIT_ITEROVER):
            raise StopIteration
        check_error(err)
        ancestor = IndexEntry._from_c(cancestor[0])
        ours = IndexEntry._from_c(cours[0])
        theirs = IndexEntry._from_c(ctheirs[0])
        return (ancestor, ours, theirs)
_OptParamCapability.register_type(BGP_CAP_MULTIPROTOCOL)
class BGPOptParamCapabilityMultiprotocol(_OptParamCapability):
    """BGP multiprotocol capability (RFC 4760): carries an (AFI, SAFI) pair.

    Wire format is 2-byte AFI, 1-byte reserved, 1-byte SAFI.
    """
    _CAP_PACK_STR = '!HBB'
    def __init__(self, afi, safi, reserved=0, **kwargs):
        super(BGPOptParamCapabilityMultiprotocol, self).__init__(**kwargs)
        self.afi = afi
        self.reserved = reserved
        self.safi = safi
    # NOTE(review): takes `cls` — looks like a @classmethod whose decorator
    # was stripped from this listing; confirm upstream.
    def parse_cap_value(cls, buf):
        """Decode the capability value bytes into constructor kwargs."""
        (afi, reserved, safi) = struct.unpack_from(cls._CAP_PACK_STR, six.binary_type(buf))
        return {'afi': afi, 'reserved': reserved, 'safi': safi}
    def serialize_cap_value(self):
        """Encode the capability value; the reserved byte is always 0."""
        self.reserved = 0
        buf = bytearray()
        msg_pack_into(self._CAP_PACK_STR, buf, 0, self.afi, self.reserved, self.safi)
        return buf
class TestLegacyTabSlugsSep(util.MdCase):
    """Tabbed-extension slug test with a custom '_' separator.

    Duplicate tab titles must get de-duplicated ids ('..._1') using the
    configured separator instead of the default '-'.
    """
    extension = ['pymdownx.tabbed', 'toc']
    extension_configs = {'pymdownx.tabbed': {'slugify': slugify(case='lower'), 'separator': '_', 'alternate_style': True}}
    # Markdown input: a heading plus two identically-titled tabs.
    MD = '\n ### Here is some text\n\n === "Here is some text"\n content\n\n === "Here is some text"\n content\n '
    def test_slug_with_separator(self):
        """Second tab's input id must be suffixed with '_1'."""
        self.check_markdown(self.MD, '\n <h3 id="here-is-some-text">Here is some text</h3>\n <div class="tabbed-set tabbed-alternate" data-tabs="1:2"><input checked="checked" id="here_is_some_text" name="__tabbed_1" type="radio" /><input id="here_is_some_text_1" name="__tabbed_1" type="radio" /><div class="tabbed-labels"><label for="here_is_some_text">Here is some text</label><label for="here_is_some_text_1">Here is some text</label></div>\n <div class="tabbed-content">\n <div class="tabbed-block">\n <p>content</p>\n </div>\n <div class="tabbed-block">\n <p>content</p>\n </div>\n </div>\n </div>\n ', True)
class TestTagPropagation():
    """Integration test: analysis tags set on an inner file must propagate
    up to the containing firmware object."""
    # NOTE(review): decorator appears truncated — presumably
    # `@pytest.mark.SchedulerTestConfig(...)`; confirm upstream.
    .SchedulerTestConfig(items_to_analyze=15, pipeline=True)
    def test_run_analysis_with_tag(self, analysis_finished_event, unpacking_scheduler, backend_db, analysis_scheduler):
        """Unpack a firmware, run crypto_material analysis, and verify
        the private-key tag appears on both file and firmware."""
        test_fw = Firmware(file_path=f'{get_test_data_dir()}/container/with_key.7z')
        (test_fw.version, test_fw.vendor, test_fw.device_name, test_fw.device_class) = (['foo'] * 4)
        test_fw.release_date = '2017-01-01'
        test_fw.scheduled_analysis = ['crypto_material']
        unpacking_scheduler.add_task(test_fw)
        # Wait for unpacking + analysis of all contained items to finish.
        assert analysis_finished_event.wait(timeout=20)
        processed_fo = backend_db.get_object(uid_of_key_file, analysis_filter=['crypto_material'])
        assert processed_fo.processed_analysis['crypto_material']['tags'], 'no tags set in analysis'
        processed_fw = backend_db.get_object(test_fw.uid, analysis_filter=['crypto_material'])
        assert processed_fw.analysis_tags, 'tags not propagated properly'
        assert processed_fw.analysis_tags['crypto_material']['private_key_inside']
def get_host_port(args, kwargs):
    """Extract (host, port) from a DB-API connect call's args/kwargs.

    The server may be given positionally or via ``server=``; otherwise the
    ``host``/``port`` keywords are consulted (defaulting the host to
    'localhost'). A trailing ``,port`` or ``:port`` suffix embedded in the
    host string wins over the ``port`` keyword; failing everything, the
    driver's default port is used.
    """
    if args:
        host = args[0]
    else:
        host = kwargs.get('server')
    port = None
    if not host:
        host = kwargs.get('host', 'localhost')
    for separator in (',', ':'):
        if separator not in host:
            continue
        host, port = host.rsplit(separator, 1)
        port = int(port)
        break
    if not port:
        port = int(kwargs.get('port', default_ports.get('mssql')))
    return (host, port)
class CacheStorage(object):
    """In-memory cache of compiled templates backed by a persistent store.

    Subclasses implement _load/_store/_delete for the persistence layer
    (e.g. files or memcache); this base keeps live template objects in a
    dict keyed by cache path.
    """

    def __init__(self):
        self.items = {}  # cachepath -> template object

    def get(self, cachepath, create_template):
        """Return the cached template for *cachepath*.

        Revives it from the persistent store via *create_template* when it
        is not in memory; returns None (or a falsy miss) when absent.
        """
        template = self.items.get(cachepath)
        if (not template):
            dct = self._load(cachepath)
            if dct:
                template = create_template()
                for k in dct:
                    setattr(template, k, dct[k])
                self.items[cachepath] = template
        return template

    def set(self, cachepath, template):
        """Cache *template* in memory and persist its data; return the store result."""
        self.items[cachepath] = template
        dct = self._save_data_of(template)
        return self._store(cachepath, dct)

    def _save_data_of(self, template):
        # Only these four attributes are persisted and restored.
        return {'args': template.args, 'bytecode': template.bytecode, 'script': template.script, 'timestamp': template.timestamp}

    def unset(self, cachepath):
        """Drop *cachepath* from memory and from the persistent store."""
        self.items.pop(cachepath, None)
        return self._delete(cachepath)

    def clear(self):
        """Remove every entry from memory and the persistent store."""
        (d, self.items) = (self.items, {})
        for k in d.keys():
            self._delete(k)
        d.clear()

    def _load(self, cachepath):
        # Bug fix: NotImplementedError has no .new() (a Ruby-ism) — the old
        # code raised AttributeError instead. Raise the exception directly.
        raise NotImplementedError('%s#_load(): not implemented yet.' % self.__class__.__name__)

    def _store(self, cachepath, template):
        raise NotImplementedError('%s#_store(): not implemented yet.' % self.__class__.__name__)

    def _delete(self, cachepath):
        raise NotImplementedError('%s#_delete(): not implemented yet.' % self.__class__.__name__)
class HSL(Cylindrical, Space):
    """HSL color space: a cylindrical transform of sRGB."""

    BASE = 'srgb'
    NAME = 'hsl'
    SERIALIZE = ('--hsl',)
    CHANNELS = (
        Channel('h', 0.0, 360.0, bound=True, flags=FLG_ANGLE),
        Channel('s', 0.0, 1.0, bound=True, flags=FLG_PERCENT),
        Channel('l', 0.0, 1.0, bound=True, flags=FLG_PERCENT)
    )
    CHANNEL_ALIASES = {'hue': 'h', 'saturation': 's', 'lightness': 'l'}
    WHITE = WHITES['2deg']['D65']
    GAMUT_CHECK = 'srgb'

    def normalize(self, coords: Vector) -> Vector:
        """Strip NaNs and mark the hue undefined for achromatic colors."""
        coords = alg.no_nans(coords)
        saturation = coords[1]
        lightness = coords[2]
        # Hue carries no information at zero saturation or at pure
        # black/white lightness extremes.
        if saturation == 0 or lightness in (0, 1):
            coords[0] = alg.NaN
        return coords

    def to_base(self, coords: Vector) -> Vector:
        """Convert HSL coordinates to the base sRGB coordinates."""
        return hsl_to_srgb(coords)

    def from_base(self, coords: Vector) -> Vector:
        """Convert base sRGB coordinates to HSL."""
        return srgb_to_hsl(coords)
def loadAllJsonObjects(dir):
    """Recursively load every ``*.json`` file under *dir*.

    :returns: (objects, badFiles) where objects is a list of
        (path, fileName, parsedObject) tuples — top-level JSON arrays are
        flattened into one tuple per element — and badFiles lists paths
        that failed to open or parse.
    """
    objects = []
    badFiles = []
    for root, _dirs, fileNames in os.walk(dir):
        for fileName in fnmatch.filter(fileNames, '*.json'):
            path = os.path.join(root, fileName)
            try:
                with open(path, 'r') as f:
                    parsed = json.load(f)
            except Exception as e:
                # Best effort: report and remember the bad file, keep going.
                print(e, flush=True)
                badFiles.append(path)
                continue
            if isinstance(parsed, list):
                objects.extend((path, fileName, element) for element in parsed)
            else:
                objects.append((path, fileName, parsed))
    return (objects, badFiles)
def add_blank_grams(pruned_ngrams, num_tokens, blank):
    """Augment pruned n-grams with blank tokens for CTC-style decoding.

    ``num_tokens`` doubles as the blank token's index. With
    ``blank == 'optional'`` every interleaving of blanks between a gram's
    tokens is added; with ``blank == 'forced'`` a blank is inserted in
    every gap and only unigrams survive from the original set.

    :param pruned_ngrams: list of n-gram lists, one per order.
    :param num_tokens: vocabulary size; used as the blank index.
    :param blank: 'optional' or 'forced'.
    :returns: the rebuilt/extended list of n-gram lists.
    :raises ValueError: for any other ``blank`` value.
    """
    # Flatten all orders into a single list before pruned_ngrams is rebuilt.
    all_grams = [gram for grams in pruned_ngrams for gram in grams]
    maxorder = len(pruned_ngrams)
    # Set-like dict of grams already emitted, for deduplication.
    blank_grams = {}
    if (blank == 'forced'):
        # Keep only the unigrams and seed the blank unigram; higher orders
        # are regenerated with blanks below.
        pruned_ngrams = [(pruned_ngrams[0] if (i == 0) else []) for i in range(maxorder)]
        pruned_ngrams[0].append(tuple([num_tokens]))
        blank_grams[tuple([num_tokens])] = True
    for gram in all_grams:
        if (blank == 'optional'):
            # All 0/1 choices of inserting a blank in each gap (incl. ends).
            onehot_vectors = itertools.product([0, 1], repeat=(len(gram) + 1))
        elif (blank == 'forced'):
            # Single pattern: a blank in every gap.
            onehot_vectors = [([1] * (len(gram) + 1))]
        else:
            raise ValueError('Invalid value specificed for blank. Must be in |optional|forced|none|')
        for j in onehot_vectors:
            new_array = []
            for (idx, oz) in enumerate(j[:(- 1)]):
                # Never place a blank immediately before the start sentinel's
                # successor position (i.e. after START_IDX).
                if ((oz == 1) and (gram[idx] != START_IDX)):
                    new_array.append(num_tokens)
                new_array.append(gram[idx])
            # Trailing blank, unless the gram ends with the end sentinel.
            if ((j[(- 1)] == 1) and (gram[(- 1)] != END_IDX)):
                new_array.append(num_tokens)
            # Re-extract every contiguous sub-gram (up to maxorder) that
            # actually contains a blank and hasn't been added yet.
            for n in range(maxorder):
                for e in range(n, len(new_array)):
                    cur_gram = tuple(new_array[(e - n):(e + 1)])
                    if ((num_tokens in cur_gram) and (cur_gram not in blank_grams)):
                        pruned_ngrams[n].append(cur_gram)
                        blank_grams[cur_gram] = True
    return pruned_ngrams
class NodeTreeModel(TreeModel):
    """Tree model that resolves each node to a NodeType and delegates.

    Every tree query (children, labels, editing, drag & drop, menus) is
    forwarded to the node's type as resolved by the node manager; monitor
    objects translate per-node change notifications into TreeModel trait
    events.
    """

    # Maps nodes to their node types.
    node_manager = Instance(NodeManager, ())

    # Active monitors keyed by node key (while a listener is attached).
    _monitors = Dict()

    # Monitor event names relayed to the corresponding model traits.
    _EVENTS = ('nodes_changed', 'nodes_inserted', 'nodes_removed',
               'nodes_replaced', 'structure_changed')

    def _type_of(self, node):
        """Resolve *node* to its NodeType via the node manager."""
        return self.node_manager.get_node_type(node)

    def has_children(self, node):
        node_type = self._type_of(node)
        if not node_type.allows_children(node):
            return False
        return node_type.has_children(node)

    def get_children(self, node):
        node_type = self._type_of(node)
        if not node_type.allows_children(node):
            return []
        return node_type.get_children(node)

    def get_default_action(self, node):
        return self._type_of(node).get_default_action(node)

    def get_drag_value(self, node):
        return self._type_of(node).get_drag_value(node)

    def can_drop(self, node, data):
        return self._type_of(node).can_drop(node, data)

    def drop(self, node, data):
        self._type_of(node).drop(node, data)

    def get_image(self, node, selected, expanded):
        return self._type_of(node).get_image(node, selected, expanded)

    def get_key(self, node):
        return self.node_manager.get_key(node)

    def get_selection_value(self, node):
        return self._type_of(node).get_selection_value(node)

    def get_text(self, node):
        return self._type_of(node).get_text(node)

    def can_set_text(self, node, text):
        return self._type_of(node).can_set_text(node, text)

    def set_text(self, node, text):
        return self._type_of(node).set_text(node, text)

    def is_collapsible(self, node):
        return self._type_of(node).is_collapsible(node)

    def is_draggable(self, node):
        return self._type_of(node).is_draggable(node)

    def is_editable(self, node):
        return self._type_of(node).is_editable(node)

    def is_expandable(self, node):
        return self._type_of(node).is_expandable(node)

    def add_listener(self, node):
        """Attach a change monitor for *node*, if its type provides one."""
        monitor = self._type_of(node).get_monitor(node)
        if monitor is not None:
            self._start_monitor(monitor)
            self._monitors[self.node_manager.get_key(node)] = monitor

    def remove_listener(self, node):
        """Detach and stop the monitor registered for *node*, if any."""
        key = self.node_manager.get_key(node)
        monitor = self._monitors.get(key)
        if monitor is not None:
            self._stop_monitor(monitor)
            del self._monitors[key]
        return

    def get_context_menu(self, node):
        return self._type_of(node).get_context_menu(node)

    def _start_monitor(self, monitor):
        """Subscribe to every monitor event, then start the monitor."""
        for name in self._EVENTS:
            monitor.observe(getattr(self, '_on_' + name), name)
        monitor.start()

    def _stop_monitor(self, monitor):
        """Unsubscribe from every monitor event and stop the monitor."""
        for name in self._EVENTS:
            monitor.observe(getattr(self, '_on_' + name), name, remove=True)
        monitor.stop()
        return

    # Event relays: forward monitor notifications as model trait events.

    def _on_nodes_changed(self, event):
        self.nodes_changed = event.new

    def _on_nodes_inserted(self, event):
        self.nodes_inserted = event.new

    def _on_nodes_removed(self, event):
        self.nodes_removed = event.new

    def _on_nodes_replaced(self, event):
        self.nodes_replaced = event.new

    def _on_structure_changed(self, event):
        self.structure_changed = event.new
        return
class TensorGlyph(Module):
    """Mayavi module that renders tensor glyphs at the input points."""
    __version__ = 0
    # Component generating the tensor glyph geometry.
    glyph = Instance(glyph.Glyph, allow_none=False, record=True)
    # Actor that renders the glyph output in the scene.
    actor = Instance(Actor, allow_none=False, record=True)
    input_info = PipelineInfo(datasets=['any'], attribute_types=['any'], attributes=['tensors'])
    view = View(Group(Item(name='actor', style='custom'), show_labels=False, label='Actor'), Group(Item(name='glyph', style='custom', resizable=True), label='Tensor Glyph', selected=True, show_labels=False))
    def setup_pipeline(self):
        """Create the glyph and actor; inputs are connected later."""
        self.glyph = glyph.Glyph(glyph_type='tensor')
        # Default glyph source: entry 4 of the available glyph sources.
        self.glyph.glyph_source.glyph_source = self.glyph.glyph_source.glyph_list[4]
        self.actor = Actor()
    def update_pipeline(self):
        """Reconnect the glyph to the module manager's source."""
        mm = self.module_manager
        if (mm is None):
            return
        self.glyph.inputs = [mm.source]
        self.pipeline_changed = True
    def update_data(self):
        # Upstream data (not structure) changed; just propagate the event.
        self.data_changed = True
    def _glyph_changed(self, old, new):
        # Trait handler: rewire the actor to the newly assigned glyph.
        new.module = self
        actor = self.actor
        if (actor is not None):
            actor.inputs = [new]
        self._change_components(old, new)
    def _actor_changed(self, old, new):
        # Trait handler: attach the new actor to the scene and the glyph.
        new.scene = self.scene
        g = self.glyph
        if (g is not None):
            new.inputs = [g]
        self._change_components(old, new)
# NOTE(review): decorator line appears truncated — presumably
# `@pytest.fixture()`; confirm against the original file.
()
def setup_to_fail():
    """Fixture: break chronyd time sync, then restore the previous state.

    Records whether chronyd was active/enabled, stops and disables it,
    removes all `server` lines from /etc/chrony.conf, yields to the test,
    and afterwards restores the service state and the original config.
    """
    is_active = shellexec('systemctl is-active chronyd').stdout[0]
    is_enabled = shellexec('systemctl is-enabled chronyd').stdout[0]
    # Back up the config so it can be restored verbatim after the test.
    shutil.copy('/etc/chrony.conf', '/etc/chrony.conf.bak')
    shellexec('systemctl stop chronyd')
    shellexec('systemctl disable chronyd')
    shellexec('sed -i "/^server/d" /etc/chrony.conf')
    (yield None)
    # Teardown: only restart/re-enable if the service was running before.
    if (is_active == 'active'):
        shellexec('systemctl start chronyd')
    if (is_enabled == 'enabled'):
        shellexec('systemctl enable chronyd')
    shutil.move('/etc/chrony.conf.bak', '/etc/chrony.conf')
class TestComboField(FieldMixin, unittest.TestCase):
    """GUI tests for ComboField: value/text sync, formatters, and value-set
    semantics. Each test pumps the GUI event loop after mutating traits so
    toolkit updates are observed."""
    def _create_widget_simple(self, **traits):
        # Provide sensible defaults so individual tests only override what
        # they care about.
        traits.setdefault('value', 'one')
        traits.setdefault('values', ['one', 'two', 'three', 'four'])
        traits.setdefault('tooltip', 'Dummy')
        return ComboField(**traits)
    def test_combo_field(self):
        """Setting the trait value updates the control's value and text."""
        self._create_widget_control()
        self.widget.value = 'two'
        self.gui.process_events()
        self.assertEqual(self.widget._get_control_value(), 'two')
        self.assertEqual(self.widget._get_control_text(), 'two')
    def test_combo_field_set(self):
        """Setting the control value fires exactly one trait change."""
        self._create_widget_control()
        with self.assertTraitChanges(self.widget, 'value', count=1):
            self.widget._set_control_value('two')
            self.gui.process_events()
        self.assertEqual(self.widget.value, 'two')
    def test_combo_field_formatter(self):
        """A formatter converts non-string values to display text."""
        self.widget.formatter = str
        self.widget.values = [0, 1, 2, 3]
        self._create_widget_control()
        self.widget.value = 2
        self.gui.process_events()
        self.assertEqual(self.widget._get_control_value(), 2)
        self.assertEqual(self.widget._get_control_text(), '2')
    def test_combo_field_formatter_changed(self):
        """Replacing the formatter re-renders the displayed text."""
        self.widget.values = [1, 2, 3, 4]
        self.widget.value = 2
        self.widget.formatter = str
        self._create_widget_control()
        self.widget.formatter = 'Number {}'.format
        self.gui.process_events()
        self.assertEqual(self.widget._get_control_value(), 2)
        self.assertEqual(self.widget._get_control_text(), 'Number 2')
    def test_combo_field_formatter_set(self):
        """Control-side set still yields the underlying (unformatted) value."""
        self.widget.values = [1, 2, 3, 4]
        self.widget.formatter = str
        self._create_widget_control()
        with self.assertTraitChanges(self.widget, 'value', count=1):
            self.widget._set_control_value(2)
            self.gui.process_events()
        self.assertEqual(self.widget.value, 2)
    def test_combo_field_icon_formatter(self):
        """Formatters may return (icon, text) pairs."""
        image = ImageResource('question')
        self.widget.values = [1, 2, 3, 4]
        self.widget.formatter = (lambda x: (image, str(x)))
        self._create_widget_control()
        self.widget.value = 2
        self.gui.process_events()
        self.assertEqual(self.widget._get_control_value(), 2)
        self.assertEqual(self.widget._get_control_text(), '2')
    def test_combo_field_values(self):
        """Changing the values list keeps the selection within the new set."""
        self._create_widget_control()
        self.widget.values = ['four', 'five', 'one', 'six']
        self.gui.process_events()
        # The retained value depends on toolkit behaviour; either the old
        # value (if still present) or the first new entry is acceptable.
        self.assertIn(self.widget.value, {'one', 'four'})
def add_dataset(dataset, name='', **kwargs):
    """Add *dataset* to the mayavi pipeline, wrapping raw VTK/TVTK objects.

    Accepts a (t)vtk DataSet, DataObject, generic Object, or an existing
    mayavi Source. When no engine can be resolved from *kwargs* the wrapped
    source is returned without being added to a pipeline.

    :param dataset: the data or source to add.
    :param name: optional name to assign to the resulting source.
    :returns: the mayavi source wrapping *dataset*.
    :raises TypeError: for unsupported input types.
    """
    # Order matters: DataSet is a DataObject, which is an Object.
    if isinstance(dataset, (tvtk.DataSet, vtk.vtkDataSet)):
        src = VTKDataSource()
        src.data = tvtk.to_tvtk(dataset)
    elif isinstance(dataset, (tvtk.DataObject, vtk.vtkDataObject)):
        src = VTKObjectSource()
        producer = tvtk.TrivialProducer()
        producer.set_output(tvtk.to_tvtk(dataset))
        src.object = producer
    elif isinstance(dataset, (tvtk.Object, vtk.vtkObject)):
        src = VTKObjectSource()
        src.object = tvtk.to_tvtk(dataset)
    elif isinstance(dataset, Source):
        src = dataset
    else:
        raise TypeError('first argument should be either a TVTK object or a mayavi source')
    if len(name) > 0:
        src.name = name
    engine = _get_engine_from_kwarg(kwargs)
    if engine is None:
        # No engine available: hand back the wrapped source unattached.
        return src
    engine.add_source(src)
    return src
class OptionSeriesArcdiagramDatalabels(Options):
    """Data-label options for Highcharts arc-diagram series.

    NOTE(review): every option appears as a getter/setter pair under the
    same method name — the @property / @<name>.setter decorators appear to
    have been stripped from this listing; as written, the second def of
    each pair shadows the first. Confirm against the generated original.
    Getter arguments to _config_get reflect the documented Highcharts
    defaults; sub-option accessors return typed wrapper objects.
    """
    def align(self):
        return self._config_get('undefined')
    def align(self, text: str):
        self._config(text, js_type=False)
    def allowOverlap(self):
        return self._config_get(False)
    def allowOverlap(self, flag: bool):
        self._config(flag, js_type=False)
    def animation(self) -> 'OptionSeriesArcdiagramDatalabelsAnimation':
        # Sub-option wrapper for label animation settings.
        return self._config_sub_data('animation', OptionSeriesArcdiagramDatalabelsAnimation)
    def backgroundColor(self):
        return self._config_get('none')
    def backgroundColor(self, text: str):
        self._config(text, js_type=False)
    def borderColor(self):
        return self._config_get(None)
    def borderColor(self, text: str):
        self._config(text, js_type=False)
    def borderRadius(self):
        return self._config_get(0)
    def borderRadius(self, num: float):
        self._config(num, js_type=False)
    def borderWidth(self):
        return self._config_get(0)
    def borderWidth(self, num: float):
        self._config(num, js_type=False)
    def className(self):
        return self._config_get(None)
    def className(self, text: str):
        self._config(text, js_type=False)
    def color(self):
        return self._config_get(None)
    def color(self, text: str):
        self._config(text, js_type=False)
    def crop(self):
        return self._config_get(False)
    def crop(self, flag: bool):
        self._config(flag, js_type=False)
    def defer(self):
        return self._config_get(True)
    def defer(self, flag: bool):
        self._config(flag, js_type=False)
    def enabled(self):
        return self._config_get(True)
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
    def filter(self) -> 'OptionSeriesArcdiagramDatalabelsFilter':
        # Sub-option wrapper for declarative label filtering.
        return self._config_sub_data('filter', OptionSeriesArcdiagramDatalabelsFilter)
    def format(self):
        return self._config_get('undefined')
    def format(self, text: str):
        self._config(text, js_type=False)
    def formatter(self):
        return self._config_get(None)
    def formatter(self, value: Any):
        self._config(value, js_type=False)
    def inside(self):
        return self._config_get(True)
    def inside(self, flag: bool):
        self._config(flag, js_type=False)
    def linkTextPath(self) -> 'OptionSeriesArcdiagramDatalabelsLinktextpath':
        # Sub-option wrapper for link-label text paths.
        return self._config_sub_data('linkTextPath', OptionSeriesArcdiagramDatalabelsLinktextpath)
    def nodeFormat(self):
        return self._config_get('undefined')
    def nodeFormat(self, text: str):
        self._config(text, js_type=False)
    def nodeFormatter(self):
        return self._config_get(None)
    def nodeFormatter(self, value: Any):
        self._config(value, js_type=False)
    def nullFormat(self):
        return self._config_get(None)
    def nullFormat(self, flag: bool):
        self._config(flag, js_type=False)
    def nullFormatter(self):
        return self._config_get(None)
    def nullFormatter(self, value: Any):
        self._config(value, js_type=False)
    def overflow(self):
        return self._config_get('justify')
    def overflow(self, text: str):
        self._config(text, js_type=False)
    def padding(self):
        return self._config_get(5)
    def padding(self, num: float):
        self._config(num, js_type=False)
    def position(self):
        return self._config_get('center')
    def position(self, text: str):
        self._config(text, js_type=False)
    def rotation(self):
        return self._config_get(0)
    def rotation(self, num: float):
        self._config(num, js_type=False)
    def shadow(self):
        return self._config_get(False)
    def shadow(self, flag: bool):
        self._config(flag, js_type=False)
    def shape(self):
        return self._config_get('square')
    def shape(self, text: str):
        self._config(text, js_type=False)
    def style(self):
        return self._config_get(None)
    def style(self, value: Any):
        self._config(value, js_type=False)
    def textPath(self) -> 'OptionSeriesArcdiagramDatalabelsTextpath':
        # Sub-option wrapper for label text paths.
        return self._config_sub_data('textPath', OptionSeriesArcdiagramDatalabelsTextpath)
    def useHTML(self):
        return self._config_get(False)
    def useHTML(self, flag: bool):
        self._config(flag, js_type=False)
    def verticalAlign(self):
        return self._config_get('undefined')
    def verticalAlign(self, text: str):
        self._config(text, js_type=False)
    def x(self):
        return self._config_get(0)
    def x(self, num: float):
        self._config(num, js_type=False)
    def y(self):
        return self._config_get('undefined')
    def y(self, num: float):
        self._config(num, js_type=False)
    def zIndex(self):
        return self._config_get(6)
    def zIndex(self, num: float):
        self._config(num, js_type=False)
class OptionSonificationGlobaltracksMappingPlaydelay(Options):
    """Play-delay mapping options for Highcharts sonification global tracks.

    NOTE(review): each option appears as a getter/setter pair under the
    same name — @property / @<name>.setter decorators look stripped from
    this listing; confirm against the generated original.
    """
    def mapFunction(self):
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def value(self):
        return self._config_get(None)
    def value(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
def SemiCoarsenedExtrudedHierarchy(base_mesh, height, nref=1, base_layer=(- 1), refinement_ratio=2, layers=None, kernel=None, extrusion_type='uniform', gdim=None, mesh_builder=firedrake.ExtrudedMesh):
    """Build a hierarchy of extruded meshes refined only vertically.

    The base (horizontal) mesh is shared by every level; each level is an
    extrusion of it with more layers. Either give ``base_layer`` (coarsest
    layer count, refined by ``refinement_ratio`` per level) or an explicit
    ``layers`` list with one entry per level (``nref + 1`` entries).

    :param base_mesh: the unextruded base mesh.
    :param height: total extrusion height.
    :param nref: number of refinements.
    :raises ValueError: on invalid mesh or inconsistent layer arguments.
    :returns: a nested HierarchyBase over the extruded meshes.
    """
    if (not isinstance(base_mesh, firedrake.mesh.MeshGeometry)):
        raise ValueError(f'Can only extruded a mesh, not a {type(base_mesh)}')
    base_mesh.init()
    if base_mesh.cell_set._extruded:
        raise ValueError('Base mesh must not be extruded')
    if (layers is None):
        if (base_layer == (- 1)):
            raise ValueError('Must specify number of layers for coarsest grid with base_layer=N')
        layers = [(base_layer * (refinement_ratio ** idx)) for idx in range((nref + 1))]
    else:
        if (base_layer != (- 1)):
            raise ValueError("Can't specify both layers and base_layer")
        # Bug fix: the check was inverted (`==`) and raised exactly when the
        # caller provided the correct number of layer counts.
        if (len(layers) != (nref + 1)):
            raise ValueError(f'Need to provide a number of layers for every refined mesh. Got {len(layers)}, needed {(nref + 1)}')
    meshes = [mesh_builder(base_mesh, layer, kernel=kernel, layer_height=(height / layer), extrusion_type=extrusion_type, gdim=gdim) for layer in layers]
    refinements_per_level = 1
    # Horizontal cells never change between levels: the coarse->fine map is
    # the identity on base-mesh cells.
    identity = np.arange(base_mesh.cell_set.size, dtype=IntType).reshape((- 1), 1)
    coarse_to_fine_cells = dict(((Fraction(i, refinements_per_level), identity) for i in range(nref)))
    fine_to_coarse_cells = dict(((Fraction((i + 1), refinements_per_level), identity) for i in range(nref)))
    return HierarchyBase(meshes, coarse_to_fine_cells, fine_to_coarse_cells, refinements_per_level=refinements_per_level, nested=True)
class KvmManageController(VmController):
    """VM controller for KVM guests driven through libvirt."""
    def __init__(self):
        # Imported lazily so the module loads on hosts without libvirt.
        import libvirt
        self.libvirt = libvirt
    def stop_vm(self, vm_name):
        """Force off the domain named *vm_name* (hard power-off)."""
        with self.libvirt.open(None) as conn:
            vm = conn.lookupByName(vm_name)
            vm.destroy()
    def set_snapshot(self, vm_name, snapshot_name):
        """Revert the domain to the named snapshot."""
        with self.libvirt.open(None) as conn:
            vm = conn.lookupByName(vm_name)
            snapshot = vm.snapshotLookupByName(snapshot_name)
            vm.revertToSnapshot(snapshot)
    def start_vm(self, vm_name):
        # Intentionally a no-op: reverting a snapshot resumes the guest —
        # presumably; confirm against how callers sequence these methods.
        pass
class StateStorageBase():
    """Abstract interface for per-(chat, user) FSM state storage.

    Concrete backends must override every coroutine below; each default
    implementation raises :class:`NotImplementedError`.
    """

    def __init__(self) -> None:
        pass

    async def get_state(self, chat_id, user_id):
        """Return the stored state for the (chat, user) pair."""
        raise NotImplementedError

    async def set_state(self, chat_id, user_id, state):
        """Store ``state`` for the (chat, user) pair."""
        raise NotImplementedError

    async def delete_state(self, chat_id, user_id):
        """Remove any stored state for the (chat, user) pair."""
        raise NotImplementedError

    async def get_data(self, chat_id, user_id):
        """Return the auxiliary data for the (chat, user) pair."""
        raise NotImplementedError

    async def set_data(self, chat_id, user_id, key, value):
        """Store one key/value item for the (chat, user) pair."""
        raise NotImplementedError

    async def reset_data(self, chat_id, user_id):
        """Clear all auxiliary data for the (chat, user) pair."""
        raise NotImplementedError

    async def save(self, chat_id, user_id, data):
        """Persist a full data mapping for the (chat, user) pair."""
        raise NotImplementedError
class TestUnreadTopicsTag(BaseTrackingTagsTestCase):
    """Integration test for the ``get_unread_topics`` template tag."""

    def test_can_determine_unread_forums(self):
        """The tag must expose exactly the topics with unseen posts for a user."""

        def get_rendered(topics, user):
            # Render the tag within a minimal request/context pair; return both
            # so assertions can inspect the context variable set by the tag.
            request = self.get_request()
            request.user = user
            ForumPermissionMiddleware((lambda r: HttpResponse('Response'))).process_request(request)
            t = Template((self.loadstatement + '{% get_unread_topics topics request.user as unread_topics %}'))
            c = Context({'topics': topics, 'request': request})
            rendered = t.render(c)
            # The tag itself renders nothing ('' expected by the assertions below).
            return (c, rendered)
        # No tracking info yet: every topic of forum_2 is unread for u2.
        (context, rendered) = get_rendered(self.forum_2.topics.all(), self.u2)
        assert (rendered == '')
        assert (set(context['unread_topics']) == set(self.forum_2.topics.all()))
        # After u2 reads the topic, nothing remains unread.
        TopicReadTrackFactory.create(topic=self.forum_2_topic, user=self.u2)
        (context, rendered) = get_rendered(self.forum_2.topics.all(), self.u2)
        assert (rendered == '')
        assert (not len(context['unread_topics']))
        # A new post by another user makes the topic unread again.
        PostFactory.create(topic=self.forum_2_topic, poster=self.u1)
        (context, rendered) = get_rendered(self.forum_2.topics.all(), self.u2)
        assert (rendered == '')
        assert (set(context['unread_topics']) == set(self.forum_2.topics.all()))
        # A forum-level track followed by a newer post also marks topics unread.
        ForumReadTrackFactory.create(forum=self.forum_1, user=self.u2)
        PostFactory.create(topic=self.forum_1_topic, poster=self.u1)
        (context, rendered) = get_rendered(self.forum_1.topics.all(), self.u2)
        assert (rendered == '')
        assert (set(context['unread_topics']) == set(self.forum_1.topics.all()))
def process_location(location: str, data_dir: Path, split_path: Path, token: str, cfg: DictConfig, generate_tiles: bool=False):
    """Download and post-process Mapillary imagery and map tiles for one location.

    Fetches image metadata and pixels from the Mapillary API, processes each
    capture sequence into a ``dump.json``, obtains (or rasterizes) OSM map
    tiles, and writes an HTML visualization of the train/val split.

    Args:
        location: Key into the module-level ``location_to_params`` mapping.
        data_dir: Root directory under which per-location folders are created.
        split_path: JSON file mapping split names to per-location image-id lists.
        token: Mapillary API access token.
        cfg: Processing/tiling configuration.
        generate_tiles: If True, rasterize tiles from raw OSM data instead of
            downloading pre-generated ones.
    """
    params = location_to_params[location]
    bbox = params['bbox']
    # Local metric projection centered on the query bounding box.
    projection = Projection(*bbox.center)
    splits = json.loads(split_path.read_text())
    # Flatten all splits into the full list of image ids for this location.
    image_ids = [i for split in splits.values() for i in split[location]]
    loc_dir = (data_dir / location)
    infos_dir = (loc_dir / 'image_infos')
    raw_image_dir = (loc_dir / 'images_raw')
    out_image_dir = (loc_dir / 'images')
    for d in (infos_dir, raw_image_dir, out_image_dir):
        d.mkdir(parents=True, exist_ok=True)
    downloader = MapillaryDownloader(token)
    loop = asyncio.get_event_loop()
    logger.info('Fetching metadata for all images.')
    (image_infos, num_fail) = loop.run_until_complete(fetch_image_infos(image_ids, downloader, infos_dir))
    logger.info('%d failures (%.1f%%).', num_fail, ((100 * num_fail) / len(image_ids)))
    logger.info('Fetching image pixels.')
    image_urls = [(i, info['thumb_2048_url']) for (i, info) in image_infos.items()]
    num_fail = loop.run_until_complete(fetch_images_pixels(image_urls, downloader, raw_image_dir))
    logger.info('%d failures (%.1f%%).', num_fail, ((100 * num_fail) / len(image_urls)))
    # Group images by capture sequence and process each sequence as a unit.
    seq_to_image_ids = defaultdict(list)
    for (i, info) in image_infos.items():
        seq_to_image_ids[info['sequence']].append(i)
    seq_to_image_ids = dict(seq_to_image_ids)
    dump = {}
    for seq_image_ids in tqdm(seq_to_image_ids.values()):
        dump.update(process_sequence(seq_image_ids, image_infos, projection, cfg, raw_image_dir, out_image_dir))
    write_json((loc_dir / 'dump.json'), dump)
    # Collect every processed view's id and lat/lon for tiling and plotting.
    view_ids = []
    views_latlon = []
    for seq in dump:
        for (view_id, view) in dump[seq]['views'].items():
            view_ids.append(view_id)
            views_latlon.append(view['latlong'])
    views_latlon = np.stack(views_latlon)
    view_ids = np.array(view_ids)
    views_xy = projection.project(views_latlon)
    tiles_path = (loc_dir / MapillaryDataModule.default_cfg['tiles_filename'])
    if generate_tiles:
        logger.info('Creating the map tiles.')
        # Tile the data extent plus a configured safety margin.
        bbox_data = BoundaryBox(views_xy.min(0), views_xy.max(0))
        bbox_tiling = (bbox_data + cfg.tiling.margin)
        osm_dir = (data_dir / 'osm')
        osm_path = (osm_dir / params['osm_file'])
        if (not osm_path.exists()):
            logger.info('Downloading OSM raw data.')
            download_file((DATA_URL + f"/osm/{params['osm_file']}"), osm_path)
        if (not osm_path.exists()):
            raise FileNotFoundError(f'Cannot find OSM data file {osm_path}.')
        tile_manager = TileManager.from_bbox(projection, bbox_tiling, cfg.tiling.ppm, tile_size=cfg.tiling.tile_size, path=osm_path)
        tile_manager.save(tiles_path)
    else:
        logger.info('Downloading pre-generated map tiles.')
        download_file((DATA_URL + f'/tiles/{location}.pkl'), tiles_path)
        tile_manager = TileManager.load(tiles_path)
    # Visualize the split: train views in red, val views in green, plus the
    # query and tiling bounding boxes.
    plotter = GeoPlotter()
    view_ids_val = set(splits['val'][location])
    # View ids look like "<image_id>_<suffix>"; membership is tested on the
    # integer image-id prefix.
    is_val = np.array([(int(i.rsplit('_', 1)[0]) in view_ids_val) for i in view_ids])
    plotter.points(views_latlon[(~ is_val)], 'red', view_ids[(~ is_val)], 'train')
    plotter.points(views_latlon[is_val], 'green', view_ids[is_val], 'val')
    plotter.bbox(bbox, 'blue', 'query bounding box')
    plotter.bbox(projection.unproject(tile_manager.bbox), 'black', 'tiling bounding box')
    geo_viz_path = (loc_dir / f'split_{location}.html')
    plotter.fig.write_html(geo_viz_path)
    logger.info('Wrote split visualization to %s.', geo_viz_path)
    # Raw downloads are no longer needed once processed images are written.
    shutil.rmtree(raw_image_dir)
    logger.info('Done processing for location %s.', location)
class TestSplitConverter(AITTestCase):
    """AIT converter tests for ``torch.split`` / ``Tensor.split``.

    NOTE(review): the bare tuple expressions preceding each test look like
    ``parameterized.expand(...)`` decorator arguments whose ``@`` prefix was
    stripped upstream — confirm against the original file.
    """

    ([[[2, 10], [2, 3, 5]], [[2, 10], 2], [[2, 10], 3]])
    def test_with_dim(self, input_shape: List[int], split_size_or_sections: Union[(int, List[int])]) -> None:
        # torch.split with an explicit dim argument.
        class TestModule(torch.nn.Module):
            def forward(self, x: torch.Tensor) -> torch.Tensor:
                return torch.split(x, split_size_or_sections, dim=1)
        model = TestModule().cuda()
        inputs = [torch.randn(*input_shape).half().cuda()]
        self.run_test(model, inputs, expected_ops={ait_acc_ops.split})

    ([[[10], [2, 3, 5]], [[10], 2], [[10], 3]])
    def test_without_dim(self, input_shape: List[int], split_size_or_sections: Union[(int, List[int])]) -> None:
        # torch.split relying on the default dim.
        class TestModule(torch.nn.Module):
            def forward(self, x: torch.Tensor) -> torch.Tensor:
                return torch.split(x, split_size_or_sections)
        model = TestModule().cuda()
        inputs = [torch.randn(*input_shape).half().cuda()]
        self.run_test(model, inputs, expected_ops={ait_acc_ops.split})

    ([[[2, 10], [2, 3, 5]], [[2, 10], 2], [[2, 10], 3]])
    def test_tensor_split_with_dim(self, input_shape: List[int], split_size_or_sections: Union[(int, List[int])]) -> None:
        # Method form: Tensor.split with an explicit dim.
        class TestModule(torch.nn.Module):
            def forward(self, x: torch.Tensor) -> torch.Tensor:
                return x.split(split_size_or_sections, dim=1)
        model = TestModule().cuda()
        inputs = [torch.randn(*input_shape).half().cuda()]
        self.run_test(model, inputs, expected_ops={ait_acc_ops.split})

    ([[[10], [2, 3, 5]], [[10], 2], [[10], 3]])
    def test_tensor_split_without_dim(self, input_shape: List[int], split_size_or_sections: Union[(int, List[int])]) -> None:
        # Method form: Tensor.split with the default dim.
        class TestModule(torch.nn.Module):
            def forward(self, x: torch.Tensor) -> torch.Tensor:
                return x.split(split_size_or_sections)
        model = TestModule().cuda()
        inputs = [torch.randn(*input_shape).half().cuda()]
        self.run_test(model, inputs, expected_ops={ait_acc_ops.split})

    def test_with_dim_dynamic_shape(self) -> None:
        # Split along dim 1 with a dynamic batch dimension (2..20).
        class TestModule(torch.nn.Module):
            def forward(self, x: torch.Tensor) -> torch.Tensor:
                return torch.split(x, 2, dim=1)
        model = TestModule().cuda()
        inputs_spec = TensorSpec.create_spec_from_shapes(inputs_min=[[2, 10]], inputs_max=[[20, 10]], dtype_list=[torch.float16])
        self.run_test_with_dynamic_shape(model, inputs_spec, expected_ops={ait_acc_ops.split})
# NOTE(review): the call expression below looks like the argument list of a
# stripped @Substitution(...) docstring-templating decorator — confirm upstream.
(return_object=_return_object_docstring, return_boundaries=_return_boundaries_docstring, precision=_precision_docstring, binner_dict_=_binner_dict_docstring, fit=_fit_discretiser_docstring, transform=_transform_discretiser_docstring, variables=_variables_numerical_docstring, variables_=_variables_attribute_docstring, feature_names_in_=_feature_names_in_docstring, n_features_in_=_n_features_in_docstring, fit_transform=_fit_transform_docstring, power='{1/n}', subindex='{i+1}')
class GeometricWidthDiscretiser(BaseDiscretiser):
    """Discretise numerical variables into bins of geometrically increasing width."""

    def __init__(self, variables: Union[(None, int, str, List[Union[(str, int)]])]=None, bins: int=10, return_object: bool=False, return_boundaries: bool=False, precision: int=7):
        if (not isinstance(bins, int)):
            raise ValueError(f'bins must be an integer. Got {bins} instead.')
        super().__init__(return_object, return_boundaries, precision)
        self.bins = bins
        self.variables = _check_variables_input_value(variables)

    def fit(self, X: pd.DataFrame, y: Optional[pd.Series]=None):
        """Learn per-variable geometric bin boundaries; y is ignored."""
        X = super().fit(X)
        self.binner_dict_ = {}
        for var in self.variables_:
            (min_, max_) = (X[var].min(), X[var].max())
            # Common ratio r = (max - min)^(1/bins); interior edges are
            # min + r^1, min + r^2, ..., min + r^(bins-1).
            increment = np.power((max_ - min_), (1.0 / self.bins))
            # Outer edges are +/- inf so transform never produces NaN bins.
            bins = np.r_[((- np.inf), (min_ + np.power(increment, np.arange(1, self.bins))), np.inf)]
            bins = np.sort(bins)
            bins = list(bins)
            self.binner_dict_[var] = bins
        return self
def check_sequential(speed_model):
    """Find a sequential-timing keyword token in an underscore-separated model name.

    Splits ``speed_model`` on ``_`` and scans the known timing keywords in
    sorted (deterministic) order, matching whole tokens only.

    Returns ``[matched_token, canonical_name]`` for the first match,
    or ``None`` when no timing keyword token is present.
    """
    canonical = {'setup': 'setup', 'remov': 'removal', 'hold': 'hold', 'recov': 'recovery', 'removal': 'removal', 'recovery': 'recovery'}
    tokens = speed_model.split('_')
    hit = next((kw for kw in sorted(canonical) if kw in tokens), None)
    if hit is None:
        return None
    return [hit, canonical[hit]]
class OptionSeriesFunnel3dData(Options):
    """Per-point options for a Highcharts funnel3d series data entry.

    NOTE(review): each option appears as a getter/setter pair with the same
    name; the @property / @<name>.setter decorators are presumably stripped
    upstream (as written, the later def shadows the earlier one) — confirm
    against the original file. Getters return the configured value or the
    shown default; setters store the value as a literal (js_type=False).
    """

    def accessibility(self) -> 'OptionSeriesFunnel3dDataAccessibility':
        # Nested sub-options object for accessibility settings.
        return self._config_sub_data('accessibility', OptionSeriesFunnel3dDataAccessibility)

    def borderColor(self):
        return self._config_get(None)

    def borderColor(self, text: str):
        self._config(text, js_type=False)

    def borderWidth(self):
        return self._config_get(None)

    def borderWidth(self, num: float):
        self._config(num, js_type=False)

    def className(self):
        return self._config_get(None)

    def className(self, text: str):
        self._config(text, js_type=False)

    def color(self):
        return self._config_get(None)

    def color(self, text: str):
        self._config(text, js_type=False)

    def colorIndex(self):
        return self._config_get(None)

    def colorIndex(self, num: float):
        self._config(num, js_type=False)

    def custom(self):
        return self._config_get(None)

    def custom(self, value: Any):
        self._config(value, js_type=False)

    def dashStyle(self):
        return self._config_get(None)

    def dashStyle(self, text: str):
        self._config(text, js_type=False)

    def dataLabels(self) -> 'OptionSeriesFunnel3dDataDatalabels':
        # Nested sub-options object for data label settings.
        return self._config_sub_data('dataLabels', OptionSeriesFunnel3dDataDatalabels)

    def description(self):
        return self._config_get(None)

    def description(self, text: str):
        self._config(text, js_type=False)

    def dragDrop(self) -> 'OptionSeriesFunnel3dDataDragdrop':
        # Nested sub-options object for drag-and-drop settings.
        return self._config_sub_data('dragDrop', OptionSeriesFunnel3dDataDragdrop)

    def drilldown(self):
        return self._config_get(None)

    def drilldown(self, text: str):
        self._config(text, js_type=False)

    def events(self) -> 'OptionSeriesFunnel3dDataEvents':
        # Nested sub-options object for event handlers.
        return self._config_sub_data('events', OptionSeriesFunnel3dDataEvents)

    def gradientForSides(self):
        return self._config_get(None)

    def gradientForSides(self, flag: bool):
        self._config(flag, js_type=False)

    def id(self):
        return self._config_get(None)

    def id(self, text: str):
        self._config(text, js_type=False)

    def labelrank(self):
        return self._config_get(None)

    def labelrank(self, num: float):
        self._config(num, js_type=False)

    def name(self):
        return self._config_get(None)

    def name(self, text: str):
        self._config(text, js_type=False)

    def pointWidth(self):
        # Default is the string 'undefined' (mirrors the JS-side default).
        return self._config_get('undefined')

    def pointWidth(self, num: float):
        self._config(num, js_type=False)

    def selected(self):
        # Default: the point is not selected.
        return self._config_get(False)

    def selected(self, flag: bool):
        self._config(flag, js_type=False)

    def x(self):
        return self._config_get(None)

    def x(self, num: float):
        self._config(num, js_type=False)

    def y(self):
        return self._config_get(None)

    def y(self, num: float):
        self._config(num, js_type=False)
def _get_files_as_regularsurfaces_thread(option=1):
    """Load NTHREAD regular surfaces concurrently and wrap them in xtgeo.Surfaces.

    With ``option == 1`` each worker runs ``_get_regsurff``; otherwise
    ``_get_regsurfi``. Workers that raise are logged and their result skipped.
    """
    loader = _get_regsurff if (option == 1) else _get_regsurfi
    collected = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=NTHREAD) as pool:
        pending = {pool.submit(loader, idx): idx for idx in range(NTHREAD)}
        for done in concurrent.futures.as_completed(pending):
            try:
                result = done.result()
            except Exception as exc:
                logger.error('Error: %s', exc)
            else:
                collected.append(result)
    return xtgeo.Surfaces(collected)
def run_task(task, partial_config, logging_handler):
    """Execute one RoAMer analysis task with a task-scoped logging handler.

    Attaches ``logging_handler`` to the 'roamer' logger(s) for the duration of
    the run, loads the base config module named by ``task['config']``, merges
    ``partial_config`` into it, and runs RoAMer on ``task['sample']``.

    :param task: dict with keys 'config', 'headless', 'vm', 'snapshot',
        'ident', 'sample', 'output_folder'.
    :param partial_config: overrides applied on top of the base config.
    :param logging_handler: handler to attach/detach around the run.
    """
    logger_names = ['roamer']
    for name in logger_names:
        logging.getLogger(name).addHandler(logging_handler)
    # BUG FIX: the original removed the handler only on the success path, so a
    # failing run leaked the handler and duplicated log output across tasks.
    try:
        loaded_base_config = importlib.import_module(task['config'])
        config = appy_partial_on_base_config(loaded_base_config, partial_config)
        roamer = RoAMer(config, task['headless'], task['vm'], task['snapshot'], task['ident'])
        roamer.run(task['sample'], output_folder=task['output_folder'])
    finally:
        for name in logger_names:
            logging.getLogger(name).removeHandler(logging_handler)
class RunLogger(ErsiliaBase):
    """Persists the artifacts of an Ersilia model run.

    Creates (and lazily reuses) the ``<EOS>/<runs>/{metadata,lake,logs}``
    folder tree and offers one log method per artifact kind plus a combined
    :meth:`log`.
    """

    def __init__(self, model_id, config_json):
        ErsiliaBase.__init__(self, config_json=config_json, credentials_json=None)
        self.model_id = model_id
        # makedirs(exist_ok=True) replaces the original exists()/mkdir() pairs:
        # it avoids the check-then-create race and also creates missing parents.
        self.ersilia_runs_folder = os.path.join(EOS, ERSILIA_RUNS_FOLDER)
        os.makedirs(self.ersilia_runs_folder, exist_ok=True)
        self.metadata_folder = os.path.join(self.ersilia_runs_folder, 'metadata')
        os.makedirs(self.metadata_folder, exist_ok=True)
        self.lake_folder = os.path.join(self.ersilia_runs_folder, 'lake')
        os.makedirs(self.lake_folder, exist_ok=True)
        self.logs_folder = os.path.join(self.ersilia_runs_folder, 'logs')
        os.makedirs(self.logs_folder, exist_ok=True)
        self.tabular_result_logger = TabularResultLogger()

    def log_result(self, result):
        """Tabulate ``result`` and write it to <lake>/<model_id>/<model_id>_lake.csv.

        Silently does nothing when the result cannot be tabulated.
        """
        output_dir = os.path.join(self.lake_folder, self.model_id)
        os.makedirs(output_dir, exist_ok=True)
        file_name = os.path.join(output_dir, '{0}_lake.csv'.format(self.model_id))
        tabular_result = self.tabular_result_logger.tabulate(result)
        if (tabular_result is None):
            return
        # BUG FIX: newline='' is required when handing a text file to
        # csv.writer; without it every row is followed by a blank line on
        # Windows.
        with open(file_name, 'w', newline='') as f:
            writer = csv.writer(f, delimiter=',')
            for r in tabular_result:
                writer.writerow(r)

    def log_meta(self, meta):
        """Dump the run metadata as JSON to <metadata>/<model_id>/<model_id>.json."""
        output_dir = os.path.join(self.metadata_folder, self.model_id)
        os.makedirs(output_dir, exist_ok=True)
        file_name = os.path.join(output_dir, '{0}.json'.format(self.model_id))
        with open(file_name, 'w') as f:
            json.dump(meta, f)

    def log_logs(self):
        """Copy the current session file to <logs>/<model_id>/<model_id>.log."""
        output_dir = os.path.join(self.logs_folder, self.model_id)
        os.makedirs(output_dir, exist_ok=True)
        file_name = os.path.join(output_dir, '{0}.log'.format(self.model_id))
        session_file = os.path.join(EOS, 'session.json')
        shutil.copyfile(session_file, file_name)

    def log(self, result, meta):
        """Persist result, metadata and session logs in one call."""
        self.log_result(result)
        self.log_meta(meta)
        self.log_logs()
# NOTE(review): the two lines below look like stripped pytest markers
# (@pytest.mark.slow / @pytest.mark.skipif) — confirm against the original file.
.slow
.skipif((not GPU_TESTS_ENABLED), reason='requires GPU')
def test_generate_sample(falcon_generator):
    # Sampling is stochastic, so the RNG is re-seeded before each call to pin
    # the exact generated strings.
    prompts = ['What is spaCy?', 'What is spaCy?']
    torch.manual_seed(0)
    assert (falcon_generator(prompts, config=SampleGeneratorConfig(top_k=10)) == ["spaCy is a Python package for natural language processing and text analysis. It is specifically designed for text classification tasks and can be used in a variety of fields, including healthcare, finance, and marketing. spaCy's main feature is its ability to extract text and its associated entities, which are then used to perform various analyses on the text, such as sentiment analysis, named entity recognition, and entity extraction.", "spaCy is a library for natural language processing in Python. It's built on top of NLTK's WordNet, and uses it to create a set of spaCy-compatible data structures for representing words and phrases in text. It's also a grammar-based approach to word and phrase similarity matching, and provides an API for building custom grammars."])
    torch.manual_seed(0)
    # A different top_k/temperature must change the sampled outputs.
    assert (falcon_generator(prompts, config=SampleGeneratorConfig(top_k=5, temperature=2)) == ['spacy is a Natural Language Processing tool that can be used with many programming language to build NLP-based applications such as machine learning, sentiment analysis and chatbots, and to extract text from documents and other media. spacy uses the Stanford NLP model to learn from and generate text. This makes it one of the most popular and versatile NLP libraries available. The main features of spacy include text generation and manipulation, entity extraction, part-of-speech tagging and more.', "spaCy is a library for natural language processing in Scala. It's designed to be easy to use and provides a range of features including text analysis tools and pre-built models to analyze various text datasets.</s> \nCan spaCy be used to analyze text from different sources or does it only work with a specific type of text or data?</s> \nspaCy can analyze text from different sources and can be used to work with text in different languages such as German and Chinese."])
def extractSteadierTranslations(item):
    """Build a release message for a Steadier Translations feed item.

    Returns None for previews or items with no parseable volume/chapter,
    a release message when the item is tagged 'WATTT', and False otherwise.
    """
    title = item['title']
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(title)
    bad = ('preview' in title.lower()) or not (chp or vol)
    if bad:
        return None
    if ('WATTT' in item['tags']):
        return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
    return False
def test_grid_from_file_warns(tmp_path, any_grid):
    """Reading via the instance method from_file must emit a DeprecationWarning."""
    # The deprecation was introduced in xtgeo 2.16; skip on older versions.
    if (version.parse(xtgeo_version) < version.parse('2.16')):
        pytest.skip()
    any_grid.to_file((tmp_path / 'grid.roff'), fformat='roff')
    with pytest.warns(DeprecationWarning, match='from_file is deprecated'):
        any_grid.from_file((tmp_path / 'grid.roff'), fformat='roff')
def get_drug_shortage_map(medication_orders, warehouse):
    """Map drug_code -> shortage quantity for the given warehouse.

    Sums the required dosage per drug across all medication orders and
    compares it with the latest available stock; only drugs whose
    requirement exceeds availability appear in the result.
    """
    required_by_drug = {}
    for order in medication_orders:
        code = order.drug_code
        required_by_drug[code] = (required_by_drug.get(code, 0) + flt(order.dosage))
    shortage = {}
    for (code, needed) in required_by_drug.items():
        in_stock = get_latest_stock_qty(code, warehouse)
        if (flt(needed) > flt(in_stock)):
            shortage[code] = flt((flt(needed) - flt(in_stock)))
    return shortage
class CRUDUser(CRUDBase[(User, RegisterUser, UpdateUser)]):
    """Async CRUD operations for the User model.

    Read helpers return ORM objects or None; update helpers return the
    affected row count. Write methods generally do not commit (the caller /
    session context is expected to), except where shown.
    """

    async def get(self, db: AsyncSession, user_id: int) -> (User | None):
        # Primary-key lookup via the generic base helper.
        return (await self.get_(db, pk=user_id))

    async def get_by_username(self, db: AsyncSession, username: str) -> (User | None):
        user = (await db.execute(select(self.model).where((self.model.username == username))))
        return user.scalars().first()

    async def get_by_nickname(self, db: AsyncSession, nickname: str) -> (User | None):
        user = (await db.execute(select(self.model).where((self.model.nickname == nickname))))
        return user.scalars().first()

    async def update_login_time(self, db: AsyncSession, username: str, login_time: datetime) -> int:
        # Commits immediately (unlike the other update helpers).
        user = (await db.execute(update(self.model).where((self.model.username == username)).values(last_login_time=login_time)))
        (await db.commit())
        return user.rowcount

    async def create(self, db: AsyncSession, obj: RegisterUser) -> None:
        # Generate a random salt and store the salted password hash.
        salt = text_captcha(5)
        obj.password = (await jwt.get_hash_password((obj.password + salt)))
        dict_obj = obj.dict()
        dict_obj.update({'salt': salt})
        new_user = self.model(**dict_obj)
        db.add(new_user)

    async def add(self, db: AsyncSession, obj: AddUser) -> None:
        # Like create(), but also attaches the requested roles.
        salt = text_captcha(5)
        obj.password = (await jwt.get_hash_password((obj.password + salt)))
        dict_obj = obj.dict(exclude={'roles'})
        dict_obj.update({'salt': salt})
        new_user = self.model(**dict_obj)
        role_list = []
        for role_id in obj.roles:
            role_list.append((await db.get(Role, role_id)))
        new_user.roles.extend(role_list)
        db.add(new_user)

    async def update_userinfo(self, db: AsyncSession, input_user: User, obj: UpdateUser) -> int:
        user = (await db.execute(update(self.model).where((self.model.id == input_user.id)).values(**obj.dict())))
        return user.rowcount

    # NOTE(review): this method is missing `self` — called on an instance,
    # the instance would be bound to `db`. Looks like a bug; confirm how
    # callers invoke it before changing the signature.
    async def update_role(db: AsyncSession, input_user: User, obj: UpdateUserRole) -> None:
        # Replace the user's roles with exactly those listed in obj.roles.
        for i in list(input_user.roles):
            input_user.roles.remove(i)
        role_list = []
        for role_id in obj.roles:
            role_list.append((await db.get(Role, role_id)))
        input_user.roles.extend(role_list)

    async def update_avatar(self, db: AsyncSession, current_user: User, avatar: Avatar) -> int:
        user = (await db.execute(update(self.model).where((self.model.id == current_user.id)).values(avatar=avatar.url)))
        return user.rowcount

    async def delete(self, db: AsyncSession, user_id: int) -> int:
        return (await self.delete_(db, user_id))

    async def check_email(self, db: AsyncSession, email: str) -> (User | None):
        mail = (await db.execute(select(self.model).where((self.model.email == email))))
        return mail.scalars().first()

    async def reset_password(self, db: AsyncSession, pk: int, password: str, salt: str) -> int:
        # Re-hash with the caller-provided salt (salt itself is not rotated here).
        user = (await db.execute(update(self.model).where((self.model.id == pk)).values(password=(await jwt.get_hash_password((password + salt))))))
        return user.rowcount

    async def get_all(self, dept: int=None, username: str=None, phone: str=None, status: int=None) -> Select:
        # Builds (does not execute) a filtered, newest-first select with the
        # dept and roles->menus relationships eagerly loaded.
        se = select(self.model).options(selectinload(self.model.dept)).options(selectinload(self.model.roles).selectinload(Role.menus)).order_by(desc(self.model.join_time))
        where_list = []
        if dept:
            where_list.append((self.model.dept_id == dept))
        if username:
            where_list.append(self.model.username.like(f'%{username}%'))
        if phone:
            where_list.append(self.model.phone.like(f'%{phone}%'))
        if (status is not None):
            where_list.append((self.model.status == status))
        if where_list:
            se = se.where(and_(*where_list))
        return se

    async def get_super(self, db: AsyncSession, user_id: int) -> bool:
        user = (await self.get(db, user_id))
        return user.is_superuser

    async def get_staff(self, db: AsyncSession, user_id: int) -> bool:
        user = (await self.get(db, user_id))
        return user.is_staff

    async def get_status(self, db: AsyncSession, user_id: int) -> bool:
        user = (await self.get(db, user_id))
        return user.status

    async def get_multi_login(self, db: AsyncSession, user_id: int) -> bool:
        user = (await self.get(db, user_id))
        return user.is_multi_login

    async def set_super(self, db: AsyncSession, user_id: int) -> int:
        # Toggle (not set): flips the current is_superuser flag.
        super_status = (await self.get_super(db, user_id))
        user = (await db.execute(update(self.model).where((self.model.id == user_id)).values(is_superuser=(False if super_status else True))))
        return user.rowcount

    async def set_staff(self, db: AsyncSession, user_id: int) -> int:
        # Toggle (not set): flips the current is_staff flag.
        staff_status = (await self.get_staff(db, user_id))
        user = (await db.execute(update(self.model).where((self.model.id == user_id)).values(is_staff=(False if staff_status else True))))
        return user.rowcount

    async def set_status(self, db: AsyncSession, user_id: int) -> int:
        # Toggle (not set): flips the current status flag.
        status = (await self.get_status(db, user_id))
        user = (await db.execute(update(self.model).where((self.model.id == user_id)).values(status=(False if status else True))))
        return user.rowcount

    async def set_multi_login(self, db: AsyncSession, user_id: int) -> int:
        # Toggle (not set): flips the current is_multi_login flag.
        multi_login = (await self.get_multi_login(db, user_id))
        user = (await db.execute(update(self.model).where((self.model.id == user_id)).values(is_multi_login=(False if multi_login else True))))
        return user.rowcount

    async def get_with_relation(self, db: AsyncSession, *, user_id: int=None, username: str=None) -> (User | None):
        # Lookup by id and/or username with dept and roles->menus preloaded.
        where = []
        if user_id:
            where.append((self.model.id == user_id))
        if username:
            where.append((self.model.username == username))
        user = (await db.execute(select(self.model).options(selectinload(self.model.dept)).options(selectinload(self.model.roles).joinedload(Role.menus)).where(*where)))
        return user.scalars().first()
class TestFormatter(unittest.TestCase):
    """Unit tests for the template Formatter (``{{...}}`` expressions)."""

    def test_get_format(self) -> None:
        """get_format splits a string at the first well-formed '{{...}}' token."""
        f = Formatter.get_format('123/ddd/ddd{{sdd}}/444')
        self.assertEqual('123/ddd', f[0])
        self.assertEqual('ddd{{sdd}}/444', f[1])
        # A single-brace token is not a format expression: no split occurs.
        f = Formatter.get_format('123/ddd/ddd{sdd}}/444')
        self.assertEqual('123/ddd/ddd{sdd}}/444', f[0])
        self.assertEqual(None, f[1])
        # A token at the start leaves no prefix to split off.
        f = Formatter.get_format('ddd{{sdd}}/444')
        self.assertEqual('ddd{{sdd}}/444', f[0])
        self.assertEqual(None, f[1])

    def test_format(self) -> None:
        """format evaluates keys, indexing, arithmetic and '|' filters."""
        self.assertEqual('10', Formatter.format('{{a}}', {'a': 10}))
        # Scalar data is bound to the name 'x'.
        self.assertEqual('10', Formatter.format('{{x}}', 10))
        self.assertEqual('3', Formatter.format('{{x[2]}}', [1, 2, 3]))
        self.assertEqual('2.0', Formatter.format('{{a/5}}', {'a': 10}))
        self.assertEqual('15', Formatter.format('{{a+b}}', {'a': 10, 'b': 5}))
        self.assertEqual('1.2 MB', Formatter.format('{{a/1000000}} MB', {'a': 1200000}))
        # The MB filter rounds, unlike the plain division above.
        self.assertEqual('1 MB', Formatter.format('{{a|MB}}', {'a': 1200000}))

    def test_format_uptime(self) -> None:
        """The uptime filter renders seconds-since-epoch as 'D days, H:MM'."""
        import time
        n = int(time.time())
        self.assertEqual('0 min', Formatter.format('{{x|uptime}}', n))
        self.assertEqual('1 min', Formatter.format('{{x|uptime}}', (n - 60)))
        self.assertEqual('1:00', Formatter.format('{{x|uptime}}', (n - ((1 * 60) * 60))))
        self.assertEqual('1:40', Formatter.format('{{x|uptime}}', ((n - ((1 * 60) * 60)) - (40 * 60))))
        self.assertEqual('1 day, 0 min', Formatter.format('{{x|uptime}}', (n - ((24 * 60) * 60))))
        self.assertEqual('1 day, 40 min', Formatter.format('{{x|uptime}}', ((n - ((24 * 60) * 60)) - (40 * 60))))
        self.assertEqual('1 day, 1:40', Formatter.format('{{x|uptime}}', ((n - ((25 * 60) * 60)) - (40 * 60))))
        self.assertEqual('2 days, 1:40', Formatter.format('{{x|uptime}}', ((n - ((49 * 60) * 60)) - (40 * 60))))
        # +/-30 s around a minute boundary checks the rounding direction.
        self.assertEqual('2 days, 1:39', Formatter.format('{{x|uptime}}', (((n - ((49 * 60) * 60)) - (40 * 60)) + 30)))
        self.assertEqual('2 days, 1:40', Formatter.format('{{x|uptime}}', (((n - ((49 * 60) * 60)) - (40 * 60)) - 30)))
def test_union_records():
    """Round-trip a record whose field is a union of two records sharing field names."""
    record_a = {
        'name': 'a',
        'namespace': 'common',
        'type': 'record',
        'fields': [{'name': 'x', 'type': 'int'}, {'name': 'y', 'type': 'int'}],
    }
    # record_b is a superset of record_a: same x/y plus an optional z, so the
    # writer must pick it for data carrying a 'z' key.
    record_b = {
        'name': 'b',
        'namespace': 'common',
        'type': 'record',
        'fields': [
            {'name': 'x', 'type': 'int'},
            {'name': 'y', 'type': 'int'},
            {'name': 'z', 'type': ['null', 'int']},
        ],
    }
    schema = {
        'name': 'test_name',
        'namespace': 'test',
        'type': 'record',
        'fields': [{'name': 'val', 'type': [record_a, record_b]}],
    }
    records = [{'val': {'x': 3, 'y': 4, 'z': 5}}]
    assert (records == roundtrip(schema, records))
def example():
    """Return a demo Card: an album ListTile above right-aligned action buttons."""
    tile = ft.ListTile(
        leading=ft.Icon(ft.icons.ALBUM),
        title=ft.Text('The Enchanted Nightingale'),
        subtitle=ft.Text('Music by Julie Gable. Lyrics by Sidney Stein.'),
    )
    actions = ft.Row(
        [ft.TextButton('Buy tickets'), ft.TextButton('Listen')],
        alignment=ft.MainAxisAlignment.END,
    )
    body = ft.Container(content=ft.Column([tile, actions]), width=400, padding=10)
    return ft.Card(content=body)
class TestCcrStatsRecorder():
java_signed_maxlong = ((2 ** 63) - 1)
def test_raises_exception_on_transport_error(self):
client = Client(transport_client=TransportClient(response={}, force_error=True))
cfg = create_config()
metrics_store = metrics.EsMetricsStore(cfg)
with pytest.raises(exceptions.RallyError, match='A transport error occurred while collecting CCR stats from the endpoint \\[/_ccr/stats\\?filter_path=follow_stats\\] on cluster \\[remote\\]'):
telemetry.CcrStatsRecorder(cluster_name='remote', client=client, metrics_store=metrics_store, sample_interval=1).record()
('esrally.metrics.EsMetricsStore.put_doc')
def test_stores_default_ccr_stats(self, metrics_store_put_doc):
java_signed_maxlong = self.java_signed_maxlong
shard_id = random.randint(0, 999)
remote_cluster = 'leader_cluster'
leader_index = 'leader'
follower_index = 'follower'
leader_global_checkpoint = random.randint(0, java_signed_maxlong)
leader_max_seq_no = random.randint(0, java_signed_maxlong)
follower_global_checkpoint = random.randint(0, java_signed_maxlong)
follower_max_seq_no = random.randint(0, java_signed_maxlong)
last_requested_seq_no = random.randint(0, java_signed_maxlong)
outstanding_read_requests = random.randint(0, java_signed_maxlong)
outstanding_write_requests = random.randint(0, java_signed_maxlong)
write_buffer_operation_count = random.randint(0, java_signed_maxlong)
follower_mapping_version = random.randint(0, java_signed_maxlong)
total_read_time_millis = random.randint(0, java_signed_maxlong)
total_read_remote_exec_time_millis = random.randint(0, java_signed_maxlong)
successful_read_requests = random.randint(0, java_signed_maxlong)
failed_read_requests = random.randint(0, java_signed_maxlong)
operations_read = random.randint(0, java_signed_maxlong)
bytes_read = random.randint(0, java_signed_maxlong)
total_write_time_millis = random.randint(0, java_signed_maxlong)
successful_write_requests = random.randint(0, java_signed_maxlong)
failed_write_requests = random.randint(0, java_signed_maxlong)
operations_written = random.randint(0, java_signed_maxlong)
read_exceptions = []
time_since_last_read_millis = random.randint(0, java_signed_maxlong)
ccr_stats_follower_response = {'auto_follow_stats': {'number_of_failed_follow_indices': 0, 'number_of_failed_remote_cluster_state_requests': 0, 'number_of_successful_follow_indices': 0, 'recent_auto_follow_errors': []}, 'follow_stats': {'indices': [{'index': follower_index, 'shards': [{'shard_id': shard_id, 'remote_cluster': remote_cluster, 'leader_index': leader_index, 'follower_index': follower_index, 'leader_global_checkpoint': leader_global_checkpoint, 'leader_max_seq_no': leader_max_seq_no, 'follower_global_checkpoint': follower_global_checkpoint, 'follower_max_seq_no': follower_max_seq_no, 'last_requested_seq_no': last_requested_seq_no, 'outstanding_read_requests': outstanding_read_requests, 'outstanding_write_requests': outstanding_write_requests, 'write_buffer_operation_count': write_buffer_operation_count, 'follower_mapping_version': follower_mapping_version, 'total_read_time_millis': total_read_time_millis, 'total_read_remote_exec_time_millis': total_read_remote_exec_time_millis, 'successful_read_requests': successful_read_requests, 'failed_read_requests': failed_read_requests, 'operations_read': operations_read, 'bytes_read': bytes_read, 'total_write_time_millis': total_write_time_millis, 'successful_write_requests': successful_write_requests, 'failed_write_requests': failed_write_requests, 'operations_written': operations_written, 'read_exceptions': read_exceptions, 'time_since_last_read_millis': time_since_last_read_millis}]}]}}
ccr_stats_filtered_follower_response = {'follow_stats': ccr_stats_follower_response['follow_stats']}
client = Client(transport_client=TransportClient(response=ccr_stats_filtered_follower_response))
cfg = create_config()
metrics_store = metrics.EsMetricsStore(cfg)
recorder = telemetry.CcrStatsRecorder(cluster_name='remote', client=client, metrics_store=metrics_store, sample_interval=1)
recorder.record()
shard_metadata = {'cluster': 'remote', 'index': follower_index}
metrics_store_put_doc.assert_called_with({'name': 'ccr-stats', 'shard': ccr_stats_filtered_follower_response['follow_stats']['indices'][0]['shards'][0]}, level=MetaInfoScope.cluster, meta_data=shard_metadata)
('esrally.metrics.EsMetricsStore.put_doc')
def test_stores_default_ccr_stats_many_shards(self, metrics_store_put_doc):
java_signed_maxlong = self.java_signed_maxlong
remote_cluster = 'leader_cluster'
leader_index = 'leader'
follower_index = 'follower'
shard_range = range(2)
leader_global_checkpoint = [random.randint(0, java_signed_maxlong) for _ in shard_range]
leader_max_seq_no = [random.randint(0, java_signed_maxlong) for _ in shard_range]
follower_global_checkpoint = [random.randint(0, java_signed_maxlong) for _ in shard_range]
follower_max_seq_no = [random.randint(0, java_signed_maxlong) for _ in shard_range]
last_requested_seq_no = [random.randint(0, java_signed_maxlong) for _ in shard_range]
outstanding_read_requests = [random.randint(0, java_signed_maxlong) for _ in shard_range]
outstanding_write_requests = [random.randint(0, java_signed_maxlong) for _ in shard_range]
write_buffer_operation_count = [random.randint(0, java_signed_maxlong) for _ in shard_range]
follower_mapping_version = [random.randint(0, java_signed_maxlong) for _ in shard_range]
total_read_time_millis = [random.randint(0, java_signed_maxlong) for _ in shard_range]
total_read_remote_exec_time_millis = [random.randint(0, java_signed_maxlong) for _ in shard_range]
successful_read_requests = [random.randint(0, java_signed_maxlong) for _ in shard_range]
failed_read_requests = [random.randint(0, java_signed_maxlong) for _ in shard_range]
operations_read = [random.randint(0, java_signed_maxlong) for _ in shard_range]
bytes_read = [random.randint(0, java_signed_maxlong) for _ in shard_range]
total_write_time_millis = [random.randint(0, java_signed_maxlong) for _ in shard_range]
successful_write_requests = [random.randint(0, java_signed_maxlong) for _ in shard_range]
failed_write_requests = [random.randint(0, java_signed_maxlong) for _ in shard_range]
operations_written = [random.randint(0, java_signed_maxlong) for _ in shard_range]
read_exceptions = [[] for _ in shard_range]
time_since_last_read_millis = [random.randint(0, java_signed_maxlong) for _ in shard_range]
ccr_stats_follower_response = {'auto_follow_stats': {'number_of_failed_follow_indices': 0, 'number_of_failed_remote_cluster_state_requests': 0, 'number_of_successful_follow_indices': 0, 'recent_auto_follow_errors': []}, 'follow_stats': {'indices': [{'index': follower_index, 'shards': [{'shard_id': shard_id, 'remote_cluster': remote_cluster, 'leader_index': leader_index, 'follower_index': follower_index, 'leader_global_checkpoint': leader_global_checkpoint[shard_id], 'leader_max_seq_no': leader_max_seq_no[shard_id], 'follower_global_checkpoint': follower_global_checkpoint[shard_id], 'follower_max_seq_no': follower_max_seq_no[shard_id], 'last_requested_seq_no': last_requested_seq_no[shard_id], 'outstanding_read_requests': outstanding_read_requests[shard_id], 'outstanding_write_requests': outstanding_write_requests[shard_id], 'write_buffer_operation_count': write_buffer_operation_count[shard_id], 'follower_mapping_version': follower_mapping_version[shard_id], 'total_read_time_millis': total_read_time_millis[shard_id], 'total_read_remote_exec_time_millis': total_read_remote_exec_time_millis[shard_id], 'successful_read_requests': successful_read_requests[shard_id], 'failed_read_requests': failed_read_requests[shard_id], 'operations_read': operations_read[shard_id], 'bytes_read': bytes_read[shard_id], 'total_write_time_millis': total_write_time_millis[shard_id], 'successful_write_requests': successful_write_requests[shard_id], 'failed_write_requests': failed_write_requests[shard_id], 'operations_written': operations_written[shard_id], 'read_exceptions': read_exceptions[shard_id], 'time_since_last_read_millis': time_since_last_read_millis[shard_id]} for shard_id in shard_range]}]}}
ccr_stats_filtered_follower_response = {'follow_stats': ccr_stats_follower_response['follow_stats']}
client = Client(transport_client=TransportClient(response=ccr_stats_filtered_follower_response))
cfg = create_config()
metrics_store = metrics.EsMetricsStore(cfg)
recorder = telemetry.CcrStatsRecorder('remote', client, metrics_store, 1)
recorder.record()
shard_metadata = [{'cluster': 'remote', 'index': 'follower'}, {'cluster': 'remote', 'index': 'follower'}]
metrics_store_put_doc.assert_has_calls([mock.call({'name': 'ccr-stats', 'shard': ccr_stats_filtered_follower_response['follow_stats']['indices'][0]['shards'][0]}, level=MetaInfoScope.cluster, meta_data=shard_metadata[0]), mock.call({'name': 'ccr-stats', 'shard': ccr_stats_filtered_follower_response['follow_stats']['indices'][0]['shards'][1]}, level=MetaInfoScope.cluster, meta_data=shard_metadata[1])], any_order=True)
# NOTE(review): this bare string statement looks like a stripped decorator —
# presumably ``@mock.patch('esrally.metrics.EsMetricsStore.put_doc')`` given the
# injected ``metrics_store_put_doc`` parameter below; confirm against upstream.
('esrally.metrics.EsMetricsStore.put_doc')
def test_stores_filtered_ccr_stats(self, metrics_store_put_doc):
    """CcrStatsRecorder with an index filter only records shards of the requested index.

    Builds a CCR stats response containing two follower indices, configures the
    recorder with ``indices=[follower_index1]`` and asserts that only the shard
    documents of that index are stored in the metrics store.
    """
    java_signed_maxlong = self.java_signed_maxlong
    remote_cluster = 'leader_cluster'
    leader_index1 = 'leader1'
    follower_index1 = 'follower1'
    leader_index2 = 'leader2'
    follower_index2 = 'follower2'
    # Randomized shard statistics; both follower indices share the same values
    # because only the filtering behavior (not the numbers) is under test.
    leader_global_checkpoint = random.randint(0, java_signed_maxlong)
    leader_max_seq_no = random.randint(0, java_signed_maxlong)
    follower_global_checkpoint = random.randint(0, java_signed_maxlong)
    follower_max_seq_no = random.randint(0, java_signed_maxlong)
    last_requested_seq_no = random.randint(0, java_signed_maxlong)
    outstanding_read_requests = random.randint(0, java_signed_maxlong)
    outstanding_write_requests = random.randint(0, java_signed_maxlong)
    write_buffer_operation_count = random.randint(0, java_signed_maxlong)
    follower_mapping_version = random.randint(0, java_signed_maxlong)
    total_read_time_millis = random.randint(0, java_signed_maxlong)
    total_read_remote_exec_time_millis = random.randint(0, java_signed_maxlong)
    successful_read_requests = random.randint(0, java_signed_maxlong)
    failed_read_requests = random.randint(0, java_signed_maxlong)
    operations_read = random.randint(0, java_signed_maxlong)
    bytes_read = random.randint(0, java_signed_maxlong)
    total_write_time_millis = random.randint(0, java_signed_maxlong)
    successful_write_requests = random.randint(0, java_signed_maxlong)
    failed_write_requests = random.randint(0, java_signed_maxlong)
    operations_written = random.randint(0, java_signed_maxlong)
    read_exceptions = []
    time_since_last_read_millis = random.randint(0, java_signed_maxlong)
    # Full transport response: auto-follow stats plus follow stats for both indices.
    ccr_stats_follower_response = {'auto_follow_stats': {'number_of_failed_follow_indices': 0, 'number_of_failed_remote_cluster_state_requests': 0, 'number_of_successful_follow_indices': 0, 'recent_auto_follow_errors': []}, 'follow_stats': {'indices': [{'index': follower_index1, 'shards': [{'shard_id': 0, 'remote_cluster': remote_cluster, 'leader_index': leader_index1, 'follower_index': follower_index1, 'leader_global_checkpoint': leader_global_checkpoint, 'leader_max_seq_no': leader_max_seq_no, 'follower_global_checkpoint': follower_global_checkpoint, 'follower_max_seq_no': follower_max_seq_no, 'last_requested_seq_no': last_requested_seq_no, 'outstanding_read_requests': outstanding_read_requests, 'outstanding_write_requests': outstanding_write_requests, 'write_buffer_operation_count': write_buffer_operation_count, 'follower_mapping_version': follower_mapping_version, 'total_read_time_millis': total_read_time_millis, 'total_read_remote_exec_time_millis': total_read_remote_exec_time_millis, 'successful_read_requests': successful_read_requests, 'failed_read_requests': failed_read_requests, 'operations_read': operations_read, 'bytes_read': bytes_read, 'total_write_time_millis': total_write_time_millis, 'successful_write_requests': successful_write_requests, 'failed_write_requests': failed_write_requests, 'operations_written': operations_written, 'read_exceptions': read_exceptions, 'time_since_last_read_millis': time_since_last_read_millis}]}, {'index': follower_index2, 'shards': [{'shard_id': 0, 'remote_cluster': remote_cluster, 'leader_index': leader_index2, 'follower_index': follower_index2, 'leader_global_checkpoint': leader_global_checkpoint, 'leader_max_seq_no': leader_max_seq_no, 'follower_global_checkpoint': follower_global_checkpoint, 'follower_max_seq_no': follower_max_seq_no, 'last_requested_seq_no': last_requested_seq_no, 'outstanding_read_requests': outstanding_read_requests, 'outstanding_write_requests': outstanding_write_requests, 'write_buffer_operation_count': write_buffer_operation_count, 'follower_mapping_version': follower_mapping_version, 'total_read_time_millis': total_read_time_millis, 'total_read_remote_exec_time_millis': total_read_remote_exec_time_millis, 'successful_read_requests': successful_read_requests, 'failed_read_requests': failed_read_requests, 'operations_read': operations_read, 'bytes_read': bytes_read, 'total_write_time_millis': total_write_time_millis, 'successful_write_requests': successful_write_requests, 'failed_write_requests': failed_write_requests, 'operations_written': operations_written, 'read_exceptions': read_exceptions, 'time_since_last_read_millis': time_since_last_read_millis}]}]}}
    # What we expect to be recorded: only the 'follow_stats' section (index 0 = follower_index1).
    ccr_stats_filtered_follower_response = {'follow_stats': ccr_stats_follower_response['follow_stats']}
    client = Client(transport_client=TransportClient(response=ccr_stats_follower_response))
    cfg = create_config()
    metrics_store = metrics.EsMetricsStore(cfg)
    # Restrict recording to follower_index1 only.
    recorder = telemetry.CcrStatsRecorder('remote', client, metrics_store, 1, indices=[follower_index1])
    recorder.record()
    shard_metadata = {'cluster': 'remote', 'index': follower_index1}
    metrics_store_put_doc.assert_has_calls([mock.call({'name': 'ccr-stats', 'shard': ccr_stats_filtered_follower_response['follow_stats']['indices'][0]['shards'][0]}, level=MetaInfoScope.cluster, meta_data=shard_metadata)], any_order=True)
def test_factorise_4():
    """Exercise factorise() with overlapping date intervals and param sets.

    Three overlapping request blocks are factorised over the 'date' interval
    dimension; the test then checks counting, selection, iteration order and
    missing-value computation on the resulting compressed structure.
    """
    c = factorise([{'date': ['1990-01-01/1990-01-02'], 'param': ['Z', 'T']}, {'date': ['1990-01-02/1990-01-05'], 'param': ['Z']}, {'date': ['1990-01-04/1990-01-15'], 'param': ['Z', 'T']}], intervals=['date'])
    # Z covers the full 15-day range; T has a gap on 1990-01-03.
    assert (_(c.to_list()) == _([{'date': ['1990-01-01/1990-01-15'], 'param': ['Z']}, {'date': ['1990-01-01/1990-01-02', '1990-01-04/1990-01-15'], 'param': ['T']}]))
    # 15 Z-days + 14 T-days = 29 combinations in total.
    assert (c.count() == 29)
    assert (c.count(param='Z') == 15)
    assert (c.count(date='1990-01-01') == 2)
    assert (c.select(param='T').count() == 14)
    assert (c.select(date='1990-01-01').count() == 2)
    assert (c.select(date='1990-01-01').select(param='Z').count() == 1)
    assert (_(c.select(date='1990-01-01').to_list()) == _([{'date': ['1990-01-01'], 'param': ['T', 'Z']}]))
    assert (_(c.select(date='1990-01-02/1990-01-05').to_list()) == _([{'date': ['1990-01-02', '1990-01-04/1990-01-05'], 'param': ['T']}, {'date': ['1990-01-02/1990-01-05'], 'param': ['Z']}]))
    # iterate(True) expands intervals into concrete per-day records (T first, then Z).
    E = [{'date': datetime.date(1990, 1, 2), 'param': 'T'}, {'date': datetime.date(1990, 1, 4), 'param': 'T'}, {'date': datetime.date(1990, 1, 2), 'param': 'Z'}, {'date': datetime.date(1990, 1, 3), 'param': 'Z'}, {'date': datetime.date(1990, 1, 4), 'param': 'Z'}]
    for (r, e) in zip(c.select(date='1990-01-02/1990-01-04').iterate(True), E):
        assert (r == e)
    # The single missing (param, date) combination is T on 1990-01-03.
    assert (_(c.missing(param='T', date='1990-01-01/1990-01-15').to_list()) == _([{'date': ['1990-01-03'], 'param': ['T']}]))
class TpoolConnectionPool(DBConnectionPool):
    """DBConnectionPool test variant backed by eventlet's tpool-based pool."""

    # Prevent pytest from collecting this helper class as a test case.
    __test__ = False

    def create_pool(self, min_size=0, max_size=1, max_idle=10, max_age=10, connect_timeout=0.5, module=None):
        """Build a TpooledConnectionPool, defaulting to the configured DB module."""
        dbmodule = self._dbmodule if module is None else module
        return db_pool.TpooledConnectionPool(
            dbmodule,
            min_size=min_size,
            max_size=max_size,
            max_idle=max_idle,
            max_age=max_age,
            connect_timeout=connect_timeout,
            **self._auth,
        )

    def setUp(self):
        super().setUp()

    def tearDown(self):
        super().tearDown()
        # Make sure the eventlet thread pool does not leak threads between tests.
        eventlet.tpool.killall()
# FIX: the original line read ``.django_db`` (a syntax error) — the
# ``@pytest.mark`` prefix was evidently stripped; restored below.
@pytest.mark.django_db
def test_category_naics_awards(naics_test_data, monkeypatch, elasticsearch_transaction_index):
    """Spending-by-category NAICS search returns buckets ordered by amount.

    Seeds Elasticsearch via the test fixture, runs the NAICS category search
    and compares against the full expected response payload.
    """
    setup_elasticsearch_test(monkeypatch, elasticsearch_transaction_index)
    test_payload = {'category': 'naics', 'subawards': False, 'page': 1, 'limit': 50}
    spending_by_category_logic = NAICSViewSet().perform_search(test_payload, {})
    expected_response = {'category': 'naics', 'limit': 50, 'page_metadata': {'page': 1, 'next': None, 'previous': None, 'hasNext': False, 'hasPrevious': False}, 'results': [{'amount': 4, 'code': 'NAICS 9876', 'name': 'SOURCE NAICS DESC 9876', 'id': None}, {'amount': 2, 'code': 'NAICS 1234', 'name': 'SOURCE NAICS DESC 1234', 'id': None}], 'messages': [get_time_period_message()]}
    assert (expected_response == spending_by_category_logic)
class OptionSeriesItemSonificationContexttracksMappingTremolo(Options):
    """Accessor for the sonification tremolo mapping sub-options (depth/speed).

    NOTE(review): both accessors look like they were ``@property`` methods whose
    decorators were stripped during extraction — confirm against upstream before
    calling them as plain methods.
    """

    def depth(self) -> 'OptionSeriesItemSonificationContexttracksMappingTremoloDepth':
        # Lazily create/return the nested 'depth' sub-configuration object.
        return self._config_sub_data('depth', OptionSeriesItemSonificationContexttracksMappingTremoloDepth)

    def speed(self) -> 'OptionSeriesItemSonificationContexttracksMappingTremoloSpeed':
        # Lazily create/return the nested 'speed' sub-configuration object.
        return self._config_sub_data('speed', OptionSeriesItemSonificationContexttracksMappingTremoloSpeed)
class BasicDialoguesStorage:
    """In-memory storage for dialogues, indexed by label and counterparty address."""

    def __init__(self, dialogues: 'Dialogues') -> None:
        """Initialize empty dialogue storage bound to a Dialogues container."""
        # dialogue label -> dialogue
        self._dialogues_by_dialogue_label: Dict[DialogueLabel, Dialogue] = {}
        # counterparty address -> dialogues with that counterparty
        self._dialogue_by_address: Dict[Address, List[Dialogue]] = defaultdict(list)
        # incomplete label -> the completed label it was upgraded to
        self._incomplete_to_complete_dialogue_labels: Dict[DialogueLabel, DialogueLabel] = {}
        self._dialogues = dialogues
        self._terminal_state_dialogues_labels: Set[DialogueLabel] = set()

    def dialogues_in_terminal_state(self) -> List['Dialogue']:
        """Get all dialogues currently tracked as being in a terminal state."""
        # NOTE(review): likely a stripped @property upstream — confirm.
        return list(filter(None, [self._dialogues_by_dialogue_label.get(i) for i in self._terminal_state_dialogues_labels]))

    def dialogues_in_active_state(self) -> List['Dialogue']:
        """Get all dialogues that are not in a terminal state."""
        # NOTE(review): likely a stripped @property upstream — confirm.
        active_dialogues = set(self._dialogues_by_dialogue_label.keys()) - self._terminal_state_dialogues_labels
        return list(filter(None, [self._dialogues_by_dialogue_label.get(i) for i in active_dialogues]))

    # FIX: restored @property — the decorator was stripped; the accessor is read
    # without parentheses in dialogue_terminal_state_callback, so as a plain
    # method the truthiness test would always be True (a bound method is truthy).
    @property
    def is_terminal_dialogues_kept(self) -> bool:
        """Return True if dialogues in terminal state should be kept, not removed."""
        return self._dialogues.is_keep_dialogues_in_terminal_state

    def dialogue_terminal_state_callback(self, dialogue: 'Dialogue') -> None:
        """On a dialogue reaching terminal state: keep it flagged, or drop it."""
        if self.is_terminal_dialogues_kept:
            self._terminal_state_dialogues_labels.add(dialogue.dialogue_label)
        else:
            self.remove(dialogue.dialogue_label)

    # FIX: setup/teardown had no bodies (syntax error) — restored as no-op hooks.
    def setup(self) -> None:
        """Set up dialogue storage."""

    def teardown(self) -> None:
        """Tear down dialogue storage."""

    def add(self, dialogue: Dialogue) -> None:
        """Add a dialogue and register the terminal-state callback on it."""
        dialogue.add_terminal_state_callback(self.dialogue_terminal_state_callback)
        self._dialogues_by_dialogue_label[dialogue.dialogue_label] = dialogue
        self._dialogue_by_address[dialogue.dialogue_label.dialogue_opponent_addr].append(dialogue)

    def _add_terminal_state_dialogue(self, dialogue: Dialogue) -> None:
        """Add a dialogue that is already in terminal state."""
        self.add(dialogue)
        self._terminal_state_dialogues_labels.add(dialogue.dialogue_label)

    def remove(self, dialogue_label: DialogueLabel) -> None:
        """Remove a dialogue and all bookkeeping entries for its label."""
        dialogue = self._dialogues_by_dialogue_label.pop(dialogue_label, None)
        self._incomplete_to_complete_dialogue_labels.pop(dialogue_label, None)
        if dialogue_label in self._terminal_state_dialogues_labels:
            self._terminal_state_dialogues_labels.remove(dialogue_label)
        if dialogue:
            self._dialogue_by_address[dialogue_label.dialogue_opponent_addr].remove(dialogue)

    def get(self, dialogue_label: DialogueLabel) -> Optional[Dialogue]:
        """Get the dialogue for a label, or None if not present."""
        return self._dialogues_by_dialogue_label.get(dialogue_label, None)

    def get_dialogues_with_counterparty(self, counterparty: Address) -> List[Dialogue]:
        """Get all dialogues held with the given counterparty address."""
        return self._dialogue_by_address.get(counterparty, [])

    def is_in_incomplete(self, dialogue_label: DialogueLabel) -> bool:
        """Check whether the label is registered as incomplete."""
        return dialogue_label in self._incomplete_to_complete_dialogue_labels

    def set_incomplete_dialogue(self, incomplete_dialogue_label: DialogueLabel, complete_dialogue_label: DialogueLabel) -> None:
        """Record the mapping from an incomplete label to its completed label."""
        self._incomplete_to_complete_dialogue_labels[incomplete_dialogue_label] = complete_dialogue_label

    def is_dialogue_present(self, dialogue_label: DialogueLabel) -> bool:
        """Check whether a dialogue is stored under the given label."""
        return dialogue_label in self._dialogues_by_dialogue_label

    def get_latest_label(self, dialogue_label: DialogueLabel) -> DialogueLabel:
        """Resolve an incomplete label to its completed form, if any."""
        return self._incomplete_to_complete_dialogue_labels.get(dialogue_label, dialogue_label)
class A7RPCPHY(BasePHY):
    """Artix-7 RPC DRAM PHY built on 7-series OSERDESE2/ISERDESE2/IDELAYE2 primitives."""

    def __init__(self, iodelay_clk_freq, **kwargs):
        # CSRs driving incremental read-delay (IDELAY) calibration per byte group.
        self._rdly_dq_rst = CSR()
        self._rdly_dq_inc = CSR()
        # Run-time enables gating the DB/DQS output drivers (tristate control).
        self._db_enabled = CSRStorage(reset=1)
        self._dqs_enabled = CSRStorage(reset=1)
        kwargs.update(dict(write_ser_latency=1, read_des_latency=2, phytype=self.__class__.__name__))
        super().__init__(**kwargs)
        self.settings.delays = 32
        self.settings.read_leveling = True
        self.iodelay_clk_freq = iodelay_clk_freq
        # FIX: the dict keys had been garbled into three duplicate `.0` entries,
        # which made the lookup below fail for any real refclk frequency.
        # Restored the standard 7-series average IDELAY tap delays
        # (78ps @ 200MHz, 52ps @ 300MHz, 39ps @ 400MHz), as in LiteDRAM's A7DDRPHY.
        iodelay_tap_average = {200e6: 7.8e-11, 300e6: 5.2e-11, 400e6: 3.9e-11}
        # Number of taps corresponding to a quarter of the DRAM clock period.
        self.half_sys8x_taps = math.floor(self.tck / (4 * iodelay_tap_average[iodelay_clk_freq]))

    def do_clock_serialization(self, clk_1ck_out, clk_p, clk_n):
        """Serialize the clock pattern and drive it out differentially."""
        clk = Signal()
        self.oserdese2_ddr(din=clk_1ck_out, dout=clk, clk='sys4x_180')
        self.specials += Instance('OBUFDS', i_I=clk, o_O=clk_p, o_OB=clk_n)

    def do_stb_serialization(self, stb_1ck_out, stb):
        """Serialize STB output and deserialize its input through an IDELAY."""
        stb_out = Signal()
        stb_in = Signal()
        stb_in_delayed = Signal()
        stb_t = Signal()
        self.stb_1ck_in = stb_1ck_in = Signal.like(stb_1ck_out)
        self.oserdese2_ddr(din=stb_1ck_out, dout=stb_out, tin=Constant(0), tout=stb_t, clk='sys4x_90')
        self.idelaye2(din=stb_in, dout=stb_in_delayed, rst=self.get_rst(0, self._rdly_dq_rst.re), inc=self.get_inc(0, self._rdly_dq_inc.re))
        self.iserdese2_ddr(din=stb_in_delayed, dout=stb_1ck_in, clk='sys4x_180')
        self.specials += Instance('IOBUF', i_I=stb_out, o_O=stb_in, i_T=stb_t, io_IO=stb)

    def do_db_serialization(self, db_1ck_out, db_1ck_in, db_oe, db):
        """Serialize/deserialize each DB (data) bit with per-byte-group read delays."""
        for i in range(self.databits):
            db_out = Signal()
            db_t = Signal()
            db_in = Signal()
            db_in_delayed = Signal()
            # Output path is tristated unless output-enable and the CSR enable are set.
            self.oserdese2_ddr(din=db_1ck_out[i], dout=db_out, tin=(~ (db_oe & self._db_enabled.storage)), tout=db_t, clk='sys4x_90')
            self.idelaye2(din=db_in, dout=db_in_delayed, rst=self.get_rst((i // 8), self._rdly_dq_rst.re), inc=self.get_inc((i // 8), self._rdly_dq_inc.re))
            self.iserdese2_ddr(din=db_in_delayed, dout=db_1ck_in[i], clk='sys4x_180')
            self.specials += Instance('IOBUF', i_I=db_out, o_O=db_in, i_T=db_t, io_IO=db[i])

    def do_dqs_serialization(self, dqs_1ck_out, dqs_1ck_in, dqs_oe, dqs_p, dqs_n):
        """Serialize DQS strobes; only pair 0 is deserialized back for reads."""
        for i in range(len(dqs_p)):
            dqs_out = Signal()
            dqs_in = Signal()
            dqs_t = Signal()
            dqs_in_delayed = Signal()
            self.oserdese2_ddr(clk='sys4x_180', din=dqs_1ck_out, dout=dqs_out, tin=(~ (dqs_oe & self._dqs_enabled.storage)), tout=dqs_t)
            if (i == 0):
                self.idelaye2(din=dqs_in, dout=dqs_in_delayed, rst=self.get_rst((i // 8), self._rdly_dq_rst.re), inc=self.get_inc((i // 8), self._rdly_dq_inc.re))
                self.iserdese2_ddr(clk='sys4x_90', din=dqs_in_delayed, dout=dqs_1ck_in)
            self.specials += Instance('IOBUFDS', i_T=dqs_t, i_I=dqs_out, o_O=dqs_in, io_IO=dqs_p[i], io_IOB=dqs_n[i])

    def do_cs_serialization(self, cs_n_1ck_out, cs_n):
        """Serialize the chip-select signal."""
        self.oserdese2_ddr(din=cs_n_1ck_out, dout=cs_n, clk='sys4x_90')

    def idelaye2(self, *, din, dout, init=0, rst=None, inc=None):
        """Instantiate an IDELAYE2: FIXED when rst/inc are absent, else VARIABLE."""
        # rst and inc must be given together or not at all.
        assert (not ((rst is None) ^ (inc is None)))
        fixed = (rst is None)
        params = dict(p_SIGNAL_PATTERN='DATA', p_DELAY_SRC='IDATAIN', p_CINVCTRL_SEL='FALSE', p_HIGH_PERFORMANCE_MODE='TRUE', p_REFCLK_FREQUENCY=(self.iodelay_clk_freq / 1000000.0), p_PIPE_SEL='FALSE', p_IDELAY_VALUE=init, p_IDELAY_TYPE='FIXED', i_IDATAIN=din, o_DATAOUT=dout)
        if (not fixed):
            params.update(dict(p_IDELAY_TYPE='VARIABLE', i_C=ClockSignal(), i_LD=rst, i_CE=inc, i_LDPIPEEN=0, i_INC=1))
        self.specials += Instance('IDELAYE2', **params)

    def odelaye2(self, *, din, dout, init=0, rst=None, inc=None):
        """Instantiate an ODELAYE2: FIXED when rst/inc are absent, else VARIABLE."""
        assert (not ((rst is None) ^ (inc is None)))
        # FIX: was `fixed = (rst is not None)`, the inverse of idelaye2's logic.
        # That inversion made VARIABLE mode use rst=None (invalid port) and
        # silently ignored provided rst/inc controls.
        fixed = (rst is None)
        params = dict(p_SIGNAL_PATTERN='DATA', p_DELAY_SRC='ODATAIN', p_CINVCTRL_SEL='FALSE', p_HIGH_PERFORMANCE_MODE='TRUE', p_REFCLK_FREQUENCY=(self.iodelay_clk_freq / 1000000.0), p_PIPE_SEL='FALSE', p_ODELAY_VALUE=init, p_ODELAY_TYPE='FIXED', i_ODATAIN=din, o_DATAOUT=dout)
        if (not fixed):
            params.update(dict(p_ODELAY_TYPE='VARIABLE', i_C=ClockSignal(), i_LD=rst, i_CE=inc, i_LDPIPEEN=0, i_INC=1))
        self.specials += Instance('ODELAYE2', **params)

    def oserdese2_ddr(self, *, din, dout, clk, tin=None, tout=None):
        """Instantiate an 8:1 DDR OSERDESE2, optionally with tristate control."""
        assert (self.nphases == 4)
        assert (not ((tin is None) ^ (tout is None)))
        params = dict(p_SERDES_MODE='MASTER', p_DATA_WIDTH=(2 * self.nphases), p_TRISTATE_WIDTH=1, p_DATA_RATE_OQ='DDR', p_DATA_RATE_TQ='BUF', i_RST=ResetSignal(), i_CLK=ClockSignal(clk), i_CLKDIV=ClockSignal('sys'), o_OQ=dout, i_OCE=1)
        for i in range((2 * self.nphases)):
            params['i_D{}'.format((i + 1))] = din[i]
        if (tin is not None):
            # Re-register tin in sys clock, then move it into the fast clock
            # domain before it drives the tristate input.
            tin_d = Signal()
            self.sync += tin_d.eq(tin)
            tin_cdc = Signal()
            sd_clkdiv = getattr(self.sync, clk)
            sd_clkdiv += tin_cdc.eq(tin_d)
            params.update(dict(i_TCE=1, i_T1=tin_cdc, o_TQ=tout))
        self.specials += Instance('OSERDESE2', **params)

    def iserdese2_ddr(self, *, din, dout, clk):
        """Instantiate a 1:8 DDR ISERDESE2 fed from the IDELAY ('IFD') path."""
        assert (self.nphases == 4)
        params = dict(p_SERDES_MODE='MASTER', p_INTERFACE_TYPE='NETWORKING', p_DATA_WIDTH=(2 * self.nphases), p_DATA_RATE='DDR', p_NUM_CE=1, p_IOBDELAY='IFD', i_RST=ResetSignal(), i_CLK=ClockSignal(clk), i_CLKB=(~ ClockSignal(clk)), i_CLKDIV=ClockSignal('sys'), i_BITSLIP=0, i_CE1=1, i_DDLY=din)
        for i in range((2 * self.nphases)):
            # Q outputs come out in reverse bit order relative to D inputs.
            params['o_Q{}'.format((i + 1))] = dout[(((2 * self.nphases) - 1) - i)]
        self.specials += Instance('ISERDESE2', **params)
def report_type_full(report_type, form_type, report_type_full_original):
    """Return the display name for an FEC report type.

    F5/F24 filings with a 24- or 48-hour report type and all F6 filings get
    fixed descriptive names; anything else keeps the original description.
    """
    is_ie_notice = form_type in ('F5', 'F24') and report_type in ('24', '48')
    if is_ie_notice:
        return '{}-HOUR REPORT OF INDEPENDENT EXPENDITURES'.format(report_type)
    if form_type == 'F6':
        return '48-HOUR NOTICE OF CONTRIBUTIONS OR LOANS RECEIVED'
    return report_type_full_original
def extractBorahae7TumblrCom(item):
    """Map a borahae7.tumblr.com feed item to a release message.

    Returns None for previews/items without chapter info (or the filtered
    (None, 100, 0) placeholder), a release message on a tag match, and False
    when no known tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    title_lower = item['title'].lower()
    if not (chp or vol) or 'preview' in title_lower:
        return None
    # Placeholder chapter numbering emitted by the site — not a real release.
    if (vol, chp, frag) == (None, 100, 0):
        return None
    known_series = [
        ('fbcbtr tl', 'Fiancee Be Chosen By The Ring', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tag, series_name, release_type in known_series:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=release_type)
    return False
class TBEArmor(DefaultObject):
    """Wearable armor object: reduces damage but penalizes defense.

    Armor cannot be dropped or given away while its wearer is in combat;
    transferring it while worn un-equips it.
    """

    def at_object_creation(self):
        """Set base armor stats."""
        self.db.damage_reduction = 4
        self.db.defense_modifier = (- 4)

    def at_pre_drop(self, dropper):
        """Block dropping while the dropper is in combat."""
        if self.rules.is_in_combat(dropper):
            dropper.msg("You can't doff armor in a fight!")
            return False
        return True

    def at_drop(self, dropper):
        """Un-equip the armor if it was worn, announcing the removal."""
        if (dropper.db.worn_armor == self):
            dropper.db.worn_armor = None
            dropper.location.msg_contents(('%s removes %s.' % (dropper, self)))

    def at_pre_give(self, giver, getter):
        """Block giving while the giver is in combat."""
        if self.rules.is_in_combat(giver):
            # FIX: was `dropper.msg(...)` — `dropper` is undefined in this method
            # (copy-paste from at_pre_drop) and would raise NameError at runtime.
            giver.msg("You can't doff armor in a fight!")
            return False
        return True

    def at_give(self, giver, getter):
        """Un-equip the armor if the giver was wearing it, announcing the removal."""
        if (giver.db.worn_armor == self):
            giver.db.worn_armor = None
            giver.location.msg_contents(('%s removes %s.' % (giver, self)))
class MaintenanceModeMiddleware:
    """Django-style middleware that short-circuits requests during maintenance."""

    def __init__(self, get_response=None):
        self.get_response = get_response

    def __call__(self, request):
        """Serve the maintenance page when active, otherwise pass the request on."""
        maintenance_response = self.process_request(request)
        if maintenance_response is not None:
            return maintenance_response
        if callable(self.get_response):
            return self.get_response(request)
        return None

    def process_request(self, request):
        """Return the maintenance response for this request, or None."""
        if need_maintenance_response(request):
            return get_maintenance_response(request)
        return None
# NOTE(review): ``_cache()`` looks like the tail of a stripped decorator —
# most plausibly ``@lru_cache()`` (memoizing the grammar per solc version);
# confirm against upstream before relying on this line.
_cache()
def get_solidity_grammar_instance(solidity_version=None):
    """Build the attribute grammar + rule parser for a given solc version.

    Picks the newest grammar whose minimum version is <= ``solidity_version``
    (or the newest available grammar when no version is given) and raises
    SecurifyCompilationError for unsupported versions.
    """
    if isinstance(solidity_version, str):
        solidity_version = Version(solidity_version)
    # Minimum-solc-version -> (grammar module, rule parser class).
    version_map = {Version('0.5.0'): (solidity_v_0_5_x, Parser), Version('0.6.0'): (solidity_v_0_5_x, Parser)}
    grammar_and_parser = None
    # Iterate newest-first so the highest compatible grammar wins.
    for version in sorted(version_map.keys(), reverse=True):
        if (solidity_version is None):
            can_use_version = (version_map[version] is not None)
        else:
            can_use_version = (version <= solidity_version)
        if can_use_version:
            grammar_and_parser = version_map[version]
            break
    if (grammar_and_parser is None):
        raise SecurifyCompilationError(f'Solc version {solidity_version} not supported by CFG compiler.')
    (grammar, rule_parser) = grammar_and_parser
    return AttributeGrammar.from_modules(grammar, check_acyclicity=False, rule_extractor=rule_parser())
def HumanIKTargetDoKill(kwargs: dict) -> OutgoingMessage:
    """Build the outgoing 'HumanIKTargetDoKill' message for a target.

    Requires ``id`` (object id) and ``index`` (target index) in *kwargs*.
    """
    utility.CheckKwargs(kwargs, ['id', 'index'])
    message = OutgoingMessage()
    # Wire format: object id, command name, then the target index.
    message.write_int32(kwargs['id'])
    message.write_string('HumanIKTargetDoKill')
    message.write_int32(kwargs['index'])
    return message
def get_layout(**kwargs):
    """Build the power dashboard layout, or a notice when power data is disabled.

    Reads the single athlete record (id 1) to decide whether run or cycle
    power analysis is enabled; the layout itself is a static Dash component
    tree whose charts are populated by callbacks elsewhere.
    """
    athlete_info = app.session.query(athlete).filter((athlete.athlete_id == 1)).first()
    use_power = (True if (athlete_info.use_run_power or athlete_info.use_cycle_power) else False)
    # Release the scoped session before returning the layout.
    app.session.remove()
    if (not use_power):
        return html.H1('Power data currently disabled', className='text-center')
    else:
        # Header toggles (activity type / power unit), power-duration curve + FTP
        # cards, then the four max-power profile cards (5s, 1m, 5m, 20m).
        return html.Div([html.Div(className='row align-items-start text-center', children=[html.Div(id='power-dashboard-header-container', className='col-12 mt-2 mb-2', children=[html.I(id='running-icon', className='fa fa-running', style={'fontSize': '2rem', 'display': 'inline-block'}), daq.ToggleSwitch(id='activity-type-toggle', className='mr-2 ml-2', style={'display': 'inline-block'}), html.I(id='bicycle-icon', className='fa fa-bicycle', style={'fontSize': '2rem', 'display': 'inline-block', 'color': teal}), dbc.Tooltip('Analyze cycling activities', target='bicycle-icon'), dbc.Tooltip('Toggle activity type', target='activity-type-toggle'), dbc.Tooltip('Analyze running activities', target='running-icon'), html.I(style={'fontSize': '2rem', 'display': 'inline-block', 'paddingLeft': '1%', 'paddingRight': '1%'}), html.I(id='bolt-icon', className='fa fa-bolt', style={'fontSize': '2rem', 'display': 'inline-block', 'color': teal}), daq.ToggleSwitch(id='power-unit-toggle', className='mr-2 ml-2', style={'display': 'inline-block'}, value=False), html.I(id='weight-icon', className='fa fa-weight', style={'fontSize': '2rem', 'display': 'inline-block'}), dbc.Tooltip('Show watts', target='bolt-icon'), dbc.Tooltip('Toggle power unit', target='power-unit-toggle'), dbc.Tooltip('Show watts/kg', target='weight-icon')])]), html.Div(id='power-curve-and-zone', className='row mt-2 mb-2', children=[html.Div(className='col-lg-8', children=[dbc.Card(children=[dbc.CardHeader(id='power-curve-kpis', children=[html.H4('Power Duration Curve', className='mb-0')]), dbc.Spinner(color='info', children=[dbc.CardBody(html.Div(className='row', children=[html.Div(id='power-curve-container', className='col-lg-9', children=[dcc.Graph(id='power-curve-chart', config={'displayModeBar': False}, style={'height': '100%'})]), html.Div(id='stryd-distributions', className='col-lg-3', style={'paddingLeft': 0})]))])])]), html.Div(className='col-lg-4', children=[dbc.Card(children=[dbc.CardHeader(html.H4(id='ftp-current', className='mb-0')), dbc.Tooltip('Functional Threshold Power (FTP) is the highest average power you can sustain for 1 hour, measured in watts. FTP is used to determine training zones when using a power meter and to measure improvement.', target='ftp-current'), dbc.Spinner(color='info', children=[dbc.CardBody(dcc.Graph(id='ftp-chart', config={'displayModeBar': False}, style={'height': '100%'}))])])])]), html.Div(id='power-profile-header', className='row align-items-center text-center mt-2 mb-2', children=[html.Div(className='col', children=[html.H6('Power Profiles by'), html.Div(id='power-profile-buttons', className='col', children=[dbc.Button('Day', id='day-button', color='primary', size='sm'), dbc.Button('Week', id='week-button', color='primary', size='sm'), dbc.Button('Month', id='month-button', color='primary', size='sm'), dbc.Button('Year', id='year-button', color='primary', size='sm')])])]), html.Div(id='power-profiles', className='row', children=[html.Div(className='col-lg-3', children=[dbc.Card(id='power-profile-5', children=[dbc.CardHeader(html.H4('5 Second Max Power', className='mb-0')), dbc.Spinner(color='info', children=[dbc.CardBody(dcc.Graph(id='power-profile-5-chart', config={'displayModeBar': False}, style={'height': '100%'}))])])]), html.Div(className='col-lg-3', children=[dbc.Card(id='power-profile-60', children=[dbc.CardHeader(html.H4('1 Minute Max Power', className='mb-0')), dbc.Spinner(color='info', children=[dbc.CardBody(dcc.Graph(id='power-profile-60-chart', config={'displayModeBar': False}, style={'height': '100%'}))])])]), html.Div(className='col-lg-3', children=[dbc.Card(id='power-profile-300', children=[dbc.CardHeader(html.H4('5 Minute Max Power', className='mb-0')), dbc.Spinner(color='info', children=[dbc.CardBody(dcc.Graph(id='power-profile-300-chart', config={'displayModeBar': False}, style={'height': '100%'}))])])]), html.Div(className='col-lg-3', children=[dbc.Card(id='power-profile-1200', children=[dbc.CardHeader(html.H4('20 Minute Max Power', className='mb-0')), dbc.Spinner(color='info', children=[dbc.CardBody(dcc.Graph(id='power-profile-1200-chart', config={'displayModeBar': False}, style={'height': '100%'}))])])])])])
# FIX: the original first line was the bare tuple-looking fragment
# ``(scope='session', autouse=True)`` — a syntax error left behind when the
# ``@pytest.fixture`` prefix was stripped; restored below (the yield-based body
# confirms this is a fixture).
@pytest.fixture(scope='session', autouse=True)
def testing_environment(test_certificates: Tuple[(str, str)], find_free_port: Callable) -> None:
    """Session-wide environment setup: temp DB path, TLS key/cert files, free ports.

    Exports DATABASE_PATH, MQTTS_PRIVATE_KEY/PUBLIC_KEY and the MQTT/MQTTS/HTTP
    port numbers via environment variables, yields for the test session, then
    removes the temporary files.
    """
    location = pathlib.Path(tempfile.gettempdir())
    # Random suffix so parallel sessions don't collide on file names.
    rand_str = ''.join(random.choices((string.ascii_letters + string.digits), k=6))
    db_path = (location / f'feedernet.{rand_str}.db')
    environ['DATABASE_PATH'] = str(db_path)
    priv_path = (location / f'feedernet-priv.{rand_str}.pem')
    cert_path = (location / f'feedernet-cert.{rand_str}.pem')
    with open(priv_path, 'w') as cert_file:
        cert_file.write(test_certificates[0])
    with open(cert_path, 'w') as cert_file:
        cert_file.write(test_certificates[1])
    environ['MQTTS_PRIVATE_KEY'] = str(priv_path)
    environ['MQTTS_PUBLIC_KEY'] = str(cert_path)
    environ['MQTT_PORT'] = str(find_free_port())
    environ['MQTTS_PORT'] = str(find_free_port())
    environ['HTTP_PORT'] = str(find_free_port())
    yield
    # Best-effort cleanup; files may already be gone.
    for file_path in [db_path, priv_path, cert_path]:
        try:
            file_path.unlink()
        except FileNotFoundError:
            pass
class Faucet8021XCustomACLLoginTest(Faucet8021XBase):
    """802.1X test where custom auth/noauth ACLs gate ICMP until login succeeds."""

    # VLAN plus two ACLs: auth_acl permits ICMP+ARP, noauth_acl denies ICMP.
    CONFIG_GLOBAL = '\nvlans:\n 100:\n description: "untagged"\nacls:\n auth_acl:\n - rule:\n dl_type: 0x800 # Allow ICMP / IPv4\n ip_proto: 1\n actions:\n allow: True\n - rule:\n dl_type: 0x0806 # ARP Packets\n actions:\n allow: True\n noauth_acl:\n - rule:\n dl_type: 0x800 # Deny ICMP / IPv4\n ip_proto: 1\n actions:\n allow: False\n - rule:\n dl_type: 0x0806 # ARP Packets\n actions:\n allow: True\n '
    # Port layout: two 802.1X client ports with dot1x_acl, a ping host, NFV port.
    CONFIG = '\n dot1x:\n nfv_intf: NFV_INTF\n nfv_sw_port: %(port_4)d\n radius_ip: 127.0.0.1\n radius_port: RADIUS_PORT\n radius_secret: SECRET\n auth_acl: auth_acl\n noauth_acl: noauth_acl\n interfaces:\n %(port_1)d:\n name: b1\n description: "b1"\n native_vlan: 100\n # 802.1x client.\n dot1x: True\n dot1x_acl: True\n %(port_2)d:\n name: b2\n description: "b2"\n native_vlan: 100\n # 802.1X client.\n dot1x: True\n dot1x_acl: True\n %(port_3)d:\n name: b3\n description: "b3"\n native_vlan: 100\n # ping host.\n %(port_4)d:\n name: b4\n description: "b4"\n output_only: True\n # "NFV host - interface used by controller."\n '

    def test_untagged(self):
        """Authenticate the first client and verify post-login connectivity."""
        self.verify_host_success(self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, False)
        self.post_test_checks()
def debug_response(response):
    """Echo an HTTP response (status line, headers, body) with a colored '< ' prompt.

    Prompt color reflects the status class: green for 2xx, red for 4xx/5xx,
    yellow for anything else.
    """
    def make_echo(color):
        prompt = click.style('< ', fg=color)

        def echo(fmt, *args):
            click.echo(prompt + expand_args(fmt, args))
        return echo

    status_class = ('%s' % response.status_code)[0] + 'xx'
    if status_class == '2xx':
        response_echo = make_echo('green')
    elif status_class in ('4xx', '5xx'):
        response_echo = make_echo('red')
    else:
        response_echo = make_echo('yellow')

    # Status line in bold, then headers sorted by name, then the body.
    response_echo(click.style('%d %s', bold=True), response.status_code, response.reason)
    for key, value in sorted(response.headers.items()):
        response_echo('%s: %s', key.title(), value)
    if response.content:
        response_echo('')
        for line in response.text.splitlines():
            response_echo(line)
    click.echo()
# FIX: the original five lines were decorator fragments with their ``@``-prefixes
# stripped (``.xfail(...)``, ``.requires_opm``, ``.usefixtures(...)``,
# ``(max_examples=5)``, ``(opm_setups)``) — syntax errors as written. The first
# three are clearly pytest marks; the last two presumably come from hypothesis
# (``@settings`` / ``@given``) given the generated ``case`` argument — confirm
# against upstream.
@pytest.mark.xfail(reason='OPM flaky')
@pytest.mark.requires_opm
@pytest.mark.usefixtures('setup_tmpdir')
@settings(max_examples=5)
@given(opm_setups)
def test_restart_prop_reading(case):
    """Run a generated OPM case and read PRESSURE from its restart file via xtgeo."""
    case.run()
    fformat = 'funrst' if case.formatted else 'unrst'
    pressure = xtgeo.gridproperty_from_file(case.restart_file, fformat=fformat, name='PRESSURE', date='last', grid=case.grid)
    assert (pressure.name == 'PRESSURE_')
    assert (pressure.date == '')
def fetch_production_capacity_for_all_zones(target_datetime: datetime, session: Session) -> dict[(str, Any)]:
    """Fetch and format Ember production-capacity data for every zone.

    Downloads the raw dataset, filters/reshapes it for the target year and
    returns a mapping of zone key to capacity data.
    """
    raw_data = get_data_from_url(session)
    yearly_data = format_ember_data(raw_data, target_datetime.year)
    capacity_by_zone = get_capacity_dict_from_df(yearly_data)
    logger.info(f'Fetched capacity data from Ember for {target_datetime.year}')
    return capacity_by_zone
class _BaseScipyGridder(BaseGridder):
    """Base gridder that delegates interpolation to a scipy interpolator class."""

    def _get_interpolator(self):
        """Return ``(interpolator_class, kwargs)`` used by :meth:`fit`.

        FIX: the original ``def`` header had no body (a syntax error — most
        likely a stripped docstring/stub). Subclasses must override this;
        raising makes the abstract contract explicit. Confirm against upstream.
        """
        raise NotImplementedError

    def fit(self, coordinates, data, weights=None):
        """Fit the scipy interpolator to (easting, northing) -> data.

        Weights are not supported and trigger a warning. Stores the data
        region in ``region_`` and the fitted interpolator in ``interpolator_``.
        """
        if (weights is not None):
            warn('{} does not support weights and they will be ignored.'.format(self.__class__.__name__))
        (coordinates, data, weights) = check_fit_input(coordinates, data, weights)
        # Only horizontal coordinates are used; extra coordinates are ignored.
        (easting, northing) = coordinates[:2]
        self.region_ = get_region((easting, northing))
        points = np.column_stack((np.ravel(easting), np.ravel(northing)))
        (interpolator_class, kwargs) = self._get_interpolator()
        self.interpolator_ = interpolator_class(points, np.ravel(data), **kwargs)
        return self

    def predict(self, coordinates):
        """Evaluate the fitted interpolator on the given coordinates.

        Raises if called before :meth:`fit`.
        """
        check_is_fitted(self, ['interpolator_'])
        (easting, northing) = coordinates[:2]
        return self.interpolator_((easting, northing))
def main():
    """Ansible module entry point for managing FortiOS 'gtp apngrp' objects.

    Builds the argument spec from the versioned schema, connects over the
    httpapi socket, applies the requested state and reports changed/diff,
    warning when the target FortiOS version does not match the schema.
    """
    module_spec = schema_to_module_spec(versioned_schema)
    # Primary key attribute of the gtp_apngrp table.
    mkeyname = 'name'
    fields = {'access_token': {'required': False, 'type': 'str', 'no_log': True}, 'enable_log': {'required': False, 'type': 'bool', 'default': False}, 'vdom': {'required': False, 'type': 'str', 'default': 'root'}, 'member_path': {'required': False, 'type': 'str'}, 'member_state': {'type': 'str', 'required': False, 'choices': ['present', 'absent']}, 'state': {'required': True, 'type': 'str', 'choices': ['present', 'absent']}, 'gtp_apngrp': {'required': False, 'type': 'dict', 'default': None, 'options': {}}}
    # Mirror every schema option into the module spec; the mkey attribute is
    # made required so present/absent can address a specific table entry.
    for attribute_name in module_spec['options']:
        fields['gtp_apngrp']['options'][attribute_name] = module_spec['options'][attribute_name]
        if (mkeyname and (mkeyname == attribute_name)):
            fields['gtp_apngrp']['options'][attribute_name]['required'] = True
    module = AnsibleModule(argument_spec=fields, supports_check_mode=True)
    check_legacy_fortiosapi(module)
    is_error = False
    has_changed = False
    result = None
    diff = None
    versions_check_result = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        if ('access_token' in module.params):
            connection.set_option('access_token', module.params['access_token'])
        if ('enable_log' in module.params):
            connection.set_option('enable_log', module.params['enable_log'])
        else:
            connection.set_option('enable_log', False)
        fos = FortiOSHandler(connection, module, mkeyname)
        versions_check_result = check_schema_versioning(fos, versioned_schema, 'gtp_apngrp')
        (is_error, has_changed, result, diff) = fortios_gtp(module.params, fos, module.check_mode)
    else:
        # No httpapi connection available — cannot talk to the device.
        module.fail_json(**FAIL_SOCKET_MSG)
    if (versions_check_result and (versions_check_result['matched'] is False)):
        module.warn('Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv')
    if (not is_error):
        if (versions_check_result and (versions_check_result['matched'] is False)):
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result, diff=diff)
        else:
            module.exit_json(changed=has_changed, meta=result, diff=diff)
    elif (versions_check_result and (versions_check_result['matched'] is False)):
        module.fail_json(msg='Error in repo', version_check_warning=versions_check_result, meta=result)
    else:
        module.fail_json(msg='Error in repo', meta=result)
class THBattleFaith(THBattle):
    """THBattle 'Faith' mode: 6 players split into the Hakurei and Moriya forces.

    NOTE(review): the methods below take the game instance as a first argument
    named ``g`` instead of ``self`` — this follows the codebase's convention
    for THBattle subclasses.
    """
    n_persons = 6
    game_ehs = [DeathHandler]
    bootstrap = THBattleFaithBootstrap
    params_def = {'random_seat': (True, False)}
    # Per-force player groups and character selection pools.
    forces: Dict[(THBFaithRole, BatchList[Player])]
    pool: Dict[(THBFaithRole, List[CharChoice])]

    def can_leave(g: THBattleFaith, p: Any):
        # Players may never leave a running Faith game.
        return False

    def switch_character(g, old: Character, choice: CharChoice) -> Character:
        """Replace a player's character with the chosen one, rewiring game state."""
        p = old.player
        g.players.player.reveal(choice)
        cls = choice.char_cls
        assert cls
        log.info('>> NewCharacter: %s %s', g.roles[p].get().name, cls.__name__)
        new = cls(p)
        # Swap the character object in-place wherever this player appears.
        g.players.find_replace((lambda ch: (ch.player is p)), new)
        g.refresh_dispatcher()
        g.emit_event('switch_character', (old, new))
        return new

    def get_opponent_role(g, r: THBFaithRole) -> THBFaithRole:
        """Return the opposing force's role for the given role."""
        if (r == THBFaithRole.MORIYA):
            return THBFaithRole.HAKUREI
        elif (r == THBFaithRole.HAKUREI):
            return THBFaithRole.MORIYA
        else:
            assert False, f'WTF: {r}'

    def get_remaining(g) -> Dict[(str, int)]:
        """Remaining character choices per force (one slot is reserved, hence -1)."""
        return {k.name: (len(v) - 1) for (k, v) in g.pool.items()}
class LyricsMethodsComboBox(Gtk.ComboBoxText, providers.ProviderHandler):
    """Combo box listing lyrics providers, kept in sync with provider registration."""

    def __init__(self):
        Gtk.ComboBoxText.__init__(self)
        providers.ProviderHandler.__init__(self, 'lyrics')
        self.model = self.get_model()
        # First entry means "search every provider".
        self.append_text(_('Any source'))
        for provider in self.get_providers():
            self.append_text(provider.display_name)
        self.set_active(0)

    def remove_item(self, name):
        """Remove the entry with the given name; return True if it existed."""
        index = self.search_item(name)
        # FIX: was `if index:` — index 0 is falsy, so a match at the first row
        # was treated as "not found". Compare against the False sentinel instead.
        if index is not False:
            GLib.idle_add(self.remove, index)
            return True
        return False

    def append_item(self, name):
        """Append an entry unless one with that name exists; return True if added."""
        # FIX: was `if not self.search_item(name):` — a match at index 0 is
        # falsy and would have been appended again as a duplicate.
        if self.search_item(name) is False:
            GLib.idle_add(self.append_text, name)
            return True
        return False

    def search_item(self, name):
        """Return the row index of the entry named *name*, or False if absent."""
        for index, item in enumerate(self.model):
            if (item[0] == name):
                return index
        return False

    def get_active_item(self):
        """Return (active index, active text)."""
        return (self.get_active(), self.get_active_text())

    def on_provider_added(self, provider):
        # Keep the list in sync when a lyrics provider registers.
        self.append_item(provider.display_name)

    def on_provider_removed(self, provider):
        # Keep the list in sync when a lyrics provider unregisters.
        self.remove_item(provider.display_name)
def notify_ticket_cancel(order, actor):
    """Send ticket-cancellation notifications for *order*.

    The buyer gets a TICKET_CANCELLED notification and the event's staff
    get the organizer-facing TICKET_CANCELLED_ORGANIZER variant; *actor*
    is recorded as the person who performed the cancellation.
    """
    # Notify the purchaser.
    buyer_content = NotificationContent(
        type=NotificationType.TICKET_CANCELLED,
        target=order,
        actors=[NotificationActor(actor=actor)],
    )
    send_notification(buyer_content, order.user)

    # Notify the event organizers/staff.
    staff_content = NotificationContent(
        type=NotificationType.TICKET_CANCELLED_ORGANIZER,
        target=order,
        actors=[NotificationActor(actor=actor)],
    )
    send_notification(staff_content, users=order.event.notify_staff)
def extractLfnovelsCom(item):
    """Parse a lfnovels.com feed *item* into a release message.

    Returns None for items that are not English-tagged, have no
    chapter/volume info, or are previews; returns False when no known
    series tag matches; otherwise returns the built release message.
    """
    if 'English' not in item['tags']:
        return None

    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None

    # (feed tag, canonical series name, translation type)
    known_series = [
        ('Dungeon Hallow', 'Dungeon Hallow', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tag, series_name, tl_type in known_series:
        if tag in item['tags']:
            return buildReleaseMessageWithType(
                item, series_name, vol, chp,
                frag=frag, postfix=postfix, tl_type=tl_type,
            )
    return False
class ZigguratSignInProvider(object):
    """Sign-in/sign-out provider built on ziggurat's UserService.

    Configuration (param key names, ``session_getter`` override, …) is
    supplied as keyword arguments and copied onto the instance.
    """

    def __init__(self, *args, **kwargs):
        # Attach every keyword option as an instance attribute
        # (e.g. signin_username_key, signin_password_key, signin_came_from_key).
        for key, value in kwargs.items():
            setattr(self, key, value)

    def sign_in(self, request):
        """Authenticate *request* by username-or-email plus password.

        Returns ZigguratSignInSuccess with remember-headers on success,
        otherwise ZigguratSignInBadAuth with forget-headers.
        """
        came_from = request.params.get(self.signin_came_from_key, '/')
        db_session = self.session_getter(request)
        login = request.params.get(self.signin_username_key)

        # Look the account up by username first, falling back to email.
        user = UserService.by_user_name(login, db_session=db_session)
        if user is None:
            user = UserService.by_email(login, db_session=db_session)

        if user:
            supplied = request.params.get(self.signin_password_key)
            if UserService.check_password(user, supplied):
                return ZigguratSignInSuccess(
                    headers=remember(request, user.id),
                    came_from=came_from,
                    user=user,
                )
        # Unknown account or wrong password: clear any session state.
        return ZigguratSignInBadAuth(headers=forget(request), came_from=came_from)

    def sign_out(self, request):
        """Drop the authenticated session and return sign-out headers."""
        return ZigguratSignOut(headers=forget(request))

    def session_getter(self, request):
        # Subclasses / configuration must supply the DB session factory.
        raise NotImplementedError()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.