code stringlengths 281 23.7M |
|---|
def create_callback():
    """Register all Dash callbacks for the battery/trips views, exactly once.

    Uses the module-level CALLBACK_CREATED flag so repeated calls do not
    register duplicate callbacks.

    NOTE(review): in the flattened source every ``_app.callback(...)`` call
    immediately preceding a ``def`` had lost its '@' (a bare call followed by
    a def registers nothing in Dash) — the decorators are restored here. The
    original indentation was also lost, so the nesting is a best-effort
    reconstruction; TODO confirm against VCS history.
    """
    global CALLBACK_CREATED
    if not CALLBACK_CREATED:

        @_app.callback(Output(EMPTY_DIV, 'children'),
                       [Input('battery-table', 'data_timestamp')],
                       [State('battery-table', 'data'),
                        State('battery-table', 'data_previous')])
        def capture_diffs_in_battery_table(timestamp, data, data_previous):
            # Persist price edits made in the battery table to the database.
            if timestamp is None:
                raise PreventUpdate
            diff_data = diff_dashtable(data, data_previous, 'start_at')
            for changed_line in diff_data:
                if changed_line['column_name'] == 'price':
                    conn = Database.get_db()
                    # start_at comes from the table in milliseconds.
                    charge = Charge(datetime.utcfromtimestamp(changed_line['start_at'] / 1000))
                    charge.price = changed_line['current_value']
                    charge.vin = get_default_car().vin
                    if not Database.set_chargings_price(conn, charge):
                        logger.error("Can't find line to update in the database")
                    else:
                        logger.debug('update price %s of %s',
                                     changed_line['current_value'], changed_line['start_at'])
                    conn.close()
            return ''

        @_app.callback([Output('tab_battery_popup_graph', 'children'),
                        Output('tab_battery_popup', 'is_open')],
                       [Input('battery-table', 'active_cell'),
                        Input('tab_battery_popup-close', 'n_clicks')],
                       [State('battery-table', 'data'),
                        State('tab_battery_popup', 'is_open')])
        def get_battery_curve(active_cell, close, data, is_open):
            # Open a popup with the battery curve when a level cell is clicked.
            if is_open is None:
                is_open = False
            if (active_cell is not None
                    and active_cell['column_id'] in ['start_level', 'end_level']
                    and not is_open):
                row = data[active_cell['row']]
                return figures.get_battery_curve_fig(row, APP.myp.vehicles_list[0]), True
            return '', False

        @_app.callback([Output('tab_trips_popup_graph', 'children'),
                        Output('tab_trips_popup', 'is_open')],
                       [Input('trips-table', 'active_cell'),
                        Input('tab_trips_popup-close', 'n_clicks')],
                       State('tab_trips_popup', 'is_open'))
        def get_altitude_graph(active_cell, close, is_open):
            # Open a popup with the altitude profile for the clicked trip.
            if is_open is None:
                is_open = False
            if (active_cell is not None
                    and active_cell['column_id'] in ['altitude_diff']
                    and not is_open):
                return figures.get_altitude_fig(trips[active_cell['row_id'] - 1]), True
            return '', False

        @_app.callback(Output('loading-output-trips', 'children'),
                       Input('export-trips-table', 'n_clicks'))
        def export_trips_loading_animation(n_clicks):
            # Keep the loading spinner visible while the export is triggered.
            time.sleep(3)

        @_app.callback(Output('loading-output-battery', 'children'),
                       Input('export-battery-table', 'n_clicks'))
        def export_batt_loading_animation(n_clicks):
            time.sleep(3)

        # Clientside callbacks forward the export button clicks to the
        # hidden DataTable export buttons.
        dash_app.clientside_callback('\n function(n_clicks) {\n if (n_clicks > 0)\n document.querySelector("#trips-table button.export").click()\n return ""\n }\n ', Output('trips-table', 'data-dummy'), [Input('export-trips-table', 'n_clicks')])
        dash_app.clientside_callback('\n function(n_clicks) {\n if (n_clicks > 0)\n document.querySelector("#battery-table button.export").click()\n return ""\n }\n ', Output('battery-table', 'data-dummy'), [Input('export-battery-table', 'n_clicks')])
        figures.CURRENCY = APP.config.General.currency
        figures.EXPORT_FORMAT = APP.config.General.export_format
        CALLBACK_CREATED = True
class MapCSSWriter():
    """Serializes a Scheme's node/way matchers into a MapCSS style sheet."""

    def __init__(self, scheme: Scheme, icon_directory_name: str, add_icons: bool=True, add_ways: bool=True, add_icons_for_lifecycle: bool=True) -> None:
        # Feature toggles for which selector groups get written.
        self.add_icons: bool = add_icons
        self.add_ways: bool = add_ways
        self.add_icons_for_lifecycle: bool = add_icons_for_lifecycle
        # Directory prefix used inside icon-image URLs.
        self.icon_directory_name: str = icon_directory_name
        self.point_matchers: list[Matcher] = scheme.node_matchers
        self.line_matchers: list[Matcher] = scheme.way_matchers

    def add_selector(self, target: str, matcher: Matcher, prefix: str='', opacity: Optional[float]=None) -> str:
        """Render one MapCSS selector block for *matcher*.

        Returns an empty string when the matcher uses regex tag values
        (unsupported in MapCSS) or produces no style properties.
        """
        elements: dict[str, str] = {}
        # '^'-prefixed tag values are regular expressions — skip entirely.
        for value in matcher.tags.values():
            if value.startswith('^'):
                return ''
        clean_shapes = matcher.get_clean_shapes()
        if clean_shapes:
            elements['icon-image'] = ((f'"{self.icon_directory_name}/' + '___'.join(clean_shapes)) + '.svg"')
        if (opacity is not None):
            elements['icon-opacity'] = f'{opacity:.2f}'
        style: dict[str, str] = matcher.get_style()
        if style:
            # Translate SVG-style property names to their MapCSS equivalents.
            if ('fill' in style):
                elements['fill-color'] = style['fill']
            if ('stroke' in style):
                elements['color'] = style['stroke']
            if ('stroke-width' in style):
                elements['width'] = style['stroke-width']
            if ('stroke-dasharray' in style):
                elements['dashes'] = style['stroke-dasharray']
            if ('opacity' in style):
                elements['fill-opacity'] = style['opacity']
                elements['opacity'] = style['opacity']
        if (not elements):
            return ''
        selector: str = ((target + matcher.get_mapcss_selector(prefix)) + ' {\n')
        # NOTE(review): the indentation inside this f-string was lost in this
        # copy of the source; the closing quotes are kept at column 0 so the
        # emitted property line matches the text as found — confirm upstream.
        for (key, value) in elements.items():
            selector += f''' {key}: {value};
'''
        selector += '}\n'
        return selector

    def write(self, output_file: TextIO) -> None:
        """Write the full MapCSS document (header, configs, selectors)."""
        output_file.write((HEADER + '\n\n'))
        if self.add_ways:
            output_file.write((WAY_CONFIG + '\n\n'))
        if self.add_icons:
            output_file.write((NODE_CONFIG + '\n\n'))
        if self.add_icons:
            for matcher in self.point_matchers:
                for target in ['node', 'area']:
                    output_file.write(self.add_selector(target, matcher))
        if self.add_ways:
            for line_matcher in self.line_matchers:
                for target in ['way', 'relation']:
                    output_file.write(self.add_selector(target, line_matcher))
        if (not self.add_icons_for_lifecycle):
            return
        # Fade icons progressively through the stages of decay:
        # opacity goes linearly from 0.6 down to 0.2.
        for (index, stage_of_decay) in enumerate(STAGES_OF_DECAY):
            opacity: float = (0.6 - ((0.4 * index) / (len(STAGES_OF_DECAY) - 1.0)))
            for matcher in self.point_matchers:
                # Only single-tag matchers get lifecycle variants.
                if (len(matcher.tags) > 1):
                    continue
                for target in ['node', 'area']:
                    output_file.write(self.add_selector(target, matcher, stage_of_decay, opacity))
# NOTE(review): the stray ".unit" in this copy of the source was almost
# certainly a truncated "@pytest.mark.unit" decorator — restored here;
# confirm against VCS history.
@pytest.mark.unit
class TestCredentials():
    """Unit tests for the Credentials value object."""

    def test_valid_credentials(self):
        # All constructor arguments must be stored verbatim.
        credentials = Credentials(username='test', password='password', user_id='some_id', access_token='some_token')
        assert (credentials.username == 'test')
        assert (credentials.password == 'password')
        assert (credentials.user_id == 'some_id')
        assert (credentials.access_token == 'some_token')
def _geth_command_arguments(rpc_port, base_geth_command_arguments, geth_version):
    """Yield the full geth CLI argument list for the given geth version.

    Yields the base arguments first, then version-specific RPC/mining flags.
    Raises AssertionError for any major version other than 1.

    NOTE(review): the flag tuple was garbled in this copy of the source
    ("'-- '-- rpc_port, '-- 'admin,..."). The flag names preceding rpc_port
    and the API list are reconstructed from the surviving fragments and the
    usual geth 1.x test-fixture pattern — TODO confirm against VCS history.
    """
    yield from base_geth_command_arguments
    if geth_version.major == 1:
        yield from (
            '--rpcport', rpc_port,
            '--rpcapi', 'admin,eth,net,web3,personal,miner,txpool',
            '--ipcdisable',
            '--allow-insecure-unlock',
            '--miner.etherbase', COINBASE[2:],
        )
    else:
        raise AssertionError('Unsupported Geth version')
# NOTE(review): "_test" is almost certainly the tail of a truncated decorator
# (e.g. "@pytest.mark....  _test") lost during extraction — confirm upstream.
_test
def test_lsl_poller_node() -> None:
    """End-to-end check: a graph with an LSLPollerNode source writes samples
    pushed onto an LSL stream (by a helper process) into a file, and the file
    contents match what was sent.
    """

    class LSLPollerGraphConfig(Config):
        # File the sink writes received samples into.
        output_filename: str

    class LSLPollerGraph(Graph):
        MY_SOURCE: LSLPollerNode
        MY_SINK: MySink
        config: LSLPollerGraphConfig

        def setup(self) -> None:
            self.MY_SOURCE.configure(LSLPollerConfig(type='mock_type'))
            self.MY_SINK.configure(MySinkConfig(output_filename=self.config.output_filename))

        def connections(self) -> Connections:
            # Wire the poller's output topic straight into the sink.
            return ((self.MY_SOURCE.topic, self.MY_SINK.TOPIC),)

    graph = LSLPollerGraph()
    output_filename = get_test_filename()
    graph.configure(LSLPollerGraphConfig(output_filename=output_filename))
    runner = LocalRunner(module=graph)
    # Produce samples on the LSL stream from a separate process while running.
    p = Process(target=write_sample_to_lsl, args=())
    p.start()
    runner.run()
    p.join()
    with open(output_filename, 'r') as f:
        data = f.read()
    # NOTE(review): "samples" is not defined in this view — presumably a
    # module-level record of the sent samples; confirm. ("recieved" [sic]
    # name kept as-is.)
    recieved_data = set(data.strip(DATA_DELIMITER).split(DATA_DELIMITER))
    assert (len(recieved_data) > 0)
    assert (len(samples) == len(recieved_data))
def test_from_file_double_dimensions(simple_roff_parameter_contents):
    """A roff stream containing two 'dimensions' tags must be rejected."""
    stream = io.BytesIO()
    # Append a second dimensions tag so the stream carries a duplicate.
    simple_roff_parameter_contents.append(
        ('dimensions', {'nX': 2, 'nY': 2, 'nZ': 2})
    )
    roffio.write(stream, simple_roff_parameter_contents)
    stream.seek(0)
    with pytest.raises(ValueError, match='Multiple tag'):
        RoffParameter.from_file(stream, 'b')
# NOTE(review): the bare "('MXNetWrapper.v1')" line was almost certainly a
# truncated thinc registry decorator (e.g. "@registry.layers('MXNetWrapper.v1')")
# — restored here; confirm the registry namespace against VCS history.
@registry.layers('MXNetWrapper.v1')
def MXNetWrapper(mxnet_model, convert_inputs: Optional[Callable]=None, convert_outputs: Optional[Callable]=None, model_class: Type[Model]=Model, model_name: str='mxnet') -> Model[Any, Any]:
    """Wrap an MXNet model as a thinc Model via MXNetShim.

    convert_inputs/convert_outputs default to the module's MXNet default
    converters when not supplied.
    """
    if convert_inputs is None:
        convert_inputs = convert_mxnet_default_inputs
    if convert_outputs is None:
        convert_outputs = convert_mxnet_default_outputs
    return model_class(
        model_name,
        forward,
        attrs={'convert_inputs': convert_inputs, 'convert_outputs': convert_outputs},
        shims=[MXNetShim(mxnet_model)],
    )
class TestDPRoundReducer(TestRoundReducerBase):
    """Tests for the differentially-private round reducer.

    NOTE(review): assertFalse/assertEqual/... are used as free functions,
    so they are presumably imported from a pytest helper module — confirm.
    """

    def test_dp_off(self) -> None:
        # Privacy must be reported off for an infinite clipping value...
        ref_model = create_ref_model(ref_model_param_value=3.0)
        dp_rr = get_dp_round_reducer(ref_model, clipping_value=float('inf'), noise_multiplier=0)
        assertFalse(dp_rr.privacy_on)
        # ...and for a negative noise multiplier.
        dp_rr = get_dp_round_reducer(ref_model, clipping_value=10.0, noise_multiplier=(- 1))
        assertFalse(dp_rr.privacy_on)

    def test_collect_update_with_clipping(self) -> None:
        num_clients = 100
        global_value = 5.0
        clients = self._create_fake_clients(global_param_value=global_value, num_clients=num_clients, client_param_value=3.0, client_weight=1.0)
        ref_model = create_ref_model(ref_model_param_value=global_value)
        dp_rr = get_dp_round_reducer(ref_model, clipping_value=6.0, num_users_per_round=num_clients, total_number_of_users=num_clients)
        for client in clients:
            (delta, weight) = client.generate_local_update(Message(utils.SampleNet(utils.TwoFC())))
            dp_rr.collect_update(delta, weight)
        # Each client's delta is clipped, so the accumulated sum is expected
        # to be 1.0 per client (see helper semantics upstream).
        expected_param_values = (1. * num_clients)
        (collected_model_updates, _) = dp_rr.current_results
        mismatched = utils.model_parameters_equal_to_value(collected_model_updates, expected_param_values)
        assertEqual(mismatched, '', mismatched)

    def test_clipping_when_noise_zero(self) -> None:
        num_clients = 50
        global_value = 8.0
        clients = self._create_fake_clients(global_param_value=global_value, num_clients=num_clients, client_param_value=2.0, client_weight=1.0)
        ref_model = create_ref_model(ref_model_param_value=global_value)
        # noise_multiplier=0: only clipping affects the reduced result.
        dp_rr = get_dp_round_reducer(ref_model, clipping_value=15.0, noise_multiplier=0, num_users_per_round=num_clients, total_number_of_users=num_clients)
        for client in clients:
            (delta, weight) = client.generate_local_update(Message(utils.SampleNet(utils.TwoFC())))
            dp_rr.collect_update(delta, weight)
        dp_rr.reduce()
        # Average of per-client values with zero noise added.
        expected_param_values = (((3. * num_clients) / num_clients) + 0)
        (model_after_noise, sum_weights) = dp_rr.current_results
        mismatched = utils.model_parameters_equal_to_value(model_after_noise, expected_param_values)
        assertEqual(mismatched, '', mismatched)

    def test_noise_when_clipping_large_value(self) -> None:
        num_clients = 20
        global_value = 5.0
        clients = self._create_fake_clients(global_param_value=global_value, num_clients=num_clients, client_param_value=3.0, client_weight=1.0)
        ref_model = create_ref_model(ref_model_param_value=global_value)
        ref_model_before = FLModelParamUtils.clone(ref_model)
        dp_rr = get_dp_round_reducer(ref_model, clipping_value=10.0, num_users_per_round=num_clients, total_number_of_users=num_clients)
        for client in clients:
            (delta, weight) = client.generate_local_update(Message(utils.SampleNet(utils.TwoFC())))
            dp_rr.collect_update(delta, weight)
        expected_param_values = (2.0 * num_clients)
        (collected_model_updates, _) = dp_rr.current_results
        mismatched = utils.model_parameters_equal_to_value(collected_model_updates, expected_param_values)
        assertEqual(mismatched, '', mismatched)
        dp_rr.reduce()
        # After reduce(), noise must have changed the model relative to the
        # pre-reduction clone.
        (ref_module_after_noise, _) = dp_rr.current_results
        mismatched = utils.verify_models_equivalent_after_training(ref_model_before.fl_get_module(), ref_module_after_noise)
        assertNotEqual(mismatched, '')

    def test_noise_added_correctly(self) -> None:
        num_clients = 100
        global_value = 5.0
        clients = self._create_fake_clients(global_param_value=global_value, num_clients=num_clients, client_param_value=3.0, client_weight=1.0)
        ref_model = create_ref_model(ref_model_param_value=global_value)
        dp_rr = get_dp_round_reducer(ref_model, clipping_value=7.0, num_users_per_round=num_clients, total_number_of_users=num_clients)
        for client in clients:
            (delta, weight) = client.generate_local_update(Message(utils.SampleNet(utils.TwoFC())))
            client.compute_delta(ref_model, delta, delta)
            dp_rr.collect_update(delta, weight)
        # Pin the noise to a known value so the expectation is deterministic.
        dp_rr.privacy_engine._generate_noise = MagicMock(return_value=0.8)
        expected_param_values = (((1. * num_clients) / num_clients) + 0.8)
        dp_rr.reduce()
        (ref_module_after_noise, _) = dp_rr.current_results
        mismatched = utils.model_parameters_equal_to_value(ref_module_after_noise, expected_param_values)
        assertEqual(mismatched, '', mismatched)

    def test_multiprocess_dp_all_processes_the_same(self) -> None:
        # All worker processes must arrive at the same reduced value.
        model = utils.SampleNet(utils.TwoFC())
        num_models = 4
        r = get_dp_round_reducer(model, clipping_value=1.0, reduction_type=ReductionType.AVERAGE, noise_multiplier=1, num_users_per_round=4, total_number_of_users=4, reset=False)
        results = run_reduction_test(r, num_processes=4, num_models=num_models)
        same_value = results[0]
        for r in results:
            assertAlmostEqual(r, same_value, 5)
def upgrade():
    """Alembic upgrade step: create the message_settings table."""
    op.create_table(
        'message_settings',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('action', sa.String(), nullable=True),
        sa.Column('mail_status', sa.Integer(), nullable=True),
        sa.Column('notif_status', sa.Integer(), nullable=True),
        sa.Column('user_control_status', sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    )
def tensorflow2xp(tf_tensor: 'tf.Tensor', *, ops: Optional['Ops']=None) -> ArrayXd:
    """Convert a TensorFlow tensor to a numpy or cupy array.

    GPU tensors are transferred zero-copy via DLPack unless a NumpyOps is
    requested; CPU tensors go through .numpy().
    """
    from .api import NumpyOps
    assert_tensorflow_installed()
    wants_numpy = isinstance(ops, NumpyOps)
    if is_tensorflow_gpu_array(tf_tensor):
        if wants_numpy:
            return tf_tensor.numpy()
        # Keep the data on GPU: hand it to cupy through DLPack.
        return cupy_from_dlpack(tf.experimental.dlpack.to_dlpack(tf_tensor))
    if wants_numpy or ops is None:
        return tf_tensor.numpy()
    return cupy.asarray(tf_tensor.numpy())
class WindowsOpenIntent(hass.Hass):
    """AppDaemon intent app that builds a spoken report of currently open
    windows, doors and tilted doors.

    The three sections previously duplicated the same collect/append logic
    inline; it is factored into _open_entities/_append_section helpers with
    identical output.
    """

    # SSML pause inserted between spoken items.
    BREAK = ' <break strength="weak"/>'

    def initialize(self):
        # listService exposes the entity dictionaries used below.
        self.listService = self.get_app('listService')
        return

    def _open_entities(self, entity_dict):
        """Return the entities from entity_dict whose state is 'on'."""
        return [entity for entity in entity_dict.values()
                if self.get_state(entity) == 'on']

    def _append_section(self, text, header, names):
        """Append a header plus item names to text (with SSML breaks).

        Returns text unchanged when names is empty; otherwise separates the
        new section from previous content with a break.
        """
        if not names:
            return text
        if text != '':
            text = text + self.BREAK
        text = text + header
        for name in names:
            text = text + self.BREAK + name
        return text

    def getIntentResponse(self, slots, devicename):
        """Build the response text; falls back to an error phrase on failure."""
        try:
            window_open = self._open_entities(self.listService.getWindow())
            door_open = self._open_entities(self.listService.getDoor())
            door_tilted = self._open_entities(self.listService.getDoorTilted())
            text = ''
            text = self._append_section(text, self.args['textLineWindowOpen'],
                                        [self.friendly_name(e) for e in window_open])
            text = self._append_section(text, self.args['textLineDoorOpen'],
                                        [self.friendly_name(e) for e in door_open])
            # Tilted-door names drop the German "gekippt"/"Gekippt" suffix.
            tilted_names = []
            for entity in door_tilted:
                name = self.friendly_name(entity)
                name = name.replace(' gekippt', '').replace(' Gekippt', '')
                tilted_names.append(name)
            text = self._append_section(text, self.args['textLineDoorTilted'], tilted_names)
            if text == '':
                text = self.args['textLineClosed']
        except Exception as e:
            self.log('Exception: {}'.format(e))
            self.log('slots: {}'.format(slots))
            text = self.random_arg(self.args['Error'])
        return text

    def random_arg(self, argName):
        """Return argName, or a random element when it is a list."""
        if isinstance(argName, list):
            text = random.choice(argName)
        else:
            text = argName
        return text
class ListWallpapers(SimpleDirectiveMixin, Directive):
    """Sphinx directive rendering the list of available wallpapers."""
    required_arguments = 0
    optional_arguments = 0

    def make_rst(self):
        # Collect (name, object) pairs for everything the module exposes.
        entries = [(name, getattr(wallpapers, name)) for name in dir(wallpapers)]
        rendered = list_wallpapers_template.render(wallpapers=entries)
        # Yield only the non-blank lines of the rendered template.
        for line in rendered.splitlines():
            if line.strip():
                yield line
# NOTE(review): the bare "('/status/<wf_unique_id>', methods=['GET'])" line
# was almost certainly a Flask route decorator whose "@app.route" prefix was
# lost in extraction — restored; confirm the Flask app object's name.
@app.route('/status/<wf_unique_id>', methods=['GET'])
def status(wf_unique_id):
    """Summarize a Dagster workflow run from its stored event-log records.

    Returns a JSON object with overall workflow status/start/finish times and
    a per-node status list (latest event per node, with any failure message).
    """
    global old_wf
    run_records = read_status(wf_unique_id)
    if not run_records:
        return jsonify({'ERROR:': 'ID does not exist'})
    # Keep only STEP_* events: [node_name, event_type, timestamp, raw_json].
    records = []
    for rec in run_records:
        if rec[1].split('_')[0] == 'STEP':
            step = json.loads(rec[0])
            records.append([step['step_key'].split('.')[0], rec[1], rec[2], rec[0]])
    nodes = sorted(list(set([r[0] for r in records])))
    wf_finish_time = ''
    wf_notes = ''
    nodes_status = []
    for node_name in nodes:
        # Latest event wins for each node.
        node_events = list(filter((lambda task: (task[0] == node_name)), records))
        latest = sorted(node_events, key=(lambda x: x[2]), reverse=True)[0]
        message = (json.loads(latest[3])['dagster_event']['event_specific_data']['error']['message']
                   if (latest[1] == 'STEP_FAILURE') else '')
        nodes_status.append({
            'node': latest[0],
            'note': message.replace('\n', '').replace('"', ''),
            'status': latest[1].split('_')[1],
            'time': latest[2],
        })
    # PIPELINE_* events give the overall run status and timing.
    pipe = list(filter((lambda x: (x[1].split('_')[0] == 'PIPELINE')), run_records))
    pipe = sorted(pipe, key=(lambda x: x[2]), reverse=False)
    wf_start_time = pipe[0][2]
    print(('start time' + str(wf_start_time)))
    wf_status = pipe[(- 1)][1].split('_')[1]
    if wf_status == 'SUCCESS':
        # Clear the "current workflow" marker and record the finish time.
        old_wf = ''
        wf_finish_time = pipe[(- 1)][2]
    wf_log_summary = {
        'wf_status': wf_status,
        'wf_start_time': wf_start_time,
        'wf_finish_time': wf_finish_time,
        'wf_notes': wf_notes,
        'nodes_status': nodes_status,
    }
    return jsonify(wf_log_summary)
class UniqueForYearValidator(BaseUniqueForValidator):
    """Validator enforcing uniqueness of a field within a date field's year."""
    message = _('This field must be unique for the "{date_field}" year.')

    def filter_queryset(self, attrs, queryset, field_name, date_field_name):
        # Restrict the queryset to rows with the same field value in the
        # same calendar year as the submitted date.
        lookups = {
            field_name: attrs[self.field],
            ('%s__year' % date_field_name): attrs[self.date_field].year,
        }
        return qs_filter(queryset, **lookups)
class aggregate_stats_reply(stats_reply):
    # Generated-style OpenFlow message class: OFPT_STATS_REPLY (type 19)
    # carrying an aggregate-stats body, wire version 3 (OpenFlow 1.2).
    version = 3
    type = 19
    stats_type = 2

    def __init__(self, xid=None, flags=None, packet_count=None, byte_count=None, flow_count=None):
        # Counters default to 0; xid remains None until assigned.
        if (xid != None):
            self.xid = xid
        else:
            self.xid = None
        if (flags != None):
            self.flags = flags
        else:
            self.flags = 0
        if (packet_count != None):
            self.packet_count = packet_count
        else:
            self.packet_count = 0
        if (byte_count != None):
            self.byte_count = byte_count
        else:
            self.byte_count = 0
        if (flow_count != None):
            self.flow_count = flow_count
        else:
            self.flow_count = 0
        return

    def pack(self):
        """Serialize the message; the length field is back-patched at index 2.

        NOTE(review): joining struct.pack() output with '\\x00' str padding via
        ''.join only works on Python 2 — this appears to be py2-era generated
        code; confirm before running under Python 3.
        """
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        # Placeholder for the total length, patched below.
        packed.append(struct.pack('!H', 0))
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!H', self.stats_type))
        packed.append(struct.pack('!H', self.flags))
        # 4 bytes of padding after the stats header.
        packed.append(('\x00' * 4))
        packed.append(struct.pack('!Q', self.packet_count))
        packed.append(struct.pack('!Q', self.byte_count))
        packed.append(struct.pack('!L', self.flow_count))
        packed.append(('\x00' * 4))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)
        return ''.join(packed)

    def unpack(reader):
        # Implicitly-static factory (generated-code convention): parse one
        # aggregate_stats_reply from a buffer reader.
        obj = aggregate_stats_reply()
        _version = reader.read('!B')[0]
        assert (_version == 3)
        _type = reader.read('!B')[0]
        assert (_type == 19)
        _length = reader.read('!H')[0]
        # Constrain further reads to this message's length (4 bytes already
        # consumed for version/type/length).
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _stats_type = reader.read('!H')[0]
        assert (_stats_type == 2)
        obj.flags = reader.read('!H')[0]
        reader.skip(4)
        obj.packet_count = reader.read('!Q')[0]
        obj.byte_count = reader.read('!Q')[0]
        obj.flow_count = reader.read('!L')[0]
        reader.skip(4)
        return obj

    def __eq__(self, other):
        # Field-by-field equality; requires the exact same class.
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.flags != other.flags):
            return False
        if (self.packet_count != other.packet_count):
            return False
        if (self.byte_count != other.byte_count):
            return False
        if (self.flow_count != other.flow_count):
            return False
        return True

    def pretty_print(self, q):
        # Render via the pretty-printer protocol used by the generated code.
        q.text('aggregate_stats_reply {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('flags = ')
                value_name_map = {1: 'OFPSF_REPLY_MORE'}
                q.text(util.pretty_flags(self.flags, value_name_map.values()))
                q.text(',')
                q.breakable()
                q.text('packet_count = ')
                q.text(('%#x' % self.packet_count))
                q.text(',')
                q.breakable()
                q.text('byte_count = ')
                q.text(('%#x' % self.byte_count))
                q.text(',')
                q.breakable()
                q.text('flow_count = ')
                q.text(('%#x' % self.flow_count))
                q.breakable()
        q.text('}')
def test_verify_different_author_and_chat(patch_chat, patch_chat_member, master_channel):
    """Message.verify() must verify both the chat and the author exactly once."""
    message = Message(
        chat=patch_chat,
        author=patch_chat_member,
        text='Message',
        deliver_to=master_channel,
    )
    message.verify()
    patch_chat.verify.assert_called_once()
    patch_chat_member.verify.assert_called_once()
def block_ranges(start_block: BlockNumber, last_block: Optional[BlockNumber], step: int=5) -> Iterable[Tuple[BlockNumber, BlockNumber]]:
    """Split [start_block, last_block] into inclusive (from, to) ranges of
    width *step*.

    Raises TypeError when last_block is None or precedes start_block.
    """
    if last_block is None:
        # Previously this fell through to ``last_block + 1`` and crashed with
        # an unhelpful "unsupported operand" TypeError; fail explicitly
        # (same exception type, so callers are unaffected).
        raise TypeError('Incompatible stop argument.', 'last_block may not be None.')
    if start_block > last_block:
        raise TypeError('Incompatible start and stop arguments.', 'Start must be less than or equal to stop.')
    return (
        (BlockNumber(from_block), BlockNumber(to_block - 1))
        for (from_block, to_block) in segment_count(start_block, last_block + 1, step)
    )
def execute_trace_test(name):
    """Run one mh_trace test: clean stale outputs, apply optional flags from a
    'cmdline' file, run the tool and capture its stdout into test.out.
    """
    stale_outputs = (
        'mh_trace.json',
        'mh_trace_by_tag.json',
        'mh_act_trace.lobster',
        'mh_imp_trace.lobster',
    )
    for filename in stale_outputs:
        if os.path.isfile(filename):
            os.unlink(filename)
    flags = []
    if os.path.isfile('cmdline'):
        # Extra command-line flags, one per non-blank line.
        with open('cmdline', 'r') as fd:
            flags = [line.strip() for line in fd.readlines() if line.strip()]
    result = run_command('mh_trace', flags)
    with open('test.out', 'w') as fd:
        fd.write(result.stdout)
    return 'Ran trace test %s' % name
class FlattenConcatBaseNet(nn.Module):
    """Flattens every observation, concatenates the results and maps them
    through a dense block to a 'latent' representation.

    NOTE(review): perception blocks are kept in a plain dict rather than an
    nn.ModuleDict, so their parameters are not registered on this module —
    presumably the surrounding framework handles registration; confirm
    before reusing this class standalone.
    """

    def __init__(self, obs_shapes: Dict[str, Sequence[int]], hidden_units: List[int], non_lin: nn.Module):
        super().__init__()
        self.hidden_units = hidden_units
        self.non_lin = non_lin
        self.perception_dict: Dict[str, PerceptionBlock] = dict()
        # One flatten block per observation, keyed '<obs>_flat'.
        flat_keys = []
        for (obs, shape) in obs_shapes.items():
            out_key = f'{obs}_flat'
            flat_keys.append(out_key)
            self.perception_dict[out_key] = FlattenBlock(in_keys=obs, out_keys=out_key, in_shapes=shape, num_flatten_dims=len(shape))
        # Concatenate all flattened observations along the last dimension.
        in_shapes = [self.perception_dict[k].out_shapes()[0] for k in flat_keys]
        self.perception_dict['concat'] = ConcatenationBlock(in_keys=flat_keys, out_keys='concat', in_shapes=in_shapes, concat_dim=(- 1))
        # Dense head producing the latent representation.
        self.perception_dict['latent'] = DenseBlock(in_keys='concat', out_keys='latent', in_shapes=self.perception_dict['concat'].out_shapes(), hidden_units=self.hidden_units, non_lin=self.non_lin)
        # Initialize all blocks with normalized-column weights (std=1.0).
        module_init = make_module_init_normc(std=1.0)
        for key in self.perception_dict.keys():
            self.perception_dict[key].apply(module_init)
# Evennia-style command showing server and in-game time statistics.
# NOTE(review): key and aliases are empty strings here, which would make the
# command un-invokable — they look truncated in this copy of the source
# (upstream typically uses e.g. key = "time"); confirm against the original.
# No class docstring is added deliberately: Evennia uses __doc__ as the
# in-game help text, so adding one would change user-visible output.
class CmdTime(COMMAND_DEFAULT_CLASS):
    key = ''
    aliases = ''
    locks = 'cmd:perm(time) or perm(Player)'
    help_category = 'System'

    def func(self):
        # Table 1: real server-side timings.
        table1 = self.styled_table('|wServer time', '', align='l', width=78)
        table1.add_row('Current uptime', utils.time_format(gametime.uptime(), 3))
        table1.add_row('Portal uptime', utils.time_format(gametime.portal_uptime(), 3))
        table1.add_row('Total runtime', utils.time_format(gametime.runtime(), 2))
        table1.add_row('First start', datetime.datetime.fromtimestamp(gametime.server_epoch()))
        table1.add_row('Current time', datetime.datetime.now())
        table1.reformat_column(0, width=30)
        # Table 2: in-game time, scaled by TIMEFACTOR relative to real time.
        table2 = self.styled_table('|wIn-Game time', ('|wReal time x %g' % gametime.TIMEFACTOR), align='l', width=78, border_top=0)
        epochtxt = ('Epoch (%s)' % ('from settings' if settings.TIME_GAME_EPOCH else 'server start'))
        table2.add_row(epochtxt, datetime.datetime.fromtimestamp(gametime.game_epoch()))
        table2.add_row('Total time passed:', utils.time_format(gametime.gametime(), 2))
        table2.add_row('Current time ', datetime.datetime.fromtimestamp(gametime.gametime(absolute=True)))
        table2.reformat_column(0, width=30)
        self.msg(((str(table1) + '\n') + str(table2)))
def _get_data_type(n_bytes_per_element, sign_flag):
if (n_bytes_per_element not in VALID_ELEMENT_SIZES):
raise NotImplementedError((("Found a 'Grid data element size' (a.k.a. 'ES') value " + f"of '{n_bytes_per_element}'. Only values equal to 1, 2, 4 and 8 are valid, ") + 'along with their compressed counterparts (1025, 1026, 1028, 1032).'))
if (n_bytes_per_element > 1024):
n_bytes_per_element -= 1024
if (n_bytes_per_element == 1):
if (sign_flag == 0):
data_type = 'B'
elif (sign_flag == 1):
data_type = 'b'
elif (n_bytes_per_element == 2):
if (sign_flag == 0):
data_type = 'H'
elif (sign_flag == 1):
data_type = 'h'
elif (n_bytes_per_element == 4):
if (sign_flag == 0):
data_type = 'I'
elif (sign_flag == 1):
data_type = 'i'
elif (sign_flag == 2):
data_type = 'f'
elif (n_bytes_per_element == 8):
data_type = 'd'
return data_type |
def get_header_line(filenames):
    """Return the shared first line of the given files.

    Raises InvalidHeaderError when any file's header differs from the first
    file's header.

    NOTE(review): the pipeline string is executed with shell=True, so the
    filenames must be trusted/escaped upstream.
    """
    pipeline = '({read_files}) 2>/dev/null'.format(read_files=read_files(filenames, max_lines=1))
    header_lines = subprocess.check_output(pipeline, shell=True).decode('utf8').splitlines()
    expected = header_lines[0]
    # Each file contributed exactly one line; all must match the first.
    for index, filename in enumerate(filenames):
        candidate = header_lines[index]
        if candidate != expected:
            raise InvalidHeaderError(
                'Input files do not have identical headers:\n\n{}: {}\n{}: {}'.format(
                    filenames[0], expected, filename, candidate))
    return expected
class OptionSeriesPackedbubbleDataDatalabelsTextpath(Options):
    """Options wrapper for the packedbubble dataLabels.textPath settings.

    NOTE(review): in the flattened source both names were defined twice with
    no decorators, so each second def silently shadowed the first. That is
    the standard getter/setter property pattern with the decorators lost —
    '@property' / '.setter' restored here; confirm against VCS history.
    """

    @property
    def attributes(self):
        # Raw textPath attributes; None when unset.
        return self._config_get(None)

    @attributes.setter
    def attributes(self, value: Any):
        self._config(value, js_type=False)

    @property
    def enabled(self):
        # Whether the text path is enabled; defaults to False.
        return self._config_get(False)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)
class CollectorConfig(Config):
    """Configuration for a metrics collector: trigger, report settings,
    optional parquet reference data and remote-workspace credentials.

    NOTE(review): the default for ``api_url`` was a truncated string literal
    in this copy of the source (``api_url: str = '``); restored as an empty
    string — TODO recover the real default from VCS history.
    """

    class Config():
        # pydantic model config: treat underscore attributes as private.
        underscore_attrs_are_private = True

    id: str = ''
    trigger: CollectorTrigger
    report_config: ReportConfig
    reference_path: Optional[str]
    project_id: str
    api_url: str = ''  # TODO(review): original default lost in extraction
    api_secret: Optional[str] = None
    cache_reference: bool = True
    _reference: Any = None
    _workspace: Optional[RemoteWorkspace] = None

    def workspace(self) -> RemoteWorkspace:
        """Lazily create and cache the remote workspace client."""
        if self._workspace is None:
            self._workspace = RemoteWorkspace(base_url=self.api_url, secret=self.api_secret)
        return self._workspace

    def _read_reference(self):
        # Reference data is stored as a parquet file on disk.
        return pd.read_parquet(self.reference_path)

    def reference(self):
        """Return the reference DataFrame (cached when cache_reference)."""
        if self.reference_path is None:
            return None
        if self._reference is not None:
            return self._reference
        if not self.cache_reference:
            return self._read_reference()
        self._reference = self._read_reference()
        return self._reference
class PhonyCursor():
    """Test double for a DB cursor: serves canned results loaded from JSON.

    The JSON file maps SQL statements to result sets; matching is
    whitespace-insensitive.
    """

    def __init__(self, test_file=None):
        if not test_file:
            # Default to the bundled test fixture next to this module.
            test_file = os.path.join(os.path.dirname(__file__), 'tests/etl_test_data.json')
        with open(test_file) as json_data:
            self.db_responses = json.load(json_data)
        self.results = None

    def execute(self, statement, parameters=None):
        """Store the canned results for *statement* (None when no key matches)."""
        if parameters:
            # Interpolate parameters the same way the real cursor would.
            statement %= tuple(parameters)
        self.results = None
        normalized = ''.join(statement.split())
        for key, response in self.db_responses.items():
            if ''.join(key.split()) == normalized:
                self.results = response
def check_reqdir(reqdir: str, pfiles: 'JobsFS', cls=RequestDirError) -> requests.RequestID:
    """Validate that *reqdir* is a well-formed request directory.

    Returns the parsed RequestID on success; raises *cls* with a reason
    ('invalid', 'missing' or 'malformed') otherwise.
    """
    parent_dir, reqid_str = os.path.split(reqdir)
    # The directory must live directly under the requests root.
    if parent_dir != pfiles.requests.root:
        raise cls(None, reqdir, 'invalid', 'target not in ~/BENCH/REQUESTS/')
    reqid = requests.RequestID.parse(reqid_str)
    if not reqid:
        raise cls(None, reqdir, 'invalid', f'{reqid_str!r} not a request ID')
    if not os.path.exists(reqdir):
        raise cls(reqid, reqdir, 'missing', 'target request dir missing')
    if not os.path.isdir(reqdir):
        raise cls(reqid, reqdir, 'malformed', 'target is not a directory')
    return reqid
def test_packages_installed(host):
    """At least one known kernel package must be installed and its version
    must match the configured kernel_version prefix."""
    candidates = (
        'kernel', 'kernel-common', 'kernel-devel',
        'kernel-headers', 'linux-headers', 'linux-image',
    )
    found_any = False
    for name in candidates:
        package = host.package(name)
        if not package.is_installed:
            continue
        # Join version and release, skipping empty components.
        full_version = '-'.join(part for part in (package.version, package.release) if part)
        assert full_version.startswith(host.ansible.get_variables().get('kernel_version', ''))
        found_any = True
    if not found_any:
        skip('No kernel package found')
def ComputeReposWithChanges(repos_and_curr_branch, params):
    """Return {repo: has_uncommitted_changes} by running 'git status -s'
    in every repo in parallel."""
    commands = [
        ParallelCmd(repo, ([params.config.git] + ['status', '-s']))
        for repo, _branch in repos_and_curr_branch
    ]
    repos_with_changes = {}

    def OnOutput(output):
        # Any output from 'git status -s' means the working tree is dirty.
        repos_with_changes[output.repo] = bool(output.stdout)

    ExecuteInParallel(commands, on_output=OnOutput)
    return repos_with_changes
class DateDetectorTemplate(object):
    """Slotted wrapper around a date template, tracking hit statistics used
    for match ordering.

    Unknown attribute access is delegated to the wrapped template.
    """
    __slots__ = ('template', 'hits', 'lastUsed', 'distance')

    def __init__(self, template):
        self.template = template
        self.hits = 0
        self.lastUsed = 0
        # NOTE(review): this assignment was truncated in the source
        # ("self.distance ="); restored to the conventional "far away"
        # initial distance (max 32-bit int) — confirm against VCS history.
        self.distance = 0x7fffffff

    # NOTE(review): weight is accessed as an attribute in this codebase's
    # usual pattern; the '@property' decorator appears to have been lost in
    # the flattened source — restored. Confirm.
    @property
    def weight(self):
        """Hit-count-weighted score, damped by how far away the match is."""
        return ((self.hits * self.template.weight) / max(1, self.distance))

    def __getattr__(self, name):
        # Delegate everything else to the wrapped template.
        return getattr(self.template, name)
# NOTE(review): "_arguments" is almost certainly the tail of a truncated
# decorator or statement lost during extraction — confirm upstream.
_arguments
def listen_events(args):
    """Listen for notification events matching args.key and print them until
    interrupted (Ctrl-C); always unregisters the listener on exit.
    """

    class Processor(ListenerProcessor):
        # Forwards every received event into a queue for the main loop.
        def __init__(self, queue):
            self.queue = queue

        def process(self, events: List[Event]):
            for e in events:
                self.queue.put(e)

    client = EmbeddedNotificationClient(server_uri=args.server_uri, namespace=args.namespace)
    # Starting offset: explicit offset wins over a begin-time lookup.
    offset = 0
    if args.begin_offset:
        offset = args.begin_offset
    elif args.begin_time:
        offset = client.time_to_offset(time_utils.timestamp_to_datetime(args.begin_time))
    event_queue = Queue()
    registration_id = client.register_listener(listener_processor=Processor(event_queue), event_keys=[args.key], offset=offset)
    try:
        while True:
            print(event_queue.get())
    except KeyboardInterrupt:
        pass
    finally:
        client.unregister_listener(registration_id)
class QR_Window(QWidget):
    """Detached window displaying a payment request: a QR code plus read-only
    destination / message / amount fields."""

    def __init__(self, win):
        QWidget.__init__(self)
        self.win = win
        self.setWindowTitle(('ElectrumSV - ' + _('Payment Request')))
        self.label = ''
        self.amount = 0
        self.setFocusPolicy(Qt.NoFocus)
        layout = QGridLayout()
        self.qrw = QRCodeWidget()
        layout.addWidget(self.qrw, 0, 0, 1, 4, Qt.AlignHCenter)
        self._address_label = QLabel((_('Destination') + ':'))
        layout.addWidget(self._address_label, 1, 1, 1, 1, Qt.AlignRight)
        self._address_edit = QPlainTextEdit()
        self._address_edit.setReadOnly(True)
        self._address_edit.setMinimumWidth(300)
        layout.addWidget(self._address_edit, 1, 2, 1, 1, Qt.AlignLeft)
        self._message_label = QLabel((_('Message') + ':'))
        layout.addWidget(self._message_label, 2, 1, 1, 1, Qt.AlignRight)
        self._message_edit = QPlainTextEdit()
        self._message_edit.setReadOnly(True)
        self._message_edit.setMinimumWidth(300)
        layout.addWidget(self._message_edit, 2, 2, 1, 1, Qt.AlignLeft)
        self._amount_label = QLabel((_('Amount') + ':'))
        layout.addWidget(self._amount_label, 3, 1, 1, 1, Qt.AlignRight)
        self._amount_edit = QLineEdit()
        # BUG FIX: this line previously re-applied setReadOnly to
        # self._message_edit (copy/paste error), leaving the amount
        # field editable.
        self._amount_edit.setReadOnly(True)
        layout.addWidget(self._amount_edit, 3, 2, 1, 1, Qt.AlignLeft)
        self.setLayout(layout)

    def set_content(self, address_text, amount, message, url):
        """Populate the fields from a payment request and refresh the QR code."""
        self._address_edit.setPlainText(address_text)
        if amount:
            amount_text = '{} {}'.format(app_state.format_amount(amount), app_state.base_unit())
        else:
            amount_text = ''
        self._amount_edit.setText(amount_text)
        self._message_edit.setPlainText(message)
        self.qrw.setData(url)
def setup_test_data(db):
    """Populate the test DB with one submission window and one reporting
    agency overview row."""
    baker.make(
        'submissions.DABSSubmissionWindowSchedule',
        submission_fiscal_year=2020,
        submission_fiscal_month=8,
        is_quarter=False,
        submission_reveal_date='2020-06-01',
        period_start_date='2020-04-01',
    )
    baker.make(
        'reporting.ReportingAgencyOverview',
        toptier_code='043',
        fiscal_year=2020,
        fiscal_period=8,
        unlinked_assistance_c_awards=12,
        unlinked_assistance_d_awards=24,
        unlinked_procurement_c_awards=14,
        unlinked_procurement_d_awards=28,
        linked_assistance_awards=6,
        linked_procurement_awards=7,
    )
def _validate_submission_type(filters: dict) -> None:
    """Normalize and validate the submission type filter(s) in place.

    Accepts either the legacy single-value 'submission_type' key or
    'submission_types' (single value or list). On success,
    filters['submission_types'] holds a deduplicated list and the legacy
    key is removed.

    Raises InvalidParameterException for missing, empty or invalid values.
    """
    # Ellipsis is the "not provided" sentinel (distinguishes from None).
    legacy_value = filters.get('submission_type', ...)
    requested = filters.get('submission_types', ...)
    if (requested == ...) and (legacy_value == ...):
        raise InvalidParameterException('Missing required filter: submission_types')
    if requested == ...:
        # Only the legacy key was provided: migrate it to the new key.
        del filters['submission_type']
        if isinstance(legacy_value, list):
            raise InvalidParameterException('Use filter `submission_types` to request multiple submission types')
        requested = [legacy_value]
    elif not isinstance(requested, list):
        requested = [requested]
    if len(requested) == 0:
        msg = f"Provide at least one value in submission_types: {' '.join(VALID_ACCOUNT_SUBMISSION_TYPES)}"
        raise InvalidParameterException(msg)
    if any(value not in VALID_ACCOUNT_SUBMISSION_TYPES for value in requested):
        msg = f"Invalid value in submission_types. Options: [{', '.join(VALID_ACCOUNT_SUBMISSION_TYPES)}]"
        raise InvalidParameterException(msg)
    filters['submission_types'] = list(set(requested))
def remove_redundant_and_unpin_blocks(asmcfg, head, mode, unpin=True):
    """Delete CFG blocks unreachable from ``head`` and optionally unpin the rest.

    asmcfg: miasm AsmCFG, modified in place.
    head: loc_key of the entry block; reachability is computed from it.
    mode: architecture mode, forwarded to create_nop() for empty blocks.
    unpin: when True, RIP-relative operands are rewritten symbolically and
        pinned block offsets are cleared so blocks can be relocated.
    """
    reachable_loc_keys = list(asmcfg.reachable_sons(head))
    blocks_to_be_removed = []
    rip = ExprId('RIP', 64)
    # Placeholder location named '_' standing in for the (unknown) new
    # next-instruction address once blocks lose their fixed offsets.
    new_next_addr_card = ExprLoc(asmcfg.loc_db.get_or_create_name_location('_'), 64)
    for block in asmcfg.blocks:
        if (block.loc_key not in reachable_loc_keys):
            blocks_to_be_removed.append(block)
        elif unpin:
            for instr in block.lines:
                for ind in range(len(instr.args)):
                    if (rip in instr.args[ind]):
                        # RIP-relative operand: re-express it relative to the
                        # placeholder so it stays valid after re-assembly.
                        next_addr = ExprInt((instr.offset + instr.l), 64)
                        fix_dict = {rip: ((rip + next_addr) - new_next_addr_card)}
                        instr.args[ind] = instr.args[ind].replace_expr(fix_dict)
        if (not block.lines):
            # Keep the block assemblable: an empty body gets a NOP.
            block.lines = [create_nop(mode)]
        if (unpin and asmcfg.loc_db.get_location_offset(block.loc_key)):
            asmcfg.loc_db.unset_location_offset(block.loc_key)
    for block in blocks_to_be_removed:
        asmcfg.del_block(block)
def savgol(x, total_width=None, weights=None, window_width=7, order=3, n_iter=1):
    """Savitzky-Golay smoothing of the 1-D signal ``x``.

    total_width: effective smoothing bandwidth; defaults to
        ``n_iter * window_width``. The window/order/iteration parameters are
        re-derived from it after input checking.
    weights: optional per-sample weights; when given, smoothing uses a
        weighted convolution with precomputed S-G coefficients instead of
        scipy's savgol_filter.
    Returns the smoothed signal with the padding wings stripped.
    """
    if (len(x) < 2):
        return x
    if (total_width is None):
        total_width = (n_iter * window_width)
    # check_inputs pads the signal with `total_wing` samples on each side
    # (assumed behavior of the external helper — TODO confirm).
    if (weights is None):
        (x, total_wing, signal) = check_inputs(x, total_width, False)
    else:
        (x, total_wing, signal, weights) = check_inputs(x, total_width, False, weights)
    total_width = ((2 * total_wing) + 1)
    window_width = min(window_width, total_width)
    # Polynomial order must stay below half the window width.
    order = min(order, (window_width // 2))
    # Iterate the narrow window enough times to cover the full bandwidth,
    # capped at 1000 passes.
    n_iter = max(1, min(1000, (total_width // window_width)))
    logging.debug('Smoothing in %s iterations with window width %s and order %s for effective bandwidth %s', n_iter, window_width, order, total_width)
    if (weights is None):
        y = signal
        for _i in range(n_iter):
            y = savgol_filter(y, window_width, order, mode='interp')
    else:
        window = savgol_coeffs(window_width, order)
        (y, _w) = convolve_weighted(window, signal, weights, n_iter)
    # Warn when the filter overshoots the original value range.
    bad_idx = ((y > x.max()) | (y < x.min()))
    if bad_idx.any():
        logging.warning('Smoothing overshot at %s / %s indices: (%s, %s) vs. original (%s, %s)', bad_idx.sum(), len(bad_idx), y.min(), y.max(), x.min(), x.max())
    # Strip the padding wings added by check_inputs.
    return y[total_wing:(- total_wing)]
class OptionSeriesGaugeSonificationTracksMappingHighpassResonance(Options):
    """Generated option wrapper (gauge series sonification highpass resonance).

    Fix: the original defined each getter and setter as two plain methods
    with the same name, so the setter definition shadowed the getter
    (unreachable code, ruff F811). The property/setter decorators are
    restored so both directions work.
    """

    @property
    def mapFunction(self):
        # Stored config value; None when unset.
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def parse_cnc_request_config(f, data):
    """Decode a C&C REQUEST_CONFIG message body from the stream ``f``.

    Reads six little-endian-native 32-bit unsigned integers: five counters
    followed by a flags word (bit 0: bruteforce list, bit 1: write access,
    bit 7: time problem). Results are written into ``data`` which is also
    returned.
    """
    data['msg_type_decoded'] = 'REQUEST_CONFIG'
    for counter_name in ('loop_count', 'nb_localscans', 'nb_extscans', 'nb_ifscans', 'nb_killed'):
        (data[counter_name],) = unpack('I', f.read(4))
    # Flags word (offset 0x1C in the original message layout).
    (flags,) = unpack('I', f.read(4))
    data['flag_BRUTEFORCE_LIST'] = bool(flags & 1)
    data['flag_WRITE_ACCESS'] = bool(flags & 2)
    data['flag_TIME_PROBLEM'] = bool(flags & 128)
    return data
def _main():
    """Render one markdown documentation page per schema file named on argv."""
    env = jinja2.Environment(loader=jinja2.FileSystemLoader('.'))
    # Register the custom template filters used by event_docs.md.j2.
    env.filters.update(
        event_link=_filter_event_link,
        member_heading=_filter_member_heading,
        yes_or_no=_filter_yes_or_no,
    )
    template = env.get_template('event_docs.md.j2')
    for filename in sys.argv[1:]:
        print(filename)
        input_path = Path(filename)
        schema = definition_loader.load(input_path)
        output_path = (_OUTPUT_ROOT_PATH / input_path.parent.name).with_suffix('.md')
        context = {
            'type': input_path.parent.name,
            'version': input_path.stem,
            'description': schema.get('_description', ''),
            'abbrev': schema.get('_abbrev', ''),
            'links': schema.get('_links', {}),
            'data_members': _get_members('data.', schema['properties']['data'], _SKIP_DOC_FIELDS),
            'meta_members': _get_members('meta.', schema['properties']['meta'], _SKIP_DOC_FIELDS),
            'examples': schema.get('_examples'),
            'history': schema.get('_history'),
            'source_file': os.path.relpath(input_path, output_path.parent),
        }
        with output_path.open(mode='w') as output_file:
            output_file.write(template.render(**context))
def admin_login(driver):
    """Log the headless admin bot into both challenge pages with the
    hard-coded admin credentials.

    Any exception (missing element, navigation failure) is printed and
    swallowed so the bot loop keeps running.
    """
    try:
        driver.get(CHALL_URL)
        (username, password) = ('admin', 'gtrunkgeljndrthuiujgejleuudbcklbeuvbktkgfgcftvkfhkdugrcfheegjjtb')
        # First site: fill and submit the login form.
        username_input = driver.find_element_by_id('username')
        password_input = driver.find_element_by_id('password')
        username_input.send_keys(username)
        password_input.send_keys(password)
        submit = driver.find_element_by_id('submit_button')
        submit.click()
        # Second site: fill the same form.
        driver.get(CHALL_URL2)
        username_input = driver.find_element_by_id('username')
        password_input = driver.find_element_by_id('password')
        username_input.send_keys(username)
        password_input.send_keys(password)
        submit = driver.find_element_by_id('submit_button')
        # NOTE(review): unlike the first form, this submit button is located
        # but never clicked before navigating again — confirm intentional.
        driver.get(CHALL_URL2)
    except Exception as e:
        print('exception inside admin_login')
        print(e)
    return
def edit(filename=None, contents=None, use_tty=None, suffix=''):
    """Open ``filename`` (or a fresh temp file) in the user's editor and
    return the file's final contents as bytes.

    contents: optional initial contents (str or bytes) written before the
        editor starts.
    use_tty: route editor output to the controlling terminal; defaults to
        True only when stdin is a tty but stdout is redirected.
    suffix: suffix for the temporary file when no filename is given.
    """
    editor = get_editor()
    args = [editor] + get_editor_args(os.path.basename(os.path.realpath(editor)))
    if use_tty is None:
        # Attach to the terminal when stdin is interactive but stdout is a
        # pipe/file (e.g. `program | less`).
        use_tty = sys.stdin.isatty() and (not sys.stdout.isatty())
    tmp = None
    if filename is None:
        # Keep a reference so the NamedTemporaryFile is not deleted while
        # the editor runs.
        tmp = tempfile.NamedTemporaryFile(suffix=suffix)
        filename = tmp.name
    if contents is not None:
        if hasattr(contents, 'encode'):
            contents = contents.encode()
        with open(filename, mode='wb') as f:
            f.write(contents)
    args += [filename]
    stdout = None
    try:
        if use_tty:
            stdout = open(get_tty_filename(), 'wb')
        proc = subprocess.Popen(args, close_fds=True, stdout=stdout)
        proc.communicate()
    finally:
        # Fix: the original leaked this handle when use_tty was set.
        if stdout is not None:
            stdout.close()
    with open(filename, mode='rb') as f:
        return f.read()
def test_split_color_glyphs_by_version():
    """_split_color_glyphs_by_version partitions COLR glyphs into v0
    (plain layer lists) and v1 (paint-format descriptions)."""
    # NOTE(review): `layerBuilder` is assigned but never used — confirm the
    # constructor has no required side effect before removing.
    layerBuilder = LayerListBuilder()
    # All-layer-list input stays entirely in v0.
    colorGlyphs = {'a': [('b', 0), ('c', 1), ('d', 2), ('e', 3)]}
    (colorGlyphsV0, colorGlyphsV1) = builder._split_color_glyphs_by_version(colorGlyphs)
    assert (colorGlyphsV0 == {'a': [('b', 0), ('c', 1), ('d', 2), ('e', 3)]})
    assert (not colorGlyphsV1)
    # A Paint tuple forces the glyph into v1.
    colorGlyphs = {'a': (ot.PaintFormat.PaintGlyph, 0, 'b')}
    (colorGlyphsV0, colorGlyphsV1) = builder._split_color_glyphs_by_version(colorGlyphs)
    assert (not colorGlyphsV0)
    assert (colorGlyphsV1 == colorGlyphs)
    # Mixed input: plain lists go to v0, dict-described paints to v1.
    colorGlyphs = {'a': [('b', 0)], 'c': [('d', 1), ('e', {'format': 3, 'colorLine': {'stops': [(0.0, 2), (1.0, 3)]}, 'p0': (0, 0), 'p1': (10, 10)})]}
    (colorGlyphsV0, colorGlyphsV1) = builder._split_color_glyphs_by_version(colorGlyphs)
    assert (colorGlyphsV0 == {'a': [('b', 0)]})
    assert ('a' not in colorGlyphsV1)
    assert ('c' in colorGlyphsV1)
    assert (len(colorGlyphsV1['c']) == 2)
def send_image(self, fileDir=None, toUserName=None, mediaId=None, file_=None):
    """Send an image (or animated gif) to ``toUserName`` via web WeChat.

    Exactly one source must be supplied: a path/file-like in ``fileDir``,
    an open file in ``file_``, or a pre-uploaded ``mediaId``. Returns a
    ReturnValue wrapping the HTTP response or an error payload.
    """
    logger.debug(('Request to send a image(mediaId: %s) to %s: %s' % (mediaId, toUserName, fileDir)))
    if (fileDir or file_):
        if hasattr(fileDir, 'read'):
            # A file-like object was passed in the fileDir slot; shuffle it
            # into file_ and synthesize a name below.
            (file_, fileDir) = (fileDir, None)
        if (fileDir is None):
            # Placeholder name: only the extension matters for routing.
            fileDir = 'tmp.jpg'
    else:
        return ReturnValue({'BaseResponse': {'ErrMsg': 'Either fileDir or file_ should be specific', 'Ret': (- 1005)}})
    if (toUserName is None):
        # Default to sending to self ("file helper" style).
        toUserName = self.storageClass.userName
    if (mediaId is None):
        # Upload first; .gif files are uploaded as generic files, not pictures.
        r = self.upload_file(fileDir, isPicture=(not (fileDir[(- 4):] == '.gif')), file_=file_)
        if r:
            mediaId = r['MediaId']
        else:
            # Upload failed; propagate the error ReturnValue.
            return r
    url = ('%s/webwxsendmsgimg?fun=async&f=json' % self.loginInfo['url'])
    # Msg Type 3 = image; LocalID/ClientMsgId are client-side dedup keys.
    data = {'BaseRequest': self.loginInfo['BaseRequest'], 'Msg': {'Type': 3, 'MediaId': mediaId, 'FromUserName': self.storageClass.userName, 'ToUserName': toUserName, 'LocalID': int((time.time() * 10000.0)), 'ClientMsgId': int((time.time() * 10000.0))}, 'Scene': 0}
    if (fileDir[(- 4):] == '.gif'):
        # Gifs go through the emoticon endpoint with message type 47.
        url = ('%s/webwxsendemoticon?fun=sys' % self.loginInfo['url'])
        data['Msg']['Type'] = 47
        data['Msg']['EmojiFlag'] = 2
    headers = {'User-Agent': self.user_agent, 'Content-Type': 'application/json;charset=UTF-8'}
    r = self.s.post(url, headers=headers, data=json.dumps(data, ensure_ascii=False).encode('utf8'))
    return ReturnValue(rawResponse=r)
@pytest.mark.usefixtures('use_tmpdir')
def test_validate_no_logs_when_overwriting_with_same_value(caplog):
    """Only a private forward-model arg that actually differs from the global
    DEFINE value should be logged; identical overrides stay silent.

    Fix: the decorator line had lost its ``@pytest.mark.`` prefix (bare
    ``.usefixtures(...)`` is a syntax error); restored.
    """
    with open('job_file', 'w', encoding='utf-8') as fout:
        fout.write('EXECUTABLE echo\nARGLIST <VAR1> <VAR2> <VAR3>\n')
    with open('config_file.ert', 'w', encoding='utf-8') as fout:
        fout.write('NUM_REALIZATIONS 1\n')
        fout.write('DEFINE <VAR1> 10\n')
        fout.write('DEFINE <VAR2> 20\n')
        fout.write('DEFINE <VAR3> 55\n')
        fout.write('INSTALL_JOB job_name job_file\n')
        fout.write('FORWARD_MODEL job_name(<VAR1>=10, <VAR2>=<VAR2>, <VAR3>=5)\n')
    with caplog.at_level(logging.INFO):
        ert_conf = ErtConfig.from_file('config_file.ert')
        ert_conf.forward_model_data_to_json('0', '0', 0)
    # <VAR3> differs (5 vs 55) and must be logged; <VAR1>/<VAR2> resolve to
    # the same values as their globals and must not be.
    assert (("Private arg '<VAR3>':'5' chosen over global '55' in forward model job_name" in caplog.text) and ("Private arg '<VAR1>':'10' chosen over global '10' in forward model job_name" not in caplog.text) and ("Private arg '<VAR2>':'20' chosen over global '20' in forward model job_name" not in caplog.text))
class OptionPlotoptionsDependencywheelSonificationTracksMappingPan(Options):
    """Generated option wrapper (dependency-wheel sonification pan mapping).

    Fix: getters and setters were plain same-named methods, so each setter
    definition shadowed its getter (ruff F811); the property/setter
    decorators are restored.
    """

    @property
    def mapFunction(self):
        # Stored config value; None when unset.
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class OptionPlotoptionsSankeySonificationContexttracksMappingPan(Options):
    """Generated option wrapper (sankey sonification context-track pan mapping).

    Fix: getters and setters were plain same-named methods, so each setter
    definition shadowed its getter (ruff F811); the property/setter
    decorators are restored.
    """

    @property
    def mapFunction(self):
        # Stored config value; None when unset.
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def _get_required_dbt_package_version() -> Optional[str]:
    """Return the pinned version of the dbt package from packages.yml,
    or None when it is not listed."""
    manifest_path = os.path.join(PATH, _PACKAGES_FILENAME)
    manifest = OrderedYaml().load(manifest_path)
    for entry in manifest.get('packages', []):
        identifier = entry.get('package')
        # Entries without a 'package' key (e.g. git/local packages) are skipped.
        if identifier and identifier.rsplit('/', 1)[-1] == _DBT_PACKAGE_NAME:
            return entry['version']
    return None
class BaseEventGeneratorNode(Node, metaclass=BaseEventGeneratorNodeMeta):
    """Base node that owns an event generator and records its own start time."""

    def __init__(self) -> None:
        super(BaseEventGeneratorNode, self).__init__()
        # Wall-clock moment this node was constructed.
        self._start_time: float = time()

    def _time_elapsed_since_start(self) -> float:
        """Seconds elapsed since construction."""
        return (time() - self._start_time)

    def setup_generator(self, generator: BaseEventGenerator) -> None:
        """Attach the generator later used by generate_events()."""
        self._generator = generator

    def generate_events(self) -> EventPublishingHeap:
        """Delegate event generation to the configured generator."""
        return self._generator.generate_events()

    async def publish_events(self) -> AsyncPublisher:
        """Abstract: subclasses must implement the actual publishing."""
        raise NotImplementedError()
class OptionTime(Options):
    """Generated wrapper for chart time options.

    Fix: getters and setters were plain same-named methods, so each setter
    definition shadowed its getter (ruff F811); the property/setter
    decorators are restored. Defaults mirror the original getter arguments.
    """

    @property
    def Date(self):
        return self._config_get('undefined')

    @Date.setter
    def Date(self, value: Any):
        self._config(value, js_type=False)

    @property
    def getTimezoneOffset(self):
        return self._config_get('undefined')

    @getTimezoneOffset.setter
    def getTimezoneOffset(self, value: Any):
        self._config(value, js_type=False)

    @property
    def moment(self):
        return self._config_get(None)

    @moment.setter
    def moment(self, value: Any):
        self._config(value, js_type=False)

    @property
    def timezone(self):
        return self._config_get('undefined')

    @timezone.setter
    def timezone(self, text: str):
        self._config(text, js_type=False)

    @property
    def timezoneOffset(self):
        # Default offset is 0 minutes.
        return self._config_get(0)

    @timezoneOffset.setter
    def timezoneOffset(self, num: float):
        self._config(num, js_type=False)

    @property
    def useUTC(self):
        # Defaults to True.
        return self._config_get(True)

    @useUTC.setter
    def useUTC(self, flag: bool):
        self._config(flag, js_type=False)
def test_to_security_item():
    """to_security_item resolves both fully-qualified ids and bare codes for
    stocks (SZ/NASDAQ), futures (SHFE) and crypto pairs (with exchange)."""
    cases = [
        ('stock_sz_000338', 'stock_sz_000338', '000338', None),
        ('000338', 'stock_sz_000338', '000338', None),
        ('stock_nasdaq_MSFT', 'stock_nasdaq_MSFT', 'MSFT', None),
        ('MSFT', 'stock_nasdaq_MSFT', 'MSFT', None),
        ('future_shfe_ag1301', 'future_shfe_ag1301', 'ag1301', None),
        ('ag1301', 'future_shfe_ag1301', 'ag1301', None),
        # Repeated on purpose: lookups must be stable across calls.
        ('future_shfe_ag1301', 'future_shfe_ag1301', 'ag1301', None),
        ('ag1301', 'future_shfe_ag1301', 'ag1301', None),
        ('BTC-USD', 'cryptocurrency_gdax_BTC-USD', 'BTC-USD', 'gdax'),
    ]
    for query, expected_id, expected_code, exchange in cases:
        if exchange is None:
            item = technical.to_security_item(query)
        else:
            item = technical.to_security_item(query, exchange=exchange)
        assert item.id == expected_id
        assert item.code == expected_code
def main():
    """Evaluate an omorfi HFST analyser against a frequency-sorted gold list.

    Input lines are ``freq surf lemma analysis`` (tab separated, first space
    normalized to a tab). Each surface form is looked up in the transducer
    and tallied (frequency-weighted) as full / lemma-only / analysis-only /
    no match / no result. Mismatches go to --output, summary statistics to
    --statistics, and the process exits 1 when the full-match rate drops
    below the hard-coded 90 % regression threshold.
    """
    a = ArgumentParser()
    a.add_argument('-f', '--fsa', metavar='FSAFILE', required=True, help="HFST's optimised lookup binary data for the transducer to be applied")
    a.add_argument('-i', '--input', metavar='INFILE', type=open, required=True, dest='infile', help='source of analysis data')
    a.add_argument('-o', '--output', metavar='OUTFILE', required=True, type=FileType('w'), dest='outfile', help='result file')
    a.add_argument('-X', '--statistics', metavar='STATFILE', type=FileType('w'), dest='statfile', help='statistics')
    a.add_argument('-v', '--verbose', action='store_true', default=False, help='Print verbosely while processing')
    a.add_argument('-C', '--no-casing', action='store_true', default=False, help='Do not try to recase input and output when matching')
    a.add_argument('-a', '--additional-mapping', default='', metavar='MAP', help='Also try using MAP to match analyses and lemmas', choices=['ftb3.1', ''])
    a.add_argument('-c', '--count', metavar='FREQ', default=0, help='test only word-forms with frequency higher than FREQ')
    options = a.parse_args()
    # Load the optimized-lookup transducer.
    his = libhfst.HfstInputStream(options.fsa)
    omorfi = his.read()
    if (not options.statfile):
        options.statfile = stdout
    # All tallies are weighted by corpus frequency, not by 1 per form.
    full_matches = 0
    lemma_matches = 0
    anal_matches = 0
    no_matches = 0
    no_results = 0
    lines = 0
    # deduct_* counters discount known ftb3.1 mapping bugs from the stats.
    deduct_forgn = 0
    deduct_advposman = 0
    deduct_oliprt = 0
    deduct_abbr_prop = 0
    deduct_unkwn = 0
    deduct_lemma = 0
    deduct_matches = 0
    deduct_results = 0
    # Minimum full-match percentage required to pass the regression test.
    threshold = 90
    realstart = perf_counter()
    cpustart = process_time()
    for line in options.infile:
        # Normalize the first space to a tab so 4 fields always split out.
        fields = line.strip().replace(' ', '\t', 1).split('\t')
        if (len(fields) < 4):
            print('ERROR: Skipping line', fields, file=stderr)
            continue
        freq = int(fields[0])
        # Input is sorted by descending frequency; stop below the cutoff.
        if (freq < int(options.count)):
            break
        surf = fields[1]
        lemma = fields[2]
        analysis = fields[3]
        lines += freq
        if options.verbose:
            print(lines, '(', freq, ') ...', end='\r')
        anals = omorfi.lookup(surf)
        if (not options.no_casing):
            # Retry lookups with common recasings of the surface form.
            if surf[0].isupper():
                anals += omorfi.lookup((surf[0].lower() + surf[1:]))
            if surf.isupper():
                anals += omorfi.lookup(surf.lower())
            # NOTE(review): duplicate `surf.isupper()` condition — possibly
            # meant to be a different casing test (e.g. istitle()); confirm.
            if surf.isupper():
                anals += omorfi.lookup((surf[0] + surf[1:].lower()))
        found_anals = False
        found_lemma = False
        print_in = True
        # Substring matching against each analysis string.
        for anal in anals:
            if (analysis in anal[0]):
                found_anals = True
            if (lemma in anal[0]):
                found_lemma = True
            if (not options.no_casing):
                if (lemma.lower() in anal[0]):
                    found_lemma = True
                elif (lemma.upper() in anal[0]):
                    found_lemma = True
        if (len(anals) == 0):
            # No analyses at all for this surface form.
            print_in = False
            no_results += freq
            if (options.additional_mapping == 'ftb3.1'):
                if ('Forgn' in analysis):
                    deduct_forgn += freq
                    deduct_results += freq
                    print_in = False
                elif ('Unkwn' in analysis):
                    deduct_unkwn += freq
                    deduct_results += freq
                    print_in = False
                else:
                    print('NORESULTS:', freq, surf, lemma, anals, sep='\t', file=options.outfile)
                    if options.verbose:
                        print('?', end='', file=stderr)
            else:
                print('NORESULTS:', freq, surf, lemma, anals, sep='\t', file=options.outfile)
                if options.verbose:
                    print('?', end='', file=stderr)
        elif ((not found_anals) and (not found_lemma)):
            # Analyses exist but neither lemma nor analysis matched.
            no_matches += freq
            if (options.additional_mapping == 'ftb3.1'):
                if ('Adv Pos Man' in analysis):
                    deduct_advposman += freq
                    deduct_matches += freq
                    print_in = False
                elif ('Unkwn' in analysis):
                    # NOTE(review): increments by 1 here while every other
                    # deduct branch uses `freq` — confirm whether this is a bug.
                    deduct_unkwn += 1
                    deduct_matches += 1
                    print_in = False
                else:
                    print('NOMATCH:', freq, surf, ((lemma + ' ') + analysis), sep='\t', end='\t', file=options.outfile)
                    if options.verbose:
                        print('!', end='', file=stderr)
            else:
                print('NOMATCH:', freq, surf, ((lemma + ' ') + analysis), sep='\t', end='\t', file=options.outfile)
                if options.verbose:
                    print('!', end='', file=stderr)
        elif (not found_anals):
            # Lemma matched, analysis string did not.
            lemma_matches += freq
            if (options.additional_mapping == 'ftb3.1'):
                if ('Adv Pos Man' in analysis):
                    deduct_advposman += freq
                    deduct_lemma += freq
                    print_in = False
                elif (('V Prt Act' in analysis) and surf.startswith('oli')):
                    deduct_oliprt += freq
                    deduct_lemma += freq
                    print_in = False
                elif ('Forgn' in analysis):
                    deduct_forgn += freq
                    deduct_lemma += freq
                    print_in = False
                elif ('Abbr' in analysis):
                    # Abbr mismatches caused by Abbr Prop analyses are a
                    # known mapping bug; only deduct those.
                    propfail = False
                    for anal in anals:
                        if ('Abbr Prop' in anal[0]):
                            propfail = True
                    if propfail:
                        deduct_abbr_prop += freq
                        deduct_lemma += freq
                        print_in = False
                    else:
                        print('NOANALMATCH:', freq, surf, analysis, sep='\t', end='\t', file=options.outfile)
                        if options.verbose:
                            print('', end='', file=stderr)
                elif ('Unkwn' in analysis):
                    deduct_unkwn += freq
                    deduct_lemma += freq
                    print_in = False
                else:
                    if options.verbose:
                        print('', end='', file=stderr)
                    print('NOANALMATCH:', freq, surf, analysis, sep='\t', end='\t', file=options.outfile)
            else:
                if options.verbose:
                    print('', end='', file=stderr)
                print('NOANALMATCH:', freq, surf, analysis, sep='\t', end='\t', file=options.outfile)
        elif (not found_lemma):
            # Analysis matched, lemma did not.
            anal_matches += freq
            print('NOLEMMAMATCH:', freq, surf, lemma, sep='\t', end='\t', file=options.outfile)
            if options.verbose:
                print('#', end='', file=stderr)
        else:
            # Both lemma and analysis matched.
            if options.verbose:
                print('.', end='', file=stderr)
            full_matches += freq
            print_in = False
        if print_in:
            # Dump the analyses that were considered, for debugging.
            print(':IN:', end='\t', file=options.outfile)
            for anal in anals:
                print(anal[0], end='\t', file=options.outfile)
            print(file=options.outfile)
    realend = perf_counter()
    cpuend = process_time()
    print('CPU time:', (cpuend - cpustart), 'real time:', (realend - realstart))
    # Raw counts, then percentages.
    print('Lines', 'Matches', 'Lemma', 'Anals', 'Mismatch', 'No results', sep='\t', file=options.statfile)
    print(lines, full_matches, lemma_matches, anal_matches, no_matches, no_results, sep='\t', file=options.statfile)
    print((((lines / lines) * 100) if (lines != 0) else 0), (((full_matches / lines) * 100) if (lines != 0) else 0), (((lemma_matches / lines) * 100) if (lines != 0) else 0), (((anal_matches / lines) * 100) if (lines != 0) else 0), (((no_matches / lines) * 100) if (lines != 0) else 0), (((no_results / lines) * 100) if (lines != 0) else 0), sep='\t', file=options.statfile)
    if (options.additional_mapping == 'ftb3.1'):
        # Re-report after subtracting the known ftb3.1 mapping bugs.
        print('Deducting known bugs...\n', 'Forgn:', deduct_forgn, '\nAdv Pos Man:', deduct_advposman, '\noli V Prt Act:', deduct_oliprt, '\nAbbr Prop:', deduct_abbr_prop, '\nUnkwn:', deduct_unkwn, file=options.statfile)
        lines = (((((lines - deduct_forgn) - deduct_advposman) - deduct_oliprt) - deduct_abbr_prop) - deduct_unkwn)
        no_results -= deduct_results
        no_matches -= deduct_matches
        lemma_matches -= deduct_lemma
    if (options.additional_mapping != ''):
        print(lines, full_matches, lemma_matches, anal_matches, no_matches, no_results, sep='\t', file=options.statfile)
        print((((lines / lines) * 100) if (lines != 0) else 0), (((full_matches / lines) * 100) if (lines != 0) else 0), (((lemma_matches / lines) * 100) if (lines != 0) else 0), (((anal_matches / lines) * 100) if (lines != 0) else 0), (((no_matches / lines) * 100) if (lines != 0) else 0), (((no_results / lines) * 100) if (lines != 0) else 0), sep='\t', file=options.statfile)
    if ((lines == 0) or (((full_matches / lines) * 100) < threshold)):
        print('needs to have', threshold, '% matches to pass regress test\n', 'please examine', options.outfile.name, 'for regressions', file=stderr)
        exit(1)
    else:
        exit(0)
def extract_fields(aggregate: Dict, collections: List[Collection]) -> None:
    """Fold every collection's fields into ``aggregate`` in place.

    ``aggregate`` maps collection name -> {field name -> field}; a field
    already present (truthy) is combined with the incoming one via
    merge_fields, otherwise the incoming field is stored as-is.
    """
    for coll in collections:
        bucket = aggregate[coll.name]
        for fld in coll.fields:
            existing = bucket.get(fld.name)
            bucket[fld.name] = merge_fields(existing, fld) if existing else fld
def extractUrbanlegendsclubOrg(item):
    """Map an urbanlegendsclub.org feed item to a release message.

    Returns None for previews / items without a chapter or volume, a built
    release message when a known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if (not (chp or vol)) or ('preview' in item['title'].lower()):
        return None
    # (feed tag, series name, translation type)
    for tag, series_name, tl_type in (('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')):
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def addr_infos():
    """Fixture data: one resolved IPv4 and one IPv6 address, both TCP port 6052."""
    v4 = hr.AddrInfo(
        family=socket.AF_INET,
        type=socket.SOCK_STREAM,
        proto=socket.IPPROTO_TCP,
        sockaddr=hr.IPv4Sockaddr(address='10.0.0.42', port=6052),
    )
    v6 = hr.AddrInfo(
        family=socket.AF_INET6,
        type=socket.SOCK_STREAM,
        proto=socket.IPPROTO_TCP,
        sockaddr=hr.IPv6Sockaddr(address='2001:db8:85a3::8a2e:370:7334', port=6052, flowinfo=0, scope_id=0),
    )
    return [v4, v6]
@pytest.mark.parametrize(
    'val, meant_to_be, encode_fn, decode_fn',
    (
        (ADDRESS_TALLY_PAIRS[0], tuple, encode_address_tally_pair, decode_address_tally_pair),
        (DUMMY_VOTE_1, Vote, encode_vote, decode_vote),
        (DUMMY_VOTE_2, Vote, encode_vote, decode_vote),
        (SNAPSHOT_1, Snapshot, encode_snapshot, decode_snapshot),
        (TRUMP_TALLY, Tally, encode_tally, decode_tally),
        (YANG_TALLY, Tally, encode_tally, decode_tally),
    ),
)
def test_encoding_decoding(val, meant_to_be, encode_fn, decode_fn):
    """Round-trip property: encoding yields bytes and decoding restores an
    equal value of the expected type.

    Fix: the decorator line had lost its ``@pytest.mark.`` prefix (bare
    ``.parametrize(...)`` is a syntax error); restored.
    """
    assert (type(val) is meant_to_be)
    binary = encode_fn(val)
    assert (type(binary) is bytes)
    revived = decode_fn(binary)
    assert (revived == val)
class CargoTomlParser(FileParser):
    """Extracts dependency names from a Cargo.toml manifest."""

    def parse(self, content: str) -> List[str]:
        """Return the names under [dependencies] and [dev-dependencies].

        On a TOML syntax error the error is logged and an empty list is
        returned. Fix: the original's except branch returned ``dependencies``,
        which is unbound when ``toml.loads`` raises before the list is
        created, turning a parse error into an UnboundLocalError.
        """
        dependencies: List[str] = []
        try:
            data = toml.loads(content)
            if ('dependencies' in data):
                dependencies.extend(data.get('dependencies', {}).keys())
            if ('dev-dependencies' in data):
                dependencies.extend(data.get('dev-dependencies', {}).keys())
            return dependencies
        except toml.TomlDecodeError as error:
            self.log_error(TOM_DECODE_ERROR.format(error))
            return []
def _fuse_group_ops_by_type(sorted_graph: List[Tensor], op_type: str, workdir: str=None) -> List[Tensor]:
    """Group ops of ``op_type`` in the graph; when grouping changed anything,
    re-toposort and sanitize before returning."""
    grouped = _group_ops_by_type(sorted_graph, op_type, workdir)
    if not grouped:
        # Nothing was fused; the graph is returned untouched.
        return sorted_graph
    return transform_utils.sanitize_sorted_graph(toposort(sorted_graph))
def analyze(mission):
    """Run the analyzer for ``mission`` and publish the outcome on download_ch.

    Publishes ANALYZE_FINISHED on success, ANALYZE_INVALID when the mission
    asks to pause, and ANALYZE_FAILED (with a traceback printed) on any
    other error. WorkerExit always propagates.
    """
    try:
        Analyzer(mission).analyze()
    except WorkerExit:
        # Worker-shutdown control flow must not be swallowed.
        raise
    except PauseDownloadError as err:
        # Fix: this handler must precede the generic Exception handler —
        # PauseDownloadError subclasses Exception, so in the original order
        # this branch was unreachable (TODO confirm the base class).
        download_ch.pub('ANALYZE_INVALID', (err, mission))
    except Exception as err:
        traceback.print_exc()
        download_ch.pub('ANALYZE_FAILED', (err, mission))
    else:
        download_ch.pub('ANALYZE_FINISHED', mission)
def _access_list_rpc_to_rlp_structure(access_list: Sequence) -> Sequence:
    """Convert a JSON-RPC-shaped access list into RLP tuple structure.

    Each ``{'address': ..., 'storageKeys': [...]}`` entry becomes an
    ``(address, (key, ...))`` pair; the whole result is a tuple.
    Raises ValueError when the input is not RPC-structured.
    """
    if not is_rpc_structured_access_list(access_list):
        raise ValueError('provided object not formatted as JSON-RPC-structured access list')
    return tuple(
        (entry['address'], tuple(entry['storageKeys']))
        for entry in access_list
    )
class OptionPlotoptionsColumnSonificationContexttracksMappingPan(Options):
    """Generated option wrapper (column sonification context-track pan mapping).

    Fix: getters and setters were plain same-named methods, so each setter
    definition shadowed its getter (ruff F811); the property/setter
    decorators are restored.
    """

    @property
    def mapFunction(self):
        # Stored config value; None when unset.
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class FaucetTaggedAndUntaggedSameVlanTest(FaucetTest):
    """One tagged port and three untagged ports sharing VLAN 100 must all
    learn each other and pass broadcast checks."""

    # Topology: 1 tagged host, 3 untagged hosts, one link each.
    N_TAGGED = 1
    N_UNTAGGED = 3
    LINKS_PER_HOST = 1
    CONFIG_GLOBAL = '\nvlans:\n    100:\n        description: "mixed"\n'
    CONFIG = '\n        interfaces:\n            %(port_1)d:\n                tagged_vlans: [100]\n            %(port_2)d:\n                native_vlan: 100\n            %(port_3)d:\n                native_vlan: 100\n            %(port_4)d:\n                native_vlan: 100\n'

    def setUp(self):
        """Build and start the mixed tagged/untagged topology."""
        super().setUp()
        self.topo = self.topo_class(self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid], n_tagged=1, n_untagged=3, links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
        self.start_net()

    def test_untagged(self):
        """Hosts must still intercommunicate after port flaps, and broadcast
        must reach peers but never loop back to the sender."""
        self.ping_all_when_learned()
        self.flap_all_switch_ports()
        self.ping_all_when_learned()
        self.verify_broadcast()
        self.verify_no_bcast_to_self()
class FLCSVDataset(FLDataset):
    """CSV-backed dataset: each row is processed on access via a subclass hook."""

    def __init__(self, path: str):
        # Load the whole CSV into memory once.
        self.data_frame = pd.read_csv(path)

    def __len__(self):
        """Number of rows in the CSV."""
        return len(self.data_frame)

    def __getitem__(self, idx):
        """Return the processed form of row ``idx``."""
        raw_row = self.data_frame.iloc[idx]
        return self._get_processed_row_from_single_raw_row(raw_row)

    def _get_processed_row_from_single_raw_row(self, raw_row: Any) -> Dict[(str, Any)]:
        """Hook for subclasses: turn one raw pandas row into a processed dict.

        NOTE(review): the base implementation is a stub and returns None.
        """
        pass
class RegressionErrorNormality(Metric[RegressionErrorNormalityResults]):
    """Metric producing Q-Q plot data for regression error normality checks."""

    def __init__(self, options: AnyOptions=None):
        super().__init__(options=options)

    def calculate(self, data: InputData) -> RegressionErrorNormalityResults:
        """Build Q-Q plot points and theoretical normal lines for the current
        (and, when present, reference) prediction errors.

        Raises ValueError when target/prediction columns are missing or
        prediction is a list of columns.
        """
        dataset_columns = process_columns(data.current_data, data.column_mapping)
        target_name = dataset_columns.utility_columns.target
        prediction_name = dataset_columns.utility_columns.prediction
        curr_df = data.current_data
        ref_df = data.reference_data
        if ((target_name is None) or (prediction_name is None)):
            raise ValueError("The columns 'target' and 'prediction' columns should be present")
        if (not isinstance(prediction_name, str)):
            raise ValueError('Expect one column for prediction. List of columns was provided.')
        # Aggregate (subsample) plot data unless raw rendering was requested.
        agg_data = True
        if self.get_options().render_options.raw_data:
            agg_data = False
        curr_df = self._make_df_for_plot(curr_df, target_name, prediction_name, None)
        current_error = (curr_df[prediction_name] - curr_df[target_name])
        # probplot with plot=None only returns the Q-Q data, no figure.
        curr_qq_lines = probplot(current_error, dist='norm', plot=None)
        current_theoretical = self._get_theoretical_line(curr_qq_lines)
        current_plot_data = self._get_plot_data(curr_qq_lines, current_error, agg_data)
        reference_theoretical = None
        reference_plot_data = None
        if (ref_df is not None):
            ref_df = self._make_df_for_plot(ref_df, target_name, prediction_name, None)
            reference_error = (ref_df[prediction_name] - ref_df[target_name])
            ref_qq_lines = probplot(reference_error, dist='norm', plot=None)
            reference_theoretical = self._get_theoretical_line(ref_qq_lines)
            reference_plot_data = self._get_plot_data(ref_qq_lines, reference_error, agg_data)
        return RegressionErrorNormalityResults(current_plot=current_plot_data, current_theoretical=current_theoretical, reference_plot=reference_plot_data, reference_theoretical=reference_theoretical)

    def _make_df_for_plot(self, df, target_name: str, prediction_name: str, datetime_column_name: Optional[str]):
        """Drop inf/NaN rows in the needed columns and sort by datetime
        column (when given) or by index."""
        result = df.replace([np.inf, (- np.inf)], np.nan)
        if (datetime_column_name is not None):
            result.dropna(axis=0, how='any', inplace=True, subset=[target_name, prediction_name, datetime_column_name])
            return result.sort_values(datetime_column_name)
        result.dropna(axis=0, how='any', inplace=True, subset=[target_name, prediction_name])
        return result.sort_index()

    def _get_theoretical_line(self, res: Any):
        """Two endpoints of the fitted normal line from probplot's output
        (res[0] = (osm, osr), res[1] = (slope, intercept, r))."""
        x = [res[0][0][0], res[0][0][(- 1)]]
        y = [((res[1][0] * res[0][0][0]) + res[1][1]), ((res[1][0] * res[0][0][(- 1)]) + res[1][1])]
        return pd.DataFrame({'x': x, 'y': y})

    def _get_plot_data(self, res: Any, err_data: pd.Series, agg_data: bool):
        """Q-Q scatter points; when aggregating, subsample up to 100 points
        per error decile for a lighter plot."""
        df = pd.DataFrame({'x': res[0][0], 'y': res[0][1]})
        if (not agg_data):
            return df
        df['bin'] = pd.cut(err_data.sort_values().values, bins=10, labels=False, retbins=False)
        return df.groupby('bin', group_keys=False).apply((lambda x: x.sample(n=min(100, x.shape[0]), random_state=0))).drop('bin', axis=1)
class System_base(_System_base):
    """System options object that backfills defaults the caller omitted."""

    def __init__(self, **args):
        """Copy every default key the caller did not supply from default_so.

        Class-valued defaults are skipped; deep-copy failures are ignored
        (best-effort backfill). Fix: the original used a bare ``except:``,
        which also swallowed SystemExit/KeyboardInterrupt — narrowed to
        ``except Exception``.
        """
        super(System_base, self).__init__(**args)
        for k in (set(system_default_keys) - set(args.keys())):
            v = default_so.__dict__[k]
            # Classes stored as defaults are templates, not values to copy.
            if (not inspect.isclass(v)):
                try:
                    self.__dict__[k] = copy.deepcopy(default_so.__dict__[k])
                except Exception:
                    # Some defaults are not deep-copyable; leave them unset.
                    pass
def test_align_concatenate_to_phylip(o_dir, e_dir, request):
    """Run phyluce_align_concatenate_alignments with --phylip and compare
    every produced file byte-for-byte against the expected directory."""
    program = 'bin/align/phyluce_align_concatenate_alignments'
    output = os.path.join(o_dir, 'mafft-gblocks-clean-concat-phylip')
    cmd = [
        os.path.join(request.config.rootdir, program),
        '--alignments', os.path.join(e_dir, 'mafft-gblocks-clean'),
        '--output', output,
        '--phylip',
    ]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = proc.communicate()
    assert (proc.returncode == 0), print('{}'.format(stderr.decode('utf-8')))
    output_files = glob.glob(os.path.join(output, '*'))
    assert output_files, 'There are no output files'
    for output_file in output_files:
        name = os.path.basename(output_file)
        expected_file = os.path.join(e_dir, 'mafft-gblocks-clean-concat-phylip', name)
        # Fix: close the files deterministically instead of leaking handles
        # via open(...).read().
        with open(output_file) as observed_handle:
            observed = observed_handle.read()
        with open(expected_file) as expected_handle:
            expected = expected_handle.read()
        assert (observed == expected)
class TestLayoutTokensText():
    """Tests for LayoutTokensText's character-range to token mapping."""

    def test_should_select_tokens_based_on_index(self):
        """get_layout_tokens_between maps character offsets in the joined
        text ('token1 token2') back to the source tokens; the separating
        space belongs to neither token."""
        token_1 = LayoutToken(text='token1', whitespace=' ')
        token_2 = LayoutToken(text='token2', whitespace=' ')
        layout_tokens_text = LayoutTokensText(LayoutBlock.for_tokens([token_1, token_2]))
        assert (str(layout_tokens_text) == 'token1 token2')
        # Range inside the first token.
        assert (layout_tokens_text.get_layout_tokens_between(0, 1) == [token_1])
        # Last character of the first token.
        assert (layout_tokens_text.get_layout_tokens_between((len(token_1.text) - 1), len(token_1.text)) == [token_1])
        # The whitespace between tokens matches nothing.
        assert (not layout_tokens_text.get_layout_tokens_between(len(token_1.text), (len(token_1.text) + 1)))
        # First character of the second token.
        assert (layout_tokens_text.get_layout_tokens_between((len(token_1.text) + 1), (len(token_1.text) + 2)) == [token_2])
        # Last character of the second token.
        assert (layout_tokens_text.get_layout_tokens_between((((len(token_1.text) + 1) + len(token_2.text)) - 1), ((len(token_1.text) + 1) + len(token_2.text))) == [token_2])
        # Past the end matches nothing.
        assert (not layout_tokens_text.get_layout_tokens_between(((len(token_1.text) + 1) + len(token_2.text)), (((len(token_1.text) + 1) + len(token_2.text)) + 1)))
def extractTranslatingSloth(item):
    """Map a Translating Sloth feed item to a release message.

    Returns None for previews / items without chapter, volume or fragment,
    a built release message when a known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if (not (chp or vol or frag)) or ('preview' in item['title'].lower()):
        return None
    # (feed tag, series name, translation type); checked in order.
    series_by_tag = [
        ('', "Wife, I Am the Baby's Father", 'translated'),
        ("Wife, I Am the Baby's Father", "Wife, I Am the Baby's Father", 'translated'),
        ('I want to eat meat Wife', 'I want to eat meat Wife', 'translated'),
        ('My Lord is a Stone', 'My Lord is a Stone', 'translated'),
    ]
    for tag, series_name, tl_type in series_by_tag:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class OptionPlotoptionsWaterfallLabel(Options):
    """Generated option wrapper for waterfall series labels.

    Fix: getters and setters were plain same-named methods, so each setter
    definition shadowed its getter (ruff F811); the property/setter
    decorators are restored. Getter arguments preserve the original
    documented defaults.
    """

    @property
    def boxesToAvoid(self):
        return self._config_get(None)

    @boxesToAvoid.setter
    def boxesToAvoid(self, value: Any):
        self._config(value, js_type=False)

    @property
    def connectorAllowed(self):
        # Defaults to False.
        return self._config_get(False)

    @connectorAllowed.setter
    def connectorAllowed(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def connectorNeighbourDistance(self):
        # Defaults to 24.
        return self._config_get(24)

    @connectorNeighbourDistance.setter
    def connectorNeighbourDistance(self, num: float):
        self._config(num, js_type=False)

    @property
    def enabled(self):
        # Defaults to True.
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def format(self):
        return self._config_get('undefined')

    @format.setter
    def format(self, text: str):
        self._config(text, js_type=False)

    @property
    def formatter(self):
        return self._config_get('undefined')

    @formatter.setter
    def formatter(self, value: Any):
        self._config(value, js_type=False)

    @property
    def maxFontSize(self):
        return self._config_get(None)

    @maxFontSize.setter
    def maxFontSize(self, num: float):
        self._config(num, js_type=False)

    @property
    def minFontSize(self):
        return self._config_get(None)

    @minFontSize.setter
    def minFontSize(self, num: float):
        self._config(num, js_type=False)

    @property
    def onArea(self):
        return self._config_get(None)

    @onArea.setter
    def onArea(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def style(self) -> 'OptionPlotoptionsWaterfallLabelStyle':
        # Read-only sub-option object; no setter in the original either.
        return self._config_sub_data('style', OptionPlotoptionsWaterfallLabelStyle)

    @property
    def useHTML(self):
        # Defaults to False.
        return self._config_get(False)

    @useHTML.setter
    def useHTML(self, flag: bool):
        self._config(flag, js_type=False)
class FrontendUtilsTest(TestCase):
    """Unit tests for ``frontend_utils`` formatting helpers.

    Covers ``readable_size`` (human-readable byte counts), ``init_table``
    (PrettyTable construction) and ``format_summary_output`` (row formatting),
    in both plain and snapshot-diff modes.
    """

    def test_readable_size_correct_no_snapshot(self):
        """10 * 1024**i renders as '10.00 <unit>' for each scale step."""
        scales = ['B', 'KB', 'MB', 'GB', 'TB', 'EB']
        original_val = 10
        for i in range(6):
            val = (original_val * (1024 ** i))
            string_val = frontend_utils.readable_size(val)
            # Unit is right-aligned in a 5-character field.
            correct_output = f'10.00{scales[i]:>5}'
            self.assertEqual(string_val, correct_output)

    def test_readable_size_correct_with_snapshot(self):
        """Snapshot mode prefixes a '+' sign to the formatted size."""
        scales = ['B', 'KB', 'MB', 'GB', 'TB', 'EB']
        original_val = 10
        for i in range(6):
            val = (original_val * (1024 ** i))
            string_val = frontend_utils.readable_size(val, True)
            correct_output = f'+10.00{scales[i]:>5}'
            self.assertEqual(string_val, correct_output)

    def test_readable_size_neg_input(self):
        """Negative sizes keep their minus sign."""
        original_val = (- 10)
        string_val = frontend_utils.readable_size(original_val)
        correct_output = '-10.00 B'
        self.assertEqual(string_val, correct_output)

    def test_readable_size_invalid_input(self):
        """Non-numeric input raises TypeError."""
        original_val = '10'
        with self.assertRaises(TypeError):
            frontend_utils.readable_size(original_val)

    def test_init_table_field_names(self):
        """Column names vary with the references/snapshot flags."""
        pt = frontend_utils.init_table(references=False, snapshot=False)
        self.assertEqual(pt.field_names, ['Object', 'Count', 'Size'])
        pt = frontend_utils.init_table(references=False, snapshot=True)
        self.assertEqual(pt.field_names, ['Object', 'Count Diff', 'Size Diff'])
        pt = frontend_utils.init_table(references=True, snapshot=False)
        self.assertEqual(pt.field_names, ['Object', 'Count', 'Size', 'References', 'Backwards References'])
        pt = frontend_utils.init_table(references=True, snapshot=True)
        self.assertEqual(pt.field_names, ['Object', 'Count Diff', 'Size Diff', 'References', 'Backwards References'])

    def test_init_table_alignment(self):
        """Object column is left-aligned, counts/sizes right, references centered."""
        pt = frontend_utils.init_table(references=False, snapshot=False)
        self.assertEqual(pt._align, {'Object': 'l', 'Count': 'r', 'Size': 'r'})
        pt = frontend_utils.init_table(references=True, snapshot=False)
        self.assertEqual(pt._align, {'Object': 'l', 'Count': 'r', 'Size': 'r', 'References': 'c', 'Backwards References': 'c'})
        pt = frontend_utils.init_table(references=True, snapshot=True)
        self.assertEqual(pt._align, {'Object': 'l', 'Count Diff': 'r', 'Size Diff': 'r', 'References': 'c', 'Backwards References': 'c'})
        pt = frontend_utils.init_table(references=False, snapshot=True)
        self.assertEqual(pt._align, {'Object': 'l', 'Count Diff': 'r', 'Size Diff': 'r'})

    def test_format_output_default(self):
        """Rows are sorted (largest first) and sizes rendered human-readable."""
        items = analysis_utils.RetrievedObjects(pid=1234, title='Analysis of pid 1234', data=[['Item 1', 10, 1024], ['Item 2', 1000, 1048576]])
        correct_items = [['Item 2', 1000, '1024.00 KB'], ['Item 1', 10, '1024.00 B']]
        pt = frontend_utils.format_summary_output(items)
        self.assertEqual(pt._rows, correct_items)

    def test_format_output_no_data(self):
        """Missing data produces a single placeholder row."""
        items = analysis_utils.RetrievedObjects(pid=1234, title='Analysis of pid 1234', data=None)
        correct_items = [[f'No data to display for pid 1234.', 0, '0.00 B']]
        pt = frontend_utils.format_summary_output(items)
        self.assertEqual(pt._rows, correct_items)

    def test_format_output_snapshot(self):
        """A 'Snapshot Differences' title switches counts/sizes to +-diff form."""
        items = analysis_utils.RetrievedObjects(pid=1234, title='Snapshot Differences', data=[['Item 1', 10, 1024], ['Item 2', 1000, 1048576]])
        correct_items = [['Item 2', '+1000', '+1024.00 KB'], ['Item 1', '+10', '+1024.00 B']]
        pt = frontend_utils.format_summary_output(items)
        self.assertEqual(pt._rows, correct_items)

    def test_format_output_with_references(self):
        """Rows missing reference columns are padded with empty strings in place."""
        items = analysis_utils.RetrievedObjects(pid=1234, title='Analysis of pid 1234', data=[['Item 1', 10, 1034, 'filename.png', 'filename2.png'], ['Item 2', 1000, 10042]])
        updated_items = [['Item 2', 1000, frontend_utils.readable_size(10042), '', ''], ['Item 1', 10, frontend_utils.readable_size(1034), 'filename.png', 'filename2.png']]
        pt = frontend_utils.format_summary_output(items)
        # format_summary_output mutates items.data in place as well.
        self.assertEqual(pt._rows, items.data)
        self.assertEqual(items.data, updated_items)
class OptionSeriesTreemapSonificationDefaultinstrumentoptionsMappingGapbetweennotes(Options):
    """Wrapper for the Highcharts treemap sonification ``gapBetweenNotes``
    mapping options.

    NOTE(review): each option is a getter/setter pair with the same name;
    the ``@property`` / setter decorators appear to have been stripped from
    this generated code — confirm against the generator's output.
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        # Data property the mapping reads from.
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
@app.route('/get-complex-object', methods=['GET'])
def get_complex_object():
    """Return a canned nested JSON document, or an error stub.

    When the ``returnObject`` query parameter is present and non-empty, a
    deeply nested sample object is returned; otherwise ``{'erro': 'erro'}``.

    NOTE(review): the original source had a bare
    ``('/get-complex-object', methods=['GET'])`` line — the decorator target
    was lost (a syntax error as written). ``@app.route`` is restored on the
    assumption the Flask application object is named ``app``; confirm.
    A leftover debug ``print`` of the flag was removed.
    """
    # Any non-empty query value is treated as truthy; computed once.
    return_requested = bool(request.args.get('returnObject'))
    if return_requested:
        return_object = {'complexObj': [{'id': '0001', 'type': 'donut', 'name': 'Cake', 'ppu': 0.55, 'batters': {'batter': [{'id': '1001', 'type': 'Regular'}, {'id': '1002', 'type': 'Chocolate'}, {'id': '1003', 'type': 'Blueberry'}, {'id': '1004', 'type': "Devil's Food"}]}, 'topping': [{'id': '5001', 'type': 'None'}, {'id': '5002', 'type': 'Glazed'}, {'id': '5005', 'type': 'Sugar'}, {'id': '5007', 'type': 'Powdered Sugar'}, {'id': '5006', 'type': 'Chocolate with Sprinkles'}, {'id': '5003', 'type': 'Chocolate'}, {'id': '5004', 'type': 'Maple'}]}, {'id': '0002', 'type': 'donut', 'name': 'Raised', 'ppu': 0.55, 'batters': {'batter': [{'id': '1001', 'type': 'Regular'}]}, 'topping': [{'id': '5001', 'type': 'None'}, {'id': '5002', 'type': 'Glazed'}, {'id': '5005', 'type': 'Sugar'}, {'id': '5003', 'type': 'Chocolate'}, {'id': '5004', 'type': 'Maple'}]}, {'id': '0003', 'type': 'donut', 'name': 'Old Fashioned', 'ppu': 0.55, 'batters': {'batter': [{'id': '1001', 'type': 'Regular'}, {'id': '1002', 'type': 'Chocolate'}]}, 'topping': [{'id': '5001', 'type': 'None'}, {'id': '5002', 'type': 'Glazed'}, {'id': '5003', 'type': 'Chocolate'}, {'id': '5004', 'type': 'Maple'}]}]}
        return jsonify(return_object)
    return jsonify({'erro': 'erro'})
def test_conversion_of_ai_standard_to_red_shift_material_diffuse_properties(create_pymel, setup_scene):
    """aiStandard diffuse settings carry over to the converted Redshift material."""
    pm = create_pymel
    ai_standard, ai_standard_sg = pm.createSurfaceShader('aiStandard')
    # Target Redshift attribute -> value to set on the Arnold source shader.
    expected = [
        ('diffuse_color', (1, 0.5, 0)),
        ('diffuse_weight', 0.532),
        ('diffuse_roughness', 0.8),
        ('transl_weight', 0.25),
        ('diffuse_direct', 0.95),
        ('diffuse_indirect', 0.89),
    ]
    values = dict(expected)
    ai_standard.color.set(values['diffuse_color'])
    ai_standard.Kd.set(values['diffuse_weight'])
    ai_standard.diffuseRoughness.set(values['diffuse_roughness'])
    ai_standard.Kb.set(values['transl_weight'])
    ai_standard.directDiffuse.set(values['diffuse_direct'])
    ai_standard.indirectDiffuse.set(values['diffuse_indirect'])
    rs_material = ai2rs.ConversionManager().convert(ai_standard)
    for attr_name, value in expected:
        assert getattr(rs_material, attr_name).get() == pytest.approx(value, abs=0.001)
class LayoutMapBoxLayer(OptPlotly.Layout):
    """Plotly mapbox layer layout options, backed by the ``_attrs`` dict.

    NOTE(review): each option appears as a getter/setter pair with the same
    name; the ``@property`` / setter decorators appear to have been stripped
    from this generated wrapper — confirm against the generator's output.
    """

    def sourcetype(self):
        return self._attrs['sourcetype']

    def sourcetype(self, val):
        self._attrs['sourcetype'] = val

    def source(self):
        return self._attrs['source']

    def source(self, val):
        self._attrs['source'] = val

    def below(self):
        return self._attrs['below']

    def below(self, val):
        self._attrs['below'] = val
class TestUCSSerialize(util.ColorAssertsPyTest):
    """Serialization tests for the ``--ucs`` color space.

    Each COLORS entry is ``(input color, to_string() options, expected output)``.
    """

    COLORS = [
        ('color(--ucs 0.75 0.1 0.1 / 0.5)', {}, 'color(--ucs 0.75 0.1 0.1 / 0.5)'),
        ('color(--ucs 0.75 0.1 0.1)', {'alpha': True}, 'color(--ucs 0.75 0.1 0.1 / 1)'),
        ('color(--ucs 0.75 0.1 0.1 / 0.5)', {'alpha': False}, 'color(--ucs 0.75 0.1 0.1)'),
        ('color(--ucs none 0.1 0.1)', {}, 'color(--ucs 0 0.1 0.1)'),
        ('color(--ucs none 0.1 0.1)', {'none': True}, 'color(--ucs none 0.1 0.1)'),
        ('color(--ucs 0.75 1.2 0.1)', {}, 'color(--ucs 0.75 1.2 0.1)'),
        ('color(--ucs 0.75 1.2 0.1)', {'fit': False}, 'color(--ucs 0.75 1.2 0.1)')
    ]

    # NOTE(review): the original had a bare ``.parametrize(...)`` line — the
    # decorator target was lost (a syntax error as written).
    # ``@pytest.mark.parametrize`` is the standard form for this suite;
    # confirm the file imports pytest.
    @pytest.mark.parametrize('color1,options,color2', COLORS)
    def test_colors(self, color1, options, color2):
        """Serialize ``color1`` with ``options`` and expect ``color2``."""
        self.assertEqual(Color(color1).to_string(**options), color2)
class VarCompositeTest(unittest.TestCase):
    """Round-trip a variable-composite font through WOFF2 and back to TTF."""

    def test_var_composite(self):
        """Saving as woff2 and re-saving as plain TTF must not raise."""
        source_path = os.path.join(data_dir, 'varc-ac00-ac01.ttf')
        font = ttLib.TTFont(source_path)
        # First pass: compress to WOFF2 in memory.
        font.flavor = 'woff2'
        woff2_buffer = BytesIO()
        font.save(woff2_buffer)
        # Second pass: reload the WOFF2 data and save as plain TTF.
        font = ttLib.TTFont(woff2_buffer)
        font.flavor = None
        ttf_buffer = BytesIO()
        font.save(ttf_buffer)
class DataMonitoring():
    """Orchestrates Elementary monitoring runs via the internal dbt package.

    On construction it prepares the internal dbt runner, makes sure the
    bundled dbt package is installed and up to date, and collects metadata
    (latest invocation, warehouse info) used for tracking and reporting.
    """

    def __init__(self, config: Config, tracking: Optional[Tracking]=None, force_update_dbt_package: bool=False, disable_samples: bool=False, selector_filter: FiltersSchema=FiltersSchema()):
        # NOTE(review): ``FiltersSchema()`` as a default is evaluated once at
        # definition time and shared across instances — safe only if it is
        # never mutated; confirm FiltersSchema is treated as immutable.
        self.execution_properties: Dict[str, Any] = {}
        self.config = config
        self.tracking = tracking
        self.internal_dbt_runner = self._init_internal_dbt_runner()
        self._download_dbt_package_if_needed(force_update_dbt_package)
        latest_invocation = self.get_latest_invocation()
        self.project_name = latest_invocation.get('project_name')
        dbt_pkg_version = latest_invocation.get('elementary_version')
        # Hash the warehouse id when tracking is anonymous so no raw
        # identifier leaves the machine.
        self.warehouse_info = self._get_warehouse_info(hash_id=isinstance(tracking, AnonymousTracking))
        if tracking:
            if self.warehouse_info:
                tracking.register_group('warehouse', self.warehouse_info.id, self.warehouse_info.dict())
            tracking.set_env('target_name', latest_invocation.get('target_name'))
            tracking.set_env('dbt_orchestrator', latest_invocation.get('orchestrator'))
            tracking.set_env('dbt_version', latest_invocation.get('dbt_version'))
            tracking.set_env('dbt_pkg_version', dbt_pkg_version)
        if dbt_pkg_version:
            self._check_dbt_package_compatibility(dbt_pkg_version)
        self.elementary_database_and_schema = self.get_elementary_database_and_schema()
        self.success = True
        self.disable_samples = disable_samples
        self.selector_filter = selector_filter

    def _init_internal_dbt_runner(self):
        """Build the dbt runner pointed at the bundled internal project."""
        internal_dbt_runner = DbtRunner(dbt_project_utils.PATH, self.config.profiles_dir, self.config.profile_target, env_vars=self.config.env_vars)
        return internal_dbt_runner

    def _download_dbt_package_if_needed(self, force_update_dbt_packages: bool):
        """Run ``dbt deps`` for the internal package when stale or forced.

        On failure, logs the error and marks ``self.success = False`` rather
        than raising.
        """
        internal_dbt_package_up_to_date = dbt_project_utils.is_dbt_package_up_to_date()
        self.execution_properties['dbt_package_up_to_date'] = internal_dbt_package_up_to_date
        self.execution_properties['force_update_dbt_packages'] = force_update_dbt_packages
        if (not internal_dbt_package_up_to_date) or force_update_dbt_packages:
            logger.info('Downloading edr internal dbt package')
            package_downloaded = self.internal_dbt_runner.deps()
            self.execution_properties['package_downloaded'] = package_downloaded
            if not package_downloaded:
                logger.error('Could not download internal dbt package')
                self.success = False
                return

    def properties(self):
        """Return execution properties wrapped for telemetry reporting."""
        data_monitoring_properties = {'data_monitoring_properties': self.execution_properties}
        return data_monitoring_properties

    def get_elementary_database_and_schema(self):
        """Return '<database>.<schema>' for Elementary's tables.

        Falls back to a placeholder string (and records the exception) when
        the dbt operation fails.
        """
        try:
            relation = self.internal_dbt_runner.run_operation('elementary_cli.get_elementary_database_and_schema', quiet=True)[0]
            logger.info(f"Elementary's database and schema: '{relation}'")
            return relation
        except Exception as ex:
            logger.error("Failed to parse Elementary's database and schema.")
            if self.tracking:
                self.tracking.record_internal_exception(ex)
            return '<elementary_database>.<elementary_schema>'

    def get_latest_invocation(self) -> Dict[str, Any]:
        """Return the most recent dbt invocation row, or {} on any failure."""
        try:
            latest_invocation = self.internal_dbt_runner.run_operation('elementary_cli.get_latest_invocation', quiet=True)[0]
            return (json.loads(latest_invocation)[0] if latest_invocation else {})
        except Exception as err:
            logger.error(f'Unable to get the latest invocation: {err}')
            if self.tracking:
                self.tracking.record_internal_exception(err)
            return {}

    @staticmethod
    def _check_dbt_package_compatibility(dbt_pkg_ver_str: str) -> None:
        """Warn when the edr CLI and dbt package major/minor versions diverge.

        Bug fix: this method takes no ``self`` yet is invoked as
        ``self._check_dbt_package_compatibility(dbt_pkg_version)`` in
        ``__init__``; without ``@staticmethod`` the bound call passes the
        instance as ``dbt_pkg_ver_str`` and raises TypeError. The decorator
        (apparently lost) is restored.
        """
        py_pkg_ver_str = package.get_package_version()
        if py_pkg_ver_str is None:
            logger.warning('Could not get package version!')
            return
        dbt_pkg_ver = cast(version.Version, version.parse(dbt_pkg_ver_str))
        py_pkg_ver = cast(version.Version, version.parse(py_pkg_ver_str))
        # dbt package newer than the CLI -> upgrade the CLI.
        if (dbt_pkg_ver.major > py_pkg_ver.major) or ((dbt_pkg_ver.major == py_pkg_ver.major) and (dbt_pkg_ver.minor > py_pkg_ver.minor)):
            logger.warning(f'''You are using incompatible versions between edr ({py_pkg_ver}) and Elementary's dbt package ({dbt_pkg_ver}).
To fix please run:
pip install --upgrade elementary-data
''')
            return
        # dbt package older than the CLI -> upgrade the dbt package.
        if (dbt_pkg_ver.major < py_pkg_ver.major) or ((dbt_pkg_ver.major == py_pkg_ver.major) and (dbt_pkg_ver.minor < py_pkg_ver.minor)):
            logger.warning(f'''You are using incompatible versions between edr ({py_pkg_ver}) and Elementary's dbt package ({dbt_pkg_ver}).
To fix please update your packages.yml, and run:
dbt deps && dbt run --select elementary
''')
            return
        logger.info(f"edr ({py_pkg_ver}) and Elementary's dbt package ({dbt_pkg_ver}) are compatible.")

    def _get_warehouse_info(self, hash_id: bool=False) -> Optional[WarehouseInfo]:
        """Return warehouse type/id, hashing the id when ``hash_id`` is True.

        Returns None (and logs at debug level) when the dbt operation fails.
        """
        try:
            warehouse_type, warehouse_unique_id = json.loads(self.internal_dbt_runner.run_operation('elementary_cli.get_adapter_type_and_unique_id', quiet=True)[0])
            # NOTE(review): ``hash`` on str is salted per process — the hashed
            # id is not stable across runs; confirm that is acceptable here.
            return WarehouseInfo(id=(warehouse_unique_id if (not hash_id) else hash(warehouse_unique_id)), type=warehouse_type)
        except Exception:
            logger.debug('Could not get warehouse info.', exc_info=True)
            return None
class _Wizard(QtGui.QWizard):
    """QWizard subclass that keeps a pyface wizard's controller in sync.

    Maintains a mapping from Qt page ids to pyface pages and delegates page
    ordering decisions to the pyface controller.
    """

    def __init__(self, parent, pyface_wizard):
        QtGui.QWizard.__init__(self, parent)
        self._pyface_wizard = pyface_wizard
        self._controller = pyface_wizard.controller
        # Qt page id -> pyface page.
        self._ids = {}
        self.currentIdChanged.connect(self._update_controller)

    def addWizardPage(self, page):
        """Create the Qt page for ``page`` and register it."""
        qpage = page.create_page(self)
        qpage.pyface_wizard = self._pyface_wizard
        self._ids[self.addPage(qpage)] = page

    def setStartWizardPage(self):
        """Make the controller's first page the wizard's start page."""
        first_id = self._page_to_id(self._controller.get_first_page())
        if first_id >= 0:
            self.setStartId(first_id)

    def nextId(self):
        """Ask the controller which page follows the current one."""
        current_id = self.currentId()
        if current_id < 0:
            # Not started yet: next page is the controller's first page.
            return self._page_to_id(self._controller.get_first_page())
        following = self._controller.get_next_page(self._ids[current_id])
        return self._page_to_id(following)

    def _update_controller(self, id):
        """Keep the controller's notion of the current page up to date."""
        self._controller.current_page = self._ids.get(id)

    def _page_to_id(self, page):
        """Return the Qt id registered for ``page``, or -1 if unknown/None."""
        if page is None:
            return -1
        return next((page_id for page_id, candidate in self._ids.items() if candidate is page), -1)
class Sum(AST):
    """ASDL sum type: a set of constructor types plus optional shared attributes."""

    def __init__(self, types, attributes=None):
        self.types = types
        # Normalize so ``attributes`` is always a list.
        self.attributes = attributes or []

    def __repr__(self):
        # Bug fix: __init__ normalizes ``attributes`` to a list, so the old
        # ``if self.attributes is None`` check was a dead branch and the
        # short form was never printed. Use truthiness so attribute-less
        # sums render as ``Sum(types)``.
        if self.attributes:
            return 'Sum(%s, %s)' % (self.types, self.attributes)
        return 'Sum(%s)' % self.types
class NameTableTest(unittest.TestCase):
    """Unit tests for the OpenType ``name`` table implementation.

    Exercises record lookup, (re)setting, removal, multilingual name
    handling (including the ``ltag`` fallback), and decompilation of
    malformed data.

    Bug fix: ``_get_test_names`` was defined without ``self`` yet called as
    ``self._get_test_names()`` — as written the bound call raises TypeError.
    Its (apparently lost) ``@staticmethod`` decorator is restored; all
    other code is unchanged apart from added documentation.
    """

    def test_getDebugName(self):
        """getDebugName returns the first matching record's string, or None."""
        table = table__n_a_m_e()
        table.names = [makeName('Bold', 258, 1, 0, 0), makeName('Gras', 258, 1, 0, 1), makeName('Fett', 258, 1, 0, 2), makeName('Sem Fraccoes', 292, 1, 0, 8)]
        self.assertEqual('Bold', table.getDebugName(258))
        self.assertEqual('Sem Fraccoes', table.getDebugName(292))
        self.assertEqual(None, table.getDebugName(999))

    def test_setName(self):
        """setName replaces an existing record instead of duplicating it."""
        table = table__n_a_m_e()
        table.setName('Regular', 2, 1, 0, 0)
        table.setName('Version 1.000', 5, 3, 1, 1033)
        table.setName('', 276, 1, 2, 19)
        self.assertEqual('Regular', table.getName(2, 1, 0, 0).toUnicode())
        self.assertEqual('Version 1.000', table.getName(5, 3, 1, 1033).toUnicode())
        self.assertEqual('', table.getName(276, 1, 2, 19).toUnicode())
        self.assertTrue(len(table.names) == 3)
        # Re-setting an identical record must not add a new entry.
        table.setName('', 276, 1, 2, 19)
        self.assertEqual('', table.getName(276, 1, 2, 19).toUnicode())
        self.assertTrue(len(table.names) == 3)
        # bytes input warns; non-string input raises TypeError.
        with CapturingLogHandler(log, 'WARNING') as captor:
            table.setName(b'abc', 0, 1, 0, 0)
        self.assertTrue(len([r for r in captor.records if ('string is bytes' in r.msg)]) == 1)
        with self.assertRaises(TypeError):
            table.setName(1.0, 5, 1, 0, 0)

    def test_names_sort_bytes_str(self):
        """Mixed bytes/str name strings must still compile (sortable)."""
        table = table__n_a_m_e()
        table.names = [makeName('Test', 25, 3, 1, 1033), makeName('Test'.encode('utf-16be'), 25, 3, 1, 1033)]
        table.compile(None)

    def test_names_sort_attributes(self):
        """A record missing a sort attribute makes sorting raise TypeError."""
        table = table__n_a_m_e()
        broken = makeName('Test', 25, 3, 1, 1033)
        delattr(broken, 'platformID')
        table.names = [makeName('Test', 25, 3, 1, 1033), broken]
        with self.assertRaises(TypeError):
            table.names.sort()

    def test_names_sort_encoding(self):
        """Records that fail to encode sort after those that encode cleanly."""
        table = table__n_a_m_e()
        table.names = [makeName('Mac Unicode encodes ok', 25, 3, 0, 1033), makeName('Win Latin fails to encode', 25, 1, 0, 0)]
        table.names.sort()
        self.assertEqual(table.names[0].platformID, 1)
        self.assertEqual(table.names[1].platformID, 3)

    def test_addName(self):
        """addName allocates ids from 256 and adds Mac+Windows records."""
        table = table__n_a_m_e()
        nameIDs = []
        for string in ('Width', 'Weight', 'Custom'):
            nameIDs.append(table.addName(string))
        self.assertEqual(nameIDs[0], 256)
        self.assertEqual(nameIDs[1], 257)
        self.assertEqual(nameIDs[2], 258)
        # Two records (one per platform) per added name.
        self.assertEqual(len(table.names), 6)
        self.assertEqual(table.names[0].string, 'Width')
        self.assertEqual(table.names[1].string, 'Width')
        self.assertEqual(table.names[2].string, 'Weight')
        self.assertEqual(table.names[3].string, 'Weight')
        self.assertEqual(table.names[4].string, 'Custom')
        self.assertEqual(table.names[5].string, 'Custom')
        with self.assertRaises(ValueError):
            table.addName('Invalid nameID', minNameID=32767)
        with self.assertRaises(TypeError):
            table.addName(b'abc')

    def test_removeNames(self):
        """removeNames filters by any combination of the four record keys."""
        # Remove by nameID across platforms.
        table = table__n_a_m_e()
        table.setName('Regular', 2, 1, 0, 0)
        table.setName('Regular', 2, 3, 1, 1033)
        table.removeNames(nameID=2)
        self.assertEqual(table.names, [])
        # Remove by platformID only.
        table = table__n_a_m_e()
        table.setName('FamilyName', 1, 1, 0, 0)
        table.setName('Regular', 2, 1, 0, 0)
        table.setName('FamilyName', 1, 3, 1, 1033)
        table.setName('Regular', 2, 3, 1, 1033)
        table.removeNames(platformID=1)
        self.assertEqual(len(table.names), 2)
        self.assertIsNone(table.getName(1, 1, 0, 0))
        self.assertIsNone(table.getName(2, 1, 0, 0))
        rec1 = table.getName(1, 3, 1, 1033)
        self.assertEqual(str(rec1), 'FamilyName')
        rec2 = table.getName(2, 3, 1, 1033)
        self.assertEqual(str(rec2), 'Regular')
        # Remove by nameID, keeping other ids.
        table = table__n_a_m_e()
        table.setName('FamilyName', 1, 1, 0, 0)
        table.setName('Regular', 2, 1, 0, 0)
        table.removeNames(nameID=1)
        self.assertEqual(len(table.names), 1)
        self.assertIsNone(table.getName(1, 1, 0, 0))
        rec = table.getName(2, 1, 0, 0)
        self.assertEqual(str(rec), 'Regular')
        # Remove by fully-qualified record key.
        table = table__n_a_m_e()
        table.setName('FamilyName', 1, 1, 0, 0)
        table.setName('Regular', 2, 1, 0, 0)
        table.removeNames(2, 1, 0, 0)
        self.assertEqual(len(table.names), 1)
        self.assertIsNone(table.getName(2, 1, 0, 0))
        rec = table.getName(1, 1, 0, 0)
        self.assertEqual(str(rec), 'FamilyName')
        # No filter arguments -> no-op.
        table = table__n_a_m_e()
        table.setName('FamilyName', 1, 1, 0, 0)
        table.setName('Regular', 2, 1, 0, 0)
        table.removeNames()
        self.assertEqual(len(table.names), 2)
        rec1 = table.getName(1, 1, 0, 0)
        self.assertEqual(str(rec1), 'FamilyName')
        rec2 = table.getName(2, 1, 0, 0)
        self.assertEqual(str(rec2), 'Regular')

    @staticmethod
    def _get_test_names():
        """Return (names, a strict subset, a strict superset) fixtures."""
        names = {'en': 'Width', 'de-CH': 'Breite', 'gsw-LI': 'Braiti'}
        namesSubSet = names.copy()
        del namesSubSet['gsw-LI']
        namesSuperSet = names.copy()
        namesSuperSet['nl'] = 'Breedte'
        return (names, namesSubSet, namesSuperSet)

    def test_findMultilingualName(self):
        """Exact and subset matches are found; supersets are not."""
        table = table__n_a_m_e()
        (names, namesSubSet, namesSuperSet) = self._get_test_names()
        nameID = table.addMultilingualName(names)
        assert (nameID is not None)
        self.assertEqual(nameID, table.findMultilingualName(names))
        self.assertEqual(nameID, table.findMultilingualName(namesSubSet))
        self.assertEqual(None, table.findMultilingualName(namesSuperSet))

    def test_findMultilingualName_compiled(self):
        """Lookup still works after a compile/decompile round trip."""
        table = table__n_a_m_e()
        (names, namesSubSet, namesSuperSet) = self._get_test_names()
        nameID = table.addMultilingualName(names)
        assert (nameID is not None)
        data = table.compile(None)
        table = table__n_a_m_e()
        table.decompile(data, None)
        self.assertEqual(nameID, table.findMultilingualName(names))
        self.assertEqual(nameID, table.findMultilingualName(namesSubSet))
        self.assertEqual(None, table.findMultilingualName(namesSuperSet))

    def test_addMultilingualNameReuse(self):
        """Adding the same (or subset) names reuses the existing nameID."""
        table = table__n_a_m_e()
        (names, namesSubSet, namesSuperSet) = self._get_test_names()
        nameID = table.addMultilingualName(names)
        assert (nameID is not None)
        self.assertEqual(nameID, table.addMultilingualName(names))
        self.assertEqual(nameID, table.addMultilingualName(namesSubSet))
        self.assertNotEqual(None, table.addMultilingualName(namesSuperSet))

    def test_findMultilingualNameNoMac(self):
        """mac=False lookups only match entries added with mac=False."""
        table = table__n_a_m_e()
        (names, namesSubSet, namesSuperSet) = self._get_test_names()
        nameID = table.addMultilingualName(names, mac=False)
        assert (nameID is not None)
        self.assertEqual(nameID, table.findMultilingualName(names, mac=False))
        self.assertEqual(None, table.findMultilingualName(names))
        self.assertEqual(nameID, table.findMultilingualName(namesSubSet, mac=False))
        self.assertEqual(None, table.findMultilingualName(namesSubSet))
        self.assertEqual(None, table.findMultilingualName(namesSuperSet))

    def test_addMultilingualName(self):
        """Unsupported Windows languages fall back to Mac records + ltag."""
        font = FakeFont(glyphs=['.notdef', 'A'])
        nameTable = font.tables['name'] = newTable('name')
        with CapturingLogHandler(log, 'WARNING') as captor:
            widthID = nameTable.addMultilingualName({'en': 'Width', 'de-CH': 'Breite', 'gsw-LI': 'Braiti'}, ttFont=font, mac=False)
            self.assertEqual(widthID, 256)
            xHeightID = nameTable.addMultilingualName({'en': 'X-Height', 'gsw-LI': 'X-Hoochi'}, ttFont=font, mac=False)
            self.assertEqual(xHeightID, 257)
        captor.assertRegex('cannot add Windows name in language gsw-LI')
        self.assertEqual(names(nameTable), [(256, 0, 4, 0, 'Braiti'), (256, 3, 1, 1033, 'Width'), (256, 3, 1, 2055, 'Breite'), (257, 0, 4, 0, 'X-Hoochi'), (257, 3, 1, 1033, 'X-Height')])
        self.assertEqual(set(font.tables.keys()), {'ltag', 'name'})
        self.assertEqual(font['ltag'].tags, ['gsw-LI'])

    def test_addMultilingualName_legacyMacEncoding(self):
        """Languages with a legacy Mac encoding need no ltag table."""
        font = FakeFont(glyphs=['.notdef', 'A'])
        nameTable = font.tables['name'] = newTable('name')
        with CapturingLogHandler(log, 'WARNING') as captor:
            nameTable.addMultilingualName({'la': 'SPQR'}, ttFont=font)
        captor.assertRegex('cannot add Windows name in language la')
        self.assertEqual(names(nameTable), [(256, 1, 0, 131, 'SPQR')])
        self.assertNotIn('ltag', font.tables.keys())

    def test_addMultilingualName_legacyMacEncodingButUnencodableName(self):
        """Unencodable strings fall back to Unicode records plus ltag."""
        font = FakeFont(glyphs=['.notdef', 'A'])
        nameTable = font.tables['name'] = newTable('name')
        with CapturingLogHandler(log, 'WARNING') as captor:
            nameTable.addMultilingualName({'la': 'Q'}, ttFont=font)
        captor.assertRegex('cannot add Windows name in language la')
        self.assertEqual(names(nameTable), [(256, 0, 4, 0, 'Q')])
        self.assertIn('ltag', font.tables)
        self.assertEqual(font['ltag'].tags, ['la'])

    def test_addMultilingualName_legacyMacEncodingButNoCodec(self):
        """Languages without an available Mac codec also use Unicode + ltag."""
        font = FakeFont(glyphs=['.notdef', 'A'])
        nameTable = font.tables['name'] = newTable('name')
        with CapturingLogHandler(log, 'WARNING') as captor:
            nameTable.addMultilingualName({'az-Arab': ' '}, ttFont=font)
        captor.assertRegex('cannot add Windows name in language az-Arab')
        self.assertEqual(names(nameTable), [(256, 0, 4, 0, ' ')])
        self.assertIn('ltag', font.tables)
        self.assertEqual(font['ltag'].tags, ['az-Arab'])

    def test_addMultilingualName_noTTFont(self):
        """Without a ttFont, non-storable languages only log a warning."""
        nameTable = newTable('name')
        with CapturingLogHandler(log, 'WARNING') as captor:
            nameTable.addMultilingualName({'en': 'A', 'la': 'Q'})
        captor.assertRegex("cannot store language la into 'ltag' table")

    def test_addMultilingualName_TTFont(self):
        """With a ttFont, supported languages add silently (no warnings)."""
        font = FakeFont(glyphs=['.notdef', 'A'])
        nameTable = newTable('name')
        with CapturingLogHandler(log, 'WARNING') as captor:
            nameTable.addMultilingualName({'en': 'A', 'ar': ''}, ttFont=font)
        self.assertFalse(captor.records)

    def test_addMultilingualName_minNameID(self):
        """Explicit nameID is honored; minNameID forces a fresh allocation."""
        table = table__n_a_m_e()
        (names, namesSubSet, namesSuperSet) = self._get_test_names()
        nameID = table.addMultilingualName(names, nameID=2)
        self.assertEqual(nameID, 2)
        nameID = table.addMultilingualName(names)
        self.assertEqual(nameID, 2)
        nameID = table.addMultilingualName(names, minNameID=256)
        self.assertGreaterEqual(nameID, 256)
        self.assertEqual(nameID, table.findMultilingualName(names, minNameID=256))

    def test_addMultilingualName_name_inconsistencies(self):
        """Allocation skips past existing records with higher nameIDs."""
        table = table__n_a_m_e()
        table.setName('Weight', 270, 3, 1, 1033)
        names = {'en': 'Weight'}
        nameID = table.addMultilingualName(names, minNameID=256)
        self.assertEqual(271, nameID)

    def test_decompile_badOffset(self):
        """Records with out-of-bounds string offsets are dropped on decompile."""
        table = table__n_a_m_e()
        badRecord = {'platformID': 1, 'platEncID': 3, 'langID': 7, 'nameID': 1, 'length': 3, 'offset': 8765}
        data = bytesjoin([struct.pack(tostr('>HHH'), 1, 1, (6 + nameRecordSize)), sstruct.pack(nameRecordFormat, badRecord)])
        table.decompile(data, ttFont=None)
        self.assertEqual(table.names, [])
class OptionPlotoptionsArearangeStatesHoverMarker(Options):
    """Wrapper for the Highcharts ``plotOptions.arearange.states.hover.marker``
    options.

    NOTE(review): each option is a getter/setter pair with the same name;
    the ``@property`` / setter decorators appear to have been stripped from
    this generated code — confirm against the generator's output.
    """

    def enabled(self):
        return self._config_get(None)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def enabledThreshold(self):
        # Default threshold: 2.
        return self._config_get(2)

    def enabledThreshold(self, num: float):
        self._config(num, js_type=False)

    def fillColor(self):
        return self._config_get(None)

    def fillColor(self, text: str):
        self._config(text, js_type=False)

    def height(self):
        return self._config_get(None)

    def height(self, num: float):
        self._config(num, js_type=False)

    def lineColor(self):
        # Default marker outline color: white.
        return self._config_get('#ffffff')

    def lineColor(self, text: str):
        self._config(text, js_type=False)

    def lineWidth(self):
        return self._config_get(0)

    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    def radius(self):
        # Default marker radius (px): 4.
        return self._config_get(4)

    def radius(self, num: float):
        self._config(num, js_type=False)

    def width(self):
        return self._config_get(None)

    def width(self, num: float):
        self._config(num, js_type=False)
def test_group_summaries_by_folder():
    """Summaries are bucketed by the top-level folder of each module path."""
    module_summaries = [('folder1/module1.py', 'Summary 1'), ('folder2/module2.py', 'Summary 2')]
    expected = {
        'folder1': [('folder1/module1.py', 'Summary 1')],
        'folder2': [('folder2/module2.py', 'Summary 2')],
    }
    assert group_summaries_by_folder(module_summaries) == expected
def test_sort():
    """A Sort model survives a round trip through its flyte IDL form."""
    original = _common.Sort(key='abc', direction=_common.Sort.Direction.ASCENDING)
    assert original.key == 'abc'
    assert original.direction == _common.Sort.Direction.ASCENDING
    restored = _common.Sort.from_flyte_idl(original.to_flyte_idl())
    assert restored == original
    assert restored.key == 'abc'
    assert restored.direction == _common.Sort.Direction.ASCENDING
class ComponentManager(Service):
    """Runs the main event-bus endpoint and all enabled isolated components.

    Serves the main ``AsyncioEndpoint``, launches each enabled component in
    its own task, and tears everything down when ``shutdown`` is called or
    any component exits first.
    """

    logger = logging.getLogger('trinity.extensibility.component_manager.ComponentManager')
    # Main event-bus endpoint; assigned inside run() once serving starts.
    _endpoint: EndpointAPI
    # Human-readable shutdown reason, set by shutdown().
    reason = None

    def __init__(self, boot_info: BootInfo, component_types: Sequence[Type[BaseIsolatedComponent]]) -> None:
        self._boot_info = boot_info
        self._component_types = component_types
        # Set once self._endpoint is usable by callers of get_event_bus().
        self._endpoint_available = asyncio.Event()
        # Set by shutdown() so run() stops waiting on its component tasks.
        self._trigger_component_exit = asyncio.Event()

    async def get_event_bus(self) -> EndpointAPI:
        # Blocks until run() has started serving the endpoint.
        (await self._endpoint_available.wait())
        return self._endpoint

    async def run(self) -> None:
        """Serve the event bus, run enabled components, stop on first exit."""
        connection_config = ConnectionConfig.from_name(MAIN_EVENTBUS_ENDPOINT, self._boot_info.trinity_config.ipc_dir)
        async with AsyncioEndpoint.serve(connection_config) as endpoint:
            self._endpoint = endpoint
            self.manager.run_daemon_task(self._track_and_propagate_available_endpoints)
            # Only announce availability once something is listening.
            (await endpoint.wait_until_any_endpoint_subscribed_to(EventBusConnected))
            self._endpoint_available.set()
            all_components = tuple((component_cls(self._boot_info) for component_cls in self._component_types))
            enabled_components = tuple((component for component in all_components if component.is_enabled))
            # Imported here, presumably to avoid a module-level import cycle.
            from p2p.asyncio_utils import create_task, wait_first
            try:
                self.logger.info('Starting components: %s', '/'.join((component.name for component in enabled_components)))
                tasks: List[asyncio.Task[Any]] = []
                for component in enabled_components:
                    tasks.append(create_task(component.run_in_process(), f'IsolatedComponent/{component.name}/run_in_process'))
                # The exit-trigger event competes with the component tasks:
                # whichever finishes first ends the wait below.
                tasks.append(asyncio.create_task(self._trigger_component_exit.wait()))
                self.logger.info('Components started')
                try:
                    (await wait_first(tasks, max_wait_after_cancellation=10))
                finally:
                    self.logger.info('Stopping components')
            finally:
                self.logger.info('Components stopped.')
                self.manager.cancel()

    def shutdown(self, reason: str) -> None:
        """Record the reason and signal run() to stop all components."""
        self.logger.info('Shutting down, reason: %s', reason)
        self.reason = reason
        self._trigger_component_exit.set()

    # Snapshot of every endpoint seen so far; rebuilt (not mutated) on change.
    _available_endpoints: Tuple[(ConnectionConfig, ...)] = ()

    async def _track_and_propagate_available_endpoints(self) -> None:
        """Broadcast the updated endpoint list each time one connects."""
        async for ev in self._endpoint.stream(EventBusConnected):
            self._available_endpoints = (self._available_endpoints + (ev.connection_config,))
            self.logger.debug('New EventBus Endpoint connected %s', ev.connection_config.name)
            (await self._endpoint.broadcast(AvailableEndpointsUpdated(self._available_endpoints)))
            self.logger.debug('Connected EventBus Endpoints %s', self._available_endpoints)
def find_or_build_fs_dir(settings, participant_label):
    """Reuse existing FreeSurfer outputs for a subject, or run fmriprep.

    Skips all work when ``mri/wmparc.mgz`` already exists under the
    subject's FreeSurfer directory; otherwise launches an anat-only
    fmriprep run with the configured options.
    """
    fs_sub_dir = os.path.join(settings.fs_dir, 'sub-{}'.format(participant_label))
    # Guard clause: wmparc.mgz marks a completed FreeSurfer run.
    if os.path.isfile(os.path.join(fs_sub_dir, 'mri', 'wmparc.mgz')):
        logger.info('Found freesurfer outputs for sub-{}'.format(participant_label))
        return
    fmriprep_cmd = ['fmriprep', settings.bids_dir, os.path.dirname(settings.func_derivs_dir), 'participant', '--participant_label', participant_label, '--anat-only', '--output-space T1w template', '--nthreads', str(settings.n_cpus), '--omp-nthreads', str(settings.n_cpus)]
    if settings.fmriprep_work:
        fmriprep_cmd.extend(['--work-dir', settings.fmriprep_work])
    if settings.fs_license:
        fmriprep_cmd.extend(['--fs-license-file', settings.fs_license])
    if settings.fmriprep_vargs:
        # Extra args are passed through as a single opaque token, as before.
        fmriprep_cmd.append(settings.fmriprep_vargs)
    run(fmriprep_cmd, dryrun=DRYRUN)
def extractAustincrustranslationBlogspotCom(item):
    """Map an austincrustranslation.blogspot.com feed item to a release message.

    Returns None for unnumbered or preview posts, a release message when a
    known title prefix (for untagged posts) or tag matches, False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    if item['tags'] == []:
        # Untagged posts: fall back to matching known title prefixes.
        titlemap = [
            ('Isekai de Slow Life wo (Ganbou) Chapter ', 'Isekai de Slow Life wo (Ganbou)', 'translated'),
            ('Isekai De Slow Live wo (Ganbou) Chapter ', 'Isekai de Slow Life wo (Ganbou)', 'translated'),
            ('Isekai de Slow Life wo (Ganbou} Chapter ', 'Isekai de Slow Life wo (Ganbou)', 'translated'),
            ('I will Leisure become healer in another world Chapter ', 'I will Leisurely Become Healer in Another World', 'translated'),
            ('I will Leisurely Become Healer in Another World Chapter ', 'I will Leisurely Become Healer in Another World', 'translated'),
            ('The Best Assassin, Incarnated into a Different Worlds Aristocrat Chapter ', 'The Best Assassin, Incarnated into a Different Worlds Aristocrat', 'translated'),
            ('Tensei Shoujo no Rirekisho', 'Tensei Shoujo no Rirekisho', 'translated'),
            ('Master of Dungeon', 'Master of Dungeon', 'oel'),
        ]
        lowered_title = item['title'].lower()
        for prefix, series_name, release_type in titlemap:
            if prefix.lower() in lowered_title:
                return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=release_type)
    tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for tag, series_name, release_type in tagmap:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=release_type)
    return False
def test_raises_if_email_doesnt_match_token_user(Fred):
    """A reset-password token paired with the wrong email must fail validation."""
    checker = verifiers.EmailMatchesUserToken(User)
    reset_token = Token(user_id=1, operation=TokenActions.RESET_PASSWORD)
    with pytest.raises(ValidationError) as excinfo:
        checker(reset_token, email='not really')
    error = excinfo.value
    assert error.attribute == 'email'
    assert error.reason == 'Wrong email'
class OptionSeriesPieMarker(Options):
    """Wrapper for the Highcharts ``series.pie.marker`` options.

    NOTE(review): each option is a getter/setter pair with the same name;
    the ``@property`` / setter decorators appear to have been stripped from
    this generated code — confirm against the generator's output.
    """

    def enabled(self):
        return self._config_get(None)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def enabledThreshold(self):
        # Default threshold: 2.
        return self._config_get(2)

    def enabledThreshold(self, num: float):
        self._config(num, js_type=False)

    def fillColor(self):
        return self._config_get(None)

    def fillColor(self, text: str):
        self._config(text, js_type=False)

    def height(self):
        return self._config_get(None)

    def height(self, num: float):
        self._config(num, js_type=False)

    def lineColor(self):
        # Default marker outline color: white.
        return self._config_get('#ffffff')

    def lineColor(self, text: str):
        self._config(text, js_type=False)

    def lineWidth(self):
        return self._config_get(0)

    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    def radius(self):
        # Default marker radius (px): 4.
        return self._config_get(4)

    def radius(self, num: float):
        self._config(num, js_type=False)

    def states(self) -> 'OptionSeriesPieMarkerStates':
        # Nested ``states`` sub-options object.
        return self._config_sub_data('states', OptionSeriesPieMarkerStates)

    def symbol(self):
        return self._config_get(None)

    def symbol(self, text: str):
        self._config(text, js_type=False)

    def width(self):
        return self._config_get(None)

    def width(self, num: float):
        self._config(num, js_type=False)
class Rules(QObject):
    """Singleton facade over the ``rules`` database table.

    Wraps the Database singleton with add/delete/lookup/export/import helpers
    for protobuf rule objects, keyed by rule name and node address.
    """
    # Singleton storage (name-mangled to _Rules__instance).
    __instance = None
    # Qt signal carrying an int payload; not emitted in this class —
    # NOTE(review): presumably fired by callers after mutations, confirm.
    updated = pyqtSignal(int)
    LOG_TAG = '[Rules]: '
    def instance():
        # NOTE(review): no @staticmethod decorator; this only works when
        # invoked as Rules.instance() (no instance bound), which is the
        # singleton usage pattern.
        if (Rules.__instance == None):
            Rules.__instance = Rules()
        return Rules.__instance
    def __init__(self):
        QObject.__init__(self)
        self._db = Database.instance()
    def add(self, time, node, name, description, enabled, precedence, nolog, action, duration, op_type, op_sensitive, op_operand, op_data, created):
        """Insert one rule row (REPLACE on conflict); silently skip durations excluded by config."""
        if (duration in Config.RULES_DURATION_FILTER):
            return
        self._db.insert('rules', '(time, node, name, description, enabled, precedence, nolog, action, duration, operator_type, operator_sensitive, operator_operand, operator_data, created)', (time, node, name, description, enabled, precedence, nolog, action, duration, op_type, op_sensitive, op_operand, op_data, created), action_on_conflict='REPLACE')
    def add_rules(self, addr, rules):
        """Store a batch of protobuf rules for node ``addr``; return True on success, False on any exception."""
        try:
            for (_, r) in enumerate(rules):
                rjson = json.loads(MessageToJson(r))
                # List-type operators carry their entries in operator.list in
                # the JSON form; persist them as a JSON string in operator.data.
                if ((r.operator.type == Config.RULE_TYPE_LIST) and (rjson.get('operator') != None) and (rjson.get('operator').get('list') != None)):
                    r.operator.data = json.dumps(rjson.get('operator').get('list'))
                self.add(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), addr, r.name, r.description, str(r.enabled), str(r.precedence), str(r.nolog), r.action, r.duration, r.operator.type, str(r.operator.sensitive), r.operator.operand, r.operator.data, str(datetime.fromtimestamp(r.created).strftime('%Y-%m-%d %H:%M:%S')))
            return True
        except Exception as e:
            print((self.LOG_TAG + ' exception adding node rules to db: '), e)
            return False
    def delete(self, name, addr, callback):
        """Delete rule ``name`` on node ``addr``.

        Returns a stub (disabled, emptied) Rule message on success, or None if
        the DB delete failed. NOTE(review): ``callback`` is accepted but never
        used here — confirm against callers.
        """
        rule = ui_pb2.Rule(name=name)
        rule.enabled = False
        rule.action = ''
        rule.duration = ''
        rule.operator.type = ''
        rule.operator.operand = ''
        rule.operator.data = ''
        if (not self._db.delete_rule(rule.name, addr)):
            return None
        return rule
    def delete_by_field(self, field, values):
        # Thin passthrough to the DB layer.
        return self._db.delete_rules_by_field(field, values)
    def get_by_name(self, node, name):
        # Note the argument order swap: DB API is (name, node).
        return self._db.get_rule(name, node)
    def get_by_field(self, node, field, value):
        return self._db.get_rule_by_field(node, field, value)
    def exists(self, rule, node_addr):
        return self._db.rule_exists(rule, node_addr)
    def new_unique_name(self, rule_name, node_addr, prefix):
        """Return ``rule_name`` if unused on the node, else the first free ``rule_name-N`` (N in 0..99).

        Falls back to the original name if all 100 candidates are taken.
        NOTE(review): ``prefix`` is unused — confirm whether callers rely on it.
        """
        if (self._db.get_rule(rule_name, node_addr).next() == False):
            return rule_name
        for idx in range(0, 100):
            new_rule_name = '{0}-{1}'.format(rule_name, idx)
            if (self._db.get_rule(new_rule_name, node_addr).next() == False):
                return new_rule_name
        return rule_name
    def update_time(self, time, name, addr):
        """Update the ``time`` column of the rule identified by (name, node)."""
        self._db.update('rules', 'time=?', (time, name, addr), 'name=? AND node=?', action_on_conflict='OR REPLACE')
    def rule_to_json(self, node, rule_name):
        """Serialize one stored rule to pretty-printed JSON text; None on any failure."""
        try:
            records = self._db.get_rule(rule_name, node)
            # The DB layer signals failure with None or -1.
            if ((records == None) or (records == (- 1))):
                return None
            if (not records.next()):
                return None
            rule = Rule.new_from_records(records)
            tempRule = MessageToJson(rule)
            jRule = json.loads(tempRule)
            # ``created`` is stored as a unix timestamp; render it human-readable.
            jRule['created'] = str(datetime.fromtimestamp(rule.created).strftime('%Y-%m-%d %H:%M:%S'))
            return json.dumps(jRule, indent='    ')
        except Exception as e:
            print('rule_to_json() exception:', e)
            return None
    def export_rule(self, node, rule_name, outdir):
        """Write one rule as <outdir>/<node>/<name>.json; return True on success."""
        try:
            records = self._db.get_rule(rule_name, node)
            if (records.next() == False):
                print('export_rule() get_error 2:', records)
                return False
            rule = Rule.new_from_records(records)
            rulesdir = ((outdir + '/') + node)
            try:
                # 448 == 0o700: owner-only permissions on the export directory.
                os.makedirs(rulesdir, 448)
            except Exception as e:
                # Directory may already exist; log and continue.
                print('exception creating dirs:', e)
            rulename = rule.name
            if ('.json' not in rulename):
                rulename = (rulename + '.json')
            with open(((rulesdir + '/') + rulename), 'w') as jsfile:
                actual_json_text = MessageToJson(rule)
                jsfile.write(actual_json_text)
            return True
        except Exception as e:
            print(self.LOG_TAG, 'export_rules(', node, outdir, ') exception:', e)
            return False
    def export_rules(self, node, outdir):
        """Export every rule of ``node`` under <outdir>/<node>/; True unless an exception occurs."""
        records = self._db.get_rules(node)
        if (records == None):
            return False
        try:
            while (records.next() != False):
                rule = Rule.new_from_records(records)
                rulesdir = ((outdir + '/') + node)
                try:
                    # Same 0o700 export directory as export_rule(); ignore "already exists".
                    os.makedirs(rulesdir, 448)
                except:
                    pass
                rulename = rule.name
                if ('.json' not in rulename):
                    rulename = (rulename + '.json')
                with open(((rulesdir + '/') + rulename), 'w') as jsfile:
                    actual_json_text = MessageToJson(rule)
                    jsfile.write(actual_json_text)
        except Exception as e:
            print(self.LOG_TAG, 'export_rules(', node, outdir, ') exception:', e)
            return False
        return True
    def import_rules(self, rulesdir):
        """Parse every file in ``rulesdir`` as protobuf-Rule JSON; return the list, or None on error."""
        try:
            rules = []
            for rulename in os.listdir(rulesdir):
                with open(((rulesdir + '/') + rulename), 'r') as f:
                    jsrule = f.read()
                    pb_rule = Parse(text=jsrule, message=ui_pb2.Rule(), ignore_unknown_fields=True)
                    rules.append(pb_rule)
            return rules
        except Exception as e:
            print(self.LOG_TAG, 'import_rules() exception:', e)
            return None
def lazy_import():
    """Defer the heavy fastly model imports until first use, then publish them to module globals."""
    from fastly.model.apex_redirect_all_of import ApexRedirectAllOf
    from fastly.model.service_id_and_version import ServiceIdAndVersion
    from fastly.model.timestamps import Timestamps
    # Expose each model class under its own name in this module's namespace.
    for model_cls in (ApexRedirectAllOf, ServiceIdAndVersion, Timestamps):
        globals()[model_cls.__name__] = model_cls
class ChatShared(JsonDeserializable):
    """Telegram ``ChatShared`` object: identifies a chat shared with the bot.

    Attributes:
        request_id: Identifier of the request that triggered the share.
        chat_id: Identifier of the shared chat.
    """
    @classmethod
    def de_json(cls, json_string):
        """Build a ChatShared from a JSON string/dict; returns None for None input.

        Fix: ``de_json`` takes ``cls`` as its first parameter and is invoked as
        an alternate constructor, but the ``@classmethod`` decorator was
        missing — without it, ``ChatShared.de_json(s)`` would bind ``s`` to
        ``cls`` and fail.
        """
        if json_string is None:
            return None
        obj = cls.check_json(json_string)
        return cls(**obj)

    def __init__(self, request_id: int, chat_id: int) -> None:
        self.request_id: int = request_id
        self.chat_id: int = chat_id
def graph(docs):
    """Build an undirected co-occurrence graph from `docs`.

    For every pair produced by `pairs(doc)`, connect the two first elements
    and count repeated co-occurrences in the edge's `weight` attribute.
    """
    g = nx.Graph()
    for document in docs:
        for left, right in pairs(document):
            u, v = left[0], right[0]
            if (u, v) in g.edges():
                # Edge already present: bump its co-occurrence count.
                g.edges[u, v]['weight'] += 1
            else:
                g.add_edge(u, v, weight=1)
    return g
class SelfDestructBuiltIn(SolidityBuiltInFunction):
    """CFG construction for Solidity's `selfdestruct` built-in."""
    def setup_impl(self, call_info: FunctionCallInfo):
        """Emit a SelfDestruct on the single argument, followed by a terminal Halt."""
        assert len(call_info.arguments) == 1
        beneficiary = call_info.arguments[0]
        destruct_stmt = ir.SelfDestruct(call_info.ast_node, beneficiary)
        halt_stmt = ir.Halt(call_info.ast_node, False)
        # The Halt terminates control flow, so nothing may be appended after it.
        fragment = CfgSimple.statements(destruct_stmt, halt_stmt)
        self.cfg = fragment.without_appendable(halt_stmt)
def get_print_string_code(string):
    """Generate Brainfuck code that prints `string` using the current cell.

    Starts by zeroing the current cell and the one after it, then for each
    character sets the cell to its ordinal (relative to the previous value)
    and emits `.` to print it.
    """
    # '[-]' zero current cell, '>[-]' zero the next, '<' return to current.
    code = '[-]' + '>[-]' + '<'
    prev_value = 0
    for char in string:
        current_value = ord(char)
        code += get_set_cell_value_code(current_value, prev_value, zero_next_cell_if_necessary=False)
        code += '.'
        prev_value = current_value
    return code
class MyObject_labeled(event.Component):
    """Event component with labeled reaction handlers.

    NOTE(review): the bare string expressions ('!a', '!a:b', '!a:a') are
    no-op statements as written; they look like the arguments of stripped
    ``@event.reaction(...)`` decorators (the ':label' suffix orders labeled
    reactions) — TODO confirm against the original source.
    """
    ('!a')
    def r1(self, *events):
        # Print the handler name followed by the received event types.
        print(('r1 ' + ' '.join([ev.type for ev in events])))
    ('!a:b')
    def r2(self, *events):
        print(('r2 ' + ' '.join([ev.type for ev in events])))
    ('!a:a')
    def r3(self, *events):
        print(('r3 ' + ' '.join([ev.type for ev in events])))
def file_hash(*args, **kwargs):
    """Deprecated shim: forward every call to :func:`pooch.hashes.file_hash`."""
    from .hashes import file_hash as _real_file_hash
    deprecation_notice = '\n    Importing file_hash from pooch.utils is DEPRECATED. Please import from the\n    top-level namespace (`from pooch import file_hash`) instead, which is fully\n    backwards compatible with pooch >= 0.1.\n    '
    # stacklevel=2 points the warning at the caller, not at this shim.
    warnings.warn(deprecation_notice, DeprecationWarning, stacklevel=2)
    return _real_file_hash(*args, **kwargs)
class MarkdownTest(unittest.TestCase):
    """End-to-end checks for the markdown `parse` function, one scenario per method."""

    def _check(self, markdown, html):
        # Single choke point so each test reads as (input, expected output).
        self.assertEqual(parse(markdown), html)

    def test_parses_normal_text_as_a_paragraph(self):
        self._check('This will be a paragraph', '<p>This will be a paragraph</p>')

    def test_parsing_italics(self):
        self._check('_This will be italic_', '<p><em>This will be italic</em></p>')

    def test_parsing_bold_text(self):
        self._check('__This will be bold__', '<p><strong>This will be bold</strong></p>')

    def test_mixed_normal_italics_and_bold_text(self):
        self._check('This will _be_ __mixed__', '<p>This will <em>be</em> <strong>mixed</strong></p>')

    def test_with_h1_header_level(self):
        self._check('# This will be an h1', '<h1>This will be an h1</h1>')

    def test_with_h2_header_level(self):
        self._check('## This will be an h2', '<h2>This will be an h2</h2>')

    def test_with_h3_header_level(self):
        self._check('### This will be an h3', '<h3>This will be an h3</h3>')

    def test_with_h4_header_level(self):
        self._check('#### This will be an h4', '<h4>This will be an h4</h4>')

    def test_with_h5_header_level(self):
        self._check('##### This will be an h5', '<h5>This will be an h5</h5>')

    def test_with_h6_header_level(self):
        self._check('###### This will be an h6', '<h6>This will be an h6</h6>')

    def test_h7_header_level_is_a_paragraph(self):
        # Markdown only defines h1-h6; seven hashes stay literal text.
        self._check('####### This will not be an h7', '<p>####### This will not be an h7</p>')

    def test_unordered_lists(self):
        self._check('* Item 1\n* Item 2', '<ul><li>Item 1</li><li>Item 2</li></ul>')

    def test_with_a_little_bit_of_everything(self):
        self._check('# Header!\n* __Bold Item__\n* _Italic Item_', '<h1>Header!</h1><ul><li><strong>Bold Item</strong></li><li><em>Italic Item</em></li></ul>')

    def test_with_markdown_symbols_in_the_header_text_that_should_not_be_interpreted(self):
        self._check('# This is a header with # and * in the text', '<h1>This is a header with # and * in the text</h1>')

    def test_with_markdown_symbols_in_the_list_item_text_that_should_not_be_interpreted(self):
        self._check('* Item 1 with a # in the text\n* Item 2 with * in the text', '<ul><li>Item 1 with a # in the text</li><li>Item 2 with * in the text</li></ul>')

    def test_with_markdown_symbols_in_the_paragraph_text_that_should_not_be_interpreted(self):
        self._check('This is a paragraph with # and * in the text', '<p>This is a paragraph with # and * in the text</p>')

    def test_unordered_lists_close_properly_with_preceding_and_following_lines(self):
        self._check('# Start a list\n* Item 1\n* Item 2\nEnd a list', '<h1>Start a list</h1><ul><li>Item 1</li><li>Item 2</li></ul><p>End a list</p>')
def gen_profiler(func_attrs, workdir, header_files, backend_spec):
    """Render the standalone profiler source for this op and register it.

    Returns the list of (source, object) file pairs produced by add_profiler.
    """
    file_pairs = []
    op_type = func_attrs['op']
    op_name = op_type  # profiler name coincides with the op type here
    input_dtype = func_attrs['inputs'][0]._attrs['dtype']
    elem_input_type = backend_spec.dtype_to_backend_type(input_dtype)
    kernel_src = kernel.render(
        prefix=backend_spec.prefix,
        cub=backend_spec.cub,
        elem_input_type=elem_input_type,
    )
    signature_src = FUNC_SIGNATURE.render(
        func_name=func_attrs['name'],
        prefix=backend_spec.prefix,
    )
    code = PROFILER_TEMPLATE.render(
        header_files=header_files,
        elem_input_type=elem_input_type,
        kernel=kernel_src,
        func_signature=signature_src,
    )
    add_profiler(file_pairs, workdir, op_type, op_name, code)
    return file_pairs
class OptionPlotoptionsLineOnpointPosition(Options):
    """Highcharts line plot-options ``onPoint.position`` option group.

    Each option appears as a same-named getter/setter pair. NOTE(review): the
    second ``def`` shadows the first, so only the setter form survives at
    runtime; presumably ``@property`` / ``@<name>.setter`` decorators were
    stripped from the original source — TODO confirm.
    """
    def offsetX(self):
        # Getter; None means "use the library default".
        return self._config_get(None)
    def offsetX(self, num: float):
        # Setter: horizontal pixel offset.
        self._config(num, js_type=False)
    def offsetY(self):
        return self._config_get(None)
    def offsetY(self, num: float):
        # Setter: vertical pixel offset.
        self._config(num, js_type=False)
    def x(self):
        return self._config_get(None)
    def x(self, num: float):
        self._config(num, js_type=False)
    def y(self):
        return self._config_get(None)
    def y(self, num: float):
        self._config(num, js_type=False)
class NumberListStringArgument(ArgumentDefinition):
    """Argument type for comma-separated numeric lists (e.g. '23,5.5,11,1.01,3')."""
    NOT_A_VALID_NUMBER_LIST_STRING = 'The input should be of the type: <b><pre>\n\t23,5.5,11,1.01,3\n</pre></b>i.e. numeric values separated by commas.'
    VALUE_NOT_A_NUMBER = "The value: '%s' is not a number."
    # Coarse character filter; per-item parsing below catches the rest.
    PATTERN = re.compile('^[0-9\\.\\-+, \\t]+$')

    def __init__(self, **kwargs: bool) -> None:
        super().__init__(**kwargs)

    def validate(self, token: str) -> ValidationStatus:
        """Validate `token` as a comma-separated list of numbers.

        Runs the base-class validation first; on top of that, rejects tokens
        containing illegal characters, then checks every non-empty
        comma-separated piece parses as a float. The token itself is always
        recorded as the status value.
        """
        status = super().validate(token)
        if not status:
            return status
        if NumberListStringArgument.PATTERN.match(token) is None:
            status.setFailed()
            status.addToMessage(NumberListStringArgument.NOT_A_VALID_NUMBER_LIST_STRING)
        else:
            for piece in token.split(','):
                piece = piece.strip()
                if not piece:
                    # Empty segments (e.g. trailing comma) are tolerated.
                    continue
                try:
                    float(piece)
                except ValueError:
                    status.setFailed()
                    status.addToMessage(NumberListStringArgument.VALUE_NOT_A_NUMBER % piece)
        status.setValue(token)
        return status
# Fix: the decorator line began with a bare `.parametrize(` — a syntax error.
# The `@pytest.mark` prefix was evidently lost; restored here.
@pytest.mark.parametrize('version_info', [(2, 1, 0), (2, 7, 0), (2, 7, 15), (3, 6, 0), (3, 6, 7), (3, 6, 7, 'candidate', 1)])
def test__get_stdlib_packages_unsupported(version_info: tuple[int | str, ...]) -> None:
    """_get_stdlib_modules must reject interpreters older than 3.8 with a clear error."""
    expected = (
        f'Python version {version_info[0]}.{version_info[1]} is not supported. '
        f'Only versions >= 3.8 are supported.'
    )
    with mock.patch('sys.version_info', version_info), pytest.raises(UnsupportedPythonVersionError, match=re.escape(expected)):
        Core._get_stdlib_modules()
def operator_contains(item, field, value):
    """Return True if ``value`` is contained in ``item[field]``.

    When both ``item[field]`` and ``value`` parse as IP networks, containment
    is delegated to ``operator_contains_network`` (network-aware check);
    otherwise plain substring/membership containment is used.

    Fix: the final return re-tested ``field in item`` even though the guard at
    the top already returned False for missing fields — the redundant check is
    removed (behavior unchanged).
    """
    if field not in item:
        return False
    try:
        # Only take the network path if BOTH sides are valid networks.
        ipaddress.ip_network(item[field])
        ipaddress.ip_network(value)
        return operator_contains_network(item[field], value)
    except ValueError:
        # Not IP networks: fall through to plain containment.
        pass
    return value in item[field]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.