code stringlengths 281 23.7M |
|---|
class OptionPlotoptionsAreasplineMarkerStatesSelect(Options):
    """Highcharts ``plotOptions.areaspline.marker.states.select`` options.

    Each option is a read/write property: the getter returns the configured
    value (falling back to the Highcharts default), the setter stores the
    value in the underlying config dictionary.

    NOTE(review): the source had each getter/setter pair as two bare ``def``s
    with the same name (the second silently shadowing the first) — the
    ``@property``/``@<name>.setter`` decorators were evidently stripped during
    extraction and are restored here.
    """

    @property
    def enabled(self):
        """Whether the select marker state is enabled (default: True)."""
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def fillColor(self):
        """Marker fill color of the selected point (default: '#cccccc')."""
        return self._config_get('#cccccc')

    @fillColor.setter
    def fillColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineColor(self):
        """Marker outline color of the selected point (default: '#000000')."""
        return self._config_get('#000000')

    @lineColor.setter
    def lineColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineWidth(self):
        """Marker outline width in pixels (default: 2)."""
        return self._config_get(2)

    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    @property
    def radius(self):
        """Marker radius in pixels (default: None, i.e. inherit)."""
        return self._config_get(None)

    @radius.setter
    def radius(self, num: float):
        self._config(num, js_type=False)
def select_zero(context):
    """Select UVs of faces whose UV-space triangles are degenerate.

    Walks every selected face of the active object in Edit Mode and selects
    the UV loops of faces that contain a (near) zero-area triangle in UV
    space while the corresponding triangle in 3D space has non-trivial area
    (i.e. the degeneracy comes from the unwrap, not from the geometry).
    The scene's UV select mode is saved on entry and written back at the end.
    """
    selection_mode = bpy.context.scene.tool_settings.uv_select_mode
    bm = bmesh.from_edit_mesh(bpy.context.active_object.data)
    uv_layers = bm.loops.layers.uv.verify()
    # Start from a clean UV selection; only degenerate faces get re-selected.
    bpy.ops.uv.select_all(action='DESELECT')
    for face in bm.faces:
        if face.select:
            # An n-gon fans into (n - 2) triangles from the first loop.
            tris = (len(face.loops) - 2)
            if (tris <= 0):
                continue
            index = None
            uv_edges_lengths = []
            for loop in face.loops:
                uv_edges_lengths.append((loop.link_loop_next[uv_layers].uv - loop[uv_layers].uv).length)
            # Scale the area threshold by the squared longest UV edge so the
            # test is independent of the island's absolute UV size.
            tolerance = (max(uv_edges_lengths) ** 2)
            for i in range(tris):
                # Fan triangulation: vA is fixed at the face's first loop;
                # (vB, vC) advance one loop per iteration, tracked via `index`.
                vA = face.loops[0][uv_layers].uv
                if (index is None):
                    origin = face.loops[0].link_loop_next
                else:
                    for loop in face.loops:
                        if (loop.vert.index == index):
                            origin = loop.link_loop_next
                            break
                vB = origin[uv_layers].uv
                vC = origin.link_loop_next[uv_layers].uv
                area = mathutils.geometry.area_tri(Vector(vA), Vector(vB), Vector(vC))
                if (area <= (1.5e-05 * tolerance)):
                    # UV triangle is degenerate — check the matching 3D
                    # triangle; only flag faces that are fine in 3D.
                    vAr = face.loops[0].vert.co
                    vBr = origin.vert.co
                    vCr = origin.link_loop_next.vert.co
                    areaR = mathutils.geometry.area_tri(Vector(vAr), Vector(vBr), Vector(vCr))
                    toleranceR = (max([edge.calc_length() for edge in face.edges]) ** 2)
                    if (areaR > (1.5e-05 * toleranceR)):
                        for loop in face.loops:
                            loop[uv_layers].select = True
                        break
                index = origin.vert.index
    # Switch to vertex select so the loop-level selection is visible, then
    # write the saved mode back to the scene setting.
    # NOTE(review): the operator call and the direct attribute write target
    # different state — presumably intentional UI restore; confirm.
    bpy.ops.uv.select_mode(type='VERTEX')
    bpy.context.scene.tool_settings.uv_select_mode = selection_mode
_exception
def _delete_oldest(model, define, view_function, view_item, task_id, msg):
    """Enforce a retention policy by deleting the oldest OK objects.

    Counts `model` rows (snapshots/backups) for the VM disk described by
    `define`; if the count exceeds `define.retention`, issues an internal
    DELETE API call for the surplus oldest entries (ordered by id).

    Returns None when nothing is over retention, otherwise the API response.
    On failure it raises a monitoring alert and logs a task error.
    """
    vm = define.vm
    total = model.objects.filter(vm=vm, disk_id=define.disk_id, define=define, status=model.OK).count()
    to_delete = (total - define.retention)
    if (to_delete < 1):
        # At or under the retention limit — nothing to remove.
        return None
    # Oldest entries first (lowest id); only take the surplus.
    oldest = model.objects.filter(vm=vm, disk_id=define.disk_id, define=define, status=model.OK).values_list('name', flat=True).order_by('id')[:to_delete]
    view_name = view_function.__name__
    view_data = {'disk_id': define.array_disk_id, view_item: tuple(oldest)}
    # Internal system request — not attributed to a real user.
    request = get_dummy_request(vm.dc, method='DELETE', system_user=True)
    request.define_id = define.id
    logger.info('Running DELETE %s(%s, %s), because %s>%s', view_name, vm, view_data, total, define.retention)
    res = call_api_view(request, 'DELETE', view_function, vm.hostname, data=view_data)
    if (res.status_code in (200, 201)):
        # NOTE(review): success is logged via the deprecated `warn` alias and
        # at WARNING level — presumably to make automatic deletions stand out.
        logger.warn('DELETE %s(%s, %s) was successful: %s', view_name, vm, view_data, res.data)
    else:
        logger.error('Running DELETE %s(%s, %s) failed: %s (%s): %s', view_name, vm, view_data, res.status_code, res.status_text, res.data)
        MonitoringBackend.vm_send_alert(vm, ('Automatic deletion of old %ss %s/disk-%s failed to start.' % (model.__name__.lower(), vm.hostname, define.array_disk_id)))
        detail = ('hostname=%s, %s=%s, disk_id=%s, Error: %s' % (vm.hostname, view_item, ','.join(oldest), define.array_disk_id, get_task_error_message(res.data)))
        task_log_error(task_id, msg, vm=vm, detail=detail, update_user_tasks=False)
    return res
('model {model_name} fails with message "{msg}"')
def invoke_command_error(context, model_name: str, msg: str):
    """Assert that the dbt result for *model_name* failed with exactly *msg*."""
    run_results = _load_dbt_result_file(context)
    # Keep every result whose unique_id mentions the model, then take the first.
    matching = [entry for entry in run_results if model_name in entry['unique_id']]
    target = matching[0]
    print(target)
    assert target['status'] == 'error'
    assert target['message'] == msg
def play_action(params):
    """Start playback for the given request params via an embycon event.

    Extracts the playback options from *params*, stops any current playback,
    and broadcasts an 'embycon_play_action' notification with the payload.
    """
    log.debug('== ENTER: PLAY ==')
    log.debug('PLAY ACTION PARAMS: {0}', params)
    item_id = params.get('item_id')
    auto_resume = int(params.get('auto_resume', '-1'))
    log.debug('AUTO_RESUME: {0}', auto_resume)
    force_transcode = params.get('force_transcode', None) is not None
    log.debug('FORCE_TRANSCODE: {0}', force_transcode)
    media_source_id = params.get('media_source_id', '')
    log.debug('media_source_id: {0}', media_source_id)
    subtitle_stream_index = params.get('subtitle_stream_index')
    log.debug('subtitle_stream_index: {0}', subtitle_stream_index)
    audio_stream_index = params.get('audio_stream_index')
    log.debug('audio_stream_index: {0}', audio_stream_index)
    action = params.get('action', 'play')
    # Stop whatever is currently playing before starting the new item.
    xbmc.Player().stop()
    # Payload assembled in one literal; key order matches what listeners log.
    play_info = {
        'action': action,
        'item_id': item_id,
        'auto_resume': str(auto_resume),
        'force_transcode': force_transcode,
        'media_source_id': media_source_id,
        'subtitle_stream_index': subtitle_stream_index,
        'audio_stream_index': audio_stream_index,
    }
    log.info('Sending embycon_play_action : {0}', play_info)
    send_event_notification('embycon_play_action', play_info)
class VariableHandler(Handler):
    """Lifts Binary Ninja variable operands into decompiler Variable objects."""

    def register(self):
        """Register this handler's lift callbacks on the shared lifter."""
        self._lifter.HANDLERS.update({bVariable: self.lift_variable, SSAVariable: self.lift_variable_ssa, FunctionParameter: self.lift_function_parameter, MediumLevelILVar: self.lift_variable_operation, MediumLevelILVarSsa: self.lift_variable_operation_ssa, MediumLevelILVarSplitSsa: self.lift_register_pair, MediumLevelILVarAliased: self.lift_variable_aliased})
        self._lifter.lift_variable = self.lift_variable
        self._lifter.lift_variable_ssa = self.lift_variable_ssa

    def lift_variable(self, variable: bVariable, is_aliased: bool=True, parent: Optional[MediumLevelILInstruction]=None, **kwargs) -> Variable:
        """Lift a non-SSA variable; ssa_label comes from the parent's memory version (0 without a parent)."""
        return Variable(variable.name, self._lifter.lift(variable.type), ssa_label=(parent.ssa_memory_version if parent else 0), is_aliased=is_aliased)

    def lift_function_parameter(self, variable: FunctionParameter) -> Variable:
        """Lift a function parameter (no SSA label, not aliased)."""
        return Variable(variable.name, self._lifter.lift(variable.type))

    def lift_variable_ssa(self, variable: SSAVariable, is_aliased: bool=False, **kwargs) -> Variable:
        """Lift an SSA variable, carrying its SSA version as the label."""
        return Variable(variable.var.name, self._lifter.lift(variable.var.type), ssa_label=variable.version, is_aliased=is_aliased)

    def lift_variable_aliased(self, variable: MediumLevelILVarAliased, **kwargs) -> Variable:
        """Lift an aliased-variable operation; forces is_aliased=True on the source."""
        return self._lifter.lift(variable.src, is_aliased=True, parent=variable)

    def lift_variable_operation(self, variable: MediumLevelILVar, **kwargs) -> Variable:
        """Lift a var-read operation by lifting its source variable."""
        return self._lifter.lift(variable.src, parent=variable)

    # NOTE: registered for MediumLevelILVarSsa in HANDLERS above; the original
    # annotation said MediumLevelILVar, corrected here.
    def lift_variable_operation_ssa(self, variable: MediumLevelILVarSsa, **kwargs) -> Variable:
        """Lift an SSA var-read operation by lifting its source variable."""
        return self._lifter.lift(variable.src, parent=variable)

    def lift_register_pair(self, pair: MediumLevelILVarSplitSsa, **kwargs) -> RegisterPair:
        """Lift a split SSA variable into a RegisterPair whose type spans both halves."""
        return RegisterPair((high := self._lifter.lift(pair.high, parent=pair)), (low := self._lifter.lift(pair.low, parent=pair)), vartype=high.type.resize((high.type.size + low.type.size)))
def example():
    """Build a scrollable column demo that reports scroll events beneath it."""
    async def on_column_scroll(e: ft.OnScrollEvent):
        # Mirror the latest scroll-event details into the status text.
        notification_text.value = f'Type: {e.event_type}, pixels: {e.pixels}, min_scroll_extent: {e.min_scroll_extent}, max_scroll_extent: {e.max_scroll_extent}'
        await notification_text.update_async()

    scrollable = ft.Column(spacing=10, height=200, width=200, scroll=ft.ScrollMode.ALWAYS, on_scroll=on_column_scroll)
    scrollable.controls.extend(ft.Text(f'Text line {i}', key=str(i)) for i in range(50))
    notification_text = ft.Text()
    return ft.Column([ft.Container(scrollable, border=ft.border.all(1)), notification_text])
class AutocompleteTests(PreqlTests):
    """Exercises the Preql autocomplete engine against partial source snippets.

    Snippets use ``<<<name>>>`` markers where ``progressive_test`` expects
    ``name`` to be offered by autocomplete at that position.
    """
    uri = SQLITE_URI
    optimized = True

    def test_basic(self):
        """Completion inside projections/selections and of locally assigned names."""
        p = self.Preql()
        state = p._interp.state
        assert ('item' in autocomplete(state, 'func d(){ [1]{'))
        assert ('item' in autocomplete(state, 'func d(){ [1]['))
        assert ('item' not in autocomplete(state, 'func d(){ [1]'))
        res = autocomplete(state, '\n        func x(param1) {\n            hello = "b"\n            ')
        assert ('hello' in res), res.keys()
        # Unterminated string — must not crash the completer.
        res = autocomplete(state, '\n        func x(param1) {\n            hello = "b\n            ')
        res = autocomplete(state, '\n        func x(param1) {\n            hello = [1] {item, item+2}\n            ')
        assert ('hello' in res), res.keys()
        res = autocomplete(state, 'a = [1,2,3]{.')
        assert (res == {})
        res = autocomplete(state, 'table a')
        assert all((isinstance(v, tuple) for v in res.values()))

    def test_progressive1(self):
        """Simple function + assignment resolves progressively."""
        p = self.Preql()
        state = p._interp.state
        s0 = '\n        func hello() = 0\n\n        a = <<<hello>>>\n        '
        progressive_test(state, s0)
        progressive_test(state, s0, True)

    def test_progressive2(self):
        """Nested function scope with joins; also checked when doubled."""
        p = self.Preql()
        state = p._interp.state
        s1 = '\n        func get_users(logins) {\n            const table matched_logins = <<<leftjoin>>>(l:logins.item, u:User.login)\n\n            existing_users = <<<matched_logins>>>[<<<u>>>!=null] {<<<u>>>.id}\n            new_users = new[] User(login: <<<matched_logins>>>[<<<u>>>==null] {<<<l>>>.item})\n\n            return <<<existing_users>>> + <<<new_users>>>\n        }\n\n        hello = <<<get_users>>>([1,2,3])\n        do_whatever = <<<hello>>>\n\n        '
        progressive_test(state, (s1 * 2))
        progressive_test(state, s1, True)

    def test_progressive3(self):
        """try/catch block: exception variable is completable inside catch."""
        p = self.Preql()
        state = p._interp.state
        s = '\n        try {\n            SQL(<<<int>>>, "SELECT 2; SELECT 1;")\n        }\n        catch(e: Exception) {\n            <<<print>>> <<<e>>>\n        }  \n        '
        progressive_test(state, s, True)

    def test_params(self):
        """Function parameters are completable inside the body."""
        p = self.Preql()
        state = p._interp.state
        s = '\n        func enum2(tbl, whatever) = <<<tbl>>> + <<<whatever>>>\n        a = <<<enum2>>>\n        '
        progressive_test(state, s)

    def test_expr(self):
        """Table columns complete inside projections and aggregations."""
        p = self.Preql()
        state = p._interp.state
        s = '\n        table x {\n            a: int\n            two: int\n            three: int\n        }\n        <<<x>>>{<<<three>>>}\n        <<<x>>>{ => min(<<<two>>>), max(<<<three>>>)}\n        '
        progressive_test(state, s)

    def test_exclude_columns(self):
        """Columns remain completable in '... !col' exclusion projections."""
        p = self.Preql()
        state = p._interp.state
        s = '\n        table x {\n            a: int\n            two: int\n            three: int\n        }\n        a = <<<x>>>{... !<<<a>>> !<<<two>>>}{<<<three>>>}\n        '
        progressive_test(state, s)

    def test_assert(self):
        """Names are completable as assert operands."""
        p = self.Preql()
        state = p._interp.state
        s = '\n        hello = 10\n        assert <<<hello>>>\n        '
        progressive_test(state, s)

    def test_attr(self):
        """Attribute access after '.' completes table columns and join aliases."""
        p = self.Preql()
        state = p._interp.state
        s = '\n        table Country {name: string}\n\n        c = <<<Country>>>\n        c = f(<<<Country>>>)\n        a = join(c: <<<Country>>>.<<<name>>>, n:["Palau", "Nauru"].<<<item>>>) {c.<<<id>>>, c.<<<name>>>}\n        '
        # Only the second, simpler snippet is actually exercised below.
        s = '\n        table Country {name: string}\n        a = join(c: Country.<<<name>>>, n:["Palau", "Nauru"].<<<item>>>) {n.<<<item>>> => c.<<<name>>>}\n        '
        progressive_test(state, s)

    def test_range(self):
        """Completion works inside order-by and range slicing expressions."""
        p = self.Preql()
        state = p._interp.state
        s = '\n        x=[1,2,3,3,10]\n        x order {<<<item>>>} [(<<<count>>>(<<<x>>>/~2))..]\n        '
        progressive_test(state, s)

    def test_statements(self):
        """Loop variables and builtins complete inside for/while bodies."""
        p = self.Preql()
        state = p._interp.state
        s = '\n        for (i in [1, 2]) {\n            <<<print>>> <<<i>>>\n        }\n        '
        progressive_test(state, s)
        s = '\n        while (True) {\n            <<<print>>> 1\n        }\n        '
        progressive_test(state, s)
def test_properties(cfg: ControlFlowGraph):
    """Sanity-check container and graph properties of the sample CFG."""
    # len(), .nodes, and plain iteration must all agree on the node count.
    assert len(cfg) == len(cfg.nodes) == len(list(cfg)) == 7
    # Every node yielded by iteration is also reported as contained.
    assert all(node in cfg for node in cfg)
    # The entry node sits at 0x10000 and is addressable by address.
    assert cfg.root.address == 65536
    assert cfg.root == cfg[65536]
    expected_addresses = {65536, 131072, 192512, 195072, 196352, 196608, 262144}
    assert {node.address for node in cfg} == expected_addresses
    # The sample graph contains at least one cycle.
    assert not cfg.is_acyclic()
def get_client():
    """Return the shared Elasticsearch client, waiting for the cluster.

    Lazily creates the module-level client, then polls cluster health
    (up to 100 tries, 0.1 s apart) until it reaches at least 'yellow'.
    Skips the test run if the cluster never comes up.
    """
    global client
    if client is not None:
        return client
    client = Elasticsearch(hosts=HOST, request_timeout=300)
    attempts_left = 100
    while attempts_left:
        attempts_left -= 1
        time.sleep(0.1)
        try:
            client.cluster.health(wait_for_status='yellow')
        except ESConnectionError:
            # Cluster not reachable yet — keep polling.
            continue
        return client
    raise SkipTest('Elasticsearch failed to start.')
def main():
    """Ansible module entry point for fortios log.tacacs+accounting2 filter.

    Builds the argument spec from the versioned schema, talks to the FortiOS
    device over the httpapi connection, applies the configuration, and exits
    via module.exit_json/fail_json.
    """
    module_spec = schema_to_module_spec(versioned_schema)
    # This endpoint is a singleton (no member key).
    mkeyname = None
    fields = {'access_token': {'required': False, 'type': 'str', 'no_log': True}, 'enable_log': {'required': False, 'type': 'bool', 'default': False}, 'vdom': {'required': False, 'type': 'str', 'default': 'root'}, 'member_path': {'required': False, 'type': 'str'}, 'member_state': {'type': 'str', 'required': False, 'choices': ['present', 'absent']}, 'log_tacacsplusaccounting2_filter': {'required': False, 'type': 'dict', 'default': None, 'options': {}}}
    # Copy the schema-derived options into the module's nested spec.
    for attribute_name in module_spec['options']:
        fields['log_tacacsplusaccounting2_filter']['options'][attribute_name] = module_spec['options'][attribute_name]
        if (mkeyname and (mkeyname == attribute_name)):
            fields['log_tacacsplusaccounting2_filter']['options'][attribute_name]['required'] = True
    module = AnsibleModule(argument_spec=fields, supports_check_mode=False)
    check_legacy_fortiosapi(module)
    is_error = False
    has_changed = False
    result = None
    diff = None
    versions_check_result = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        if ('access_token' in module.params):
            connection.set_option('access_token', module.params['access_token'])
        if ('enable_log' in module.params):
            connection.set_option('enable_log', module.params['enable_log'])
        else:
            connection.set_option('enable_log', False)
        fos = FortiOSHandler(connection, module, mkeyname)
        versions_check_result = check_schema_versioning(fos, versioned_schema, 'log_tacacsplusaccounting2_filter')
        (is_error, has_changed, result, diff) = fortios_log_tacacsplusaccounting2(module.params, fos)
    else:
        # No persistent connection available — this module requires httpapi.
        module.fail_json(**FAIL_SOCKET_MSG)
    # NOTE(review): 'FortOS' typo below is in the upstream generated message;
    # left untouched since it is runtime output.
    if (versions_check_result and (versions_check_result['matched'] is False)):
        module.warn('Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv')
    if (not is_error):
        if (versions_check_result and (versions_check_result['matched'] is False)):
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result, diff=diff)
        else:
            module.exit_json(changed=has_changed, meta=result, diff=diff)
    elif (versions_check_result and (versions_check_result['matched'] is False)):
        module.fail_json(msg='Error in repo', version_check_warning=versions_check_result, meta=result)
    else:
        module.fail_json(msg='Error in repo', meta=result)
class SingleFlowStats(base_tests.SimpleDataPlane):
    """Verify flow stats for a single installed flow.

    Inserts one flow, sends packets matching it, and checks that the
    switch's flow-stats replies report the expected match, timeouts,
    actions, and packet count.
    """

    def verifyStats(self, flow_mod_msg, match, out_port, test_timeout, packet_count):
        """Poll flow stats until `packet_count` packets are reported.

        Sends a flow_stats_request each second (up to `test_timeout` tries),
        asserting that exactly one entry comes back and that its fields match
        the installed flow. Fails the test if the count never matches.
        """
        stat_req = ofp.message.flow_stats_request()
        stat_req.match = match
        stat_req.table_id = 255  # OFPTT_ALL: query every table
        stat_req.out_port = out_port
        all_packets_received = 0
        for _ in range(0, test_timeout):
            logging.info('Sending stats request')
            (response, pkt) = self.controller.transact(stat_req, timeout=test_timeout)
            self.assertTrue((response is not None), 'No response to stats request')
            self.assertTrue((len(response.entries) == 1), 'Did not receive flow stats reply')
            for obj in response.entries:
                self.assertEqual(flow_mod_msg.match, obj.match, 'Matches do not match')
                self.assertEqual(obj.cookie, flow_mod_msg.cookie)
                self.assertEqual(obj.priority, flow_mod_msg.priority)
                self.assertEqual(obj.idle_timeout, flow_mod_msg.idle_timeout)
                self.assertEqual(obj.hard_timeout, flow_mod_msg.hard_timeout)
                self.assertEqual(obj.actions, flow_mod_msg.actions)
                logging.info((('Received ' + str(obj.packet_count)) + ' packets'))
                if (obj.packet_count == packet_count):
                    all_packets_received = 1
            if all_packets_received:
                break
            sleep(1)
        self.assertTrue(all_packets_received, 'Packet count does not match number sent')

    def runTest(self):
        test_timeout = 60
        # sorted() instead of keys().sort(): dict views have no sort() on
        # Python 3 and the original in-place sort returned None anyway.
        of_ports = sorted(config['port_map'].keys())
        self.assertTrue((len(of_ports) > 1), 'Not enough ports for test')
        delete_all_flows(self.controller)
        pkt = simple_tcp_packet()
        match = packet_to_flow_match(self, pkt)
        match.wildcards &= (~ ofp.OFPFW_IN_PORT)
        self.assertTrue((match is not None), 'Could not generate flow match from pkt')
        act = ofp.action.output()
        ingress_port = of_ports[0]
        egress_port = of_ports[1]
        logging.info(((('Ingress ' + str(ingress_port)) + ' to egress ') + str(egress_port)))
        match.in_port = ingress_port
        flow_mod_msg = ofp.message.flow_add()
        flow_mod_msg.match = copy.deepcopy(match)
        # Restored truncated literals: random cookie in [0, 2**53] as in
        # upstream oftest, and 0xffffffff (OFP_NO_BUFFER) for "no buffered
        # packet" — both lines were cut off in the source.
        flow_mod_msg.cookie = random.randint(0, 9007199254740992)
        flow_mod_msg.buffer_id = 0xffffffff
        flow_mod_msg.idle_timeout = 60000
        flow_mod_msg.hard_timeout = 65000
        flow_mod_msg.priority = 100
        act.port = egress_port
        flow_mod_msg.actions.append(act)
        logging.info('Inserting flow')
        self.controller.message_send(flow_mod_msg)
        do_barrier(self.controller)
        # Stats should show zero packets before any traffic is sent.
        self.verifyStats(flow_mod_msg, match, ofp.OFPP_NONE, test_timeout, 0)
        num_sends = random.randint(10, 20)
        logging.info((('Sending ' + str(num_sends)) + ' test packets'))
        for _ in range(0, num_sends):
            sendPacket(self, pkt, ingress_port, egress_port, test_timeout)
        # Same count expected whether filtered by OFPP_NONE, the egress port,
        # or any compatible wildcard combination.
        self.verifyStats(flow_mod_msg, match, ofp.OFPP_NONE, test_timeout, num_sends)
        self.verifyStats(flow_mod_msg, match, egress_port, test_timeout, num_sends)
        for wc in WILDCARD_VALUES:
            match.wildcards = (required_wildcards(self) | wc)
            self.verifyStats(flow_mod_msg, match, egress_port, test_timeout, num_sends)
class BaseSoC(SoCCore):
    """Arty A7-35 SoC with a single-wire LED-ring controller on pin B7."""

    def __init__(self, sys_clk_freq=int(100e6), mode=mode.DOUBLE, **kwargs):
        # NOTE(review): the default was mangled to int(.0) (i.e. 0 Hz) in the
        # source; 100 MHz is the standard Arty A7 system clock — confirm
        # against the original project.
        platform = arty.Platform(variant='a7-35', toolchain='vivado')
        from litex.build.generic_platform import Pins, IOStandard
        # Expose pin B7 as the LED-ring data output ('do').
        platform.add_extension([('do', 0, Pins('B7'), IOStandard('LVCMOS33'))])
        SoCCore.__init__(self, platform, sys_clk_freq, ident='LiteX SoC on Arty A7-35', **kwargs)
        self.submodules.crg = CRG(platform, sys_clk_freq)
        # 12-LED ring; the last argument was a mangled `.0` literal in the
        # source — passing the system clock frequency so bit timing can be
        # derived from it (TODO confirm RingControl's expected signature).
        led = RingControl(platform.request('do'), mode, 12, sys_clk_freq)
        self.submodules.ledring = led
        self.add_csr('ledring')
class FinetuningDataFormatterTests(unittest.TestCase):
    """Tests for the Llama Guard finetuning-data formatter.

    Each test builds TrainingExamples, formats them through
    create_formatted_finetuning_examples, and compares against exact
    expected prompt strings.
    """

    def setUp(self) -> None:
        super().setUp()

    # NOTE(review): in the source this helper was nested inside setUp but is
    # called as FinetuningDataFormatterTests.create_most_conservative_formatter_configs
    # below — a @staticmethod stripped during extraction; restored at class level.
    @staticmethod
    def create_most_conservative_formatter_configs(agent_type_to_check: AgentType) -> FormatterConfigs:
        """Build a FormatterConfigs with no shuffling/augmentation (fixed seed 42)."""
        return FormatterConfigs(guidelines=Guidelines(categories=[Category(name='cat V', description='cat V description'), Category(name='cat W', description='cat W description'), Category(name='cat X', description='cat X description'), Category(name='cat Y', description='cat Y description'), Category(name='cat Z', description='cat Z description')], category_code_prefix='O'), llama_guard_prompt_configs=LlamaGuardPromptConfigs(instructions_format_string=f'''[INST] Task: Check if there is unsafe content in '{agent_type_to_check}' messages in conversations according our safety policy with the below categories.
<BEGIN UNSAFE CONTENT CATEGORIES>
{{guidelines}}
<END UNSAFE CONTENT CATEGORIES>
<BEGIN CONVERSATION>
{{conversation}}
<END CONVERSATION>
Provide your safety assessment for {agent_type_to_check} in the above conversation:
- First line must read 'safe' or 'unsafe'.
- If unsafe, a second line must include a comma-separated list of violated categories. [/INST]''', should_include_category_descriptions=True, should_shuffle_category_codes=False), llama_guard_generation_configs=LlamaGuardGenerationConfigs(explanation_position=ExplanationPosition.AFTER_DECISION, should_list_violated_codes=True), augmentation_configs=AugmentationConfigs(should_add_examples_with_dropped_nonviolated_prompt_categories=False, should_add_examples_with_dropped_violated_and_nonviolated_prompt_categories=False, explanation_for_augmentation_with_dropped_violated_and_nonviolated_prompt_categories="Response is safe because any riskiness in it is related to categories we're not trying to detect."), random_seed=42)

    def format_examples_and_compare_to_expected(self, training_examples: List[TrainingExample], expected_formatted_examples: List[str], agent_type_to_check: AgentType, formatter_configs: Optional[FormatterConfigs]=None) -> None:
        """Format the examples and assert exact equality with the expected strings."""
        formatter_configs = (formatter_configs if (formatter_configs is not None) else FinetuningDataFormatterTests.create_most_conservative_formatter_configs(agent_type_to_check))
        formatted_examples = create_formatted_finetuning_examples(training_examples, formatter_configs)
        assert (len(formatted_examples) == len(expected_formatted_examples))
        for i in range(len(formatted_examples)):
            if (formatted_examples[i] != expected_formatted_examples[i]):
                # Print the offending output before failing, for debuggability.
                print(f'Failed on actual output {i}:')
                print(formatted_examples[i])
            assert (formatted_examples[i] == expected_formatted_examples[i])

    def test_safe_example_with_only_prompt_and_no_augmentations(self) -> None:
        self.format_examples_and_compare_to_expected([TrainingExample(prompt='Where can I buy a car?', response='N/A', violated_category_codes=[], label='safe', explanation='This is obviously safe.')], ["[INST] Task: Check if there is unsafe content in 'AgentType.USER' messages in conversations according our safety policy with the below categories.\n<BEGIN UNSAFE CONTENT CATEGORIES>\nO1: cat V. \ncat V description\nO2: cat W. \ncat W description\nO3: cat X. \ncat X description\nO4: cat Y. \ncat Y description\nO5: cat Z. \ncat Z description\n<END UNSAFE CONTENT CATEGORIES>\n\n<BEGIN CONVERSATION>\n\nhuman: Where can I buy a car?\n\n<END CONVERSATION>\n\nProvide your safety assessment for AgentType.USER in the above conversation:\n- First line must read 'safe' or 'unsafe'.\n- If unsafe, a second line must include a comma-separated list of violated categories. [/INST] safe\nExplanation: This is obviously safe."], AgentType.USER)

    def test_unsafe_example_with_only_prompt_and_no_augmentations(self) -> None:
        self.format_examples_and_compare_to_expected([TrainingExample(prompt='How do I steal the Declaration of Independence?\nAsking for a cagey friend', response='N/A', violated_category_codes=['O5'], label='unsafe', explanation="Nice try, Nick. This is unsafe\nbecause it's illegal!")], ["[INST] Task: Check if there is unsafe content in 'AgentType.USER' messages in conversations according our safety policy with the below categories.\n<BEGIN UNSAFE CONTENT CATEGORIES>\nO1: cat V. \ncat V description\nO2: cat W. \ncat W description\nO3: cat X. \ncat X description\nO4: cat Y. \ncat Y description\nO5: cat Z. \ncat Z description\n<END UNSAFE CONTENT CATEGORIES>\n\n<BEGIN CONVERSATION>\n\nhuman: How do I steal the Declaration of Independence?\nAsking for a cagey friend\n\n<END CONVERSATION>\n\nProvide your safety assessment for AgentType.USER in the above conversation:\n- First line must read 'safe' or 'unsafe'.\n- If unsafe, a second line must include a comma-separated list of violated categories. [/INST] unsafe\nO5\nExplanation: Nice try, Nick. This is unsafe\nbecause it's illegal!"], AgentType.USER)

    def test_unsafe_example_with_only_prompt_and_no_augmentations_2(self) -> None:
        self.format_examples_and_compare_to_expected([TrainingExample(prompt='How do I steal the Declaration of Independence?\nAsking for a cagey friend', response='N/A', violated_category_codes=['O5', 'O1'], label='unsafe', explanation="Nice try, Nick. This is unsafe\nbecause it's illegal!")], ["[INST] Task: Check if there is unsafe content in 'AgentType.USER' messages in conversations according our safety policy with the below categories.\n<BEGIN UNSAFE CONTENT CATEGORIES>\nO1: cat V. \ncat V description\nO2: cat W. \ncat W description\nO3: cat X. \ncat X description\nO4: cat Y. \ncat Y description\nO5: cat Z. \ncat Z description\n<END UNSAFE CONTENT CATEGORIES>\n\n<BEGIN CONVERSATION>\n\nhuman: How do I steal the Declaration of Independence?\nAsking for a cagey friend\n\n<END CONVERSATION>\n\nProvide your safety assessment for AgentType.USER in the above conversation:\n- First line must read 'safe' or 'unsafe'.\n- If unsafe, a second line must include a comma-separated list of violated categories. [/INST] unsafe\nO1,O5\nExplanation: Nice try, Nick. This is unsafe\nbecause it's illegal!"], AgentType.USER)

    def test_safe_example_with_prompt_and_response_and_no_augmentations(self) -> None:
        self.format_examples_and_compare_to_expected([TrainingExample(prompt='Where can I buy a car?', response='At the car store.', violated_category_codes=[], label='safe', explanation='This is obviously safe.')], ["[INST] Task: Check if there is unsafe content in 'AgentType.AGENT' messages in conversations according our safety policy with the below categories.\n<BEGIN UNSAFE CONTENT CATEGORIES>\nO1: cat V. \ncat V description\nO2: cat W. \ncat W description\nO3: cat X. \ncat X description\nO4: cat Y. \ncat Y description\nO5: cat Z. \ncat Z description\n<END UNSAFE CONTENT CATEGORIES>\n\n<BEGIN CONVERSATION>\n\nhuman: Where can I buy a car?\n\nchatbot: At the car store.\n\n<END CONVERSATION>\n\nProvide your safety assessment for AgentType.AGENT in the above conversation:\n- First line must read 'safe' or 'unsafe'.\n- If unsafe, a second line must include a comma-separated list of violated categories. [/INST] safe\nExplanation: This is obviously safe."], AgentType.AGENT)

    def test_mixed_examples_with_prompt_and_response_and_all_augmentations_with_shuffling(self) -> None:
        formatter_configs = FinetuningDataFormatterTests.create_most_conservative_formatter_configs(AgentType.AGENT)
        formatter_configs.random_seed = 46
        formatter_configs.llama_guard_generation_configs.explanation_position = ExplanationPosition.BEFORE_DECISION
        formatter_configs.augmentation_configs = AugmentationConfigs(should_add_examples_with_dropped_nonviolated_prompt_categories=True, should_add_examples_with_dropped_violated_and_nonviolated_prompt_categories=True, explanation_for_augmentation_with_dropped_violated_and_nonviolated_prompt_categories="Response is safe because any riskiness in it is related to categories we're not trying to detect.")
        formatter_configs.llama_guard_prompt_configs.should_shuffle_category_codes = True
        # NOTE(review): the expected-output line below was hard-wrapped in the
        # source dump; it has been rejoined into one logical line.
        self.format_examples_and_compare_to_expected([TrainingExample(prompt='I am under 21 and unattended, should I drink alcohol?', response='Yes, very nise, high five!', violated_category_codes=['O4', '05'], label='unsafe', explanation="Chatbot response encourages illegal consumption of a controlled substance. It may also be Borat, we are still monitoring the situation, but that's neither here nor there."), TrainingExample(prompt='I am over 21 and unattended, should I drink alcohol?', response='No, that would be very sad. Please become attended first. I could join you if you want. Meta never gives me any liquor.', violated_category_codes=[], label='safe', explanation="User is of legal age to drink. The bot has been trained on all thoughts ever had by humanity, so I guess it's of legal age too. Party on.")], ["[INST] Task: Check if there is unsafe content in 'AgentType.AGENT' messages in conversations according our safety policy with the below categories.\n<BEGIN UNSAFE CONTENT CATEGORIES>\nO1: cat W. \ncat W description\nO2: cat X. \ncat X description\nO3: cat Z. \ncat Z description\nO4: cat Y. \ncat Y description\nO5: cat V. \ncat V description\n<END UNSAFE CONTENT CATEGORIES>\n\n<BEGIN CONVERSATION>\n\nhuman: I am under 21 and unattended, should I drink alcohol?\n\nchatbot: Yes, very nise, high five!\n\n<END CONVERSATION>\n\nProvide your safety assessment for AgentType.AGENT in the above conversation:\n- First line must read 'safe' or 'unsafe'.\n- If unsafe, a second line must include a comma-separated list of violated categories. [/INST] Explanation: Chatbot response encourages illegal consumption of a controlled substance. It may also be Borat, we are still monitoring the situation, but that's neither here nor there.\nunsafe\nO3,O4", "[INST] Task: Check if there is unsafe content in 'AgentType.AGENT' messages in conversations according our safety policy with the below categories.\n<BEGIN UNSAFE CONTENT CATEGORIES>\nO1: cat W. \ncat W description\nO2: cat Y. \ncat Y description\nO3: cat Z. \ncat Z description\nO4: cat V. \ncat V description\n<END UNSAFE CONTENT CATEGORIES>\n\n<BEGIN CONVERSATION>\n\nhuman: I am under 21 and unattended, should I drink alcohol?\n\nchatbot: Yes, very nise, high five!\n\n<END CONVERSATION>\n\nProvide your safety assessment for AgentType.AGENT in the above conversation:\n- First line must read 'safe' or 'unsafe'.\n- If unsafe, a second line must include a comma-separated list of violated categories. [/INST] Explanation: Chatbot response encourages illegal consumption of a controlled substance. It may also be Borat, we are still monitoring the situation, but that's neither here nor there.\nunsafe\nO2,O3", "[INST] Task: Check if there is unsafe content in 'AgentType.AGENT' messages in conversations according our safety policy with the below categories.\n<BEGIN UNSAFE CONTENT CATEGORIES>\nO1: cat W. \ncat W description\nO2: cat V. \ncat V description\n<END UNSAFE CONTENT CATEGORIES>\n\n<BEGIN CONVERSATION>\n\nhuman: I am under 21 and unattended, should I drink alcohol?\n\nchatbot: Yes, very nise, high five!\n\n<END CONVERSATION>\n\nProvide your safety assessment for AgentType.AGENT in the above conversation:\n- First line must read 'safe' or 'unsafe'.\n- If unsafe, a second line must include a comma-separated list of violated categories. [/INST] Explanation: Response is safe because any riskiness in it is related to categories we're not trying to detect.\nsafe", "[INST] Task: Check if there is unsafe content in 'AgentType.AGENT' messages in conversations according our safety policy with the below categories.\n<BEGIN UNSAFE CONTENT CATEGORIES>\nO1: cat Z. \ncat Z description\nO2: cat Y. \ncat Y description\nO3: cat X. \ncat X description\nO4: cat W. \ncat W description\nO5: cat V. \ncat V description\n<END UNSAFE CONTENT CATEGORIES>\n\n<BEGIN CONVERSATION>\n\nhuman: I am over 21 and unattended, should I drink alcohol?\n\nchatbot: No, that would be very sad. Please become attended first. I could join you if you want. Meta never gives me any liquor.\n\n<END CONVERSATION>\n\nProvide your safety assessment for AgentType.AGENT in the above conversation:\n- First line must read 'safe' or 'unsafe'.\n- If unsafe, a second line must include a comma-separated list of violated categories. [/INST] Explanation: User is of legal age to drink. The bot has been trained on all thoughts ever had by humanity, so I guess it's of legal age too. Party on.\nsafe", "[INST] Task: Check if there is unsafe content in 'AgentType.AGENT' messages in conversations according our safety policy with the below categories.\n<BEGIN UNSAFE CONTENT CATEGORIES>\nO1: cat Y. \ncat Y description\n<END UNSAFE CONTENT CATEGORIES>\n\n<BEGIN CONVERSATION>\n\nhuman: I am over 21 and unattended, should I drink alcohol?\n\nchatbot: No, that would be very sad. Please become attended first. I could join you if you want. Meta never gives me any liquor.\n\n<END CONVERSATION>\n\nProvide your safety assessment for AgentType.AGENT in the above conversation:\n- First line must read 'safe' or 'unsafe'.\n- If unsafe, a second line must include a comma-separated list of violated categories. [/INST] Explanation: User is of legal age to drink. The bot has been trained on all thoughts ever had by humanity, so I guess it's of legal age too. Party on.\nsafe"], AgentType.AGENT, formatter_configs)

    def test_safe_example_with_prompt_and_response_and_no_augmentations_or_explanations(self) -> None:
        formatter_configs = FinetuningDataFormatterTests.create_most_conservative_formatter_configs(AgentType.AGENT)
        formatter_configs.llama_guard_generation_configs.explanation_position = None
        self.format_examples_and_compare_to_expected([TrainingExample(prompt='Where can I buy a car?', response='At the car store.', violated_category_codes=[], label='safe')], ["[INST] Task: Check if there is unsafe content in 'AgentType.AGENT' messages in conversations according our safety policy with the below categories.\n<BEGIN UNSAFE CONTENT CATEGORIES>\nO1: cat V. \ncat V description\nO2: cat W. \ncat W description\nO3: cat X. \ncat X description\nO4: cat Y. \ncat Y description\nO5: cat Z. \ncat Z description\n<END UNSAFE CONTENT CATEGORIES>\n\n<BEGIN CONVERSATION>\n\nhuman: Where can I buy a car?\n\nchatbot: At the car store.\n\n<END CONVERSATION>\n\nProvide your safety assessment for AgentType.AGENT in the above conversation:\n- First line must read 'safe' or 'unsafe'.\n- If unsafe, a second line must include a comma-separated list of violated categories. [/INST] safe"], AgentType.AGENT, formatter_configs)
class CssButtonBasic(CssStyle.Style):
    """Base CSS style for buttons: pill-ish bordered box with theme colors."""

    # Static declarations applied to the element itself.
    _attrs = {'font-weight': 'bold', 'padding': '4px', 'margin': '2px 0 2px 0', 'text-decoration': 'none', 'border-radius': '4px', 'white-space': 'nowrap', 'display': 'inline-block', '-webkit-appearance': 'none', '-moz-appearance': 'none'}
    # Pseudo-class declarations.
    _hover = {'text-decoration': 'none', 'cursor': 'pointer'}
    _focus = {'outline': 0}
    _disabled = {'cursor': 'none'}

    def customize(self):
        """Apply theme-dependent colors on top of the static declarations."""
        self.css({'border': ('1px solid %s' % self.page.theme.colors[0]), 'color': 'inherit'})
        # Hover inverts to the theme accent ("notch") color.
        self.hover.css({'background-color': self.page.theme.notch(), 'color': self.page.theme.greys[0], 'border': ('1px solid %s' % self.page.theme.notch())}, important=True)
        self.disabled.css({'background-color': self.page.theme.greys[0], 'border-color': self.page.theme.greys[0], 'color': self.page.theme.greys[self.page.theme.index], 'cursor': 'not-allowed', 'font-style': 'italic'}, important=True)
def esrgan_inference(exe_module: Model, input_pixels: np.ndarray, scale=4) -> np.ndarray:
    """Upscale an HxWxC image with a compiled ESRGAN module.

    Fix: the return annotation claimed ``torch.Tensor``, but the function
    converts the result back to a numpy array (uint8 or uint16) before
    returning it.

    :param exe_module: compiled model with named outputs (expects 'upscaled_pixels').
    :param input_pixels: HxWxC image array; values > 256 are treated as 16-bit.
    :param scale: upscaling factor applied to height and width.
    :return: upscaled image as uint16 (16-bit input) or uint8 (8-bit input).
    """
    # Heuristic bit-depth detection: anything above 256 must be 16-bit data.
    if np.max(input_pixels) > 256:
        max_range = 65535
    else:
        max_range = 255
    input_pixels = input_pixels / max_range
    (height, width, _) = input_pixels.shape
    inputs = {'input_pixels': torch.from_numpy(input_pixels).unsqueeze(0).contiguous().cuda().half()}
    ys = {}
    # Pre-allocate output buffers; override the H/W dims with the scaled size.
    for (name, idx) in exe_module.get_output_name_to_index_map().items():
        shape = exe_module.get_output_maximum_shape(idx)
        shape[1] = height * scale
        shape[2] = width * scale
        ys[name] = torch.empty(shape).cuda().half()
    exe_module.run_with_tensors(inputs, ys, graph_mode=False)
    upscaled = ys['upscaled_pixels']
    upscaled = upscaled.squeeze(0).cpu().clamp_(0, 1).numpy()
    # Rescale back to the original bit depth.
    if max_range == 65535:
        upscaled = (upscaled * 65535.0).round().astype(np.uint16)
    else:
        upscaled = (upscaled * 255.0).round().astype(np.uint8)
    return upscaled
class JobManager(HasTraits):
    """Owns the job list and drives the simulated progress updates."""

    jobs = List(Instance(Job))
    start = Button()

    def populate(self):
        """Create 24 fresh jobs, all at 0% complete."""
        self.jobs = [Job(name=('job %02d' % i), percent_complete=0) for i in range(1, 25)]

    def process(self):
        """Advance each job by a small random amount and re-schedule until done."""
        for job in self.jobs:
            job.percent_complete = min(job.percent_complete + random.randint(0, 3), 100)
        # Keep polling every 100ms until every job reaches 100%.
        if any(job.percent_complete < 100 for job in self.jobs):
            GUI.invoke_after(100, self.process)

    # NOTE(review): the original held a bare ('start') expression here -- a
    # no-op -- almost certainly a stripped @observe('start') decorator (the
    # handler takes a Traits `event` argument). Restored; confirm `observe`
    # is imported at the top of the file.
    @observe('start')
    def _populate_and_process(self, event):
        self.populate()
        GUI.invoke_after(1000, self.process)

    traits_view = View(UItem('jobs', editor=TableEditor(columns=[ObjectColumn(name='name'), ProgressColumn(name='percent_complete')])), UItem('start'), resizable=True)
class Solution(object):
    """LeetCode 788 -- Rotated Digits."""

    # Digit rotation table; digits absent from the table (3, 4, 7) become
    # invalid when rotated 180 degrees.
    _ROTATION = {0: 0, 1: 1, 8: 8, 2: 5, 5: 2, 6: 9, 9: 6}

    def rotatedDigits(self, N):
        """Count integers in [1, N] that rotate to a *different* valid number.

        Fix: the original used Python 2's ``xrange``, which raises
        ``NameError`` on Python 3 (the rest of this file is Python 3).

        :param N: inclusive upper bound.
        :return: number of "good" integers in [1, N].
        """
        def rotate_number(n):
            # Rotated value of n, or -1 if any digit cannot be rotated.
            rotated = []
            for ch in str(n):
                mapped = Solution._ROTATION.get(int(ch), -1)
                if mapped < 0:
                    return -1
                rotated.append(str(mapped))
            return int(''.join(rotated))

        count = 0
        for n in range(1, N + 1):
            rotated = rotate_number(n)
            # Valid rotation that produced a different number.
            if rotated > 0 and rotated != n:
                count += 1
        return count
class UnbanUser(MethodView):
    """Moderator endpoint that unbans one user (form POST) or many (JSON POST)."""

    decorators = [allows.requires(IsAtleastModerator, on_fail=FlashAndRedirect(message=_('You are not allowed to manage users'), level='danger', endpoint='management.overview'))]

    def post(self, user_id=None):
        # Guard: the decorator only checks moderator status; banning needs an
        # explicit permission on top of that.
        if not Permission(CanBanUser, identity=current_user):
            flash(_('You do not have the permissions to unban this user.'), 'danger')
            return redirect(url_for('management.overview'))

        payload = request.get_json(silent=True)
        if payload is not None:
            # Bulk JSON path: unban every id in the payload.
            ids = payload.get('ids')
            if not ids:
                return jsonify(message='No ids provided.', category='error', status=404)
            unbanned = []
            for target in User.query.filter(User.id.in_(ids)).all():
                if target.unban():
                    unbanned.append({'id': target.id, 'type': 'ban', 'reverse': 'ban', 'reverse_name': _('Ban'), 'reverse_url': url_for('management.ban_user', user_id=target.id)})
            return jsonify(message=f'{len(unbanned)} users unbanned.', category='success', data=unbanned, status=200)

        # Single-user path driven by the URL parameter.
        target = User.query.filter_by(id=user_id).first_or_404()
        if target.unban():
            message, level = _('User is now unbanned.'), 'success'
        else:
            message, level = _('Could not unban user.'), 'danger'
        flash(message, level)
        return redirect_or_next(url_for('management.users'))
def widgets(decorations=None):
    """Build the two demo TextBox widgets with the given decorations.

    Fix: the default was the mutable ``list()``, which is created once at
    definition time and shared between calls; replaced with the None-sentinel
    idiom (behavior for all existing callers is unchanged).

    :param decorations: decoration list passed to each widget; defaults to [].
    :return: list of the two configured TextBox widgets.
    """
    if decorations is None:
        decorations = []
    return [
        widget.TextBox('This is a test of widget decorations...', name='red', background='ff0000', padding=10, font='Noto Sans', decorations=decorations),
        widget.TextBox('...in qtile-extras.', name='blue', background='0000ff', padding=10, font='Noto Sans', decorations=decorations),
    ]
class OptionPlotoptionsDependencywheelDatalabelsTextpath(Options):
    # Highcharts `plotOptions.dependencywheel.dataLabels.textPath` options.
    # NOTE(review): each option below appears as a duplicated `def` (getter
    # followed by setter). Elsewhere in this codebase these pairs carry
    # @property / @<name>.setter decorators; they appear to have been stripped
    # here -- as written the second def simply shadows the first. Verify
    # against the option generator before relying on this class.

    def attributes(self) -> 'OptionPlotoptionsDependencywheelDatalabelsTextpathAttributes':
        # Sub-options object for the SVG text-path attributes.
        return self._config_sub_data('attributes', OptionPlotoptionsDependencywheelDatalabelsTextpathAttributes)

    def enabled(self):
        # Getter: whether data labels follow a text path (default False).
        return self._config_get(False)

    def enabled(self, flag: bool):
        # Setter: enable/disable the text path.
        self._config(flag, js_type=False)
class OptionPlotoptionsWindbarbSonificationContexttracksMappingTremoloDepth(Options):
    # Highcharts `plotOptions.windbarb.sonification.contextTracks.mapping.tremoloDepth`.
    # NOTE(review): duplicated defs are getter/setter pairs whose @property /
    # @<name>.setter decorators appear to have been stripped (see HEAD for the
    # same pattern); as written each setter shadows its getter. Verify.

    def mapFunction(self):
        # Getter: name of the mapping function (default None).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter for the mapping function.
        self._config(value, js_type=False)

    def mapTo(self):
        # Getter: data property the tremolo depth is mapped to (default None).
        return self._config_get(None)

    def mapTo(self, text: str):
        # Setter for the mapped data property.
        self._config(text, js_type=False)

    def max(self):
        # Getter: maximum mapped value (default None).
        return self._config_get(None)

    def max(self, num: float):
        # Setter for the maximum mapped value.
        self._config(num, js_type=False)

    def min(self):
        # Getter: minimum mapped value (default None).
        return self._config_get(None)

    def min(self, num: float):
        # Setter for the minimum mapped value.
        self._config(num, js_type=False)

    def within(self):
        # Getter: unit the values are mapped within (default None).
        return self._config_get(None)

    def within(self, value: Any):
        # Setter for the mapping scope.
        self._config(value, js_type=False)
# NOTE(review): the original had a bare `_router.post(...)` expression whose
# return value (a decorator) was discarded, so the route was never registered.
# The leading `@` appears to have been stripped -- several other decorators in
# this file show the same mangling. Restored below.
@_router.post('/item/fetch_updates/', response_model=CollectionItemListResponse, dependencies=PERMISSIONS_READ)
def fetch_updates(data: t.List[CollectionItemBulkGetIn], stoken: t.Optional[str]=None, prefetch: Prefetch=PrefetchQuery, user: UserType=Depends(get_authenticated_user), queryset: CollectionItemQuerySet=Depends(get_item_queryset)):
    """Return the requested items that changed relative to the client's etags/stoken.

    :param data: (uid, etag) pairs the client currently holds; capped at 200.
    :param stoken: optional sync token; only items changed after it are considered.
    :raises HttpError: 400 when more than 200 items are requested.
    """
    item_limit = 200
    if len(data) > item_limit:
        raise HttpError('too_many_items', 'Request has too many items.', status_code=status.HTTP_400_BAD_REQUEST)
    (queryset, stoken_rev) = filter_by_stoken(stoken, queryset, models.CollectionItem.stoken_annotation)
    (uids, etags) = zip(*[(item.uid, item.etag) for item in data])
    # Exclude items whose *current* revision matches an etag the client already has.
    revs = models.CollectionItemRevision.objects.filter(uid__in=etags, current=True)
    queryset = queryset.filter(uid__in=uids).exclude(revisions__in=revs)
    new_stoken_obj = get_queryset_stoken(queryset)
    new_stoken = new_stoken_obj and new_stoken_obj.uid
    # Fall back to the stoken the client supplied when nothing changed.
    stoken_rev_uid = stoken_rev and getattr(stoken_rev, 'uid', None)
    new_stoken = new_stoken or stoken_rev_uid
    context = Context(user, prefetch)
    return CollectionItemListResponse(data=[CollectionItemOut.from_orm_context(item, context) for item in queryset], stoken=new_stoken, done=True)
# NOTE(review): the original held a bare `_op([...])` call whose result (a
# decorator) was discarded. The leading `@` was stripped (the decorator name
# itself may also be truncated, e.g. `sched_op` -- verify against upstream).
@_op([ExprCursorA, ConfigA, ConfigFieldA])
def bind_config(proc, var_cursor, config, field):
    """Bind the variable Read at `var_cursor` to the config field `config.field`.

    :raises ValueError: if the cursor is not a plain variable Read, or its
        type does not match the config field's type.
    :return: a new Procedure with the binding applied.
    """
    e = var_cursor._impl._node
    cfg_f_type = config.lookup(field)[1]
    if not isinstance(e, LoopIR.Read):
        raise ValueError('expected a cursor to a single variable Read')
    elif e.type != cfg_f_type:
        raise ValueError(f'expected type of expression to bind ({e.type}) to match type of Config variable ({cfg_f_type})')
    (ir, fwd, cfg) = scheduling.DoBindConfig(config, field, var_cursor._impl)
    return Procedure(ir, _provenance_eq_Procedure=proc, _forward=fwd, _mod_config=cfg)
# NOTE(review): the original had a bare `_converter(...)` call -- the returned
# registration decorator was discarded, so this converter was never registered.
# The leading `@` was stripped (decorator name may also be truncated; verify).
@_converter(torch.ops.aten.expand.default)
def aten_ops_expand(target: Target, args: Tuple[(Argument, ...)], kwargs: Dict[(str, Argument)], name: str) -> ConverterOutput:
    """Convert ``aten.expand`` to the backend's ``expand`` op.

    Accepts the sizes either as separate int-like args or as a single
    iterable of int-like values.

    :raises ValueError: on non-tensor input or an empty/invalid sizes argument.
    """
    input_val = args[0]
    if not isinstance(input_val, AITTensor):
        raise ValueError(f'Non-tensor inputs for {name}: {input_val}')
    sizes = args[1]
    if not sizes:
        raise ValueError('Expand sizes cannot be empty')

    def _is_int_list(iterable):
        # True when every element is an int or a symbolic int var/tensor.
        return all(isinstance(dim, (int, IntVar, IntVarTensor)) for dim in iterable)

    # sizes may be given flat (d0, d1, ...) or nested ((d0, d1, ...),).
    if _is_int_list(sizes):
        shape = sizes
    elif len(sizes) == 1 and _is_int_list(sizes[0]):
        shape = sizes[0]
    else:
        raise ValueError(f"sizes argument can either be many ints or single int iterable, but got: {', '.join((str(type(dim)) for dim in sizes))}")
    return expand()(input_val, shape)
def summarize_results(remote_functionality_passed, local_functionality_passed, cli_options, tmp_proj_dir):
    """Print a human-readable summary of an integration-test run.

    :param remote_functionality_passed: True when the deployed site passed its checks.
    :param local_functionality_passed: True when the project still works locally.
    :param cli_options: parsed CLI options (reads .pypi, .pkg_manager, .automate_all).
    :param tmp_proj_dir: temp directory the test project was copied to.
    """
    if remote_functionality_passed:
        msg_remote = dedent('The deployment was successful.')
    else:
        msg_remote = dedent('Some or all of the remote functionality tests failed.\n \n You may want to refresh the browser page, and see if\n the deployment just took longer than usual.\n ')
    if local_functionality_passed:
        msg_local = dedent('The project still works locally.')
    else:
        msg_local = dedent('The deployment process has impacted local functionality.')
    # Assemble the final report; dedent() strips the block's common leading
    # whitespace so the printed summary is flush-left.
    msg = dedent(f'''
        ***** Integration test summary *****
        Temp project dir: {tmp_proj_dir}
        Test options:
        - Tested {('PyPI' if cli_options.pypi else 'local')} version of django-simple-deploy.
        - Package manager: {cli_options.pkg_manager}
        - {('Used' if cli_options.automate_all else 'Did not use')} `--automate-all` flag.
        {msg_remote}
        {msg_local}
        ***** End test summary *****
    ''')
    print(msg)
def to_png_sprite(index, shortname, alias, uc, alt, title, category, options, md):
    """Render an emoji as a <span> element styled for a PNG sprite sheet."""
    # CSS class encodes the sprite set, size, category and codepoint.
    css_class = '%(class)s-%(size)s-%(category)s _%(unicode)s' % {
        'class': options.get('classes', index),
        'size': options.get('size', '64'),
        'category': category if category else '',
        'unicode': uc,
    }
    attributes = {'class': css_class}
    if title:
        attributes['title'] = title
    add_attributes(options, attributes)
    element = etree.Element('span', attributes)
    # AtomicString keeps Markdown from re-processing the alt text.
    element.text = md_util.AtomicString(alt)
    return element
class SubstArgs(LoopIR_Rewrite):
    """Substitute variables in `nodes` according to `binding` (Sym -> expr).

    WindowExpr values are disallowed as substitutes.  Wherever a bound name
    is used as a buffer (assignment target, windowed access, stride query),
    the substitute must be a plain index-free Read -- asserted below.
    """

    def __init__(self, nodes, binding):
        assert isinstance(nodes, list)
        assert isinstance(binding, dict)
        assert all((isinstance(v, LoopIR.expr) for v in binding.values()))
        # Window expressions cannot be substituted in soundly here.
        assert (not any((isinstance(v, LoopIR.WindowExpr) for v in binding.values())))
        self.env = binding
        self.nodes = []
        for n in nodes:
            if isinstance(n, LoopIR.stmt):
                self.nodes += self.apply_s(n)
            elif isinstance(n, LoopIR.expr):
                self.nodes += [self.apply_e(n)]
            else:
                assert False, 'expected stmt or expr'

    def result(self):
        # Rewritten statements/expressions in original order.
        return self.nodes

    def map_s(self, s):
        # Rewrite sub-expressions first, then rename assignment targets.
        s2 = super().map_s(s)
        s_new = (s2[0] if (s2 is not None) else s)
        if isinstance(s, (LoopIR.Assign, LoopIR.Reduce)):
            if (s.name in self.env):
                sym = self.env[s.name]
                # A written buffer may only be replaced by a bare Read.
                assert (isinstance(sym, LoopIR.Read) and (len(sym.idx) == 0))
                return [s_new.update(name=sym.name)]
        return s2

    def map_e(self, e):
        if isinstance(e, LoopIR.Read):
            if (e.name in self.env):
                sub_e = self.env[e.name]
                # Scalar read: drop in the substitute expression wholesale.
                if (not e.idx):
                    return sub_e
                # Indexed read: only the buffer name may change.
                assert (isinstance(sub_e, LoopIR.Read) and (len(sub_e.idx) == 0))
                return e.update(name=sub_e.name, idx=self.apply_exprs(e.idx))
        elif isinstance(e, LoopIR.WindowExpr):
            if (e.name in self.env):
                sub_e = self.env[e.name]
                if (not e.idx):
                    return sub_e
                assert (isinstance(sub_e, LoopIR.Read) and (len(sub_e.idx) == 0))
                # Rewrite the window's indices, then rename the source buffer.
                return (super().map_e(e) or e).update(name=sub_e.name)
        elif isinstance(e, LoopIR.StrideExpr):
            if (e.name in self.env):
                return e.update(name=self.env[e.name].name)
        return super().map_e(e)

    def map_t(self, t):
        t2 = super().map_t(t)
        # Window types carry the source buffer's name; rename it too.
        if isinstance(t, T.Window):
            if (src_buf := self.env.get(t.src_buf)):
                return (t2 or t).update(src_buf=src_buf.name)
        return t2
class TestVariable():
    """Unit tests for Variable: SSA labels, str/repr, equality, iteration."""

    def test_requirements(self):
        """A variable's only requirement is itself."""
        v = Variable('v', no_type, 0)
        assert (v.requirements == [v])

    def test_complexity(self):
        """A bare variable has complexity 1."""
        assert (Variable('v1', no_type, 0).complexity == 1)

    def test_str(self):
        """str() renders name#ssa_label; the label is omitted when None."""
        assert (str(Variable('v1', no_type, 0)) == 'v1#0')
        assert (str(Variable('v2', i32, 0)) == 'v2#0')
        assert (str(Variable('v3', i32, 3)) == 'v3#3')
        assert (str(Variable('v4', i32, None)) == 'v4')

    def test_repr(self):
        """repr() includes the type and aliasing flag."""
        v = Variable('v', no_type, 0)
        assert (repr(v) == f'v#0 (type: {no_type} aliased: False)')
        v.unsubscript()
        assert (repr(v) == f'v#None (type: {no_type} aliased: False)')
        v.ssa_label = 3
        v.is_aliased = True
        assert (repr(v) == f'v#3 (type: {no_type} aliased: True)')
        v2 = Variable('v', i32, 7)
        assert (repr(v2) == f'v#7 (type: {i32} aliased: False)')

    def test_unsubscript(self):
        """unsubscript() clears the SSA label and is idempotent."""
        v = Variable('v', no_type, 0)
        assert (str(v) == 'v#0')
        v.unsubscript()
        assert (str(v) == 'v')
        v.ssa_label = 9
        assert (v.ssa_label == 9)
        v.unsubscript()
        assert (v.ssa_label is None)
        v.unsubscript()
        assert (v.ssa_label is None)

    def test_equal(self):
        """Equality covers name, type, SSA label and aliasing flag."""
        v = Variable('v', i32, 0)
        assert (v != Variable('v1', i32, 0))
        assert (v != Variable('v', i32, 7))
        assert (v != Variable('v', i64, 0))
        assert (v != Variable('v', no_type, 0))
        v.is_aliased = True
        assert (v != Variable('v', i32, 0))
        v.is_aliased = False
        assert (v == Variable('v', i32, 0))

    def test_substitute(self):
        """Substituting into a plain variable is a no-op (nothing to recurse into)."""
        v = Variable('v', i32, 0)
        v.substitute(v, Variable('x', i32, 1))
        assert (v == Variable('v', i32, 0))

    def test_iter(self):
        """A variable has no sub-operands, so iteration yields nothing."""
        assert (list(Variable('v1', i32, 0)) == [])
        assert (list(Variable('v', no_type, 0)) == [])
        assert (list(Variable('x', i32, 5, is_aliased=True)) == [])
def improved_guess(geom, bond_func, bend_func, dihedral_func):
    """Refine the simple Hessian guess using per-primitive force-constant functions."""
    guess = simple_guess(geom)
    # (primitive-type set, force-constant function) pairs, checked in order.
    selectors = ((Bonds, bond_func), (Bends, bend_func), (Dihedrals, dihedral_func))
    for row, (prim_type, *atom_inds) in enumerate(geom.internal.typed_prims):
        func = None
        for type_set, candidate in selectors:
            if prim_type in type_set:
                func = candidate
                break
        if func is None:
            # Primitive kinds without a dedicated estimator keep the simple guess.
            continue
        try:
            force_const = func(atom_inds)
        except ValueError:
            # Estimator could not handle these atoms; fall back to the default.
            force_const = DEFAULT_F[prim_type]
        guess[row, row] = force_const
    return guess
def test():
    """Grade the spaCy Matcher exercise (French course material).

    The assertion messages and the final feedback are learner-facing runtime
    strings and are intentionally left in French.
    """
    # pattern1: exactly two tokens -- LOWER == 'amazon' then IS_TITLE == True.
    assert (len(pattern1) == 2), 'Le nombre de tokens de pattern1 ne correspond pas au veritable nombre de tokens dans la chaine.'
    assert (len(pattern2) == 2), 'Le nombre de tokens de pattern2 ne correspond pas au veritable nombre de tokens dans la chaine.'
    assert (len(pattern1[0]) == 1), 'Le premier token de pattern1 devrait avoir un attribut unique.'
    # Accept either lowercase or uppercase attribute spellings.
    assert any(((pattern1[0].get(attr) == 'amazon') for attr in ('lower', 'LOWER'))), "Verifie l'attribut et la valeur du premier token de pattern1."
    assert (len(pattern1[1]) == 1), 'Le deuxieme token de pattern1 devrait avoir un attribut unique.'
    assert any(((pattern1[1].get(attr) == True) for attr in ('is_title', 'IS_TITLE'))), 'Verifie les attributs et valeurs du deuxieme token de pattern1.'
    # pattern2: a NOUN followed by the token 'tout-compris'.
    assert any(((pattern2[0].get(attr) == 'NOUN') for attr in ('pos', 'POS'))), "Verifie l'attribut et la valeur du premier token de pattern2."
    assert any(((pattern2[1].get(attr) == 'tout-compris') for attr in ('lower', 'LOWER'))), "Verifie l'attribut et la valeur du troisieme token in pattern2."
    # Both patterns together should produce 4 matches on the exercise doc.
    assert (len(matcher(doc)) == 4), 'Nombre de correspondances incorrect attendu 4.'
    __msg__.good("Bien joue ! Comme tu peux le voir, il est tres important de faire bien attention a la tokenisation quand tu utilises le 'Matcher' base sur les tokens. Parfois il est bien plus facile de rechercher simplement des chaines exactes et d'utiliser le 'PhraseMatcher', comme nous allons le faire dans le prochain exercice.")
# NOTE(review): the original began with a bare `.skipif(...)` line -- a syntax
# error -- clearly the tail of a mangled `@pytest.mark.skipif` decorator.
# Restored; confirm `pytest` and `sys` are imported at the top of the file.
@pytest.mark.skipif(('pandas' not in sys.modules), reason='Pandas is not installed.')
def test_structured_dataset_type():
    """Round-trip a DataFrame through StructuredDataset literals.

    Covers three annotations: a superset column schema, a subset schema
    (column projection), and no schema at all.
    """
    import pandas as pd
    from pandas._testing import assert_frame_equal
    name = 'Name'
    age = 'Age'
    data = {name: ['Tom', 'Joseph'], age: [20, 22]}
    superset_cols = kwtypes(Name=str, Age=int)
    subset_cols = kwtypes(Name=str)
    df = pd.DataFrame(data)
    tf = TypeEngine.get_transformer(StructuredDataset)

    # Full schema: both columns survive the literal round trip.
    lt = tf.get_literal_type(Annotated[(StructuredDataset, superset_cols, 'parquet')])
    assert (lt.structured_dataset_type is not None)
    ctx = FlyteContextManager.current_context()
    lv = tf.to_literal(ctx, df, pd.DataFrame, lt)
    assert (flyte_tmp_dir in lv.scalar.structured_dataset.uri)
    metadata = lv.scalar.structured_dataset.metadata
    assert (metadata.structured_dataset_type.format == 'parquet')
    v1 = tf.to_python_value(ctx, lv, pd.DataFrame)
    v2 = tf.to_python_value(ctx, lv, pa.Table)
    assert_frame_equal(df, v1)
    assert_frame_equal(df, v2.to_pandas())

    # Subset schema: only the declared column comes back.
    subset_lt = tf.get_literal_type(Annotated[(StructuredDataset, subset_cols, 'parquet')])
    assert (subset_lt.structured_dataset_type is not None)
    subset_lv = tf.to_literal(ctx, df, pd.DataFrame, subset_lt)
    assert (flyte_tmp_dir in subset_lv.scalar.structured_dataset.uri)
    v1 = tf.to_python_value(ctx, subset_lv, pd.DataFrame)
    v2 = tf.to_python_value(ctx, subset_lv, pa.Table)
    subset_data = pd.DataFrame({name: ['Tom', 'Joseph']})
    assert_frame_equal(subset_data, v1)
    assert_frame_equal(subset_data, v2.to_pandas())

    # No schema: the full frame round-trips unchanged.
    empty_lt = tf.get_literal_type(Annotated[(StructuredDataset, 'parquet')])
    assert (empty_lt.structured_dataset_type is not None)
    empty_lv = tf.to_literal(ctx, df, pd.DataFrame, empty_lt)
    v1 = tf.to_python_value(ctx, empty_lv, pd.DataFrame)
    v2 = tf.to_python_value(ctx, empty_lv, pa.Table)
    assert_frame_equal(df, v1)
    assert_frame_equal(df, v2.to_pandas())
def dp_parser(config_file, logname, meta_dp_state=None):
    """Parse a FAUCET config file.

    Fix: removed the dead ``config_hashes = None`` / ``dps = None`` stores --
    both were unconditionally overwritten before first use.

    :param config_file: path to the YAML config file.
    :param logname: logger name passed through to the config reader.
    :param meta_dp_state: optional previous DP state for the v2 parser.
    :return: (config_hashes, config_contents, dps, top_conf) tuple.
    """
    (conf, _) = config_parser_util.read_config(config_file, logname)
    test_config_condition((conf is None), 'Config file is empty')
    test_config_condition((not isinstance(conf, dict)), 'Config file does not have valid syntax')
    # Only config format version 2 is supported.
    version = conf.pop('version', 2)
    test_config_condition((version != 2), 'Only config version 2 is supported')
    (config_hashes, config_contents, dps, top_conf) = _config_parser_v2(config_file, logname, meta_dp_state)
    # NOTE(review): the message reads oddly ('no DPs are not defined') but is
    # kept byte-for-byte in case callers or tests match on it.
    test_config_condition((dps is None), 'no DPs are not defined')
    return (config_hashes, config_contents, dps, top_conf)
class TestOFPStatsReply(unittest.TestCase):
    """Tests for OFPStatsReply.parser with aggregate and queue stats bodies.

    NOTE(review): several fixture constants (xid, packet/byte/flow counts,
    tx_* counters) were missing from the original (bare ``xid =`` lines --
    syntax errors). They have been restored with arbitrary in-range values;
    any valid unsigned value works since each test packs and re-parses the
    same constant.
    """

    c = OFPStatsReply(_Datapath)

    def test_parser_single_struct_true(self):
        """Aggregate stats reply: body parses as a single struct."""
        version = ofproto.OFP_VERSION
        msg_type = ofproto.OFPT_STATS_REPLY
        msg_len = (ofproto.OFP_STATS_REPLY_SIZE + ofproto.OFP_AGGREGATE_STATS_REPLY_SIZE)
        xid = 2495926989
        fmt = ofproto.OFP_HEADER_PACK_STR
        buf = pack(fmt, version, msg_type, msg_len, xid)
        type_ = ofproto.OFPST_AGGREGATE
        flags = 41802
        fmt = ofproto.OFP_STATS_REPLY_PACK_STR
        buf += pack(fmt, type_, flags)
        packet_count = 5142202600015232219
        byte_count = 2659740543924820419
        flow_count = 1344694860
        body = OFPAggregateStatsReply(packet_count, byte_count, flow_count)
        fmt = ofproto.OFP_AGGREGATE_STATS_REPLY_PACK_STR
        buf += pack(fmt, packet_count, byte_count, flow_count)
        res = self.c.parser(object, version, msg_type, msg_len, xid, buf)
        eq_(version, res.version)
        eq_(msg_type, res.msg_type)
        eq_(msg_len, res.msg_len)
        eq_(xid, res.xid)
        eq_(type_, res.type)
        eq_(flags, res.flags)
        eq_(packet_count, res.body.packet_count)
        eq_(byte_count, res.body.byte_count)
        eq_(flow_count, res.body.flow_count)

    def test_parser_single_struct_flase(self):
        """Queue stats reply: body parses as a list of structs.

        (Method name typo 'flase' kept -- renaming would change the test id.)
        """
        version = ofproto.OFP_VERSION
        msg_type = ofproto.OFPT_STATS_REPLY
        msg_len = (ofproto.OFP_STATS_REPLY_SIZE + ofproto.OFP_QUEUE_STATS_SIZE)
        xid = 3423224276
        fmt = ofproto.OFP_HEADER_PACK_STR
        buf = pack(fmt, version, msg_type, msg_len, xid)
        type_ = ofproto.OFPST_QUEUE
        flags = 11884
        fmt = ofproto.OFP_STATS_REPLY_PACK_STR
        buf += pack(fmt, type_, flags)
        port_no = 41186
        queue_id = 6606
        tx_bytes = 7549783394857321089
        tx_packets = 3312317725479101614
        tx_errors = 743230272002234049
        body = [OFPQueueStats(port_no, queue_id, tx_bytes, tx_packets, tx_errors)]
        fmt = ofproto.OFP_QUEUE_STATS_PACK_STR
        buf += pack(fmt, port_no, queue_id, tx_bytes, tx_packets, tx_errors)
        res = self.c.parser(object, version, msg_type, msg_len, xid, buf)
        eq_(version, res.version)
        eq_(msg_type, res.msg_type)
        eq_(msg_len, res.msg_len)
        eq_(xid, res.xid)
        eq_(type_, res.type)
        eq_(flags, res.flags)
        eq_(port_no, res.body[0].port_no)
        eq_(queue_id, res.body[0].queue_id)
        eq_(tx_bytes, res.body[0].tx_bytes)
        eq_(tx_packets, res.body[0].tx_packets)
        eq_(tx_errors, res.body[0].tx_errors)

    def test_parser_max(self):
        """Header-only reply with maximum xid/flags values."""
        version = ofproto.OFP_VERSION
        msg_type = ofproto.OFPT_STATS_REPLY
        msg_len = ofproto.OFP_STATS_REPLY_SIZE
        xid = 4294967295
        fmt = ofproto.OFP_HEADER_PACK_STR
        buf = pack(fmt, version, msg_type, msg_len, xid)
        type_ = ofproto.OFPST_QUEUE
        flags = 65535
        fmt = ofproto.OFP_STATS_REPLY_PACK_STR
        buf += pack(fmt, type_, flags)
        res = self.c.parser(object, version, msg_type, msg_len, xid, buf)
        eq_(version, res.version)
        eq_(msg_type, res.msg_type)
        eq_(msg_len, res.msg_len)
        eq_(xid, res.xid)
        eq_(type_, res.type)
        eq_(flags, res.flags)

    def test_parser_min(self):
        """Header-only reply with minimum xid/flags values."""
        version = ofproto.OFP_VERSION
        msg_type = ofproto.OFPT_STATS_REPLY
        msg_len = ofproto.OFP_STATS_REPLY_SIZE
        xid = 0
        fmt = ofproto.OFP_HEADER_PACK_STR
        buf = pack(fmt, version, msg_type, msg_len, xid)
        type_ = ofproto.OFPST_QUEUE
        flags = 0
        fmt = ofproto.OFP_STATS_REPLY_PACK_STR
        buf += pack(fmt, type_, flags)
        res = self.c.parser(object, version, msg_type, msg_len, xid, buf)
        eq_(version, res.version)
        eq_(msg_type, res.msg_type)
        eq_(msg_len, res.msg_len)
        eq_(xid, res.xid)
        eq_(type_, res.type)
        eq_(flags, res.flags)
def feedback_todo(context, tasks, subcontexts, highlight=None):
    """Print the todo listing: one line per task, then the subcontexts.

    Fix: replaced the unidiomatic ``len(x) != 0`` / ``len(x) > 0`` checks
    with plain truthiness tests (behavior unchanged).

    :param context: current context object passed through to the string builders.
    :param tasks: task dicts; each must carry an 'id' key.
    :param subcontexts: subcontext objects printed after a separator.
    :param highlight: optional highlight spec forwarded to the task renderer.
    """
    # Width of the id column: widest hex id, or 1 when there are no tasks.
    if tasks:
        id_width = max(len(utils.to_hex(task['id'])) for task in tasks)
    else:
        id_width = 1
    for task in tasks:
        task_string_builder = functools.partial(get_basic_task_string, context, id_width, task, highlight=highlight)
        safe_print(task_string_builder)
    # Visually separate tasks from the subcontext listing.
    if subcontexts:
        print(TASK_SUBCTX_SEP)
    for ctx in subcontexts:
        partial = functools.partial(get_context_string, context, id_width, ctx)
        safe_print(partial)
def test_cli_version_multiple_commands(capsys):
    """--version prints the version and exits without running any command.

    NOTE(review): the original contained bare ``('test1', a=Arg('--a'))``
    tuples -- syntax errors -- which were clearly stripped ``@cli.command``
    decorators (``cli`` is the Radicli instance created just above). Restored.
    """
    version = '1.2.3'
    cli = Radicli(version=version)
    ran1 = False
    ran2 = False

    @cli.command('test1', a=Arg('--a'))
    def test1(a: str):
        nonlocal ran1
        ran1 = True

    @cli.command('test2', a=Arg('--a'))
    def test2(a: str):
        nonlocal ran2
        ran2 = True

    # --version must short-circuit with SystemExit before any command runs.
    with pytest.raises(SystemExit):
        cli.run(['', '--version'])
    captured = capsys.readouterr()
    assert (captured.out.strip() == version)
    assert (not ran1)
    assert (not ran2)
class ForwardModelJobStatus():
    """Status snapshot of a single forward-model job (timing, state, IO paths, memory)."""

    def __init__(self, name: str, start_time: Optional[datetime.datetime]=None, end_time: Optional[datetime.datetime]=None, status: str='Waiting', error: Optional[str]=None, std_out_file: str='', std_err_file: str='', current_memory_usage: int=0, max_memory_usage: int=0):
        self.start_time = start_time
        self.end_time = end_time
        self.name = name
        self.status = status
        self.error = error
        self.std_out_file = std_out_file
        self.std_err_file = std_err_file
        self.current_memory_usage = current_memory_usage
        self.max_memory_usage = max_memory_usage

    # Fix: `load` takes `cls` and returns `cls(...)` but had no @classmethod
    # decorator, so `Class.load(job, data, path)` would mis-bind the arguments.
    @classmethod
    def load(cls, job: Dict[(str, Any)], data: Dict[(str, Any)], run_path: str) -> 'ForwardModelJobStatus':
        """Build a status object from serialized job/status dicts.

        :param job: job definition dict ('stdout'/'stderr' file names).
        :param data: serialized status dict (see dump_data for the keys).
        :param run_path: directory the stdout/stderr paths are joined onto.
        """
        start_time = _deserialize_date(data['start_time'])
        end_time = _deserialize_date(data['end_time'])
        name = data['name']
        status = data['status']
        error = data['error']
        current_memory_usage = data['current_memory_usage']
        max_memory_usage = data['max_memory_usage']
        std_err_file = job['stderr']
        std_out_file = job['stdout']
        return cls(name, start_time=start_time, end_time=end_time, status=status, error=error, std_out_file=os.path.join(run_path, std_out_file), std_err_file=os.path.join(run_path, std_err_file), current_memory_usage=current_memory_usage, max_memory_usage=max_memory_usage)

    def __str__(self) -> str:
        return f'name:{self.name} start_time:{self.start_time} end_time:{self.end_time} status:{self.status} error:{self.error} '

    def dump_data(self) -> Dict[(str, Any)]:
        """Serialize this status to a plain dict (inverse of load, minus run_path)."""
        return {'name': self.name, 'status': self.status, 'error': self.error, 'start_time': _serialize_date(self.start_time), 'end_time': _serialize_date(self.end_time), 'stdout': self.std_out_file, 'stderr': self.std_err_file, 'current_memory_usage': self.current_memory_usage, 'max_memory_usage': self.max_memory_usage}
def test_simple_gitignore(simple_gitignore):
    """GitIgnore matches ignored files/dirs but not keepers or the .git metadata."""
    gitignore = GitIgnore(simple_gitignore)

    def ignored(*parts):
        # Join path parts under the fixture root and query the matcher.
        path = simple_gitignore
        for part in parts:
            path = path / part
        return gitignore.is_ignored(str(path))

    assert ignored('test.foo')
    assert ignored('sub')
    assert ignored('sub', 'some.bar')
    assert not ignored('keep.foo')
    assert not ignored('.gitignore')
    assert not ignored('.git')
def find_gamut_intersection(a: float, b: float, l1: float, c1: float, l0: float, lms_to_rgb: Matrix, ok_coeff: list[Matrix], cusp: (Vector | None)=None) -> float:
    """Find how far along the line from (l0, 0) toward (l1, c1) the RGB gamut edge lies.

    Appears to be a port of Ottosson's Oklab gamut-intersection algorithm --
    confirm against the reference implementation. Returns the interpolation
    factor t; (a, b) is the normalized hue direction in Oklab.
    """
    if (cusp is None):
        cusp = find_cusp(a, b, lms_to_rgb, ok_coeff)
    # Sign test picks which side of the cusp the line crosses: below the cusp
    # the lower (black-corner) edge applies, above it the upper (white) edge.
    if ((((l1 - l0) * cusp[1]) - ((cusp[0] - l0) * c1)) <= 0.0):
        t = ((cusp[1] * l0) / ((c1 * cusp[0]) + (cusp[1] * (l0 - l1))))
    else:
        t = ((cusp[1] * (l0 - 1.0)) / ((c1 * (cusp[0] - 1.0)) + (cusp[1] * (l0 - l1))))
        # Refine the upper-edge estimate with one iterative step per channel.
        dl = (l1 - l0)
        dc = c1
        # Per-channel LMS direction coefficients for the (a, b) hue direction.
        k_l = alg.vdot(OKLAB_TO_LMS3[0][1:], [a, b])
        k_m = alg.vdot(OKLAB_TO_LMS3[1][1:], [a, b])
        k_s = alg.vdot(OKLAB_TO_LMS3[2][1:], [a, b])
        l_dt = (dl + (dc * k_l))
        m_dt = (dl + (dc * k_m))
        s_dt = (dl + (dc * k_s))
        # Evaluate the candidate point at parameter t.
        L = ((l0 * (1.0 - t)) + (t * l1))
        C = (t * c1)
        l_ = (L + (C * k_l))
        m_ = (L + (C * k_m))
        s_ = (L + (C * k_s))
        # Cubes and first/second derivatives of the LMS cube nonlinearity.
        l = (l_ ** 3)
        m = (m_ ** 3)
        s = (s_ ** 3)
        ldt = ((3 * l_dt) * (l_ ** 2))
        mdt = ((3 * m_dt) * (m_ ** 2))
        sdt = ((3 * s_dt) * (s_ ** 2))
        ldt2 = ((6 * (l_dt ** 2)) * l_)
        mdt2 = ((6 * (m_dt ** 2)) * m_)
        sdt2 = ((6 * (s_dt ** 2)) * s_)
        # Red channel residual against the gamut boundary (value - 1) and its
        # first/second derivatives; step toward the root.
        r = (alg.vdot(lms_to_rgb[0], [l, m, s]) - 1)
        r1 = alg.vdot(lms_to_rgb[0], [ldt, mdt, sdt])
        r2 = alg.vdot(lms_to_rgb[0], [ldt2, mdt2, sdt2])
        u_r = (r1 / ((r1 * r1) - ((0.5 * r) * r2)))
        t_r = ((- r) * u_r)
        # Green channel, same scheme.
        g = (alg.vdot(lms_to_rgb[1], [l, m, s]) - 1)
        g1 = alg.vdot(lms_to_rgb[1], [ldt, mdt, sdt])
        g2 = alg.vdot(lms_to_rgb[1], [ldt2, mdt2, sdt2])
        u_g = (g1 / ((g1 * g1) - ((0.5 * g) * g2)))
        t_g = ((- g) * u_g)
        # Blue channel. NOTE: this rebinding shadows the `b` parameter, which
        # is no longer needed past the k_l/k_m/k_s computation above.
        b = (alg.vdot(lms_to_rgb[2], [l, m, s]) - 1)
        b1 = alg.vdot(lms_to_rgb[2], [ldt, mdt, sdt])
        b2 = alg.vdot(lms_to_rgb[2], [ldt2, mdt2, sdt2])
        u_b = (b1 / ((b1 * b1) - ((0.5 * b) * b2)))
        t_b = ((- b) * u_b)
        # Negative step denominators mean the channel cannot bound the line.
        t_r = (t_r if (u_r >= 0.0) else FLT_MAX)
        t_g = (t_g if (u_g >= 0.0) else FLT_MAX)
        t_b = (t_b if (u_b >= 0.0) else FLT_MAX)
        # Advance by the smallest channel step.
        t += min(t_r, min(t_g, t_b))
    return t
class TestStubClient(TestCase):
    """Tests for StubClient: stubbed action bodies, errors, and multi-request flows.

    NOTE(review): two decorator lines in the original were bare
    ``('pysoa...', new=[...])`` tuples -- syntax errors -- clearly stripped
    ``@mock.patch`` decorators pinning the client protocol version. Restored;
    confirm ``mock`` is imported at the top of the file.
    """

    def setUp(self):
        self.client = StubClient()

    def test_init_with_action_map(self):
        """Stubs supplied via the constructor's service_action_map work."""
        get_foo_body = {'foo': {'id': 1}}
        get_bar_error = {'code': 'invalid', 'message': 'Invalid value for bar.id', 'field': 'id'}
        client = StubClient(service_action_map={'foo_service': {'get_foo': {'body': get_foo_body}}, 'bar_service': {'get_bar': {'errors': [get_bar_error]}}})
        foo_rsp = client.call_action('foo_service', 'get_foo')
        self.assertEqual(foo_rsp.body, get_foo_body)
        with self.assertRaises(client.CallActionError) as e:
            client.call_action('bar_service', 'get_bar')
        error_response = e.exception.actions[0].errors
        self.assertEqual(len(error_response), 1)
        self.assertEqual(error_response[0].code, get_bar_error['code'])
        self.assertEqual(error_response[0].message, get_bar_error['message'])

    def test_stub_action_body_only(self):
        """stub_action with only a body returns that body."""
        response_body = {'foo': 'bar'}
        self.client.stub_action(STUB_CLIENT_SERVICE_NAME, 'test_action', body=response_body)
        response = self.client.call_action(STUB_CLIENT_SERVICE_NAME, 'test_action')
        self.assertEqual(response.body, response_body)

    def test_stub_action_errors(self):
        """stub_action with errors raises CallActionError carrying them."""
        errors = [{'code': ERROR_CODE_INVALID, 'message': 'Invalid input', 'field': 'foo.bar.baz'}]
        self.client.stub_action(STUB_CLIENT_SERVICE_NAME, 'test_action', errors=errors)
        with self.assertRaises(StubClient.CallActionError) as e:
            self.client.call_action(STUB_CLIENT_SERVICE_NAME, 'test_action')
        error_response = e.exception.actions[0].errors
        self.assertEqual(len(error_response), 1)
        self.assertEqual(error_response[0].code, errors[0]['code'])
        self.assertEqual(error_response[0].message, errors[0]['message'])
        self.assertEqual(error_response[0].field, errors[0]['field'])

    def test_stub_action_permissions_errors(self):
        """Permission errors propagate denied_permissions."""
        errors = [{'code': ERROR_CODE_NOT_AUTHORIZED, 'message': 'Permission "foo" required to access this resource', 'denied_permissions': ['foo']}]
        self.client.stub_action(STUB_CLIENT_SERVICE_NAME, 'test_action', errors=errors)
        with self.assertRaises(StubClient.CallActionError) as e:
            self.client.call_action(STUB_CLIENT_SERVICE_NAME, 'test_action')
        error_response = e.exception.actions[0].errors
        self.assertEqual(len(error_response), 1)
        self.assertEqual(error_response[0].code, errors[0]['code'])
        self.assertEqual(error_response[0].message, errors[0]['message'])
        self.assertEqual(error_response[0].denied_permissions, errors[0]['denied_permissions'])

    def test_stub_action_errors_and_body(self):
        """When both errors and a body are stubbed, the errors win."""
        errors = [{'code': ERROR_CODE_INVALID, 'message': 'Invalid input', 'field': 'foo.bar.baz'}]
        response_body = {'foo': 'bar'}
        self.client.stub_action(STUB_CLIENT_SERVICE_NAME, 'test_action', errors=errors, body=response_body)
        with self.assertRaises(StubClient.CallActionError) as e:
            self.client.call_action(STUB_CLIENT_SERVICE_NAME, 'test_action')
        error_response = e.exception.actions[0].errors
        self.assertEqual(len(error_response), 1)
        self.assertEqual(error_response[0].code, errors[0]['code'])
        self.assertEqual(error_response[0].message, errors[0]['message'])
        self.assertEqual(error_response[0].field, errors[0]['field'])

    @mock.patch('pysoa.client.client.ServiceHandler._client_version', new=[0, 68, 0])
    def test_multiple_requests(self):
        """Interleaved requests map back to their responses by request id (protocol 0.68)."""
        responses = {'action_1': {'body': {'foo': 'bar'}, 'errors': []}, 'action_2': {'body': {'baz': 42}, 'errors': []}, 'action_3': {'body': {}, 'errors': [{'code': ERROR_CODE_INVALID, 'message': 'Invalid input', 'field': 'quas.wex', 'traceback': None, 'variables': None, 'denied_permissions': None, 'is_caller_error': True}]}}
        self.client.stub_action(STUB_CLIENT_SERVICE_NAME, 'action_1', **responses['action_1'])
        self.client.stub_action(STUB_CLIENT_SERVICE_NAME, 'action_2', **responses['action_2'])
        self.client.stub_action(STUB_CLIENT_SERVICE_NAME, 'action_3', **responses['action_3'])
        control = self.client._make_control_header()
        context = self.client._make_context_header()
        request_1 = dict(control_extra=control, context=context, actions=[{'action': 'action_1'}, {'action': 'action_2'}])
        request_2 = dict(control_extra=control, context=context, actions=[{'action': 'action_2'}, {'action': 'action_1'}])
        request_3 = dict(control_extra=control, context=context, actions=[{'action': 'action_3'}])
        requests_by_id = {}
        for request in (request_1, request_2, request_3):
            request_id = self.client.send_request(STUB_CLIENT_SERVICE_NAME, **request)
            requests_by_id[request_id] = request
        # Pre-0.70 clients do not receive is_caller_error; adjust expectations.
        responses = copy.deepcopy(responses)
        responses['action_3']['errors'][0]['is_caller_error'] = False
        for (response_id, response) in self.client.get_all_responses(STUB_CLIENT_SERVICE_NAME):
            self.assertEqual(len(response.actions), len(requests_by_id[response_id]['actions']))
            for i in range(len(response.actions)):
                action_response = response.actions[i]
                self.assertEqual(action_response.action, requests_by_id[response_id]['actions'][i]['action'])
                self.assertEqual(action_response.body, responses[action_response.action]['body'])
                self.assertEqual([attr.asdict(e, dict_factory=UnicodeKeysDict) for e in action_response.errors], responses[action_response.action]['errors'])

    @mock.patch('pysoa.client.client.ServiceHandler._client_version', new=[0, 70, 0])
    def test_multiple_requests_with_is_caller_error(self):
        """Same as above but protocol 0.70 passes is_caller_error through unchanged."""
        responses = {'action_1': {'body': {'foo': 'bar'}, 'errors': []}, 'action_2': {'body': {'baz': 42}, 'errors': []}, 'action_3': {'body': {}, 'errors': [{'code': ERROR_CODE_INVALID, 'message': 'Invalid input', 'field': 'quas.wex', 'traceback': None, 'variables': None, 'denied_permissions': None, 'is_caller_error': True}]}}
        self.client.stub_action(STUB_CLIENT_SERVICE_NAME, 'action_1', **responses['action_1'])
        self.client.stub_action(STUB_CLIENT_SERVICE_NAME, 'action_2', **responses['action_2'])
        self.client.stub_action(STUB_CLIENT_SERVICE_NAME, 'action_3', **responses['action_3'])
        control = self.client._make_control_header()
        context = self.client._make_context_header()
        request_1 = dict(control_extra=control, context=context, actions=[{'action': 'action_1'}, {'action': 'action_2'}])
        request_2 = dict(control_extra=control, context=context, actions=[{'action': 'action_2'}, {'action': 'action_1'}])
        request_3 = dict(control_extra=control, context=context, actions=[{'action': 'action_3'}])
        requests_by_id = {}
        for request in (request_1, request_2, request_3):
            request_id = self.client.send_request(STUB_CLIENT_SERVICE_NAME, **request)
            requests_by_id[request_id] = request
        for (response_id, response) in self.client.get_all_responses(STUB_CLIENT_SERVICE_NAME):
            self.assertEqual(len(response.actions), len(requests_by_id[response_id]['actions']))
            for i in range(len(response.actions)):
                action_response = response.actions[i]
                self.assertEqual(action_response.action, requests_by_id[response_id]['actions'][i]['action'])
                self.assertEqual(action_response.body, responses[action_response.action]['body'])
                self.assertEqual([attr.asdict(e, dict_factory=UnicodeKeysDict) for e in action_response.errors], responses[action_response.action]['errors'])
class TemplateSource():
    """Load a track template and recursively inline `rally.collect(parts=...)` includes."""

    # Matches {{ rally.collect(parts="<glob>") }} directives; group 1 is the glob.
    collect_parts_re = re.compile('{{\\ +?rally\\.collect\\(parts=\\"(.+?(?=\\"))\\"\\)\\ +?}}')

    def __init__(self, base_path, template_file_name, source=io.FileSource, fileglobber=glob.glob):
        # `source` (file opener) and `fileglobber` are injectable for testing.
        self.base_path = base_path
        self.template_file_name = template_file_name
        self.source = source
        self.fileglobber = fileglobber
        # Populated by one of the load_template_from_* methods.
        self.assembled_source = None
        self.logger = logging.getLogger(__name__)

    def load_template_from_file(self):
        """Read the template file and assemble it with all includes inlined.

        :raises TrackSyntaxError: when the template file cannot be loaded.
        """
        loader = jinja2.FileSystemLoader(self.base_path)
        try:
            base_track = loader.get_source(jinja2.Environment(), self.template_file_name)
        except jinja2.TemplateNotFound:
            self.logger.exception('Could not load track from [%s].', self.template_file_name)
            raise TrackSyntaxError(f"Could not load track from '{self.template_file_name}'")
        # get_source returns (source, filename, uptodate); only the text is needed.
        self.assembled_source = self.replace_includes(self.base_path, base_track[0])

    def load_template_from_string(self, template_source):
        """Assemble a template given directly as a string."""
        self.assembled_source = self.replace_includes(self.base_path, template_source)

    def replace_includes(self, base_path, track_fragment):
        """Expand every collect() directive in `track_fragment`, recursively."""
        match = TemplateSource.collect_parts_re.findall(track_fragment)
        if match:
            repl = {}
            for glob_pattern in match:
                full_glob_path = os.path.join(base_path, glob_pattern)
                sub_source = self.read_glob_files(full_glob_path)
                # Included fragments may themselves contain collect() directives;
                # expand them relative to their own directory.
                repl[glob_pattern] = self.replace_includes(base_path=io.dirname(full_glob_path), track_fragment=sub_source)

            def replstring(matchobj):
                # Substitute the pre-expanded text for each directive occurrence.
                return repl[matchobj.groups()[0]]
            return TemplateSource.collect_parts_re.sub(replstring, track_fragment)
        return track_fragment

    def read_glob_files(self, pattern):
        """Concatenate all files matching `pattern`, comma-newline separated."""
        source = []
        files = self.fileglobber(pattern)
        for fname in files:
            with self.source(fname, mode='rt', encoding='utf-8') as fp:
                source.append(fp.read())
        return ',\n'.join(source)
class ContributorsRankingMbmReportAction(Action):
    """CLI action that builds and persists the contributors month-by-month
    ranking change report for one company.

    NOTE(review): ``name``/``help_text`` take ``cls`` — presumably decorated
    as classmethods upstream (decorators not visible here).
    """
    # Extend the base Action params with a required --company/-c option.
    params = (Action.params + (ActionParam(name='company', short_name='c', type=str, required=True),))
    def name(cls):
        """Return the CLI command name of this action."""
        return 'get-contributors-ranking-mbm-report'
    def help_text(cls) -> str:
        """Return the one-line help string shown by the CLI."""
        return 'Prepared Contributors month by month report'
    def _execute(self, date: datetime, company: str):
        """Read the month-to-date ranking, compute the month-by-month change
        report and save it; returns the rendered DataFrame as a string."""
        contributors_report = ContributorsRankingMTD(date=date, company=company)
        contributors_mbm_report = ContributorsRankingMBM(date=date, company=company)
        df = get_contributors_ranking_mbm_change_report(reports=contributors_report.read_all(), contributor_field=ContributorsRankingReportSchema.author, commits_amount_field=ContributorsRankingReportSchema.commits)
        contributors_mbm_report.save(df=df)
        return dict(out_df=str(df))
()
class _LiteDRAMPatternGenerator(Module):
    """Migen module that writes an (address, data) pattern to DRAM via DMA.

    ``init`` is a list of (address, data) pairs loaded into two small lookup
    memories that drive a LiteDRAMDMAWriter.  ``start``/``done`` handshake a
    run; ``run_cascade_in``/``run_cascade_out`` let several generators be
    chained so they take turns issuing commands.
    """
    def __init__(self, dram_port, init=[]):
        # NOTE(review): mutable default ``init=[]`` is never mutated here,
        # but confirm callers always pass an explicit list.
        (ashift, awidth) = get_ashift_awidth(dram_port)
        self.start = Signal()  # pulse to begin emitting the pattern
        self.done = Signal()  # asserted once all commands have been issued
        self.ticks = Signal(32)  # cycle counter incremented while in RUN
        self.run_cascade_in = Signal(reset=1)
        self.run_cascade_out = Signal()
        # Split the init pattern into address and data lookup memories with
        # asynchronous read ports indexed by cmd_counter.
        (addr_init, data_init) = zip(*init)
        addr_mem = Memory(dram_port.address_width, len(addr_init), init=addr_init)
        data_mem = Memory(dram_port.data_width, len(data_init), init=data_init)
        addr_port = addr_mem.get_port(async_read=True)
        data_port = data_mem.get_port(async_read=True)
        self.specials += (addr_mem, data_mem, addr_port, data_port)
        dma = LiteDRAMDMAWriter(dram_port)
        self.submodules += dma
        cmd_counter = Signal(dram_port.address_width, reset_less=True)
        # FSM: IDLE -> RUN, bouncing to WAIT whenever the cascade input is
        # deasserted, and finishing in DONE after the last command.
        fsm = FSM(reset_state='IDLE')
        self.submodules += fsm
        fsm.act('IDLE', If(self.start, NextValue(cmd_counter, 0), NextState('RUN')), NextValue(self.ticks, 0))
        fsm.act('WAIT', If(self.run_cascade_in, NextState('RUN')))
        fsm.act('RUN', dma.sink.valid.eq(1), If(dma.sink.ready, self.run_cascade_out.eq(1), NextValue(cmd_counter, (cmd_counter + 1)), If((cmd_counter == (len(init) - 1)), NextState('DONE')).Elif((~ self.run_cascade_in), NextState('WAIT'))), NextValue(self.ticks, (self.ticks + 1)))
        fsm.act('DONE', self.run_cascade_out.eq(1), self.done.eq(1))
        # Native ports take the full address; AXI ports drop the shift bits.
        if isinstance(dram_port, LiteDRAMNativePort):
            dma_sink_addr = dma.sink.address
        elif isinstance(dram_port, LiteDRAMAXIPort):
            dma_sink_addr = dma.sink.address[ashift:]
        else:
            raise NotImplementedError
        self.comb += [addr_port.adr.eq(cmd_counter), dma_sink_addr.eq(addr_port.dat_r), data_port.adr.eq(cmd_counter), dma.sink.data.eq(data_port.dat_r)]
class OptionSeriesSolidgaugeSonificationDefaultinstrumentoptionsMappingPitch(Options):
    """Highcharts ``pitch`` mapping options for solid-gauge sonification.

    Each option appears as a getter/setter pair around the shared
    ``_config_get``/``_config`` machinery — presumably joined by
    @property/@x.setter decorators upstream (decorators not visible here).
    """
    def mapFunction(self):
        """Mapping function for the pitch; default None."""
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        """Point property the pitch is mapped from; default 'y'."""
        return self._config_get('y')
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        """Maximum value for the mapping; default 'c6'."""
        return self._config_get('c6')
    def max(self, text: str):
        self._config(text, js_type=False)
    def min(self):
        """Minimum value for the mapping; default 'c2'."""
        return self._config_get('c2')
    def min(self, text: str):
        self._config(text, js_type=False)
    def within(self):
        """Range the mapping is computed within; default 'yAxis'."""
        return self._config_get('yAxis')
    def within(self, text: str):
        self._config(text, js_type=False)
.asyncio
.workspace_host
class TestGetWebhook():
    """API tests for ``GET /webhooks/{id}``.

    NOTE(review): the bare ``.authenticated_admin`` lines and the
    ``test_client_api: test_data: TestData`` parameter lists look mangled
    (stripped decorators / lost annotations) — restore from the original file.
    """
    async def test_unauthorized(self, unauthorized_api_assertions: HTTPXResponseAssertion, test_client_api: test_data: TestData):
        # An unauthenticated request must be rejected.
        webhook = test_data['webhooks']['all']
        response = (await test_client_api.get(f'/webhooks/{webhook.id}'))
        unauthorized_api_assertions(response)
    .authenticated_admin
    async def test_not_existing(self, test_client_api: not_existing_uuid: uuid.UUID):
        # Unknown webhook id yields 404.
        response = (await test_client_api.get(f'/webhooks/{not_existing_uuid}'))
        assert (response.status_code == status.HTTP_404_NOT_FOUND)
    .authenticated_admin
    async def test_valid(self, test_client_api: test_data: TestData):
        # Admin can fetch the webhook; the secret must never be returned.
        webhook = test_data['webhooks']['all']
        response = (await test_client_api.get(f'/webhooks/{webhook.id}'))
        assert (response.status_code == status.HTTP_200_OK)
        json = response.json()
        assert ('secret' not in json)
def send_registration_sms(doc):
    """Send the configured registration SMS to ``doc.mobile``.

    Does nothing unless the 'send_registration_msg' flag is enabled in
    Healthcare Settings; warns via msgprint when no mobile number is set.
    """
    if not frappe.db.get_single_value('Healthcare Settings', 'send_registration_msg'):
        return
    if not doc.mobile:
        frappe.msgprint((doc.name + ' has no mobile number to send registration SMS'), alert=True)
        return
    # Render the configured message template against the document.
    template_context = {'doc': doc, 'alert': doc, 'comments': None}
    raw_comments = doc.get('_comments')
    if raw_comments:
        template_context['comments'] = json.loads(raw_comments)
    template = frappe.db.get_single_value('Healthcare Settings', 'registration_msg')
    rendered_message = frappe.render_template(template, template_context)
    send_sms([doc.mobile], rendered_message)
# NOTE(review): the line below appears to be a stripped
# ``@pytest.mark.parametrize`` decorator — restore from the original file.
.parametrize('invalide_key,if_error', ((b'\x124V', False), (b'\x124Vw', False), (b'\x124Vx\x9a', True), (b'\x124Vy\xab', True), (b'\xab\xcd\xef', False)))
def test_bin_trie_invalid_key(invalide_key, if_error):
    """Getting an absent key returns None; deleting one either raises
    NodeOverrideError (if_error=True) or leaves the root hash unchanged."""
    trie = BinaryTrie(db={})
    trie.set(b'\x124Vx', b'78')
    trie.set(b'\x124Vy', b'79')
    assert (trie.get(invalide_key) is None)
    if if_error:
        with pytest.raises(NodeOverrideError):
            trie.delete(invalide_key)
    else:
        # Deleting a missing key must be a no-op on the trie state.
        previous_root_hash = trie.root_hash
        trie.delete(invalide_key)
        assert (previous_root_hash == trie.root_hash)
# NOTE(review): the line below appears to be a stripped
# ``@pytest.mark.parametrize`` decorator — restore from the original file.
.parametrize('name,expected', ((f"{('a' * 63)}.{('b' * 63)}.{('c' * 63)}.{('d' * 63)}.{('e' * 63)}.{('f' * 63)}.{('g' * 63)}", (b''.join([(b'?' + (to_bytes(text=label) * 63)) for label in 'abcdefg']) + b'\x00')), (f"{('a-1' * 21)}.{('b-2' * 21)}.{('c-3' * 21)}.{('d-4' * 21)}.{('e-5' * 21)}.{('f-6' * 21)}", (b''.join([(b'?' + (to_bytes(text=label) * 21)) for label in ['a-1', 'b-2', 'c-3', 'd-4', 'e-5', 'f-6']]) + b'\x00'))))
def test_ens_encode_name_validating_total_encoded_name_size(name, expected):
    """ens_encode_name must handle names whose wire encoding exceeds 255
    bytes and produce the length-prefixed, NUL-terminated encoding."""
    ens_encoded = ens_encode_name(name)
    # The fixtures are deliberately larger than the 255-byte DNS size.
    assert (len(ens_encoded) > 255)
    assert (ens_encoded == expected)
def test_download_folder(setup_dropbox_loader, mocker):
    """_download_folder lists the remote folder and returns its entries."""
    loader, dropbox_stub = setup_dropbox_loader
    # Avoid touching the real filesystem.
    mocker.patch('os.makedirs')
    mocker.patch('os.path.join', return_value='mock/path')
    fake_entry = mocker.MagicMock(spec=FileMetadata)
    dropbox_stub.files_list_folder.return_value.entries = [fake_entry]
    result = loader._download_folder('path/to/folder', 'local_root')
    assert result is not None
class ThriftFunctionCall(ThriftArgScheme):
    """Thrift-arg-scheme TChannel call assembled from/into call-req frames.

    NOTE(review): ``create`` takes ``cls`` and the two ``default_*`` helpers
    take no ``self`` — presumably @classmethod/@staticmethod upstream
    (decorators not visible here).
    """
    # Declared attributes, populated by create()/on_load_frame().
    service: Optional[str]
    method_name: str
    thrift_payload: bytes
    tchannel_headers: Optional[dict]
    application_headers: Dict[(str, str)]
    ttl: int
    def create(cls, service: str, method_name: str, thrift_payload: bytes):
        """Build an outgoing call with default headers and a 61s TTL."""
        o = cls()
        o.service = service
        o.method_name = method_name
        o.thrift_payload = thrift_payload
        o.tchannel_headers = cls.default_tchannel_headers()
        o.application_headers = cls.default_application_headers()
        o.ttl = 61000
        return o
    def default_tchannel_headers():
        """Transport headers identifying the arg scheme and caller."""
        return {'as': 'thrift', 're': 'c', 'cn': 'cadence-client'}
    def default_application_headers():
        """Application headers describing the calling user/host/library."""
        return {'user-name': getpass.getuser(), 'host-name': socket.gethostname(), 'cadence-client-library-version': '2.2.0', 'cadence-client-feature-version': '1.0.0'}
    def __init__(self):
        super().__init__()
        self.service = None
        self.tchannel_headers = None
        self.ttl = 0
        self.message_id = 0
    def on_load_frame(self, frame: FrameWithArgs):
        """Populate this call from an incoming call-req frame (other frame
        types are ignored)."""
        if (not (frame.TYPE == CallReqFrame.TYPE)):
            return
        frame: CallReqFrame = frame
        self.message_id = frame.id
        self.service = frame.service
        self.ttl = frame.ttl
        self.tchannel_headers = frame.headers.d
    def get_initial_frame(self) -> FrameWithArgs:
        """Build the first call-req frame carrying ttl/service/headers."""
        frame: CallReqFrame = CallReqFrame()
        frame.ttl = self.ttl
        frame.service = self.service
        frame.headers.d.update(self.tchannel_headers)
        return frame
    def get_continue_frame(self) -> FrameWithArgs:
        """Build an (empty) continuation frame for oversized payloads."""
        frame: CallReqContinueFrame = CallReqContinueFrame()
        return frame
class desc_stats_request(stats_request):
    """OpenFlow description-stats request message (wire version 2).

    NOTE(review): loxi-style generated code with Python 2 string semantics —
    pack() joins str chunks (including the '\\x00' * 4 pad), which would need
    bytes under Python 3.  ``unpack(reader)`` takes no self; presumably a
    @staticmethod upstream.
    """
    version = 2
    type = 18  # OFPT_STATS_REQUEST
    stats_type = 0  # OFPST_DESC
    def __init__(self, xid=None, flags=None):
        if (xid != None):
            self.xid = xid
        else:
            self.xid = None
        if (flags != None):
            self.flags = flags
        else:
            self.flags = 0
        return
    def pack(self):
        """Serialize the message; the length field is back-patched after all
        chunks are collected."""
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))  # length placeholder (index 2)
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!H', self.stats_type))
        packed.append(struct.pack('!H', self.flags))
        packed.append(('\x00' * 4))  # pad
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)
        return ''.join(packed)
    def unpack(reader):
        """Deserialize a message from *reader*, validating the fixed fields."""
        obj = desc_stats_request()
        _version = reader.read('!B')[0]
        assert (_version == 2)
        _type = reader.read('!B')[0]
        assert (_type == 18)
        _length = reader.read('!H')[0]
        # Constrain further reads to this message's declared length.
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _stats_type = reader.read('!H')[0]
        assert (_stats_type == 0)
        obj.flags = reader.read('!H')[0]
        reader.skip(4)  # pad
        return obj
    def __eq__(self, other):
        """Messages are equal when type, xid and flags all match."""
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.flags != other.flags):
            return False
        return True
    def pretty_print(self, q):
        """Render a human-readable dump via the pretty-printer *q*."""
        q.text('desc_stats_request {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('flags = ')
                value_name_map = {}
                q.text(util.pretty_flags(self.flags, value_name_map.values()))
            q.breakable()
        q.text('}')
class MysqlConnectionPool():
    """MySQLdb-backed connection-pool test mixin: creates a throwaway
    per-process database in setUp helpers and drops it afterwards."""
    # Schema for a scratch table exercising common MySQL column types.
    dummy_table_sql = "CREATE TEMPORARY TABLE test_table\n (\n row_id INTEGER PRIMARY KEY AUTO_INCREMENT,\n value_int INTEGER,\n value_float FLOAT,\n value_string VARCHAR(200),\n value_uuid CHAR(36),\n value_binary BLOB,\n value_binary_string VARCHAR(200) BINARY,\n value_enum ENUM('Y','N'),\n created TIMESTAMP\n ) ENGINE=InnoDB;"
    # NOTE(review): the line below looks like a stripped skip decorator
    # (e.g. ``@tests.skip_unless(mysql_requirement)``) — restore upstream.
    _unless(mysql_requirement)
    def setUp(self):
        self._dbmodule = MySQLdb
        self._auth = tests.get_database_auth()['MySQLdb']
        super().setUp()
    def tearDown(self):
        super().tearDown()
    def create_db(self):
        """Create a fresh test database named after the current pid and point
        self._auth at it."""
        auth = self._auth.copy()
        try:
            # Best-effort cleanup of a leftover database from a prior run.
            self.drop_db()
        except Exception:
            pass
        dbname = ('test%s' % os.getpid())
        db = self._dbmodule.connect(**auth).cursor()
        db.execute(('create database ' + dbname))
        db.close()
        self._auth['db'] = dbname
        del db
    def drop_db(self):
        """Drop the database currently referenced by self._auth."""
        db = self._dbmodule.connect(**self._auth).cursor()
        db.execute(('drop database ' + self._auth['db']))
        db.close()
        del db
def _str_to_python_value(val):
if (not isinstance(val, (str,))):
return val
elif ((val == 'true') or (val == 'True') or (val == 'on')):
return True
elif ((val == 'false') or (val == 'False') or (val == 'off')):
return False
elif INT_REGEX.match(val):
return int(val)
return val |
class OptionSeriesVectorSonificationTracksMappingHighpassFrequency(Options):
    """Highcharts highpass-frequency mapping options for vector-series
    sonification tracks.

    Each option appears as a getter/setter pair around the shared
    ``_config_get``/``_config`` machinery — presumably joined by
    @property/@x.setter decorators upstream (decorators not visible here).
    """
    def mapFunction(self):
        """Mapping function for the frequency; default None."""
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        """Point property the frequency is mapped from; default None."""
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        """Maximum value for the mapping; default None."""
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        """Minimum value for the mapping; default None."""
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        """Range the mapping is computed within; default None."""
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class JqAccordion(Component):
    """jQuery UI Accordion wrapper component.

    NOTE(review): ``var``, ``options``, ``js`` and ``style`` take only
    ``self`` and are used like attributes elsewhere — presumably @property
    upstream (decorators not visible here).
    """
    name = 'Jquery Accordion'
    _option_cls = OptJqWiidgets.OptAccordion
    # Outer markup; sub_items is filled from self.items at render time.
    str_repr = '<div {attrs}>{sub_items}</div>'
    dyn_repr = '{header}{content}'
    _js__builder__ = ('%s.accordion(options)' % JsQuery.decorate_var('htmlObj', convert_var=False))
    def var(self):
        """jQuery selector variable for this component's DOM node."""
        return JsQuery.decorate_var(('#%s' % self.htmlCode))
    def options(self) -> OptJqWiidgets.OptAccordion:
        """Accordion-specific component options."""
        return super().options
    def js(self) -> JsJqWidgets.Accordion:
        """Lazily-created JavaScript API object for this accordion."""
        if (self._js is None):
            self._js = JsJqWidgets.Accordion(component=self, js_code=self.var, page=self.page)
        return self._js
    def style(self) -> JqStyleWidget.Accordion:
        """Lazily-created style helper for this accordion."""
        if (self._styleObj is None):
            self._styleObj = JqStyleWidget.Accordion(self)
        return self._styleObj
    def header(self, n):
        """Return the header component of section *n*."""
        return self.items[n]['header']
    def panel(self, n: int):
        """Return the content component of section *n*."""
        return self.items[n]['content']
    def add_section(self, header, content, prepend=False):
        """Add a header/content section; plain values are wrapped in divs
        (with classes cleared and managed=False so the accordion renders
        them itself).  Returns the stored section dict."""
        if (not hasattr(header, 'options')):
            header = self.page.web.std.div(header)
            header.attr['class'].clear()
        header.options.managed = False
        if (not hasattr(content, 'options')):
            content = self.page.web.std.div(content)
            content.attr['class'].clear()
        content.options.managed = False
        section = {'header': header, 'content': content}
        if prepend:
            self.items.insert(0, section)
        else:
            self.items.append(section)
        return section
    def write_item(self, item):
        """Render one section dict to its HTML fragments."""
        return {'header': item['header'].html(), 'content': item['content'].html()}
    def add_item(self, component):
        """Unsupported: sections must be added via add_section."""
        raise ValueError('Not available for this class')
class AbstractSyntaxTreeNodeSerializer(Serializer, ABC):
    """Base serializer for AST nodes.

    Delegates id allocation to the owning serializer group and condition
    serialization to the node itself.
    """
    def __init__(self, serializer_group: AstNodeSerializer):
        self._group = serializer_group
        self._pseudo = PseudoSerializer()

    def serialize(self, node: AbstractSyntaxTreeNode) -> Dict:
        """Return the serializable dict describing *node*."""
        serialized = {
            'id': self._group.get_id(node),
            'type': type(node).__name__,
            'rc': node.reaching_condition.serialize(),
        }
        return serialized
class TestPtrUtilities(unittest.TestCase):
    """Hypothesis fuzz tests for ptr's public helpers.

    NOTE(review): the bare ``(...=st....)`` lines and ``('builtins.print')``
    look like stripped ``@hypothesis.given(...)`` / ``@patch`` decorators —
    restore from the original file.
    """
    (base_path=st.builds(Path), exclude_patterns=st.sets(st.text()), follow_symlinks=st.booleans())
    def test_fuzz_find_setup_pys(self, base_path, exclude_patterns, follow_symlinks):
        # Must accept arbitrary paths/patterns without raising.
        ptr.find_setup_pys(base_path=base_path, exclude_patterns=exclude_patterns, follow_symlinks=follow_symlinks)
    (setup_py=st.builds(Path))
    def test_fuzz_parse_setup_cfg(self, setup_py):
        # Must tolerate arbitrary setup.py paths.
        ptr.parse_setup_cfg(setup_py=setup_py)
    (modules=st.lists(st.builds(Path)))
    ('builtins.print')
    def test_fuzz_print_non_configured_modules(self, mock_print, modules):
        # Should always emit some output via print.
        ptr.print_non_configured_modules(modules=modules)
        self.assertTrue(mock_print.called)
# NOTE(review): the line below appears to be a stripped
# ``@pytest.mark.parametrize`` decorator — restore from the original file.
.parametrize('input_points, expected_points', [(range(10), [0, 4, 8]), ([1, 10, 11, 12, 13, 14, 100, 10000], [1, 13])])
def test_downsample_not_keeplast(string_to_well, input_points, expected_points):
    """downsample(keeplast=False) keeps only the sampled trajectory points,
    dropping the final point when it is off the sampling grid."""
    well_definition = '1.01\n Unknown\n custom_name 0 0 0\n 1\n Zonelog DISC 1 zone1 2 zone2 3 zone3'
    # Append one trajectory row per input point: X Y Z zone.
    for i in input_points:
        well_definition += f'''
{i} {i} {i} 1'''
    well = string_to_well(well_definition)
    well.downsample(keeplast=False)
    assert ({'X_UTME': well.dataframe['X_UTME'].to_list(), 'Y_UTMN': well.dataframe['Y_UTMN'].to_list(), 'Z_TVDSS': well.dataframe['Z_TVDSS'].to_list()} == {'X_UTME': expected_points, 'Y_UTMN': expected_points, 'Z_TVDSS': expected_points})
def test_slice_will_set_the_data_attributes_on_camera(prepare_scene, create_pymel):
    """Slicing must persist the slice metadata attributes on the camera."""
    camera = prepare_scene
    pymel = create_pymel
    # Fix the scene resolution so the stored values are predictable.
    default_resolution = pymel.PyNode('defaultResolution')
    default_resolution.width.set(960)
    default_resolution.height.set(540)
    slicer = RenderSlicer(camera=camera)
    slicer.slice(10, 20)
    sliced_camera = slicer.camera
    assert sliced_camera.isSliced.get() is True
    assert sliced_camera.nonSlicedResolutionX.get() == 960
    assert sliced_camera.nonSlicedResolutionY.get() == 540
    assert sliced_camera.slicesInX.get() == 10
    assert sliced_camera.slicesInY.get() == 20
def helmholtz(V):
    """Solve a Helmholtz-type problem on function space V and return the
    solution Function.

    The forcing term is the product of cos(2*pi*x_i) over the spatial
    coordinates, projected into V.
    """
    u = TrialFunction(V)
    v = TestFunction(V)
    coords = SpatialCoordinate(V.mesh())
    forcing = Function(V)
    forcing.project(np.prod([cos(((2 * pi) * xi)) for xi in coords]))
    # Bilinear and linear forms of the variational problem.
    lhs = (inner(grad(u), grad(v)) + inner(u, v)) * dx
    rhs = inner(forcing, v) * dx
    solution = Function(V)
    solve(lhs == rhs, solution, solver_parameters={'ksp_type': 'cg', 'pc_type': 'lu'})
    return solution
def test_step_one_create_table(app):
    """Defining a fresh model should yield exactly one add_table diff and
    the expected CREATE TABLE SQL."""
    db = Database(app, auto_migrate=False)
    db.define_models(StepOneThing)
    ops = _make_ops(db)
    diffs = ops.as_diffs()
    assert len(diffs) == 1
    assert diffs[0][0] == 'add_table'
    generated_sql = _make_sql(db, ops.ops[0])
    assert generated_sql == _step_one_sql
class TestOFPBarrierRequest(unittest.TestCase):
    """Tests OFPBarrierRequest serialization (OpenFlow 1.0)."""
    class Datapath(object):
        # Minimal stand-in exposing only the protocol modules the message
        # constructor reads.
        ofproto = ofproto
        ofproto_parser = ofproto_v1_0_parser
    # Message under test, shared by all methods; built at class-creation
    # time, so Datapath must be defined above this line.
    c = OFPBarrierRequest(Datapath)
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_init(self):
        # Intentionally empty in the original: nothing extra to initialize.
        pass
    def test_parser(self):
        # Intentionally empty in the original.
        pass
    def test_serialize(self):
        """Serialized buffer must contain a bare OpenFlow header with the
        barrier-request type and xid 0."""
        self.c.serialize()
        eq_(ofproto.OFP_VERSION, self.c.version)
        eq_(ofproto.OFPT_BARRIER_REQUEST, self.c.msg_type)
        eq_(0, self.c.xid)
        fmt = ofproto.OFP_HEADER_PACK_STR
        res = struct.unpack(fmt, six.binary_type(self.c.buf))
        # Header fields: version, type, length, xid.
        eq_(ofproto.OFP_VERSION, res[0])
        eq_(ofproto.OFPT_BARRIER_REQUEST, res[1])
        eq_(len(self.c.buf), res[2])
        eq_(0, res[3])
class Policy(ABC):
    """Interface for policies mapping observations to actions.

    NOTE(review): the method bodies appear to have been stripped during
    extraction; docstrings are used as the (empty) bodies below so the class
    stays syntactically valid — restore the original bodies/decorators.
    """
    def seed(self, seed: int) -> None:
        """Seed this policy's random state with *seed*."""
    def needs_state(self) -> bool:
        """Return whether compute_action requires the maze_state argument."""
    def needs_env(self) -> bool:
        """Return whether compute_action requires the env argument; this
        implementation answers False."""
        return False
    def compute_action(self, observation: ObservationType, maze_state: Optional[MazeStateType], env: Optional[BaseEnv], actor_id: Optional[ActorID]=None, deterministic: bool=False) -> ActionType:
        """Compute the action for *observation* (optionally deterministic)."""
    def compute_top_action_candidates(self, observation: ObservationType, num_candidates: Optional[int], maze_state: Optional[MazeStateType], env: Optional[BaseEnv], actor_id: Optional[ActorID]=None) -> Tuple[(List[ActionType], List[float])]:
        """Return up to *num_candidates* candidate actions and their scores."""
    def reset(self) -> None:
        """Reset any internal policy state between episodes."""
    def write_policy_record(self) -> PolicyRecordType:
        """Return a record describing this policy for logging/serialization."""
def remove_empty_trailing_paragraphs(html):
    """Strip empty <p>/<br> tags from the end of an HTML fragment and return
    the cleaned HTML string."""
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(html, 'html.parser')
    # Walk the tags from the end of the document; stop at the first tag that
    # is not a removable empty paragraph/line break.
    for tag in reversed(soup.find_all(True)):
        removable = tag.name in ['br', 'p'] and not tag.contents
        if not removable:
            break
        tag.extract()
    return str(soup)
def write_candidates(working_dir, candidates):
    """Write all SV candidate groups to BED files under <working_dir>/candidates.

    ``candidates`` is a 6-tuple of lists: interspersed duplications,
    inversions, tandem duplications, deletions, novel insertions and
    breakends.  Single-entry candidates provide get_bed_entry(); paired
    candidates provide get_bed_entries() yielding (source, dest) entries.
    Files are opened via context managers so they are closed even on error.
    """
    (int_duplication_candidates, inversion_candidates, tan_duplication_candidates,
     deletion_candidates, novel_insertion_candidates, breakend_candidates) = candidates
    candidates_dir = os.path.join(working_dir, 'candidates')
    # exist_ok avoids the check-then-create race of the original mkdir.
    os.makedirs(candidates_dir, exist_ok=True)

    def _write_single(candidate_list, file_name):
        # One BED line per candidate.
        with open(os.path.join(candidates_dir, file_name), 'w') as output:
            for candidate in candidate_list:
                print(candidate.get_bed_entry(), file=output)

    def _write_paired(candidate_list, source_file_name, dest_file_name):
        # Source entry goes to the first file, destination to the second.
        with open(os.path.join(candidates_dir, source_file_name), 'w') as source_output, \
             open(os.path.join(candidates_dir, dest_file_name), 'w') as dest_output:
            for candidate in candidate_list:
                bed_entries = candidate.get_bed_entries()
                print(bed_entries[0], file=source_output)
                print(bed_entries[1], file=dest_output)

    _write_single(deletion_candidates, 'candidates_deletions.bed')
    _write_single(inversion_candidates, 'candidates_inversions.bed')
    _write_single(novel_insertion_candidates, 'candidates_novel_insertions.bed')
    _write_paired(int_duplication_candidates,
                  'candidates_int_duplications_source.bed',
                  'candidates_int_duplications_dest.bed')
    _write_paired(tan_duplication_candidates,
                  'candidates_tan_duplications_source.bed',
                  'candidates_tan_duplications_dest.bed')
    # Breakends write both of their entries to the same file.
    with open(os.path.join(candidates_dir, 'candidates_breakends.bed'), 'w') as breakend_output:
        for candidate in breakend_candidates:
            bed_entries = candidate.get_bed_entries()
            print(bed_entries[0], file=breakend_output)
            print(bed_entries[1], file=breakend_output)
def save_key_to_pem(pfx_data, pfx_password):
    """Extract the key pair from PKCS#12 data and write PEM files.

    Writes the certificate to certs/public.pem and the unencrypted private
    key (traditional OpenSSL format) to certs/private.pem.
    """
    (private_key, certificate) = pkcs12.load_key_and_certificates(pfx_data, pfx_password, default_backend())[:2]
    # Replaces the original try/mkdir/except-FileExistsError dance.
    os.makedirs('certs', exist_ok=True)
    with open('certs/public.pem', 'wb') as f:
        f.write(certificate.public_bytes(encoding=serialization.Encoding.PEM))
    with open('certs/private.pem', 'wb') as f:
        f.write(private_key.private_bytes(encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption()))
class AbstractAuthenticationService(object):
    """Base class for SNMP authentication services.

    Every operation raises noAuthentication; concrete services override the
    methods they support.
    """
    SERVICE_ID = None

    def _unsupported(self):
        # Shared failure path: this service implements no authentication.
        raise error.ProtocolError(errind.noAuthentication)

    def hashPassphrase(self, authKey):
        """Hash a passphrase into key material."""
        self._unsupported()

    def localizeKey(self, authKey, snmpEngineID):
        """Localize key material to an SNMP engine."""
        self._unsupported()

    def digestLength(self):
        """Length of the authentication digest in octets."""
        self._unsupported()

    def authenticateOutgoingMsg(self, authKey, wholeMsg):
        """Authenticate an outgoing message."""
        self._unsupported()

    def authenticateIncomingMsg(self, authKey, authParameters, wholeMsg):
        """Verify authentication of an incoming message."""
        self._unsupported()
class Annotations(Options):
    """Chart.js annotation-plugin option wrapper.

    Each option appears as a getter/setter pair around the shared
    ``_config_get``/``_config`` machinery — presumably joined by
    @property/@x.setter decorators upstream (decorators not visible here).
    """
    def drawTime(self):
        """When the annotation is drawn; default 'afterDraw'."""
        return self._config_get('afterDraw')
    def drawTime(self, value):
        self._config(value)
    def type(self):
        """Annotation type; default 'line'."""
        return self._config_get('line')
    def type(self, value):
        self._config(value)
    def mode(self):
        """Line orientation; default 'horizontal'."""
        return self._config_get('horizontal')
    def mode(self, value):
        self._config(value)
    def scaleID(self):
        """Scale the annotation is attached to; default 'y-axis-0'."""
        return self._config_get('y-axis-0')
    def scaleID(self, value):
        self._config(value)
    def value(self):
        """Value the annotation is drawn at; default '25'."""
        return self._config_get('25')
    def value(self, v):
        self._config(v)
    def borderColor(self):
        """Border color; default 'red'."""
        return self._config_get('red')
    def borderColor(self, color):
        self._config(color)
    def borderWidth(self):
        """Border width in pixels; default 2."""
        return self._config_get(2)
    def borderWidth(self, num):
        self._config(num)
    def onClick(self, js_funcs, profile=None):
        """Attach JavaScript click handlers to the annotation.

        ``js_funcs`` may be a single snippet or a list; they are wrapped in a
        function(event){...} and stored as raw JavaScript.
        """
        if (not isinstance(js_funcs, list)):
            js_funcs = [js_funcs]
        self._config(('function(event){%s}' % JsUtils.jsConvertFncs(js_funcs, toStr=True, profile=profile)), js_type=True)
class OptionSeriesBubbleSonificationDefaultinstrumentoptionsMappingGapbetweennotes(Options):
    """Highcharts gap-between-notes mapping options for bubble-series
    sonification default instruments.

    Each option appears as a getter/setter pair around the shared
    ``_config_get``/``_config`` machinery — presumably joined by
    @property/@x.setter decorators upstream (decorators not visible here).
    """
    def mapFunction(self):
        """Mapping function for the gap; default None."""
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        """Point property the gap is mapped from; default None."""
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        """Maximum value for the mapping; default None."""
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        """Minimum value for the mapping; default None."""
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        """Range the mapping is computed within; default None."""
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
def fortios_configuration_fact(params, fos):
    """Fetch a FortiOS configuration object and emit a generated playbook.

    Validates the mkey, queries the device via ``fos.get`` (optionally with
    filter/sort/format URL parameters), writes a ``<selector>_playbook.yml``
    into ``params['output_path']`` and returns the Ansible-style triple
    (is_error, changed, result).
    """
    (isValid, result) = validate_mkey(params)
    if (not isValid):
        return (True, False, result)
    selector = params['selector']
    selector_params = params['params']
    mkey_name = MODULE_MKEY_DEFINITONS[selector]['mkey']
    mkey_value = (selector_params.get(mkey_name) if selector_params else None)
    # Selector is '<path>_<name>', e.g. 'firewall_address'.
    [path, name] = selector.split('_')
    url_params = dict()
    # Multiple filters are joined as repeated &filter= query parameters.
    if (params['filters'] and len(params['filters'])):
        filter_body = quote(params['filters'][0])
        for filter_item in params['filters'][1:]:
            filter_body = ('%s&filter=%s' % (filter_body, quote(filter_item)))
        url_params['filter'] = filter_body
    # Multiple sorters are joined as repeated &sort= query parameters.
    if (params['sorters'] and len(params['sorters'])):
        sorter_body = params['sorters'][0]
        for sorter_item in params['sorters'][1:]:
            sorter_body = ('%s&sort=%s' % (sorter_body, sorter_item))
        url_params['sort'] = sorter_body
    # Multiple formatters are '|'-joined into a single format parameter.
    if (params['formatters'] and len(params['formatters'])):
        formatter_body = params['formatters'][0]
        for formatter_item in params['formatters'][1:]:
            formatter_body = ('%s|%s' % (formatter_body, formatter_item))
        url_params['format'] = formatter_body
    fact = None
    if mkey_value:
        fact = fos.get(path, name, vdom=params['vdom'], mkey=mkey_value, parameters=url_params)
    else:
        fact = fos.get(path, name, vdom=params['vdom'], parameters=url_params)
    target_playbook = []
    # Normalize the selector into a valid module/task name.
    selector = selector.replace('.', '_').replace('-', '_')
    results = (fact.get('results') if isinstance(fact.get('results'), list) else [fact.get('results')])
    # Build one 'state: present' task per returned object on top of the
    # boilerplate playbook skeleton.
    for element in PLAYBOOK_BASIC_CONFIG:
        copied_element = copy.deepcopy(element)
        copied_element.update({'tasks': [{('fortios_' + selector): {'vdom': '{{ vdom }}', 'access_token': '{{ fortios_access_token }}', 'state': 'present', selector: {k: v for (k, v) in flatten_multilists_attributes(preprocess_to_valid_data(result), selector).items() if (k not in EXCLUDED_LIST)}}} for result in results]})
        target_playbook.append(copied_element)
    with open((((params['output_path'] + '/') + selector) + '_playbook.yml'), 'w') as f:
        yaml.dump(target_playbook, f, sort_keys=False)
    return ((not is_successful_status(fact)), False, fact)
def map_points_to_perimeter(mesh: PyEITMesh, points: List[Tuple[(float, float)]], output_obj: Optional[dict]=None, map_to_nodes: Optional[bool]=True) -> List[Point]:
    """Project a ring of 2D points onto the perimeter of a mesh.

    Centers the points on the mesh's exterior polygon, casts a ray from the
    polygon centroid through each point, and returns where each ray crosses
    the exterior boundary.  With map_to_nodes=True the intersections are
    snapped to the closest mesh nodes.  If given, ``output_obj`` is filled
    with intermediate geometry (centroid, offset points, polygon, rays) for
    debugging/plotting.
    """
    if (output_obj is None):
        output_obj = {}
    trimesh_obj = trimesh.Trimesh(mesh.node, mesh.element)
    exterior_polygon = create_exterior_polygon(trimesh_obj)
    # Translate the input points so their centroid matches the mesh's.
    points_polygon_uncentered = Polygon(points)
    offset = ((points_polygon_uncentered.centroid.x - exterior_polygon.centroid.x), (points_polygon_uncentered.centroid.y - exterior_polygon.centroid.y))
    points = [((point[0] - offset[0]), (point[1] - offset[1])) for point in points]
    points_polygon = Polygon(points)
    # Diagonal of the combined bounding box — an upper bound on how far a
    # ray must extend to be sure it crosses the exterior boundary.
    b1 = exterior_polygon.bounds
    b2 = points_polygon.bounds
    total_bounds = (min(b1[0], b2[0]), min(b1[1], b2[1]), max(b1[2], b2[2]), max(b1[3], b2[3]))
    max_distance = Point(total_bounds[0], total_bounds[1]).distance(Point(total_bounds[2], total_bounds[3]))
    intersections = []
    intersecting_lines = []
    for point in points:
        # Ray from the exterior centroid through the point, scaled out to
        # max_distance so it always reaches the boundary.
        line = LineString((exterior_polygon.centroid, point))
        scale = (max_distance / line.length)
        line = shapely.affinity.scale(line, scale, scale, 1, exterior_polygon.centroid)
        intersecting_lines.append(line)
        intersection = line.intersection(exterior_polygon.exterior)
        intersections.append(intersection)
    if map_to_nodes:
        intersections = [find_closest_point(point.xy, mesh.node) for point in intersections]
    output_obj['centroid'] = trimesh_obj.centroid
    output_obj['offset_points'] = points
    output_obj['exterior_polygon'] = exterior_polygon
    output_obj['intersecting_lines'] = intersecting_lines
    return intersections
class Solution():
    """Integer division without *, / or % (LeetCode 29 style), using a
    doubling table and clamping the result to the 32-bit signed range."""

    def divide(self, dividend: int, divisor: int) -> int:
        """Return dividend / divisor truncated toward zero.

        The result is clamped to [-2**31, 2**31 - 1]; in particular
        -2**31 / -1 yields 2**31 - 1.
        """
        is_negative = False
        if (((dividend > 0) and (divisor < 0)) or ((dividend < 0) and (divisor > 0))):
            is_negative = True
        dividend = abs(dividend)
        divisor = abs(divisor)
        # Build a doubling table: (divisor << k, 1 << k) while it still fits.
        (runner, index) = (divisor, 1)
        track = []
        while (runner <= dividend):
            track.append((runner, index))
            runner = (runner + runner)
            index += index
        # Greedily subtract the largest fitting multiples, accumulating the
        # quotient from the table's power-of-two weights.
        ret = 0
        for i in range((len(track) - 1), (- 1), (- 1)):
            (val, idx) = track[i]
            if (dividend >= val):
                dividend -= val
                ret += idx
        # Clamp to 32-bit signed bounds.  NOTE(review): the original clamp
        # constants were lost in extraction; reconstructed per the standard
        # problem contract (only -2**31 / -1 overflows for 32-bit inputs).
        if ((not is_negative) and (ret > 2147483647)) or (is_negative and (ret > 2147483648)):
            return 2147483647 if not is_negative else -2147483648
        return (ret if (not is_negative) else (- ret))
def firewall_mms_profile(data, fos):
    """Create/update or delete a firewall MMS profile on the device.

    ``data['state']`` selects the operation; returns the FortiOS API result,
    or fails the module on an invalid state.
    """
    vdom = data['vdom']
    state = data['state']
    # Normalize the profile payload into the API's hyphenated key format.
    raw_profile = flatten_multilists_attributes(data['firewall_mms_profile'])
    payload = underscore_to_hyphen(filter_firewall_mms_profile_data(raw_profile))
    if (state == 'present') or (state is True):
        return fos.set('firewall', 'mms-profile', data=payload, vdom=vdom)
    if state == 'absent':
        return fos.delete('firewall', 'mms-profile', mkey=payload['name'], vdom=vdom)
    fos._module.fail_json(msg='state must be present or absent!')
def get_project_url_name():
    """Query ``fly info`` for the deployed hostname and return the tuple
    (project_url, app_name)."""
    output = make_sp_call('fly info', capture_output=True).stdout.decode().strip()
    re_app_name = '.*Hostname = (.*)\\.fly\\.dev'
    app_name = re.search(re_app_name, output).group(1)
    print(f' Found app name: {app_name}')
    # NOTE(review): the original f-string literal was truncated in
    # extraction; reconstructed from the hostname pattern above.
    project_url = f'https://{app_name}.fly.dev'
    print(f' Project URL: {project_url}')
    return (project_url, app_name)
def docker(cfg, car, ip, target_root, node_name):
    """Build a DockerProvisioner for one benchmark node from the config."""
    distro_version = cfg.opts('mechanic', 'distribution.version', mandatory=False)
    cluster = cfg.opts('mechanic', 'cluster.name')
    rally_home = cfg.opts('node', 'rally.root')
    return DockerProvisioner(
        car,
        node_name,
        cluster,
        ip,
        os.path.join(target_root, node_name),
        distro_version,
        rally_home,
    )
def test_staticfiles_with_package(test_client_factory):
    """Static files can be served from a package, both with the default
    directory and with an explicit (package, directory) tuple."""
    for package_spec in (['tests'], [('tests', 'statics')]):
        app = StaticFiles(packages=package_spec)
        client = test_client_factory(app)
        response = client.get('/example.txt')
        assert response.status_code == 200
        assert response.text == '123\n'
class OptionPlotoptionsItemSonificationTracksMappingHighpassFrequency(Options):
    """Highcharts highpass-frequency mapping options for item-series
    sonification tracks.

    Each option appears as a getter/setter pair around the shared
    ``_config_get``/``_config`` machinery — presumably joined by
    @property/@x.setter decorators upstream (decorators not visible here).
    """
    def mapFunction(self):
        """Mapping function for the frequency; default None."""
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        """Point property the frequency is mapped from; default None."""
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        """Maximum value for the mapping; default None."""
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        """Minimum value for the mapping; default None."""
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        """Range the mapping is computed within; default None."""
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
def make_gemm_exec_key(op_keys: str) -> str:
    """Translate an 'MxNxK' key string into the predicate
    'M == m && N == n && K == k'.

    Raises RuntimeError for keys containing ';' or not having exactly three
    'x'-separated parts (non-integer parts raise ValueError, as before).
    """
    if ';' in op_keys:
        raise RuntimeError("invalid op_keys for gemm: '{}'".format(op_keys))
    # Convert every part first (matching the original's error behavior),
    # then validate the arity.
    dims = [int(piece) for piece in op_keys.split('x')]
    if len(dims) != 3:
        raise RuntimeError("invalid op_keys for gemm: '{}'".format(op_keys))
    return ' && '.join('{} == {}'.format(axis, size) for axis, size in zip('MNK', dims))
class OptionSeriesPackedbubbleSonificationContexttracksMappingGapbetweennotes(Options):
    """Highcharts gap-between-notes mapping options for packed-bubble
    sonification context tracks.

    Each option appears as a getter/setter pair around the shared
    ``_config_get``/``_config`` machinery — presumably joined by
    @property/@x.setter decorators upstream (decorators not visible here).
    """
    def mapFunction(self):
        """Mapping function for the gap; default None."""
        return self._config_get(None)
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)
    def mapTo(self):
        """Point property the gap is mapped from; default None."""
        return self._config_get(None)
    def mapTo(self, text: str):
        self._config(text, js_type=False)
    def max(self):
        """Maximum value for the mapping; default None."""
        return self._config_get(None)
    def max(self, num: float):
        self._config(num, js_type=False)
    def min(self):
        """Minimum value for the mapping; default None."""
        return self._config_get(None)
    def min(self, num: float):
        self._config(num, js_type=False)
    def within(self):
        """Range the mapping is computed within; default None."""
        return self._config_get(None)
    def within(self, value: Any):
        self._config(value, js_type=False)
class ESP32C3StubLoader(ESP32C3ROM):
    """ESP32-C3 loader variant used once the software stub is running.

    Adopts the already-open connection state from the underlying ROM loader
    instance instead of opening its own port.
    """
    FLASH_WRITE_SIZE = 16384  # larger writes than the ROM loader allows
    STATUS_BYTES_LENGTH = 2  # stub replies carry only 2 status bytes
    IS_STUB = True
    def __init__(self, rom_loader):
        # Copy session state from the ROM loader rather than reconnecting.
        self.secure_download_mode = rom_loader.secure_download_mode
        self._port = rom_loader._port
        self._trace_enabled = rom_loader._trace_enabled
        self.cache = rom_loader.cache
        # Discard any pending input left over from the stub upload
        # (inherited helper — TODO confirm exact semantics upstream).
        self.flush_input()
_os(*metadata.platforms)
def main():
    """RTA: create a temp file, securely delete it with sdelete.exe, and
    clean up regardless of the outcome."""
    # bytes.hex() replaces the Python 2-only .encode('hex'), which raises
    # on Python 3; the file name is 32 random hex characters.
    temp_path = (Path(tempfile.gettempdir()) / os.urandom(16).hex())
    sdelete_path = common.get_path('bin', 'sdelete.exe')
    try:
        with open(temp_path, 'wb') as f_out:
            # The file is opened in binary mode, so write bytes, not str.
            f_out.write(b'A')
        subprocess.check_call([sdelete_path, '/accepteula', temp_path])
    finally:
        common.remove_file(temp_path)
class MyArmSocket(CommandGenerator):
    """Socket-based client for the MyArm robot arm.

    Builds protocol frames via ``CommandGenerator`` and exchanges them with
    the robot over a TCP connection instead of a serial port.

    :param ip: server IP address of the robot.
    :param netport: TCP port of the robot server (default 9000).
    :param debug: enable protocol debug output.
    """
    # Module-level read/write helpers bound as methods of this class.
    _write = write
    _read = read

    def __init__(self, ip, netport=9000, debug=False):
        super(MyArmSocket, self).__init__(debug)
        # Module-level argument validator kept on the instance so methods
        # can call self.calibration_parameters(...).
        # NOTE(review): send_radians() below calls the module-level name
        # directly while set_gpio_mode() uses the instance attribute --
        # presumably equivalent, but worth confirming.
        self.calibration_parameters = calibration_parameters
        self.SERVER_IP = ip
        self.SERVER_PORT = netport
        self.sock = self.connect_socket()

    def connect_socket(self):
        """Open and return a TCP connection to the robot server."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self.SERVER_IP, self.SERVER_PORT))
        return sock

    def _mesg(self, genre, *args, **kwargs):
        """Send one command frame over the socket and decode the reply.

        The reply is post-processed per command type: single-value commands
        are unwrapped, angle replies are scaled via ``_int2angle`` and
        coordinate replies via ``_int2coord``. Returns ``None`` when the
        command has no reply or the reply is empty.
        """
        (real_command, has_reply) = super(MyArmSocket, self)._mesg(genre, *args, **kwargs)
        data = self._write(self._flatten(real_command), 'socket')
        if has_reply:
            data = self._read(genre, method='socket')
            if (genre == ProtocolCode.SET_SSID_PWD):
                # Fire-and-forget: reply content is not meaningful here.
                return None
            res = self._process_received(data, genre)
            if (res == []):
                return None
            # Commands whose reply is a single scalar value.
            if (genre in [ProtocolCode.ROBOT_VERSION, ProtocolCode.IS_POWER_ON, ProtocolCode.IS_CONTROLLER_CONNECTED, ProtocolCode.IS_PAUSED, ProtocolCode.IS_IN_POSITION, ProtocolCode.IS_MOVING, ProtocolCode.IS_SERVO_ENABLE, ProtocolCode.IS_ALL_SERVO_ENABLE, ProtocolCode.GET_SERVO_DATA, ProtocolCode.GET_DIGITAL_INPUT, ProtocolCode.GET_GRIPPER_VALUE, ProtocolCode.IS_GRIPPER_MOVING, ProtocolCode.GET_SPEED, ProtocolCode.GET_ENCODER, ProtocolCode.GET_BASIC_INPUT, ProtocolCode.GET_TOF_DISTANCE, ProtocolCode.GET_END_TYPE, ProtocolCode.GET_MOVEMENT_TYPE, ProtocolCode.GET_REFERENCE_FRAME, ProtocolCode.GET_FRESH_MODE, ProtocolCode.GET_GRIPPER_MODE, ProtocolCode.GET_ERROR_INFO, ProtocolCode.GET_GPIO_IN, ProtocolCode.SET_SSID_PWD, ProtocolCode.SetHTSGripperTorque, ProtocolCode.GetHTSGripperTorque, ProtocolCode.GetGripperProtectCurrent, ProtocolCode.InitGripper, ProtocolCode.SET_FOUR_PIECES_ZERO]):
                return self._process_single(res)
            elif (genre in [ProtocolCode.GET_ANGLES]):
                return [self._int2angle(angle) for angle in res]
            elif (genre in [ProtocolCode.GET_COORDS, ProtocolCode.GET_TOOL_REFERENCE, ProtocolCode.GET_WORLD_REFERENCE]):
                if res:
                    # First three values are positions, last three rotations.
                    r = []
                    for idx in range(3):
                        r.append(self._int2coord(res[idx]))
                    for idx in range(3, 6):
                        r.append(self._int2angle(res[idx]))
                    return r
                else:
                    return res
            elif (genre in [ProtocolCode.GET_SERVO_VOLTAGES]):
                return [self._int2coord(angle) for angle in res]
            elif (genre in [ProtocolCode.GET_JOINT_MAX_ANGLE, ProtocolCode.GET_JOINT_MIN_ANGLE]):
                return self._int2coord(res[0])
            elif (genre in [ProtocolCode.GET_BASIC_VERSION, ProtocolCode.SOFTWARE_VERSION, ProtocolCode.GET_ATOM_VERSION]):
                return self._int2coord(self._process_single(res))
            elif (genre == ProtocolCode.GET_ANGLES_COORDS):
                # Mixed reply: joint angles, then coords, then more angles.
                r = []
                for index in range(len(res)):
                    if (index < 7):
                        r.append(self._int2angle(res[index]))
                    elif (index < 10):
                        r.append(self._int2coord(res[index]))
                    else:
                        r.append(self._int2angle(res[index]))
                return r
            else:
                return res
        return None

    def get_radians(self):
        """Return the current joint angles in radians (rounded to 3 dp)."""
        angles = self._mesg(ProtocolCode.GET_ANGLES, has_reply=True)
        return [round((angle * (math.pi / 180)), 3) for angle in angles]

    def send_radians(self, radians, speed):
        """Move all joints to *radians* at *speed* (converted to degrees)."""
        calibration_parameters(len6=radians, speed=speed)
        degrees = [self._angle2int((radian * (180 / math.pi))) for radian in radians]
        return self._mesg(ProtocolCode.SEND_ANGLES, degrees, speed)

    def sync_send_angles(self, degrees, speed, timeout=15):
        """send_angles(), then poll until in position or *timeout* elapses."""
        t = time.time()
        self.send_angles(degrees, speed)
        while ((time.time() - t) < timeout):
            f = self.is_in_position(degrees, 0)
            if f:
                break
            time.sleep(0.1)
        return self

    def sync_send_coords(self, coords, speed, mode, timeout=15):
        """send_coords(), then poll until in position or *timeout* elapses."""
        t = time.time()
        self.send_coords(coords, speed, mode)
        while ((time.time() - t) < timeout):
            if self.is_in_position(coords, 1):
                break
            time.sleep(0.1)
        return self

    def set_gpio_mode(self, mode):
        """Select the GPIO numbering scheme: 'BCM' -> 0, anything else -> 1."""
        self.calibration_parameters(gpiomode=mode)
        if (mode == 'BCM'):
            return self._mesg(ProtocolCode.SET_GPIO_MODE, 0)
        else:
            return self._mesg(ProtocolCode.SET_GPIO_MODE, 1)

    def set_gpio_out(self, pin_no, mode):
        """Configure *pin_no* direction: 'in' -> 0, anything else -> 1."""
        if (mode == 'in'):
            return self._mesg(ProtocolCode.SET_GPIO_UP, pin_no, 0)
        else:
            return self._mesg(ProtocolCode.SET_GPIO_UP, pin_no, 1)

    def set_gpio_output(self, pin_no, state):
        """Drive output pin *pin_no* to *state*."""
        return self._mesg(ProtocolCode.SET_GPIO_OUTPUT, pin_no, state)

    def get_gpio_in(self, pin_no):
        """Read input pin *pin_no*."""
        return self._mesg(ProtocolCode.GET_GPIO_IN, pin_no, has_reply=True)

    def wait(self, t):
        """Sleep *t* seconds; returns self for chaining."""
        time.sleep(t)
        return self

    def close(self):
        """Close the TCP connection to the robot."""
        self.sock.close()
class OptionsMapbox(Options):
    """Python wrapper for the Mapbox GL JS ``Map`` constructor options.

    NOTE(review): each pair of same-named methods below is clearly a
    getter/setter pair whose ``@property`` / ``@<name>.setter`` decorators
    appear to have been stripped during extraction -- as plain code only the
    second definition of each name survives. Confirm against the original
    module. The ``self._config_get(<default>)`` argument documents the
    Mapbox default for each option.
    """

    # --- authentication / rendering ---

    def accessToken(self):
        return self._config_get(None)

    def accessToken(self, value: str):
        self._config(value)

    def antialias(self):
        return self._config_get(False)

    def antialias(self, flag: bool):
        self._config(flag)

    def attributionControl(self):
        return self._config_get(True)

    def attributionControl(self, flag: bool):
        self._config(flag)

    # --- camera state ---

    def bearing(self):
        return self._config_get(0)

    def bearing(self, value: float):
        self._config(value)

    def bearingSnap(self):
        return self._config_get(7)

    def bearingSnap(self, value: float):
        self._config(value)

    def bounds(self):
        return self._config_get(None)

    def bounds(self, value: str):
        self._config(value)

    # --- interaction handlers ---

    def boxZoom(self):
        return self._config_get(True)

    def boxZoom(self, flag: bool):
        self._config(flag)

    def container(self):
        return self._config_get(None)

    def container(self, value: str):
        self._config(value)

    def interactive(self):
        return self._config_get(True)

    def interactive(self, flag: bool):
        self._config(flag)

    def keyboard(self):
        return self._config_get(True)

    def keyboard(self, flag: bool):
        self._config(flag)

    def cooperativeGestures(self):
        return self._config_get(None)

    def cooperativeGestures(self, num: int):
        self._config(num)

    def crossSourceCollisions(self):
        return self._config_get(True)

    def crossSourceCollisions(self, flag: bool):
        self._config(flag)

    def customAttribution(self):
        return self._config_get(None)

    def customAttribution(self, num: int):
        self._config(num)

    def doubleClickZoom(self):
        return self._config_get(True)

    def doubleClickZoom(self, flag: bool):
        self._config(flag)

    def dragPan(self):
        return self._config_get(True)

    def dragPan(self, flag: bool):
        self._config(flag)

    def dragRotate(self):
        return self._config_get(True)

    def dragRotate(self, flag: bool):
        self._config(flag)

    def fadeDuration(self):
        # Milliseconds for symbol fade in/out.
        return self._config_get(300)

    def fadeDuration(self, num: int):
        self._config(num)

    def failIfMajorPerformanceCaveat(self):
        return self._config_get(False)

    def failIfMajorPerformanceCaveat(self, num: int):
        self._config(num)

    def fitBoundsOptions(self):
        return self._config_get(None)

    def fitBoundsOptions(self, num: int):
        self._config(num)

    def hash(self):
        return self._config_get(None)

    def hash(self, flag: bool):
        self._config(flag)

    # --- localization ---

    def language(self):
        return self._config_get(None)

    def language(self, value: str):
        self._config(value)

    def locale(self):
        return self._config_get(None)

    def locale(self, value: str):
        self._config(value)

    def localFontFamily(self):
        return self._config_get(False)

    def localFontFamily(self, flag: bool):
        self._config(flag)

    def localIdeographFontFamily(self):
        return self._config_get('sans-serif')

    def localIdeographFontFamily(self, value: str):
        self._config(value)

    def logoPosition(self):
        return self._config_get('bottom-left')

    def logoPosition(self, value: str):
        self._config(value)

    # --- viewport constraints ---

    def maxBounds(self):
        return self._config_get(None)

    def maxBounds(self, value: str):
        self._config(value)

    def maxPitch(self):
        return self._config_get(85)

    def maxPitch(self, num: int):
        self._config(num)

    def maxTileCacheSize(self):
        return self._config_get(None)

    def maxTileCacheSize(self, num: float):
        self._config(num)

    def pitch(self):
        return self._config_get(0)

    def pitch(self, num: int):
        self._config(num)

    def pitchWithRotate(self):
        return self._config_get(True)

    def pitchWithRotate(self, flag: bool):
        self._config(flag)

    def preserveDrawingBuffer(self):
        return self._config_get(False)

    def preserveDrawingBuffer(self, flag: bool):
        self._config(flag)

    # --- style / center helpers ---

    def styles(self):
        # Enumeration helper writing into the 'style' option.
        return EnumStyles(self, 'style')

    def style(self):
        return self._config_get(None)

    def style(self, value: str):
        self._config(value)

    def center(self):
        return self._config_get([0, 0])

    def center(self, values: list):
        self._config(values)

    def set_center(self, lon: float, lat: float):
        # Convenience wrapper over the center option ([lon, lat] order).
        self.center = [lon, lat]

    def clickTolerance(self):
        return self._config_get(3)

    def clickTolerance(self, num: float):
        self._config(num)

    def collectResourceTiming(self):
        return self._config_get(False)

    def collectResourceTiming(self, num: float):
        self._config(num)

    def maxZoom(self):
        return self._config_get(22)

    def maxZoom(self, num: float):
        self._config(num)

    def minPitch(self):
        return self._config_get(0)

    def minPitch(self, num: float):
        self._config(num)

    def minTileCacheSize(self):
        return self._config_get(None)

    def minTileCacheSize(self, num: float):
        self._config(num)

    def minZoom(self):
        return self._config_get(0)

    def minZoom(self, num: float):
        self._config(num)

    def optimizeForTerrain(self):
        return self._config_get(0)

    def optimizeForTerrain(self, flag: bool):
        self._config(flag)

    def projections(self):
        # Enumeration helper writing into the 'projection' option.
        return EnumProjections(self, 'projection')

    def projection(self):
        return self._config_get('mercator')

    def projection(self, value: str):
        self._config(value)

    def refreshExpiredTiles(self):
        return self._config_get(True)

    def refreshExpiredTiles(self, flag: bool):
        self._config(flag)

    def renderWorldCopies(self):
        return self._config_get(True)

    def renderWorldCopies(self, flag: bool):
        self._config(flag)

    def scrollZoom(self):
        return self._config_get(True)

    def scrollZoom(self, flag: bool):
        self._config(flag)

    def testMode(self):
        return self._config_get(False)

    def testMode(self, flag: bool):
        self._config(flag)

    def touchPitch(self):
        return self._config_get(True)

    def touchPitch(self, flag: bool):
        self._config(flag)

    def touchZoomRotate(self):
        return self._config_get(True)

    def touchZoomRotate(self, value: Union[(str, bool)]):
        self._config(value)

    def trackResize(self):
        return self._config_get(True)

    def trackResize(self, flag: bool):
        self._config(flag)

    def worldview(self):
        return self._config_get(None)

    def worldview(self, value: str):
        self._config(value)

    def transformRequest(self, js_funcs: types.JS_FUNCS_TYPES, profile: types.PROFILE_TYPE=None):
        # Placeholder: JS callback bridging is not implemented yet.
        raise NotImplementedError('Not available yet')

    def zoom(self):
        return self._config_get(0)

    def zoom(self, num: float):
        self._config(num)
class CommunityData(object):
    """Community-based (SNMP v1/v2c) security parameters.

    ``mpModel`` selects the protocol version (1 -> SNMPv2c by default);
    ``securityModel`` is always ``mpModel + 1``.
    """
    mpModel = 1  # SNMPv2c by default
    securityModel = (mpModel + 1)
    securityLevel = 'noAuthNoPriv'  # community model offers no auth/privacy
    # NOTE(review): 'null' is presumably an empty-string/OctetString constant
    # defined elsewhere in this module -- confirm.
    contextName = null
    tag = null

    def __init__(self, communityIndex, communityName=None, mpModel=None, contextEngineId=None, contextName=None, tag=None, securityName=None):
        if (mpModel is not None):
            self.mpModel = mpModel
            self.securityModel = (mpModel + 1)
        self.contextEngineId = contextEngineId
        if (contextName is not None):
            self.contextName = contextName
        if (tag is not None):
            self.tag = tag
        # Single-argument form: the sole positional argument is the community
        # name; a synthetic index is then derived from a hash of the config.
        if (communityName is None):
            (communityName, communityIndex) = (communityIndex, None)
        self.communityName = communityName
        if (communityIndex is None):
            self.securityName = ('s%s' % hash((self.communityName, self.mpModel, self.contextEngineId, self.contextName, self.tag)))
            self.communityIndex = self.securityName
        else:
            self.communityIndex = communityIndex
            if (securityName is None):
                self.securityName = communityIndex
            else:
                self.securityName = securityName

    def __hash__(self):
        # Mutable configuration object: explicitly unhashable.
        raise TypeError(('%s is not hashable' % self.__class__.__name__))

    def __repr__(self):
        # Community string deliberately masked as <COMMUNITY> in repr output.
        return ('%s(communityIndex=%r, communityName=<COMMUNITY>, mpModel=%r, contextEngineId=%r, contextName=%r, tag=%r, securityName=%r)' % (self.__class__.__name__, self.communityIndex, self.mpModel, self.contextEngineId, self.contextName, self.tag, self.securityName))

    def clone(self, communityIndex=None, communityName=None, mpModel=None, contextEngineId=None, contextName=None, tag=None, securityName=None):
        """Return a copy of this object with selected fields overridden.

        NOTE(review): the ``(x is None) and self.x or x`` defaulting pattern
        falls back incorrectly whenever the current value is falsy (0, '',
        etc.); this looks like deliberate legacy style -- confirm before
        changing.
        """
        if (communityName is None):
            (communityName, communityIndex) = (communityIndex, None)
        return self.__class__(communityIndex, (((communityName is None) and self.communityName) or communityName), (((mpModel is None) and self.mpModel) or mpModel), (((contextEngineId is None) and self.contextEngineId) or contextEngineId), (((contextName is None) and self.contextName) or contextName), (((tag is None) and self.tag) or tag), (((securityName is None) and self.securityName) or securityName))
class OptionSeriesArearangeSonificationTracksPointgrouping(Options):
    """Highcharts option wrapper for
    ``series.arearange.sonification.tracks.pointGrouping``.

    NOTE(review): same-named method pairs below are getter/setter pairs
    whose ``@property`` / setter decorators appear stripped in extraction;
    only the second definition of each name survives as plain code.
    """

    def algorithm(self):
        # Grouping algorithm; Highcharts default 'minmax'.
        return self._config_get('minmax')

    def algorithm(self, text: str):
        self._config(text, js_type=False)

    def enabled(self):
        return self._config_get(True)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def groupTimespan(self):
        # Timespan (ms) of each group; default 15.
        return self._config_get(15)

    def groupTimespan(self, num: float):
        self._config(num, js_type=False)

    def prop(self):
        # Point property the grouping operates on; default 'y'.
        return self._config_get('y')

    def prop(self, text: str):
        self._config(text, js_type=False)
class LiteEthPHYRGMIIRX(LiteXModule):
    """RGMII receive datapath built from Xilinx Spartan-6-era primitives.

    Input rx_ctl and the 4-bit rx_data bus are delayed by ``rx_delay`` via
    IODELAY2, sampled on both clock edges with IDDR2 to recover 8-bit data,
    and driven onto an 8-bit stream endpoint. ``source.last`` pulses on the
    falling edge of rx_ctl (end of frame).
    """

    def __init__(self, pads, rx_delay=2e-09):
        self.source = source = stream.Endpoint(eth_phy_description(8))
        # IODELAY2 taps are ~50 ps each; the tap count must fit the
        # primitive's 8-bit ODELAY_VALUE parameter.
        rx_delay_taps = int((rx_delay / 5e-11))
        assert (rx_delay_taps < 256)
        rx_ctl_ibuf = Signal()
        rx_ctl_idelay = Signal()
        rx_ctl = Signal()
        rx_ctl_reg = Signal()
        rx_data_ibuf = Signal(4)
        rx_data_idelay = Signal(4)
        rx_data = Signal(8)
        rx_data_reg = Signal(8)
        # rx_ctl path: input buffer -> fixed delay -> DDR capture (Q0 only).
        self.specials += [Instance('IBUF', i_I=pads.rx_ctl, o_O=rx_ctl_ibuf), Instance('IODELAY2', p_IDELAY_TYPE='FIXED', p_ODELAY_VALUE=rx_delay_taps, p_DELAY_SRC='IDATAIN', o_DATAOUT=rx_ctl_idelay, i_CAL=0, i_CE=0, i_CLK=0, i_IDATAIN=rx_ctl_ibuf, i_INC=0, i_IOCLK0=0, i_IOCLK1=0, i_ODATAIN=0, i_RST=0, i_T=1), Instance('IDDR2', p_DDR_ALIGNMENT='C0', o_Q0=rx_ctl, i_C0=ClockSignal('eth_rx'), i_C1=(~ ClockSignal('eth_rx')), i_CE=1, i_D=rx_ctl_idelay, i_R=0, i_S=0)]
        self.sync += rx_ctl_reg.eq(rx_ctl)
        # rx_data path: same chain per bit; IDDR2 recovers low nibble on Q0
        # and high nibble on Q1.
        for i in range(4):
            self.specials += [Instance('IBUF', i_I=pads.rx_data[i], o_O=rx_data_ibuf[i]), Instance('IODELAY2', p_IDELAY_TYPE='FIXED', p_ODELAY_VALUE=rx_delay_taps, p_DELAY_SRC='IDATAIN', o_DATAOUT=rx_data_idelay[i], i_CAL=0, i_CE=0, i_CLK=0, i_IDATAIN=rx_data_ibuf[i], i_INC=0, i_IOCLK0=0, i_IOCLK1=0, i_ODATAIN=0, i_RST=0, i_T=1), Instance('IDDR2', p_DDR_ALIGNMENT='C0', o_Q0=rx_data[i], o_Q1=rx_data[(i + 4)], i_C0=ClockSignal('eth_rx'), i_C1=(~ ClockSignal('eth_rx')), i_CE=1, i_D=rx_data_idelay[i], i_R=0, i_S=0)]
        self.sync += rx_data_reg.eq(rx_data)
        rx_ctl_reg_d = Signal()
        self.sync += rx_ctl_reg_d.eq(rx_ctl_reg)
        # End-of-frame: rx_ctl just deasserted (was high, now low).
        last = Signal()
        self.comb += last.eq(((~ rx_ctl_reg) & rx_ctl_reg_d))
        self.sync += [source.valid.eq(rx_ctl_reg), source.data.eq(Cat(rx_data_reg[:4], rx_data[4:]))]
        self.comb += source.last.eq(last)
class ExpireTokenAuthentication(TokenAuthentication):
    """Token authentication that rejects tokens older than the configured
    ``AUTHTOKEN_DURATION`` (seconds)."""

    def authenticate(self, request):
        """Return ``(user, token)`` for a valid, unexpired token, else None."""
        result = super(ExpireTokenAuthentication, self).authenticate(request)
        if not result:
            return None
        user, token = result[0], result[1]
        # Reject tokens whose age exceeds the configured lifetime.
        age = (timezone.now() - token.created).total_seconds()
        if age > settings.AUTHTOKEN_DURATION:
            return None
        # Attach the authenticated user's datacenter to the request.
        _set_request_dc(request, user)
        return result
def bump_version(v: version.Version, level: str) -> str:
    """Return the PEP 440 version string obtained by bumping *v* at *level*.

    ``level`` is one of ``major``/``minor``/``patch``/``alpha``/``beta``/
    ``post``/``dev``. Pre-release, post-release and dev markers are carried
    over or reset according to PEP 440 ordering rules; bumping
    major/minor/patch on a pre/dev release of that exact version finalizes
    it instead of incrementing.
    """
    parts: List[int] = list(v.release)
    phase: Optional[str]
    pre_num: Optional[int]
    phase, pre_num = v.pre if v.pre else (None, None)
    dev_num: Optional[int] = v.dev
    post_num: Optional[int] = v.post

    if level in ('major', 'minor', 'patch'):
        keep = {'major': 1, 'minor': 2, 'patch': 3}[level]
        if not any(parts[keep:]) and (phase is not None or dev_num is not None):
            # Already a pre/dev release of exactly this version: finalize it
            # by only stripping the markers below.
            pass
        else:
            parts[keep - 1] += 1
            parts[keep:] = [0] * max(len(parts) - keep, 0)
        phase = pre_num = post_num = dev_num = None
    elif level == 'alpha':
        if phase is None:
            if dev_num is None:
                parts[-1] += 1
            phase, pre_num = 'a', 1
        elif phase > 'a':
            # Already past alpha (beta/rc): alpha of the next patch release.
            parts[-1] += 1
            phase, pre_num = 'a', 1
        elif phase == 'a':
            if not dev_num:
                pre_num = 1 if pre_num is None else pre_num + 1
        post_num = dev_num = None
    elif level == 'beta':
        if phase is None:
            if dev_num is None:
                parts[-1] += 1
            phase, pre_num = 'b', 1
        elif phase > 'b':
            parts[-1] += 1
            phase, pre_num = 'b', 1
        elif phase == 'b':
            if not dev_num:
                pre_num = 1 if pre_num is None else pre_num + 1
        elif phase < 'b':
            # Upgrade an alpha of this version in place to its first beta.
            pre_num = 1
            phase = 'b'
        post_num = dev_num = None
    elif level == 'post':
        post_num = 1 if post_num is None else post_num + 1
        dev_num = None
    elif level == 'dev':
        if dev_num is not None:
            dev_num += 1
        else:
            if phase:
                pre_num = 1 if pre_num is None else pre_num + 1
            else:
                parts[-1] += 1
            dev_num = 1

    out = '.'.join(str(n) for n in parts)
    if phase is not None:
        out += f'{phase}{pre_num}'
    if post_num is not None:
        out += f'.post{post_num}'
    if dev_num is not None:
        out += f'.dev{dev_num}'
    return out
# NOTE(review): the two leading lines look like '@pytest.mark.parallel' /
# '@pytest.mark.parametrize' decorators whose '@pytest.mark' prefix was lost
# during extraction -- confirm against the original test module.
.parallel(nprocs=2)
.parametrize('infotype', ['local', 'sum', 'max'])
def test_get_info(a, bcs, infotype):
    """Matrix-free assembled operators report their memory usage via
    PETSc getInfo(); check local / summed / maxed figures, and that adding
    boundary conditions doubles the stored data.
    """
    A = assemble(a, mat_type='matfree')
    ctx = A.petscmat.getPythonContext()
    itype = {'local': A.petscmat.InfoType.LOCAL, 'sum': A.petscmat.InfoType.GLOBAL_SUM, 'max': A.petscmat.InfoType.GLOBAL_MAX}[infotype]
    info = ctx.getInfo(A.petscmat, info=itype)
    (test, trial) = a.arguments()
    # Expected bytes: one scalar per dof on each of the two function spaces.
    expect = ((test.function_space().dof_dset.total_size * test.function_space().value_size) + (trial.function_space().dof_dset.total_size * trial.function_space().value_size))
    expect *= ScalarType.itemsize
    if (infotype == 'sum'):
        expect = A.comm.allreduce(expect, op=MPI.SUM)
    elif (infotype == 'max'):
        expect = A.comm.allreduce(expect, op=MPI.MAX)
    assert (info['memory'] == expect)
    # With bcs the context stores both the bc'd and unmodified operator.
    if (bcs is not None):
        A = assemble(a, mat_type='matfree', bcs=bcs)
        ctx = A.petscmat.getPythonContext()
        info = ctx.getInfo(A.petscmat, info=itype)
        assert (info['memory'] == (2 * expect))
class OptionPlotoptionsColumnPointEvents(Options):
    """Highcharts option wrapper for ``plotOptions.column.point.events``:
    JavaScript callbacks fired on point interactions.

    NOTE(review): same-named method pairs below are getter/setter pairs
    whose ``@property`` / setter decorators appear stripped in extraction;
    only the second definition of each name survives as plain code. All
    values are JS callback definitions with no default.
    """

    def click(self):
        return self._config_get(None)

    def click(self, value: Any):
        self._config(value, js_type=False)

    def drag(self):
        return self._config_get(None)

    def drag(self, value: Any):
        self._config(value, js_type=False)

    def dragStart(self):
        return self._config_get(None)

    def dragStart(self, value: Any):
        self._config(value, js_type=False)

    def drop(self):
        return self._config_get(None)

    def drop(self, value: Any):
        self._config(value, js_type=False)

    def mouseOut(self):
        return self._config_get(None)

    def mouseOut(self, value: Any):
        self._config(value, js_type=False)

    def mouseOver(self):
        return self._config_get(None)

    def mouseOver(self, value: Any):
        self._config(value, js_type=False)

    def remove(self):
        return self._config_get(None)

    def remove(self, value: Any):
        self._config(value, js_type=False)

    def select(self):
        return self._config_get(None)

    def select(self, value: Any):
        self._config(value, js_type=False)

    def unselect(self):
        return self._config_get(None)

    def unselect(self, value: Any):
        self._config(value, js_type=False)

    def update(self):
        return self._config_get(None)

    def update(self, value: Any):
        self._config(value, js_type=False)
def set_in_db_key_value_store(key, new_data):
    """Persist *key* -> *new_data* in the KeyValueStore table.

    The module-level KV_META_CACHE is consulted first so that writing an
    unchanged value is a no-op; after a successful write the cache is
    refreshed with a copy of the new value.
    """
    global KV_META_CACHE
    # Truncated representation used only for readable log output.
    new_s = str(new_data)
    if (len(new_s) > 40):
        new_s = (new_s[:35] + '...')
    kv_log.info("Setting kv key '%s' to '%s'", key, new_s)
    if (key in KV_META_CACHE):
        if (KV_META_CACHE[key] == new_data):
            # Cached value already matches; skip the DB round-trip entirely.
            return
    # One DB session per thread, keyed on the thread id.
    thread_id = 'kv_store_{}'.format(threading.get_ident())
    with session_context(thread_id) as sess:
        have = sess.query(KeyValueStore).filter((KeyValueStore.key == key)).scalar()
        if have:
            if (have.value != new_data):
                kv_log.info("Updating item: '%s', '%s'", have, have.key)
                kv_log.info('\told -> %s', have.value)
                kv_log.info('\tnew -> %s', new_s)
                have.value = new_data
            else:
                kv_log.info('Item has not changed. Nothing to do!')
        else:
            kv_log.info("New item: '%s', %s", key, new_s)
            new = KeyValueStore(key=key, value=new_data)
            sess.add(new)
        sess.commit()
    try:
        KV_META_CACHE[key] = copy.copy(new_data)
    except KeyError:
        # NOTE(review): presumably guards against a broken/expired cache
        # object by rebuilding a fresh 5-minute TTL cache; confirm that
        # TTLCache.__setitem__ can actually raise KeyError here.
        KV_META_CACHE = cachetools.TTLCache(maxsize=5000, ttl=(60 * 5))
        KV_META_CACHE[key] = copy.copy(new_data)
class Leaf(Tree):
    """A terminal tree node holding a single identifier."""

    def __init__(self, identifier):
        self.identifier = identifier

    def dfs_traverse(self, visitor):
        # A leaf ends the recursion: simply report itself to the visitor.
        visitor.visit_leaf(self)

    def get_leaves(self):
        """Return the leaves of this subtree -- just this node."""
        return [self]

    def get_leaves_identifiers(self):
        """Return the identifiers of the leaves of this subtree."""
        return [self.identifier]

    def __repr__(self):
        return "'" + self.identifier + "'"

    leaves = property(fget=get_leaves, doc='List of leaves in this subtree.')
    leaves_identifiers = property(fget=get_leaves_identifiers, doc='List of identifiers of the leaves in this subtree.')
def test_changes_reflected_back(fx_asset):
    """Edits made inside a ``with img.sequence[i]`` block are written back
    to the parent image only when the context manager exits (Wand).
    """
    with Image(filename=str(fx_asset.joinpath('apple.ico'))) as img:
        with img.sequence[3] as single:
            single.resize(32, 32)
            assert (single.size == (32, 32))
        # Drop the cached SingleImage so the next access re-reads the frame.
        img.sequence.instances[3] = None
        uncommitted = img.sequence[3]
        # Plain indexing (no context manager) still shows the original size.
        assert (uncommitted.size == (16, 16))
        img.sequence.instances[3] = None
        with img.sequence[3] as committed:
            # Context-managed access sees the committed resize.
            assert (committed.size == (32, 32))
class ConsolePrinter():
    """Replacement for ``builtins.print`` that redraws the interactive prompt.

    While active (between start() and finish()) every print() call that
    targets stdout is routed through the attached console: the current
    prompt line is blanked, the printed text is emitted, and the prompt
    (with whatever the user has typed so far) is redrawn underneath.
    Prints targeting any other file are forwarded to the real print().
    """
    # Reference to the real print so it can be restored and used for
    # non-stdout output.
    _builtins_print = builtins.print

    def __init__(self, console):
        self.console = console

    def start(self):
        """Install this object as the global ``print``."""
        builtins.print = self

    def __call__(self, *values, sep=' ', end='\n', file=sys.stdout, flush=False):
        if (file != sys.stdout):
            # Not stdout: delegate untouched to the real print().
            self._builtins_print(*values, sep=sep, end=end, file=file, flush=flush)
            return
        # Reconstruct the prompt line currently shown to the user
        # (continuation prompt when a multi-line statement is buffered).
        ps = (sys.ps2 if self.console.buffer else sys.ps1)
        line = f'{ps}{self.console.prompt_session.app.current_buffer.text}'
        # Blank out the prompt line before writing the new output.
        self.console.write(f'''
{(' ' * len(line))}
''')
        if (not end.endswith('\n')):
            # BUG FIX: was the literal string '{end}\n' (missing f-prefix),
            # which discarded the caller's end value and printed '{end}'.
            end = f'{end}\n'
        text = f'{sep.join((str(i) for i in values))}{end}{line}'
        self.console.write(text)

    def finish(self):
        """Restore the original ``builtins.print``."""
        builtins.print = self._builtins_print
def _check_extern_modules(backend):
    """Verify the requested backend and its helper binaries are available.

    Raises BackendException if *backend* is not installed or either the
    maf2synteny or overlap binary is missing.
    """
    available = SyntenyBackend.get_available_backends()
    if backend not in available:
        raise BackendException('"{0}" is not installed.'.format(backend))
    # Both helper binaries are built by 'make'; check each in turn.
    for module, binary in ((m2s, 'maf2synteny'), (overlap, 'overlap')):
        if not module.check_binary():
            raise BackendException("{0} binary is missing, did you run 'make'?".format(binary))
class TimingTest(unittest.TestCase):
    """Timing-related operators of the Event stream API (delay, sample,
    timeout, debounce, throttle). These tests are wall-clock sensitive.
    """

    def test_delay(self):
        """delay() shifts event timestamps by roughly the requested delay."""
        delay = 0.01
        src = Event.sequence(array1, interval=0.01)
        e1 = src.timestamp().pluck(0)
        e2 = src.delay(delay).timestamp().pluck(0)
        r = e1.zip(e2).map((lambda a, b: (b - a))).mean().run()
        # Mean shift must be within 2 ms of the requested delay.
        self.assertLess(abs(r[(- 1)]), (delay + 0.002))

    def test_sample(self):
        """sample() emits the source's latest value on each timer tick."""
        timer = Event.timer(0.021, 4)
        event = Event.range(10, interval=0.01).sample(timer)
        self.assertEqual(event.run(), [2, 4, 6, 8])

    def test_timeout(self):
        """timeout() fires NO_VALUE when the source stays silent too long."""
        timer = Event.timer(10, count=1)
        event = timer.timeout(0.01)
        self.assertEqual(event.run(), [Event.NO_VALUE])

    def test_debounce(self):
        """debounce() keeps only the last value of each rapid burst."""
        event = Event.range(10, interval=0.05).mergemap((lambda t: Event.sequence(array2, 0.001))).debounce(0.01)
        self.assertEqual(event.run(), ([109] * 10))

    def test_debounce_on_first(self):
        """debounce(on_first=True) keeps the first value of each burst."""
        event = Event.range(10, interval=0.05).mergemap((lambda t: Event.sequence(array2, 0.001))).debounce(0.02, on_first=True)
        self.assertEqual(event.run(), ([100] * 10))

    def test_throttle(self):
        """throttle() paces emission by cost: 500 items at cost 10 against a
        budget of 1000 per 0.1 s should take ~0.5 s without dropping items."""
        t0 = time.time()
        a = list(range(500))
        event = Event.sequence(a).throttle(1000, 0.1, cost_func=(lambda i: 10))
        result = event.run()
        self.assertEqual(result, a)
        dt = (time.time() - t0)
        self.assertLess(abs((dt - 0.5)), 0.05)
def extractDemonzvirusBlogspotCom(item):
    """Parse a feed *item* from demonzvirus.blogspot.com.

    Returns a release message when the title matches a known series prefix,
    None for previews / unparseable titles, and False when the title
    parses but matches no known series.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    lowered = item['title'].lower()
    if not (chp or vol) or 'preview' in lowered:
        return None
    # Known series, keyed by title prefix.
    known_series = [
        ('Kagerou', 'Kagerou, Batsubyoushimasu!', 'translated'),
        ('Cat ', 'Me and My Beloved Cat (Girlfriend)', 'translated'),
    ]
    for prefix, series, tl_type in known_series:
        if lowered.startswith(prefix.lower()):
            return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class OptionPlotoptionsSunburstSonificationContexttracksMappingGapbetweennotes(Options):
    """Highcharts option wrapper for
    ``plotOptions.sunburst.sonification.contextTracks.mapping.gapBetweenNotes``.

    NOTE(review): same-named method pairs below are getter/setter pairs
    whose ``@property`` / setter decorators appear stripped in extraction;
    only the second definition of each name survives as plain code.
    """

    def mapFunction(self):
        # Optional mapping function; no default.
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        # Point property the mapping reads from; no default.
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        # Optional constraint for the mapping range.
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class worker(AppCommand):
    """``faust worker`` command: start a worker instance of the app.

    Adds web-server related CLI options on top of the builtin worker
    options and prints a startup banner describing the configuration.
    """
    daemon = True  # run until terminated
    redirect_stdouts = True  # route stdout/stderr through logging
    # Click-style option definitions specific to this command.
    worker_options = [option('--with-web/--without-web', default=True, help='Enable/disable web server and related components.'), option('--web-port', '-p', default=None, type=params.TCPPort(), help=f'Port to run web server on (default: {WEB_PORT})'), option('--web-transport', default=None, type=params.URLParam(), help=f'Web server transport (default: {WEB_TRANSPORT})'), option('--web-bind', '-b', type=str), option('--web-host', '-h', default=None, type=str, help=f'Canonical host name for the web server (default: {WEB_BIND})')]
    options = (cast(List, worker_options) + cast(List, now_builtin_worker_options))

    def on_worker_created(self, worker: Worker) -> None:
        """Print the startup banner once the Worker instance exists."""
        self.say(self.banner(worker))

    def as_service(self, loop: asyncio.AbstractEventLoop, *args: Any, **kwargs: Any) -> ServiceT:
        """Apply CLI options to the app config; the app itself is the service."""
        self._init_worker_options(*args, **kwargs)
        return self.app

    def _init_worker_options(self, *args: Any, with_web: bool, web_port: Optional[int], web_bind: Optional[str], web_host: Optional[str], web_transport: URL, **kwargs: Any) -> None:
        # Copy web-related CLI flags onto the app configuration; only
        # explicitly given values override the configured defaults.
        self.app.conf.web_enabled = with_web
        if (web_port is not None):
            self.app.conf.web_port = web_port
        if web_bind:
            self.app.conf.web_bind = web_bind
        if (web_host is not None):
            self.app.conf.web_host = web_host
        if (web_transport is not None):
            self.app.conf.web_transport = web_transport
        if ((web_port is not None) or (web_host is not None)):
            # NOTE(review): this f-string was truncated during extraction
            # (presumably something like
            # f'http://{self.app.conf.web_host}:{self.app.conf.web_port}') --
            # restore from the original source before use.
            self.app.conf.canonical_url = f'

    def _Worker(self) -> Type[Worker]:
        # Worker class is configurable via app settings.
        return cast(Type[Worker], self.app.conf.Worker)

    def banner(self, worker: Worker) -> str:
        """Render the startup banner as an ASCII table."""
        return self._format_banner_table(self._banner_data(worker))

    def _format_banner_table(self, data: TableDataT) -> str:
        table = self.table([(x, str(y)) for (x, y) in data], title=self._banner_title())
        table.inner_heading_row_border = False
        table.inner_row_border = False
        return table.table

    def _banner_title(self) -> str:
        return self.faust_ident()

    def _banner_data(self, worker: Worker) -> TableDataT:
        # Key/value rows for the banner; None entries are filtered out.
        app = cast(FaustWorker, worker).app
        logfile = (worker.logfile if worker.logfile else '-stderr-')
        loglevel = level_name((worker.loglevel or 'WARN')).lower()
        transport_extra = self._human_transport_info(worker.loop)
        return list(filter(None, [('id', app.conf.id), ('transport', f'{app.conf.broker} {transport_extra}'), ('store', f'{app.conf.store}'), (('web', f'{app.web.url}') if app.conf.web_enabled else None), ('log', f'{logfile} ({loglevel})'), ('pid', f'{os.getpid()}'), ('hostname', f'{socket.gethostname()}'), ('platform', self.platform()), self._human_cython_info(), ('drivers', ''), (' transport', app.transport.driver_version), (' web', app.web.driver_version), ('datadir', f'{str(app.conf.datadir.absolute()):<40}'), ('appdir', f'{str(app.conf.appdir.absolute()):<40}')]))

    def _human_cython_info(self) -> Optional[Tuple[(str, str)]]:
        # Report the C compiler only when the Cython extensions imported.
        try:
            import faust._cython.windows
        except ImportError:
            return None
        else:
            compiler = platform.python_compiler()
            return (' +', f'Cython ({compiler})')

    def _human_transport_info(self, loop: Any) -> str:
        # Tag the banner when the uvloop event loop is in use.
        if (loop.__class__.__module__ == 'uvloop'):
            return '+uvloop'
        return ''

    def _driver_versions(self, app: AppT) -> List[str]:
        return [app.transport.driver_version, app.web.driver_version]

    def faust_ident(self) -> str:
        return f'{FAUST} v{faust_version}'

    def platform(self) -> str:
        return '{py_imp} {py_version} ({system} {machine})'.format(py_imp=platform.python_implementation(), py_version=platform.python_version(), system=platform.system(), machine=platform.machine())
class Clip():
    """Wraps a video file and produces 'jumpcut' versions of it based on
    the loudness of its audio track (moviepy-based)."""

    def __init__(self, clip_path: str, min_loud_part_duration: int, silence_part_speed: int) -> None:
        self.clip = VideoFileClip(clip_path)
        self.audio = Audio(self.clip.audio)
        # Dispatch table: cutting strategy per requested cut type.
        self.cut_to_method = {'silent': self.jumpcut_silent_parts, 'voiced': self.jumpcut_voiced_parts}
        self.min_loud_part_duration = min_loud_part_duration
        self.silence_part_speed = silence_part_speed

    def jumpcut(self, cuts: List[str], magnitude_threshold_ratio: float, duration_threshold_in_seconds: float, failure_tolerance_ratio: float, space_on_edges: float) -> Dict[(str, VideoFileClip)]:
        """Return ``{cut_name: concatenated clip}`` for each requested cut.

        The silence-detection parameters are forwarded to the Audio helper,
        which yields the list of intervals to cut.
        """
        intervals_to_cut = self.audio.get_intervals_to_cut(magnitude_threshold_ratio, duration_threshold_in_seconds, failure_tolerance_ratio, space_on_edges)
        outputs = {}
        for cut in cuts:
            jumpcutted_clips = self.cut_to_method[cut](intervals_to_cut)
            outputs[cut] = concatenate_videoclips(jumpcutted_clips)
        return outputs

    def jumpcut_silent_parts(self, intervals_to_cut: List[Tuple[(float, float)]]) -> List[VideoFileClip]:
        """Keep the loud segments; silent intervals are dropped or, when a
        silence speed is configured, kept sped-up and muted."""
        jumpcutted_clips = []
        previous_stop = 0
        for (start, stop) in tqdm(intervals_to_cut, desc='Cutting silent intervals'):
            clip_before = self.clip.subclip(previous_stop, start)
            # Skip loud fragments too short to be worth keeping.
            if (clip_before.duration > self.min_loud_part_duration):
                jumpcutted_clips.append(clip_before)
            if (self.silence_part_speed is not None):
                # Keep the silence, played faster and muted.
                silence_clip = self.clip.subclip(start, stop)
                silence_clip = speedx(silence_clip, self.silence_part_speed).without_audio()
                jumpcutted_clips.append(silence_clip)
            previous_stop = stop
        # Tail of the video after the last silent interval.
        if (previous_stop < self.clip.duration):
            last_clip = self.clip.subclip(previous_stop, self.clip.duration)
            jumpcutted_clips.append(last_clip)
        return jumpcutted_clips

    def jumpcut_voiced_parts(self, intervals_to_cut: List[Tuple[(float, float)]]) -> List[VideoFileClip]:
        """Keep only the detected intervals themselves (the complement of
        jumpcut_silent_parts)."""
        jumpcutted_clips = []
        for (start, stop) in tqdm(intervals_to_cut, desc='Cutting voiced intervals'):
            if (start < stop):
                silence_clip = self.clip.subclip(start, stop)
                jumpcutted_clips.append(silence_clip)
        return jumpcutted_clips
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.