code stringlengths 281 23.7M |
|---|
# NOTE(review): this test's source was corrupted during extraction — the URL
# string literals (`data['url']`, and `url=` / `original_url=` in the expected
# ShareAttachment) were truncated at their opening quotes, leaving unbalanced
# strings and a syntax error. Restore the original URLs from upstream before use.
def test_share_from_graphql_link():
    # GraphQL payload for a plain external-link share (no media, no subattachments).
    data = {'description': {'text': ''}, 'media': {'animated_image': None, 'image': None, 'playable_duration_in_ms': 0, 'is_playable': False, 'playable_url': None}, 'source': {'text': 'a.com'}, 'style_list': ['share', 'fallback'], 'title_with_entities': {'text': 'a.com'}, 'properties': [], 'url': ' 'deduplication_key': 'ee.mid.$xyz', 'action_links': [{'title': 'About this website', 'url': None}], 'messaging_attribution': None, 'messenger_call_to_actions': [], 'xma_layout_info': None, 'target': {'__typename': 'ExternalUrl'}, 'subattachments': []}
    # _from_graphql must map the payload onto an equivalent ShareAttachment.
    assert (ShareAttachment(author=None, url=' original_url=' title='a.com', description='', source='a.com', image=None, original_image_url=None, attachments=[], id='ee.mid.$xyz') == ShareAttachment._from_graphql(data))
def boost(obj=None, backend: str=None, inline=False, boundscheck=True, wraparound=True, cdivision=False, nonecheck=True, nogil=False):
    """Transonic ``boost`` decorator, usable bare or parametrized.

    When applied directly to a function or class (``@boost``), the wrapped
    object is returned immediately; when called with options
    (``@boost(...)``) the configured decorator is returned instead.

    :raises TypeError: if *backend* is given but is not a string.
    """
    if backend is not None and not isinstance(backend, str):
        raise TypeError
    transonic = _get_transonic_calling_module(backend_name=backend)
    decorator = transonic.boost(
        inline=inline,
        nogil=nogil,
        boundscheck=boundscheck,
        wraparound=wraparound,
        cdivision=cdivision,
        nonecheck=nonecheck,
    )
    # Bare usage: obj is the decorated function/class -> apply right away.
    if callable(obj) or isinstance(obj, type):
        return decorator(obj)
    return decorator
def get_node_storages(request, hostname, sr=('storage', 'node', 'storage__owner'), pr=('dc',), order_by=('zpool',)):
    """Return the node-storage queryset for the compute node *hostname*.

    *sr* and *pr* list the relations passed to ``select_related`` and
    ``prefetch_related`` respectively; *order_by* sets the result ordering.
    The extra prefetches/annotations are only applied when the request asks
    for extended output.
    """
    node = get_node(request, hostname, exists_ok=True, noexists_fail=True)
    storages = node.nodestorage_set.select_related(*sr).order_by(*order_by)
    extra_spec = output_extended(request)
    if not extra_spec:
        return storages
    return storages.prefetch_related(*pr).extra(extra_spec)
class github_issue_0027_test_case(unittest.TestCase):
    """Regression test for GitHub issue 0027: appending through a keylist path
    with an explicit list index must mutate the underlying list."""

    def test_append_to_list_with_empty_index(self):
        data = benedict({'results': [{'locations': ['Torino', 'Milano', 'Napoli']}]})
        # Appending via the keypath-returned list must be visible on re-read.
        data['results[0].locations'].append('Roma')
        expected = ['Torino', 'Milano', 'Napoli', 'Roma']
        self.assertEqual(data['results[0].locations'], expected)
def main():
    """Interactive Shodan console.

    Loops over a top-level menu (IP searches, result counts, malware-IP
    checks, honeyscore, host/domain profiles, on-demand scans, exploit
    search, API plan info) and dispatches each selection to the matching
    helper function. All input is read interactively; ANSI colour codes are
    embedded in the prompts.

    NOTE(review): every menu read uses ``int(input(...))`` so non-numeric
    input raises an uncaught ValueError; locals named ``os`` and ``type``
    shadow the module/builtin inside this function — confirm intent upstream.
    """
    # Guard: the tool is Python 3 only.
    if sys.version.startswith('2'):
        sys.exit('Works with Python3 only.')
    banner()
    while True:
        menu()
        option = int(input('\x1b[92m> Enter Selection: '))
        if (option == 1):
            # --- Sub-menu 1: IP searches ---
            while True:
                print('\n\t\t\t\t\x1b[96m\n\t\t\t\t[1] Get IPs by port\n\t\t\t\t[2] Get IPs by software product/version\n\t\t\t\t[3] Get IPs by OS\n\t\t\t\t[4] Get IPs by search terms\n\t\t\t\t[5] Build-a-search to get IPs\n\t\t\t\t[6] Use stock searches\n\t\t\t\t[7] Return to main menu\n\t\t\t\t')
                option = int(input('\x1b[96m> Enter Selection: '))
                if (option == 1):
                    port = input('\x1b[39m> Enter port: ')
                    keyword = input('\x1b[39m> Enter additional Shodan search term(s) (OPTIONAL): ')
                    print('\n')
                    ip_by_port(port, keyword)
                elif (option == 2):
                    product = input('\x1b[39m> Enter product: ')
                    version = input('\x1b[39m> Enter version (OPTIONAL): ')
                    keyword = input('\x1b[39m> Enter additional Shodan search term(s) (OPTIONAL): ')
                    print('\n')
                    ip_by_product(product, version, keyword)
                elif (option == 3):
                    os = input('\x1b[39m> Enter OS: ')
                    keyword = input('\x1b[39m> Enter additional Shodan search term(s) (OPTIONAL): ')
                    print('\n')
                    ip_by_os(os, keyword)
                elif (option == 4):
                    search_term = input('\x1b[39m> Enter Shodan search: ')
                    print('\n')
                    ip_by_keyword(search_term)
                elif (option == 5):
                    # Build-a-search: all criteria optional, combined by helper.
                    port = input('\x1b[39m> Enter port (OPTIONAL): ')
                    product = input('\x1b[39m> Enter product (OPTIONAL): ')
                    version = input('\x1b[39m> Enter version (OPTIONAL): ')
                    os = input('\x1b[39m> Enter OS (OPTIONAL): ')
                    keyword = input('\x1b[39m> Enter additional Shodan search term(s) (OPTIONAL): ')
                    print('\n')
                    build_ip_search(port, product, version, os, keyword)
                elif (option == 6):
                    ip_by_stock_search()
                elif (option == 7):
                    break
                else:
                    print('\x1b[31mInvalid Choice')
        elif (option == 2):
            search_term = input('\x1b[39m> Enter Shodan search: ')
            print('\n')
            count_by_keyword(search_term)
        elif (option == 3):
            ip = input('\x1b[39m> Enter IP(s) or CIDR range: ')
            print('\n')
            check_malware_ip(ip)
        elif (option == 4):
            ip = input('\x1b[39m> Enter IP: ')
            print('\n')
            score = honeyscore(ip)
            # helper returns the sentinel 1.1 (out of the 0..1 range) on error
            if (score == 1.1):
                score = 'Error occurred for that IP.'
            print(('Honeypot probability score: ' + str(score)))
        elif (option == 5):
            ip = input('\x1b[39m> Enter host IP: ')
            print('\n')
            get_host_profile(ip)
        elif (option == 6):
            domain = input('\x1b[39m> Enter domain: ')
            print('\n')
            get_domain_profile(domain)
        elif (option == 7):
            # --- Sub-menu 7: on-demand scans ---
            scan_object = {}
            while True:
                print('\n\t\t\t\t\x1b[96m\n\t\t\t\t[1] Initiate scan\n\t\t\t\t[2] Check scan status\n\t\t\t\t[3] View scan results\n\t\t\t\t[4] Show available protocols\n\t\t\t\t[5] Return to main menu\n\t\t\t\t')
                option = int(input('\x1b[96m> Enter Selection: '))
                if (option == 1):
                    scan_object = []
                    while True:
                        print('\n\t\t\t\t\t\t\x1b[93m\n\t\t\t\t\t\t[1] Basic scan\n\t\t\t\t\t\t[2] Scan particular ports/protocols\n\t\t\t\t\t\t[3] Return to Scan on-demand menu\n\t\t\t\t\t\t')
                        option = int(input('\x1b[93m> Enter Selection: '))
                        if (option == 1):
                            targets = input('\x1b[39m> Enter IP(s), CIDR range(s), or hostname(s) (comma-separated): ')
                            scan_object = targets.replace(' ', '').split(',')
                            # A single target is passed as a plain string, not a 1-list.
                            if (len(scan_object) == 1):
                                scan_object = ''.join(scan_object)
                            run_scan(scan_object)
                        elif (option == 2):
                            # Per-target port/protocol map, accumulated until "scan".
                            scan_object = {}
                            while True:
                                target = input('\x1b[39m> Enter IP, CIDR range, or hostname (or type "scan" when ready to scan): ')
                                if (target == 'scan'):
                                    break
                                # NOTE(review): the example text inside this prompt was
                                # corrupted during extraction (a stripped literal).
                                port_protocol = ast.literal_eval(input("\n\x1b[39m> Ports and protocols required format: [(22, 'ssh'), (503, 'modbus'), (80, ' \nEnter below: \n"))
                                print('\n')
                                scan_object.update({target: port_protocol})
                            run_scan(scan_object)
                        elif (option == 3):
                            break
                        else:
                            print('\x1b[31mInvalid Choice')
                elif (option == 2):
                    scan_id = input('\x1b[39m> Enter scan ID: ')
                    print('\n')
                    get_scan_status(scan_id)
                elif (option == 3):
                    scan_id = input('\x1b[39m> Enter scan ID: ')
                    print('\n')
                    view_scan_results(scan_id)
                elif (option == 4):
                    protocols = api.protocols()
                    for protocol in protocols:
                        print(((('\x1b[1;92m' + protocol) + ':\x1b[0;39m ') + protocols[protocol]))
                elif (option == 5):
                    break
                else:
                    print('\x1b[31mInvalid Choice')
        elif (option == 8):
            # --- Sub-menu 8: exploit searches ---
            while True:
                print('\n\t\t\t\t\x1b[96m\n\t\t\t\t[1] Find exploits by platform\n\t\t\t\t[2] Find exploits by port\n\t\t\t\t[3] Find exploits by type\n\t\t\t\t[4] Find exploits by description\n\t\t\t\t[5] Find exploits by CVE\n\t\t\t\t[6] Find exploits by search term\n\t\t\t\t[7] Build-a-search to find exploits\n\t\t\t\t[8] Count exploits by search terms\n\t\t\t\t[9] Return to main menu\n\t\t\t\t')
                option = int(input('\x1b[96m> Enter Selection: '))
                if (option == 1):
                    platform = input('\x1b[39m> Enter platform: ')
                    keyword = input('\x1b[39m> Enter additional Shodan search term(s) (OPTIONAL): ')
                    print('\n')
                    exploits_by_platform(platform, keyword)
                elif (option == 2):
                    port = input('\x1b[39m> Enter port: ')
                    keyword = input('\x1b[39m> Enter additional Shodan search term(s) (OPTIONAL): ')
                    print('\n')
                    exploits_by_port(port, keyword)
                elif (option == 3):
                    type = input('\x1b[39m> Enter type (dos, exploit, local, remote, shellcode, or webapps): ')
                    keyword = input('\x1b[39m> Enter additional Shodan search term(s) (OPTIONAL): ')
                    exploits_by_type(type, keyword)
                elif (option == 4):
                    description = input('\x1b[39m> Enter exploit description: ')
                    keyword = input('\x1b[39m> Enter additional Shodan search term(s) (OPTIONAL): ')
                    exploits_by_description(description, keyword)
                elif (option == 5):
                    cve = input('\x1b[39m> Enter CVE: ')
                    keyword = input('\x1b[39m> Enter additional Shodan search term(s) (OPTIONAL): ')
                    exploits_by_cve(cve, keyword)
                elif (option == 6):
                    search_term = input('\x1b[39m> Enter search term: ')
                    print('\n')
                    exploits_by_keyword(search_term)
                elif (option == 7):
                    platform = input('\x1b[39m> Enter platform (OPTIONAL): ')
                    port = input('\x1b[39m> Enter port (OPTIONAL): ')
                    type = input('\x1b[39m> Enter type (dos, exploit, local, remote, shellcode, or webapps) (OPTIONAL): ')
                    description = input('\x1b[39m> Enter exploit description (OPTIONAL): ')
                    cve = input('\x1b[39m> Enter CVE (OPTIONAL): ')
                    keyword = input('\x1b[39m> Enter additional Shodan search term(s) (OPTIONAL): ')
                    build_exploit_search(platform, port, type, description, cve, keyword)
                elif (option == 8):
                    search_term = input('\x1b[39m> Enter search term: ')
                    print('\n')
                    count_exploits_by_keyword(search_term)
                elif (option == 9):
                    break
                else:
                    print('\x1b[31mInvalid Choice')
        elif (option == 9):
            api_plan_info()
        elif (option == 10):
            break
        else:
            print('\x1b[31mInvalid Choice.')
        # Reset terminal colour before redrawing the menu.
        print('\x1b[39m')
def markdown_unescape(input_: str) -> str:
    """Remove MarkdownV2 backslash escapes from *input_*.

    Every supported escape sequence ``\\X`` — where ``X`` is one of
    ``_ * [ ] ( ) ~ ` > # + - = | { } . !`` or the backslash itself — is
    collapsed to the bare character ``X``.  A backslash that does not
    introduce one of these characters is left untouched.

    A single left-to-right regex pass replaces the original chain of 19
    ``str.replace`` calls, which re-scanned the whole string once per
    escape character while producing the same result.
    """
    import re  # local import keeps this block self-contained in the extract
    return re.sub(r'\\([_*\[\]()~`>#+\-=|{}.!\\])', r'\1', input_)
class OptionPlotoptionsOrganizationSonificationContexttracksMappingTime(Options):
    """Generated wrapper for the Highcharts option path implied by the class
    name (plotOptions.organization.sonification.contextTracks.mapping.time).

    NOTE(review): each accessor below is defined twice (getter form, then
    setter form) under the same name, so at class-creation time the second
    ``def`` shadows the first and only the setter survives.  The upstream
    generator almost certainly emitted ``@property`` / ``@<name>.setter``
    decorators that were stripped from this extract — confirm and restore.
    """
    def mapFunction(self):
        # Getter (shadowed by the setter below — see class docstring).
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter: stores the value as a non-JS-typed config entry.
        self._config(value, js_type=False)
    def mapTo(self):
        # Getter (shadowed).
        return self._config_get(None)
    def mapTo(self, text: str):
        # Setter.
        self._config(text, js_type=False)
    def max(self):
        # Getter (shadowed).
        return self._config_get(None)
    def max(self, num: float):
        # Setter.
        self._config(num, js_type=False)
    def min(self):
        # Getter (shadowed).
        return self._config_get(None)
    def min(self, num: float):
        # Setter.
        self._config(num, js_type=False)
    def within(self):
        # Getter (shadowed).
        return self._config_get(None)
    def within(self, value: Any):
        # Setter.
        self._config(value, js_type=False)
class OptionPlotoptionsArcdiagramLevelsStates(Options):
    """Accessors for the ``states`` option group of arc-diagram levels."""

    def hover(self) -> 'OptionPlotoptionsArcdiagramLevelsStatesHover':
        """Sub-options applied while a level is hovered."""
        hover_options = self._config_sub_data('hover', OptionPlotoptionsArcdiagramLevelsStatesHover)
        return hover_options

    def inactive(self) -> 'OptionPlotoptionsArcdiagramLevelsStatesInactive':
        """Sub-options applied while a level is inactive."""
        inactive_options = self._config_sub_data('inactive', OptionPlotoptionsArcdiagramLevelsStatesInactive)
        return inactive_options

    def normal(self) -> 'OptionPlotoptionsArcdiagramLevelsStatesNormal':
        """Sub-options for the normal (default) state."""
        normal_options = self._config_sub_data('normal', OptionPlotoptionsArcdiagramLevelsStatesNormal)
        return normal_options

    def select(self) -> 'OptionPlotoptionsArcdiagramLevelsStatesSelect':
        """Sub-options applied while a level is selected."""
        select_options = self._config_sub_data('select', OptionPlotoptionsArcdiagramLevelsStatesSelect)
        return select_options
class IpTos(base_tests.SimpleDataPlane):
    """Match on the IP ToS field.

    Installs a flow matching a specific ToS value, then verifies that a
    matching TCP/IP packet is forwarded out the expected egress port while a
    non-matching packet (``ip_tos=4``) is delivered to the controller as a
    packet-in with reason ``OFPR_NO_MATCH``.
    """

    def runTest(self):
        logging.info('Running Ip_Tos test')
        # dict.keys() is a view on Python 3 (no .sort()); sorted() works on
        # both Python 2 and 3 and gives the same deterministic port order.
        of_ports = sorted(config['port_map'].keys())
        self.assertTrue((len(of_ports) > 1), 'Not enough ports for test')
        delete_all_flows(self.controller)
        in_port = of_ports[0]
        egress_port = of_ports[1]
        logging.info('Inserting a flow with match on Ip_Tos ')
        logging.info('Sending matching and non-matching tcp/ip packets')
        logging.info('Verifying only matching packets implements the action specified in the flow')
        (pkt, match) = match_ip_tos(self, of_ports)
        # Matching packet: must appear on the egress port only.
        self.dataplane.send(in_port, str(pkt))
        verify_packets(self, pkt, [egress_port])
        # Non-matching ToS: must be punted to the controller.
        pkt2 = simple_tcp_packet(ip_tos=4)
        self.dataplane.send(in_port, str(pkt2))
        verify_packet_in(self, str(pkt2), in_port, ofp.OFPR_NO_MATCH)
class TestAssignmentVisitor():
    """Tests for ``AssignmentVisitor.from_ast`` on hand-built ASTs.

    NOTE(review): ``code_node_ast`` and ``for_loop_ast`` are consumed as
    pytest fixture arguments by the test methods below, so they presumably
    carried a stripped ``@pytest.fixture`` decorator — confirm and restore.
    """
    def code_node_ast(self) -> AbstractSyntaxTree:
        # AST: a true-conditioned sequence node with two code nodes,
        # where cn_1 is marked as reaching cn_2.
        root = SeqNode(LogicCondition.initialize_true(LogicCondition.generate_new_context()))
        ast = AbstractSyntaxTree(root, {})
        cn_1 = ast._add_code_node([Assignment(var('c'), const(5))])
        cn_2 = ast._add_code_node([Assignment(var('c'), BinaryOperation(OperationType.plus, [var('c'), const(5)])), Return([var('c')])])
        ast._add_edge(root, cn_1)
        ast._add_edge(root, cn_2)
        ast._code_node_reachability_graph.add_reachability(cn_1, cn_2)
        return ast
    def for_loop_ast(self) -> AbstractSyntaxTree:
        # AST: `c = 5` followed by `for (i = 0; i <= 5; i = i + 1) c = c + i`.
        context = LogicCondition.generate_new_context()
        root = SeqNode(LogicCondition.initialize_true(context))
        ast = AbstractSyntaxTree(root, {LogicCondition.initialize_symbol('x1', context): Condition(OperationType.less_or_equal, [var('i'), const(5)])})
        child_1 = ast._add_code_node([Assignment(var('c'), const(5))])
        child_2 = ast.factory.create_for_loop_node(declaration=Assignment(var('i'), const(0)), modification=Assignment(var('i'), BinaryOperation(OperationType.plus, [var('i'), const(1)])), condition=LogicCondition.initialize_symbol('x1', context))
        body = ast._add_code_node([Assignment(var('c'), BinaryOperation(OperationType.plus, [var('c'), var('i')]))])
        ast._add_nodes_from((child_2, body))
        ast._add_edges_from(((root, child_1), (root, child_2), (child_2, body)))
        ast._code_node_reachability_graph.add_reachability(child_1, body)
        return ast
    def test_code_node_ast(self, code_node_ast: AbstractSyntaxTree):
        # Whole AST yields both assignments; starting from the last node
        # yields only the second one.
        assert (AssignmentVisitor.from_ast(code_node_ast) == [Assignment(var('c'), const(5)), Assignment(var('c'), BinaryOperation(OperationType.plus, [var('c'), const(5)]))])
        assert (AssignmentVisitor.from_ast(code_node_ast, code_node_ast.nodes[(- 1)]) == [Assignment(var('c'), BinaryOperation(OperationType.plus, [var('c'), const(5)]))])
    def test_for_loop_ast(self, for_loop_ast: AbstractSyntaxTree):
        # Loop declaration and modification count as assignments too.
        assert (AssignmentVisitor.from_ast(for_loop_ast) == [Assignment(var('c'), const(5)), Assignment(var('i'), const(0)), Assignment(var('i'), BinaryOperation(OperationType.plus, [var('i'), const(1)])), Assignment(var('c'), BinaryOperation(OperationType.plus, [var('c'), var('i')]))])
        assert (AssignmentVisitor.from_ast(for_loop_ast, for_loop_ast.nodes[(- 1)]) == [Assignment(var('i'), const(0)), Assignment(var('i'), BinaryOperation(OperationType.plus, [var('i'), const(1)])), Assignment(var('c'), BinaryOperation(OperationType.plus, [var('c'), var('i')]))])
class MyTestCase(unittest.TestCase):
    """Tests for RawProcessor handling of incoming/outgoing TCP captures.

    NOTE(review): the bare string expressions before each test are stripped
    ``@mock.patch('netplot.config.config.Config')`` decorators (the
    ``@...patch(`` prefix was lost during extraction); without them the
    ``config`` argument is never injected — restore the decorators upstream.
    """
    ('netplot.config.config.Config')
    def test_tcp_incoming_arg_incoming_True(self, config):
        # One packet of an incoming TCP capture, processed with incoming=True.
        packets = sniff(offline='./test/packets/tcp_incoming.pcap')
        config.incoming = True
        processor = RawProcessor(config)
        processor.process(packets[0])
        self.assertEqual(['208.80.154.224'], processor.data)
    ('netplot.config.config.Config')
    def test_tcp_outgoing_arg_incoming_False(self, config):
        # One packet of an outgoing TCP capture, processed with incoming=False.
        packets = sniff(offline='./test/packets/tcp_outgoing.pcap')
        config.incoming = False
        processor = RawProcessor(config)
        processor.process(packets[0])
        self.assertEqual(['208.80.154.224'], processor.data)
def test_no_events():
    """A response whose SSE emitter yields no events must still complete, and
    the resource's handler must have been invoked."""
    class EmptyEmitter():
        # Async iterator that terminates immediately without producing events.
        def __aiter__(self):
            return self
        async def __anext__(self):
            raise StopAsyncIteration
    class SomeResource():
        async def on_get(self, req, resp):
            self._called = True
            resp.sse = EmptyEmitter()
            assert resp.sse is not None
    sse_resource = SomeResource()
    app = App()
    app.add_route('/', sse_resource)
    testing.TestClient(app).simulate_get()
    assert sse_resource._called
def load_osci_commits_ranking_to_bq(date: datetime.datetime, date_period: str=DatePeriodType.YTD):
    """Load the OSCI commits ranking report for *date* into BigQuery.

    Only MTD and YTD periods are supported; any other value raises
    ``ValueError``.  Returns the BigQuery load result.
    """
    if date_period not in (DatePeriodType.MTD, DatePeriodType.YTD):
        raise ValueError(f'Unsupported {date_period}')
    report = OSCICommitsRankingFactory().get_cls(date_period=date_period)(date=date)
    table = date_period_to_table_map[date_period]
    log.debug(f'Load {report.name} for {date:%Y-%m-%d} to {table.table_id}')
    # Keep only the public-schema columns, then turn the index into a
    # 1-based "position" column.
    ranking = report.read()[PublicSchemas.company_commits_ranking.required]
    ranking = ranking.reset_index().rename(columns={'index': table.Columns.position})
    ranking[table.Columns.position] += 1
    # Map to the destination column names and stamp the report date.
    ranking = ranking.rename(columns=table.mapping)
    ranking[table.Columns.date] = date.date()
    return DataLake().big_query.load_dataframe(df=ranking, table_id=table.table_id, schema=table.schema)
class Rayleigh(Distribution):
    """Rayleigh distribution wrapper around ``scipy.stats.rayleigh``.

    :param scale: positive scale parameter; defaults to 1.0 when ``None``.
    :raises ValueError: if the resulting scale is not strictly positive.
    """

    def __init__(self, scale):
        # Default to the standard Rayleigh distribution when no scale given.
        self.scale = 1.0 if scale is None else scale
        # NOTE(review): a lower bound of 0.999 looks suspicious for a
        # distribution supported on [0, inf) — confirm upstream.
        self.bounds = np.array([0.999, np.inf])
        # Reject non-positive scales: scale == 0 previously slipped past the
        # `< 0` check and produced a degenerate scipy distribution.
        if self.scale <= 0:
            raise ValueError('Invalid parameters in Rayleigh distribution. Scale should be positive.')
        self.parent = rayleigh(scale=self.scale)
        # First four moments, straight from scipy.
        (self.mean, self.variance, self.skewness, self.kurtosis) = self.parent.stats(moments='mvsk')
        # Evaluation grid spanning [0, 8 * scale] for PDF-based recurrences.
        self.x_range_for_pdf = np.linspace(0.0, (8.0 * self.scale), RECURRENCE_PDF_SAMPLES)

    def get_icdf(self, xx):
        """Inverse CDF (quantile function) evaluated at *xx*."""
        return self.parent.ppf(xx)

    def get_description(self):
        """Return a human-readable description of the distribution."""
        text = (('is a Rayleigh distribution; characterised by its scale parameter, which has been set to ' + str(self.scale)) + '.')
        return text

    def get_pdf(self, points=None):
        """PDF at *points*; raises ValueError when *points* is None."""
        if points is None:
            raise ValueError('Please digit an input for get_pdf method')
        return self.parent.pdf(points)

    def get_cdf(self, points=None):
        """CDF at *points*; raises ValueError when *points* is None."""
        if points is None:
            raise ValueError('Please digit an input for get_cdf method')
        return self.parent.cdf(points)

    def get_samples(self, m=None):
        """Draw *m* random samples (default 500000 when *m* is None)."""
        number = 500000 if m is None else m
        return self.parent.rvs(size=number)
def fetch_generation_forecast(zone_key: ZoneKey=ZoneKey('BO'), session: (Session | None)=None, target_datetime: (datetime | None)=None, logger: Logger=getLogger(__name__)) -> list:
    """Fetch and parse the generation forecast for *zone_key*.

    Returns the parsed forecast events as a plain list.
    """
    raw_data, query_date = fetch_data(session=session, target_datetime=target_datetime)
    parsed = parse_generation_forecast(zone_key, query_date, raw_data, logger)
    return parsed.to_list()
def transfer_shaders(source, target, allow_component_assignments=False):
    """Copy shading-engine assignments from *source* to *target* (Maya/PyMEL).

    *source*/*target* may be transforms or shapes.  When
    *allow_component_assignments* is True, per-component (face-set)
    assignments are remapped onto the target's components by name
    substitution; otherwise the whole target is assigned.  Also re-wires
    instanced objects' ``instObjGroups`` connections to the shading engine.
    """
    # Resolve the shape if a transform was passed.
    if isinstance(source, pm.nt.Transform):
        source_shape = source.getShape()
    else:
        source_shape = source
    # Pairs of (source attribute, shading engine) connected to the shape.
    shapes_and_engines = source_shape.outputs(type=pm.nt.ShadingEngine, c=1)
    if len(shapes_and_engines):
        for (source_attribute, shading_engine) in shapes_and_engines:
            # Unlock the engine node so set membership can be edited.
            pm.lockNode(shading_engine, l=0, lockUnpublished=0)
            # Component (per-face) assignments show up via objectGroups attrs.
            is_component_assignment = ('objectgroups' in source_attribute.lower())
            if (is_component_assignment and allow_component_assignments):
                components = []
                target_shape = target
                if isinstance(target, pm.nt.Transform):
                    target_shape = target.getShape()
                # Rebuild each component path with the target shape's name.
                for component in pm.sets(shading_engine, q=1):
                    if (source_shape.name() in str(component)):
                        target_component = component.replace(source_shape.name(), target_shape.name())
                        components.append(target_component)
                pm.sets(shading_engine, fe=components)
            else:
                # Whole-object assignment.
                pm.sets(shading_engine, fe=target)
                # For instanced targets, reconnect every extra instance's
                # instObjGroups plug to the engine's next free dagSetMembers slot.
                if (target.instanceCount() > 1):
                    for i in range(1, target.instanceCount()):
                        target.attr(('instObjGroups[%s]' % i)).disconnect()
                        (target.attr(('instObjGroups[%s]' % i)) >> shading_engine.attr('dagSetMembers').next_available)
class MetaEngine():
    """Adapter forwarding schema-migration operations to a ``MetaData`` backend.

    Public methods take ``name`` first (migration-tool convention) while the
    backend expects ``table_name`` first; this class performs that argument
    reordering.
    """

    def __init__(self, db: 'MetaData'):
        # Quoted annotation: MetaData is declared elsewhere in the project and
        # need not be importable when this class definition is evaluated.
        self.db = db

    def create_table(self, name, columns, primary_keys, **kwargs):
        """Create table *name* with *columns* and *primary_keys*."""
        self.db.create_table(name, columns, primary_keys, **kwargs)

    def drop_table(self, name):
        """Drop table *name*."""
        self.db.drop_table(name)

    def add_column(self, table_name, column):
        """Add *column* to *table_name*."""
        self.db.add_column(table_name, column)

    def drop_column(self, table_name, column_name):
        """Drop *column_name* from *table_name*."""
        self.db.drop_column(table_name, column_name)

    def alter_column(self, table_name, column_name, changes):
        """Apply ``modify_*`` *changes* to one column.

        Only the new values (index 1 of each parsed entry) are forwarded to
        the backend; the old values and ``existing_*`` context are dropped.
        """
        parsed = self._parse_column_changes(changes)
        updates = {attr: values[1] for (attr, values) in parsed.items()}
        self.db.change_column(table_name, column_name, updates)

    def create_index(self, name, table_name, fields, expr, unique, **kw):
        """Create index *name* on *table_name*."""
        self.db.create_index(table_name, name, fields, expr, unique, **kw)

    def drop_index(self, name, table_name):
        """Drop index *name* from *table_name*."""
        self.db.drop_index(table_name, name)

    def create_foreign_key_constraint(self, name, table_name, column_names, foreign_table_name, foreign_keys, on_delete):
        """Create a named foreign-key constraint on *table_name*."""
        self.db.create_foreign_key_constraint(table_name, name, column_names, foreign_table_name, foreign_keys, on_delete)

    def drop_foreign_key_constraint(self, name, table_name):
        """Drop a named foreign-key constraint from *table_name*."""
        self.db.drop_foreign_key_constraint(table_name, name)

    @staticmethod
    def _parse_column_changes(changes: List[Tuple[str, str, str, Dict[str, Any], Any, Any]]) -> Dict[str, List[Any]]:
        """Normalise ``modify_*`` change tuples into ``{attribute: [a, b, extra...]}``.

        Each tuple appears to be ``(op, table, column, kwargs, value_a,
        value_b)`` — confirm against the migration tool.  ``extra`` carries
        the relevant ``existing_*`` context when the backend needs it.

        Fixed: this helper takes no ``self`` but was invoked as
        ``self._parse_column_changes(changes)``; it is now a proper
        ``@staticmethod`` (previously every call raised ``TypeError``
        because the instance was bound to ``changes``).
        """
        rv = {}
        for change in changes:
            op, kwargs, val_a, val_b = change[0], change[3], change[4], change[5]
            if op == 'modify_type':
                rv['type'] = [val_a, val_b, kwargs['existing_length']]
            elif op == 'modify_length':
                rv['length'] = [val_a, val_b, kwargs['existing_type']]
            elif op == 'modify_notnull':
                rv['notnull'] = [val_a, val_b]
            elif op == 'modify_default':
                rv['default'] = [val_a, val_b, kwargs['existing_type']]
            else:
                # Unknown modify_<attr>: pass the raw kwargs through as context.
                rv[op.split('modify_')[-1]] = [val_a, val_b, kwargs]
        return rv
class PluginHubParam(BaseModel):
    """Parameters describing a plugin-hub source (channel, URL, branch, auth)."""
    channel: Optional[str] = Field('git', description='Plugin storage channel')
    # NOTE(review): the original default URL literal was destroyed when this
    # source was extracted (the string was truncated at its opening quote,
    # leaving a syntax error); `None` keeps the model importable — restore
    # the real default URL from upstream.
    url: Optional[str] = Field(None, description='Plugin storage url')
    branch: Optional[str] = Field('main', description='github download branch', nullable=True)
    authorization: Optional[str] = Field(None, description='github download authorization', nullable=True)
# NOTE(review): the tuple-and-param-list expression below is a stripped
# `@pytest.mark.parametrize(...)` decorator (the `@pytest.mark.` prefix was
# lost during extraction); restore the decorator so these cases actually run.
(('cfg', 'expected'), [param({'_target_': 'tests.instantiate.ArgsClass'}, ArgsClass(), id='config:no_params'), param({'_target_': 'tests.instantiate.ArgsClass', '_args_': [1]}, ArgsClass(1), id='config:args_only'), param({'_target_': 'tests.instantiate.ArgsClass', '_args_': [1], 'foo': 10}, ArgsClass(1, foo=10), id='config:args+kwargs'), param({'_target_': 'tests.instantiate.ArgsClass', 'foo': 10}, ArgsClass(foo=10), id='config:kwargs_only')])
def test_instantiate_args_kwargs(cfg: Any, expected: Any) -> None:
    # instantiate() must honour `_args_` positionals and keyword overrides.
    assert (instantiate(cfg) == expected)
class OptionPlotoptionsErrorbarSonificationContexttracksMappingRate(Options):
    """Generated wrapper for the Highcharts option path implied by the class
    name (plotOptions.errorbar.sonification.contextTracks.mapping.rate).

    NOTE(review): each accessor is defined twice (getter, then setter) under
    the same name, so the second ``def`` shadows the first at class-creation
    time.  The upstream generator almost certainly emitted ``@property`` /
    ``@<name>.setter`` decorators that were stripped from this extract —
    confirm and restore.
    """
    def mapFunction(self):
        # Getter (shadowed by the setter below — see class docstring).
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter: stores the value as a non-JS-typed config entry.
        self._config(value, js_type=False)
    def mapTo(self):
        # Getter (shadowed).
        return self._config_get(None)
    def mapTo(self, text: str):
        # Setter.
        self._config(text, js_type=False)
    def max(self):
        # Getter (shadowed).
        return self._config_get(None)
    def max(self, num: float):
        # Setter.
        self._config(num, js_type=False)
    def min(self):
        # Getter (shadowed).
        return self._config_get(None)
    def min(self, num: float):
        # Setter.
        self._config(num, js_type=False)
    def within(self):
        # Getter (shadowed).
        return self._config_get(None)
    def within(self, value: Any):
        # Setter.
        self._config(value, js_type=False)
class TemperatureMonitor(HeatMonitor):
    """Heat-simulation monitor recording temperature values.

    Adds two meshing-related flags on top of ``HeatMonitor`` and a storage
    size estimate used for solver resource accounting.
    """
    # Whether results are returned on the original unstructured grid.
    unstructured: bool = pd.Field(False, title='Unstructured Grid', description='Return data on the original unstructured grid.')
    # Whether the simulation mesh conforms to the monitor geometry (see description).
    conformal: bool = pd.Field(False, title='Conformal Monitor Meshing', description="If ``True`` the heat simulation mesh will conform to the monitor's geometry. While this can be set for both Cartesian and unstructured monitors, it bears higher significance for the latter ones. Effectively, setting ``conformal = True`` for unstructured monitors (``unstructured = True``) ensures that returned temperature values will not be obtained by interpolation during postprocessing but rather directly transferred from the computational grid.")
    def storage_size(self, num_cells: int, tmesh: ArrayFloat1D) -> int:
        # Estimated bytes: one real per field, per time step, per cell.
        num_steps = self.num_steps(tmesh)
        return (((BYTES_REAL * num_steps) * num_cells) * len(self.fields))
class TestHTTPIO(unittest.TestCase):
    """Tests for PathManager's HTTPURLHandler with the network mocked out.

    NOTE(review): this extract is corrupted in three places — confirm and
    restore against the upstream iopath test suite:
    * ``_remote_uri`` was truncated at its opening quote (the URL literal
      was stripped), leaving a syntax error;
    * ``_patch_download`` is a generator used as a context manager, so it
      presumably carried a stripped ``@contextmanager`` decorator;
    * ``setUpClass`` takes ``cls`` and presumably carried ``@classmethod``.
    """
    # Remote URL used by every test (literal lost — see class docstring).
    _remote_uri = '
    _filename = 'facebook.html'
    _pathmgr = PathManager()
    def run(self, result=None):
        # Silence event logging for every test in this case.
        with patch('iopath.common.event_logger.EventLogger.log_event'):
            super(TestHTTPIO, self).run(result)
    def _patch_download(self) -> Generator[(None, None, None)]:
        # Replace the real download with a stub that writes 'test' to disk,
        # and point the cache dir at this test's private directory.
        def fake_download(url: str, dir: str, *, filename: str) -> str:
            dest = os.path.join(dir, filename)
            with open(dest, 'w') as f:
                f.write('test')
            return dest
        with patch.object(file_io, 'get_cache_dir', return_value=self._cache_dir), patch.object(file_io, 'download', side_effect=fake_download):
            (yield)
    def setUpClass(cls) -> None:
        # Per-process cache dir, recreated fresh; register the HTTP handler.
        pid = os.getpid()
        cls._cache_dir: str = os.path.join(get_cache_dir(), f'{__name__}_{pid}')
        cls._pathmgr.register_handler(HTTPURLHandler())
        if os.path.exists(cls._cache_dir):
            shutil.rmtree(cls._cache_dir)
        os.makedirs(cls._cache_dir, exist_ok=True)
    def test_get_local_path(self) -> None:
        # get_local_path must produce an existing file, with and without
        # force re-download, and honour an explicit cache_dir.
        with self._patch_download():
            local_path = self._pathmgr.get_local_path(self._remote_uri)
            self.assertTrue(os.path.exists(local_path))
            self.assertTrue(os.path.isfile(local_path))
            local_path = self._pathmgr.get_local_path(self._remote_uri, force=True)
            self.assertTrue(os.path.exists(local_path))
            self.assertTrue(os.path.isfile(local_path))
            local_path = self._pathmgr.get_local_path(self._remote_uri, cache_dir=self._cache_dir)
            self.assertTrue(local_path.startswith(self._cache_dir))
            self.assertTrue(os.path.exists(local_path))
            self.assertTrue(os.path.isfile(local_path))
    def test_open(self) -> None:
        # Opening a remote URI for reading yields a real, non-empty file.
        with self._patch_download():
            with self._pathmgr.open(self._remote_uri, 'rb') as f:
                self.assertTrue(os.path.exists(f.name))
                self.assertTrue(os.path.isfile(f.name))
                self.assertTrue((f.read() != ''))
    def test_open_writes(self) -> None:
        # Writing to an HTTP URI is not supported.
        with self.assertRaises(AssertionError):
            with self._pathmgr.open(self._remote_uri, 'w') as f:
                f.write('foobar')
    def test_open_new_path_manager(self) -> None:
        # A fresh PathManager cannot handle http until the handler is registered.
        with self._patch_download():
            path_manager = PathManager()
            with self.assertRaises(OSError):
                f = path_manager.open(self._remote_uri, 'rb')
            path_manager.register_handler(HTTPURLHandler())
            with path_manager.open(self._remote_uri, 'rb') as f:
                self.assertTrue(os.path.isfile(f.name))
                self.assertTrue((f.read() != ''))
    def test_bad_args(self) -> None:
        # Unknown kwargs must raise under strict checking and be ignored
        # once strict checking is disabled.
        with self.assertRaises(NotImplementedError):
            self._pathmgr.copy(self._remote_uri, self._remote_uri, foo='foo')
        with self.assertRaises(NotImplementedError):
            self._pathmgr.exists(self._remote_uri, foo='foo')
        with self.assertRaises(ValueError):
            self._pathmgr.get_local_path(self._remote_uri, foo='foo')
        with self.assertRaises(NotImplementedError):
            self._pathmgr.isdir(self._remote_uri, foo='foo')
        with self.assertRaises(NotImplementedError):
            self._pathmgr.isfile(self._remote_uri, foo='foo')
        with self.assertRaises(NotImplementedError):
            self._pathmgr.ls(self._remote_uri, foo='foo')
        with self.assertRaises(NotImplementedError):
            self._pathmgr.mkdirs(self._remote_uri, foo='foo')
        with self.assertRaises(ValueError):
            self._pathmgr.open(self._remote_uri, foo='foo')
        with self.assertRaises(ValueError):
            self._pathmgr.opena(self._remote_uri, foo='foo')
        with self.assertRaises(NotImplementedError):
            self._pathmgr.rm(self._remote_uri, foo='foo')
        with self.assertRaises(NotImplementedError):
            self._pathmgr.set_cwd(self._remote_uri, foo='foo')
        self._pathmgr.set_strict_kwargs_checking(False)
        self._pathmgr.get_local_path(self._remote_uri, foo='foo')
        f = self._pathmgr.open(self._remote_uri, foo='foo')
        f.close()
        self._pathmgr.set_strict_kwargs_checking(True)
    def test_copy_across_handlers(self) -> None:
        # Copy http -> local file across two different handlers.
        with tempfile.NamedTemporaryFile(delete=True) as file:
            local_path = file.name
        with self._patch_download():
            self._pathmgr.copy(self._remote_uri, local_path)
            self.assertTrue(self._pathmgr.exists(local_path))
            self._pathmgr.rm(local_path)
def print_proper_context_variations(logfile, freqs, cats, lemma_count):
    """Print, per category, the context words that prefer it most strongly.

    :param logfile: writable text stream the report is printed to.
    :param freqs: mapping category -> {context word -> absolute frequency}.
    :param cats: iterable of category names (keys into *freqs* / *lemma_count*).
    :param lemma_count: mapping category -> lemma total used for normalisation.

    Each context word is assigned to the category where its normalised
    frequency peaks; its score is the preference ratio (peak / total)
    damped by the square root of the total normalised frequency.  The top
    100 words per category are printed with their raw counts in every
    category.
    """
    cwords = set()
    varrat_c = {}
    for cat in cats:
        varrat_c[cat] = {}
        cwords.update(freqs[cat].keys())
    for cword in cwords:
        fsum = 0
        maxf = 0
        mode = None
        for cat in cats:
            if cword in freqs[cat]:
                f = freqs[cat][cword] / lemma_count[cat]
                fsum += f
                if f > maxf:
                    maxf = f
                    mode = cat
        # Preference ratio, damped by overall (sqrt) normalised frequency.
        varrat_c[mode][cword] = (maxf / fsum) * sqrt(fsum)
    for cat in cats:
        print('\n%s:' % cat, file=logfile)
        print('index', '%-20s' % 'context', '\t'.join(cats), sep='\t', file=logfile)
        for cword in sorted(varrat_c[cat], key=varrat_c[cat].get, reverse=True)[:100]:
            # Fixed: use .get(cword, 0) so a word absent from some category
            # prints a zero count instead of raising KeyError.
            print('%.2f\t%-20s' % (varrat_c[cat][cword], cword), '\t'.join(str(freqs[c].get(cword, 0)) for c in cats), sep='\t', file=logfile)
def load_locks_from_tag(remote: str, tag: str) -> (str, dict, dict):
    """Return ``(commit_hash, version_lock, deprecated_rules)`` for a git tag.

    Verifies the tag exists on *remote* (or locally when *remote* is empty),
    force-fetches tags, then reads the two lock files from the tag, falling
    back from the ``detection_rules/`` prefix to the repo root for old tags.
    """
    import json
    git = utils.make_git()
    exists_args = ['ls-remote']
    if remote:
        exists_args.append(remote)
    exists_args.append(f'refs/tags/{tag}')
    assert git(*exists_args), f"tag: {tag} does not exist in {remote or 'local'}"
    fetch_args = ['fetch']
    if remote:
        fetch_args.extend([remote, '--tags', '-f', tag])
    else:
        fetch_args.extend(['--tags', '-f'])
    git(*fetch_args)
    commit_hash = git('rev-list', '-1', tag)

    def read_json(relative_path: str) -> dict:
        # Newer tags keep lock files under detection_rules/; older tags at root.
        try:
            return json.loads(git('show', f'{tag}:detection_rules/{relative_path}'))
        except CalledProcessError:
            return json.loads(git('show', f'{tag}:{relative_path}'))

    version = read_json('etc/version.lock.json')
    deprecated = read_json('etc/deprecated_rules.json')
    return (commit_hash, version, deprecated)
class OptionPlotoptionsBoxplotSonificationDefaultspeechoptionsMappingRate(Options):
    """Generated wrapper for the Highcharts option path implied by the class
    name (plotOptions.boxplot.sonification.defaultSpeechOptions.mapping.rate).

    NOTE(review): each accessor is defined twice (getter, then setter) under
    the same name, so the second ``def`` shadows the first at class-creation
    time.  The upstream generator almost certainly emitted ``@property`` /
    ``@<name>.setter`` decorators that were stripped from this extract —
    confirm and restore.
    """
    def mapFunction(self):
        # Getter (shadowed by the setter below — see class docstring).
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter: stores the value as a non-JS-typed config entry.
        self._config(value, js_type=False)
    def mapTo(self):
        # Getter (shadowed).
        return self._config_get(None)
    def mapTo(self, text: str):
        # Setter.
        self._config(text, js_type=False)
    def max(self):
        # Getter (shadowed).
        return self._config_get(None)
    def max(self, num: float):
        # Setter.
        self._config(num, js_type=False)
    def min(self):
        # Getter (shadowed).
        return self._config_get(None)
    def min(self, num: float):
        # Setter.
        self._config(num, js_type=False)
    def within(self):
        # Getter (shadowed).
        return self._config_get(None)
    def within(self, value: Any):
        # Setter.
        self._config(value, js_type=False)
# NOTE(review): `_in_both(...)` below is a stripped decorator — most likely
# `@run_in_both(CompWithInit4, Foo)` from the flexx test harness (the prefix
# was lost during extraction); restore the decorator upstream.
_in_both(CompWithInit4, Foo)
def test_component_init4():
    # Create components, step the event loop, and print a_prop values so the
    # harness can compare Python/JS output.
    c1 = Foo(a_prop=0)
    c2 = Foo(a_prop=0)
    c3 = CompWithInit4(c1, 8)
    print(c1.a_prop, c3.a_prop)
    loop.iter()
    print(c1.a_prop, c3.a_prop)
    # Re-initialise c3 against a different component/value.
    c3.create(c2, 9)
    loop.iter()
    print(c2.a_prop, c3.a_prop)
    loop.iter()
    print(c2.a_prop, c3.a_prop)
class TrackerContentProxy(ContentProxy):
    """ContentProxy that persists content-type counts in the item's
    ``_ct_inventory`` field to avoid recounting on every request.

    The inventory stores content-type ids in a compact, translated form
    (see ``_translation_map``) together with an ``_version_`` stamp; a
    stale or missing inventory triggers a recount and a write-back.
    """
    def _fetch_content_type_counts(self):
        if ('counts' not in self._cache):
            # Fast path: reuse the persisted inventory when its version matches.
            if (self.item._ct_inventory and (self.item._ct_inventory.get('_version_', (- 1)) == INVENTORY_VERSION)):
                try:
                    self._cache['counts'] = self._from_inventory(self.item._ct_inventory)
                except KeyError:
                    # Translation map no longer knows an id -> fall through to recount.
                    pass
            if ('counts' not in self._cache):
                # Slow path: recount, persist the fresh inventory, and
                # invalidate descendants' cached inventories.
                super()._fetch_content_type_counts()
                self.item._ct_inventory = self._to_inventory(self._cache['counts'])
                self.item.__class__.objects.filter(id=self.item.id).update(_ct_inventory=self.item._ct_inventory)
                if hasattr(self.item, 'get_descendants'):
                    self.item.get_descendants(include_self=False).update(_ct_inventory=None)
        return self._cache['counts']
    def _translation_map(self):
        # Bidirectional map per item class: index <-> ContentType id.
        # Negative keys translate stored ids back to indices.
        cls = self.item.__class__
        if (cls not in _translation_map_cache):
            map = {}
            model_to_contenttype = ContentType.objects.get_for_models(*self.item._feincms_content_types)
            for (idx, fct) in enumerate(self.item._feincms_content_types):
                dct = model_to_contenttype[fct]
                map[(- dct.id)] = idx
                map[idx] = dct.id
            _translation_map_cache[cls] = map
        return _translation_map_cache[cls]
    def _from_inventory(self, inventory):
        # Inventory -> counts: translate stored ct ids back via negative keys,
        # skipping the version stamp entry.
        map = self._translation_map()
        return {region: [(pk, map[(- ct)]) for (pk, ct) in items] for (region, items) in inventory.items() if (region != '_version_')}
    def _to_inventory(self, counts):
        # Counts -> inventory: translate indices to ct ids and stamp the version.
        map = self._translation_map()
        inventory = {region: [(pk, map[ct]) for (pk, ct) in items] for (region, items) in counts.items()}
        inventory['_version_'] = INVENTORY_VERSION
        return inventory
def generate_legacy_template_version(ecs_version: str, mappings_section: Dict, out_dir: str, template_settings_file: str) -> None:
    """Render and write the legacy Elasticsearch index template.

    Creates ``<out_dir>/elasticsearch/legacy`` and writes ``template.json``
    built from *mappings_section* and the template settings file.
    """
    legacy_dir = join(out_dir, 'elasticsearch', 'legacy')
    ecs_helpers.make_dirs(legacy_dir)
    template: Dict = template_settings(ecs_version, mappings_section, template_settings_file, is_legacy=True)
    save_json(join(out_dir, 'elasticsearch/legacy/template.json'), template)
def test():
    # Course-exercise checker: `__solution__` (the learner's submitted source)
    # and `__msg__` (the feedback channel) are globals injected by the runner.
    assert ('token_texts' not in __solution__), 'token_texts?'
    assert ('pos_tags' not in __solution__), 'pos_tags?'
    assert ('token.pos_ ==' in __solution__), '?'
    assert (('token.i + 1' in __solution__) or ('token.i+1' in __solution__)), '?'
    # NOTE(review): this success message string appears garbled (fragments of
    # the exercise text fused together during extraction) — restore upstream.
    __msg__.good('Great work!docdoc[token.i + 1]token.i + 1 < len(doc) ')
class TestFBNetV3MaskRCNNQATEager(RCNNBaseTestCases.TemplateTestCase):
    """Eager-mode QAT tests for the FBNetV3A DSMask C4 Mask R-CNN config."""
    def setup_custom_test(self):
        super().setup_custom_test()
        # Base config plus qnnpack-backend QAT and plain BN normalisation.
        self.cfg.merge_from_file('detectron2go://mask_rcnn_fbnetv3a_dsmask_C4.yaml')
        self.cfg.merge_from_list(['QUANTIZATION.BACKEND', 'qnnpack', 'QUANTIZATION.QAT.ENABLED', 'True'])
        self.cfg.merge_from_list(['MODEL.FBNET_V2.NORM', 'bn'])
    def test_inference(self):
        self._test_inference()
    # NOTE(review): the bare call below is a stripped decorator — likely
    # `@RCNNBaseTestCases._parameterized_test_export([...])` — restore it so
    # test_export is actually parameterized with (predictor_type, compare_match).
    _parameterized_test_export([['torchscript__ops', False], ['torchscript_int8', False]])
    def test_export(self, predictor_type, compare_match):
        _maybe_skip_test(self, predictor_type)
        self._test_export(predictor_type, compare_match=compare_match)
# NOTE(review): `_deserializable` below is a stripped decorator (the `@` and
# probably a module prefix were lost during extraction) — restore it.
_deserializable
class AppConfig(BaseAppConfig):
    """Application configuration adding a `name` field on top of BaseAppConfig."""
    def __init__(self, log_level: str='WARNING', id: Optional[str]=None, name: Optional[str]=None, collect_metrics: Optional[bool]=True, **kwargs):
        # `name` is kept locally and deliberately not forwarded to the base
        # initializer — presumably BaseAppConfig does not accept it; confirm.
        self.name = name
        super().__init__(log_level=log_level, id=id, collect_metrics=collect_metrics, **kwargs)
class OptionSeriesSankeySonificationDefaultspeechoptionsMappingTime(Options):
    """Generated wrapper for the Highcharts option path implied by the class
    name (series.sankey.sonification.defaultSpeechOptions.mapping.time).

    NOTE(review): each accessor is defined twice (getter, then setter) under
    the same name, so the second ``def`` shadows the first at class-creation
    time.  The upstream generator almost certainly emitted ``@property`` /
    ``@<name>.setter`` decorators that were stripped from this extract —
    confirm and restore.
    """
    def mapFunction(self):
        # Getter (shadowed by the setter below — see class docstring).
        return self._config_get(None)
    def mapFunction(self, value: Any):
        # Setter: stores the value as a non-JS-typed config entry.
        self._config(value, js_type=False)
    def mapTo(self):
        # Getter (shadowed).
        return self._config_get(None)
    def mapTo(self, text: str):
        # Setter.
        self._config(text, js_type=False)
    def max(self):
        # Getter (shadowed).
        return self._config_get(None)
    def max(self, num: float):
        # Setter.
        self._config(num, js_type=False)
    def min(self):
        # Getter (shadowed).
        return self._config_get(None)
    def min(self, num: float):
        # Setter.
        self._config(num, js_type=False)
    def within(self):
        # Getter (shadowed).
        return self._config_get(None)
    def within(self, value: Any):
        # Setter.
        self._config(value, js_type=False)
# NOTE(review): decorator text appears truncated in extraction (likely
# `@common.requires_os(*metadata.platforms)` or similar); confirm.
_os(*metadata.platforms)
def main():
    """Emulate a Bifrost kerberoast attack by running a masqueraded binary from /tmp."""
    masquerade = '/tmp/bifrost'
    common.create_macos_masquerade(masquerade)
    common.log('Launching fake bifrost attack with kerberoast commands')
    # Run the fake binary, killing it after the 10 s timeout, then clean up.
    common.execute([masquerade, '-action', '-kerberoast'], timeout=10, kill=True)
    common.remove_file(masquerade)
# NOTE(review): the `@pytest.mark.` prefix appears lost from this parametrize
# decorator in extraction; confirm against the original file.
.parametrize(('input_dts', 'expected_result'), [('', ''), ('data = [01 23 45 67 89 ab cd ef 01 23 45 67 89 ab cd ef];', 'data = (BINARY DATA ...);'), ('data = <0x01 0x2345 0x67 0x89 0xabcdef 0x1234 0x56 0x78 0x90 0xab 0xcd>;', 'data = (BINARY DATA ...);'), ('data = [01 23 45 67];', 'data = [01 23 45 67];'), ('data = <0x01 0x2345 0x67>;', 'data = <0x01 0x2345 0x67>;')])
def test_hide_dts_data(input_dts, expected_result):
    """Long binary blobs in DTS text are collapsed to a placeholder; short ones kept."""
    assert (flt.hide_dts_binary_data(input_dts) == expected_result)
class TestContacts(unittest.TestCase):
    """Unit tests for the contacts list: persistence, identity/label validation,
    and add/remove of contacts and identities."""

    def test_contacts_load(self):
        """A new Contacts instance should load previously stored entries."""
        storage = MockStorage()
        c1 = contacts.Contacts(storage)
        self.assertEqual(0, len(c1._entries))
        c1.add_contact(contacts.IdentitySystem.OnChain, 'name', pk_hex_1)
        self.assertEqual(1, len(c1._entries))
        c2 = contacts.Contacts(storage)
        # Fix: assert against the freshly constructed instance.  Previously this
        # re-checked `c1`, duplicating the assertion above and leaving the
        # load-from-storage behaviour of `c2` untested.
        self.assertEqual(1, len(c2._entries))

    def test_contacts_check_identity_exists(self):
        """check_identity_exists reports InUse for known identities, Ok otherwise."""
        system_id = contacts.IdentitySystem.OnChain
        storage = MockStorage()
        c1 = contacts.Contacts(storage)
        c1.add_contact(system_id, 'name', pk_hex_1)
        result = c1.check_identity_exists(system_id, pk_hex_1)
        self.assertEqual(contacts.IdentityCheckResult.InUse, result)
        result = c1.check_identity_exists(system_id, pk_hex_2)
        self.assertEqual(contacts.IdentityCheckResult.Ok, result)

    def test_contacts_check_identity_valid(self):
        """check_identity_valid validates the identity data format."""
        system_id = contacts.IdentitySystem.OnChain
        storage = MockStorage()
        c1 = contacts.Contacts(storage)
        result = c1.check_identity_valid(system_id, pk_hex_1)
        self.assertEqual(contacts.IdentityCheckResult.Ok, result)
        result = c1.check_identity_valid(system_id, '...')
        self.assertEqual(contacts.IdentityCheckResult.Invalid, result)
        c1.add_contact(system_id, 'name', pk_hex_1)
        # NOTE(review): given the test name this may have been meant to call
        # check_identity_valid — confirm whether that also reports InUse.
        result = c1.check_identity_exists(system_id, pk_hex_1)
        self.assertEqual(contacts.IdentityCheckResult.InUse, result)

    def test_contacts_check_label(self):
        """Labels must be non-empty and unique (case-insensitively)."""
        system_id = contacts.IdentitySystem.OnChain
        storage = MockStorage()
        c1 = contacts.Contacts(storage)
        result = c1.check_label('')
        self.assertEqual(contacts.IdentityCheckResult.Invalid, result)
        result = c1.check_label('bob')
        self.assertEqual(contacts.IdentityCheckResult.Ok, result)
        c1.add_contact(system_id, 'name', pk_hex_1)
        result = c1.check_label('name')
        self.assertEqual(contacts.IdentityCheckResult.InUse, result)
        result = c1.check_label('Name')
        self.assertEqual(contacts.IdentityCheckResult.InUse, result)

    def test_contacts_set_label(self):
        """set_label changes the stored label of an existing contact."""
        system_id = contacts.IdentitySystem.OnChain
        storage = MockStorage()
        contacts1 = contacts.Contacts(storage)
        entry1 = contacts1.add_contact(system_id, 'name', pk_hex_1)
        contacts1.set_label(entry1.contact_id, 'bob')
        entry2 = contacts1.get_contact(entry1.contact_id)
        self.assertEqual('bob', entry2.label)

    def test_contacts_contact_exists(self):
        """contact_exists reflects the presence of a contact id."""
        storage = MockStorage()
        contacts1 = contacts.Contacts(storage)
        self.assertFalse(contacts1.contact_exists(1))
        system_id = contacts.IdentitySystem.OnChain
        contacts1.add_contact(system_id, 'name', pk_hex_1)
        self.assertTrue(contacts1.contact_exists(1))

    def test_contacts_get_contact(self):
        """get_contact returns the entry, reflecting subsequent label changes."""
        system_id = contacts.IdentitySystem.OnChain
        storage = MockStorage()
        contacts1 = contacts.Contacts(storage)
        contact1_1 = contacts1.add_contact(system_id, 'name', pk_hex_1)
        contact1_2 = contacts1.get_contact(contact1_1.contact_id)
        self.assertEqual(contact1_1, contact1_2)
        contacts1.set_label(contact1_1.contact_id, 'bob')
        contact2 = contacts1.get_contact(contact1_1.contact_id)
        self.assertNotEqual(contact1_1, contact2)

    def test_contacts_get_contacts(self):
        """get_contacts returns all stored entries."""
        system_id = contacts.IdentitySystem.OnChain
        storage = MockStorage()
        contacts1 = contacts.Contacts(storage)
        contact1 = contacts1.add_contact(system_id, 'name1', pk_hex_1)
        contact2 = contacts1.add_contact(system_id, 'name2', pk_hex_2)
        entries = contacts1.get_contacts()
        self.assertEqual(2, len(entries))
        self.assertEqual(set([contact1.contact_id, contact2.contact_id]), set([c.contact_id for c in entries]))

    def test_contacts_add_contact(self):
        """add_contact validates inputs and stores a complete entry."""
        storage = MockStorage()
        contacts1 = contacts.Contacts(storage)
        with self.assertRaises(contacts.ContactDataError):
            contacts1.add_contact(1000000, 'name1', pk_hex_1)
        with self.assertRaises(contacts.ContactDataError):
            contacts1.add_contact(contacts.IdentitySystem.OnChain, 'name1', '...')
        contact1 = contacts1.add_contact(contacts.IdentitySystem.OnChain, 'name1', pk_hex_1)
        self.assertEqual(1, len(contacts1._entries))
        self.assertEqual(contact1, contacts1._entries[contact1.contact_id])
        self.assertEqual(1, contact1.contact_id)
        self.assertEqual('name1', contact1.label)
        self.assertEqual(1, len(contact1.identities))
        self.assertEqual(contacts.IdentitySystem.OnChain, contact1.identities[0].system_id)
        self.assertEqual(pk_hex_1, contact1.identities[0].system_data)

    def test_contacts_remove_contact(self):
        """remove_contact deletes an entry; unknown ids raise KeyError."""
        storage = MockStorage()
        contacts1 = contacts.Contacts(storage)
        with self.assertRaises(KeyError):
            contacts1.remove_contact(10000)
        contact1 = contacts1.add_contact(contacts.IdentitySystem.OnChain, 'name1', pk_hex_1)
        contacts1.remove_contact(contact1.contact_id)
        self.assertEqual(0, len(contacts1._entries))

    def test_contacts_remove_contacts(self):
        """remove_contacts deletes known ids and silently ignores unknown ones."""
        storage = MockStorage()
        contacts1 = contacts.Contacts(storage)
        contact1 = contacts1.add_contact(contacts.IdentitySystem.OnChain, 'name1', pk_hex_1)
        contact2 = contacts1.add_contact(contacts.IdentitySystem.OnChain, 'name2', pk_hex_2)
        self.assertEqual(2, len(contacts1._entries))
        contacts1.remove_contacts([contact1.contact_id])
        self.assertEqual(1, len(contacts1._entries))
        contacts1.remove_contacts([10000])
        self.assertEqual(1, len(contacts1._entries))
        contacts1.remove_contacts([contact2.contact_id])
        self.assertEqual(0, len(contacts1._entries))
        contact1 = contacts1.add_contact(contacts.IdentitySystem.OnChain, 'name1', pk_hex_1)
        contact2 = contacts1.add_contact(contacts.IdentitySystem.OnChain, 'name2', pk_hex_2)
        self.assertEqual(2, len(contacts1._entries))
        contacts1.remove_contacts([contact1.contact_id, contact2.contact_id])
        self.assertEqual(0, len(contacts1._entries))

    def test_contacts_add_identity(self):
        """add_identity attaches a second identity system to a contact."""
        storage = MockStorage()
        contacts1 = contacts.Contacts(storage)
        contact1_1 = contacts1.add_contact(contacts.IdentitySystem.OnChain, 'name1', pk_hex_1)
        contacts1.add_identity(contact1_1.contact_id, contacts.IdentitySystem.Paymail, 'xxx')
        contact1_2 = contacts1.get_contact(contact1_1.contact_id)
        self.assertEqual(2, len(contact1_2.identities))
        system_ids = set([v.system_id for v in contact1_2.identities])
        expected_system_ids = set([contacts.IdentitySystem.OnChain, contacts.IdentitySystem.Paymail])
        self.assertEqual(expected_system_ids, system_ids)
        identity1 = [v for v in contact1_2.identities if (v.system_id == contacts.IdentitySystem.OnChain)][0]
        self.assertEqual(pk_hex_1, identity1.system_data)
        identity2 = [v for v in contact1_2.identities if (v.system_id == contacts.IdentitySystem.Paymail)][0]
        self.assertEqual('xxx', identity2.system_data)

    def test_contacts_remove_identity(self):
        """remove_identity detaches one identity, leaving the others intact."""
        storage = MockStorage()
        contacts1 = contacts.Contacts(storage)
        contact1_1 = contacts1.add_contact(contacts.IdentitySystem.OnChain, 'name1', pk_hex_1)
        identity1 = contact1_1.identities[0]
        identity2 = contacts1.add_identity(contact1_1.contact_id, contacts.IdentitySystem.Paymail, 'xxx')
        self.assertEqual(2, len(contact1_1.identities))
        contact1_2 = contacts1.get_contact(contact1_1.contact_id)
        contacts1.remove_identity(contact1_2.contact_id, identity1.identity_id)
        self.assertEqual(1, len(contact1_2.identities))
        system_ids = set([v.system_id for v in contact1_2.identities])
        expected_system_ids = set([contacts.IdentitySystem.Paymail])
        self.assertEqual(expected_system_ids, system_ids)
def bech32_decode(bech):
    """Validate a bech32 string and split it into (hrp, data, checksum-result).

    Returns (None, None, None) on any validation failure.
    """
    # Reject out-of-range characters and mixed-case strings.
    if (any(((ord(x) < 33) or (ord(x) > 126)) for x in bech)
            or ((bech.lower() != bech) and (bech.upper() != bech))):
        return (None, None, None)
    bech = bech.lower()
    pos = bech.rfind('1')
    # The separator must exist, follow a non-empty HRP, and leave room for the
    # 6-character checksum.
    if (pos < 1) or ((pos + 7) > len(bech)):
        return (None, None, None)
    if not all((x in CHARSET) for x in bech[(pos + 1):]):
        return (None, None, None)
    hrp = bech[:pos]
    data = [CHARSET.find(x) for x in bech[(pos + 1):]]
    spec = bech32_polymod(bech32_hrp_expand(hrp) + data)
    # NOTE(review): in the BIP-173 reference, bech32_polymod returns an int, so
    # an `is None` test never rejects a bad checksum; if this helper is meant to
    # verify the checksum it should compare against 1 (or the bech32m constant).
    # Confirm against the local bech32_polymod implementation.
    if spec is None:
        return (None, None, None)
    # Fix: return a 3-tuple on success as well — every failure path returns
    # three elements, so callers unpacking three values previously crashed on a
    # successful decode.
    return (hrp, data[:(- 6)], spec)
def test_trigger_webhooks(workspace: Workspace, test_data: TestData, send_task_mock: MagicMock):
    """trigger_webhooks should enqueue exactly one task carrying a valid WebhookEvent
    for the right workspace and object."""
    # Renamed from `object`, which shadowed the builtin.
    client = test_data['clients']['default_tenant']
    trigger_webhooks(ClientCreated, client, schemas.client.Client, workspace_id=workspace.id, send_task=send_task_mock)
    send_task_mock.assert_called_once()
    assert (send_task_mock.call_args[1]['workspace_id'] == str(workspace.id))
    # The event payload is serialized JSON; round-trip it through the model.
    event_json = send_task_mock.call_args[1]['event']
    event = WebhookEvent.model_validate_json(event_json)
    assert (event.type == ClientCreated.key())
    assert (event.data['id'] == str(client.id))
# NOTE(review): the next two lines look like truncated task decorators (the
# leading `@cq.task`/`@mgmt_task`-style text was lost in extraction); confirm.
(name='api.mon.base.tasks.mon_hostgroup_list', base=MgmtTask)
_task(log_exception=False)
def mon_hostgroup_list(task_id, dc_id, dc_bound=True, full=False, extended=False, **kwargs):
    """Return the monitoring hostgroup list for the datacenter identified by dc_id."""
    dc = Dc.objects.get_by_id(int(dc_id))
    return get_monitoring(dc).hostgroup_list(dc_bound=dc_bound, full=full, extended=extended)
def test_measure_interval(la):
    """A falling→rising interval on the test signal is half a signal period."""
    la.configure_trigger('LA1', 'falling')
    measured = la.measure_interval(channels=['LA1', 'LA2'], modes=['rising', 'falling'], timeout=0.1)
    # Half a period of the test frequency, expressed in microseconds.
    expected = ((FREQUENCY ** (- 1)) * MICROSECONDS) * 0.5
    assert expected == pytest.approx(measured, abs=TWO_CLOCK_CYCLES)
def test_swap_keys():
    """swap_keys replaces human-readable keys with their canonical field names."""
    human_readable = {
        'Recipient Name': 'recipient_name',
        'Action Date': 'action_date',
        'Transaction Amount': 'federal_action_obligation',
    }
    swapped = swap_keys(human_readable)
    expected = {
        'recipient_name': 'recipient_name',
        'action_date': 'action_date',
        'federal_action_obligation': 'federal_action_obligation',
    }
    assert swapped == expected
def test_write_union_tuple_primitive():
    """Union values given as (type-name, value) tuples round-trip to plain values."""
    schema = {'name': 'test_name', 'namespace': 'test', 'type': 'record', 'fields': [{'name': 'val', 'type': ['string', 'int']}]}
    records = [{'val': ('int', 1)}, {'val': ('string', 'string')}]
    buffer = BytesIO()
    fastavro.writer(buffer, schema, records)
    buffer.seek(0)
    round_tripped = list(fastavro.reader(buffer))
    assert round_tripped == [{'val': 1}, {'val': 'string'}]
# NOTE(review): the next two lines look like click decorators whose leading
# `@click.command`/`@click.option` text was lost in extraction; confirm.
('init', cls=FandoghCommand)
('-n', '--name', 'name', prompt='Service Name')
def init(name):
    """Interactively initialize a Fandogh project: validate the service name,
    choose a project type, collect its parameters, and write the project files."""
    service_name_pattern = re.compile('^([a-z]+(-*[a-z0-9]+)*){1,100}$')
    if (not service_name_pattern.match(name)):
        click.echo(format_text('manifest.name:service names must match regex "[a-z]([-a-z0-9]*[a-z0-9])?" and length lower than 100 char.', TextStyle.FAIL), err=True)
        return
    project_types = get_project_types()
    project_type = prompt_project_types(project_types)
    # Optional hint callback registered for this project type.
    project_type_hint = key_hints.get(project_type['name'])
    if project_type_hint:
        project_type_hint()
    chosen_params = {'context': click.prompt('The context directory', default='.')}
    if project_type.get('parameters', None):
        for param in project_type.get('parameters'):
            # Each parameter may also carry its own hint callback.
            hint = key_hints.get(param['key'], None)
            if hint:
                hint()
            chosen_params[param['key']] = click.prompt(param['name'], default=param.get('default', None))
    initialize_project(name, project_type, chosen_params)
    create_fandoghignore_file(project_type.get('name'))
    click.echo(format_text('Your source has been initialized.\nPlease consider to run `fandogh source run` command whenever you are going to deploy your changes', TextStyle.OKGREEN))
def pp_ext_inquiry_rsp(ext_inq_rsp):
    """Pretty-print a Bluetooth extended inquiry response (EIR) byte sequence.

    Walks the length-prefixed EIR structures until a zero length byte and
    prints each known data type; unknown types are flagged in red.
    """
    print('Extended inquiry response: ', end='')
    if (ext_inq_rsp[0] == 0):
        # A leading zero length byte means there is no EIR data at all.
        print(red('None'))
        return
    print()
    while (ext_inq_rsp[0] != 0):
        # Each structure: [length][data_type][length-1 bytes of payload].
        length = ext_inq_rsp[0]
        data = ext_inq_rsp[1:(1 + length)]
        data_type = data[0]
        ext_inq_rsp = ext_inq_rsp[(1 + length):]
        print(INDENT, end='')
        if (data_type == COMPLETE_LIST_OF_16_BIT_SERVICE_CLASS_UUIDS):
            print(gap_type_names[data_type])
            if ((length - 1) >= 2):
                eir_data = data[1:]
                # 16-bit UUIDs must come in whole 2-byte units.
                if ((len(eir_data) % 2) != 0):
                    print(((INDENT * 2) + blue(('Invalid EIR data length: %d' % len(eir_data)))))
                    continue
                for i in range(0, len(eir_data), 2):
                    uuid = int.from_bytes(eir_data[i:(i + 2)], byteorder='little')
                    print(((INDENT * 2) + ('0x%04x ' % uuid)), end='')
                    try:
                        print(blue(service_cls_profile_ids[uuid]['Name']))
                    except KeyError as e:
                        print(red('unknown'))
            else:
                print(((INDENT * 2) + red('None')))
        elif (data_type == COMPLETE_LIST_OF_32_BIT_SERVICE_CLASS_UUIDS):
            print(gap_type_names[data_type])
            if ((length - 1) >= 4):
                eir_data = data[1:]
                if ((len(eir_data) % 4) != 0):
                    logger.info(((INDENT * 2) + 'Invalid EIR data length: {} {}'.format(len(eir_data), eir_data)))
                    continue
                for i in range(0, len(eir_data), 4):
                    uuid = int.from_bytes(eir_data[i:(i + 4)], byteorder='little')
                    print(((INDENT * 2) + ('0x%08x ' % uuid)))
            else:
                print(((INDENT * 2) + red('None')))
        elif (data_type == COMPLETE_LIST_OF_128_BIT_SERVICE_CLASS_UUIDS):
            print(gap_type_names[data_type])
            if ((length - 1) >= 16):
                eir_data = data[1:]
                if ((len(eir_data) % 16) != 0):
                    logger.info(((INDENT * 2) + 'Invalid EIR data length: {} {}'.format(len(eir_data), eir_data)))
                    continue
                for i in range(0, len(eir_data), 16):
                    uuid = int.from_bytes(eir_data[i:(i + 16)], byteorder='little')
                    uuid_str = ('%032X' % uuid)
                    print((INDENT * 2), end='')
                    # Render as the standard 8-4-4-4-12 UUID grouping.
                    print(blue('-'.join([uuid_str[:8], uuid_str[8:12], uuid_str[12:16], uuid_str[16:20], uuid_str[20:32]])))
            else:
                print(((INDENT * 2) + red('None')))
        elif ((data_type == SHORTENED_LOCAL_NAME) or (data_type == COMPLETE_LOCAL_NAME)):
            print((gap_type_names[data_type] + ':'), blue(data[1:].decode()))
        elif (data_type == TX_POWER_LEVEL):
            print((gap_type_names[data_type] + ':'), blue((str(int.from_bytes(data[1:], byteorder='little')) + ' dBm')))
        else:
            # Unhandled but possibly named type: print the name if known, then
            # dump the raw payload.
            try:
                print(gap_type_names[data_type])
            except KeyError as e:
                print(red(('Unknown, 0x%02x' % data_type)))
            print((INDENT * 2), data[1:], sep='')
# NOTE(review): the `@pytest.mark.` prefix appears lost from this parametrize
# decorator in extraction; confirm against the original file.
.parametrize('stattest_func, feature_type, expected', [(_custom_stattest, 'num', custom_stattest), ('ks', 'num', ks_stat_test), ('z', 'cat', z_stat_test), ('chisquare', 'cat', chi_stat_test), ('jensenshannon', 'num', jensenshannon_stat_test), ('kl_div', 'num', kl_div_stat_test), ('psi', 'num', psi_stat_test), ('wasserstein', 'num', wasserstein_stat_test)])
def test_get_stattest_valid_resolve(stattest_func, feature_type, expected):
    """Resolving a stat test by registered name or by callable yields the same test."""
    test = get_stattest(pd.Series(dtype='float64'), pd.Series(dtype='float64'), feature_type, stattest_func)
    assert (test.display_name == expected.display_name)
    assert (test.func == expected.func)
class TreasureHuntAction(FatetellAction):
    """Fatetell action: on a successful (black-card) fatetell, the target
    takes the revealed card into their hand."""

    def __init__(self, source, target):
        self.source = source
        self.target = target
        self.fatetell_target = target

    def fatetell_action(self, ft):
        """Move the revealed card to the target when the fatetell succeeded."""
        if not ft.succeeded:
            return False
        self.card = revealed = ft.card
        migrate_cards([revealed], self.target.cards)
        return True

    def fatetell_cond(self, c):
        """The fatetell succeeds on black cards."""
        return c.color == Card.BLACK
def _coconut_mk_anon_namedtuple(fields, types=None, of_kwargs=None):
    """Create an anonymous namedtuple class (typed when *types* is given) and,
    when *of_kwargs* is provided, return an instance populated from it."""
    if (types is None):
        NT = _coconut.collections.namedtuple('_namedtuple_of', fields)
    else:
        NT = _coconut.typing.NamedTuple('_namedtuple_of', [(f, t) for (f, t) in _coconut.zip(fields, types)])
    # Register a reduce function so instances pickle by reconstructing the
    # anonymous class from its fields rather than by (non-importable) name.
    _coconut.copyreg.pickle(NT, (lambda nt: (_coconut_mk_anon_namedtuple, (nt._fields, types, nt._asdict()))))
    if (of_kwargs is None):
        return NT
    return NT(**of_kwargs)
def test_remove_all_from_compare_basket(web_frontend):
    """Removing everything from the compare basket clears all stored uids."""
    with web_frontend.app.test_request_context():
        uids = [TEST_FW.uid, TEST_FW_2.uid]
        # Copy so later mutation of the session list cannot alias `uids`.
        session['uids_for_comparison'] = list(uids)
        session.modified = True
        assert 'uids_for_comparison' in session
        for uid in uids:
            assert uid in session['uids_for_comparison']
        CompareRoutes.remove_all_from_compare_basket(web_frontend, 'some_uid')
        for uid in uids:
            assert uid not in session['uids_for_comparison']
def is_local(hostname, wait=False):
    """Return True if *hostname* refers to the local machine.

    Results are cached in HOSTS_CACHE.  Hostnames that require resolution are
    checked by __is_local: synchronously when *wait* is True, otherwise on a
    daemon background thread — while that thread is in flight the cache holds
    the Thread object itself and this function reports False.
    """
    if (hostname is None):
        # Convention: no hostname means "this machine".
        return True
    with _LOCK:
        if (hostname in HOSTS_CACHE):
            if isinstance(HOSTS_CACHE[hostname], threading.Thread):
                # Resolution still running in the background; assume remote.
                return False
            return HOSTS_CACHE[hostname]
    try:
        local_addresses = (['localhost'] + get_local_addresses())
        result = (hostname.startswith('127.') or (hostname in local_addresses))
        if (not result):
            # inet_aton raises socket.error for anything that is not an IPv4
            # literal, i.e. names that still need a real lookup.
            socket.inet_aton(hostname)
        with _LOCK:
            HOSTS_CACHE[hostname] = result
        return result
    except socket.error:
        if wait:
            result = __is_local(hostname)
            return result
        else:
            thread = threading.Thread(target=__is_local, args=(hostname,))
            thread.daemon = True
            with _LOCK:
                # Park the Thread in the cache so concurrent callers do not
                # start duplicate lookups for the same hostname.
                HOSTS_CACHE[hostname] = thread
            thread.start()
            return False
class EnumFilterInList(FilterInList):
    """A FilterInList whose cleaned values are coerced into members of an enum."""

    def __init__(self, column, name, options=None, enum_class=None, **kwargs):
        # Enum class used to wrap cleaned raw values; None disables coercion.
        self.enum_class = enum_class
        super(EnumFilterInList, self).__init__(column, name, options, **kwargs)

    def clean(self, value):
        """Clean via the parent, then map each raw value to an enum member."""
        cleaned = super(EnumFilterInList, self).clean(value)
        if self.enum_class is None:
            return cleaned
        return [self.enum_class(item) for item in cleaned]
def unpack_function(file_path, tmp_dir):
    """Try to extract a 7z archive into *tmp_dir* using every known password.

    Returns a meta dict containing the extractor output and, for AES-encrypted
    archives, the password that worked.
    """
    meta = {}
    for password in PW_LIST:
        execution_string = f'fakeroot {UNPACKER_EXECUTABLE} x -y -p{password} -o{tmp_dir} {file_path}'
        output = execute_shell_command(execution_string)
        meta['output'] = output
        if ('Wrong password' not in output):
            # Record the password only for encrypted (AES) archives.
            if ('AES' in output):
                meta['password'] = password
            # Fix: stop after the first successful extraction regardless of
            # encryption.  Previously the loop only broke inside the AES
            # branch, so unencrypted archives were pointlessly re-extracted
            # once per password in PW_LIST.
            break
    if ('Wrong password' in meta['output']):
        logging.warning(f'Password for {file_path} not found in fact_extractor/unpacker/passwords directory')
    return meta
def upgrade():
    """Recreate the `connectiontype` Postgres enum with the current value set.

    Uses the standard rename → create → cast column → drop-old dance, since
    Postgres cannot simply replace an enum's values in place.
    NOTE(review): the enum literal list appears garbled in extraction
    (`'mysql', ' 'snowflake'` — a value seems to have been stripped); confirm
    the full value list against the original migration.
    """
    op.execute('alter type connectiontype rename to connectiontype_old')
    op.execute("create type connectiontype as enum('postgres', 'mongodb', 'mysql', ' 'snowflake', 'redshift', 'mssql', 'mariadb', 'bigquery', 'saas', 'manual', 'email')")
    op.execute('alter table connectionconfig alter column connection_type type connectiontype using connection_type::text::connectiontype')
    op.execute('drop type connectiontype_old')
class TpoolLongTests(tests.LimitedTestCase):
    """Longer-running stress tests for eventlet's tpool (thread pool proxy)."""

    # Generous cap: these tests spawn many greenlets with random sleeps.
    TEST_TIMEOUT = 60

    def test_a_buncha_stuff(self):
        """Hammer a tpool.Proxy from many concurrent greenlets and check that
        each call's token round-trips unchanged (no cross-talk between calls)."""
        class Dummy():
            def foo(self, when, token=None):
                assert (token is not None)
                time.sleep((random.random() / 200.0))
                return token

        def sender_loop(loopnum):
            obj = tpool.Proxy(Dummy())
            count = 100
            for n in range(count):
                eventlet.sleep((random.random() / 200.0))
                now = time.time()
                # Unique token per (loop, iteration) so mixed-up replies are detectable.
                token = ((loopnum * count) + n)
                rv = obj.foo(now, token=token)
                self.assertEqual(token, rv)
                eventlet.sleep((random.random() / 200.0))
        cnt = 10
        pile = eventlet.GreenPile(cnt)
        for i in range(cnt):
            pile.spawn(sender_loop, i)
        results = list(pile)
        self.assertEqual(len(results), cnt)
        tpool.killall()

    def test_leakage_from_tracebacks(self):
        """Raising through tpool.execute must not leak objects: the object-count
        growth over 100 failing calls should match the growth over 10 (i.e. no
        per-call accumulation beyond a small constant)."""
        tpool.execute(noop)
        gc.collect()
        initial_objs = len(gc.get_objects())
        for i in range(10):
            self.assertRaises(RuntimeError, tpool.execute, raise_exception)
        gc.collect()
        middle_objs = len(gc.get_objects())
        for i in range(100):
            self.assertRaises(RuntimeError, tpool.execute, raise_exception)
        first_created = (middle_objs - initial_objs)
        gc.collect()
        second_created = (len(gc.get_objects()) - middle_objs)
        assert ((second_created - first_created) < 10), 'first loop: {}, second loop: {}'.format(first_created, second_created)
        tpool.killall()
class TestPointsReader(DataReaderTestBase):
    """Exercise PolyDataReader against the sample points.txt dataset."""

    def setup_reader(self):
        """Load points.txt through a PolyDataReader and record expected bounds."""
        reader = PolyDataReader()
        reader.initialize(get_example_data('points.txt'))
        self.e.add_source(reader)
        # Unit cube: the known bounds of the example point set.
        self.bounds = (0.0, 1.0, 0.0, 1.0, 0.0, 1.0)

    def test_points_data_reader(self):
        """The freshly loaded scene should report the expected bounds."""
        self.check(self.scene, self.bounds)

    def test_save_and_restore(self):
        """Bounds survive a save/restore round trip."""
        self.check_saving(self.e, self.scene, self.bounds)

    def test_deepcopied(self):
        """Bounds survive deep-copying the scene."""
        self.check_deepcopying(self.scene, self.bounds)
# NOTE(review): the next two lines look like truncated decorators (the leading
# `@pytest.mark.`-style text was lost in extraction); confirm.
.backend_config_overwrite({'plugin': {'printable_strings': {'name': 'printable_strings', 'min-length': '4'}}})
.AnalysisPluginTestConfig(plugin_class=AnalysisPlugin)
class TestAnalysisPlugInPrintableStrings():
    """Tests for the printable-strings analysis plugin (min string length 4)."""

    # Expected strings in the test file and their byte offsets.
    strings = ['first string', 'second<>_$tring!', 'third:?-+012345/\\string']
    offsets = [(3, strings[0]), (21, strings[1]), (61, strings[2])]

    def test_process_object(self, analysis_plugin):
        """All expected strings and offsets are found — and nothing extra."""
        fo = FileObject(file_path=os.path.join(TEST_DATA_DIR, 'string_find_test_file2'))
        fo = analysis_plugin.process_object(fo)
        results = fo.processed_analysis[analysis_plugin.NAME]
        for item in self.strings:
            assert (item in results['strings']), f'{item} not found'
        assert (len(results['strings']) == len(self.strings)), 'number of found strings not correct'
        for item in self.offsets:
            assert (item in results['offsets']), f'offset {item} not found'
        assert (len(results['offsets']) == len(self.offsets)), 'number of offsets not correct'

    def test_process_object__no_strings(self, analysis_plugin):
        """A file without printable strings yields empty (but present) result lists."""
        fo = FileObject(file_path=os.path.join(TEST_DATA_DIR, 'string_find_test_file_no_strings'))
        fo = analysis_plugin.process_object(fo)
        results = fo.processed_analysis[analysis_plugin.NAME]
        assert ('strings' in results)
        assert ('offsets' in results)
        assert (len(results['strings']) == 0), 'number of found strings not correct'
        assert (len(results['offsets']) == 0), 'number of offsets not correct'

    def test_match_with_offset(self, analysis_plugin):
        """_match_with_offset reports (offset, string) pairs for the 8-bit regex."""
        regex = analysis_plugin.regexes[0][0]
        for (test_input, expected_output) in [(b'\xffabcdefghij\xff', [(1, 'abcdefghij')]), (b'!"$%&/()=?+*#-.,\t\n\r', [(0, '!"$%&/()=?+*#-.,\t\n\r')]), (b'\xff\xffabc\xff\xff', []), (b'abcdefghij\xff', [(0, 'abcdefghij'), (11, '')])]:
            result = AnalysisPlugin._match_with_offset(regex, test_input)
            assert (result == expected_output)

    def test_match_with_offset__16bit(self, analysis_plugin):
        """UTF-16LE strings are found via the second (encoding-aware) regex."""
        (regex, encoding) = analysis_plugin.regexes[1]
        test_input = b'01234a\x00b\x00c\x00d\x00e\x00f\x00g\x00h\x00i\x00j\x0005678'
        result = AnalysisPlugin._match_with_offset(regex, test_input, encoding)
        assert (result == [(5, 'abcdefghij')])
def test_bundler_inserter(fc_node_builder):
    """insert_bundler_nodes creates one bundler per distinct upstream-dependency set."""
    n0 = OperatorNode({}, {'name': 'test0', 'type': 'none'})
    n1 = OperatorNode({}, {'name': 'test1', 'type': 'none'})
    n2 = OperatorNode({}, {'name': 'test2', 'type': 'none', 'upstream_dependencies': ['test0']})
    n3 = OperatorNode({}, {'name': 'test3', 'type': 'none', 'upstream_dependencies': ['test1', 'test2']})
    n4 = OperatorNode({}, {'name': 'test4', 'type': 'none', 'upstream_dependencies': ['test2']})
    graph = _GraphUtil.build_subgraph([n0, n1, n2, n3, n4])
    # The two distinct upstream sets of the nodes being bundled (n3 and n2).
    deps_of_n3 = frozenset([n1, n2])
    deps_of_n2 = frozenset([n0])
    bundlers = _GraphUtil.insert_bundler_nodes([n2, n3], graph, fc_node_builder=fc_node_builder)
    assert set(bundlers.keys()) == {deps_of_n3, deps_of_n2}
    bundler_for_n3 = bundlers[deps_of_n3]
    assert bundler_for_n3.name == 'test1-test2-322ef2-fc'
    assert _GraphUtil.upstream_dependency_set(bundler_for_n3, graph) == deps_of_n3
    assert _GraphUtil.downstream_dependency_set(bundler_for_n3, graph) == frozenset([n3])
    bundler_for_n2 = bundlers[deps_of_n2]
    assert bundler_for_n2.name == 'test0-f6f406-fc'
    assert _GraphUtil.upstream_dependency_set(bundler_for_n2, graph) == deps_of_n2
    assert _GraphUtil.downstream_dependency_set(bundler_for_n2, graph) == frozenset([n2])
class G2MessageAugmentation(BaseG2Ciphersuite):
    """BLS ciphersuite with message augmentation: the signer's public key is
    prepended to the message before signing/verifying.

    NOTE(review): Sign/Verify/AggregateVerify take `cls` — `@classmethod`
    decorators were presumably lost in extraction; confirm against the original.
    """

    # Domain separation tag for the augmentation scheme.
    DST = b'BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_AUG_'

    def Sign(cls, SK: int, message: bytes) -> BLSSignature:
        PK = cls.SkToPk(SK)
        return cls._CoreSign(SK, (PK + message), cls.DST)

    def Verify(cls, PK: BLSPubkey, message: bytes, signature: BLSSignature) -> bool:
        return cls._CoreVerify(PK, (PK + message), signature, cls.DST)

    def AggregateVerify(cls, PKs: Sequence[BLSPubkey], messages: Sequence[bytes], signature: BLSSignature) -> bool:
        # Each message must be augmented with its corresponding public key.
        if (len(PKs) != len(messages)):
            return False
        messages = [(pk + msg) for (pk, msg) in zip(PKs, messages)]
        return cls._CoreAggregateVerify(PKs, messages, signature, cls.DST)
def _is_valid_gemm_op(tensor: Tensor, f_check_src_op: Callable) -> bool:
    """Check whether `tensor` is an eligible intermediate `gemm_rcr_bias` result.

    The tensor must be a non-output with exactly one producer and one consumer,
    produced by a `gemm_rcr_bias` whose weight and bias are constant-foldable
    and statically shaped, and whose input is produced by a source op accepted
    by `f_check_src_op`.
    """
    if len(tensor.dst_ops()) != 1 or len(tensor.src_ops()) != 1:
        return False
    if tensor._attrs["is_output"]:
        return False
    gemm_op = next(iter(tensor.src_ops()))
    if gemm_op._attrs["op"] != "gemm_rcr_bias":
        return False
    gemm_input, weight, bias = gemm_op._attrs["inputs"]
    if not transform_utils.can_be_constant_folded([weight, bias]):
        return False
    if len(gemm_input.dst_ops()) != 1 or len(gemm_input.src_ops()) != 1:
        return False
    if (gemm_input._rank(), weight._rank(), bias._rank()) != (2, 2, 1):
        return False
    static_dims_ok = (
        is_static_dimension(gemm_input.shape(), 1)
        and is_static_dimension(weight.shape(), 0)
        and is_static_dimension(weight.shape(), 1)
        and is_static_dimension(bias.shape(), 0)
    )
    if not static_dims_ok:
        return False
    source_op = next(iter(gemm_input.src_ops()))
    return bool(f_check_src_op(source_op))
# NOTE(review): decorator prefix lost in extraction — presumably
# `@pytest.fixture(scope='module')`; confirm.
(scope='module')
def simple_nonsingular_sparse_mat():
    """Module-scoped fixture: a small 3x3 nonsingular sparse matrix in CSR form."""
    nr = 3
    nc = 3
    nnz = 7
    nzval = np.array([1.0, 2.0, (- 1.0), 2.0, 4.0, 1.0, (- 1.0)], dtype=np.float64)
    rowptr = np.array([0, 3, 5, 7], dtype=np.int32)
    colind = np.array([0, 1, 2, 0, 1, 1, 2], dtype=np.int32)
    A = superluWrappers.SparseMatrix(nr, nc, nnz, nzval, colind, rowptr)
    (yield A)
class Crater(Tagged):
    """A crater rendered as a circle shaded by a radial gradient."""

    def __init__(self, tags: dict[str, str], coordinates: np.ndarray, point: np.ndarray) -> None:
        super().__init__(tags)
        self.coordinates: np.ndarray = coordinates
        self.point: np.ndarray = point

    def draw(self, svg: Drawing, flinger: Flinger) -> None:
        """Render the crater onto the SVG drawing (requires a `diameter` tag)."""
        scale: float = flinger.get_scale(self.coordinates)
        assert 'diameter' in self.tags
        radius: float = float(self.tags['diameter']) / 2.0
        scaled_radius: float = radius * scale
        # Offset the gradient centre slightly downward for a 3D-ish look.
        center = self.point + np.array((0.0, scaled_radius / 7.0))
        gradient = svg.defs.add(
            svg.radialGradient(center=center, r=scaled_radius, gradientUnits='userSpaceOnUse')
        )
        shade: Color = Color('#000000')
        gradient.add_stop_color(0.0, shade.hex, opacity=0.2)
        gradient.add_stop_color(0.7, shade.hex, opacity=0.2)
        gradient.add_stop_color(1.0, shade.hex, opacity=1.0)
        svg.add(svg.circle(self.point, scaled_radius, fill=gradient.get_funciri(), opacity=0.2))
def count_versions(obj):
    """Return the number of version rows stored for *obj*.

    Returns 0 for transient/detached objects (no session, so no versions can
    have been written yet).
    """
    session = sa.orm.object_session(obj)
    if (session is None):
        return 0
    manager = get_versioning_manager(obj)
    table_name = (manager.option(obj, 'table_name') % obj.__table__.name)
    pks = get_primary_keys(obj)
    # Fix: bind the primary-key values as parameters instead of interpolating
    # them with %r — Python repr() is not SQL quoting and breaks (or allows
    # injection) for values such as strings containing quotes.
    criteria = ' AND '.join(('%s = :%s' % (pk, pk)) for pk in pks)
    query = ('SELECT COUNT(1) FROM %s WHERE %s' % (table_name, criteria))
    params = {pk: getattr(obj, pk) for pk in pks}
    return session.execute(sa.text(query), params).scalar()
class ITask():
    """Workflow-internal task wrapper driven by a ReplayDecider's event loop.

    NOTE(review): the class-level attributes below act as defaults and appear
    to be overwritten per instance by the surrounding framework — confirm.
    """
    decider: ReplayDecider = None
    task: Task = None
    status: Status = Status.CREATED
    # Future the task is currently blocked on, or None when runnable.
    awaited: Future = None

    def is_done(self):
        return (self.status == Status.DONE)

    def destroy(self):
        # Only a RUNNING task needs cancelling; mark it DONE either way it was running.
        if (self.status == Status.RUNNING):
            self.status = Status.DONE
            self.task.cancel()

    def start(self):
        pass

    async def await_till(self, c: Callable, timeout_seconds: int=0) -> bool:
        """Suspend until predicate *c* is true or the optional timer fires.

        Returns True when the condition was met (cancelling any pending
        timer), False when the timeout fired first.
        """
        timer_cancellation_handler: TimerCancellationHandler = None
        timer_fired = False

        def timer_callback(ex: Exception):
            # The timer passes an exception when cancelled; only a clean fire
            # (no exception) counts as a timeout.
            nonlocal timer_fired
            if (not ex):
                timer_fired = True
        if timeout_seconds:
            timer_cancellation_handler = self.decider.decision_context.create_timer(delay_seconds=timeout_seconds, callback=timer_callback)
        while ((not c()) and (not timer_fired)):
            # Park on a fresh future; unblock() resolves it when state changes.
            self.awaited = self.decider.event_loop.create_future()
            (await self.awaited)
            assert self.awaited.done()
            self.awaited = None
        if timer_fired:
            return False
        if timer_cancellation_handler:
            timer_cancellation_handler.accept(None)
        return True

    def unblock(self):
        # Wake a task parked in await_till(), if any.
        if self.awaited:
            self.awaited.set_result(None)

    # NOTE(review): takes no self — presumably a `@staticmethod` whose
    # decorator was lost in extraction; confirm.
    def current() -> ITask:
        return current_task.get()
def extractWondabunnyCom(item):
    """Map a wondabunny.com feed item to a release message.

    Returns None for previews/untitled chapters and False when no known tag
    matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [('hi, my sweet little wife', 'Hi, my sweet little wife', 'translated'), ("the idol's spoiled wife", 'The Idols spoiled Wife', 'translated'), ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    # First matching tag wins.
    match = next(((name, tl_type) for tagname, name, tl_type in tagmap if tagname in item['tags']), None)
    if match is None:
        return False
    name, tl_type = match
    return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
class Proxy():
    """Configuration for sending requests through an HTTP/SOCKS proxy.

    Credentials embedded in the proxy URL are extracted into `auth` and
    stripped from the stored URL.
    """

    def __init__(self, url: URLTypes, *, ssl_context: typing.Optional[ssl.SSLContext]=None, auth: typing.Optional[typing.Tuple[(str, str)]]=None, headers: typing.Optional[HeaderTypes]=None) -> None:
        url = URL(url)
        headers = Headers(headers)
        # NOTE(review): the scheme tuple appears garbled by extraction —
        # presumably ('http', 'https', 'socks5') originally; confirm.
        if (url.scheme not in (' ' 'socks5')):
            raise ValueError(f'Unknown scheme for proxy URL {url!r}')
        if (url.username or url.password):
            # Move URL-embedded credentials into `auth` and scrub the URL.
            auth = (url.username, url.password)
            url = url.copy_with(username=None, password=None)
        self.url = url
        self.auth = auth
        self.headers = headers
        self.ssl_context = ssl_context

    # NOTE(review): likely a `@property` whose decorator was lost in extraction; confirm.
    def raw_auth(self) -> typing.Optional[typing.Tuple[(bytes, bytes)]]:
        # Credentials as UTF-8 bytes, for the wire.
        return (None if (self.auth is None) else (self.auth[0].encode('utf-8'), self.auth[1].encode('utf-8')))

    def __repr__(self) -> str:
        # Mask the password when rendering auth.
        auth = ((self.auth[0], '') if self.auth else None)
        url_str = f'{str(self.url)!r}'
        auth_str = (f', auth={auth!r}' if auth else '')
        headers_str = (f', headers={dict(self.headers)!r}' if self.headers else '')
        return f'Proxy({url_str}{auth_str}{headers_str})'
def award_data_fixture(db):
    """Seed the test database with two awards (one contract, one grant) plus the
    agency, account, submission, and DEFC reference rows they hang off of."""
    # Transactions: one FPDS contract (2010) and one FABS grant (2016).
    baker.make('search.TransactionSearch', is_fpds=True, transaction_id=1, award_id=1, action_date='2010-10-01', type='A', recipient_location_zip5='abcde', piid='IND12PB00323', recipient_location_county_code='059', recipient_location_state_code='VA', recipient_location_congressional_code='11', recipient_location_country_code='USA', pop_state_code='VA', pop_congressional_code='11', place_of_performance_code='USA', naics_code='331122', product_or_service_code='1510', type_set_aside='8AN', type_of_contract_pricing='2', extent_competed='F')
    baker.make('search.TransactionSearch', is_fpds=False, transaction_id=2, award_id=2, action_date='2016-10-01', type='02', fain='P063P100612', cfda_number='84.063')
    # Agency hierarchy shared by both awards.
    baker.make('references.ToptierAgency', toptier_agency_id=1, name='Department of Transportation', _fill_optional=True)
    baker.make('references.SubtierAgency', subtier_agency_id=1, name='Department of Transportation', _fill_optional=True)
    baker.make('references.Agency', id=1, toptier_agency_id=1, subtier_agency_id=1, _fill_optional=True)
    # Award rows matching the transactions above.
    baker.make('search.AwardSearch', award_id=1, latest_transaction_id=1, is_fpds=True, type='A', piid='IND12PB00323', display_award_id='IND12PB00323', description='pop tarts and assorted cereals', total_obligation=500000.0, date_signed='2010-10-1', awarding_agency_id=1, funding_agency_id=1, action_date='2010-10-1', recipient_location_county_code='059', recipient_location_state_code='VA', recipient_location_congressional_code='11', recipient_location_country_code='USA', pop_state_code='VA', pop_congressional_code='11', pop_country_code='USA', naics_code='331122', product_or_service_code='1510', type_set_aside='8AN', type_of_contract_pricing='2', extent_competed='F', tas_paths='{aid=097main=4930ata=sub=000bpoa=epoa=a=X}', funding_toptier_agency_name='Department of Transportation', disaster_emergency_fund_codes='{L}')
    baker.make('search.AwardSearch', award_id=2, latest_transaction_id=2, is_fpds=False, type='02', fain='P063P100612', display_award_id='P063P100612', cfda_number='84.063', total_obligation=1000000.0, date_signed='2016-10-1', action_date='2016-10-1')
    # Account / submission scaffolding for COVID (DEFC 'L') spending on award 1.
    baker.make('accounts.FederalAccount', id=1, parent_toptier_agency_id=1, agency_identifier='1', main_account_code='0001')
    baker.make('accounts.TreasuryAppropriationAccount', treasury_account_identifier=1, agency_id='097', main_account_code='4930', federal_account_id=1)
    baker.make('submissions.SubmissionAttributes', submission_id=1, reporting_fiscal_year=2020, reporting_fiscal_period=12, reporting_period_start='2020-04-21', reporting_period_end='2020-04-30', quarter_format_flag=True, submission_window_id=2020121)
    baker.make('submissions.DABSSubmissionWindowSchedule', id=2020121, submission_fiscal_year=2020, submission_fiscal_month=12, is_quarter=True, period_start_date='2020-04-21', submission_reveal_date='2020-04-30')
    code = baker.make('references.DisasterEmergencyFundCode', code='L', group_name='covid_19')
    baker.make('awards.FinancialAccountsByAwards', financial_accounts_by_awards_id=1, award_id=1, treasury_account_id=1, submission_id=1, disaster_emergency_fund=code, gross_outlay_amount_by_award_cpe=100, transaction_obligated_amount=100)
    baker.make('references.RefCountryCode', country_code='USA', country_name='UNITED STATES')
('This IE test is not working. We need to move it to using some other win32 API.')
class TestCase(unittest.TestCase):
    """Smoke test for MSAA accessibility access via COM.

    Launches an Internet Explorer automation instance and verifies that an
    ``IAccessible`` pointer can be obtained from the document body through
    ``IServiceProvider.QueryService``.  Windows-only; requires IE installed.
    (A note above this class says the test is currently not working.)
    """

    def setUp(self):
        # Start a fresh IE COM automation server for each test.
        self.ie = CreateObject('InternetExplorer.application')

    def tearDown(self):
        # Quit the browser and drop our reference so the COM server can exit.
        self.ie.Quit()
        del self.ie

    def test(self):
        ie = self.ie
        ie.navigate2('about:blank', 0)
        # The document body exposes IServiceProvider; ask it for the
        # accessibility service and expect a typed IAccessible pointer back.
        sp = ie.Document.Body.QueryInterface(comtypes.IServiceProvider)
        pacc = sp.QueryService(IAccessible._iid_, IAccessible)
        self.assertEqual(type(pacc), POINTER(IAccessible))
def merge(timings_list, site):
    """Merge per-source timing dictionaries, keeping only instances of `site`.

    Args:
        timings_list: Iterable of timing dicts, each shaped like
            {'header': {...possibly 'divider'...}, 'cells': {cell: {instance: data}}}.
        site: Site name; an instance is kept when `site` is one of the
            divider-separated components of the instance path.

    Returns:
        Dict with a 'cells' key mapping cell -> instance -> timing data
        (empty dict when nothing matched).

    Raises:
        AssertionError: if the same cell instance appears twice with
            differing timing data.
    """
    merged_timings = dict()
    for timings in timings_list:
        # Instance paths are split on '/' unless the header overrides it.
        divider = timings['header'].get('divider', '/')
        for cell in timings['cells']:
            for cell_instance in timings['cells'][cell]:
                if site not in cell_instance.split(divider):
                    continue
                cell_map = merged_timings.setdefault('cells', dict()).setdefault(cell, dict())
                # Bug fix: the original tested membership in the freshly
                # created *value* dict, so this consistency check never fired
                # and conflicting data was silently overwritten.
                if cell_instance in cell_map:
                    assert cell_map[cell_instance] == timings['cells'][cell][cell_instance], 'Attempting to merge differing cells'
                cell_map[cell_instance] = timings['cells'][cell][cell_instance]
    return merged_timings
def extractBookloversZone(item):
    """Map a BookloversZone feed item to a release message.

    Returns None for previews, items without a chapter/volume number, and
    excluded authors; False when no known series tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Previews and items lacking both chapter and volume are not releases.
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    # This author's works are explicitly excluded from tracking.
    if 'juliet marillier' in item['tags']:
        return None
    # Feed tag -> (canonical series name, translation type).
    known_series = {
        'how to escape from an implacable man': ('How to Escape from the Implacable Man', 'translated'),
    }
    for tagname, (name, tl_type) in known_series.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class ReduceCursor(StmtCursor):
    """Cursor pointing at a LoopIR.Reduce statement."""

    def _checked_node(self):
        # Shared validation: the wrapped impl must be a C.Node whose
        # underlying IR node is a Reduce before any accessor may be used.
        assert isinstance(self._impl, C.Node)
        node = self._impl._node
        assert isinstance(node, LoopIR.Reduce)
        return node

    def name(self) -> str:
        """Name of the buffer the reduction writes into."""
        return self._checked_node().name.name()

    def idx(self) -> ExprListCursor:
        """Cursor over the reduction's index expressions."""
        self._checked_node()
        return ExprListCursor(self._impl._child_block('idx'), self._proc)

    def rhs(self) -> ExprCursor:
        """Cursor to the reduction's right-hand-side expression."""
        self._checked_node()
        return self._child_node('rhs')
def generate_contract_objects(contract_to_build_data, hash_to_func_name):
    """Build SolidityContract objects from compiler build artifacts.

    Args:
        contract_to_build_data: Mapping of contract name -> build-output dict
            (keys used: 'source', 'bytecode', 'deployedBytecode', 'abi',
            'sourceMap', 'deployedSourceMap').
        hash_to_func_name: Mapping of function selector hash -> function name,
            shared by every created contract.

    Returns:
        Tuple of (contract name -> SolidityContract,
                  library name -> assigned placeholder address).
    """
    contract_name_to_contract = {}
    lib_name_to_address = {}
    # NOTE(review): return value discarded -- presumably this advances or
    # initializes the library-address counter before the allocations below;
    # confirm intent, otherwise this call is a no-op.
    utils.get_next_lib_address()
    for (contract_name, contract_json_data) in contract_to_build_data.items():
        src_code = contract_json_data['source']
        runtime_bytecode = contract_json_data['deployedBytecode']
        bytecode = contract_json_data['bytecode']
        # Assign one placeholder address per library referenced by either the
        # creation bytecode or the deployed bytecode (first assignment wins).
        for lib_name in (find_library_names(bytecode) | find_library_names(runtime_bytecode)):
            lib_name_to_address.setdefault(lib_name, utils.get_next_lib_address())
        abi = contract_json_data['abi']
        runtime_src_map = contract_json_data['deployedSourceMap']
        src_map = contract_json_data['sourceMap']
        contract = SolidityContract(contract_name, abi, bytecode, runtime_bytecode, src_map, runtime_src_map, src_code, hash_to_func_name)
        contract_name_to_contract[contract_name] = contract
    return (contract_name_to_contract, lib_name_to_address)
class Common():
    """Application settings, loaded from environment variables or config.ini.

    When the ENV environment variable is set (truthy), configuration is read
    from the process environment (container/Heroku style); otherwise it is
    read from mega/working_dir/config.ini via configparser.
    """

    def __init__(self):
        self.working_dir = 'mega/working_dir'
        self.on_heroku = False
        # A non-empty ENV selects environment-variable configuration.
        self.is_env = bool(os.environ.get('ENV', None))
        if self.is_env:
            self._load_from_environment()
        else:
            self._load_from_config_file()

    def _load_from_environment(self):
        """Populate settings from environment variables."""
        env = os.environ
        self.tg_app_id = int(env.get('TG_APP_ID'))
        self.tg_api_key = env.get('TG_API_HASH')
        # Env deployments keep the pyrogram session in memory only.
        self.bot_session = ':memory:'
        self.bot_api_key = env.get('TG_BOT_TOKEN')
        self.bot_dustbin = int(env.get('TG_DUSTBIN_CHAT', '-100'))
        self.allowed_users = ast.literal_eval(env.get('ALLOWED_USERS', '[]'))
        self.is_atlas = env.get('IS_ATLAS', None)
        self.db_host = env.get('DATABASE_DB_HOST')
        self.db_username = env.get('DATABASE_DB_USERNAME')
        self.db_password = env.get('DATABASE_DB_PASSWORD')
        self.db_name = env.get('DATABASE_DB_NAME')
        self.web_port = env.get('WEB_SERVER_PORT', 8080)
        if 'DYNO' in env:
            # Running on a Heroku dyno: the platform assigns the port via PORT.
            self.on_heroku = True
            self.web_port = os.getenv('PORT', 8080)
        self.web_bind_address = env.get('WEB_SERVER_BIND_ADDRESS', '0.0.0.0')
        self.web_fqdn = env.get('WEB_SERVER_FQDN', self.web_bind_address)

    def _load_from_config_file(self):
        """Populate settings from mega/working_dir/config.ini."""
        self.app_config = configparser.ConfigParser()
        self.app_config_file = 'mega/working_dir/config.ini'
        self.app_config.read(self.app_config_file)
        cfg = self.app_config
        self.tg_app_id = int(cfg.get('pyrogram', 'api_id'))
        self.tg_api_key = cfg.get('pyrogram', 'api_hash')
        self.bot_session = cfg.get('bot-configuration', 'session')
        self.bot_api_key = cfg.get('bot-configuration', 'api_key')
        self.bot_dustbin = int(cfg.get('bot-configuration', 'dustbin'))
        self.allowed_users = ast.literal_eval(cfg.get('bot-configuration', 'allowed_users', fallback='[]'))
        self.is_atlas = cfg.getboolean('database', 'is_atlas', fallback=False)
        self.db_host = cfg.get('database', 'db_host')
        self.db_username = cfg.get('database', 'db_username', fallback=None)
        self.db_password = cfg.get('database', 'db_password', fallback=None)
        self.db_name = cfg.get('database', 'db_name')
        self.web_bind_address = cfg.get('web_server', 'bind_address', fallback='0.0.0.0')
        self.web_port = int(cfg.get('web_server', 'port', fallback=8080))
        self.web_fqdn = cfg.get('web_server', 'fqdn', fallback=self.web_bind_address)
def test_encode_structured_data_ignores_additional_data_in_a_custom_type():
    """Keys absent from the declared EIP-712 type must not change the encoding."""
    payload_with_pet = {'types': {'EIP712Domain': [{'name': 'name', 'type': 'string'}], 'Person': [{'name': 'name', 'type': 'string'}]}, 'primaryType': 'Person', 'domain': {'name': 'Name'}, 'message': {'name': 'Bob', 'pet': {'animal': 'cat', 'age': 3}}}
    payload_without_pet = deepcopy(payload_with_pet)
    del payload_without_pet['message']['pet']
    assert 'pet' not in payload_without_pet['message']
    # 'pet' is not a declared field of Person, so both payloads encode identically.
    assert (encode_structured_data(payload_with_pet) == encode_structured_data(payload_without_pet))
(name='info')
_errors
('name')
_option
_options(openid_options)
_option
_option
def info_release(name: str, url: str, id_provider: str, client_id: str, **kwargs):
    """Query Bodhi for a release by name and print its details.

    Args:
        name: Release name to look up.
        url: Base URL of the Bodhi server.
        id_provider: OpenID identity provider used by the client.
        client_id: OAuth/OpenID client id.
        kwargs: Remaining CLI options; 'staging' selects the staging server.
    """
    client = bindings.BodhiClient(base_url=url, client_id=client_id, id_provider=id_provider, staging=kwargs['staging'])
    # Release details are public, so the GET is sent unauthenticated.
    res = client.send_request(f'releases/{name}', verb='GET', auth=False)
    if ('errors' in res):
        print_errors(res)
    else:
        click.echo('Release:')
        print_release(res)
def _get_highlights(build, coverage_eval):
    """Convert raw coverage evaluation into statement/branch highlight maps.

    Contracts missing from the build artifacts are silently skipped.
    """
    statements = {}
    branches = {}
    for contract_name, eval_ in coverage_eval.items():
        try:
            coverage_map = build.get(contract_name)['coverageMap']
        except KeyError:
            # No build data (or no coverage map) for this contract.
            continue
        statements[contract_name] = _statement_highlights(eval_, coverage_map['statements'])
        branches[contract_name] = _branch_highlights(eval_, coverage_map['branches'])
    return {'statements': statements, 'branches': branches}
class OptionSeriesTreegraphSonificationContexttracksMappingPan(Options):
    """Pan-mapping options for treegraph sonification context tracks.

    Defect fixed: each option was defined twice with the same name (a getter
    immediately shadowed by its same-name setter), leaving the getters as dead
    code and the attributes callable-only.  Restored to the standard
    ``@property`` / ``@name.setter`` pattern used by Highcharts option
    wrappers; all ``_config`` calls are unchanged.
    """

    @property
    def mapFunction(self):
        # None defers to the library default mapping function.
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class OptionPlotoptionsCylinderSonificationDefaultinstrumentoptionsMappingLowpass(Options):
    """Lowpass-filter mapping options for cylinder-series sonification."""

    def frequency(self) -> 'OptionPlotoptionsCylinderSonificationDefaultinstrumentoptionsMappingLowpassFrequency':
        """Return the 'frequency' sub-options object (created via the config store)."""
        return self._config_sub_data('frequency', OptionPlotoptionsCylinderSonificationDefaultinstrumentoptionsMappingLowpassFrequency)

    def resonance(self) -> 'OptionPlotoptionsCylinderSonificationDefaultinstrumentoptionsMappingLowpassResonance':
        """Return the 'resonance' sub-options object (created via the config store)."""
        return self._config_sub_data('resonance', OptionPlotoptionsCylinderSonificationDefaultinstrumentoptionsMappingLowpassResonance)
class OptionSeriesBubbleStatesInactive(Options):
    """Inactive-state options for bubble series.

    Defect fixed: ``enabled`` and ``opacity`` were each defined twice with the
    same name (getter immediately shadowed by its same-name setter), leaving
    the getters unreachable.  Restored to the ``@property`` / ``@name.setter``
    pattern; ``animation`` is left as a plain accessor since it was not
    shadowed.
    """

    def animation(self) -> 'OptionSeriesBubbleStatesInactiveAnimation':
        """Return the 'animation' sub-options object."""
        return self._config_sub_data('animation', OptionSeriesBubbleStatesInactiveAnimation)

    @property
    def enabled(self):
        # Default True, per the original getter's _config_get default.
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def opacity(self):
        # Default 0.2, per the original getter's _config_get default.
        return self._config_get(0.2)

    @opacity.setter
    def opacity(self, num: float):
        self._config(num, js_type=False)
def test_bad_creation() -> None:
    """Line construction must reject degenerate point lists and negative widths."""
    # Fewer than two points cannot form a line.
    for too_few_points in ([], [staticmaps.create_latlng(48, 8)]):
        with pytest.raises(ValueError):
            staticmaps.Line(too_few_points)
    # A valid pair of points still fails with a negative width.
    valid_points = [staticmaps.create_latlng(48, 8), staticmaps.create_latlng(49, 9)]
    with pytest.raises(ValueError):
        staticmaps.Line(valid_points, width=(-123))
class TrainConfig(WorkflowConfig):
    """Configuration for a training workflow run."""

    # Output location for run artifacts.
    output: str
    # Optional checkpoint to resume from (None starts fresh).
    checkpoint: (str | None)
    # RNG seed for reproducibility.
    seed: int
    # Enables debug behavior (semantics defined by the consuming workflow).
    debug: bool
    # Total number of training steps.
    n_steps: int
    # Named validation configurations.
    validation: dict[(str, ValidationConfig)]
    # Named loss configurations -- presumably foreground/background model
    # components; confirm against the training workflow.
    fg_losses: dict[(str, ObjectConfig)]
    bg_losses: dict[(str, ObjectConfig)]
    # Learning-rate / training scheduler settings.
    scheduler: SchedulerConfig
    # Checkpoint-saving policy.
    save_checkpoint: CheckpointConfig
    # Whether to save a pretrain-stage checkpoint.
    save_pretrain_checkpoint: bool
    # Whether to save a checkpoint at the end of training.
    save_final_checkpoint: bool
    # Whether to load the fg/bg components from the checkpoint.
    load_fg: bool
    load_bg: bool
    # Reset the global step counter when resuming.
    reset_global_step: bool
    # Reset background optimization state when resuming.
    reset_bg_optimization: bool
class Generator(AbstractODSGenerator):
    """ODS report generator for open positions / unrealized gains.

    Produces a spreadsheet with three tabs: a per-asset summary, a per
    asset-and-exchange detail view, and an Input tab where the user enters
    current asset prices; the report cells reference those prices through
    VLOOKUP formulas so the report recalculates live in the spreadsheet app.
    """

    # Name of the generated spreadsheet file.
    OUTPUT_FILE: str = 'open_positions.ods'
    # Number of header rows before data rows start on each sheet.
    HEADER_ROWS = 3
    # Localized text blocks; populated per-country/language by _setup_text_data.
    __legend: List[List[str]] = []
    __asset_header_names_row_1: List[str] = []
    __asset_header_names_row_2: List[str] = []
    __asset_exchange_header_names_row_1: List[str] = []
    __asset_exchange_header_names_row_2: List[str] = []
    __input_header_names_row_1: List[str] = []
    __input_header_names_row_2: List[str] = []

    def _setup_text_data(self, country: AbstractCountry) -> None:
        """Build the localized legend and sheet-header strings for `country`."""
        currency_code: str = country.currency_iso_code.upper()
        self.__legend: List[List[str]] = [[_('Open Positions / Unrealized Gains')], [_('Fill in Asset Prices in the Input Tab for calculations')], [_('This report leverages the details of your transactions to give you a snapshot of the value of your unsold holdings')], [_('This file contains two versions of the same report. One version is grouped by Asset, the other by Asset and Exchange (or wallet)')], [''], [_('General')], [_('Accounting Method')], [_('From Date Filter')], [_('To Date Filter')], [''], [_('Report Fields')], [_('Asset'), _('Crypto / asset symbol')], [_('Holder'), _('Asset holder')], [_('Exchange'), _('Exchange or wallet where balance is held (Asset - Exchange tab only)')], [_('Crypto Balance'), _('Amount of given asset held')], [_('{} Per Unit Cost Basis').format(currency_code), _('Sum of fiat cost to acquire the balance of crypto based on in-flow transactions divided by the crypto balance')], [_('{} Unrealized Cost Basis').format(currency_code), _('Sum of fiat cost to acquire the balance of crypto based on in-flow transactions')], [_('Cost Basis Weight %'), _("Size of investment relative to other assets for cost basis ({} Unrealized Cost Basis divided by the sum of all assets' {} Unrealized Cost Basis)").format(currency_code, currency_code)], [_('{} Per Unit Input Price').format(currency_code), _('Looked up from the Input tab for the given asset')], [_('{} Unrealized Value').format(currency_code), _('Crypto Balance * Input Price')], [_('{} Unrealized Gain / Loss').format(currency_code), _('Unrealized Value - Unrealized Cost Basis')], [_('{} Unrealized Gain / Loss %').format(currency_code), _('Percent change between Unrealized Value and Unrealized Cost Basis')], [_('Unrealized G/L of Total Cost Basis %'), _("Indicates the percent of which this asset's unrealized gain/loss contributed to the whole portfolio's Unrealized Gain / Loss % (Gain/Loss relative to total cash investment)")], [_('{} Unrealized Value Weight %').format(currency_code), _("Size of investment relative to the portfolio's unrealized value (Unrealized Value divided by sum of all assets' Unrealized Values)")], [''], [_('Color Code')], [_('Gray'), _('Information from transactions')], [_('Yellow'), _('Information calculated based on values entered in the Input tab')]]
        self.__asset_header_names_row_1: List[str] = ['', '', _('Crypto'), _('{} Per Unit').format(currency_code), _('{} Unrealized').format(currency_code), _('Cost Basis'), _('{} Per Unit').format(currency_code), _('{} Unrealized').format(currency_code), _('{} Unrealized').format(currency_code), _('{} Unrealized').format(currency_code), _('Unrealized G/L of'), _('{} Unrealized').format(currency_code)]
        self.__asset_header_names_row_2: List[str] = [_('Asset'), _('Holder'), _('Balance'), _('Cost Basis'), _('Cost Basis'), _('Weight %'), _('Input Price'), _('Value'), _('Gain / Loss'), _('Gain / Loss %'), _('Total Cost Basis %'), _('Value Weight %')]
        self.__asset_exchange_header_names_row_1: List[str] = ['', '', '', _('Crypto'), _('{} Per Unit').format(currency_code), _('{} Unrealized').format(currency_code), _('Cost Basis'), _('{} Per Unit').format(currency_code), _('{} Unrealized').format(currency_code), _('{} Unrealized').format(currency_code), _('{} Unrealized').format(currency_code), _('Unrealized G/L of'), _('{} Unrealized').format(currency_code)]
        self.__asset_exchange_header_names_row_2: List[str] = [_('Asset'), _('Holder'), _('Exchange'), _('Balance'), _('Cost Basis'), _('Cost Basis'), _('Weight %'), _('Input Price'), _('Value'), _('Gain / Loss'), _('Gain / Loss %'), _('Total Cost Basis %'), _('Value Weight %')]
        self.__input_header_names_row_1: List[str] = [_('Crypto'), '']
        self.__input_header_names_row_2: List[str] = [_('Asset'), _('Price')]

    def generate(self, country: AbstractCountry, years_2_accounting_method_names: Dict[(int, str)], asset_to_computed_data: Dict[(str, ComputedData)], output_dir_path: str, output_file_prefix: str, from_date: date, to_date: date, generation_language: str) -> None:
        """Write the open-positions report to an ODS file.

        Aggregates in-flow transactions and balances per asset (and per
        holder/exchange), fills the three sheets with data rows, spreadsheet
        formulas and per-holder / grand-total rows, then saves the file.

        Raises:
            RP2TypeError: if `asset_to_computed_data` is not a Dict or any of
                its keys/values fail type checks.
        """
        if (not isinstance(asset_to_computed_data, Dict)):
            raise RP2TypeError(f"Parameter 'asset_to_computed_data' has non-Dict value {asset_to_computed_data}")
        self._setup_text_data(country)
        template_path: str = self._get_template_path('open_positions', country, generation_language)
        output_file: Any
        output_file = self._initialize_output_file(country=country, legend_data=self.__legend, years_2_accounting_method_names=years_2_accounting_method_names, output_dir_path=output_dir_path, output_file_prefix=output_file_prefix, output_file_name=self.OUTPUT_FILE, template_path=template_path, template_sheets_to_keep=_TEMPLATE_SHEETS_TO_KEEP, from_date=from_date, to_date=to_date)
        asset: str
        computed_data: ComputedData
        asset_sheet = output_file.sheets[_ASSET]
        asset_exchange_sheet = output_file.sheets[_ASSET_EXCHANGE]
        input_sheet = output_file.sheets[_INPUT]
        # Sheet headers (styles come from the template, hence apply_style=False).
        self._fill_header(_('Open Positions by Asset'), self.__asset_header_names_row_1, self.__asset_header_names_row_2, asset_sheet, 0, 0, apply_style=False)
        self._fill_cell(asset_sheet, 0, 6, _('ENTER PRICES ON INPUT TAB'), apply_style=False)
        self._fill_header(_('Open Positions by Asset and Exchange'), self.__asset_exchange_header_names_row_1, self.__asset_exchange_header_names_row_2, asset_exchange_sheet, 0, 0, apply_style=False)
        self._fill_cell(asset_exchange_sheet, 0, 7, _('ENTER PRICES ON INPUT TAB'), apply_style=False)
        self._fill_header(_('Asset Price Lookup Table'), self.__input_header_names_row_1, self.__input_header_names_row_2, input_sheet, 0, 0, apply_style=False)
        # Next writable row per sheet (0-based), starting after the headers.
        row_indexes: Dict[(str, int)] = {sheet_name: self.HEADER_ROWS for sheet_name in _TEMPLATE_SHEETS}
        total_cost_basis = ZERO
        asset_cost_bases: Dict[(str, RP2Decimal)] = {}
        holders: List[str] = []
        asset_crypto_balance_holder: Dict[(str, Dict[(str, RP2Decimal)])] = {}
        asset_crypto_balance_holder_exchange: Dict[(str, Dict[(str, Dict[(str, RP2Decimal)])])] = {}
        # Pass 1: aggregate remaining (unsold) cost basis and balances per
        # asset, holder and exchange from the computed transaction data.
        for (asset, computed_data) in asset_to_computed_data.items():
            if (not isinstance(asset, str)):
                raise RP2TypeError(f"Parameter 'asset' has non-string value {asset}")
            ComputedData.type_check('computed_data', computed_data)
            for current_transaction in computed_data.in_transaction_set:
                in_transaction = cast(InTransaction, current_transaction)
                sold_percent: RP2Decimal = computed_data.get_in_lot_sold_percentage(in_transaction)
                # Only the unsold fraction of each in-lot contributes.
                transaction_cost_basis: RP2Decimal = (in_transaction.fiat_in_with_fee * (RP2Decimal('1') - sold_percent))
                if (transaction_cost_basis > ZERO):
                    value = asset_cost_bases.setdefault(asset, ZERO)
                    value += transaction_cost_basis
                    asset_cost_bases[asset] = value
                    total_cost_basis += transaction_cost_basis
            for balance_set in computed_data.balance_set:
                if (balance_set.final_balance > ZERO):
                    if (balance_set.holder not in holders):
                        holders.append(balance_set.holder)
                    if (asset not in asset_crypto_balance_holder):
                        asset_crypto_balance_holder[asset] = {}
                        asset_crypto_balance_holder_exchange[asset] = {}
                    if (balance_set.holder not in asset_crypto_balance_holder[asset]):
                        asset_crypto_balance_holder[asset][balance_set.holder] = ZERO
                        asset_crypto_balance_holder_exchange[asset][balance_set.holder] = {}
                    asset_crypto_balance_holder[asset][balance_set.holder] += balance_set.final_balance
                    if (balance_set.exchange not in asset_crypto_balance_holder_exchange[asset][balance_set.holder]):
                        asset_crypto_balance_holder_exchange[asset][balance_set.holder][balance_set.exchange] = balance_set.final_balance
        # Pass 2: emit data rows with formulas referencing the Input tab.
        for (asset, asset_cost_basis) in asset_cost_bases.items():
            total_crypto_balance = ZERO
            for crypto_balance in asset_crypto_balance_holder[asset].values():
                total_crypto_balance += crypto_balance
            unit_cost_basis: RP2Decimal = (asset_cost_basis / total_crypto_balance)
            # Pick a decimal precision style appropriate for the unit price.
            unit_data_style: str = 'fiat'
            if (_FIAT_UNIT_DATA_STYLE_4_DECIMAL_MINIMUM <= unit_cost_basis < _FIAT_UNIT_DATA_STYLE_2_DECIMAL_MINIMUM):
                unit_data_style = 'fiat_unit_4'
            elif (unit_cost_basis < _FIAT_UNIT_DATA_STYLE_4_DECIMAL_MINIMUM):
                unit_data_style = 'fiat_unit_7'
            input_sheet.append_rows(1)
            input_row_index: int = row_indexes[_INPUT]
            self._fill_cell(input_sheet, input_row_index, 0, asset)
            self._fill_cell(input_sheet, input_row_index, 1, _INPUT_VALUE_STRING, data_style='fiat_unit_7')
            row_indexes[_INPUT] = (input_row_index + 1)
            _vlookup_formula: str = ''
            _lookup_field: str = ''
            for (holder, holder_crypto_balance) in asset_crypto_balance_holder[asset].items():
                holder_cost_basis: RP2Decimal = (holder_crypto_balance * unit_cost_basis)
                asset_sheet.append_rows(1)
                asset_row_index: int = row_indexes[_ASSET]
                _vlookup_formula = f"VLOOKUP(A{(asset_row_index + 1)};${_('Input')}.A:B;2;0)"
                # NOTE(review): this IF(...) formula string appears to be
                # missing its closing ")" -- confirm against the template.
                _lookup_field = f'=IF({_vlookup_formula}="{_INPUT_VALUE_STRING}";"{_REPORT_INPUT_VALUE_STRING}";{_vlookup_formula}'
                self._fill_cell(asset_sheet, asset_row_index, 0, asset)
                self._fill_cell(asset_sheet, asset_row_index, 1, holder)
                self._fill_cell(asset_sheet, asset_row_index, 2, holder_crypto_balance, data_style='crypto')
                self._fill_cell(asset_sheet, asset_row_index, 3, unit_cost_basis, data_style=unit_data_style)
                self._fill_cell(asset_sheet, asset_row_index, 4, holder_cost_basis, data_style='fiat')
                self._fill_cell(asset_sheet, asset_row_index, 5, (holder_cost_basis / total_cost_basis), data_style='percent')
                self._fill_cell(asset_sheet, asset_row_index, 6, _lookup_field, data_style=unit_data_style)
                self._fill_cell(asset_sheet, asset_row_index, 7, f'=C{(asset_row_index + 1)}*G{(asset_row_index + 1)}', data_style='fiat')
                self._fill_cell(asset_sheet, asset_row_index, 8, f'=H{(asset_row_index + 1)}-E{(asset_row_index + 1)}', data_style='fiat')
                self._fill_cell(asset_sheet, asset_row_index, 9, f'=(H{(asset_row_index + 1)}-E{(asset_row_index + 1)})/E{(asset_row_index + 1)}', data_style='percent')
                row_indexes[_ASSET] = (asset_row_index + 1)
            for (holder, exchanges) in asset_crypto_balance_holder_exchange[asset].items():
                for (exchange, crypto_exchange_balance) in exchanges.items():
                    exchange_cost_basis: RP2Decimal = (crypto_exchange_balance * unit_cost_basis)
                    asset_exchange_sheet.append_rows(1)
                    asset_exchange_row_index: int = row_indexes[_ASSET_EXCHANGE]
                    _vlookup_formula = f"VLOOKUP(A{(asset_exchange_row_index + 1)};${_('Input')}.A:B;2;0)"
                    _lookup_field = f'=IF({_vlookup_formula}="{_INPUT_VALUE_STRING}";"{_REPORT_INPUT_VALUE_STRING}";{_vlookup_formula}'
                    self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 0, asset)
                    self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 1, holder)
                    self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 2, exchange)
                    self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 3, crypto_exchange_balance, data_style='crypto')
                    self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 4, unit_cost_basis, data_style=unit_data_style)
                    self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 5, exchange_cost_basis, data_style='fiat')
                    self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 6, (exchange_cost_basis / total_cost_basis), data_style='percent')
                    self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 7, _lookup_field, data_style=unit_data_style)
                    self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 8, f'=D{(asset_exchange_row_index + 1)}*H{(asset_exchange_row_index + 1)}', data_style='fiat')
                    self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 9, f'=I{(asset_exchange_row_index + 1)}-F{(asset_exchange_row_index + 1)}', data_style='fiat')
                    self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 10, f'=(I{(asset_exchange_row_index + 1)}-F{(asset_exchange_row_index + 1)})/F{(asset_exchange_row_index + 1)}', data_style='percent')
                    row_indexes[_ASSET_EXCHANGE] = (asset_exchange_row_index + 1)
        # Pass 3: weight columns, which need the final data-row extent.
        asset_row_index = row_indexes[_ASSET]
        for row_idx in range(self.HEADER_ROWS, row_indexes[_ASSET]):
            self._fill_cell(asset_sheet, row_idx, 10, f'=I{(row_idx + 1)}/SUM(E${(self.HEADER_ROWS + 1)}:E${asset_row_index})', data_style='percent')
            self._fill_cell(asset_sheet, row_idx, 11, f'=H{(row_idx + 1)}/SUM(H${(self.HEADER_ROWS + 1)}:H${asset_row_index})', data_style='percent')
        asset_exchange_row_index = row_indexes[_ASSET_EXCHANGE]
        for row_idx in range(self.HEADER_ROWS, row_indexes[_ASSET_EXCHANGE]):
            self._fill_cell(asset_exchange_sheet, row_idx, 11, f'=J{(row_idx + 1)}/SUM(F${(self.HEADER_ROWS + 1)}:F${asset_exchange_row_index})', data_style='percent')
            self._fill_cell(asset_exchange_sheet, row_idx, 12, f'=I{(row_idx + 1)}/SUM(I${(self.HEADER_ROWS + 1)}:I${asset_exchange_row_index})', data_style='percent')
        # Pass 4: per-holder totals (only when there are multiple holders)
        # and grand totals, built with SUMIF/SUM over the data rows.
        last_data_row_indexes = row_indexes.copy()
        if (len(holders) > 1):
            for holder in holders:
                asset_sheet.append_rows(1)
                asset_row_index = row_indexes[_ASSET]
                last_data_index = last_data_row_indexes[_ASSET]
                self._fill_cell(asset_sheet, asset_row_index, 0, _('Total'), visual_style='bold_border')
                self._fill_cell(asset_sheet, asset_row_index, 1, holder, visual_style='bold_border')
                self._fill_cell(asset_sheet, asset_row_index, 2, '', visual_style='bold_border')
                self._fill_cell(asset_sheet, asset_row_index, 3, '', visual_style='bold_border')
                self._fill_cell(asset_sheet, asset_row_index, 4, f'=SUMIF(B${(self.HEADER_ROWS + 1)}:B${last_data_index};"{holder}";E${(self.HEADER_ROWS + 1)}:E${last_data_index})', visual_style='bold_border', data_style='fiat')
                self._fill_cell(asset_sheet, asset_row_index, 5, '', visual_style='bold_border')
                self._fill_cell(asset_sheet, asset_row_index, 6, '', visual_style='bold_border')
                self._fill_cell(asset_sheet, asset_row_index, 7, f'=SUMIF(B${(self.HEADER_ROWS + 1)}:B${last_data_index};"{holder}";H${(self.HEADER_ROWS + 1)}:H${last_data_index})', visual_style='bold_border', data_style='fiat')
                self._fill_cell(asset_sheet, asset_row_index, 8, f'=SUMIF(B${(self.HEADER_ROWS + 1)}:B${last_data_index};"{holder}";I${(self.HEADER_ROWS + 1)}:I${last_data_index})', visual_style='bold_border', data_style='fiat')
                self._fill_cell(asset_sheet, asset_row_index, 9, f'=(H{(asset_row_index + 1)}-E{(asset_row_index + 1)})/E{(asset_row_index + 1)}', visual_style='bold_border', data_style='percent')
                self._fill_cell(asset_sheet, asset_row_index, 10, '', visual_style='bold_border')
                self._fill_cell(asset_sheet, asset_row_index, 11, '', visual_style='bold_border')
                row_indexes[_ASSET] = (asset_row_index + 1)
        asset_sheet.append_rows(1)
        asset_row_index = row_indexes[_ASSET]
        last_data_index = last_data_row_indexes[_ASSET]
        self._fill_cell(asset_sheet, asset_row_index, 0, _('Grand Total'), visual_style='bold_border')
        self._fill_cell(asset_sheet, asset_row_index, 1, '', visual_style='bold_border')
        self._fill_cell(asset_sheet, asset_row_index, 2, '', visual_style='bold_border')
        self._fill_cell(asset_sheet, asset_row_index, 3, '', visual_style='bold_border')
        self._fill_cell(asset_sheet, asset_row_index, 4, f'=SUM(E${(self.HEADER_ROWS + 1)}:E${last_data_index})', visual_style='bold_border', data_style='fiat')
        self._fill_cell(asset_sheet, asset_row_index, 5, '', visual_style='bold_border')
        self._fill_cell(asset_sheet, asset_row_index, 6, '', visual_style='bold_border')
        self._fill_cell(asset_sheet, asset_row_index, 7, f'=SUM(H${(self.HEADER_ROWS + 1)}:H${last_data_index})', visual_style='bold_border', data_style='fiat')
        self._fill_cell(asset_sheet, asset_row_index, 8, f'=SUM(I${(self.HEADER_ROWS + 1)}:I${last_data_index})', visual_style='bold_border', data_style='fiat')
        self._fill_cell(asset_sheet, asset_row_index, 9, f'=(H{(asset_row_index + 1)}-E{(asset_row_index + 1)})/E{(asset_row_index + 1)}', visual_style='bold_border', data_style='percent')
        self._fill_cell(asset_sheet, asset_row_index, 10, '', visual_style='bold_border')
        self._fill_cell(asset_sheet, asset_row_index, 11, '', visual_style='bold_border')
        row_indexes[_ASSET] = (asset_row_index + 1)
        if (len(holders) > 1):
            for holder in holders:
                asset_exchange_sheet.append_rows(1)
                asset_exchange_row_index = row_indexes[_ASSET_EXCHANGE]
                last_data_index = last_data_row_indexes[_ASSET_EXCHANGE]
                self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 0, _('Total'), visual_style='bold_border')
                self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 1, holder, visual_style='bold_border')
                self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 2, '', visual_style='bold_border')
                self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 3, '', visual_style='bold_border')
                self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 4, '', visual_style='bold_border')
                self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 5, f'=SUMIF(B${(self.HEADER_ROWS + 1)}:B${last_data_index};"{holder}";F${(self.HEADER_ROWS + 1)}:F${last_data_index})', visual_style='bold_border', data_style='fiat')
                self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 6, '', visual_style='bold_border')
                self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 7, '', visual_style='bold_border')
                self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 8, f'=SUMIF(B${(self.HEADER_ROWS + 1)}:B${last_data_index};"{holder}";I${(self.HEADER_ROWS + 1)}:I${last_data_index})', visual_style='bold_border', data_style='fiat')
                self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 9, f'=SUMIF(B${(self.HEADER_ROWS + 1)}:B${last_data_index};"{holder}";J${(self.HEADER_ROWS + 1)}:J${last_data_index})', visual_style='bold_border', data_style='fiat')
                self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 10, f'=(I{(asset_exchange_row_index + 1)}-F{(asset_exchange_row_index + 1)})/F{(asset_exchange_row_index + 1)}', visual_style='bold_border', data_style='percent')
                self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 11, '', visual_style='bold_border')
                self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 12, '', visual_style='bold_border')
                row_indexes[_ASSET_EXCHANGE] = (asset_exchange_row_index + 1)
        asset_exchange_sheet.append_rows(1)
        asset_exchange_row_index = row_indexes[_ASSET_EXCHANGE]
        last_data_index = last_data_row_indexes[_ASSET_EXCHANGE]
        self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 0, _('Grand Total'), visual_style='bold_border')
        self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 1, '', visual_style='bold_border')
        self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 2, '', visual_style='bold_border')
        self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 3, '', visual_style='bold_border')
        self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 4, '', visual_style='bold_border')
        self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 5, f'=SUM(F${(self.HEADER_ROWS + 1)}:F${last_data_index})', visual_style='bold_border', data_style='fiat')
        self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 6, '', visual_style='bold_border')
        self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 7, '', visual_style='bold_border')
        self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 8, f'=SUM(I${(self.HEADER_ROWS + 1)}:I${last_data_index})', visual_style='bold_border', data_style='fiat')
        self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 9, f'=SUM(J${(self.HEADER_ROWS + 1)}:J${last_data_index})', visual_style='bold_border', data_style='fiat')
        self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 10, f'=(I{(asset_exchange_row_index + 1)}-F{(asset_exchange_row_index + 1)})/F{(asset_exchange_row_index + 1)}', visual_style='bold_border', data_style='percent')
        self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 11, '', visual_style='bold_border')
        self._fill_cell(asset_exchange_sheet, asset_exchange_row_index, 12, '', visual_style='bold_border')
        row_indexes[_ASSET_EXCHANGE] = (asset_exchange_row_index + 1)
        # Localize the visible sheet names and save.
        asset_sheet.name = _('Asset')
        asset_exchange_sheet.name = _('Asset - Exchange')
        input_sheet.name = _('Input')
        output_file.save()
        LOGGER.info("Plugin '%s' output: %s", __name__, Path(output_file.docname).resolve())
def test_underscore_execute_fall_back_remote_attributes(remote, mock_wf_exec):
    """_execute must forward Options security/raw-output settings to create_execution."""
    mock_wf_exec.return_value = True
    mock_client = MagicMock()
    remote._client = mock_client
    options = Options(
        raw_output_data_config=common_models.RawOutputDataConfig(output_location_prefix='raw_output'),
        security_context=security.SecurityContext(run_as=security.Identity(iam_role='iam:some:role')),
    )

    def verify_create_execution(*args, **kwargs):
        # create_execution receives the execution spec as its fourth positional arg.
        spec = args[3]
        assert spec.security_context.run_as.iam_role == 'iam:some:role'
        assert spec.raw_output_data_config.output_location_prefix == 'raw_output'

    mock_client.create_execution.side_effect = verify_create_execution
    mock_entity = MagicMock()
    remote._execute(mock_entity, inputs={}, project='proj', domain='dev', options=options)
def process_specimen(fasm_file, params_json):
    """Cross-check IOB site parameters decoded from FASM against a params JSON.

    For every tile/site entry in `params_json`, verifies that the
    FASM-decoded site agrees on primitive type, PULLTYPE, IOSTANDARD,
    SLEW and DRIVE.  Returns the number of sites checked.

    Raises:
        AssertionError: on any mismatch between the JSON parameters and the
            FASM-decoded site data.
    """
    (sites, diff_tiles) = create_sites_from_fasm(fasm_file)
    with open(params_json) as f:
        params = json.load(f)
    count = 0
    for p in params['tiles']:
        tile = p['tile']
        for site in p['site'].split(' '):
            # Parity of the site's Y coordinate selects IOB_Y0 / IOB_Y1.
            site_y = (int(site[(site.find('Y') + 1):]) % 2)
            if generate.skip_broken_tiles(p):
                continue
            site_key = 'IOB_Y{}'.format(site_y)
            if ((tile, site_key) not in sites):
                # Site absent from FASM: the JSON must not claim a primitive.
                assert (p['type'] is None), p
                continue
            site_from_fasm = sites[(tile, site_key)]
            if ((site_y == 0) or (tile not in diff_tiles)):
                assert (p['type'] in site_from_fasm['type']), (tile, site_key, p['type'], site_from_fasm['type'])
            else:
                # Y1 site of a differential tile carries no primitive of its own.
                assert (p['type'] is None), p
            if (p['type'] is None):
                continue
            assert ('PULLTYPE' in p), p
            assert ('PULLTYPE' in site_from_fasm), site_from_fasm
            # An empty PULLTYPE in the JSON means the default, 'NONE'.
            if (verilog.unquote(p['PULLTYPE']) == ''):
                pulltype = verilog.quote('NONE')
            else:
                pulltype = p['PULLTYPE']
            assert (pulltype == site_from_fasm['PULLTYPE']), (tile, site_key, p, site_from_fasm)
            assert ('IOSTANDARDS' in site_from_fasm), (tile, site)
            # FASM-side IOSTANDARDS are single-ended names; strip DIFF_ prefix.
            iostandard = verilog.unquote(p['IOSTANDARD'])
            if iostandard.startswith('DIFF_'):
                iostandard = iostandard[5:]
            assert (iostandard in site_from_fasm['IOSTANDARDS']), (p['IOSTANDARD'], site_from_fasm['IOSTANDARDS'])
            # SLEW/DRIVE only apply to output-capable primitives.
            if (p['type'] not in ['IBUF', 'IBUFDS']):
                # Empty SLEW in the JSON means the default, 'SLOW'.
                if (verilog.unquote(p['SLEW']) == ''):
                    slew = verilog.quote('SLOW')
                else:
                    slew = p['SLEW']
                assert (slew == site_from_fasm['SLEW']), (tile, site_key, p, site_from_fasm)
                # 'DRIVES' (plural) lives only on the FASM side; the JSON has 'DRIVE'.
                assert ('DRIVES' not in p), p
                assert ('DRIVES' in site_from_fasm), (tile, site, p['type'], site_from_fasm)
                if (p['DRIVE'] is None):
                    assert (None in site_from_fasm['DRIVES']), (tile, site_key, p['DRIVE'], site_from_fasm['DRIVES'])
                elif (p['DRIVE'] == ''):
                    # Empty DRIVE: accept either the default entry or I12.
                    if (None in site_from_fasm['DRIVES']):
                        pass
                    else:
                        assert ('I12' in site_from_fasm['DRIVES']), (tile, site_key, p['DRIVE'], site_from_fasm['DRIVES'])
                else:
                    assert ('I{}'.format(p['DRIVE']) in site_from_fasm['DRIVES']), (tile, site_key, p['DRIVE'], site_from_fasm['DRIVES'])
            count += 1
    return count
class UpDownButtonDelegate(QItemDelegate):
    """Item delegate that hosts a pair of up/down buttons in each cell.

    The buttons live in a persistent editor widget; pressing one emits
    ``clicked(action, index)`` with ``action`` set to :data:`UP` or
    :data:`DOWN` and ``index`` identifying the cell.
    """
    # Signal arguments: direction (UP or DOWN) and the clicked model index.
    clicked = pyqtSignal(int, QtCore.QModelIndex)
    UP = (- 1)
    DOWN = 1
    def paint(self, painter, option, index):
        # Instead of drawing anything, make sure the persistent editor (the
        # button pair) is open for this cell so it stays visible/clickable.
        # NOTE(review): default item painting is intentionally suppressed here.
        if (isinstance(self.parent(), QAbstractItemView) and (self.parent().model() is index.model())):
            self.parent().openPersistentEditor(index)
    def createEditor(self, parent, option, index):
        # Container stacking two flat buttons vertically with no margins.
        w = QWidget(parent)
        w.setContentsMargins(0, 0, 0, 0)
        w.setAutoFillBackground(True)
        layout = QVBoxLayout(w)
        layout.setContentsMargins(0, 0, 0, 0)
        btnUp = QPushButton(parent)
        # NOTE(review): button labels are empty strings -- possibly lost
        # arrow glyphs; confirm whether icons/text were intended.
        btnUp.setText('')
        btnUp.setFlat(True)
        btnUp.clicked.connect((lambda : self._cb_button_clicked(self.UP, index)))
        btnDown = QPushButton(parent)
        btnDown.setText('')
        btnDown.setFlat(True)
        btnDown.clicked.connect((lambda : self._cb_button_clicked(self.DOWN, index)))
        layout.addWidget(btnUp)
        layout.addWidget(btnDown)
        return w
    def _cb_button_clicked(self, action, idx):
        # Re-emit the raw button press as a single signal with direction info.
        self.clicked.emit(action, idx)
    def updateEditorGeometry(self, editor, option, index):
        rect = QtCore.QRect(option.rect)
        # Never let the editor become narrower than the buttons require.
        minWidth = editor.minimumSizeHint().width()
        if (rect.width() < minWidth):
            rect.setWidth(minWidth)
        editor.setGeometry(rect)
        # Clip the (possibly widened) editor back to the cell rectangle.
        mask = QRegion(0, 0, option.rect.width(), option.rect.height())
        editor.setProperty('offMask', mask)
        editor.setMask(mask)
def get_episodes(html, url):
    """Parse the chapter links of a manhua series out of a listing page.

    Args:
        html: Raw HTML of the series page.
        url: URL of the series page; the series id is taken from its path and
            relative episode links are resolved against it.

    Returns:
        List of Episode objects, one per matched chapter link, in page order.
    """
    series_id = re.search('manhua/([^/]+)', url).group(1)
    # re.escape keeps regex metacharacters in the slug from corrupting the
    # pattern; \. matches only a literal dot before "html".
    pattern = '<a href="(/manhua/{}/\\d+\\.html)"[^>]+?>\\s*<span>([^<]+)'.format(re.escape(series_id))

    def create_ep(match):
        (ep_url, title) = [unescape(t) for t in match.groups()]
        return Episode(title, urljoin(url, ep_url))

    return [create_ep(m) for m in re.finditer(pattern, html)]
class DynamicGroup(lg.Group):
    """labgraph Group whose child nodes, topics and connections are declared at runtime.

    NOTE(review): the mutators take ``cls`` as first argument but are not
    decorated ``@classmethod`` -- callers apparently invoke them on the class
    object and pass it explicitly; confirm against call sites before changing.
    """
    # NOTE(review): class-level mutable defaults are shared by every subclass
    # that does not redefine them -- verify this sharing is intended.
    _connections: List[List[str]] = []
    _configs: dict = {}
    def add_node(cls, name: str, _type: type, connection: List[str]=None, config: lg.Config=None) -> None:
        # Register a child node slot plus its type annotation, optionally
        # recording a connection quadruple and a config for later setup().
        setattr(cls, name, None)
        cls.__annotations__[name] = _type
        cls.__children_types__[name] = _type
        if connection:
            cls._connections.append(connection)
        if config:
            cls._configs[name] = config
    def add_connection(cls, connection: List[str]) -> None:
        # connection format: [node_a, attr_a, node_b, attr_b]; see connections().
        cls._connections.append(connection)
    def add_topic(cls, name: str, topic: lg.Topic) -> None:
        # Attach a topic object directly as a class attribute.
        setattr(cls, name, topic)
    def setup(self) -> None:
        # Push each recorded config into the matching child node instance.
        for key in type(self)._configs:
            self.__getattribute__(key).configure(type(self)._configs[key])
    def connections(self) -> lg.Connections:
        # Resolve the recorded [node1, attr1, node2, attr2] name quadruples
        # into concrete (endpoint, endpoint) pairs.
        cons = []
        for con_list in type(self)._connections:
            node1: lg.Node = self.__getattribute__(con_list[0])
            node2: lg.Node = self.__getattribute__(con_list[2])
            cons.append((node1.__getattribute__(con_list[1]), node2.__getattribute__(con_list[3])))
        return tuple(cons)
class AbstractGroupForumPermission(BaseAuthForumPermission):
    """Abstract model granting a forum permission to an entire auth Group."""

    group = models.ForeignKey(Group, on_delete=models.CASCADE, verbose_name=_('Group'))

    class Meta:
        abstract = True
        unique_together = ('permission', 'forum', 'group')
        app_label = 'forum_permission'
        verbose_name = _('Group forum permission')
        verbose_name_plural = _('Group forum permissions')

    def __str__(self):
        """Render as "permission - group" plus the forum when one is set."""
        parts = [self.permission, self.group]
        if self.forum:
            parts.append(self.forum)
        return ' - '.join(str(part) for part in parts)
@pytest.mark.parametrize('cabin', [None, 'letter_only', 'drop'])
def test_cabin(cabin):
    """Check each supported `cabin` mode of load_titanic.

    The original decorator line was corrupted and the parametrized value was
    ignored (all three modes ran in every invocation); each mode is now
    exercised by its own parametrized case.
    """
    data = load_titanic(cabin=cabin)
    if cabin is None:
        # Raw cabin strings are preserved.
        assert ('cabin' in data.columns)
        assert (list(data['cabin'].head(4).values) == ['B5', 'C22 C26', 'C22 C26', 'C22 C26'])
    elif cabin == 'letter_only':
        # Only the deck letter is kept.
        assert (list(data['cabin'].head(4).values) == ['B', 'C', 'C', 'C'])
    else:
        # 'drop' removes the column entirely.
        assert ('cabin' not in data.columns)
class ConstantShrinkWidgets(QtWidgets.QDialog):
    """Modeless settings dialog for the "constant shrink" operation.

    Collects a shrink mode, optional lists of op/constant names to force
    extraction of, and an auto_downcast flag; the parsed result is exposed
    via :meth:`get_properties`.
    """
    # Fixed dialog width in pixels.
    _DEFAULT_WINDOW_WIDTH = 500
    def __init__(self, parent=None) -> None:
        super().__init__(parent)
        self.setModal(False)
        self.setWindowTitle('constant shrink')
        self.initUI()
    def initUI(self):
        """Build all widgets and lay them out."""
        self.setFixedWidth(self._DEFAULT_WINDOW_WIDTH)
        set_font(self, font_size=BASE_FONT_SIZE)
        base_layout = QtWidgets.QVBoxLayout()
        layout = QtWidgets.QFormLayout()
        layout.setLabelAlignment(QtCore.Qt.AlignRight)
        lbl_mode = QtWidgets.QLabel('mode')
        set_font(lbl_mode, font_size=LARGE_FONT_SIZE, bold=True)
        self.cmb_mode = QtWidgets.QComboBox()
        # Populate the combo with every supported shrink mode.
        for m in MODE:
            self.cmb_mode.addItem(m)
        lbl_forced_extraction_op_names = QtWidgets.QLabel('forced_extraction_op_names')
        set_font(lbl_forced_extraction_op_names, font_size=LARGE_FONT_SIZE, bold=True)
        # Free-text field expected to hold a Python list literal; it is parsed
        # with literal_eval in get_properties().
        self.tb_forced_extraction_op_names = QtWidgets.QLineEdit()
        self.tb_forced_extraction_op_names.setPlaceholderText("e.g. ['aaa','bbb','ccc']")
        lbl_forced_extraction_constant_names = QtWidgets.QLabel('forced_extraction_constant_names')
        set_font(lbl_forced_extraction_constant_names, font_size=LARGE_FONT_SIZE, bold=True)
        self.tb_forced_extraction_constant_names = QtWidgets.QLineEdit()
        self.tb_forced_extraction_constant_names.setPlaceholderText("e.g. ['aaa','bbb','ccc']")
        layout.addRow(lbl_mode, self.cmb_mode)
        layout.addRow(lbl_forced_extraction_op_names, self.tb_forced_extraction_op_names)
        layout.addRow(lbl_forced_extraction_constant_names, self.tb_forced_extraction_constant_names)
        layout2 = QtWidgets.QVBoxLayout()
        self.check_auto_downcast = QtWidgets.QCheckBox('auto_downcast')
        self.check_auto_downcast.setChecked(True)
        layout2.addWidget(self.check_auto_downcast)
        layout2.setAlignment(self.check_auto_downcast, QtCore.Qt.AlignRight)
        base_layout.addLayout(layout)
        base_layout.addLayout(layout2)
        btn = QtWidgets.QDialogButtonBox((QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel))
        btn.accepted.connect(self.accept)
        btn.rejected.connect(self.reject)
        base_layout.addWidget(btn)
        self.setLayout(base_layout)
    def get_properties(self) -> ConstantShrinkProperties:
        """Parse current widget state into a ConstantShrinkProperties.

        Raises:
            Exception: when either line edit holds text that is not a valid
                Python literal (propagated from literal_eval).
        """
        mode = self.cmb_mode.currentText()
        forced_extraction_op_names = []
        forced_extraction_constant_names = []
        # The checkbox expresses "enable"; the property expresses "disable".
        disable_auto_downcast = (not self.check_auto_downcast.isChecked())
        op_names = self.tb_forced_extraction_op_names.text()
        if op_names:
            try:
                forced_extraction_op_names = literal_eval(op_names)
            except Exception as e:
                # Propagate so accept() can report the invalid input.
                raise e
        constant_names = self.tb_forced_extraction_constant_names.text()
        if constant_names:
            try:
                forced_extraction_constant_names = literal_eval(constant_names)
            except Exception as e:
                raise e
        return ConstantShrinkProperties(mode=mode, forced_extraction_op_names=forced_extraction_op_names, forced_extraction_constant_names=forced_extraction_constant_names, disable_auto_downcast=disable_auto_downcast)
    def accept(self) -> None:
        """Validate the form; close the dialog only when everything parses."""
        invalid = False
        try:
            props = self.get_properties()
            print(props)
            err_msgs = []
        except Exception as e:
            # literal_eval failed; keep the dialog open.
            print(e)
            return
        # Defensive check: the combo is populated from MODE in initUI, so
        # this branch should not normally trigger.
        if (not (props.mode in MODE)):
            err_msgs.append(f"- mode is select from {'or'.join(MODE)}")
            invalid = True
        if invalid:
            for m in err_msgs:
                print(m)
            MessageBox.error(err_msgs, 'constant shrink', parent=self)
            return
        return super().accept()
class WeevelyFormatter(logging.Formatter):
    """Log formatter that selects a format string per log level.

    DEBUG/WARNING/ERROR/CRITICAL records are tagged with a short marker and
    the originating module; INFO records pass through as the bare message;
    any other level falls back to the DEFAULT entry.
    """

    FORMATS = {logging.DEBUG: '[D][%(module)s] %(message)s', logging.INFO: '%(message)s', logging.WARNING: '[-][%(module)s] %(message)s', logging.ERROR: '[!][%(module)s] %(message)s', logging.CRITICAL: '[!][%(module)s] %(message)s', 'DEFAULT': '[%(levelname)s] %(message)s'}

    def format(self, record):
        """Format *record* with the format string for its level."""
        fmt = self.FORMATS.get(record.levelno, self.FORMATS['DEFAULT'])
        # On Python 3, logging.Formatter delegates formatting to self._style,
        # so updating self._fmt alone is silently ignored; update both so the
        # per-level format actually takes effect.
        self._style._fmt = fmt
        self._fmt = fmt
        return logging.Formatter.format(self, record)
def radio_menu_item(name: str, after, display_name: str, groupname, selected_func, callback) -> 'RadioMenuItem':
    """Build a RadioMenuItem descriptor whose Gtk widget is created lazily.

    Args:
        name: Identifier passed back to ``selected_func`` and ``callback``.
        after: Ordering hint forwarded to :class:`RadioMenuItem`.
        display_name: Mnemonic label shown in the menu.
        groupname: Radio group key; items sharing it join one Gtk radio group.
        selected_func: ``f(name, parent, context) -> bool`` giving the
            initial active state.
        callback: 'activate' handler, invoked by Gtk as
            ``callback(widget, name, parent, context)``.

    Returns:
        A RadioMenuItem descriptor wrapping the widget factory.
    """
    def factory(menu, parent, context):
        # Look for an already-realized widget of the same radio group so the
        # new widget can join its Gtk group; fall back to starting a new one.
        # (Fix: group_parent was previously unbound when no item of this
        # group existed yet, raising UnboundLocalError.)
        group_parent = None
        for (index, item) in enumerate(menu._items):
            if (hasattr(item, 'groupname') and (item.groupname == groupname)):
                try:
                    candidate = menu.get_children()[index]
                except IndexError:
                    # Descriptor registered but widget not realized yet.
                    candidate = None
                # The slot may hold a non-radio widget; treat that as no group.
                if isinstance(candidate, Gtk.RadioMenuItem):
                    group_parent = candidate
                break
        group = group_parent.get_group() if group_parent else None
        item = Gtk.RadioMenuItem.new_with_mnemonic(group, display_name)
        item.set_active(selected_func(name, parent, context))
        item.connect('activate', callback, name, parent, context)
        return item
    return RadioMenuItem(name, factory, after=after, groupname=groupname)
def __nfur_func(topology, edges, betweenness):
    """Compute per-node NFUR values over a set of single-edge failures.

    For each edge in *edges*, the edge is temporarily removed from a working
    copy of *topology*, betweenness centrality is recomputed, and each node's
    running maximum (seeded from *betweenness*) is updated.
    """
    nfur = betweenness.copy()
    graph = topology.copy()
    for (u, v) in edges:
        # Remember the edge attributes so the edge can be restored verbatim.
        attrs = graph.adj[u][v]
        graph.remove_edge(u, v)
        partial = nx.betweenness_centrality(graph, normalized=False, weight='weight')
        for (node, value) in partial.items():
            nfur[node] = max(nfur[node], value)
        graph.add_edge(u, v, **attrs)
    return nfur
def external_h_top_matter(out, name):
    """Emit the top matter of the external LOCI header to *out*.

    Writes the shared top matter (via common_top_matter) followed by the
    fixed LOCI include block and a comment describing the header's sections.

    Args:
        out: Writable file-like object receiving the generated header text.
        name: Name forwarded to ``common_top_matter``.
    """
    common_top_matter(out, name)
    out.write('\n#include <loci/loci_base.h>\n#include <loci/of_message.h>\n#include <loci/of_match.h>\n#include <loci/of_object.h>\n#include <loci/loci_classes.h>\n#include <loci/loci_class_metadata.h>\n\n/\n *\n * This file is divided into the following sections.\n *\n * A few object specific macros\n * Class typedefs (no struct definitions)\n * Per-data type accessor function typedefs\n * Per-class new/delete function typedefs\n * Per-class static delete functions\n * Per-class, per-member accessor declarations\n * Per-class structure definitions\n * Pointer set function declarations\n * Some special case macros\n *\n /\n')
class TestHTMLFormRenderer(TestCase):
    """Smoke tests for HTMLFormRenderer against a minimal serializer."""

    def setUp(self):
        # Minimal serializer with one required field; validated with empty
        # data so rendering exercises the error path too.
        class TestSerializer(serializers.Serializer):
            test_field = serializers.CharField()

        self.renderer = HTMLFormRenderer()
        self.serializer = TestSerializer(data={})

    def test_render_with_default_args(self):
        # Render with only the data argument; the result must be marked safe.
        self.serializer.is_valid()
        result = self.renderer.render(self.serializer.data)
        self.assertIsInstance(result, SafeText)

    def test_render_with_provided_args(self):
        # Explicitly pass accepted_media_type / renderer_context; the
        # SafeText contract must hold either way.
        self.serializer.is_valid()
        result = self.renderer.render(self.serializer.data, None, {})
        self.assertIsInstance(result, SafeText)
def _slice_cube_v2_resample(self, cube, zsurf=None, sampling='nearest', mask=True, deadtraces=True):
    """Slice *cube* onto this surface, resampling through cube geometry if needed.

    If this surface already shares the cube's lateral topology, the slice is
    done directly.  Otherwise a template surface aligned with the cube is
    used as an intermediary and the sliced values are resampled back onto
    this surface.

    Returns:
        The status value returned by ``_slice_cube_v2``.
    """
    # Template surface with the cube's own geometry (constant depth 0).
    scube = xtgeo.surface_from_cube(cube, 0)
    if self.compare_topology(scube, strict=False):
        # Fast path: geometries already match, slice in place.
        return _slice_cube_v2(self, cube, zsurf, sampling, mask, deadtraces)
    # Carry this surface's values onto the cube-aligned template.
    scube.resample(self)
    zcube = None
    if zsurf:
        # The z-surface must be on the cube geometry as well.
        zcube = scube.copy()
        zcube.resample(zsurf)
    istat = _slice_cube_v2(scube, cube=cube, zsurf=zcube, sampling=sampling, mask=mask, deadtraces=deadtraces)
    # Bring the sliced values back onto this surface's original geometry.
    self.resample(scube, mask=mask)
    return istat
def extractCavendishclubWordpressCom(item):
    """Map a cavendishclub.wordpress.com feed item to a release message.

    Returns None for manga posts, previews or items without a chapter/volume,
    a release message for recognised series tags, and False otherwise.
    """
    badwords = ['Manga']
    tags = item['tags']
    if any(bad in tags for bad in badwords):
        return None
    title = item['title']
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(title)
    if not (chp or vol) or 'preview' in title.lower():
        return None
    series_map = [('The Nonsensical Witch and the Land of Fairies', 'The Nonsensical Witch and the Land of Fairies', 'translated'), ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for (tagname, name, tl_type) in series_map:
        if tagname in tags:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def extractDrizzlebumreadsHomeBlog(item):
    """Map a drizzlebumreads feed item to a release message.

    Returns None for previews or items without a chapter/volume, a release
    message for recognised series tags, and False otherwise.
    """
    title = item['title']
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(title)
    if (not chp and not vol) or 'preview' in title.lower():
        return None
    known_series = {'Sect Master and Psycho': ('Sect Master and Psycho', 'translated'), 'PRC': ('PRC', 'translated'), 'Loiterous': ('Loiterous', 'oel')}
    for tagname, (name, tl_type) in known_series.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class TestIssueCertificatesWrongConnectionKey(BaseTestIssueCertificates):
    """Check that 'issue-certificates' fails when the cert request names an
    unknown connection key id."""
    def setup_class(cls):
        # NOTE(review): defined without @classmethod; pytest invokes
        # setup_class with the class object, matching the base class style.
        super().setup_class()
        cls.cert_id_1 = 'cert_id_1'
        # public_key deliberately references an id with no matching private
        # key, which must make the CLI command fail.
        cls.cert_request_1 = CertRequest(identifier=cls.cert_id_1, ledger_id=FetchAICrypto.identifier, not_after='2020-01-02', not_before='2020-01-01', public_key='bad_ledger_id', message_format='{public_key}', save_path='path')
        cls.add_cert_requests([cls.cert_request_1], DummyConnection.connection_id.name)
    def test_run(self):
        # The command must surface the missing-key error verbatim.
        with pytest.raises(Exception, match="Cannot find connection private key with id 'bad_ledger_id'"):
            self.run_cli_command('issue-certificates', cwd=self._get_cwd())
def custom_build_submit(copr, package, copr_dir=None):
    """Submit a webhook-triggered rebuild of *package* in *copr*.

    Returns:
        A ``(body, status)`` tuple: the new build id plus newline with 200 on
        success, or an error token with 500 on failure.
    """
    # Stash the incoming hook payload so the build can consume it later.
    storage = HookContentStorage()
    if (not copr.active_copr_chroots):
        return ('NO_ACTIVE_CHROOTS_IN_PROJECT\n', 500)
    try:
        build = BuildsLogic.rebuild_package(package, storage.rebuild_dict(), copr_dir=copr_dir)
        db.session.commit()
    except Exception:
        log.exception('can not submit build from webhook')
        # The build never started, so drop the stored payload.
        storage.delete()
        return ('BUILD_REQUEST_ERROR\n', 500)
    return ((str(build.id) + '\n'), 200)
def test_error_split_col_not_in_df(df):
    """fit() must raise ValueError when split_col is absent from the frame."""
    transformer = DropHighPSIFeatures(variables=None, split_col='var_0')
    # Drop the split column so the transformer cannot find it.
    frame_without_col = df.copy().drop(['var_0'], axis=1)
    expected_msg = 'var_0 is not in the dataframe.'
    with pytest.raises(ValueError) as record:
        transformer.fit(frame_without_col)
    assert str(record.value) == expected_msg
class OptionSeriesSunburstSonificationTracksMappingLowpassResonance(Options):
    """Lowpass-filter resonance mapping options for sunburst sonification tracks.

    Each option is a read/write property delegating to the underlying Options
    config storage.  The original code declared each getter/setter pair as two
    plain methods with the same name, so every getter was silently shadowed by
    its setter; they are restored here as proper properties.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class UrlsRemoveQueryParamTests(TestCase):
    """Tests for remove_query_param against unicode query strings."""

    def test_valid_unicode_removed(self):
        """Removing one key keeps the others and leaves no percent-escapes."""
        url = '/?page=2345&q=%E6%9F%A5%E8%AF%A2'
        result = remove_query_param(url, 'q')
        assert 'page' in result
        assert '2345' in result
        assert '%' not in result

    def test_invalid_unicode(self):
        """Removal still works when the query holds undecodable escapes."""
        url = '/?from=login&page=2&%FF%FE%3C%73%63%72%69%70%74%3E%61%6C%65%72%74%28%33%31%33%29%3C%2F%73%63%72%69%70%74%3E=1'
        assert 'from' in remove_query_param(url, 'page')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.