code stringlengths 281 23.7M |
|---|
class Strategy(object):
    """Decorator-style wrapper around a signal-generating function.

    Decorate a function mapping an OHLCV frame to (entries, exits, fig_data);
    ``backtest`` then enumerates parameter/filter combinations and runs them
    through a vectorbt portfolio.
    """

    def __init__(self, **default_parameters):
        # name -> filter factory; populated by callers before backtesting
        self.filters = {}
        self._default_parameters = default_parameters
        self.set_parameters(default_parameters)

    def __call__(self, func):
        """Decorator protocol: remember the wrapped signal function."""
        self.func = func
        return self

    def set_parameters(self, variables):
        """Reset stop attributes, then apply defaults followed by ``variables``.

        Stop attributes are removed first so a stale stop setting from a
        previous run does not leak into a parameter set that omits it.
        """
        stop_vars = ['sl_stop', 'tp_stop', 'ts_stop']
        for svar in stop_vars:
            if hasattr(self, svar):
                delattr(self, svar)
        if self._default_parameters:
            for key, val in self._default_parameters.items():
                setattr(self, key, val)
        if variables:
            for key, val in variables.items():
                setattr(self, key, val)

    def show_parameters(self):
        """Print the current value of every default-declared parameter."""
        parameters = {}
        for key in self._default_parameters:
            parameters[key] = getattr(self, key)
        print(parameters)

    # BUGFIX: the three helpers below were defined without a `self` parameter
    # but are invoked as bound methods (self._enumerate_filters(...)), which
    # shifted every argument by one slot at call time.
    def _enumerate_filters(self, ohlcv, filters):
        """Evaluate each filter factory on ``ohlcv``; return name -> (df, figures)."""
        ret = {}
        for fname, f in filters.items():
            filter_df, filter_figures = f(ohlcv)
            ret[fname] = (filter_df, filter_figures)
        return ret

    def _add_filters(self, entries, exits, fig_data, filters):
        """Cross-join every filter combination with every signal column.

        Entries are AND-ed with the filter mask; exits are repeated so both
        frames share the same (filter x parameter) column set.
        """
        for fname, (filter_df, filter_figures) in filters.items():
            filter_df.columns = filter_df.columns.set_names([(fname + '_') + n for n in filter_df.columns.names])
            entries = (filter_df.vbt.tile(entries.shape[1]).vbt & entries.vbt.repeat(filter_df.shape[1]).vbt)
            exits = exits.vbt.repeat(filter_df.shape[1])
            exits.columns = entries.columns
            # Merge the filter's plot objects into fig_data, namespaced by filter name.
            if filter_figures is not None:
                if 'figures' in filter_figures:
                    if 'figures' not in fig_data:
                        fig_data['figures'] = {}
                    for name, fig in filter_figures['figures'].items():
                        fig_data['figures'][(fname + '_') + name] = fig
                if 'overlaps' in filter_figures:
                    if 'overlaps' not in fig_data:
                        fig_data['overlaps'] = {}
                    for name, fig in filter_figures['overlaps'].items():
                        fig_data['overlaps'][(fname + '_') + name] = fig
        return entries, exits, fig_data

    def _add_stops(self, ohlcv, entries, exits, variables):
        """Apply stop-loss/take-profit/trailing-stop exits via ``stop_early``."""
        entries, exits = stop_early(ohlcv, entries, exits, variables)
        entries = entries.squeeze()
        exits = exits.squeeze()
        return entries, exits

    def backtest(self, ohlcv, variables=None, filters=None, lookback=None, plot=False,
                 signals=False, side='long', cscv_nbins=10, cscv_objective=(lambda r: r.mean()),
                 html=None, compounded=True, execution_price='close', k_colors='world', **args):
        """Run the strategy over ``ohlcv``.

        Returns a vectorbt Portfolio, or (entries, exits, fig_data) when
        ``signals`` is True. Extra ``**args`` are forwarded to
        ``vbt.Portfolio.from_signals``. Raises for ``side`` other than
        'long'/'short' (shorting is not implemented).
        """
        variables = variables or dict()
        filters = filters or dict()
        # Split stop-related variables out; they are consumed by _add_stops,
        # not by the signal-function parameter enumeration.
        variables_without_stop = copy.copy(variables)
        exit_vars = ['sl_stop', 'ts_stop', 'tp_stop', 'sl_trail']
        stop_vars = {}
        for e in exit_vars:
            if e in variables_without_stop:
                stop_vars[e] = variables[e]
                variables_without_stop.pop(e)
        ohlcv_lookback = ohlcv.iloc[-lookback:] if lookback else ohlcv
        variable_enumerate = enumerate_variables(variables_without_stop)
        if not variable_enumerate:
            variable_enumerate = [self._default_parameters]
        entries, exits, fig_data = enumerate_signal(ohlcv_lookback, self, variable_enumerate)
        if filters:
            filter_signals = self._enumerate_filters(ohlcv_lookback, filters)
            entries, exits, fig_data = self._add_filters(entries, exits, fig_data, filter_signals)
        entries, exits = self._add_stops(ohlcv_lookback, entries, exits, stop_vars)
        if signals:
            return entries, exits, fig_data
        if side == 'long':
            if not compounded:
                # Fixed position size so PnL is not reinvested.
                args['size'] = vbt.settings.portfolio['init_cash'] / ohlcv_lookback.close[0]
            assert (execution_price == 'close') or (execution_price == 'open')
            # Execute at the close, or at the next bar's open: shift(-1) aligns
            # tomorrow's open with today's signal; bfill patches the last bar.
            price = (ohlcv_lookback[execution_price] if execution_price == 'close'
                     else ohlcv_lookback[execution_price].shift(-1).bfill())
            # BUGFIX: `price` was computed but never used -- the portfolio was
            # always built on the raw column, silently ignoring the
            # execution_price='open' shift.
            portfolio = vbt.Portfolio.from_signals(price, entries.fillna(False), exits.fillna(False), **args)
        elif side == 'short':
            raise Exception('Shorting is not support yet')
        else:
            raise Exception("side should be 'long' or 'short'")
        if (plot or (html is not None)) and isinstance(entries, pd.Series):
            plot_strategy(ohlcv_lookback, entries, exits, portfolio, fig_data, html=html, k_colors=k_colors)
        elif plot and isinstance(entries, pd.DataFrame):
            # Multi-combination run: show overfitting diagnostics instead of a
            # single-strategy chart.
            cscv = CSCV(n_bins=cscv_nbins, objective=cscv_objective)
            cscv.add_daily_returns(portfolio.daily_returns())
            cscv_result = cscv.estimate_overfitting(plot=False)
            plot_combination(portfolio, cscv_result)
            plt.show()
            variable_visualization(portfolio)
        return portfolio
class CompilerIdiomHandling(PipelineStage):
    """Pipeline stage that folds instruction runs tagged as compiler idioms
    (e.g. strength-reduced multiplication/division) back into a single
    arithmetic assignment."""

    name = 'compiler-idiom-handling'
    # Tags produced by the idiom detector all start with this prefix.
    TAG_PREFIX = 'compiler_idiom: '

    def run(self, task: DecompilerTask):
        """Replace the last instruction of each tagged idiom with the
        reconstructed binary operation; earlier instructions of the run are
        left in place."""
        for basic_block in task.graph:
            for tagged_idiom in self._find_tagged_idioms(basic_block.instructions):
                new_instruction = self._get_replacement_instruction(basic_block.instructions, tagged_idiom.tag, tagged_idiom.pos_start, tagged_idiom.pos_end)
                if (not new_instruction):
                    # Operand recovery failed; keep the original instructions.
                    continue
                basic_block.replace_instruction(basic_block.instructions[tagged_idiom.pos_end], [new_instruction])

    def _find_tagged_idioms(self, instructions: List[Instruction]) -> List[TaggedIdiom]:
        """Scan the instruction list and group maximal runs of instructions
        carrying the same compiler-idiom tag into TaggedIdiom ranges."""
        result = []
        current_tag = None
        first_index_of_instruction_with_tag = None
        for (index, instruction) in enumerate(instructions):
            if (tag := self._get_compiler_idiom_tag_from_instruction(instruction)):
                if (current_tag == tag):
                    # Still inside the current run.
                    continue
                elif (current_tag is not None):
                    # A different tag starts right where the previous run ends.
                    result.append(TaggedIdiom(first_index_of_instruction_with_tag, (index - 1), current_tag))
                first_index_of_instruction_with_tag = index
                current_tag = tag
            elif (current_tag is not None):
                # Untagged instruction terminates the current run.
                result.append(TaggedIdiom(first_index_of_instruction_with_tag, (index - 1), current_tag))
                first_index_of_instruction_with_tag = None
                current_tag = None
        if current_tag:
            # The final run extends to the end of the block.
            result.append(TaggedIdiom(first_index_of_instruction_with_tag, (len(instructions) - 1), current_tag))
        return result

    def _get_compiler_idiom_tag_from_instruction(self, instruction: Instruction) -> Optional[Tag]:
        """Return the first compiler-idiom tag attached to ``instruction``, if any."""
        if instruction.tags:
            for tag in instruction.tags:
                if tag.name.startswith(self.TAG_PREFIX):
                    return tag
        return None

    def _get_replacement_instruction(self, instructions: List[Instruction], tag: Tag, first_index: int, last_index: int) -> Optional[Assignment]:
        """Build ``dest = var <op> const`` from the tag metadata, or None when
        either operand cannot be recovered."""
        var = self._get_variable_from_first_instruction(instructions[first_index], tag)
        const = self._get_constant_from_tag(tag)
        if ((not var) or (not const)):
            return None
        operation_type = self._get_operation_type_from_tag(tag)
        return Assignment(instructions[last_index].destination, BinaryOperation(operation_type, [var, const]))

    def _get_constant_from_tag(self, tag: Tag) -> Constant:
        """Parse the constant operand from ``tag.data`` ('<register>,<constant>').

        NOTE(review): implicitly returns None when the field is the literal
        string 'None'; callers rely on the falsiness check in
        _get_replacement_instruction.
        """
        if ((idiom_constant := tag.data.split(',')[1]) != 'None'):
            return Constant(int(idiom_constant), vartype=self._get_constant_type(tag))

    # 64-bit x86 registers paired with their 32-bit aliases.
    REGISTER_EQUIVALENTS = [['rax', 'eax'], ['rbx', 'ebx'], ['rcx', 'ecx'], ['rdx', 'edx'], ['rsi', 'esi'], ['rdi', 'edi'], ['rbp', 'ebp'], ['rsp', 'esp']]

    def _get_equivalent_registers(self, register: str) -> List[str]:
        """Return all register names referring to (parts of) the same register."""
        for equivalents in self.REGISTER_EQUIVALENTS:
            if (register in equivalents):
                return equivalents
        return [register]

    def _get_variable_from_first_instruction(self, instruction: Instruction, tag: Tag) -> Variable:
        """Find the variable backing the tag's register operand in the idiom's
        first instruction, falling back to the source of a copy into that
        register. Returns None (after logging) when nothing matches.

        NOTE(review): the elif re-evaluates the copy-destination lookup on
        every iteration with identical arguments; behavior preserved here,
        only documented.
        """
        registers = self._get_equivalent_registers(tag.data.split(',')[0])
        for variable in instruction.requirements:
            if any((variable.name.startswith(reg) for reg in registers)):
                return variable
            elif (variable := self._get_copy_destination_variable(instruction, registers)):
                return variable
        logging.warning(f"Couldn't get the compiler idiom variable [{tag}] from the first instruction {instruction}")

    def _get_copy_destination_variable(self, instruction: Instruction, operand_registers: List[str]) -> Variable:
        """If ``instruction`` copies a single source into one of the operand
        registers, return that source variable (implicitly None otherwise)."""
        for variable in instruction.definitions:
            if any((variable.name.startswith(reg) for reg in operand_registers)):
                if (len(instruction.requirements) == 1):
                    return instruction.requirements[0]

    # Maps the tag-name suffix (after TAG_PREFIX) to the operation it encodes.
    OPERATION_TYPES = {'multiplication': OperationType.multiply, 'unsigned_multiplication': OperationType.multiply_us, 'division': OperationType.divide, 'division unsigned': OperationType.divide_us, 'modulo': OperationType.modulo, 'modulo unsigned': OperationType.modulo_us}

    def _get_operation_type_from_tag(self, tag: Tag) -> OperationType:
        """Look up the OperationType encoded in the tag name."""
        return self.OPERATION_TYPES[tag.name[len(self.TAG_PREFIX):]]

    def _get_constant_type(self, tag: Tag) -> Integer:
        """Unsigned idioms yield uint32; everything else int32."""
        if tag.name[len(self.TAG_PREFIX):].startswith('unsigned_'):
            return Integer.uint32_t()
        return Integer.int32_t()
class PlotlyWidget(Widget):
    """Flexx widget rendering a Plotly chart from data/layout/config properties.

    NOTE(review): methods here are transpiled to JavaScript by flexx;
    ``Plotly`` is the JS global provided by the plotly.js asset, and the
    ``global`` statements tell the transpiler to treat it as such. The
    reaction decorators usually attached to these methods may have been
    stripped by extraction -- confirm against the original module.
    """

    data = event.ListProp(settable=True, doc='\n        The data (list of dicts) that describes the plot.\n        This can e.g. be the output of the Python plotly API call.\n        ')
    layout = event.DictProp(settable=True, doc='\n        The layout dict to style the plot.\n        ')
    config = event.DictProp(settable=True, doc='\n        The config for the plot.\n        ')

    def __relayout(self):
        # Resize the existing plot to the widget's current size.
        global Plotly
        (w, h) = self.size
        if (len(self.node.children) > 0):
            # Only relayout once newPlot has populated this widget's DOM node.
            Plotly.relayout(self.node, dict(width=w, height=h))

    def _init_plot(self):
        # Create the plot inside this widget's DOM node.
        global Plotly
        Plotly.newPlot(self.node, self.data, self.layout, self.config)
class TRACE_EVENT_INFO(ct.Structure):
    """ctypes mirror of the Win32 TRACE_EVENT_INFO structure (tdh.h),
    describing an ETW event's provider, decoding metadata, and the string
    offsets into the trailing buffer."""
    _fields_ = [
        ('ProviderGuid', GUID),
        ('EventGuid', GUID),
        ('EventDescriptor', ep.EVENT_DESCRIPTOR),
        ('DecodingSource', DECODING_SOURCE),
        ('ProviderNameOffset', ct.c_ulong),
        ('LevelNameOffset', ct.c_ulong),
        ('ChannelNameOffset', ct.c_ulong),
        ('KeywordsNameOffset', ct.c_ulong),
        ('TaskNameOffset', ct.c_ulong),
        ('OpcodeNameOffset', ct.c_ulong),
        ('EventMessageOffset', ct.c_ulong),
        ('ProviderMessageOffset', ct.c_ulong),
        ('BinaryXMLOffset', ct.c_ulong),
        ('BinaryXMLSize', ct.c_ulong),
        ('ActivityIDNameOffset', ct.c_ulong),
        ('RelatedActivityIDNameOffset', ct.c_ulong),
        ('PropertyCount', ct.c_ulong),
        ('TopLevelPropertyCount', ct.c_ulong),
        ('Flags', ct.c_ulong),
        # Zero-length placeholder: the real array has PropertyCount entries
        # and is addressed past the end of the fixed-size header.
        ('EventPropertyInfoArray', (EVENT_PROPERTY_INFO * 0)),
    ]
def make_composed_qualified_table_name(table_name: str, schema_name: Optional[str]=None, alias: Optional[str]=None) -> Composed:
    """Build a psycopg2 ``Composed`` for ``[schema.]table [as alias]``.

    Every name goes through ``Identifier`` so it is safely quoted.
    """
    names = [table_name] if schema_name is None else [schema_name, table_name]
    template = '.'.join(['{}'] * len(names))
    if alias is not None:
        template += ' as {}'
        names.append(alias)
    return SQL(template).format(*(Identifier(name) for name in names))
class Composite(DC):
    """Wrapper for the dc.js CompositeChart API."""

    chartFnc = 'CompositeChart'

    def x(self):
        """Not configurable on a composite chart; intentionally a no-op."""
        pass

    def xUnits(self):
        """Not configurable on a composite chart; intentionally a no-op."""
        pass

    def elasticY(self, flag):
        """Toggle elastic scaling of the y axis."""
        js_flag = JsUtils.jsConvertData(flag, None)
        return self.fnc('elasticY(%s)' % js_flag)

    def renderHorizontalGridLines(self, flag):
        """Toggle rendering of horizontal grid lines."""
        js_flag = JsUtils.jsConvertData(flag, None)
        return self.fnc('renderHorizontalGridLines(%s)' % js_flag)

    def compose(self, dc_charts):
        """Attach sub-charts to the composite; not implemented here."""
        pass
# BUGFIX: the hypothesis decorators had been reduced to bare argument lists
# ("(max_examples=250)" / "(string_value=...)"), which is invalid syntax;
# restored as @settings/@given.
@settings(max_examples=250)
@given(string_value=st.one_of(st.none(), st.text(min_size=0, max_size=256), st.binary(min_size=0, max_size=256)))
def test_encode_byte_string(string_value):
    """ByteStringEncoder rejects non-bytes inputs with EncodingTypeError and
    otherwise produces length-prefixed, right-zero-padded ABI encoding."""
    encoder = ByteStringEncoder()
    if not is_bytes(string_value):
        # Non-bytes values must raise, and the error must name the encoder.
        with pytest.raises(EncodingTypeError) as exception_info:
            encoder(string_value)
        assert 'ByteStringEncoder' in str(exception_info.value)
        return
    # uint256 length prefix + payload padded to a 32-byte boundary
    # (empty bytes encode as the bare length prefix).
    expected_value = encode_uint_256(len(string_value)) + (
        zpad_right(string_value, ceil32(len(string_value))) if string_value else b''
    )
    encoded_value = encoder(string_value)
    assert encoded_value == expected_value
class TestStreamingBulk(object):
    """Integration tests for ``helpers.async_streaming_bulk`` against a live
    Elasticsearch test cluster (``async_client`` fixture).

    NOTE(review): async test methods here presumably ran under an asyncio
    pytest plugin whose marker may have been stripped by extraction.
    """

    async def test_actions_remain_unchanged(self, async_client):
        """The helper must not mutate the caller's action dicts."""
        actions = [{'_id': 1}, {'_id': 2}]
        async for (ok, item) in helpers.async_streaming_bulk(async_client, actions, index='test-index'):
            assert ok
        assert ([{'_id': 1}, {'_id': 2}] == actions)

    async def test_all_documents_get_inserted(self, async_client):
        """All 100 docs land in the index and are retrievable by id."""
        docs = [{'answer': x, '_id': x} for x in range(100)]
        async for (ok, item) in helpers.async_streaming_bulk(async_client, docs, index='test-index', refresh=True):
            assert ok
        assert (100 == (await async_client.count(index='test-index'))['count'])
        assert ({'answer': 42} == (await async_client.get(index='test-index', id=42))['_source'])

    async def test_documents_data_types(self, async_client):
        """Both async and sync generators are accepted as the actions source."""
        async def async_gen():
            for x in range(100):
                (await asyncio.sleep(0))
                (yield {'answer': x, '_id': x})

        def sync_gen():
            for x in range(100):
                (yield {'answer': x, '_id': x})

        async for (ok, item) in helpers.async_streaming_bulk(async_client, async_gen(), index='test-index', refresh=True):
            assert ok
        assert (100 == (await async_client.count(index='test-index'))['count'])
        assert ({'answer': 42} == (await async_client.get(index='test-index', id=42))['_source'])
        # Reset the index, then repeat with the synchronous generator.
        (await async_client.delete_by_query(index='test-index', body={'query': {'match_all': {}}}))
        async for (ok, item) in helpers.async_streaming_bulk(async_client, sync_gen(), index='test-index', refresh=True):
            assert ok
        assert (100 == (await async_client.count(index='test-index'))['count'])
        assert ({'answer': 42} == (await async_client.get(index='test-index', id=42))['_source'])

    async def test_all_errors_from_chunk_are_raised_on_failure(self, async_client):
        """With raise_on_error=True, every mapping failure in a chunk is
        collected into one BulkIndexError."""
        # Strict integer mapping so string values fail to index.
        (await async_client.indices.create(index='i', mappings={'properties': {'a': {'type': 'integer'}}}, settings={'number_of_shards': 1, 'number_of_replicas': 0}))
        (await async_client.cluster.health(wait_for_status='yellow'))
        try:
            async for (ok, item) in helpers.async_streaming_bulk(async_client, [{'a': 'b'}, {'a': 'c'}], index='i', raise_on_error=True):
                assert ok
        except helpers.BulkIndexError as e:
            assert (2 == len(e.errors))
        else:
            assert False, 'exception should have been raised'

    async def test_different_op_types(self, async_client):
        """index/delete/update op types may be mixed in one stream."""
        (await async_client.index(index='i', id=45, body={}))
        (await async_client.index(index='i', id=42, body={}))
        docs = [{'_index': 'i', '_id': 47, 'f': 'v'}, {'_op_type': 'delete', '_index': 'i', '_id': 45}, {'_op_type': 'update', '_index': 'i', '_id': 42, 'doc': {'answer': 42}}]
        async for (ok, item) in helpers.async_streaming_bulk(async_client, docs):
            assert ok
        assert (not (await async_client.exists(index='i', id=45)))
        assert ({'answer': 42} == (await async_client.get(index='i', id=42))['_source'])
        assert ({'f': 'v'} == (await async_client.get(index='i', id=47))['_source'])

    async def test_transport_error_can_becaught(self, async_client):
        """With raise_on_exception/raise_on_error off, a transport error is
        reported inline as a failed item instead of raising."""
        failing_client = FailingBulkClient(async_client)
        docs = [{'_index': 'i', '_id': 47, 'f': 'v'}, {'_index': 'i', '_id': 45, 'f': 'v'}, {'_index': 'i', '_id': 42, 'f': 'v'}]
        # chunk_size=1 so exactly the second chunk fails.
        results = [x async for x in helpers.async_streaming_bulk(failing_client, docs, raise_on_exception=False, raise_on_error=False, chunk_size=1)]
        assert (3 == len(results))
        assert ([True, False, True] == [r[0] for r in results])
        exc = results[1][1]['index'].pop('exception')
        assert isinstance(exc, ApiError)
        assert (599 == exc.status_code)
        assert ({'index': {'_index': 'i', '_id': 45, 'data': {'f': 'v'}, 'error': "ApiError(599, 'Error!')", 'status': 599}} == results[1][1])

    async def test_rejected_documents_are_retried(self, async_client):
        """A 429 rejection is retried and ultimately succeeds."""
        failing_client = FailingBulkClient(async_client, fail_with=ApiError(message='Rejected!', body={}, meta=ApiResponseMeta(status=429, headers={}, duration=0, node=None)))
        docs = [{'_index': 'i', '_id': 47, 'f': 'v'}, {'_index': 'i', '_id': 45, 'f': 'v'}, {'_index': 'i', '_id': 42, 'f': 'v'}]
        results = [x async for x in helpers.async_streaming_bulk(failing_client, docs, raise_on_exception=False, raise_on_error=False, chunk_size=1, max_retries=1, initial_backoff=0)]
        assert (3 == len(results))
        assert ([True, True, True] == [r[0] for r in results])
        (await async_client.indices.refresh(index='i'))
        res = (await async_client.search(index='i'))
        assert ({'value': 3, 'relation': 'eq'} == res['hits']['total'])
        # 3 chunks + 1 retry of the failed chunk.
        assert (4 == failing_client._called)

    async def test_rejected_documents_are_retried_at_most_max_retries_times(self, async_client):
        """A chunk rejected more times than max_retries is reported as failed."""
        failing_client = FailingBulkClient(async_client, fail_at=(1, 2), fail_with=ApiError(message='Rejected!', body={}, meta=ApiResponseMeta(status=429, headers={}, duration=0, node=None)))
        docs = [{'_index': 'i', '_id': 47, 'f': 'v'}, {'_index': 'i', '_id': 45, 'f': 'v'}, {'_index': 'i', '_id': 42, 'f': 'v'}]
        results = [x async for x in helpers.async_streaming_bulk(failing_client, docs, raise_on_exception=False, raise_on_error=False, chunk_size=1, max_retries=1, initial_backoff=0)]
        assert (3 == len(results))
        assert ([False, True, True] == [r[0] for r in results])
        (await async_client.indices.refresh(index='i'))
        res = (await async_client.search(index='i'))
        assert ({'value': 2, 'relation': 'eq'} == res['hits']['total'])
        assert (4 == failing_client._called)

    async def test_transport_error_is_raised_with_max_retries(self, async_client):
        """With raise_on_exception=True the error propagates after the final
        retry (initial attempt + 3 retries = 4 calls)."""
        failing_client = FailingBulkClient(async_client, fail_at=(1, 2, 3, 4), fail_with=ApiError(message='Rejected!', body={}, meta=ApiResponseMeta(status=429, headers={}, duration=0, node=None)))

        async def streaming_bulk():
            results = [x async for x in helpers.async_streaming_bulk(failing_client, [{'a': 42}, {'a': 39}], raise_on_exception=True, max_retries=3, initial_backoff=0)]
            return results

        with pytest.raises(ApiError):
            (await streaming_bulk())
        assert (4 == failing_client._called)
def test_init_with_overridden_dependency():
    """Overriding a Dependency provider with a Factory keeps the container's
    injected kwargs wired to the instance's providers, not the class's."""
    class _Container(containers.DeclarativeContainer):
        p1 = providers.Dependency(instance_of=int)
        p2 = providers.Dependency(object)
        p2.override(providers.Factory(dict, p1=p1))

    instance = _Container(p1=1)
    assert instance.p2() == {'p1': 1}
    # The copied container rebinds the override onto its own p1 provider.
    overriding = instance.p2.last_overriding
    assert overriding.kwargs['p1'] is instance.p1
    assert overriding.kwargs['p1'] is not _Container.p1
    # The class-level override still points at the class-level provider.
    assert _Container.p2.last_overriding.kwargs['p1'] is _Container.p1
# BUGFIX: the orphaned ".django_db" line was a stripped pytest marker;
# restored so the test gets database access.
@pytest.mark.django_db
def test_latest_submission_per_agency_used(client, agency_account_data, helpers):
    """FY2016 returns results from the latest submission with an explanatory
    message; FY2017 (no data) returns an empty result set."""
    query_params = '?fiscal_year=2016'
    resp = client.get(url.format(code='011', query_params=query_params))
    expected_result = {'fiscal_year': 2016, 'results': [{'children': [{'code': '005-X-0000-000', 'gross_outlay_amount': 7000.0, 'name': 'TA 7', 'obligated_amount': 700.0}], 'code': '005-0000', 'gross_outlay_amount': 7000.0, 'name': 'FA 5', 'obligated_amount': 700.0}], 'toptier_code': '011', 'page_metadata': {'hasNext': False, 'hasPrevious': False, 'limit': 10, 'next': None, 'page': 1, 'previous': None, 'total': 1}, 'messages': ['Account data powering this endpoint were first collected in FY2017 Q2 under the DATA Act; as such, there are no data available for prior fiscal years.']}
    assert (resp.status_code == status.HTTP_200_OK)
    assert (resp.json() == expected_result)
    # No submissions exist for FY2017 in the fixture data.
    query_params = '?fiscal_year=2017'
    resp = client.get(url.format(code='011', query_params=query_params))
    expected_result = {'fiscal_year': 2017, 'results': [], 'toptier_code': '011', 'page_metadata': {'hasNext': False, 'hasPrevious': False, 'limit': 10, 'next': None, 'page': 1, 'previous': None, 'total': 0}, 'messages': []}
    assert (resp.status_code == status.HTTP_200_OK)
    assert (resp.json() == expected_result)
def client_node_configs(hosts: Optional[_TYPE_HOSTS], cloud_id: Optional[str], requests_session_auth: Optional[Any]=None, **kwargs: Any) -> List[NodeConfig]:
    """Resolve ``hosts``/``cloud_id`` into NodeConfigs with client options applied.

    Raises ValueError when both ``hosts`` and ``cloud_id`` are given. Any
    kwarg equal to the DEFAULT sentinel is dropped so node defaults survive.
    """
    if (cloud_id is not None):
        if (hosts is not None):
            raise ValueError("The 'cloud_id' and 'hosts' parameters are mutually exclusive")
        node_configs = cloud_id_to_node_configs(cloud_id)
    else:
        assert (hosts is not None)
        node_configs = hosts_to_node_configs(hosts)

    node_options = {k: v for (k, v) in kwargs.items() if (v is not DEFAULT)}
    # Caller-supplied headers, layered on top of each node's own headers.
    headers_to_add = HttpHeaders(node_options.pop('headers', ()))
    headers_to_add.setdefault('user-agent', USER_AGENT)
    if (requests_session_auth is not None):
        # Used by the requests-based node implementation for per-session auth.
        node_options.setdefault('_extras', {})['requests.session.auth'] = requests_session_auth

    def apply_node_options(node_config: NodeConfig) -> NodeConfig:
        """Merge shared options into one node, handling headers specially since
        .replace() would wipe out the node's existing headers."""
        headers = node_config.headers.copy()
        if headers_to_add:
            headers.update(headers_to_add)
        headers.setdefault('user-agent', USER_AGENT)
        headers.freeze()
        # BUGFIX: previously this popped 'headers' from the shared
        # node_options dict and wrote the merged, frozen result back into it,
        # so every node after the first inherited the FIRST node's merged
        # headers. Build a per-node dict instead of mutating shared state.
        return node_config.replace(**dict(node_options, headers=headers))

    return [apply_node_options(node_config) for node_config in node_configs]
class TestIP(unittest.TestCase):
    """Simulation smoke test for the IP core, driven through the PHY model."""

    def test(self):
        dut = DUT()
        # Stimulus per clock domain: main test generator on 'sys', the PHY
        # model's sink/model generators on 'eth_tx', its source on 'eth_rx'.
        generators = {'sys': [main_generator(dut)], 'eth_tx': [dut.phy_model.phy_sink.generator(), dut.phy_model.generator()], 'eth_rx': dut.phy_model.phy_source.generator()}
        # All three domains use a 10 ns clock period.
        clocks = {'sys': 10, 'eth_rx': 10, 'eth_tx': 10}
        # Dump waveforms for post-mortem inspection.
        run_simulation(dut, generators, clocks, vcd_name='sim.vcd')
class ServiceInvitationResponseAllOf(ModelNormal):
    """Auto-generated OpenAPI model (an ``allOf`` component) carrying the
    ``data`` attribute of a service-invitation response.

    NOTE(review): the bare ``_property`` and ``_js_args_to_python_args``
    expressions below appear to be stripped decorators (likely
    ``@cached_property`` and ``@convert_js_args_to_python_args`` in the
    generated client this mirrors), and ``_from_openapi_data`` looks like it
    is missing ``@classmethod`` (it takes ``cls``). Confirm against the
    original generator output before relying on this class.
    """

    # Spec-enforced enumerations and validations: none for this model.
    allowed_values = {}
    validations = {}

    _property
    def additional_properties_type():
        # Types accepted for properties not listed in attribute_map;
        # lazy_import avoids circular imports at module load time.
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type)

    _nullable = False

    _property
    def openapi_types():
        # Attribute name -> tuple of accepted types, resolved lazily.
        lazy_import()
        return {'data': (ServiceInvitationResponseAllOfData,)}

    _property
    def discriminator():
        # No polymorphic discriminator for this model.
        return None

    # Python attribute name -> JSON key.
    attribute_map = {'data': 'data'}
    read_only_vars = {}
    _composed_schemas = {}

    _js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Instantiate from raw API payload data (bypasses read-only checks)."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            # Optionally drop keys the spec doesn't know about.
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal attributes that are set directly, never via validation.
    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes'])

    _js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Public constructor: like _from_openapi_data but rejects read-only
        attributes passed by the caller."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
def get_data_after_gauntlets() -> list[dict[(str, int)]]:
    """Read the save-file section that follows the gauntlets block.

    Consumes a fixed header (8 + 3 bytes), a 1-byte record count, then four
    fields (4, 4, 1, 8 bytes) per record. Returns the raw entries in read
    order; presumably each dict carries 'Value'/length keys from
    next_int_len -- confirm against the reader implementation.
    """
    entries: list[dict[(str, int)]] = [
        next_int_len(4 * 2),
        next_int_len(1 * 3),
    ]
    count_entry = next_int_len(1)
    entries.append(count_entry)
    # One (4, 4, 1, 8)-byte field group per counted record.
    for _ in range(count_entry['Value']):
        entries.extend(next_int_len(width) for width in (4, 4, 1, 8))
    return entries
class TestSorter():
    """Tests for db._Sorter ordering by $value, $key, and child paths.

    BUGFIX: the orphaned ".parametrize(...)" lines were stripped pytest
    markers (invalid syntax as emitted); restored as
    @pytest.mark.parametrize.
    """

    # (input mapping, expected key order) pairs covering Firebase's ordering:
    # None < booleans < numbers < strings < objects, ties broken by key.
    value_test_cases = [({'k1': 1, 'k2': 2, 'k3': 3}, ['k1', 'k2', 'k3']), ({'k1': 3, 'k2': 2, 'k3': 1}, ['k3', 'k2', 'k1']), ({'k1': 3, 'k2': 1, 'k3': 2}, ['k2', 'k3', 'k1']), ({'k1': 3, 'k2': 1, 'k3': 1}, ['k2', 'k3', 'k1']), ({'k1': 1, 'k2': 2, 'k3': 1}, ['k1', 'k3', 'k2']), ({'k1': 2, 'k2': 2, 'k3': 1}, ['k3', 'k1', 'k2']), ({'k1': 'foo', 'k2': 'bar', 'k3': 'baz'}, ['k2', 'k3', 'k1']), ({'k1': 'foo', 'k2': 'bar', 'k3': 10}, ['k3', 'k2', 'k1']), ({'k1': 'foo', 'k2': 'bar', 'k3': None}, ['k3', 'k2', 'k1']), ({'k1': 5, 'k2': 'bar', 'k3': None}, ['k3', 'k1', 'k2']), ({'k1': False, 'k2': 'bar', 'k3': None}, ['k3', 'k1', 'k2']), ({'k1': False, 'k2': 1, 'k3': None}, ['k3', 'k1', 'k2']), ({'k1': True, 'k2': 0, 'k3': None, 'k4': 'foo'}, ['k3', 'k1', 'k2', 'k4']), ({'k1': True, 'k2': 0, 'k3': None, 'k4': 'foo', 'k5': False, 'k6': dict()}, ['k3', 'k5', 'k1', 'k2', 'k4', 'k6']), ({'k1': True, 'k2': 0, 'k3': 'foo', 'k4': 'foo', 'k5': False, 'k6': dict()}, ['k5', 'k1', 'k2', 'k3', 'k4', 'k6'])]
    # (input list, expected sorted list) pairs; lists stay lists.
    list_test_cases = [([], []), ([1, 2, 3], [1, 2, 3]), ([3, 2, 1], [1, 2, 3]), ([1, 3, 2], [1, 2, 3]), ([1, 3, 3], [1, 3, 3]), ([2, 3, 2], [2, 2, 3]), (['foo', 'bar', 'baz'], ['bar', 'baz', 'foo']), (['foo', 1, False, None, 0, True], [None, False, True, 0, 1, 'foo'])]

    @pytest.mark.parametrize('result, expected', value_test_cases)
    def test_order_by_value(self, result, expected):
        """Dicts ordered by $value come back as an OrderedDict in value order."""
        ordered = db._Sorter(result, '$value').get()
        assert isinstance(ordered, collections.OrderedDict)
        assert (list(ordered.keys()) == expected)

    @pytest.mark.parametrize('result, expected', list_test_cases)
    def test_order_by_value_with_list(self, result, expected):
        """Lists ordered by $value come back as sorted lists."""
        ordered = db._Sorter(result, '$value').get()
        assert isinstance(ordered, list)
        assert (ordered == expected)

    @pytest.mark.parametrize('value', [None, False, True, 0, 1, 'foo'])
    def test_invalid_sort(self, value):
        """Scalars cannot be sorted and must raise ValueError."""
        with pytest.raises(ValueError):
            db._Sorter(value, '$value')

    @pytest.mark.parametrize('result, expected', [({'k1': 1, 'k2': 2, 'k3': 3}, ['k1', 'k2', 'k3']), ({'k3': 3, 'k2': 2, 'k1': 1}, ['k1', 'k2', 'k3']), ({'k1': 3, 'k3': 1, 'k2': 2}, ['k1', 'k2', 'k3'])])
    def test_order_by_key(self, result, expected):
        """$key ordering sorts by dictionary key regardless of values."""
        ordered = db._Sorter(result, '$key').get()
        assert isinstance(ordered, collections.OrderedDict)
        assert (list(ordered.keys()) == expected)

    @pytest.mark.parametrize('result, expected', value_test_cases)
    def test_order_by_child(self, result, expected):
        """Ordering by a child key matches $value ordering of the child."""
        nested = {}
        for (key, val) in result.items():
            nested[key] = {'child': val}
        ordered = db._Sorter(nested, 'child').get()
        assert isinstance(ordered, collections.OrderedDict)
        assert (list(ordered.keys()) == expected)

    @pytest.mark.parametrize('result, expected', value_test_cases)
    def test_order_by_grand_child(self, result, expected):
        """Child paths with slashes resolve through nested dicts."""
        nested = {}
        for (key, val) in result.items():
            nested[key] = {'child': {'grandchild': val}}
        ordered = db._Sorter(nested, 'child/grandchild').get()
        assert isinstance(ordered, collections.OrderedDict)
        assert (list(ordered.keys()) == expected)

    @pytest.mark.parametrize('result, expected', [({'k1': {'child': 1}, 'k2': {}}, ['k2', 'k1']), ({'k1': {'child': 1}, 'k2': {'child': 0}}, ['k2', 'k1']), ({'k1': {'child': 1}, 'k2': {'child': {}}, 'k3': {}}, ['k3', 'k1', 'k2'])])
    def test_child_path_resolution(self, result, expected):
        """Entries missing the child path sort before entries that have it."""
        ordered = db._Sorter(result, 'child').get()
        assert isinstance(ordered, collections.OrderedDict)
        assert (list(ordered.keys()) == expected)
class AircrackOnly(plugins.Plugin):
    """Verify each captured pcap with aircrack-ng; delete it when it holds
    neither a WPA handshake nor a PMKID."""

    __author__ = 'pwnagotchi [at] rossmarks [dot] uk'
    __version__ = '1.0.1'
    __license__ = 'GPL3'
    __description__ = 'confirm pcap contains handshake/PMKID or delete it'

    def __init__(self):
        # Status message queued for the next UI refresh.
        self.text_to_set = ''

    def on_loaded(self):
        """Warn at load time when aircrack-ng is missing."""
        logging.info('aircrackonly plugin loaded')
        if 'face' not in self.options:
            self.options['face'] = '(>.<)'
        # Static command string (no external input) -- shell pipeline is OK here.
        check = subprocess.run("/usr/bin/dpkg -l aircrack-ng | grep aircrack-ng | awk '{print $2, $3}'", shell=True, stdout=subprocess.PIPE)
        check = check.stdout.decode('utf-8').strip()
        if check != 'aircrack-ng <none>':
            logging.info('aircrackonly: Found ' + check)
        else:
            logging.warning('aircrack-ng is not installed!')

    def on_handshake(self, agent, filename, access_point, client_station):
        """Check the new pcap; remove it when aircrack-ng finds nothing usable."""
        display = agent._view
        todelete = 0
        handshakeFound = 0
        # SECURITY BUGFIX: `filename` derives from over-the-air AP/client names
        # and was previously interpolated into a shell=True command string --
        # a command-injection vector. Run aircrack-ng once with an argument
        # list and do the grep equivalents in Python.
        result = subprocess.run(['/usr/bin/aircrack-ng', filename], stdout=subprocess.PIPE)
        output = result.stdout.decode('utf-8')
        if '1 handshake' in output:
            handshakeFound = 1
            logging.info('[AircrackOnly] contains handshake')
        if handshakeFound == 0:
            if 'PMKID' in output:
                logging.info('[AircrackOnly] contains PMKID')
            else:
                todelete = 1
        if todelete == 1:
            os.remove(filename)
            self.text_to_set = 'Removed an uncrackable pcap'
            logging.warning('Removed uncrackable pcap ' + filename)
            display.update(force=True)

    def on_ui_update(self, ui):
        """Flash the queued status message once, then clear it."""
        if self.text_to_set:
            ui.set('face', self.options['face'])
            ui.set('status', self.text_to_set)
            self.text_to_set = ''
# BUGFIX: the orphaned ".parametrize(...)" lines were stripped pytest markers
# (invalid syntax as emitted); restored as @pytest.mark.parametrize.
@pytest.mark.parametrize('label_type', ['boolean', 'integer', 'function'])
@pytest.mark.parametrize('object_type', [LabelledForm, Term, Form, int])
def test_label(label_type, object_type, label_and_values, object_to_label):
    """Exercise Label application, validation, value update, and removal
    across every (label type, labelled object type) combination."""
    (label, good_value, bad_value, new_value) = label_and_values
    assert (label.label == 'foo'), 'Label has incorrect name'
    # Labelling a plain int is invalid and must raise.
    if (object_type == int):
        try:
            labelled_object = label(object_to_label)
        except ValueError:
            return
        assert False, 'Labelling an integer should throw an error'
    # Boolean labels take no value; the others must reject bad values.
    if (label_type == 'boolean'):
        labelled_object = label(object_to_label)
    else:
        try:
            labelled_object = label(object_to_label, bad_value)
            assert False, ('The labelling validator has not worked for ' + f'label_type {label_type} and object_type {object_type}')
        except AssertionError:
            labelled_object = label(object_to_label, good_value)
    # Terms stay Terms; everything else becomes a LabelledForm.
    if (object_type == Term):
        assert (type(labelled_object) == Term), ('Labelled Term should be a ' + f'be a Term and not type {type(labelled_object)}')
    else:
        assert (type(labelled_object) == LabelledForm), ('Labelled Form should ' + f'be a Labelled Form and not type {type(labelled_object)}')
    if (object_type == Term):
        assert (labelled_object.get(label) == good_value), ('Value of label ' + f'should be {good_value} and not {labelled_object.get(label)}')
    else:
        assert (labelled_object.terms[0].get(label) == good_value), (('Value of ' + f'label should be {good_value} and not ') + f'{labelled_object.terms[0].get(label)}')
    # update_value must validate too, then apply the new value.
    try:
        labelled_object = label.update_value(labelled_object, bad_value)
        assert False, ('The validator has not worked for updating label of ' + f'label_type {label_type} and object_type {object_type}')
    except AssertionError:
        labelled_object = label.update_value(labelled_object, new_value)
    if (object_type == Term):
        assert (labelled_object.get(label) == new_value), ('Updated value of ' + f'label should be {new_value} and not {labelled_object.get(label)}')
    else:
        assert (labelled_object.terms[0].get(label) == new_value), (('Updated ' + f'value of label should be {new_value} and not ') + f'{labelled_object.terms[0].get(label)}')
    # After removal the label must no longer resolve.
    labelled_object = label.remove(labelled_object)
    if (object_type == Term):
        label_value = labelled_object.get(label)
    else:
        label_value = labelled_object.terms[0].get(label)
    assert (label_value is None), (f'The label {label_type} appears has not to ' + f'have been removed for object_type {object_type}')
# BUGFIX: the orphaned "_toolkit([...])" expression was a stripped decorator;
# restored as traitsui's @requires_toolkit (the conventional marker taking a
# ToolkitName list) -- confirm against the original module.
@requires_toolkit([ToolkitName.qt, ToolkitName.wx])
class TestSetEditorMapping(BaseTestMixin, unittest.TestCase):
    """GUI tests: SetEditor name/value mappings follow format_func and react
    to changes of the available values."""

    def setUp(self):
        BaseTestMixin.setUp(self)

    def tearDown(self):
        BaseTestMixin.tearDown(self)

    def setup_ui(self, model, view):
        """Yield the live editor for `value` inside a managed UI context."""
        with create_ui(model, dict(view=view)) as ui:
            (yield ui.get_editors('value')[0])

    def test_simple_editor_mapping_values(self):
        """Mappings track the factory's static `values` list, including order."""
        class IntListModel(HasTraits):
            value = List()

        set_editor_factory = SetEditor(values=[0, 1], format_func=(lambda v: str(bool(v)).upper()))
        formatted_view = View(UItem('value', editor=set_editor_factory, style='simple'))
        with reraise_exceptions(), self.setup_ui(IntListModel(), formatted_view) as editor:
            self.assertEqual(editor.names, ['FALSE', 'TRUE'])
            self.assertEqual(editor.mapping, {'FALSE': 0, 'TRUE': 1})
            self.assertEqual(editor.inverse_mapping, {0: 'FALSE', 1: 'TRUE'})
            # Reordering the factory values must reorder the editor mappings.
            set_editor_factory.values = [1, 0]
            self.assertEqual(editor.names, ['TRUE', 'FALSE'])
            self.assertEqual(editor.mapping, {'TRUE': 1, 'FALSE': 0})
            self.assertEqual(editor.inverse_mapping, {1: 'TRUE', 0: 'FALSE'})

    def test_simple_editor_mapping_name(self):
        """Mappings track a trait-named values list on the model."""
        class IntListModel(HasTraits):
            value = List()
            possible_values = List([0, 1])

        formatted_view = View(UItem('value', editor=SetEditor(name='object.possible_values', format_func=(lambda v: str(bool(v)).upper())), style='simple'))
        model = IntListModel()
        with reraise_exceptions(), self.setup_ui(model, formatted_view) as editor:
            self.assertEqual(editor.names, ['FALSE', 'TRUE'])
            self.assertEqual(editor.mapping, {'FALSE': 0, 'TRUE': 1})
            self.assertEqual(editor.inverse_mapping, {0: 'FALSE', 1: 'TRUE'})
            # Changing the model trait must flow through to the editor.
            model.possible_values = [1, 0]
            self.assertEqual(editor.names, ['TRUE', 'FALSE'])
            self.assertEqual(editor.mapping, {'TRUE': 1, 'FALSE': 0})
            self.assertEqual(editor.inverse_mapping, {1: 'TRUE', 0: 'FALSE'})
class Theme(models.Model):
    """Django model describing a configurable admin-interface theme: header,
    logo, favicon, environment badge, language chooser, CSS colors, modal and
    list-filter behavior. Only one theme is expected to be active at a time
    (see ``set_active``)."""

    name = models.CharField(unique=True, max_length=50, default='Django', verbose_name=_('name'))
    active = models.BooleanField(default=True, verbose_name=_('active'))

    # Header title.
    title = models.CharField(max_length=50, default=_('Django administration'), blank=True, verbose_name=_('title'))
    title_color = ColorField(blank=True, default='#F5DD5D', help_text='#F5DD5D', max_length=10, verbose_name=_('color'))
    title_visible = models.BooleanField(default=True, verbose_name=_('visible'))

    # Logo.
    logo = models.FileField(upload_to='admin-interface/logo/', blank=True, validators=[FileExtensionValidator(allowed_extensions=['gif', 'jpg', 'jpeg', 'png', 'svg'])], help_text=_('Leave blank to use the default Django logo'), verbose_name=_('logo'))
    logo_color = ColorField(blank=True, default='#FFFFFF', help_text='#FFFFFF', max_length=10, verbose_name=_('color'))
    logo_max_width = models.PositiveSmallIntegerField(blank=True, default=400, verbose_name=_('max width'))
    logo_max_height = models.PositiveSmallIntegerField(blank=True, default=100, verbose_name=_('max height'))
    logo_visible = models.BooleanField(default=True, verbose_name=_('visible'))

    # Favicon.
    favicon = models.FileField(upload_to='admin-interface/favicon/', blank=True, validators=[FileExtensionValidator(allowed_extensions=['gif', 'ico', 'jpg', 'jpeg', 'png', 'svg'])], help_text=_('(.ico|.png|.gif - 16x16|32x32 px)'), verbose_name=_('favicon'))

    # Environment badge (e.g. staging / production marker).
    env_name = models.CharField(blank=True, max_length=50, verbose_name=_('name'))
    env_color = ColorField(blank=True, default='#E74C3C', help_text=_('(red: #E74C3C, orange: #E67E22, yellow: #F1C40F, green: #2ECC71, blue: #3498DB)'), max_length=10, verbose_name=_('color'))
    env_visible_in_header = models.BooleanField(default=True, verbose_name=_('visible in header (marker and name)'))
    env_visible_in_favicon = models.BooleanField(default=True, verbose_name=_('visible in favicon (marker)'))

    # Language chooser.
    language_chooser_active = models.BooleanField(default=True, verbose_name=_('active'))
    language_chooser_control_choices = (('default-select', _('Default Select')), ('minimal-select', _('Minimal Select')))
    language_chooser_control = models.CharField(max_length=20, choices=language_chooser_control_choices, default='default-select', verbose_name=_('control'))
    language_chooser_display_choices = (('code', _('code')), ('name', _('name')))
    language_chooser_display = models.CharField(max_length=10, choices=language_chooser_display_choices, default='code', verbose_name=_('display'))

    # CSS: header.
    css_header_background_color = ColorField(blank=True, default='#0C4B33', help_text='#0C4B33', max_length=10, verbose_name=_('background color'))
    css_header_text_color = ColorField(blank=True, default='#44B78B', help_text='#44B78B', max_length=10, verbose_name=_('text color'))
    css_header_link_color = ColorField(blank=True, default='#FFFFFF', help_text='#FFFFFF', max_length=10, verbose_name=_('link color'))
    css_header_link_hover_color = ColorField(blank=True, default='#C9F0DD', help_text='#C9F0DD', max_length=10, verbose_name=_('link hover color'))

    # CSS: modules (app boxes on the index page).
    css_module_background_color = ColorField(blank=True, default='#44B78B', help_text='#44B78B', max_length=10, verbose_name=_('background color'))
    css_module_background_selected_color = ColorField(blank=True, default='#FFFFCC', help_text='#FFFFCC', max_length=10, verbose_name=_('background selected color'))
    css_module_text_color = ColorField(blank=True, default='#FFFFFF', help_text='#FFFFFF', max_length=10, verbose_name=_('text color'))
    css_module_link_color = ColorField(blank=True, default='#FFFFFF', help_text='#FFFFFF', max_length=10, verbose_name=_('link color'))
    css_module_link_selected_color = ColorField(blank=True, default='#FFFFFF', help_text='#FFFFFF', max_length=10, verbose_name=_('link selected color'))
    css_module_link_hover_color = ColorField(blank=True, default='#C9F0DD', help_text='#C9F0DD', max_length=10, verbose_name=_('link hover color'))
    css_module_rounded_corners = models.BooleanField(default=True, verbose_name=_('rounded corners'))

    # CSS: generic links.
    css_generic_link_color = ColorField(blank=True, default='#0C3C26', help_text='#0C3C26', max_length=10, verbose_name=_('link color'))
    css_generic_link_hover_color = ColorField(blank=True, default='#156641', help_text='#156641', max_length=10, verbose_name=_('link hover color'))
    css_generic_link_active_color = ColorField(blank=True, default='#29B864', help_text='#29B864', max_length=10, verbose_name=_('link active color'))

    # CSS: save / delete buttons.
    css_save_button_background_color = ColorField(blank=True, default='#0C4B33', help_text='#0C4B33', max_length=10, verbose_name=_('background color'))
    css_save_button_background_hover_color = ColorField(blank=True, default='#0C3C26', help_text='#0C3C26', max_length=10, verbose_name=_('background hover color'))
    css_save_button_text_color = ColorField(blank=True, default='#FFFFFF', help_text='#FFFFFF', max_length=10, verbose_name=_('text color'))
    css_delete_button_background_color = ColorField(blank=True, default='#BA2121', help_text='#BA2121', max_length=10, verbose_name=_('background color'))
    css_delete_button_background_hover_color = ColorField(blank=True, default='#A41515', help_text='#A41515', max_length=10, verbose_name=_('background hover color'))
    css_delete_button_text_color = ColorField(blank=True, default='#FFFFFF', help_text='#FFFFFF', max_length=10, verbose_name=_('text color'))

    # Related-object modal popup.
    related_modal_active = models.BooleanField(default=True, verbose_name=_('active'))
    related_modal_background_color = ColorField(blank=True, default='#000000', help_text='#000000', max_length=10, verbose_name=_('background color'))
    related_modal_background_opacity_choices = (('0.1', '10%'), ('0.2', '20%'), ('0.3', '30%'), ('0.4', '40%'), ('0.5', '50%'), ('0.6', '60%'), ('0.7', '70%'), ('0.8', '80%'), ('0.9', '90%'))
    related_modal_background_opacity = models.CharField(max_length=5, choices=related_modal_background_opacity_choices, default='0.3', help_text='20%', verbose_name=_('background opacity'))
    related_modal_rounded_corners = models.BooleanField(default=True, verbose_name=_('rounded corners'))
    related_modal_close_button_visible = models.BooleanField(default=True, verbose_name=_('close button visible'))

    # Changelist filters.
    list_filter_highlight = models.BooleanField(default=True, verbose_name=_('highlight active'))
    list_filter_dropdown = models.BooleanField(default=True, verbose_name=_('use dropdown'))
    list_filter_sticky = models.BooleanField(default=True, verbose_name=_('sticky position'))
    list_filter_removal_links = models.BooleanField(default=False, verbose_name=_('quick remove links for active filters at top of sidebar'))

    # Misc layout toggles.
    foldable_apps = models.BooleanField(default=True, verbose_name=_('foldable apps'))
    show_fieldsets_as_tabs = models.BooleanField(default=False, verbose_name=_('fieldsets as tabs'))
    show_inlines_as_tabs = models.BooleanField(default=False, verbose_name=_('inlines as tabs'))
    collapsible_stacked_inlines = models.BooleanField(default=False, verbose_name=_('collapsible stacked inlines'))
    collapsible_stacked_inlines_collapsed = models.BooleanField(default=True, verbose_name=_('collapsible stacked inlines collapsed'))
    collapsible_tabular_inlines = models.BooleanField(default=False, verbose_name=_('collapsible tabular inlines'))
    collapsible_tabular_inlines_collapsed = models.BooleanField(default=True, verbose_name=_('collapsible tabular inlines collapsed'))
    recent_actions_visible = models.BooleanField(default=True, verbose_name=_('visible'))
    form_submit_sticky = models.BooleanField(default=False, verbose_name=_('sticky submit'))
    form_pagination_sticky = models.BooleanField(default=False, verbose_name=_('sticky pagination'))

    objects = ThemeQuerySet.as_manager()

    def set_active(self):
        """Mark this theme active and persist it.

        NOTE(review): nothing here deactivates other themes — presumably
        handled by ThemeQuerySet or a signal elsewhere; confirm.
        """
        self.active = True
        self.save()

    class Meta():
        app_label = 'admin_interface'
        verbose_name = _('Theme')
        verbose_name_plural = _('Themes')

    def __str__(self):
        return force_str(self.name)
class PipelineProcessor(LoggingConfigurable):
    """Abstract base for runtime-specific pipeline processors.

    Subclasses must set ``_type``/``_name`` and implement ``process`` and
    ``export``. Also hosts helpers for topologically sorting operations and
    propagating parent inputs/outputs along the sorted order.
    """

    # Runtime platform identifier; must be overridden by subclasses.
    _type: RuntimeProcessorType = None
    # Human-readable processor name; must be overridden by subclasses.
    _name: str = None

    root_dir: str = Unicode(allow_none=True)

    enable_pipeline_info: bool = Bool(config=True, default_value=(os.getenv('ELYRA_ENABLE_PIPELINE_INFO', 'true').lower() == 'true'), help='Produces formatted logging of informational messages with durations\n (default=True). (ELYRA_ENABLE_PIPELINE_INFO env var)')

    # NOTE(review): ``type`` and ``name`` are accessor-shaped and likely had
    # ``@property`` decorators stripped by the decompiler — confirm.
    def type(self):
        """Return the runtime type, raising if a subclass forgot to set it."""
        if (self._type is None):
            raise NotImplementedError('_type must have a value!')
        return self._type

    def name(self):
        """Return the processor name, raising if a subclass forgot to set it."""
        if (self._name is None):
            raise NotImplementedError('_name must have a value!')
        return self._name

    def get_components(self) -> List[Component]:
        """Return generic components plus all components for this platform."""
        components: List[Component] = ComponentCache.get_generic_components()
        components.extend(ComponentCache.instance().get_all_components(platform=self._type))
        return components

    def get_component(self, component_id: str) -> Optional[Component]:
        """Look up one component; generic ids bypass the platform cache."""
        if (component_id not in ('notebook', 'python-script', 'r-script')):
            return ComponentCache.instance().get_component(platform=self._type, component_id=component_id)
        return ComponentCache.get_generic_component(component_id)

    def process(self, pipeline) -> PipelineProcessorResponse:
        """Execute *pipeline* on the target runtime. Subclass responsibility."""
        raise NotImplementedError()

    def export(self, pipeline, pipeline_export_format, pipeline_export_path, overwrite):
        """Export *pipeline* in the given format. Subclass responsibility."""
        raise NotImplementedError()

    def log_pipeline_info(self, pipeline_name: str, action_clause: str, **kwargs):
        """Log one informational line (optionally with duration/operation),
        gated by ``enable_pipeline_info``."""
        if self.enable_pipeline_info:
            duration = kwargs.get('duration')
            duration_clause = (f'({duration:.3f} secs)' if duration else '')
            operation_name = kwargs.get('operation_name')
            op_clause = (f":'{operation_name}'" if operation_name else '')
            self.log.info(f"{self._name} '{pipeline_name}'{op_clause} - {action_clause} {duration_clause}")

    # NOTE(review): the three helpers below take no ``self``/``cls`` and are
    # invoked as ``PipelineProcessor._sort_operation_dependencies(...)``
    # (L708/L715) — presumably ``@staticmethod`` decorators were stripped.
    def _propagate_operation_inputs_outputs(pipeline: Pipeline, sorted_operations: List[Operation]) -> None:
        """Merge each operation's parents' inputs+outputs into its inputs.

        Must run over a topologically sorted list so parents are final first.
        """
        for operation in sorted_operations:
            parent_io = set()
            for parent_operation_id in operation.parent_operation_ids:
                parent_operation = pipeline.operations[parent_operation_id]
                if parent_operation.inputs:
                    parent_io.update(parent_operation.inputs)
                if parent_operation.outputs:
                    parent_io.update(parent_operation.outputs)
            if parent_io:
                parent_io.update(operation.inputs)
                operation.inputs = list(parent_io)

    def _sort_operations(operations_by_id: dict) -> List[Operation]:
        """Return operations topologically sorted (parents before children)."""
        ordered_operations = []
        for operation in operations_by_id.values():
            PipelineProcessor._sort_operation_dependencies(operations_by_id, ordered_operations, operation)
        return ordered_operations

    def _sort_operation_dependencies(operations_by_id: dict, ordered_operations: list, operation: Operation) -> None:
        """Depth-first visit appending *operation* after all of its parents.

        NOTE(review): no cycle detection — a dependency cycle would recurse
        forever; upstream validation presumably prevents that. Confirm.
        """
        if (operation not in ordered_operations):
            for parent_operation_id in operation.parent_operation_ids:
                parent_operation = operations_by_id[parent_operation_id]
                if (parent_operation not in ordered_operations):
                    PipelineProcessor._sort_operation_dependencies(operations_by_id, ordered_operations, parent_operation)
            ordered_operations.append(operation)
def password_set(username, password):
    """Derive a fresh scrypt hash for *password* and store it for *username*.

    A new random 16-byte salt is generated on every call; the derived key and
    the salt are stored hex-encoded in the ``users`` table of
    ``db_users.sqlite``.

    :param username: account whose password row is updated
    :param password: plaintext password (encoded as UTF-8 before derivation)
    """
    backend = default_backend()
    salt = os.urandom(16)
    # scrypt cost parameters: n=2**14, r=8, p=1 (interactive-login strength).
    kdf = Scrypt(salt=salt, length=32, n=(2 ** 14), r=8, p=1, backend=backend)
    key = kdf.derive(password.encode())
    conn = sqlite3.connect('db_users.sqlite')
    try:
        # NOTE(review): tracing every SQL statement to stdout looks like a
        # debug leftover (it echoes the hashed credentials); kept for
        # behavioral parity — consider removing.
        conn.set_trace_callback(print)
        conn.row_factory = sqlite3.Row
        c = conn.cursor()
        print('Changing password for', username)
        c.execute('UPDATE users SET password = ?, salt = ? WHERE username = ?', (hexlify(key).decode(), hexlify(salt).decode(), username))
        conn.commit()
    finally:
        # Bug fix: the connection was previously leaked on every call.
        conn.close()
def pytest_generate_tests(metafunc):
    """Parametrize shape/axes fixtures for error-handling and perf tests.

    Perf cases pick a batch size so each array stays within a fixed element
    budget; "even" cases use the shapes as-is, "odd" cases shrink every
    transformed axis by one element.
    """
    errors_shapes_and_axes = [((10,), (0,)), ((11,), (0,)), ((9000,), (0,)), ((9001,), (0,)), ((128, 60), (0, 1)), ((127, 60), (0, 1)), ((127, 61), (0, 1)), ((100, 80, 60), (0, 1, 2)), ((101, 80, 61), (0, 1, 2)), ((101, 80, 61), (0, 2)), ((20, 31, 80, 61), (0, 2))]
    base_shapes = [((2 ** 4),), ((2 ** 18),), ((2 ** 4), (2 ** 4)), ((2 ** 9), (2 ** 9))]
    even_cases = []
    odd_cases = []
    element_budget = (2 ** 22)
    for batch_leading in (True, False):
        for base in base_shapes:
            batch = (element_budget // product(base))
            if batch_leading:
                full = ((batch,) + base)
                axes = tuple(range(1, (len(base) + 1)))
            else:
                full = (base + (batch,))
                axes = tuple(range(0, len(base)))
            even_cases.append((full, axes))
            # Odd variant: shrink each transformed axis by one.
            shrunk = list(full)
            for ax in axes:
                shrunk[ax] -= 1
            odd_cases.append((tuple(shrunk), axes))

    def make_id(pair):
        """Readable test id: '<shape>_over_<axes>'."""
        return ((str(pair[0]) + '_over_') + str(pair[1]))

    if ('errors_shape_and_axes' in metafunc.fixturenames):
        metafunc.parametrize('errors_shape_and_axes', errors_shapes_and_axes, ids=[make_id(p) for p in errors_shapes_and_axes])
    elif ('perf_even_shape_and_axes' in metafunc.fixturenames):
        metafunc.parametrize('perf_even_shape_and_axes', even_cases, ids=[make_id(p) for p in even_cases])
    elif ('perf_odd_shape_and_axes' in metafunc.fixturenames):
        metafunc.parametrize('perf_odd_shape_and_axes', odd_cases, ids=[make_id(p) for p in odd_cases])
def flatten(elements: List, logger=print) -> Etype:
    """Aggregate per-element JSON label predictions into one ``flattened.json``.

    For each element, its first ``*.json`` path is read; every label's
    (frame, score) pairs are rendered via ``render_frame`` and the combined
    list is written under ``WK_DIR``.

    :param elements: objects exposing ``.paths`` (iterable of paths) and ``.id``
    :param logger: callable used for a completion message (default ``print``)
    :returns: an ``Etype.Json`` wrapping the output path
    """
    is_json = re.compile('.*\\.json')
    # First JSON path attached to each element holds its predictions.
    # NOTE(review): raises StopIteration if an element has no JSON path —
    # presumably guaranteed upstream; confirm.
    all_preds = [next(filter(is_json.match, [str(x) for x in x.paths])) for x in elements]
    all_preds = [open_json(x) for x in all_preds]
    preds = [x.get('labels') for x in all_preds if (isinstance(x, dict) and (x.get('labels') is not None))]
    # NOTE(review): if any prediction file was filtered out above, this zip
    # silently pairs labels with the wrong element ids — confirm inputs are
    # always well-formed, or key by element instead.
    vls = [[(label, el_preds[label]) for label in el_preds.keys()] for el_preds in preds]
    vls = [(x[0].id, x[1]) for x in zip(elements, vls)]
    # (element_id, label, frames, scores) per label occurrence.
    label_in_els = [(x[0], y[0], y[1]['frames'], y[1]['scores']) for x in vls for y in x[1]]
    frames = [render_frame(x[0], x[1], y[0], y[1]) for x in label_in_els for y in zip(x[2], x[3])]
    output = (WK_DIR / 'flattened.json')
    # Fix: race-safe directory creation instead of exists()+makedirs().
    WK_DIR.mkdir(parents=True, exist_ok=True)
    with open(output, 'w') as f:
        json.dump(frames, f)
    logger('All frames aggregated, printed to flattened.json')
    return Etype.Json('__FLATTENED', output)
class OptionSeriesPyramidSonificationDefaultspeechoptionsMappingPlaydelay(Options):
    """Generated Highcharts option wrapper for
    ``series.pyramid.sonification.defaultSpeechOptions.mapping.playDelay``.

    NOTE(review): every name below is defined twice (getter- and
    setter-shaped). In plain Python the second ``def`` silently replaces the
    first, so only the setter variants survive — this is generated code whose
    ``@property`` / ``@<name>.setter`` decorators appear to have been
    stripped; confirm against the generator output.
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
# NOTE(review): the two lines below look like stripped decorators — the route
# registration (``@_routes.route(...)``) and an event-identifier resolver
# (``@_event_id``) — confirm against the original source.
_routes.route('/<string:event_identifier>/sessions/languages', methods=['GET'])
_event_id
def get_languages(event_id):
    """Return the distinct, non-null session languages of an event, sorted
    ascending, as a JSON array of strings."""
    # zip(*rows) collapses the 1-column result into a single tuple of
    # languages when rows exist, and into an empty list otherwise.
    language_list = list(zip(*db.session.query(Session.language).distinct().filter((Session.event_id == event_id), (Session.language != None)).order_by(asc(Session.language)).all()))
    languages = list(map(str, (language_list[0] if language_list else [])))
    return jsonify(languages)
def main(args):
    """Print EXIF data, hashes, output type and PDB info for each file.

    :param args: parsed CLI namespace; only ``args.files`` (list of paths)
        is read.
    """
    files = args.files
    print(f' Parsed Arguments ')
    print(f"Files: {', '.join(files)}")
    print()
    for file in files:
        if exists(file):
            exif = getExif(file)
            # hashes is ordered (md5, sha1, sha256) per the labels below —
            # NOTE(review): assumed from the labels; confirm in getHashes.
            hashes = getHashes(file)
            outputType = getOutputType(file)
            pdb = getPDB(file)
            output = f'''{exif}
MD5: {hashes[0]}
SHA1: {hashes[1]}
SHA256: {hashes[2]}
Output Type: {outputType}'''
            if pdb:
                # PDB details are optional; append only when present.
                output += f'''
{pdb}'''
        else:
            output = f"[-] '{file}' does not exist"
        print(f' {file} ')
        print(output)
def extractDawningHowls(item):
    """Build a release message for a recognized series tag on *item*.

    Returns None for previews or items without a chapter/volume, a release
    message when a known series tag matches, and False otherwise.
    """
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
    # Skip previews and items lacking both a chapter and a volume number.
    if ('preview' in item['title'].lower()) or not (chp or vol):
        return None
    for series_name in ('Dragon Flies Phoenix Dances', 'Eastern Palace'):
        if series_name in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix)
    return False
class NewtonRootInverseTest(unittest.TestCase):
    """Tests for ``_matrix_inverse_root_newton``: checks both the iteration's
    internal residual (M_error) and the reconstruction error of X^(-root)
    against A, on identity and tridiagonal matrices."""

    def _test_newton_root_inverse(self, A, root, epsilon, max_iterations, A_tol, M_tol) -> None:
        """Run one Newton inverse-root computation and assert both errors."""
        (X, M, flag, iteration, M_error) = _matrix_inverse_root_newton(A, root, epsilon, max_iterations, M_tol)
        # Reconstruction error: || X^(-root) - A ||_inf, relative to max(1, ||A||_inf).
        abs_A_error = torch.dist(torch.linalg.matrix_power(X, (- root)), A, p=torch.inf)
        A_norm = torch.linalg.norm(A, ord=torch.inf)
        rel_A_error = (abs_A_error / torch.maximum(torch.tensor(1.0), A_norm))
        self.assertLessEqual(M_error, M_tol)
        self.assertLessEqual(rel_A_error, A_tol)

    def _test_newton_root_inverse_multi_dim(self, A, dims, roots, epsilons, max_iterations, A_tol, M_tol) -> None:
        """Sweep the cartesian product of dims x roots x epsilons.

        :param A: factory ``n -> torch.Tensor`` producing the test matrix.
        """
        for (n, root, epsilon) in itertools.product(dims, roots, epsilons):
            with self.subTest(f'With dim = {n}, root = {root}, epsilon = {epsilon}'):
                self._test_newton_root_inverse(A(n), root, epsilon, max_iterations, A_tol, M_tol)

    def test_newton_root_inverse_identity(self) -> None:
        """Identity matrices: exact roots, so tight tolerance."""
        A_tol = 1e-06
        M_tol = 1e-06
        max_iterations = 1000
        dims = [10, 100]
        roots = [2, 4, 8]
        epsilons = [0.0]

        def A(n):
            return torch.eye(n)
        self._test_newton_root_inverse_multi_dim(A, dims, roots, epsilons, max_iterations, A_tol, M_tol)

    def test_newton_root_inverse_tridiagonal_1(self) -> None:
        """Tridiagonal alpha*I + beta on the off-diagonals, with boosted
        corner entries; restricted to 2*beta <= alpha (diagonal dominance)."""
        A_tol = 0.0001
        M_tol = 1e-06
        max_iterations = 1000
        dims = [10, 100]
        roots = [2, 4, 8]
        epsilons = [0.0]
        for (alpha, beta) in itertools.product([0.001, 0.01, 0.1, 1.0, 10.0, 100.0], repeat=2):
            if ((2 * beta) > alpha):
                continue
            with self.subTest(f'Test with alpha = {alpha}, beta = {beta}'):
                # A closes over the current alpha/beta; safe because the
                # sweep below runs before the next loop iteration.
                def A(n):
                    diag = (alpha * torch.ones(n))
                    diag[0] += beta
                    diag[(n - 1)] += beta
                    off_diag = (beta * torch.ones((n - 1)))
                    return ((torch.diag(diag) + torch.diag(off_diag, diagonal=1)) + torch.diag(off_diag, diagonal=(- 1)))
                self._test_newton_root_inverse_multi_dim(A, dims, roots, epsilons, max_iterations, A_tol, M_tol)

    def test_newton_root_inverse_tridiagonal_2(self) -> None:
        """Variant with the first diagonal entry reduced by beta instead of
        boosted corners; same 2*beta <= alpha restriction."""
        A_tol = 0.0001
        M_tol = 1e-06
        max_iterations = 1000
        dims = [10, 100]
        roots = [2, 4, 8]
        epsilons = [0.0]
        for (alpha, beta) in itertools.product([0.001, 0.01, 0.1, 1.0, 10.0, 100.0], repeat=2):
            if ((2 * beta) > alpha):
                continue
            with self.subTest(f'Test with alpha = {alpha}, beta = {beta}'):
                def A(n):
                    diag = (alpha * torch.ones(n))
                    diag[0] -= beta
                    off_diag = (beta * torch.ones((n - 1)))
                    return ((torch.diag(diag) + torch.diag(off_diag, diagonal=1)) + torch.diag(off_diag, diagonal=(- 1)))
                self._test_newton_root_inverse_multi_dim(A, dims, roots, epsilons, max_iterations, A_tol, M_tol)
def export_tags(exaile):
    """Ask the user for a destination and dump every track's group tags to JSON.

    Tracks without any group tags are omitted; a ``_meta`` section records the
    export date, exporter version, and format version.
    """
    uri = dialogs.save(parent=exaile.gui.main.window, output_fname='tags.json', output_setting='plugin/grouptagger/export_dir', title=_('Export tags to JSON'), extensions={'.json': 'grouptagger JSON export'})
    if uri is None:
        # User cancelled the save dialog.
        return
    trackdata = {}
    for result in search.search_tracks_from_string(exaile.collection, ''):
        track = result.track
        groups = sorted(get_track_groups(track))
        if groups:
            trackdata[track.get_loc_for_io()] = groups
    meta = {'date': time.strftime('%Y-%m-%d %H:%M:%S'), 'exporter': ('Exaile/%s' % exaile.get_version()), 'version': 1}
    payload = {'_meta': meta, 'tracks': trackdata}
    with GioFileOutputStream(Gio.File.new_for_uri(uri), 'w') as fp:
        json.dump(payload, fp, sort_keys=True, indent=4, separators=(',', ': '))
    logger.info('Exported tags of %s tracks to %s', len(trackdata), uri)
class OptionSeriesBulletSonificationTracksMappingHighpassResonance(Options):
    """Generated Highcharts option wrapper for
    ``series.bullet.sonification.tracks.mapping.highpass.resonance``.

    NOTE(review): every name below is defined twice (getter- and
    setter-shaped). In plain Python the second ``def`` silently replaces the
    first, so only the setter variants survive — this is generated code whose
    ``@property`` / ``@<name>.setter`` decorators appear to have been
    stripped; confirm against the generator output.
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class DPRoundReducer(RoundReducer):
    """Round reducer adding user-level differential privacy: clips each user's
    model delta, sums across users, and adds calibrated noise before
    broadcasting the reduced module.

    Privacy is active only when noise_multiplier >= 0 AND the clipping value
    is finite; otherwise it degrades to the plain RoundReducer behavior.
    """

    def __init__(self, *, global_model: IFLModel, num_users_per_round: int, total_number_of_users: int, channel: Optional[IdentityChannel]=None, name: Optional[str]=None, **kwargs):
        init_self_cfg(self, component_class=__class__, config_class=DPRoundReducerConfig, **kwargs)
        super().__init__(global_model=global_model, num_users_per_round=num_users_per_round, total_number_of_users=total_number_of_users, channel=channel, name=name, **kwargs)
        self.num_users_per_round = num_users_per_round
        # DP is enabled only with a non-negative noise multiplier and a
        # finite clipping norm.
        self.privacy_on = ((self.cfg.privacy_setting.noise_multiplier >= 0) and (self.cfg.privacy_setting.clipping.clipping_value < float('inf')))
        self.clipping_value = self.cfg.privacy_setting.clipping.clipping_value
        self.user_update_clipper = UserUpdateClipper(max_norm=self.clipping_value, precision=self.dtype)
        if self.privacy_on:
            self.privacy_engine: IPrivacyEngine = PrivacyEngineFactory.create(self.cfg.privacy_setting, num_users_per_round, total_number_of_users, noise_type=self.cfg.noise_type)
            self.privacy_engine.attach(global_model=self.ref_model.fl_get_module())
            self._privacy_budget = PrivacyBudget(delta=self.cfg.privacy_setting.target_delta)

    # NOTE(review): takes ``cls`` with no decorator visible — presumably a
    # stripped ``@classmethod``; confirm against the original source.
    def _set_defaults_in_cfg(cls, cfg):
        pass

    def update_reduced_module(self, delta_module: nn.Module, weight: float) -> None:
        """Clip the per-user delta (when DP is on) before accumulating it."""
        if self.privacy_on:
            self.user_update_clipper.clip(delta_module)
        super().update_reduced_module(delta_module, weight)

    def reduce(self) -> Tuple[(nn.Module, float)]:
        """Sum clipped updates, add DP noise on the master worker, and
        broadcast the noised module and weight sum to all workers."""
        if (not self.privacy_on):
            return super().reduce()
        self._reduce_all(OperationType.SUM)
        self.logger.debug(f'Sum of weights after aggregation: {self.sum_weights}')
        if FLDistributedUtils.is_master_worker():
            total_weights = float(self.sum_weights.item())
            # DP accounting assumes uniform weights summing to the user count.
            if (abs((total_weights - self.num_users_per_round)) > 1e-05):
                self.logger.error(f'total weights {total_weights} is not equal to number of users {self.num_users_per_round}. Please make sure reduction_type=AVERAGE.')
            # NOTE(review): the string below is a stray expression — almost
            # certainly a docstring misplaced by the decompiler; it explains
            # that the final noise equals
            # (max_norm * noise_multiplier) / users_per_round.
            "\n The final amount of noise added must be equal to\n (max_norm * noise_multiplier) / users_per_round, similar to\n Google's user-level DP Note that in the _generate_noise() function, the noise_multiplier\n is already multiplied.\n "
            self.privacy_engine.add_noise(self.reduced_module, (self.clipping_value / total_weights))
            state_dict = FLModelParamUtils.get_state_dict(self.reduced_module, only_federated_params=self.cfg.only_federated_params)
            FLDistributedUtils.distributed_operation(chain([self.sum_weights], state_dict.values()), OperationType.BROADCAST)
            self.logger.debug(f'Sum of client weights after reduction on worker: {self.sum_weights}')
            self._privacy_budget = self.privacy_engine.get_privacy_spent()
            self.logger.info(f'User Privacy Budget: {self._privacy_budget}')
        return (self.reduced_module, float(self.sum_weights.item()))

    # NOTE(review): accessor-shaped — presumably a stripped ``@property``.
    def privacy_budget(self) -> PrivacyBudget:
        return self._privacy_budget

    def reset(self, ref_model: IFLModel) -> None:
        """Reset aggregation state and re-attach the privacy engine."""
        super().reset(ref_model)
        if self.privacy_on:
            self.privacy_engine.attach(global_model=self.ref_model.fl_get_module())
class KiwoomOpenApiPlusConditionEventHandler(KiwoomOpenApiPlusEventHandlerForGrpc, Logging):
    """gRPC event handler that runs a Kiwoom OpenAPI+ "condition" search and
    streams TR/real-time condition events (optionally with per-code quote
    info) to the observer."""

    def __init__(self, control, request, context, screen_manager):
        super().__init__(control, context)
        self._request = request
        self._screen_manager = screen_manager
        self._screen_no = request.screen_no
        self._condition_name = request.condition_name
        self._condition_index = request.condition_index
        self._search_type = request.search_type
        self._request_name = (request.request_name or '')
        self._with_info = request.flags.with_info
        self._is_future_option = request.flags.is_future_option
        # type_flag 3 = futures/options, 0 = equities; selects the TR code.
        self._type_flag = (3 if self._is_future_option else 0)
        self._trcode = {0: 'OPTKWFID', 3: 'OPTFOFID'}[self._type_flag]
        self._trinfo = KiwoomOpenApiPlusTrInfo.get_trinfo_by_code(self._trcode)
        if (self._trinfo is None):
            # NOTE(review): logs self._trinfo (None) rather than self._trcode,
            # and still dereferences it below — would raise AttributeError;
            # presumably unreachable for the two known TR codes.
            self.logger.error('Cannot find names for trcode %s', self._trinfo)
        self._single_names = self._trinfo.get_single_output_names()
        self._multi_names = self._trinfo.get_multi_output_names()

    def on_enter(self):
        """Validate the condition, borrow a screen, register cleanup
        callbacks, and kick off the (rate-limited) condition search."""
        self.control.EnsureConditionLoaded()
        condition_names = self.control.GetConditionNameListAsList()
        assert ((self._condition_index, self._condition_name) in condition_names)
        self._screen_no = self._screen_manager.borrow_screen(self._screen_no)
        # Cleanup callbacks run in registration order on exit.
        self.add_callback(self._screen_manager.return_screen, self._screen_no)
        self.add_callback(self.control.DisconnectRealData, self._screen_no)
        self.add_callback(self.control.SendConditionStop, self._screen_no, self._condition_name, self._condition_index)
        KiwoomOpenApiPlusError.try_or_raise_boolean(self.control.RateLimitedSendCondition.queuedCall(self._screen_no, self._condition_name, self._condition_index, self._search_type), 'Failed to send condition', except_callback=self.observer.on_error)

    def OnReceiveTrCondition(self, scrnno, codelist, condition_name, condition_index, prevnext):
        """Forward the TR condition result; optionally request per-code quote
        info; complete the stream unless continuation/real-time is expected."""
        if ((scrnno, condition_name, condition_index) == (self._screen_no, self._condition_name, self._condition_index)):
            response = KiwoomOpenApiPlusService_pb2.ListenResponse()
            response.name = 'OnReceiveTrCondition'
            response.arguments.add().string_value = scrnno
            response.arguments.add().string_value = codelist
            response.arguments.add().string_value = condition_name
            response.arguments.add().long_value = condition_index
            response.arguments.add().long_value = prevnext
            self.observer.on_next(response)
            if self._with_info:
                # A '^' in the list means 'code^name' pairs; strip to codes.
                if ('^' in codelist):
                    items = string_to_list(codelist, sep=';')
                    items = [string_to_list(item, sep='^') for item in items]
                    items = [tuple(item) for item in items]
                    codes = [item[0] for item in items]
                else:
                    codes = string_to_list(codelist, sep=';')
                KiwoomOpenApiPlusError.try_or_raise(self.control.RateLimitedCommKwRqData.queuedCall(codelist, 0, len(codes), self._type_flag, self._request_name, self._screen_no), except_callback=self.observer.on_error)
            # prevnext other than ''/'0' means more pages are available.
            should_continue = (str(prevnext) not in ['', '0'])
            # search_type 1 keeps streaming real-time condition events.
            should_not_complete = ((self._search_type == 1) or self._with_info or should_continue)
            should_complete = (not should_not_complete)
            if should_complete:
                self.observer.on_completed()
                return
            elif should_continue:
                try:
                    # NOTE(review): continuation is deliberately disabled —
                    # the raise makes the queuedCall below unreachable.
                    raise KiwoomOpenApiPlusError('Should not reach here')
                    self.control.RateLimitedSendCondition.queuedCall(self._screen_no, self._condition_name, self._condition_index, int(prevnext))
                except KiwoomOpenApiPlusError as e:
                    self.observer.on_error(e)
                    return

    def OnReceiveRealCondition(self, code, condition_type, condition_name, condition_index):
        """Forward one real-time condition event; optionally request info
        for the single code involved."""
        if ((condition_name, int(condition_index)) == (self._condition_name, self._condition_index)):
            response = KiwoomOpenApiPlusService_pb2.ListenResponse()
            response.name = 'OnReceiveRealCondition'
            response.arguments.add().string_value = code
            response.arguments.add().string_value = condition_type
            response.arguments.add().string_value = condition_name
            response.arguments.add().string_value = condition_index
            self.observer.on_next(response)
            if self._with_info:
                codelist = code
                codes = [code]
                KiwoomOpenApiPlusError.try_or_raise(self.control.RateLimitedCommKwRqData.queuedCall(codelist, 0, len(codes), self._type_flag, self._request_name, self._screen_no), except_callback=self.observer.on_error)

    def OnReceiveTrData(self, scrnno, rqname, trcode, recordname, prevnext, _datalength, _errorcode, _message, _splmmsg):
        """Forward TR quote data (single and multi outputs) for the codes
        requested via CommKwRqData, then complete or continue the stream."""
        if ((scrnno, rqname) == (self._screen_no, self._request_name)):
            response = KiwoomOpenApiPlusService_pb2.ListenResponse()
            response.name = 'OnReceiveTrData'
            response.arguments.add().string_value = scrnno
            response.arguments.add().string_value = rqname
            response.arguments.add().string_value = trcode
            response.arguments.add().string_value = recordname
            response.arguments.add().string_value = prevnext
            should_continue = (str(prevnext) not in ['', '0'])
            should_not_complete = ((self._search_type == 1) or should_continue)
            should_complete = (not should_not_complete)
            repeat_cnt = self.control.GetRepeatCnt(trcode, recordname)
            assert (trcode.upper() == self._trcode)
            if (repeat_cnt > 0):
                if (len(self._multi_names) == 0):
                    # Some TRs misreport: swap single/multi name sets so the
                    # repeated rows can still be read.
                    self.logger.warning('Repeat count greater than 0, but no multi data names available, fallback to sigle data names')
                    multi_names = self._multi_names
                    self._multi_names = self._single_names
                    self._single_names = multi_names
                if (len(self._multi_names) > 0):
                    rows = [[self.control.GetCommData(trcode, recordname, i, name).strip() for name in self._multi_names] for i in range(repeat_cnt)]
                    response.multi_data.names.extend(self._multi_names)
                    for row in rows:
                        response.multi_data.values.add().values.extend(row)
            if (len(self._single_names) > 0):
                values = [self.control.GetCommData(trcode, recordname, 0, name).strip() for name in self._single_names]
                response.single_data.names.extend(self._single_names)
                response.single_data.values.extend(values)
            self.observer.on_next(response)
            if should_complete:
                self.observer.on_completed()
                return
            elif should_continue:
                try:
                    # NOTE(review): continuation is deliberately disabled, as
                    # above; the unreachable call also references
                    # self._codelist / self._codes which are never assigned
                    # in this class — would be a bug if ever re-enabled.
                    raise KiwoomOpenApiPlusError('Should not reach here')
                    KiwoomOpenApiPlusError.try_or_raise(self.control.RateLimitedCommKwRqData.queuedCall(self._codelist, int(prevnext), len(self._codes), (3 if self._is_future_option else 0), self._request_name, self._screen_no), except_callback=self.observer.on_error)
                except KiwoomOpenApiPlusError as e:
                    self.observer.on_error(e)
                    return
def main():
    """Evaluate omorfi generation against SIGMORPHON shared-task TSV data.

    Reads tab-separated lines in one of three formats, generates a surface
    form for each lemma + UniMorph-feature pair, and writes counts and
    percentages of correct / OOV items to the statistics file.

    Exit codes: 0 on success, 1 on unknown format, 2 on I/O failure or
    empty input.
    """
    a = ArgumentParser()
    a.add_argument('-a', '--analyser', metavar='FSAFILE', required=True, help='load analyser from FSAFILE')
    # Fix: help text previously said 'analyser' (copy-paste).
    a.add_argument('-g', '--generator', metavar='FSAFILE', required=True, help='load generator from FSAFILE')
    a.add_argument('-i', '--input', metavar='INFILE', type=open, dest='infile', help='source of analysis data')
    a.add_argument('-o', '--output', metavar='OUTFILE', type=FileType('w'), dest='outfile', help='log outputs to OUTFILE')
    a.add_argument('-X', '--statistics', metavar='STATFILE', type=FileType('w'), dest='statfile', help='statistics')
    a.add_argument('-v', '--verbose', action='store_true', default=False, help='Print verbosely while processing')
    a.add_argument('-C', '--no-casing', action='store_true', default=False, help='Do not try to recase input and output when matching')
    a.add_argument('-t', '--threshold', metavar='THOLD', default=99, help='if coverage is less than THOLD exit with error')
    a.add_argument('-F', '--format', metavar='FMT', required=True, help='which SIGMORHON shared task format is used')
    options = a.parse_args()
    omorfi = Omorfi(options.verbose)
    try:
        if options.analyser:
            if options.verbose:
                print('reading analyser from', options.analyser)
            omorfi.load_analyser(options.analyser)
        if options.generator:
            if options.verbose:
                print('reading generator from', options.generator)
            omorfi.load_generator(options.generator)
        if not options.infile:
            options.infile = stdin
            print('reading from <stdin>')
        if not options.statfile:
            options.statfile = stdout
        if not options.outfile:
            options.outfile = stdout
    except IOError:
        print('Could not process file', options.analyser, file=stderr)
        exit(2)
    correct = 0
    incorrect = 0
    oov = 0
    lines = 0
    realstart = perf_counter()
    cpustart = process_time()
    for line in options.infile:
        fields = line.strip().split('\t')
        if len(fields) < 3:
            print('ERROR: Skipping line', fields, file=stderr)
            continue
        omors = None
        lemma = None
        expected = None
        print('<<<', fields)
        if options.format == '1':
            # Format 1: lemma, features, expected surface form.
            lemma = fields[0]
            omors = unimorph2omor(fields[1])
            expected = fields[2]
        elif options.format == '2':
            # Format 2: source features, source form, target features,
            # expected form. Lemma is recovered by analysing the source form.
            srcomors = unimorph2omor(fields[0])
            srchyps = omorfi.analyse(fields[1])
            for srchyp in srchyps:
                if (srcomors in srchyp.raw) and (len(srchyp.get_lemmas()) == 1):
                    lemma = srchyp.get_lemmas()[0]
            if not lemma:
                # Fallback: concatenate lemmas of the first hypothesis.
                lemma = ''.join(srchyps[0].get_lemmas())
            omors = unimorph2omor(fields[2])
            expected = fields[3]
        elif options.format == '3':
            # Format 3: source form, target features, expected form.
            srchyps = omorfi.analyse(fields[0])
            for srchyp in srchyps:
                if len(srchyp.get_lemmas()) == 1:
                    lemma = srchyp.get_lemmas()[0]
            if not lemma:
                lemma = ''.join(srchyps[0].get_lemmas())
            omors = unimorph2omor(fields[1])
            expected = fields[2]
        else:
            print('format fail', options.format)
            exit(1)
        genomor = (('[WORD_ID=' + lemma) + ']') + omors
        print('>>> ', genomor)
        generations = omorfi.generate(genomor)
        # A remaining '[' in the output means ungenerated analysis symbols.
        if (not generations) or ('[' in generations):
            oov += 1
            genat1 = lemma
            print('OOV', genat1)
        else:
            # Take the first generated form when several are produced.
            genat1 = generations.split('/')[0]
            print(' ', genat1)
        if genat1 == expected:
            correct += 1
        else:
            # Bug fix: for format '2' the mismatch message previously printed
            # fields[2] although the comparison used fields[3].
            print('MIS', genat1, '!=', expected)
            incorrect += 1
        lines += 1
        if options.verbose and ((lines % 1000) == 0):
            print(lines, '...')
    realend = perf_counter()
    cpuend = process_time()
    print('CPU time:', (cpuend - cpustart), 'real time:', (realend - realstart))
    if lines == 0:
        print('Needs more than 0 lines to determine something', file=stderr)
        exit(2)
    # Typo fix in the header: 'Corect' -> 'Correct'.
    print('Lines', 'Correct', 'OOV', sep='\t', file=options.statfile)
    print(lines, correct, oov, sep='\t', file=options.statfile)
    # First column is always 100.0 (lines/lines); kept for output parity.
    print(((lines / lines) * 100), ((correct / lines) * 100), ((oov / lines) * 100), sep='\t', file=options.statfile)
    # NOTE(review): --threshold is parsed but never enforced, although its
    # help text promises a non-zero exit below THOLD — confirm intent before
    # enforcing (it would change exit-code behavior).
    exit(0)
class PackageGetter(object):
    """Base class for objects that fetch a source package and manage the
    directory it gets saved into."""

    def get(self):
        """Download the package (subclass responsibility)."""
        pass

    def get_name_version(self):
        """Report the package's name and version (subclass responsibility)."""
        pass

    def __del__(self):
        # Drop the temporary working directory, if one was ever created.
        if hasattr(self, 'temp_dir') and os.path.exists(self.temp_dir):
            shutil.rmtree(self.temp_dir)

    def save_dir_init(self, save_dir):
        """Resolve the directory sources are saved to, creating it if needed."""
        self.save_dir = save_dir or settings.DEFAULT_PKG_SAVE_PATH
        if self.save_dir == settings.DEFAULT_PKG_SAVE_PATH:
            self.save_dir += '/SOURCES'
        if not os.path.exists(self.save_dir):
            if self.save_dir == settings.DEFAULT_PKG_SAVE_PATH + '/SOURCES':
                # Default rpmbuild layout: let rpmdevtools create the tree.
                try:
                    subprocess.Popen('rpmdev-setuptree', stdout=subprocess.PIPE).wait()
                    logger.info('Using rpmdevtools package to make rpmbuild folders tree.')
                except OSError:
                    self.save_dir = '/tmp'
                    logger.warning('Package rpmdevtools is missing , using default folder: {0} to store {1}.'.format(self.save_dir, self.name))
                    logger.warning('Specify folder to store a file (SAVE_DIR) or install rpmdevtools.')
            else:
                os.makedirs(self.save_dir)
        logger.info('Using {0} as directory to save source.'.format(self.save_dir))
def test_shp_to_shp_records_geom_type_is_multilinestring(create_input_file, create_output_centerline_file):
    """Converting a polygon shapefile via the CLI must produce records whose
    geometry type is MultiLineString."""
    expected_type = 'MultiLineString'
    src_shp = create_input_file('polygons', 'shp')
    dst_shp = create_output_centerline_file('shp')
    CliRunner().invoke(create_centerlines, [src_shp, dst_shp])
    with fiona.open(dst_shp) as collection:
        for rec in collection:
            assert (rec.get('geometry').get('type') == expected_type)
def main():
    """CLI entry point: parse arguments and generate tilegrid.json."""
    import argparse
    parser = argparse.ArgumentParser(description='Generate tilegrid.json from bitstream deltas')
    parser.add_argument('--verbose', action='store_true', help='')
    parser.add_argument('--out', default='/dev/stdout', help='Output JSON')
    parser.add_argument('--tiles', default='tiles.txt', help='Input tiles.txt tcl output', required=True)
    parser.add_argument('--pin_func', help='List of sites with pin functions', required=True)
    opts = parser.parse_args()
    run(opts.tiles, opts.pin_func, opts.out, verbose=opts.verbose)
def join_data():
    """Interactively ask for a data folder and filename prefix, then build
    and return a data_preprocessing.Aggregator for them."""
    from .misc import data_preprocessing
    print('Joining data')
    sys.stdout.write(' data subfolder [optional, default=data]? ')
    folder = input()
    if len(folder) == 0:
        folder = 'data'
    prefix = input(' data files start with [optional]? ')
    return data_preprocessing.Aggregator(folder, prefix)
def get_bert(norm_text, word2ph, language, device, style_text, style_weight):
    """Dispatch to the language-specific BERT feature extractor
    ('ZH', 'EN' or 'JP') and return its features."""
    from .chinese_bert import get_bert_feature as zh_bert
    from .english_bert_mock import get_bert_feature as en_bert
    from .japanese_bert import get_bert_feature as jp_bert
    extractors = {'ZH': zh_bert, 'EN': en_bert, 'JP': jp_bert}
    extract = extractors[language]
    return extract(norm_text, word2ph, device, style_text, style_weight)
class OptionSeriesBoxplotSonificationDefaultinstrumentoptionsMappingPlaydelay(Options):
    """Generated Highcharts option wrapper for
    series.boxplot.sonification.defaultInstrumentOptions.mapping.playDelay.

    NOTE(review): each name below appears twice — a zero-argument reader
    followed by a one-argument writer. As plain defs the second shadows the
    first; this pattern normally carries @property / @<name>.setter
    decorators, which may have been lost from this copy — confirm against
    the original file.
    """

    def mapFunction(self):
        # Reader: returns the configured value (None when unset).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Writer: stores the raw value without JS-type conversion.
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class _ReplyToMessage(_Message):
    """Event filter matching messages that are replies — optionally only
    replies to a specific set of message ids."""

    def __init__(self, *message_ids: Optional[int]):
        self.message_ids = message_ids

    def filter(self, event: EventCommon):
        """Accept only events passing the base filter whose message is a
        reply (to one of self.message_ids, when any were given)."""
        if not super().filter(event):
            return False
        reply_id = cast(MessageEdited.Event, event).message.reply_to_msg_id
        if reply_id is None:
            return False
        if not self.message_ids:
            return True
        return reply_id in self.message_ids

    def __repr__(self):
        return f'ReplyTo({self.message_ids})'
class TicketLog(SimpleEntity):
    """Audit-trail row recording one status transition (an action) of a
    Ticket.
    """
    # Class-body import: defaults.ticket_workflow is evaluated below at
    # class-definition time (for the Enum of allowed actions).
    from stalker import defaults
    __tablename__ = 'TicketLogs'
    __mapper_args__ = {'polymorphic_identity': 'TicketLog'}
    # Shares its primary key with SimpleEntities (joined-table inheritance).
    ticket_log_id = Column('id', ForeignKey('SimpleEntities.id'), primary_key=True)
    from_status_id = Column(Integer, ForeignKey('Statuses.id'))
    to_status_id = Column(Integer, ForeignKey('Statuses.id'))
    from_status = relationship('Status', primaryjoin='TicketLogs.c.from_status_id==Statuses.c.id')
    to_status = relationship('Status', primaryjoin='TicketLogs.c.to_status_id==Statuses.c.id')
    # Allowed actions come from the configured ticket workflow keys.
    action = Column(Enum(*defaults.ticket_workflow.keys(), name='TicketActions'))
    ticket_id = Column(Integer, ForeignKey('Tickets.id'))
    ticket = relationship('Ticket', primaryjoin='TicketLogs.c.ticket_id==Tickets.c.id', back_populates='logs')

    def __init__(self, ticket=None, from_status=None, to_status=None, action=None, **kwargs):
        # Logs need a unique name; generate one instead of requiring callers
        # to supply it.
        kwargs['name'] = ('TicketLog_' + uuid.uuid4().hex)
        super(TicketLog, self).__init__(**kwargs)
        self.ticket = ticket
        self.from_status = from_status
        self.to_status = to_status
        self.action = action
# NOTE(review): the two leading ".integrationtest" / ".skipif" lines read like
# pytest marks whose "@pytest.mark" prefix was lost from this copy — confirm
# against the original file; as written they are not valid statements.
.integrationtest
.skipif((not has_postgres_configured), reason='PostgresSQL not configured')
def test_psycopg_rows_affected(instrument, postgres_connection, elasticapm_client):
    """Check the psycopg instrumentation records rows_affected for
    INSERT/UPDATE/DELETE spans but omits it for SELECT."""
    cursor = postgres_connection.cursor()
    try:
        elasticapm_client.begin_transaction('web.django')
        cursor.execute("INSERT INTO test VALUES (4, 'four')")
        cursor.execute('SELECT * FROM test')
        cursor.execute("UPDATE test SET name = 'five' WHERE id = 4")
        cursor.execute('DELETE FROM test WHERE id = 4')
        elasticapm_client.end_transaction(None, 'test-transaction')
    finally:
        # NOTE(review): these assertions run in ``finally`` even if the try
        # body raised, which can mask the original error — confirm intent.
        transactions = elasticapm_client.events[TRANSACTION]
        spans = elasticapm_client.spans_for_transaction(transactions[0])
        assert (spans[0]['name'] == 'INSERT INTO test')
        assert (spans[0]['context']['db']['rows_affected'] == 1)
        assert (spans[1]['name'] == 'SELECT FROM test')
        assert ('rows_affected' not in spans[1]['context']['db'])
        assert (spans[2]['name'] == 'UPDATE test')
        assert (spans[2]['context']['db']['rows_affected'] == 1)
        assert (spans[3]['name'] == 'DELETE FROM test')
        assert (spans[3]['context']['db']['rows_affected'] == 1)
class OptionSeriesColumnDatasorting(Options):
    """Generated Highcharts option wrapper for series.column.dataSorting.

    NOTE(review): each name below is defined twice (reader then writer); the
    second plain def shadows the first. The pattern normally carries
    @property / @<name>.setter decorators — confirm they were not lost from
    this copy.
    """

    def enabled(self):
        # Reader: whether data sorting is enabled (None when unset).
        return self._config_get(None)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def matchByName(self):
        return self._config_get(None)

    def matchByName(self, flag: bool):
        self._config(flag, js_type=False)

    def sortKey(self):
        # Reader: defaults to sorting by the 'y' value.
        return self._config_get('y')

    def sortKey(self, text: str):
        self._config(text, js_type=False)
class BuildCOLRTest(object):
    """Tests for the COLR table builder: automatic/explicit version choice,
    v1 layer handling, clip-list construction and base-glyph deduplication."""

    def test_automatic_version_all_solid_color_glyphs(self):
        # Plain (glyphName, paletteIndex) layer lists stay COLR version 0.
        colr = builder.buildCOLR({'a': [('b', 0), ('c', 1)]})
        assert (colr.version == 0)
        assert hasattr(colr, 'ColorLayers')
        assert (colr.ColorLayers['a'][0].name == 'b')
        assert (colr.ColorLayers['a'][1].name == 'c')

    def test_automatic_version_no_solid_color_glyphs(self):
        # Gradient paints force COLRv1; no v0 record arrays should be emitted.
        colr = builder.buildCOLR({'a': (ot.PaintFormat.PaintColrLayers, [(ot.PaintFormat.PaintGlyph, {'Format': int(ot.PaintFormat.PaintRadialGradient), 'ColorLine': {'ColorStop': [(0.0, 0), (1.0, 1)], 'Extend': 'repeat'}, 'x0': 1, 'y0': 0, 'x1': 10, 'y1': 0, 'r0': 4, 'r1': 2}, 'b'), (ot.PaintFormat.PaintGlyph, {'Format': ot.PaintFormat.PaintSolid, 'PaletteIndex': 2, 'Alpha': 0.8}, 'c')]), 'd': (ot.PaintFormat.PaintColrLayers, [{'Format': ot.PaintFormat.PaintGlyph, 'Glyph': 'e', 'Paint': {'Format': ot.PaintFormat.PaintLinearGradient, 'ColorLine': {'ColorStop': [(0.0, 2), (1.0, 3)], 'Extend': 'reflect'}, 'x0': 1, 'y0': 2, 'x1': 3, 'y1': 4, 'x2': 2, 'y2': 2}}])})
        assertIsColrV1(colr)
        assert (colr.table.BaseGlyphRecordCount == 0)
        assert (colr.table.BaseGlyphRecordArray is None)
        assert (colr.table.LayerRecordCount == 0)
        assert (colr.table.LayerRecordArray is None)

    def test_automatic_version_mixed_solid_and_gradient_glyphs(self):
        # Mixed input: v0 records for the solid glyph plus v1 lists for 'd'.
        colr = builder.buildCOLR({'a': [('b', 0), ('c', 1)], 'd': (ot.PaintFormat.PaintColrLayers, [(ot.PaintFormat.PaintGlyph, {'Format': ot.PaintFormat.PaintLinearGradient, 'ColorLine': {'ColorStop': [(0.0, 2), (1.0, 3)]}, 'x0': 1, 'y0': 2, 'x1': 3, 'y1': 4, 'x2': 2, 'y2': 2}, 'e'), (ot.PaintFormat.PaintGlyph, (ot.PaintFormat.PaintSolid, 2, 0.8), 'f')])})
        assertIsColrV1(colr)
        assert (colr.table.VarStore is None)
        assert (colr.table.BaseGlyphRecordCount == 1)
        assert isinstance(colr.table.BaseGlyphRecordArray, ot.BaseGlyphRecordArray)
        assert (colr.table.LayerRecordCount == 2)
        assert isinstance(colr.table.LayerRecordArray, ot.LayerRecordArray)
        assert isinstance(colr.table.BaseGlyphList, ot.BaseGlyphList)
        assert (colr.table.BaseGlyphList.BaseGlyphCount == 1)
        assert isinstance(colr.table.BaseGlyphList.BaseGlyphPaintRecord[0], ot.BaseGlyphPaintRecord)
        assert (colr.table.BaseGlyphList.BaseGlyphPaintRecord[0].BaseGlyph == 'd')
        assert isinstance(colr.table.LayerList, ot.LayerList)
        assert (colr.table.LayerList.Paint[0].Glyph == 'e')

    def test_explicit_version_0(self):
        colr = builder.buildCOLR({'a': [('b', 0), ('c', 1)]}, version=0)
        assert (colr.version == 0)
        assert hasattr(colr, 'ColorLayers')

    def test_explicit_version_1(self):
        colr = builder.buildCOLR({'a': (ot.PaintFormat.PaintColrLayers, [(ot.PaintFormat.PaintGlyph, (ot.PaintFormat.PaintSolid, 0), 'b'), (ot.PaintFormat.PaintGlyph, (ot.PaintFormat.PaintSolid, 1), 'c')])}, version=1)
        assert (colr.version == 1)
        assert (not hasattr(colr, 'ColorLayers'))
        assert hasattr(colr, 'table')
        assert isinstance(colr.table, ot.COLR)
        assert (colr.table.VarStore is None)

    def test_paint_one_colr_layers(self):
        # A PaintColrLayers with a single child is collapsed to that child.
        colr = builder.buildCOLR({'a': (ot.PaintFormat.PaintColrLayers, [(ot.PaintFormat.PaintGlyph, (ot.PaintFormat.PaintSolid, 0), 'b')])})
        assert (colr.table.LayerList is None), 'PaintColrLayers should be gone'
        assert (colr.table.BaseGlyphList.BaseGlyphCount == 1)
        paint = colr.table.BaseGlyphList.BaseGlyphPaintRecord[0].Paint
        assert (paint.Format == ot.PaintFormat.PaintGlyph)
        assert (paint.Paint.Format == ot.PaintFormat.PaintSolid)

    def test_build_clip_list(self):
        # Non-integer clip boxes are rounded outward; 5-tuples carry a
        # variation index and become Format 2 boxes.
        colr = builder.buildCOLR({'a': (ot.PaintFormat.PaintGlyph, (ot.PaintFormat.PaintSolid, 0), 'b'), 'c': (ot.PaintFormat.PaintGlyph, (ot.PaintFormat.PaintSolid, 1), 'd')}, clipBoxes={'a': (0, 0, 1000, 1000, 0), 'c': ((- 100.8), (- 200.4), 1100.1, 1200.5), 'e': (0, 0, 10, 10)})
        assert (colr.table.ClipList.Format == 1)
        clipBoxes = colr.table.ClipList.clips
        assert ([(baseGlyph, clipBox.as_tuple()) for (baseGlyph, clipBox) in clipBoxes.items()] == [('a', (0, 0, 1000, 1000, 0)), ('c', ((- 101), (- 201), 1101, 1201)), ('e', (0, 0, 10, 10))])
        assert (clipBoxes['a'].Format == 2)
        assert (clipBoxes['c'].Format == 1)
        assert (clipBoxes['e'].Format == 1)

    def test_duplicate_base_glyphs(self):
        # Identical layer dicts must be shared: all three base glyphs point
        # at the same FirstLayerIndex/NumLayers slice.
        layers = {'Format': ot.PaintFormat.PaintColrLayers, 'Layers': [(ot.PaintFormat.PaintGlyph, (ot.PaintFormat.PaintSolid, 0), 'd'), (ot.PaintFormat.PaintGlyph, (ot.PaintFormat.PaintSolid, 1), 'e')]}
        colr = builder.buildCOLR({'a': layers, 'b': deepcopy(layers), 'c': deepcopy(layers)}).table
        baseGlyphs = colr.BaseGlyphList.BaseGlyphPaintRecord
        assert (len(baseGlyphs) == 3)
        assert (baseGlyphs[0].BaseGlyph == 'a')
        assert (baseGlyphs[1].BaseGlyph == 'b')
        assert (baseGlyphs[2].BaseGlyph == 'c')
        expected = {'Format': 1, 'FirstLayerIndex': 0, 'NumLayers': 2}
        assert (baseGlyphs[0].Paint.__dict__ == expected)
        assert (baseGlyphs[1].Paint.__dict__ == expected)
        assert (baseGlyphs[2].Paint.__dict__ == expected)
class Extractor():
    """Runs a configurable chain of extractor plugins over an item and
    merges their candidate results back into the item."""

    def __init__(self, extractor_list):
        """Instantiate every extractor named in *extractor_list*.

        Each entry is either a module name (every AbstractExtractor subclass
        found in that module is instantiated) or a ``(module, class)`` tuple.
        """
        def proc_instance(spec, instance):
            # ``spec`` is passed explicitly instead of closing over the loop
            # variable, avoiding the late-binding-closure pitfall.
            if (instance is not None):
                self.log.info('Extractor initialized: %s', spec)
                self.extractor_list.append(instance)
            else:
                self.log.error('Misconfiguration: An unknown Extractor was found and will be ignored: %s', spec)
        self.log = logging.getLogger(__name__)
        self.extractor_list = []
        for extractor in extractor_list:
            if isinstance(extractor, tuple):
                extractor_module = extractor[0]
            else:
                extractor_module = extractor
            module = importlib.import_module(((__package__ + '.extractors.') + extractor_module))
            if isinstance(extractor, tuple):
                # Look the class up before calling it: previously a missing
                # attribute was invoked as ``None()``, crashing before the
                # error branch in proc_instance could ever run.
                cls = getattr(module, extractor[1], None)
                proc_instance(extractor, (cls() if (cls is not None) else None))
            else:
                for member in inspect.getmembers(module, inspect.isclass):
                    if (issubclass(member[1], AbstractExtractor) and (member[0] != 'AbstractExtractor')):
                        cls = getattr(module, member[0], None)
                        proc_instance(extractor, (cls() if (cls is not None) else None))
        self.cleaner = Cleaner()
        self.comparer = Comparer()

    def extract(self, item):
        """Run every extractor on *item*, clean and compare the candidates,
        and copy the winning article's fields onto the item."""
        article_candidates = []
        for extractor in self.extractor_list:
            article_candidates.append(extractor.extract(item))
        article_candidates = self.cleaner.clean(article_candidates)
        article = self.comparer.compare(item, article_candidates)
        item['article_title'] = article.title
        item['article_description'] = article.description
        item['article_text'] = article.text
        item['article_image'] = article.topimage
        item['article_author'] = article.author
        item['article_publish_date'] = article.publish_date
        item['article_language'] = article.language
        return item
class OptionSeriesTimelineSonificationTracksMappingLowpass(Options):
    """Generated option wrapper for the lowpass-filter mapping of a timeline
    series' sonification track (frequency and resonance sub-options)."""

    def frequency(self) -> 'OptionSeriesTimelineSonificationTracksMappingLowpassFrequency':
        # Sub-configuration object for the filter cutoff frequency.
        return self._config_sub_data('frequency', OptionSeriesTimelineSonificationTracksMappingLowpassFrequency)

    def resonance(self) -> 'OptionSeriesTimelineSonificationTracksMappingLowpassResonance':
        # Sub-configuration object for the filter resonance.
        return self._config_sub_data('resonance', OptionSeriesTimelineSonificationTracksMappingLowpassResonance)
class GeoLeaflet():
    """Factory for Leaflet-based geo chart components bound to a report page."""

    def __init__(self, ui):
        self.page = ui.page
        self.chartFamily = 'Leaflet'

    def map(self, record=None, y_column: list=None, x_axis: str=None, profile: Union[(dict, bool)]=None, options: dict=None, width: Union[(int, tuple)]=(100, '%'), height: Union[(int, tuple)]=(Defaults_html.CHARTS_HEIGHT_PX, 'px'), html_code: str=None):
        """Create a generic Leaflet map component."""
        return geo.GeoLeaflet.GeoLeaflet(
            self.page,
            Arguments.size(width, unit='%'),
            Arguments.size(height, unit='px'),
            html_code,
            (options or {}),
            profile,
        )

    def europe(self, record=None, y_column: list=None, x_axis: str=None, profile: Union[(dict, bool)]=None, options: dict=None, width: Union[(int, tuple)]=(100, '%'), height: Union[(int, tuple)]=(Defaults_html.CHARTS_HEIGHT_PX, 'px'), html_code: str=None):
        """Create a Leaflet map component (Europe preset)."""
        chart = geo.GeoLeaflet.GeoLeaflet(
            self.page,
            Arguments.size(width, unit='%'),
            Arguments.size(height, unit='px'),
            html_code,
            (options or {}),
            profile,
        )
        return chart
def test_comp8():
    """Completion in subdir/test_inherit.f90 should offer the inherited
    'val' of type REAL(8)."""
    request = write_rpc_request(1, 'initialize', {'rootPath': str(test_dir)})
    target = test_dir / 'subdir' / 'test_inherit.f90'
    request += comp_request(target, 10, 11)
    errcode, results = run_request(request, ['--use_signature_help'])
    assert (errcode == 0)
    expected = ([1, 'val', 'REAL(8)'],)
    assert ((len(results) - 1) == len(expected))
    for idx, ref in enumerate(expected):
        validate_comp(results[(idx + 1)], ref)
def exposed_retrigger_feed_urls():
    """Collect every URL referenced by stored RSS feed posts and re-queue
    them as fetch jobs, grouped by netloc."""
    urls = set()
    with db.session_context() as sess:
        # NOTE(review): the pageUrl= argument below contains a mangled string
        # literal in this copy (the URL text appears stripped) — restore it
        # from the original source before running.
        processor = WebMirror.processor.RssProcessor.RssProcessor(loggerPath='Main.RssDb', pageUrl=' pgContent='', type='application/atom+xml', transfer=False, debug_print=True, db_sess=sess, write_debug=False)
        print('Loading posts....')
        items = sess.query(db.RssFeedPost).all()
        print(('Loaded %s rows' % len(items)))
        have_content = [tmp for tmp in items if tmp.contents]
        print(('%s rows have content' % len(have_content)))
        pbar = tqdm.tqdm(items, desc='Retriggering RSS URLs')
        for post in pbar:
            # tag:blogger.com identifiers are not fetchable URLs.
            if post.contenturl.startswith('tag:blogger.com'):
                continue
            # Skip in-page comment anchors.
            if (post.contenturl and (not ('#comment_' in post.contenturl))):
                urls.add(post.contenturl)
            if (post.contents and (post.contents != 'Disabled?') and (post.contents != 'wat')):
                # Mine the stored HTML body for additional links and images.
                soup = WebRequest.as_soup(post.contents)
                soup = urlFuncs.canonizeUrls(soup, post.contenturl)
                plainLinks = processor.extractLinks(soup, post.contenturl)
                imageLinks = processor.extractImages(soup, post.contenturl)
                urls.update(plainLinks)
                urls.update(imageLinks)
    # Group the collected URLs by their network location.
    urls = list(urls)
    urld = {}
    for url in [tmp for tmp in urls if tmp]:
        nl = urllib.parse.urlsplit(url).netloc
        if nl:
            urld.setdefault(nl, [])
            urld[nl].append(url)
    print(('Extracted %s unique links for %s netlocs' % (len(urls), len(urld))))
    try:
        with db.session_context() as sess:
            archiver = SiteArchiver(None, sess, None)
            for (key, urls) in tqdm.tqdm(urld.items(), desc='Source Netlocs'):
                # Build one representative job per netloc, rooted at its origin.
                sel_url = urls[0]
                parsed = urllib.parse.urlparse(sel_url)
                root = urllib.parse.urlunparse((parsed[0], parsed[1], '', '', '', ''))
                job = db.WebPages(url=sel_url, starturl=root, netloc=key, distance=0, is_text=True, priority=db.DB_LOW_PRIORITY, type='unknown', fetchtime=datetime.datetime.now())
                # Upsert the links in chunks of 500.
                for chunk in chunks(urls, 500):
                    archiver.upsertResponseLinks(job, plain=chunk, resource=[], debug=True, interactive=True)
    except Exception as e:
        traceback.print_exc()
def upsert_records_with_predicate(source: ETLObjectBase, destination: ETLWritableObjectBase, predicate: list, primary_key: str, is_case_insensitive_pk_match: bool=False) -> int:
    """Insert rows from *source* (restricted by *predicate*) into
    *destination*, updating on primary-key conflict; returns the number of
    affected rows."""
    shared_columns = _get_shared_columns((source.columns + list(destination.insert_overrides)), destination.columns)
    # SET clause assigning every shared column from the EXCLUDED pseudo-row.
    set_clause = SQL(', ').join([SQL('{dest} = {source}').format(dest=Identifier(column), source=(SQL('EXCLUDED.') + Identifier(column))) for column in shared_columns])
    if is_case_insensitive_pk_match:
        conflict_target = SQL('upper({primary_key})').format(primary_key=Identifier(primary_key))
    else:
        conflict_target = Identifier(primary_key)
    template = '\n    INSERT INTO {destination_object_representation} ({insert_columns})\n    SELECT {select_columns}\n    FROM {source_object} AS {alias}\n    ON CONFLICT ({match_expression}) DO UPDATE SET\n    {excluded}\n    RETURNING {primary_key}\n    '
    statement = SQL(template).format(
        primary_key=Identifier(primary_key),
        match_expression=conflict_target,
        alias=Identifier('s'),
        destination_object_representation=destination.object_representation,
        insert_columns=primatives.make_column_list(shared_columns),
        select_columns=primatives.make_column_list(shared_columns, 's', destination.insert_overrides),
        source_object=source.object_representation_custom_predicate(predicate),
        excluded=set_clause,
    )
    return sql_helpers.execute_dml_sql(statement)
class TestZenodoAPISupport():
    """Tests for Zenodo support: API version detection, download-URL
    resolution and registry population.

    NOTE(review): several assignments below are truncated in this copy
    (``doi_url = f'``, ``file_url = '``, ``downloader.base_api_url =``) and
    the bare ``.parametrize`` lines look like ``@pytest.mark.parametrize``
    marks whose prefix was lost — restore them from the original file.
    """
    article_id = 123456
    doi = f'10.0001/zenodo.{article_id}'
    doi_url = f'
    file_name = 'my-file.zip'
    file_url = '
    file_checksum = '2942bfabb3d05332b66eb128e0842cff'
    # Legacy API: files use 'key' and an 'md5:'-prefixed checksum.
    legacy_api_response = dict(created='2021-20-19T08:00:00.000000+00:00', modified='2021-20-19T08:00:00.000000+00:00', id=article_id, doi=doi, doi_url=doi_url, files=[{'id': '513d7033-93a2-4eeb-821c-2fb0bbab0012', 'key': file_name, 'checksum': f'md5:{file_checksum}', 'links': {'self': file_url}}])
    # New API: files use 'filename' and a bare checksum.
    new_api_response = dict(created='2021-20-19T08:00:00.000000+00:00', modified='2021-20-19T08:00:00.000000+00:00', id=article_id, doi=doi, doi_url=doi_url, files=[{'id': '513d7033-93a2-4eeb-821c-2fb0bbab0012', 'filename': file_name, 'checksum': file_checksum, 'links': {'self': file_url}}])
    # Mixed styles in one response: version detection must fail.
    invalid_api_response = dict(created='2021-20-19T08:00:00.000000+00:00', modified='2021-20-19T08:00:00.000000+00:00', id=article_id, doi=doi, doi_url=doi_url, files=[{'id': '513d7033-93a2-4eeb-821c-2fb0bbab0012', 'filename': file_name, 'checksum': file_checksum, 'links': {'self': file_url}}, {'id': '513d7033-93a2-4eeb-821c-2fb0bbab0012', 'key': file_name, 'checksum': f'md5:{file_checksum}', 'links': {'self': file_url}}])

    .parametrize('api_version, api_response', [('legacy', legacy_api_response), ('new', new_api_response), ('invalid', invalid_api_response)])
    def test_api_version(self, api_version, api_response):
        # Version detection succeeds for consistent responses and raises
        # ValueError for the mixed one.
        downloader = ZenodoRepository(doi=self.doi, archive_url=self.doi_url)
        downloader.base_api_url =
        if (api_version != 'invalid'):
            assert (downloader.api_version == api_version)
        else:
            msg = "Couldn't determine the version of the Zenodo API"
            with pytest.raises(ValueError, match=msg):
                api_version = downloader.api_version

    .parametrize('api_version, api_response', [('legacy', legacy_api_response), ('new', new_api_response)])
    def test_download_url(self, api_version, api_response):
        # The legacy API exposes the file link directly; the new API needs a
        # constructed URL.
        downloader = ZenodoRepository(doi=self.doi, archive_url=self.doi_url)
        downloader.base_api_url =
        download_url = downloader.download_url(file_name=self.file_name)
        if (api_version == 'legacy'):
            assert (download_url == self.file_url)
        else:
            expected_url = f'
            assert (download_url == expected_url)

    .parametrize('api_response', [legacy_api_response, new_api_response])
    def test_populate_registry(self, tmp_path, api_response):
        # Both API shapes must register the file with an md5-prefixed hash.
        puppy = Pooch(base_url='', path=tmp_path)
        downloader = ZenodoRepository(doi=self.doi, archive_url=self.doi_url)
        downloader.base_api_url =
        downloader.populate_registry(puppy)
        assert (puppy.registry == {self.file_name: f'md5:{self.file_checksum}'})
class MetaToProto():
    """Converters from internal *Meta objects to their protobuf messages.

    Every single-object converter maps None -> None so callers can pass
    through optional lookups without pre-checking.  All methods are
    stateless; they are declared @staticmethod (they previously lacked both
    ``self`` and the decorator, which would break instance-bound calls).
    """

    @staticmethod
    def namespace_meta_to_proto(namespace: NamespaceMeta) -> NamespaceProto:
        """Convert one NamespaceMeta (or None)."""
        if namespace is None:
            return None
        return NamespaceProto(name=namespace.name, properties=namespace.get_properties())

    @staticmethod
    def namespace_meta_list_to_proto(namespaces: List[NamespaceMeta]) -> List[NamespaceProto]:
        """Convert a list of NamespaceMeta."""
        return [MetaToProto.namespace_meta_to_proto(n) for n in namespaces]

    @staticmethod
    def workflow_meta_to_proto(workflow: WorkflowMeta) -> WorkflowProto:
        """Convert one WorkflowMeta (or None)."""
        if workflow is None:
            return None
        return WorkflowProto(uuid=workflow.id, name=workflow.name, namespace=workflow.namespace,
                             content=workflow.content, pickled_workflow=workflow.workflow_object,
                             create_time=int64_value(datetime_to_timestamp(workflow.create_time)),
                             update_time=int64_value(datetime_to_timestamp(workflow.update_time)),
                             is_enabled=workflow.is_enabled,
                             event_offset=int64_value(workflow.event_offset))

    @staticmethod
    def workflow_meta_list_to_proto(workflows: List[WorkflowMeta]) -> List[WorkflowProto]:
        """Convert a list of WorkflowMeta."""
        return [MetaToProto.workflow_meta_to_proto(w) for w in workflows]

    @staticmethod
    def workflow_snapshot_meta_to_proto(workflow_snapshot: WorkflowSnapshotMeta) -> WorkflowSnapshotProto:
        """Convert one WorkflowSnapshotMeta (or None)."""
        if workflow_snapshot is None:
            return None
        return WorkflowSnapshotProto(uuid=workflow_snapshot.id, workflow_id=workflow_snapshot.workflow_id,
                                     create_time=int64_value(datetime_to_timestamp(workflow_snapshot.create_time)),
                                     workflow_object=workflow_snapshot.workflow_object,
                                     uri=string_value(workflow_snapshot.uri),
                                     signature=string_value(workflow_snapshot.signature))

    @staticmethod
    def workflow_snapshot_meta_list_to_proto(workflow_snapshots: List[WorkflowSnapshotMeta]) -> List[WorkflowSnapshotProto]:
        """Convert a list of WorkflowSnapshotMeta."""
        return [MetaToProto.workflow_snapshot_meta_to_proto(s) for s in workflow_snapshots]

    @staticmethod
    def workflow_execution_meta_to_proto(workflow_execution: WorkflowExecutionMeta) -> WorkflowExecutionProto:
        """Convert one WorkflowExecutionMeta (or None)."""
        if workflow_execution is None:
            return None
        return WorkflowExecutionProto(uuid=workflow_execution.id, workflow_id=workflow_execution.workflow_id,
                                      begin_date=int64_value(datetime_to_timestamp(workflow_execution.begin_date)),
                                      end_date=int64_value(datetime_to_timestamp(workflow_execution.end_date)),
                                      status=string_value(workflow_execution.status),
                                      run_type=string_value(workflow_execution.run_type),
                                      snapshot_id=workflow_execution.snapshot_id,
                                      event_offset=int64_value(workflow_execution.event_offset))

    @staticmethod
    def workflow_execution_meta_list_to_proto(executions: List[WorkflowExecutionMeta]) -> List[WorkflowExecutionProto]:
        """Convert a list of WorkflowExecutionMeta."""
        return [MetaToProto.workflow_execution_meta_to_proto(we) for we in executions]

    @staticmethod
    def task_execution_meta_to_proto(task_execution: TaskExecutionMeta) -> TaskExecutionProto:
        """Convert one TaskExecutionMeta (or None)."""
        if task_execution is None:
            return None
        return TaskExecutionProto(uuid=task_execution.id, workflow_execution_id=task_execution.workflow_execution_id,
                                  task_name=task_execution.task_name, sequence_number=task_execution.sequence_number,
                                  try_number=task_execution.try_number,
                                  begin_date=int64_value(datetime_to_timestamp(task_execution.begin_date)),
                                  end_date=int64_value(datetime_to_timestamp(task_execution.end_date)),
                                  status=string_value(task_execution.status))

    @staticmethod
    def task_execution_meta_list_to_proto(executions: List[TaskExecutionMeta]) -> List[TaskExecutionProto]:
        """Convert a list of TaskExecutionMeta."""
        return [MetaToProto.task_execution_meta_to_proto(te) for te in executions]

    @staticmethod
    def workflow_schedule_meta_to_proto(workflow_schedule: WorkflowScheduleMeta) -> WorkflowScheduleProto:
        """Convert one WorkflowScheduleMeta (or None)."""
        if workflow_schedule is None:
            return None
        return WorkflowScheduleProto(uuid=workflow_schedule.id, workflow_id=workflow_schedule.workflow_id,
                                     expression=string_value(workflow_schedule.expression),
                                     is_paused=workflow_schedule.is_paused,
                                     create_time=int64_value(datetime_to_timestamp(workflow_schedule.create_time)))

    @staticmethod
    def workflow_schedule_meta_list_to_proto(workflow_schedules: List[WorkflowScheduleMeta]) -> List[WorkflowScheduleProto]:
        """Convert a list of WorkflowScheduleMeta."""
        return [MetaToProto.workflow_schedule_meta_to_proto(ws) for ws in workflow_schedules]

    @staticmethod
    def workflow_trigger_meta_to_proto(workflow_trigger: WorkflowEventTriggerMeta) -> WorkflowTriggerProto:
        """Convert one WorkflowEventTriggerMeta (or None)."""
        if workflow_trigger is None:
            return None
        return WorkflowTriggerProto(uuid=workflow_trigger.id, workflow_id=workflow_trigger.workflow_id,
                                    rule=workflow_trigger.rule, is_paused=workflow_trigger.is_paused,
                                    create_time=int64_value(datetime_to_timestamp(workflow_trigger.create_time)))

    @staticmethod
    def workflow_trigger_meta_list_to_proto(workflow_triggers: List[WorkflowEventTriggerMeta]) -> List[WorkflowTriggerProto]:
        """Convert a list of WorkflowEventTriggerMeta."""
        return [MetaToProto.workflow_trigger_meta_to_proto(wt) for wt in workflow_triggers]
class BaseManager():
DECORATED_METHODS = ('get', 'save', 'filter', 'all', 'put', 'delete', 'get_history', 'put_history', 'get_attachments', 'get_attachment_data', 'put_attachment_data')
OBJECT_DECORATED_METHODS = {'Invoices': ['email', 'online_invoice'], 'Organisations': ['actions']}
OBJECT_FILTER_FIELDS = {'Invoices': {'createdByMyApp': bool, 'summaryOnly': bool, 'IDs': list, 'InvoiceNumbers': list, 'ContactIDs': list, 'Statuses': list}, 'PurchaseOrders': {'DateFrom': date, 'DateTo': date, 'Status': str}, 'Quotes': {'ContactID': UUID, 'ExpiryDateFrom': date, 'ExpiryDateTo': date, 'DateFrom': date, 'DateTo': date, 'Status': str, 'QuoteNumber': str}, 'Journals': {'paymentsOnly': bool}, 'Budgets': {'DateFrom': date, 'DateTo': date}, 'Contacts': {'IDs': list, 'includeArchived': bool, 'summaryOnly': bool, 'searchTerm': str}, 'TrackingCategories': {'includeArchived': bool}}
DATETIME_FIELDS = ('UpdatedDateUTC', 'Updated', 'FullyPaidOnDate', 'DateTimeUTC', 'CreatedDateUTC', 'JournalDate')
DATE_FIELDS = ('DueDate', 'Date', 'PaymentDate', 'StartDate', 'EndDate', 'PeriodLockDate', 'DateOfBirth', 'OpeningBalanceDate', 'PaymentDueDate', 'ReportingDate', 'DeliveryDate', 'ExpectedArrivalDate')
BOOLEAN_FIELDS = ('IsSupplier', 'IsCustomer', 'IsDemoCompany', 'PaysTax', 'IsAuthorisedToApproveTimesheets', 'IsAuthorisedToApproveLeave', 'HasHELPDebt', 'AustralianResidentForTaxPurposes', 'TaxFreeThresholdClaimed', 'HasSFSSDebt', 'EligibleToReceiveLeaveLoading', 'IsExemptFromTax', 'IsExemptFromSuper', 'SentToContact', 'IsSubscriber', 'HasAttachments', 'ShowOnCashBasisReports', 'IncludeInEmails', 'SentToContact', 'CanApplyToRevenue', 'CanApplyToLiabilities', 'CanApplyToExpenses', 'CanApplyToEquity', 'CanApplyToAssets', 'IsReconciled', 'EnablePaymentsToAccount', 'ShowInExpenseClaims', 'DiscountEnteredAsPercent', 'IsPurchased', 'IsSold', 'IsTrackedAsInventory')
DECIMAL_FIELDS = ('Hours', 'NumberOfUnit')
INTEGER_FIELDS = ('FinancialYearEndDay', 'FinancialYearEndMonth')
NO_SEND_FIELDS = ('UpdatedDateUTC', 'HasValidationErrors', 'IsDiscounted', 'DateString', 'HasErrors', 'DueDateString', 'HasAccount')
OPERATOR_MAPPINGS = {'gt': '>', 'lt': '<', 'lte': '<=', 'gte': '>=', 'ne': '!='}
    def __init__(self):
        # The base manager keeps no state of its own; subclasses populate
        # name/singular/base_url/credentials/extra_params as needed.
        pass
def dict_to_xml(self, root_elm, data):
for key in data.keys():
if (key in self.NO_SEND_FIELDS):
continue
sub_data = data[key]
elm = SubElement(root_elm, key)
if isinstance(sub_data, dict):
self.dict_to_xml(elm, sub_data)
elif (isinstance(sub_data, list) or isinstance(sub_data, tuple)):
if isplural(key):
for d in sub_data:
self.dict_to_xml(SubElement(elm, singular(key)), d)
else:
for d in sub_data:
self.dict_to_xml(elm, d)
else:
if (key in self.BOOLEAN_FIELDS):
val = ('true' if sub_data else 'false')
elif (key in self.DATE_FIELDS):
val = sub_data.strftime('%Y-%m-%dT%H:%M:%S')
else:
val = str(sub_data)
elm.text = val
return root_elm
def _prepare_data_for_save(self, data):
if (isinstance(data, list) or isinstance(data, tuple)):
root_elm = Element(self.name)
for d in data:
sub_elm = SubElement(root_elm, self.singular)
self.dict_to_xml(sub_elm, d)
else:
root_elm = self.dict_to_xml(Element(self.singular), data)
return tostring(root_elm)
def _parse_api_response(self, response, resource_name):
data = json.loads(response.text, object_hook=json_load_object_hook)
assert (data['Status'] == 'OK'), ('Expected the API to say OK but received %s' % data['Status'])
try:
return data[resource_name]
except KeyError:
return data
    def _get_data(self, func):
        """Decorator applied to the raw request builders (``_get``, ``_save``,
        ...): executes the 6-tuple request spec they return and translates the
        HTTP response into data or a typed Xero exception.
        """
        def wrapper(*args, **kwargs):
            # Transport-level timeout is pulled out before calling the builder.
            timeout = kwargs.pop('timeout', None)
            (uri, params, method, body, headers, singleobject) = func(*args, **kwargs)
            if (headers is None):
                headers = {}
            if ('Content-Type' not in headers):
                headers['Content-Type'] = 'application/xml'
            # OAuth2 calls require the tenant id header.
            if isinstance(self.credentials, OAuth2Credentials):
                if self.credentials.tenant_id:
                    headers['Xero-tenant-id'] = self.credentials.tenant_id
                else:
                    raise XeroTenantIdNotSet
            if ('Accept' not in headers):
                headers['Accept'] = 'application/json'
            headers['User-Agent'] = self.user_agent
            response = getattr(requests, method)(uri, data=body, headers=headers, auth=self.credentials.oauth, params=params, timeout=timeout)
            if (response.status_code == 200):
                # Non-JSON bodies (e.g. attachments) are returned raw.
                if (not response.headers['content-type'].startswith('application/json')):
                    return response.content
                return self._parse_api_response(response, self.name)
            elif (response.status_code == 204):
                return response.content
            elif (response.status_code == 400):
                try:
                    raise XeroBadRequest(response)
                except (ValueError, ExpatError):
                    # The 400 body itself could not be parsed.
                    raise XeroExceptionUnknown(response, msg='Unable to parse Xero API response')
            elif (response.status_code == 401):
                raise XeroUnauthorized(response)
            elif (response.status_code == 403):
                raise XeroForbidden(response)
            elif (response.status_code == 404):
                raise XeroNotFound(response)
            elif (response.status_code == 429):
                # Rate limited: surface which limit was hit via the header.
                limit_reason = (response.headers.get('X-Rate-Limit-Problem') or 'unknown')
                payload = {'oauth_problem': [('rate limit exceeded: ' + limit_reason)], 'oauth_problem_advice': [('please wait before retrying the xero api, the limit exceeded is: ' + limit_reason)]}
                raise XeroRateLimitExceeded(response, payload)
            elif (response.status_code == 500):
                raise XeroInternalError(response)
            elif (response.status_code == 501):
                raise XeroNotImplemented(response)
            elif (response.status_code == 503):
                # 503 with an OAuth payload means rate-limited; otherwise down.
                payload = parse_qs(response.text)
                if payload:
                    raise XeroRateLimitExceeded(response, payload)
                else:
                    raise XeroNotAvailable(response)
            else:
                raise XeroExceptionUnknown(response)
        return wrapper
def _get(self, id, headers=None, params=None):
uri = '/'.join([self.base_url, self.name, id])
uri_params = self.extra_params.copy()
uri_params.update((params if params else {}))
return (uri, uri_params, 'get', None, headers, True)
def _get_history(self, id):
uri = ('/'.join([self.base_url, self.name, id, 'history']) + '/')
return (uri, {}, 'get', None, None, False)
def _get_attachments(self, id):
uri = ('/'.join([self.base_url, self.name, id, 'Attachments']) + '/')
return (uri, {}, 'get', None, None, False)
def _get_attachment_data(self, id, filename):
uri = '/'.join([self.base_url, self.name, id, 'Attachments', filename])
return (uri, {}, 'get', None, None, False)
def get_attachment(self, id, filename, file):
data = self.get_attachment_data(id, filename)
file.write(data)
return len(data)
def _email(self, id):
uri = '/'.join([self.base_url, self.name, id, 'Email'])
return (uri, {}, 'post', None, None, True)
def _online_invoice(self, id):
uri = '/'.join([self.base_url, self.name, id, 'OnlineInvoice'])
return (uri, {}, 'get', None, None, True)
def _actions(self):
    """Request tuple listing the actions available on this endpoint."""
    endpoint = f'{self.base_url}/{self.name}/Actions'
    return (endpoint, {}, 'get', None, None, False)
def save_or_put(self, data, method='post', headers=None, summarize_errors=True):
    """Request tuple that creates or updates records.

    When *summarize_errors* is False, the summarizeErrors=false query
    parameter is added so Xero reports per-element validation errors.
    """
    endpoint = f'{self.base_url}/{self.name}'
    payload = self._prepare_data_for_save(data)
    query = self.extra_params.copy()
    if not summarize_errors:
        query['summarizeErrors'] = 'false'
    return (endpoint, query, method, payload, headers, False)
def _save(self, data):
    """Create new record(s) via POST."""
    request = self.save_or_put(data, method='post')
    return request
def _put(self, data, summarize_errors=True):
    """Create/overwrite record(s) via PUT."""
    request = self.save_or_put(data, method='put', summarize_errors=summarize_errors)
    return request
def _delete(self, id):
    """Request tuple that deletes the record with *id*."""
    endpoint = f'{self.base_url}/{self.name}/{id}'
    return (endpoint, {}, 'delete', None, None, False)
def _put_history_data(self, id, details):
    """Request tuple that appends a HistoryRecord note to the record."""
    endpoint = f'{self.base_url}/{self.name}/{id}/history'
    record = Element('HistoryRecord')
    # serialize {'Details': details} into the XML element via the shared helper
    self.dict_to_xml(record, {'Details': details})
    payload = tostring(record)
    return (endpoint, {}, 'put', payload, None, False)
def _put_history(self, id, details):
    """Append a history note to the record (thin alias for _put_history_data)."""
    return self._put_history_data(id, details)
def _put_attachment_data(self, id, filename, data, content_type, include_online=False):
    """Request tuple that uploads raw attachment bytes under *filename*."""
    endpoint = f'{self.base_url}/{self.name}/{id}/Attachments/{filename}'
    query = {}
    if include_online:
        query['IncludeOnline'] = 'true'
    upload_headers = {'Content-Type': content_type, 'Content-Length': str(len(data))}
    return (endpoint, query, 'put', io.BytesIO(data), upload_headers, False)
def put_attachment(self, id, filename, file, content_type, include_online=False):
    """Upload an attachment read from an open file object."""
    content = file.read()
    return self.put_attachment_data(id, filename, content, content_type, include_online=include_online)
def prepare_filtering_date(self, val):
    """Build an If-Modified-Since header dict from *val*.

    datetime values become an HTTP-date string; any other value is
    stringified and wrapped in double quotes.
    """
    if isinstance(val, datetime):
        stamp = val.strftime('%a, %d %b %Y %H:%M:%S GMT')
    else:
        stamp = '"%s"' % val
    return {'If-Modified-Since': stamp}
def _filter(self, **kwargs):
    """Build the request tuple for a filtered list query.

    Keyword arguments are translated into Xero query syntax:
    ``since`` becomes an If-Modified-Since header; known parameters
    (order/offset/page plus per-object extras) become query params;
    ``raw`` and Django-style lookups (``Name__contains=...``) are
    rendered into the ``where`` clause.
    Returns (uri, params, method, body, headers, singleobject).
    """
    params = self.extra_params.copy()
    headers = None
    uri = '/'.join([self.base_url, self.name])
    if kwargs:
        # 'since' maps to an If-Modified-Since header, not a query param.
        if ('since' in kwargs):
            val = kwargs['since']
            headers = self.prepare_filtering_date(val)
            del kwargs['since']
        def get_filter_value(key, value, value_type=None):
            # Render a plain query-parameter value per the field's declared type.
            if ((key in self.BOOLEAN_FIELDS) or (value_type == bool)):
                return ('true' if value else 'false')
            elif ((key in self.DATE_FIELDS) or (value_type == date)):
                return f'{value.year}-{value.month}-{value.day}'
            elif ((key in self.DATETIME_FIELDS) or (value_type == datetime)):
                return value.isoformat()
            elif (key.endswith('ID') or (value_type == UUID)):
                return ('%s' % (value.hex if (type(value) == UUID) else UUID(value).hex))
            else:
                return value
        def get_filter_params(key, value):
            # Render the right-hand side of a `where` comparison.
            last_key = key.split('_')[(- 1)]
            if last_key.endswith('ID'):
                return ('Guid("%s")' % str(value))
            if (key in self.BOOLEAN_FIELDS):
                return ('true' if value else 'false')
            elif (key in self.DATE_FIELDS):
                return 'DateTime({},{},{})'.format(value.year, value.month, value.day)
            elif (key in self.DATETIME_FIELDS):
                return value.isoformat()
            else:
                return ('"%s"' % str(value))
        def generate_param(key, value):
            # Turn one Django-style lookup (Field__op=value) into a `where` term.
            parts = key.split('__')
            field = key.replace('_', '.')
            fmt = '%s==%s'
            if (len(parts) == 2):
                if (parts[1] in ['contains', 'startswith', 'endswith']):
                    field = parts[0]
                    fmt = ''.join(['%s.', parts[1], '(%s)'])
                elif (parts[1] in ['tolower', 'toupper']):
                    field = parts[0]
                    fmt = ''.join(['%s.', parts[1], '()==%s'])
                elif (parts[1] in self.OPERATOR_MAPPINGS):
                    field = parts[0]
                    key = field
                    fmt = (('%s' + self.OPERATOR_MAPPINGS[parts[1]]) + '%s')
                elif (parts[1] in ['isnull']):
                    sign = ('=' if value else '!')
                    return f'{parts[0]}{sign}=null'
                field = field.replace('_', '.')
            return (fmt % (field, get_filter_params(key, value)))
        KNOWN_PARAMETERS = ['order', 'offset', 'page']
        object_params = self.OBJECT_FILTER_FIELDS.get(self.name, {})
        LIST_PARAMETERS = list(filter((lambda x: (object_params[x] == list)), object_params))
        EXTRA_PARAMETERS = list(filter((lambda x: (object_params[x] != list)), object_params))
        # Pagination/order and typed per-object extras go straight into query params.
        for param in (KNOWN_PARAMETERS + EXTRA_PARAMETERS):
            if (param in kwargs):
                params[param] = get_filter_value(param, kwargs.pop(param), object_params.get(param, None))
        for param in LIST_PARAMETERS:
            if (param in kwargs):
                if param.endswith('IDs'):
                    # ID lists are joined as bare hex UUID strings.
                    params[param] = ','.join(map((lambda x: UUID(x).hex), kwargs.pop(param)))
                else:
                    params[param] = ','.join(kwargs.pop(param))
        filter_params = []
        if ('raw' in kwargs):
            # 'raw' is passed through verbatim as a where-clause fragment.
            raw = kwargs.pop('raw')
            filter_params.append(raw)
        # isnull lookups are ordered first in the where clause
        # (presumably a Xero evaluation-order requirement -- confirm).
        sortedkwargs = sorted(kwargs.items(), key=(lambda item: ((- 1) if ('isnull' in item[0]) else 0)))
        for (key, value) in sortedkwargs:
            filter_params.append(generate_param(key, value))
        if filter_params:
            params['where'] = '&&'.join(filter_params)
    return (uri, params, 'get', None, headers, False)
def _all(self):
    """Request tuple listing every record of this type."""
    endpoint = f'{self.base_url}/{self.name}'
    return (endpoint, {}, 'get', None, None, False)
@pytest.mark.django_db
def test_invalid_award_type_codes(client, monkeypatch, helpers, elasticsearch_award_index, cfda_awards_and_transactions):
    """An unrecognized award_type_code ('ZZ') must be rejected with HTTP 400.

    Fix: the marker line had been mangled to a bare `.django_db` (a syntax
    error); restored the standard `@pytest.mark.django_db` decorator.
    """
    setup_elasticsearch_test(monkeypatch, elasticsearch_award_index)
    resp = helpers.post_for_spending_endpoint(client, url, award_type_codes=['ZZ', '08'], def_codes=['L', 'M'])
    assert resp.status_code == status.HTTP_400_BAD_REQUEST
    assert resp.data['detail'] == "Field 'filter|award_type_codes' is outside valid values ['02', '03', '04', '05', '06', '07', '08', '09', '10', '11']"
def attrgetter_atom_handle(loc, tokens):
    """Compile an attrgetter/methodcaller atom into its runtime expression.

    Four shapes: bare attribute -> attrgetter; dotted method call ->
    attrgetter on the prefix composed with a methodcaller; empty-arg call
    -> methodcaller with no args; otherwise methodcaller with args.
    """
    name, args = attrgetter_atom_split(tokens)
    if args is None:
        # plain attribute access
        return f'_coconut.operator.attrgetter("{name}")'
    if '.' in name:
        # dotted method: fetch the attribute chain, then call the last segment
        attr, method = name.rsplit('.', 1)
        inner = attrgetter_atom_handle(loc, [method, '(', args])
        return f'_coconut_forward_compose(_coconut.operator.attrgetter("{attr}"), {inner})'
    if args == '':
        return f'_coconut.operator.methodcaller("{name}")'
    return f'_coconut.operator.methodcaller("{name}", {args})'
class Post(Document):
    """elasticsearch-dsl document for a Q&A post with nested comments and a
    question/answer join field."""
    # document mapping fields
    author = Object(User, required=True)
    created = Date(required=True)
    body = Text(required=True)
    comments = Nested(Comment)
    question_answer = Join(relations={'question': 'answer'})

    def _matches(cls, hit):
        # NOTE(review): takes `cls` -- presumably decorated @classmethod in the
        # original (decorator lost in extraction); confirm upstream.
        # The base Post never matches raw hits; subclasses are expected to.
        return False

    class Index():
        # index name and settings applied when the document is saved
        name = 'test-qa-site'
        settings = {'number_of_shards': 1, 'number_of_replicas': 0}

    def add_comment(self, user, content, created=None, commit=True):
        """Append a Comment to this post; persists the post unless commit=False."""
        c = Comment(author=user, content=content, created=(created or datetime.now()))
        self.comments.append(c)
        if commit:
            self.save()
        return c

    def save(self, **kwargs):
        """Save the document, defaulting `created` to now when unset."""
        if (self.created is None):
            self.created = datetime.now()
        return super().save(**kwargs)
class Bicing(BikeShareSystem):
    """pybikes system for the Bicing bike-share JSON feed."""
    meta = {'ebikes': True}

    def __init__(self, tag, meta, endpoint, bbox=None):
        super(Bicing, self).__init__(tag, meta)
        self.endpoint = endpoint
        # optional bounding box used to discard out-of-area stations
        self.bbox = bbox

    def stations_url(self):
        # NOTE(review): update() references self.stations_url WITHOUT calling
        # it, so this was presumably a @property in the original (decorator
        # lost in extraction) -- confirm upstream.
        return STATIONS_URL.format(endpoint=self.endpoint)

    def update(self, scraper=None):
        """Refresh self.stations from the remote feed."""
        scraper = (scraper or PyBikesScraper())
        # slow endpoint: allow a generous request timeout
        scraper.requests_timeout = max(scraper.requests_timeout, 600)
        data = json.loads(scraper.request(self.stations_url))
        stations = []
        for station_data in data['stations']:
            try:
                station = BicingStation(station_data)
            except InvalidStation:
                # skip malformed entries rather than failing the whole update
                continue
            stations.append(station)
        if self.bbox:
            # keep only stations inside the configured bounding box
            stations = list(filter_bounds(stations, None, self.bbox))
        self.stations = stations
def load(filename, path=None):
    """Rebuild a Bap object from a JSON file produced by the matching serializer.

    Keys in the JSON encode each value's type in the key name itself,
    e.g. 'season (Season)' or 'range (tuple)'.

    :param filename: JSON file name, with or without the .json extension
    :param path: directory containing the file; defaults to the CWD
    :return: a reconstructed Bap instance
    """
    from . import season, masks, filters, scores
    from geetools import collection
    import os
    if (path is None):
        path = os.getcwd()
    # accept names given with or without the .json extension
    split = filename.split('.')
    if (split[(- 1)] != 'json'):
        filename = '{}.json'.format(filename)
    obj = json.load(open(os.path.join(path, filename)))
    def get_number(param_dict, name):
        # Fetch 'name (int)' or 'name (float)' from param_dict; None if absent.
        # With an empty name the serializer omits the space ('(int)').
        if (name != ''):
            params = ['{} (int)', '{} (float)']
        else:
            params = ['{}(int)', '{}(float)']
        for param in params:
            result = param_dict.get(param.format(name))
            if (result is not None):
                return result
        return None
    # season
    seas = obj['season (Season)']
    start = seas['_start (SeasonDate)']['date (str)']
    end = seas['_end (SeasonDate)']['date (str)']
    season_param = season.Season(start, end)
    # output value range
    ran = (obj.get('range (tuple)') or obj.get('range (list)'))
    range_param = (ran[0]['(int)'], ran[1]['(int)'])
    # masks (unknown mask classes are skipped)
    mask_list = []
    for mask in (obj.get('masks (tuple)') or obj.get('masks (list)') or []):
        mask_class = list(mask.keys())[0]
        params = mask[mask_class]
        if (mask_class == '(Mask)'):
            options = [opt['(str)'] for opt in (params.get('options (list)') or params.get('options (tuple)'))]
            mask_param = masks.Mask(options)
        elif (mask_class == '(Hollstein)'):
            options = [opt['(str)'] for opt in (params.get('options (list)') or params.get('options (tuple)'))]
            mask_param = masks.Hollstein(options)
        else:
            continue
        mask_list.append(mask_param)
    # filters (unknown filter classes are skipped)
    filter_list = []
    for filter in (obj.get('filters (tuple)') or obj.get('filters (list)') or []):
        filter_class = list(filter.keys())[0]
        params = filter[filter_class]
        if (filter_class == '(CloudCover)'):
            percent = get_number(params, 'percent')
            filter_param = filters.CloudCover(percent)
        elif (filter_class == 'MaskCover'):
            # NOTE(review): key lacks the parentheses used by the other class
            # keys ('(CloudCover)') -- preserved as-is; confirm vs the writer.
            percent = get_number(params, 'percent')
            filter_param = filters.MaskCover(percent)
        else:
            continue
        filter_list.append(filter_param)
    # collection group
    # NOTE(review): 'colgroup (CollectionGroup' is missing its closing
    # parenthesis -- preserved as-is; confirm against the serializer output.
    colgroup = (obj.get('colgroup (CollectionGroup') or obj.get('colgroup (NoneType)'))
    if colgroup:
        collections = []
        collections_param = (colgroup.get('collections (tuple)') or colgroup.get('collections (list)'))
        for col in collections_param:
            sat = list(col.keys())[0]
            params = col[sat]
            satid = params['_id (str)']
            instance = collection.fromId(satid)
            collections.append(instance)
        colgroup_param = collection.CollectionGroup(collections)
    else:
        colgroup_param = None
    # target collection (either a Landsat or Sentinel entry)
    target_collection = (obj.get('target_collection (Landsat)') or obj.get('target_collection (Sentinel)'))
    target_id = target_collection.get('id (str)')
    target_param = collection.fromId(target_id)
    # scores; classes handled with a bare `continue` are not reloadable
    score_list = []
    for score in (obj.get('scores (list)') or obj.get('scores (tuple)') or []):
        score_class = list(score.keys())[0]
        params = score[score_class]
        # shared score attributes
        range_out_param = (params.get('range_out (tuple)') or params.get('range_out (list)'))
        if range_out_param:
            range_out_0 = range_out_param[0]
            range_out_1 = range_out_param[1]
            range_out = (get_number(range_out_0, ''), get_number(range_out_1, ''))
        else:
            range_out = None
        range_in_param = (params.get('range_in (tuple)') or params.get('range_in (list)'))
        if range_in_param:
            range_in_0 = range_in_param[0]
            range_in_1 = range_in_param[1]
            range_in = (get_number(range_in_0, ''), get_number(range_in_1, ''))
        else:
            range_in = None
        name = params.get('name (str)')
        sleep = get_number(params, 'sleep')
        if (score_class == '(CloudScene)'):
            continue
        if (score_class == '(CloudDist)'):
            dmax = get_number(params, 'dmax')
            dmin = get_number(params, 'dmin')
            kernel = params.get('kernel (str)')
            units = params.get('units (str)')
            score_param = scores.CloudDist(dmin, dmax, name, kernel=kernel, units=units)
        elif (score_class == '(Doy)'):
            best_doy = params.get('best_doy (str)')
            doy_season_param = params.get('season (Season)')
            start = doy_season_param['_start (SeasonDate)']['date (str)']
            end = doy_season_param['_end (SeasonDate)']['date (str)']
            Season = season.Season(start, end)
            function = params.get('function (str)')
            stretch = get_number(params, 'stretch')
            score_param = scores.Doy(best_doy, Season, name, function, stretch)
        elif (score_class == '(AtmosOpacity)'):
            continue
        elif (score_class == '(MaskPercent)'):
            band = params.get('band (str)')
            maxPixels = params.get('maxPixels (int)')
            count_zeros = params.get('count_zeros (bool)')
            score_param = scores.MaskPercent(band, name, maxPixels, count_zeros)
        elif (score_class == '(MaskPercentKernel)'):
            kernel = params.get('kernel (str)')
            distance = get_number(params, 'distance')
            units = params.get('units (str)')
            score_param = scores.MaskPercentKernel(kernel, distance, units, name)
        elif (score_class == '(Satellite)'):
            ratio = get_number(params, 'ratio')
            score_param = scores.Satellite(ratio, name)
        elif (score_class == '(Outliers)'):
            bands = (params.get('bands (tuple)') or params.get('bands (list)'))
            bandlist = [band['(str)'] for band in bands]
            process = params.get('process (str)')
            dist = get_number(params, 'dist')
            score_param = scores.Outliers(bandlist, process, dist, name)
        elif (score_class == '(Index)'):
            index = params.get('index (str)')
            target = get_number(params, 'target')
            function = params.get('function (str)')
            stretch = get_number(params, 'stretch')
            score_param = scores.Index(index, target, name, function, stretch)
        elif (score_class == '(MultiYear)'):
            main_year = params.get('main_year (int)')
            my_season_param = params.get('season (Season)')
            start = my_season_param['_start (SeasonDate)']['date (str)']
            end = my_season_param['_end (SeasonDate)']['date (str)']
            Season = season.Season(start, end)
            ratio = get_number(params, 'ratio')
            function = params.get('function (str)')
            stretch = get_number(params, 'stretch')
            score_param = scores.MultiYear(main_year, Season, ratio, function, stretch, name)
        elif (score_class == '(Threshold)'):
            continue
        elif (score_class == '(Medoid)'):
            bands = (params.get('bands (list)') or params.get('bands (tuple)'))
            discard_zeros = params.get('discard_zeros (bool)')
            score_param = scores.Medoid(bands, discard_zeros, name)
        elif (score_class == '(Brightness)'):
            target = get_number(params, 'target')
            bands = (params.get('bands (list)') or params.get('bands (tuple)'))
            function = params.get('function (str)')
            score_param = scores.Brightness(target, bands, name, function)
        else:
            continue
        # attach the shared attributes parsed above
        score_param.sleep = sleep
        score_param.range_out = range_out
        score_param.range_in = range_in
        score_list.append(score_param)
    # remaining scalar options
    score_name_param = obj.get('score_name (str)')
    bandname_date_param = obj.get('bandname_date (str)')
    brdf_param = obj.get('brdf (bool)')
    harmonize_param = obj.get('harmonize (bool)')
    bandname_col_id_param = obj.get('bandname_col_id (str)')
    return Bap(season_param, range_param, colgroup_param, score_list, mask_list, filter_list, target_param, brdf_param, harmonize_param, score_name=score_name_param, bandname_date=bandname_date_param, bandname_col_id=bandname_col_id_param)
@pytest.mark.parametrize('subdomain', (1, 2, 3, 4))
def test_2d_facet_subdomains(mesh_2d, subdomain):
    """Slate Tensor assembly of a facet integral must match plain assembly
    on every exterior-facet subdomain.

    Fix: the marker line had been mangled to a bare `.parametrize(...)`
    (a syntax error); restored `@pytest.mark.parametrize`.
    """
    DG = VectorFunctionSpace(mesh_2d, 'DG', 1)
    n = FacetNormal(mesh_2d)
    u = TestFunction(DG)
    (x, y) = SpatialCoordinate(mesh_2d)
    f = project(as_vector([x, y]), DG)
    form = (inner(n, ((f[0] * f[1]) * u)) * ds(subdomain))
    A = assemble(Tensor(form)).dat.data
    ref = assemble(form).dat.data
    assert np.allclose(A, ref, rtol=1e-14)
class Parser():
    """Formats endpoint query results for CLI display (table, CSV or JSON)."""
    def __init__(self):
        # Columns shown when the caller does not override --fields.
        self.default_fields = ['IPv4', 'IPv4 rDNS', 'Role', 'IPv4 OS', 'Ethernet Vendor', 'MAC Address', 'Pcap labels']
        # Every known column, in canonical display order; indexes into this
        # list are referenced by fields_lookup in display_results.
        self.all_fields = ['ID', 'MAC Address', 'Switch', 'Port', 'VLAN', 'IPv4', 'IPv4 Subnet', 'IPv6', 'IPv6 Subnet', 'Ethernet Vendor', 'Ignored', 'State', 'Next State', 'First Seen', 'Last Seen', 'Previous States', 'IPv4 OS\n(p0f)', 'IPv6 OS\n(p0f)', 'Previous IPv4 OSes\n(p0f)', 'Previous IPv6 OSes\n(p0f)', 'Role\n(NetworkML)', 'Role Confidence\n(NetworkML)', 'Previous Roles\n(NetworkML)', 'Previous Role Confidences\n(NetworkML)', 'IPv4 rDNS', 'IPv6 rDNS', 'SDN Controller Type', 'SDN Controller URI', 'History', 'ACL History', 'Pcap labels']
    # NOTE(review): several methods below take no `self`; they were presumably
    # decorated @staticmethod in the original source -- confirm upstream.
    def completion(text, line, completions):
        """Return candidate first-words for readline-style tab completion."""
        (_, _, mline) = line.partition(' ')
        offs = (len(mline) - len(text))
        words = []
        completes = [s[offs:] for s in completions if s.lower().startswith(mline.lower())]
        for complete in completes:
            words.append(complete.split(' ', 1)[0])
        return words
    def get_flags(text):
        """Split a command line into (valid, {flag: value}, remaining text).

        Supports short boolean flags (-x), long scalar flags (--k=v) and
        long list flags (--k=[a, b]).
        """
        valid = True
        flags = {}
        not_flags = []
        words = text.split()
        other_words = []
        for word in words:
            # short flag: single leading dash
            if ((len(word) > 1) and (word[0] == '-') and (word[1] != '-')):
                flags[word[1:]] = True
            else:
                other_words.append(word)
        other_words = ' '.join(other_words)
        first = other_words.split('--')
        not_flags += first[0].split()
        first.pop(0)
        for flag in first:
            if ('=' in flag):
                (command, value) = flag.split('=', 1)
                if (('[' in value) and (']' in value)):
                    # list-valued flag: --k=[a, b, c]
                    val = value.rsplit(']', 1)[0].split('[', 1)[1]
                    val = val.split(',')
                    store_vals = []
                    for v in val:
                        store_vals.append(v.strip())
                    flags[command] = store_vals
                    not_f = value.rsplit(']', 1)
                else:
                    # scalar flag: first word after '=' is the value
                    val = value.split(' ', 1)[0]
                    flags[command] = val
                    not_f = value.split(' ', 1)
                not_f.pop(0)
                if not_f:
                    not_flags += not_f[0].split()
            else:
                # a '--' flag without '=' is malformed
                valid = False
        return (valid, flags, ' '.join(not_flags))
    def _check_flags(self, flags, fields, sort_by=0, max_width=0, unique=False, nonzero=False, output_format='table', ipv4_only=True, ipv6_only=False, ipv4_and_ipv6=False):
        """Apply parsed flags onto the display settings; return validity + settings."""
        valid = True
        for flag in flags:
            if (flag == 'fields'):
                if ('all' in flags[flag]):
                    fields = self.all_fields
                else:
                    fields = flags[flag]
            elif (flag == 'sort_by'):
                sort_by = int(flags[flag])
            elif (flag == 'max_width'):
                max_width = int(flags[flag])
            elif ((flag == 'unique') and (flags[flag] is True)):
                unique = True
            elif ((flag == 'nonzero') and (flags[flag] is True)):
                nonzero = True
            elif (flag == 'output_format'):
                output_format = flags[flag]
            elif ((flag == '4') and (flags[flag] is True)):
                ipv4_only = True
                ipv6_only = False
                ipv4_and_ipv6 = False
            elif ((flag == '6') and (flags[flag] is True)):
                ipv6_only = True
                ipv4_only = False
                ipv4_and_ipv6 = False
            elif ((flag == '4and6') and (flags[flag] is True)):
                ipv4_only = False
                ipv6_only = False
                ipv4_and_ipv6 = True
            else:
                valid = False
        # explicit --fields without an IP-version flag disables version filtering
        if (('fields' in flags) and ('4' not in flags) and ('6' not in flags) and ('4and6' not in flags)):
            ipv4_only = False
            ipv6_only = False
            ipv4_and_ipv6 = False
        return (valid, fields, sort_by, max_width, unique, nonzero, output_format, ipv4_only, ipv6_only, ipv4_and_ipv6)
    def display_ip_filter(fields, ipv4_only, ipv6_only, ipv4_and_ipv6):
        """Drop IP-version-specific columns excluded by the active filter."""
        if ((not ipv4_only) and (not ipv6_only) and (not ipv4_and_ipv6)):
            return fields
        IPV4_FIELD = 'ipv4'
        IPV6_FIELD = 'ipv6'
        IP_FIELDS = {IPV4_FIELD, IPV6_FIELD}
        filtered_fields = []
        ip_fields_filter = set()
        if (ipv4_only or ipv4_and_ipv6):
            ip_fields_filter.add(IPV4_FIELD)
        if (ipv6_only or ipv4_and_ipv6):
            ip_fields_filter.add(IPV6_FIELD)
        for field in fields:
            ip_fields = {ip_field for ip_field in IP_FIELDS if (ip_field in field.lower())}
            if (ip_fields and (not ip_fields.issubset(ip_fields_filter))):
                # column mentions an IP version outside the filter -- skip it
                continue
            filtered_fields.append(field)
        return filtered_fields
    def display_results(self, endpoints, fields, sort_by=0, max_width=0, unique=False, nonzero=False, output_format='table', ipv4_only=True, ipv6_only=False, ipv4_and_ipv6=False):
        """Render endpoint records in the requested output format.

        Returns a table/CSV/JSON string, or a 'no results' message.
        """
        matrix = []
        fields = self.display_ip_filter(fields, ipv4_only, ipv6_only, ipv4_and_ipv6)
        # column name (lowercased) -> (value getter, index into self.all_fields)
        fields_lookup = {'id': (GetData._get_name, 0), 'mac': (GetData._get_mac, 1), 'mac address': (GetData._get_mac, 1), 'switch': (GetData._get_switch, 2), 'port': (GetData._get_port, 3), 'vlan': (GetData._get_vlan, 4), 'ipv4': (GetData._get_ipv4, 5), 'ipv4 subnet': (GetData._get_ipv4_subnet, 6), 'ipv6': (GetData._get_ipv6, 7), 'ipv6 subnet': (GetData._get_ipv6_subnet, 8), 'ethernet vendor': (GetData._get_ether_vendor, 9), 'ignored': (GetData._get_ignored, 10), 'state': (GetData._get_state, 11), 'next state': (GetData._get_next_state, 12), 'first seen': (GetData._get_first_seen, 13), 'last seen': (GetData._get_last_seen, 14), 'previous state': (GetData._get_prev_state, 15), 'ipv4 os': (GetData._get_ipv4_os, 16), 'ipv4 os\n(p0f)': (GetData._get_ipv4_os, 16), 'ipv6 os': (GetData._get_ipv6_os, 17), 'ipv6 os\n(p0f)': (GetData._get_ipv6_os, 17), 'previous ipv4 oses': (GetData._get_prev_ipv4_oses, 18), 'previous ipv4 oses\n(p0f)': (GetData._get_prev_ipv4_oses, 18), 'previous ipv6 oses': (GetData._get_prev_ipv6_oses, 19), 'previous ipv6 oses\n(p0f)': (GetData._get_prev_ipv6_oses, 19), 'role': (GetData._get_role, 20), 'role\n(networkml)': (GetData._get_role, 20), 'role confidence': (GetData._get_role_confidence, 21), 'role confidence\n(networkml)': (GetData._get_role_confidence, 21), 'previous roles': (GetData._get_prev_roles, 22), 'previous roles\n(networkml)': (GetData._get_prev_roles, 22), 'previous role confidences': (GetData._get_prev_role_confidences, 23), 'previous role confidences\n(networkml)': (GetData._get_prev_role_confidences, 23), 'ipv4 rdns': (GetData._get_ipv4_rdns, 24), 'ipv6 rdns': (GetData._get_ipv6_rdns, 25), 'sdn controller type': (GetData._get_controller_type, 26), 'sdn controller uri': (GetData._get_controller, 27), 'history': (GetData._get_history, 28), 'acl history': (GetData._get_acls, 29), 'pcap labels': (GetData._get_pcap_labels, 30)}
        records = []
        if (nonzero or unique):
            # first pass: collect raw values and note which columns have data
            raw_records = []
            all_fields_with_data = set()
            for endpoint in endpoints:
                raw_record = {field: fields_lookup[field.lower()][0](endpoint) for field in fields}
                fields_with_data = {field for (field, value) in raw_record.items() if (value and (value != NO_DATA))}
                all_fields_with_data.update(fields_with_data)
                if fields_with_data:
                    raw_records.append(raw_record)
            # second pass: drop entirely-empty columns, then build row lists
            all_fields_with_no_data = (set(fields) - all_fields_with_data)
            fields = [field for field in fields if (field in all_fields_with_data)]
            for raw_record in raw_records:
                for field in all_fields_with_no_data:
                    del raw_record[field]
                records.append([raw_record[field] for field in fields])
            if (len(fields) > 0):
                if unique:
                    # deduplicate rows (order is not preserved)
                    u_records = set(map(tuple, records))
                    records = list(u_records)
                    matrix = list(map(list, u_records))
                else:
                    matrix = records
        if ((not nonzero) and (not unique)):
            # plain pass: one row per endpoint, all requested columns
            for endpoint in endpoints:
                record = []
                for field in fields:
                    if (field.lower() in fields_lookup):
                        record.append(fields_lookup[field.lower()][0](endpoint))
                records.append(record)
                matrix.append(record)
        results = ''
        if (output_format == 'json'):
            results = json.dumps(records, indent='\t')
        elif (len(matrix) > 0):
            # sort rows, then prepend the header row of display names
            matrix = sorted(matrix, key=(lambda endpoint: endpoint[sort_by]))
            fields_header = []
            for field in fields:
                fields_header.append(self.all_fields[fields_lookup[field.lower()][1]])
            matrix.insert(0, fields_header)
            if (output_format == 'csv'):
                results = self.display_csv(matrix)
            else:
                results = self.display_table(len(fields), max_width, matrix)
        else:
            results = 'No results found for that query.'
        return results
    def display_table(column_count, max_width, matrix):
        """Draw matrix (first row = header) as a Texttable string."""
        table = Texttable(max_width=max_width)
        # treat every column as text
        table.set_cols_dtype((['t'] * column_count))
        table.add_rows(matrix)
        return table.draw()
    def display_csv(matrix):
        """Render matrix rows as CSV text."""
        csv_str = io.StringIO()
        csv_wr = csv.writer(csv_str)
        for row in matrix:
            csv_wr.writerow(row)
        return csv_str.getvalue()
class OptionSeriesDumbbellSonificationDefaultinstrumentoptionsMappingHighpass(Options):
    """Auto-generated option group for
    series.dumbbell.sonification.defaultInstrumentOptions.mapping.highpass."""
    # NOTE(review): accessors were presumably @property in the generated
    # original (decorators may have been lost in extraction) -- confirm.
    def frequency(self) -> 'OptionSeriesDumbbellSonificationDefaultinstrumentoptionsMappingHighpassFrequency':
        """Sub-options for the highpass filter frequency."""
        return self._config_sub_data('frequency', OptionSeriesDumbbellSonificationDefaultinstrumentoptionsMappingHighpassFrequency)
    def resonance(self) -> 'OptionSeriesDumbbellSonificationDefaultinstrumentoptionsMappingHighpassResonance':
        """Sub-options for the highpass filter resonance."""
        return self._config_sub_data('resonance', OptionSeriesDumbbellSonificationDefaultinstrumentoptionsMappingHighpassResonance)
def extractZathaelblogWordpressCom(item):
    """Parse a release entry from zathaelblog.wordpress.com.

    Returns None for previews or posts without a chapter/volume, a release
    message when a known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # tag -> (series name, translation type); insertion order is checked first-to-last
    release_map = {
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tagname, (name, tl_type) in release_map.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def change_data_capture(qbo_class_list, timestamp, qb=None):
    """Run a QuickBooks Change Data Capture query for the given object classes.

    Args:
        qbo_class_list: QBO model classes (each exposing qbo_object_name
            and a from_json constructor).
        timestamp: datetime (formatted via qb_datetime_format) or a
            preformatted string; changes since this moment are returned.
        qb: optional QuickBooks client; a default one is created when omitted.

    Returns:
        A CDCResponse carrying one QueryResponse attribute per object type
        found in the reply.
    """
    if (qb is None):
        qb = QuickBooks()
    # map object-type name -> class so payloads can be deserialized
    cdc_class_dict = dict(((cls.qbo_object_name, cls) for cls in qbo_class_list))
    cdc_class_names = list(cdc_class_dict.keys())
    entity_list_string = ','.join(cdc_class_names)
    if isinstance(timestamp, datetime):
        timestamp_string = qb_datetime_format(timestamp)
    else:
        timestamp_string = timestamp
    resp = qb.change_data_capture(entity_list_string, timestamp_string)
    cdc_response_dict = resp.pop('CDCResponse')
    # NOTE(review): from_json receives `resp` AFTER the 'CDCResponse' payload
    # was popped, i.e. only the envelope -- looks intentional, but confirm.
    cdc_response = CDCResponse.from_json(resp)
    query_response_list = cdc_response_dict[0]['QueryResponse']
    for query_response_dict in query_response_list:
        # each entry should contain exactly one known object-type key
        qb_object_names = [x for x in query_response_dict if (x in cdc_class_names)]
        if (len(qb_object_names) == 1):
            qb_object_name = qb_object_names[0]
            qb_object_list = query_response_dict.pop(qb_object_name)
            qb_object_cls = cdc_class_dict[qb_object_name]
            # build the QueryResponse from the remaining metadata, then attach
            # the deserialized objects
            query_response = QueryResponse.from_json(query_response_dict)
            query_response._object_list = [qb_object_cls.from_json(obj) for obj in qb_object_list]
            setattr(cdc_response, qb_object_name, query_response)
    return cdc_response
class OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMapping(Options):
    """Auto-generated option group for
    series.errorbar.sonification.defaultInstrumentOptions.mapping."""
    # NOTE(review): accessors were presumably @property in the generated
    # original (decorators may have been lost in extraction) -- confirm.
    def frequency(self) -> 'OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingFrequency':
        """Frequency mapping sub-options."""
        return self._config_sub_data('frequency', OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingFrequency)
    def gapBetweenNotes(self) -> 'OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingGapbetweennotes':
        """Gap-between-notes mapping sub-options."""
        return self._config_sub_data('gapBetweenNotes', OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingGapbetweennotes)
    def highpass(self) -> 'OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingHighpass':
        """Highpass filter mapping sub-options."""
        return self._config_sub_data('highpass', OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingHighpass)
    def lowpass(self) -> 'OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingLowpass':
        """Lowpass filter mapping sub-options."""
        return self._config_sub_data('lowpass', OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingLowpass)
    def noteDuration(self) -> 'OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingNoteduration':
        """Note-duration mapping sub-options."""
        return self._config_sub_data('noteDuration', OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingNoteduration)
    def pan(self) -> 'OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingPan':
        """Stereo-pan mapping sub-options."""
        return self._config_sub_data('pan', OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingPan)
    def pitch(self) -> 'OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingPitch':
        """Pitch mapping sub-options."""
        return self._config_sub_data('pitch', OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingPitch)
    def playDelay(self) -> 'OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingPlaydelay':
        """Play-delay mapping sub-options."""
        return self._config_sub_data('playDelay', OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingPlaydelay)
    def time(self) -> 'OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingTime':
        """Time mapping sub-options."""
        return self._config_sub_data('time', OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingTime)
    def tremolo(self) -> 'OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingTremolo':
        """Tremolo mapping sub-options."""
        return self._config_sub_data('tremolo', OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingTremolo)
    def volume(self) -> 'OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingVolume':
        """Volume mapping sub-options."""
        return self._config_sub_data('volume', OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingVolume)
def fortios_antivirus(data, fos):
    """Dispatch the antivirus 'settings' task and report Ansible-style results.

    Returns (failed, changed, response, diff-placeholder).
    """
    fos.do_member_operation('antivirus', 'settings')
    if not data['antivirus_settings']:
        # fail_json aborts the module run; resp is never read on this path
        fos._module.fail_json(msg=('missing task body: %s' % 'antivirus_settings'))
    else:
        resp = antivirus_settings(data, fos)
    failed = not is_successful_status(resp)
    changed = (is_successful_status(resp) and (resp['revision_changed'] if ('revision_changed' in resp) else True))
    return (failed, changed, resp, {})
def regtest_topup_account(receive_address: P2PKH_Address, amount: int=25) -> Optional[str]:
    """Send *amount* regtest coins to *receive_address*, mining first if needed.

    Loops until the node's matured balance covers the amount, then issues a
    sendtoaddress RPC and returns the resulting txid.
    """
    while True:
        matured_balance = regtest_get_mined_balance()
        if matured_balance >= amount:
            break
        # a fresh chain needs 200 blocks before coinbases mature;
        # afterwards mine one block at a time
        nblocks = 200 if matured_balance == 0 else 1
        result = node_rpc_call('generatetoaddress', nblocks, Net.REGTEST_P2PKH_ADDRESS)
        if result.status_code == 200:
            logger.debug(f"generated {nblocks}: {result.json()['result']}")
    payload = json.dumps({'jsonrpc': '2.0', 'method': 'sendtoaddress', 'params': [receive_address.to_string(), amount], 'id': 0})
    result = requests.post(BITCOIN_NODE_URI, data=payload)
    if result.status_code != 200:
        raise requests.exceptions.HTTPError(result.text)
    txid = result.json()['result']
    logger.info("topped up wallet with %s coins to receive address='%s'. txid=%s", amount, receive_address.to_string(), txid)
    return txid
def test_extract_frames() -> None:
    """Extracting at 30 fps must yield exactly 324 jpg frames for each sample clip."""
    for target_path in ('.assets/examples/target-240p-25fps.mp4', '.assets/examples/target-240p-30fps.mp4', '.assets/examples/target-240p-60fps.mp4'):
        temp_dir = get_temp_directory_path(target_path)
        create_temp(target_path)
        assert extract_frames(target_path, 30.0) is True
        assert len(glob.glob1(temp_dir, '*.jpg')) == 324
        clear_temp(target_path)
class EventLocationList(ResourceList):
    """List endpoint serving the most popular event locations."""
    def get(self, *args, **kwargs):
        """Return the top 6 searchable location names ranked by event count."""
        qs = QSManager(request.args, self.schema)
        popular_locations = db.session.query(Event.searchable_location_name, func.count(Event.id).label('counts')).group_by(Event.searchable_location_name).order_by(desc('counts')).limit(6)
        locations = []
        for (location, _) in popular_locations:
            if (location is not None):
                # synthesize transient EventLocation objects; ids are list positions
                new_location = EventLocation(name=location)
                new_location.id = len(locations)
                locations.append(new_location)
        schema = EventLocationSchema()
        result = schema.dump(locations, many=True).data
        # NOTE(review): view args are forwarded only when view_kwargs is
        # literally True -- confirm this matches the framework contract.
        view_kwargs = (request.view_args if (getattr(self, 'view_kwargs', None) is True) else {})
        add_pagination_links(result, len(locations), qs, url_for(self.view, **view_kwargs))
        result.update({'meta': {'count': len(locations)}})
        return result
    # flask-rest-jsonapi resource configuration
    decorators = (api.has_permission('is_admin', methods='POST'),)
    schema = EventLocationSchema
    data_layer = {'session': db.session, 'model': EventLocation}
class TestPlayerBase(unittest.TestCase):
    """Shared fixture for player tests: a Flask app rooted at a fake base dir."""
    module = player

    def assertPathEqual(self, a, b):
        # normcase makes the comparison case-insensitive on Windows
        return self.assertEqual(os.path.normcase(a), os.path.normcase(b))

    def assertPathListEqual(self, a, b):
        normalized_a = list(map(os.path.normcase, a))
        normalized_b = list(map(os.path.normcase, b))
        return self.assertListEqual(normalized_a, normalized_b)

    def setUp(self):
        if os.name == 'nt':
            self.base = 'c:\\base'
        else:
            self.base = '/base'
        self.app = flask.Flask(self.__class__.__name__)
        self.app.config['directory_base'] = self.base
        self.manager = ManagerMock()
class PythranBackend(BackendAOT):
    """Transonic ahead-of-time backend targeting Pythran."""
    backend_name = 'pythran'
    # Pythran header files carry the exported signatures.
    suffix_header = '.pythran'

    def check_if_compiled(self, module):
        """A Pythran-built extension module defines __pythran__."""
        return hasattr(module, '__pythran__')

    def _append_line_header_variable(self, lines_header, name_variable):
        # Module-level variables are exported with a bare `export NAME` line.
        lines_header.append(f'''export {name_variable}
''')

    def _make_header_from_fdef_signatures(self, fdef, signatures_as_lists_strings, locals_types=None, returns=None):
        """Build the sorted list of Pythran `export` lines for one function."""
        nb_defaults = len(fdef.args.defaults)
        if nb_defaults:
            # also export variants with trailing defaulted arguments omitted
            new_signatures = []
            for signature_as_strings in signatures_as_lists_strings:
                for nb_args_not_given in range(1, (nb_defaults + 1)):
                    new_signatures.append(signature_as_strings[:(- nb_args_not_given)])
            signatures_as_lists_strings.extend(new_signatures)
        signatures_func = set((f"export {fdef.name}({', '.join(signature_as_strings)})" for signature_as_strings in signatures_as_lists_strings))
        if ((not fdef.args.args) and (not signatures_func)):
            # zero-argument function with no computed signatures still needs an export
            signatures_func.add(f'export {fdef.name}()')
        signatures_func = sorted(signatures_func)
        if signatures_func:
            # terminate the export block with a newline
            signatures_func[(- 1)] = (signatures_func[(- 1)] + '\n')
        return signatures_func

    def make_meson_code(self, file_names, subdir):
        """Generate meson.build rules: transpile each .py with Pythran, then
        build the resulting C++ source as an extension module."""
        meson_parts = []
        # strip the '.py' suffix
        stems = [name[:(- 3)] for name in file_names]
        # NOTE(review): the template below looks extraction-damaged (empty ''
        # command argument, '/{name}.cpp' output path, lost inner indentation)
        # -- confirm against the upstream template before relying on it.
        for name in stems:
            meson_parts.append(f'''
{name} = custom_target(
'{name}',
output: ['{name}.cpp'],
input: '{name}.py',
command: [pythran, '-E', '', '-o', '/{name}.cpp']
)
{name} = py.extension_module(
'{name}',
{name},
cpp_args: cpp_args_pythran,
dependencies: [pythran_dep, np_dep],
# link_args: version_link_args,
install: true,
subdir: '{subdir}'
)
''')
        return ''.join(meson_parts)
def draw7_(pen):
    """Trace glyph contour #7: one closed outline made of quadratic segments."""
    # Each entry is the (control point, on-curve point) pair of one qCurveTo call.
    curve_segments = (
        ((793, 386), (802, 394)),
        ((811, 402), (819, 412)),
        ((819, 406), (814, 383.5)),
        ((809, 361), (796, 330.5)),
        ((783, 300), (760.5, 266.5)),
        ((738, 233), (701, 205.5)),
        ((664, 178), (612, 160.5)),
        ((560, 143), (489, 145)),
        ((414, 145), (363, 164)),
        ((312, 183), (280, 211.5)),
        ((248, 240), (231.5, 274.5)),
        ((215, 309), (208, 339.5)),
        ((201, 370), (200.5, 392.5)),
        ((200, 415), (200, 421)),
        ((207, 412), (217.5, 399)),
        ((228, 386), (254, 360)),
    )
    pen.moveTo((771, 367))
    for control_point, end_point in curve_segments:
        pen.qCurveTo(control_point, end_point)
    pen.closePath()
class EchoDialog(QDialog):
    """Dialog showing messages (``rostopic echo``-like) or the publish rate
    (``rostopic hz``-like) of one ROS topic, either through a local
    ``rospy`` subscriber or through an SSH-spawned remote ``rostopic``."""
    # Default display limits; each is user-adjustable at runtime via combo boxes.
    MESSAGE_CHARS_LIMIT = 1000
    MESSAGE_LINE_LIMIT = 80
    MESSAGE_HZ_LIMIT = 10
    MAX_DISPLAY_MSGS = 25
    # Number of inter-arrival/size samples kept for the statistics.
    STATISTIC_QUEUE_LEN = 1000
    # Toggles for which statistics appear in the status line.
    SHOW_BYTES = True
    SHOW_JITTER = True
    SHOW_STD_DEV = False
    SHOW_WINDOW_SIZE = False
    # Signals used to marshal data from ROS/SSH reader threads to the GUI thread.
    finished_signal = Signal(str)
    msg_signal = Signal(object, bool)
    text_hz_signal = Signal(str)
    text_signal = Signal(str)
    text_error_signal = Signal(str)
    request_pw = Signal(object)
def __init__(self, topic, msg_type, show_only_rate=False, masteruri=None, use_ssh=False, parent=None):
    """Load the UI, wire up signal handlers and start receiving.

    :param topic: name of the ROS topic to echo
    :param msg_type: message type name used to resolve the message class
    :param show_only_rate: if True, only the publish rate is displayed
    :param masteruri: ROS master URI, also used as SSH target for remote echo
    :param use_ssh: if True, skip the local subscriber and echo over SSH
    :param parent: Qt parent widget
    """
    QDialog.__init__(self, parent=parent)
    self._masteruri = masteruri
    masteruri_str = ('' if (masteruri is None) else ('[%s]' % masteruri))
    # The dialog layout is defined in a Qt Designer .ui file next to this module.
    echo_dialog_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ui', 'EchoDialog.ui')
    loadUi(echo_dialog_file, self)
    self.setObjectName(' - '.join(['EchoDialog', topic, masteruri_str]))
    self.setAttribute(Qt.WA_DeleteOnClose, True)
    self.setWindowFlags(Qt.Window)
    self.setWindowTitle(('%s %s %s' % (('Echo --- ' if (not show_only_rate) else 'Hz --- '), topic, masteruri_str)))
    self.setWindowIcon(nm.settings().icon('crystal_clear_prop_run_echo.png'))
    self.clearButton.setIcon(nm.settings().icon('crystal_clear_show_delete_log.png'))
    self.topicControlButton.setIcon(nm.settings().icon('sekkyumu_stop.png'))
    self.resize(900, 512)
    self.topic = topic
    self.show_only_rate = show_only_rate
    self.lock = threading.RLock()
    # Statistics state (see _count_messages/_on_calc_hz).
    self.last_printed_count = 0
    self.msg_t0 = (- 1.0)          # timestamp of the first counted message
    self.msg_tn = 0                # timestamp of the last counted message
    self.times = []                # inter-arrival times
    self.bytes = []                # serialized message sizes
    self.message_count = 0
    self._state_message = ''
    self._state_size_message = ''
    self._scrapped_msgs = 0        # total messages dropped by the Hz filter
    self._scrapped_msgs_sl = 0     # dropped since the last notice was shown
    self._last_received_ts = 0
    self.chars_limit = self.MESSAGE_CHARS_LIMIT
    self.receiving_hz = self.MESSAGE_HZ_LIMIT
    self.line_limit = self.MESSAGE_LINE_LIMIT
    self.max_displayed_msgs = self.MAX_DISPLAY_MSGS
    self.digits_after_in_array = 2
    self.enabled_message_filter = True
    self.field_filter_fn = None
    self._latched = False
    self._msgs = []                # recent (msg, timestamp) pairs for re-display
    self.filterFrame.setVisible(False)
    self.topicControlButton.clicked.connect(self.on_topic_control_btn_clicked)
    self.clearButton.clicked.connect(self.on_clear_btn_clicked)
    if show_only_rate:
        self.filterButton.setVisible(False)
    else:
        # Full echo mode: wire up all filter widgets and apply the defaults.
        self.filterButton.clicked.connect(self.on_filter_clicked)
        self.showStringsCheckBox.toggled.connect(self.on_no_str_checkbox_toggled)
        self.maxLenStringComboBox.activated[str].connect(self.combobox_reduce_ch_activated)
        self.showArraysCheckBox.toggled.connect(self.on_no_arr_checkbox_toggled)
        self.maxDigitsComboBox.activated[str].connect(self.combobox_reduce_digits_activated)
        self.enableMsgFilterCheckBox.toggled.connect(self.on_enable_msg_filter_checkbox_toggled)
        self.msgFilterCheckBox.toggled.connect(self.on_msg_filter_checkbox_toggled)
        self.maxLenComboBox.activated[str].connect(self.on_combobox_chars_activated)
        self.maxHzComboBox.activated[str].connect(self.on_combobox_hz_activated)
        self.displayCountComboBox.activated[str].connect(self.on_combobox_count_activated)
        self.combobox_reduce_ch_activated(self.MESSAGE_LINE_LIMIT)
        self.on_combobox_chars_activated(self.MESSAGE_CHARS_LIMIT)
        self.on_combobox_hz_activated(self.MESSAGE_HZ_LIMIT)
        self.on_combobox_count_activated(self.MAX_DISPLAY_MSGS)
        self.filterButton.setFocus()
    self.display.setReadOnly(True)
    self.display.document().setMaximumBlockCount(500)
    self._blocks_in_msg = None
    self.display.setOpenLinks(False)
    self.display.anchorClicked.connect(self._on_display_anchorClicked)
    errmsg = ''
    try:
        self.__msg_class = message.get_message_class(msg_type)
        if (not self.__msg_class):
            errmsg = ('Cannot load message class for [%s]. Did you build messages?' % msg_type)
    except Exception as e:
        self.__msg_class = None
        # NOTE(review): "messagest" is a typo in this user-facing string;
        # left untouched here (comment-only change), fix separately.
        errmsg = ('Cannot load message class for [%s]. Did you build messagest?\nError: %s' % (msg_type, utf8(e)))
    self.msg_signal.connect(self._append_message)
    self.sub = None
    self.ssh_output_file = None
    self.ssh_error_file = None
    self.ssh_input_file = None
    self.text_signal.connect(self._append_text)
    self.text_hz_signal.connect(self._append_text_hz)
    self._current_msg = ''
    self._current_errmsg = ''
    self.text_error_signal.connect(self._append_error_text)
    if use_ssh:
        # Forced SSH mode: do not use the local message class at all.
        self.__msg_class = None
        self._on_display_anchorClicked(QUrl(self._masteruri))
    elif (self.__msg_class is None):
        # Message class unavailable locally: offer the SSH fallback link.
        errtxt = ('<pre style="color:red; font-family:Fixedsys,Courier,monospace; padding:10px;">\n%s</pre>' % errmsg)
        self.display.setText(('<a href="%s">open using SSH</a>' % masteruri))
        self.display.append(errtxt)
    else:
        self.sub = rospy.Subscriber(self.topic, self.__msg_class, self._msg_handle)
    # Refresh the rate statistics once per second.
    self.print_hz_timer = QTimer()
    self.print_hz_timer.timeout.connect(self._on_calc_hz)
    self.print_hz_timer.start(1000)
    self._start_time = time.time()
def closeEvent(self, event):
    """Tear down the subscription/SSH streams and notify listeners."""
    self.print_hz_timer.stop()
    if (self.sub is not None):
        self.sub.unregister()
        del self.sub
    try:
        # Close SSH streams; chr(3) is ETX (Ctrl-C) and asks the remote
        # `rostopic` process to terminate.
        self.ssh_output_file.close()
        self.ssh_error_file.close()
        self.ssh_input_file.write(('%s\n' % chr(3)))
        self.ssh_input_file.close()
    except Exception:
        # Streams may be None or already closed -- best-effort cleanup.
        pass
    self.finished_signal.emit(self.topic)
    if (self.parent() is None):
        # Stand-alone dialog: closing it ends the application.
        QApplication.quit()
def create_field_filter(self, echo_nostr, echo_noarr):
    """Return a generator function over a ROS message's slot names.

    The returned callable yields each slot name of its argument, skipping
    string-typed slots when *echo_nostr* is True and array-typed slots
    (type containing ``[``) when *echo_noarr* is True.  Any error while
    inspecting the message simply ends the iteration.
    """
    def field_filter(val):
        try:
            for slot_name, slot_type in zip(val.__slots__, val._slot_types):
                skip_array = echo_noarr and ('[' in slot_type)
                skip_string = echo_nostr and ('string' in slot_type)
                if skip_array or skip_string:
                    continue
                yield slot_name
        except Exception:
            # Objects without slot metadata yield nothing.
            pass
    return field_filter
def on_filter_clicked(self, checked):
    # Show or hide the filter settings panel.
    self.filterFrame.setVisible(checked)

def on_no_str_checkbox_toggled(self, state):
    # `state` means "show strings"; rebuild the slot filter with the
    # inverted flags expected by create_field_filter.
    self.maxLenStringComboBox.setEnabled(state)
    self.field_filter_fn = self.create_field_filter((not state), (not self.showArraysCheckBox.isChecked()))

def on_no_arr_checkbox_toggled(self, state):
    # `state` means "show arrays"; rebuild the slot filter accordingly.
    self.maxDigitsComboBox.setEnabled(state)
    self.field_filter_fn = self.create_field_filter((not self.showStringsCheckBox.isChecked()), (not state))
def combobox_reduce_ch_activated(self, ch_txt):
    """Apply a new per-line character limit and re-render stored messages."""
    try:
        self.line_limit = int(ch_txt)
    except ValueError:
        try:
            self.line_limit = float(ch_txt)
        except ValueError:
            # Not a number: restore the previous value in the edit box.
            self.maxLenStringComboBox.setEditText(str(self.line_limit))
    self.display.clear()
    with self.lock:
        # Re-render the buffered messages (store=False: already in _msgs).
        for (msg, current_time) in self._msgs:
            self._append_message(msg, self._latched, current_time, False)

def combobox_reduce_digits_activated(self, ch_txt):
    """Apply a new numeric precision for arrays and re-render messages."""
    try:
        self.digits_after_in_array = int(ch_txt)
    except ValueError:
        # Non-numeric input disables fixed precision entirely.
        self.digits_after_in_array = None
        self.maxDigitsComboBox.setEditText('')
    self.display.clear()
    with self.lock:
        for (msg, current_time) in self._msgs:
            self._append_message(msg, self._latched, current_time, False)
def on_enable_msg_filter_checkbox_toggled(self, state):
    """Enable/disable chars- and Hz-based message filtering."""
    if (state == self.enabled_message_filter):
        # Already in this state; also breaks the mirror-checkbox recursion.
        return
    self.msgFilterCheckBox.setChecked(state)
    self.enabled_message_filter = state
    self.maxLenComboBox.setEnabled(state)
    self.maxHzComboBox.setEnabled(state)
    if self.enabled_message_filter:
        # Re-apply the current combo box values without re-rendering twice.
        self.on_combobox_chars_activated(self.maxLenComboBox.currentText(), False)
        self.on_combobox_hz_activated(self.maxHzComboBox.currentText(), False)
    else:
        # 0 means "no limit" for both settings.
        self.chars_limit = 0
        self.receiving_hz = 0
    self.display.clear()
    with self.lock:
        for (msg, current_time) in self._msgs:
            self._append_message(msg, self._latched, current_time, False)

def on_msg_filter_checkbox_toggled(self, state):
    # Mirror checkbox: forward to the main checkbox, whose handler does
    # the actual work (and flips this one back in sync).
    if (state == self.enabled_message_filter):
        return
    self.enableMsgFilterCheckBox.setChecked(state)
def on_combobox_chars_activated(self, chars_txt, update_display=True):
    """Set the per-message character limit (no-op while filtering is off)."""
    if (not self.enabled_message_filter):
        return
    try:
        self.chars_limit = int(chars_txt)
    except ValueError:
        try:
            self.chars_limit = float(chars_txt)
        except ValueError:
            # Not a number: restore the previous value in the edit box.
            self.maxLenComboBox.setEditText(str(self.chars_limit))
    if update_display:
        self.display.clear()
        with self.lock:
            for (msg, current_time) in self._msgs:
                self._append_message(msg, self._latched, current_time, False)

def on_combobox_hz_activated(self, hz_txt, update_display=True):
    """Set the maximum display rate in Hz (no-op while filtering is off)."""
    if (not self.enabled_message_filter):
        return
    try:
        self.receiving_hz = int(hz_txt)
    except ValueError:
        try:
            self.receiving_hz = float(hz_txt)
        except ValueError:
            self.maxHzComboBox.setEditText(str(self.receiving_hz))
    if update_display:
        self.display.clear()
        with self.lock:
            for (msg, current_time) in self._msgs:
                self._append_message(msg, self._latched, current_time, False)

def on_combobox_count_activated(self, count_txt):
    """Set how many messages stay visible in the text view."""
    try:
        self.max_displayed_msgs = int(count_txt)
        # Force recomputation of the per-message block count.
        self._blocks_in_msg = None
    except ValueError:
        self.displayCountComboBox.setEditText(str(self.max_displayed_msgs))
def on_clear_btn_clicked(self):
    """Clear the display, the message buffer and all statistics."""
    self.display.clear()
    with self.lock:
        del self._msgs[:]
        self.message_count = 0
        self._scrapped_msgs = 0
        del self.times[:]
        del self.bytes[:]
def on_topic_control_btn_clicked(self):
    """Toggle between a running and a stopped echo (local or SSH)."""
    try:
        if ((self.sub is None) and (self.ssh_output_file is None)):
            # Currently stopped -> start.
            if self.__msg_class:
                self.sub = rospy.Subscriber(self.topic, self.__msg_class, self._msg_handle)
                self._start_time = time.time()
            else:
                # No local message class: fall back to the SSH path.
                self._on_display_anchorClicked(QUrl(self._masteruri))
            self.topicControlButton.setIcon(nm.settings().icon('sekkyumu_stop.png'))
        else:
            # Currently running -> stop whichever source is active.
            if (self.sub is not None):
                self.sub.unregister()
                self.sub = None
            elif (self.ssh_output_file is not None):
                self.ssh_output_file.close()
                self.ssh_error_file.close()
                self.ssh_output_file = None
            self.topicControlButton.setIcon(nm.settings().icon('sekkyumu_play.png'))
    except Exception as e:
        rospy.logwarn(('Error while stop/play echo for topic %s: %s' % (self.topic, utf8(e))))

def _msg_handle(self, data):
    # rospy callback thread -> GUI thread via signal; the bool marks a
    # latched topic (connection header field 'latching').
    self.msg_signal.emit(data, (data._connection_header['latching'] != '0'))
def _append_message(self, msg, latched, current_time=None, store=True):
    """Render one received message into the display.

    :param msg: the ROS message (or pre-rendered data on re-display)
    :param latched: True if the message came from a latched topic
    :param current_time: receive timestamp; defaults to now
    :param store: keep the message for later re-display and update status
    """
    if (current_time is None):
        current_time = time.time()
    self._latched = latched
    if store:
        with self.lock:
            self._msgs.append((msg, current_time))
            if (len(self._msgs) > 25):
                # BUG FIX: was pop(), which discarded the message that was
                # just appended (freezing the buffer at its first 26
                # entries); drop the OLDEST entry instead so the buffer
                # holds the most recent messages.
                self._msgs.pop(0)
    msg_len = (- 1)
    if (self.SHOW_BYTES or self.show_only_rate):
        # Serialize once to measure the message size in bytes.
        buff = None
        try:
            from cStringIO import StringIO  # Python 2
            buff = StringIO()
            import os
            msg.serialize(buff)
            buff.seek(0, os.SEEK_END)
            msg_len = buff.tell()
        except ImportError:
            from io import BytesIO  # Python 3
            buff = BytesIO()
            msg.serialize(buff)
            msg_len = buff.getbuffer().nbytes
    self._count_messages(current_time, msg_len)
    # Rate limiting: drop messages arriving faster than receiving_hz.
    # Latched messages are exempt during the first 3 s after start so the
    # initial (possibly old) value is always shown.
    if ((self._last_received_ts != 0) and (self.receiving_hz != 0)):
        if ((current_time - self._last_received_ts) < (1.0 / self.receiving_hz)):
            if ((not latched) or (latched and ((current_time - self._start_time) > 3.0))):
                self._scrapped_msgs += 1
                self._scrapped_msgs_sl += 1
                return
    self._last_received_ts = current_time
    if (not self.show_only_rate):
        msg = self.strify_message(msg, field_filter=self.field_filter_fn, fixed_numeric_width=self.digits_after_in_array)
        if isinstance(msg, tuple):
            msg = msg[0]
        msg = self._trim_width(msg)
        # BUG FIX: was the no-op replace('<', '<')/replace('>', '>') -- the
        # HTML entities had been lost; escape angle brackets so message
        # content cannot corrupt the rich-text display.
        msg = msg.replace('<', '&lt;').replace('>', '&gt;')
        msg_cated = False
        if ((self.chars_limit != 0) and (len(msg) > self.chars_limit)):
            msg = msg[0:self.chars_limit]
            msg_cated = True
        # Only auto-scroll if the user is already near the bottom.
        ver_srollbar = self.display.verticalScrollBar()
        scroll_is_at_end = ((ver_srollbar.maximum() - ver_srollbar.value()) <= 10)
        if (self._scrapped_msgs_sl > 0):
            txt = ('<pre style="color:red; font-family:Fixedsys,Courier,monospace; padding:10px;">scrapped %s message because of Hz-settings</pre>' % self._scrapped_msgs_sl)
            self.display.append(txt)
            self._scrapped_msgs_sl = 0
        txt = ('<pre style="background-color:#FFFCCC; color:#000000;font-family:Fixedsys,Courier; padding:10px;"> %s \n%s</pre>' % (datetime.now().strftime('%d.%m.%Y %H:%M:%S.%f'), msg))
        self._update_max_msg_count(txt)
        self.display.append(txt)
        if msg_cated:
            txt = '<pre style="color:red; font-family:Fixedsys,Courier,monospace; padding:10px;">message has been cut off</pre>'
            self.display.append(txt)
        if scroll_is_at_end:
            ver_srollbar.setValue(ver_srollbar.maximum())
    if store:
        self._print_status()
def _count_messages(self, ts=None, msg_len=(- 1)):
    """Register one received message for rate/bandwidth statistics.

    :param ts: receive timestamp; defaults to the current time.
        BUG FIX: the signature was ``ts=time.time()`` -- that default is
        evaluated once at import time, so callers omitting *ts* got a
        stale, constant timestamp.  A ``None`` sentinel resolves "now"
        at call time instead.
    :param msg_len: serialized message size in bytes, or -1 if unknown
    """
    current_time = time.time() if ts is None else ts
    with self.lock:
        if ((self.msg_t0 < 0) or (self.msg_t0 > current_time)):
            # First message, or the clock jumped backwards: restart stats.
            self.msg_t0 = current_time
            self.msg_tn = current_time
            self.times = []
            self.bytes = []
        else:
            self.times.append((current_time - self.msg_tn))
            if (msg_len > (- 1)):
                self.bytes.append(msg_len)
            self.msg_tn = current_time
        # Bound both sample windows to STATISTIC_QUEUE_LEN entries.
        if (len(self.times) > self.STATISTIC_QUEUE_LEN):
            self.times.pop(0)
        if (len(self.bytes) > self.STATISTIC_QUEUE_LEN):
            self.bytes.pop(0)
        self.message_count += 1
def _trim_width(self, msg):
    """Clip every line of *msg* to ``self.line_limit`` characters.

    Over-long lines are shortened to ``line_limit - 3`` characters plus an
    ``...`` ellipsis.  A limit of 0 disables clipping.  Note: when a limit
    is active the result always ends with a newline (as before).
    """
    if self.line_limit == 0:
        return msg
    clipped = [
        line if len(line) <= self.line_limit else (line[0:(self.line_limit - 3)] + '...')
        for line in msg.splitlines()
    ]
    return ''.join((line + '\n') for line in clipped)
def _update_max_msg_count(self, txt):
    """Derive the document block limit from one rendered message *txt*."""
    if (self._blocks_in_msg is None):
        # Measure how many text blocks a single message occupies, then size
        # the document so that max_displayed_msgs messages fit.
        td = QTextDocument(txt)
        self._blocks_in_msg = td.blockCount()
        self.display.document().setMaximumBlockCount((self._blocks_in_msg * self.max_displayed_msgs))
def _on_calc_hz(self):
    """Timer slot (1 s): refresh rate/size/jitter statistics in the UI."""
    if rospy.is_shutdown():
        self.close()
        return
    if ((not self.show_only_rate) and ((time.time() - self._last_received_ts) > 1)):
        # Quiet for >1 s: flush any pending "scrapped messages" notice.
        if (self._scrapped_msgs_sl > 0):
            txt = ('<pre style="color:red; font-family:Fixedsys,Courier,monospace; padding:10px;">scrapped %s message because of Hz-settings</pre>' % self._scrapped_msgs_sl)
            self._scrapped_msgs_sl = 0
            self.display.append(txt)
    if (self.message_count == self.last_printed_count):
        # Nothing new since the last update.
        return
    with self.lock:
        message_rate = ''
        message_bytes = ''
        message_jitter = ''
        message_window = ''
        message_std_dev = ''
        message_scrapped = ''
        sum_times = sum(self.times)
        if (sum_times == 0):
            # Guard against division by zero below.
            sum_times = 1
        if ((self.SHOW_BYTES or self.show_only_rate) and self.bytes):
            sum_bytes = sum(self.bytes)
            avg = (sum_bytes / len(self.bytes))
            last = self.bytes[(- 1)]
            if (avg != last):
                message_bytes = ('size[ last: %s, avg: %s ]' % (self._normilize_size_print(last), self._normilize_size_print(avg)))
            else:
                message_bytes = ('size: %s' % self._normilize_size_print(last))
            byte_rate = (float(sum_bytes) / float(sum_times))
            message_bytes += (' bw: %s/s' % self._normilize_size_print(byte_rate))
        n = len(self.times)
        if (n < 2):
            # Need at least two intervals for meaningful statistics.
            return
        mean = (sum_times / n)
        rate = ((1.0 / mean) if (mean > 0.0) else 0)
        message_rate = ('average rate: %.3f' % rate)
        if (self.SHOW_JITTER or self.show_only_rate):
            max_delta = max(self.times)
            min_delta = min(self.times)
            message_jitter = ('jitter[ min: %.3fs max: %.3fs ]' % (min_delta, max_delta))
        self.last_printed_count = self.message_count
        if (self.SHOW_STD_DEV or self.show_only_rate):
            std_dev = math.sqrt((sum((((x - mean) ** 2) for x in self.times)) / n))
            message_std_dev = ('std dev: %.5fs' % std_dev)
        if (self.SHOW_WINDOW_SIZE or self.show_only_rate):
            message_window = ('window: %s' % (n + 1))
        if (self._scrapped_msgs > 0):
            message_scrapped += ('scrapped msgs: %s' % self._scrapped_msgs)
        self._state_message = ''
        self._state_size_message = message_bytes
        # Join the non-empty fragments with single spaces.
        for msg in [message_rate, message_jitter, message_std_dev, message_window, message_scrapped]:
            if msg:
                if self._state_message:
                    self._state_message += ' '
                self._state_message += msg
        self._print_status()
        if self.show_only_rate:
            self.display.append(('%s %s' % (self._state_message, message_bytes)))
def _normilize_size_print(self, size):
    """Format a byte count as ``B``/``KiB``/``MiB`` for the status line.

    (The method name's typo is kept -- callers use it.)
    """
    # (threshold, divisor, format) checked largest-first.
    for threshold, divisor, fmt in ((999999, 1048576.0, '%.2fMiB'),
                                    (999, 1024.0, '%.2fKiB')):
        if size > threshold:
            return fmt % (size / divisor)
    return ('%dB' % size)
def _print_status(self):
    """Refresh the status labels (message count, rate and size stats)."""
    text = ('%s messages %s' % (self.message_count, self._state_message))
    if self._latched:
        text = ('[latched] %s' % text)
    self.statusLabel.setText(text)
    self.statusSizeLabel.setText(self._state_size_message)

def _append_text(self, text):
    """Collect remote `rostopic echo` output; '---' separates messages."""
    with self.lock:
        self._current_msg += text
        if (self._current_msg.find('---') != (- 1)):
            messages = self._current_msg.split('---')
            for m in messages[:(- 1)]:
                current_time = time.time()
                self._count_messages(current_time)
                m = self._trim_width(m)
                txt = ('<pre style="background-color:#FFFCCC; color:#000000;font-family:Fixedsys,Courier; padding:10px;"> %s \n%s</pre>' % (datetime.now().strftime('%d.%m.%Y %H:%M:%S.%f'), m))
                self._update_max_msg_count(txt)
                self.display.append(txt)
            # Keep the (possibly incomplete) tail for the next chunk.
            self._current_msg = messages[(- 1)]
        self._print_status()

def _append_error_text(self, text):
    """Collect remote stderr output; show complete lines in red."""
    with self.lock:
        self._current_errmsg += text
        if (self._current_errmsg.find('\n') != (- 1)):
            messages = self._current_errmsg.split('\n')
            for m in messages[:(- 1)]:
                txt = ('<pre style="color:red; font-family:Fixedsys,Courier,monospace; padding:10px;">%s</pre>' % m)
                self.display.append(txt)
            self._current_errmsg = messages[(- 1)]

def _append_text_hz(self, text):
    """Collect remote `rostopic hz` output; show complete lines."""
    with self.lock:
        self._current_msg += text
        if (self._current_msg.find('\n') != (- 1)):
            messages = self._current_msg.split('\n')
            for m in messages[:(- 1)]:
                txt = ('<div style="font-family:Fixedsys,Courier;">%s</div>' % m)
                self.display.append(txt)
            self._current_msg = messages[(- 1)]
def _on_display_anchorClicked(self, url, user=None, pw=None):
    """Start a remote `rostopic hz|echo` via SSH and spawn reader threads."""
    try:
        ok = False
        if self.show_only_rate:
            (self.ssh_input_file, self.ssh_output_file, self.ssh_error_file, ok) = nm.ssh().ssh_exec(url.host(), [('rostopic hz %s' % self.topic)], user, pw, auto_pw_request=True, get_pty=True)
            self.statusLabel.setText(('connected to %s over SSH' % url.host()))
        else:
            # Forward the string/array visibility toggles to the remote tool.
            nostr = ('--nostr' if (not self.showStringsCheckBox.isChecked()) else '')
            noarr = ('--noarr' if (not self.showArraysCheckBox.isChecked()) else '')
            (self.ssh_input_file, self.ssh_output_file, self.ssh_error_file, ok) = nm.ssh().ssh_exec(url.host(), [('rostopic echo %s %s %s' % (nostr, noarr, self.topic))], user, pw, auto_pw_request=True, get_pty=True)
        if ok:
            self.display.clear()
            target = (self._read_output_hz if self.show_only_rate else self._read_output)
            # NOTE(review): Thread.setDaemon() is deprecated; prefer the
            # `daemon=True` constructor argument when touching this code.
            thread = threading.Thread(target=target, args=(self.ssh_output_file,))
            thread.setDaemon(True)
            thread.start()
            thread = threading.Thread(target=self._read_error, args=(self.ssh_error_file,))
            thread.setDaemon(True)
            thread.start()
        elif self.ssh_output_file:
            # ssh_exec failed after opening streams -- close them again.
            self.ssh_output_file.close()
            self.ssh_error_file.close()
    except Exception as e:
        self._append_error_text(('%s\n' % e))
def _read_output_hz(self, output_file):
    """Reader thread: forward remote `rostopic hz` stdout byte by byte."""
    try:
        while (not output_file.closed):
            text = output_file.read(1)
            if text:
                self.text_hz_signal.emit(text)
    except Exception:
        # The channel may be torn down at any moment; just end the thread.
        pass
def _read_output(self, output_file):
    """Reader thread: forward remote `rostopic echo` stdout byte by byte.

    Consistency/robustness fix: the sibling readers (`_read_output_hz`,
    `_read_error`) guard against the SSH channel being torn down mid-read;
    this one lacked the guard, so the reader thread could die with an
    unhandled exception on disconnect.
    """
    try:
        while (not output_file.closed):
            text = output_file.read(1)
            if text:
                self.text_signal.emit(text)
    except Exception:
        # Channel closed/broken: end the thread quietly, like the siblings.
        pass
def _read_error(self, error_file):
    """Reader thread: forward remote stderr byte by byte."""
    try:
        while (not error_file.closed):
            text = error_file.read(1)
            if text:
                self.text_error_signal.emit(text)
    except Exception:
        # The channel may be torn down at any moment; just end the thread.
        pass
def strify_message(cls, val, indent='', time_offset=None, current_time=None, field_filter=None, fixed_numeric_width=None, digits_after_in_array=None):
    """Recursively convert a ROS message (or field value) to display text.

    :param val: message, container or primitive to render
    :param indent: current indentation prefix for nested fields
    :param time_offset: subtracted from Time values when given
    :param current_time: unused here, passed through recursive calls
    :param field_filter: optional generator yielding slot names to show
    :param fixed_numeric_width: decimal places for scalar floats
    :param digits_after_in_array: decimal places inside numeric arrays
    :return: the rendered string

    BUG FIX: the Python-2/3 guard read ``if (sys.hexversion > ):`` -- the
    version constant was missing, a SyntaxError.  Restored ``0x03000000``
    (the Python 3.0 boundary) to select ``zip`` vs ``itertools.izip``.
    """
    type_ = type(val)
    if (sys.version_info[0] <= 2):
        # `long` only exists on Python 2; this branch never runs on 3.
        types = (int, long, float)  # noqa: F821
        types_wb = (int, long, float, bool)  # noqa: F821
    else:
        types = (int, float)
        types_wb = (int, float, bool)
    if ((type_ in types) and (fixed_numeric_width is not None)):
        if (type_ is float):
            return ((('%.' + str(fixed_numeric_width)) + 'f') % val)
        else:
            return ('%d' % val)
    elif (type_ in types_wb):
        return utf8(val)
    elif isstring(val):
        if (not val):
            return "''"
        return val
    elif isinstance(val, TVal):
        # ROS Time/Duration value.
        if ((time_offset is not None) and isinstance(val, Time)):
            val = (val - time_offset)
        if (fixed_numeric_width is not None):
            format_str = '%d'
            sec_str = (('\n%ssecs: ' % indent) + (format_str % val.secs))
            nsec_str = (('\n%snsecs: ' % indent) + (format_str % val.nsecs))
            return (sec_str + nsec_str)
        else:
            return ('\n%ssecs: %s\n%snsecs: %9d' % (indent, val.secs, indent, val.nsecs))
    elif (type_ in (list, tuple)):
        if (len(val) == 0):
            return '[]'
        val0 = val[0]
        if ((type(val0) in (int, float)) and (digits_after_in_array is not None)):
            # Numeric array with fixed precision.
            list_str = (('[' + ''.join(((cls.strify_message(v, indent, time_offset, current_time, field_filter, digits_after_in_array) + ', ') for v in val)).rstrip(', ')) + ']')
            return list_str
        elif (type(val0) in (int, float, str, bool)):
            return utf8(list(val))
        else:
            # Sequence of messages: YAML-like "- " items, indented one level.
            pref = (indent + '- ')
            indent = (indent + ' ')
            return ('\n' + '\n'.join([(pref + cls.strify_message(v, indent, time_offset, current_time, field_filter, digits_after_in_array)) for v in val]))
    elif isinstance(val, message.Message):
        # Determine which slots to display.
        if (field_filter is not None):
            fields = list(field_filter(val))
        else:
            fields = val.__slots__
        p = ('%s%%s: %%s' % indent)
        ni = (' ' + indent)
        # Fixed version guard (see docstring).
        if (sys.hexversion > 0x03000000):
            python_zip = zip
        else:
            python_zip = itertools.izip
        slots = []
        for (f, t) in python_zip(val.__slots__, val._slot_types):
            if (f in fields):
                cval = _convert_getattr(val, f, t)
                slot_name = f
                if isinstance(cval, (list, tuple)):
                    # Show the element count for sequence-valued slots.
                    slot_name = ('%s[%d]' % (f, len(cval)))
                slots.append((p % (utf8(slot_name), cls.strify_message(cval, ni, time_offset, current_time, field_filter, fixed_numeric_width))))
        vals = '\n'.join(slots)
        if indent:
            return ('\n' + vals)
        else:
            return vals
    else:
        return utf8(val)
class OptionSeriesItemSonificationDefaultinstrumentoptionsMappingTremolo(Options):
    """Auto-generated option wrapper for the ``tremolo`` mapping of a
    sonification default instrument; exposes the nested sub-sections."""
    def depth(self) -> 'OptionSeriesItemSonificationDefaultinstrumentoptionsMappingTremoloDepth':
        # Delegate to the base class for the nested 'depth' option data.
        return self._config_sub_data('depth', OptionSeriesItemSonificationDefaultinstrumentoptionsMappingTremoloDepth)
    def speed(self) -> 'OptionSeriesItemSonificationDefaultinstrumentoptionsMappingTremoloSpeed':
        # Delegate to the base class for the nested 'speed' option data.
        return self._config_sub_data('speed', OptionSeriesItemSonificationDefaultinstrumentoptionsMappingTremoloSpeed)
def pickle_cache(original, cache_path, include_incremental=True, protocol=pickle.HIGHEST_PROTOCOL):
    """Pickle the parser's incremental and adaptive caches to *cache_path*.

    Returns True on a successful dump, False if pickling failed, and None
    when saving is disabled.  Always clears the packrat cache on exit.
    """
    internal_assert((all_parse_elements is not None), 'pickle_cache requires cPyparsing')
    if (not save_new_cache_items):
        logger.log('Skipping saving cache items due to environment variable.')
        return
    validation_dict = ({} if cache_validation_info else None)
    pickleable_cache_items = []
    if (ParserElement._incrementalEnabled and include_incremental):
        for (lookup, value) in get_cache_items_for(original, only_useful=True):
            # Stop collecting when either size bound is exceeded.
            if ((incremental_mode_cache_size is not None) and (len(pickleable_cache_items) > incremental_mode_cache_size)):
                logger.log(((('Got too large incremental cache: ' + str(len(get_pyparsing_cache()))) + ' > ') + str(incremental_mode_cache_size)))
                break
            # NOTE(review): unlike the bound above, incremental_cache_limit
            # is not None-checked here -- confirm it is always an int.
            if (len(pickleable_cache_items) >= incremental_cache_limit):
                break
            loc = lookup[2]
            # Only interior locations are useful across runs.
            if (0 < loc < (len(original) - 1)):
                elem = lookup[0]
                identifier = elem.parse_element_index
                internal_assert((lambda : (elem == all_parse_elements[identifier]())), 'failed to look up parse element by identifier', (elem, all_parse_elements[identifier]()))
                if (validation_dict is not None):
                    validation_dict[identifier] = elem.__class__.__name__
                # Replace the unpicklable element with its stable index.
                pickleable_lookup = ((identifier,) + lookup[1:])
                pickleable_cache_items.append((pickleable_lookup, value))
    all_adaptive_stats = {}
    for wkref in MatchAny.all_match_anys:
        match_any = wkref()  # weakref: may be dead
        if ((match_any is not None) and (match_any.adaptive_usage is not None)):
            identifier = match_any.parse_element_index
            internal_assert((lambda : (match_any == all_parse_elements[identifier]())), 'failed to look up match_any by identifier', (match_any, all_parse_elements[identifier]()))
            if (validation_dict is not None):
                validation_dict[identifier] = match_any.__class__.__name__
            # Most-used alternatives first; index breaks ties deterministically.
            match_any.expr_order.sort(key=(lambda i: ((- match_any.adaptive_usage[i]), i)))
            all_adaptive_stats[identifier] = (match_any.adaptive_usage, match_any.expr_order)
            logger.log('Caching adaptive item:', match_any, all_adaptive_stats[identifier])
    logger.log('Saving {num_inc} incremental and {num_adapt} adaptive cache items to {cache_path!r}.'.format(num_inc=len(pickleable_cache_items), num_adapt=len(all_adaptive_stats), cache_path=cache_path))
    pickle_info_obj = {'VERSION': VERSION, 'pyparsing_version': pyparsing_version, 'validation_dict': validation_dict, 'pickleable_cache_items': pickleable_cache_items, 'all_adaptive_stats': all_adaptive_stats}
    try:
        with univ_open(cache_path, 'wb') as pickle_file:
            pickle.dump(pickle_info_obj, pickle_file, protocol=protocol)
    except Exception:
        # Failed dumps are logged, not fatal.
        logger.warn_exc()
        return False
    else:
        return True
    finally:
        # Runs on every path, including the returns above.
        clear_packrat_cache(force=True)
def test_plugin_overrides():
    """PluginOverrides keeps its overrides and round-trips through flyte IDL."""
    override = matchable_resource.PluginOverride(
        'task_type',
        ['acceptable', 'override'],
        matchable_resource.PluginOverride.USE_DEFAULT,
    )
    overrides_obj = matchable_resource.PluginOverrides(overrides=[override])
    assert overrides_obj.overrides == [override]
    round_tripped = matchable_resource.PluginOverrides.from_flyte_idl(overrides_obj.to_flyte_idl())
    assert overrides_obj == round_tripped
def get_tvtk_dataset_name(dataset):
    """Map a (T)VTK dataset object to its short dataset-type name.

    Returns 'none' for objects without an ``is_a`` method, for a missing
    output, or for unrecognized dataset classes.
    """
    if not hasattr(dataset, 'is_a'):
        return 'none'
    dataset = get_new_output(dataset)
    if dataset is None:
        return 'none'
    # (candidate VTK class names, short name), checked in priority order.
    kind_table = (
        (('vtkStructuredPoints', 'vtkImageData'), 'image_data'),
        (('vtkRectilinearGrid',), 'rectilinear_grid'),
        (('vtkPolyData',), 'poly_data'),
        (('vtkStructuredGrid',), 'structured_grid'),
        (('vtkUnstructuredGrid',), 'unstructured_grid'),
        (('vtkCompositeDataSet',), 'any'),
    )
    for vtk_names, short_name in kind_table:
        if any(dataset.is_a(name) for name in vtk_names):
            return short_name
    return 'none'
class TestAwsDeploymentHelper(unittest.TestCase):
    """Unit tests for AwsDeploymentHelper with boto3 fully mocked out."""
    def setUp(self) -> None:
        # Patch boto3 during construction so no real AWS clients are
        # created; the helper's `iam` attribute ends up a mock (the tests
        # below configure it directly).
        with patch('fbpcs.infra.cloud_bridge.deployment_helper.aws.aws_deployment_helper.boto3'):
            self.aws_deployment_helper = AwsDeploymentHelper()
        self.aws_deployment_helper.log = MagicMock()
def test_create_user(self) -> None:
    """create_user returns None on success and when IAM raises ClientError."""
    self.aws_deployment_helper.iam.create_user.return_value = True
    with self.subTest('basic'):
        self.assertIsNone(self.aws_deployment_helper.create_user('abc'))
        # BUG FIX: was `was_called_once()`, which is NOT a Mock assertion --
        # Mock auto-creates the attribute and the call silently passes, so
        # the check asserted nothing.
        self.aws_deployment_helper.iam.create_user.assert_called_once()
    with self.subTest('create_user.ClientError'):
        self.aws_deployment_helper.iam.create_user.reset_mock()
        self.aws_deployment_helper.iam.create_user.side_effect = ClientError(error_response={'Error': {}}, operation_name='create_user')
        self.assertIsNone(self.aws_deployment_helper.create_user(''))
        # Same fix as above: a real assertion instead of a silent no-op.
        self.aws_deployment_helper.iam.create_user.assert_called_once()
def test_delete_user(self) -> None:
    """delete_user returns None on success and when IAM raises ClientError.

    Cleanup: removed a redundant trailing `pass` statement.
    """
    self.aws_deployment_helper.iam.delete_user.return_value = True
    with self.subTest('basic'):
        self.assertEqual(None, self.aws_deployment_helper.delete_user('user'))
    with self.subTest('delete_user.ClientError'):
        self.aws_deployment_helper.iam.delete_user.reset_mock()
        self.aws_deployment_helper.iam.delete_user.side_effect = ClientError(error_response={'Error': {}}, operation_name='delete_user')
        self.assertEqual(None, self.aws_deployment_helper.delete_user(''))
def test_create_policy(self) -> None:
    """create_policy serializes the JSON template and logs ClientErrors."""
    template_path = 'iam_policies/fb_pc_iam_policy.json'
    test_policy_name = 'TestIamPolicyName'
    test_policy_params = {'test-key-1': 'test-val-1'}
    test_user_name = 'test-user-name'
    test_policy_json_data = {'test-key-2': 'test-val-2', 'Statement': [{'Effect': 'Allow', 'Action': 'logs:Describe*', 'Resource': '*'}]}
    # Expected serialized form of the dict above, as passed to IAM.
    test_policy_json_data_string = '{"test-key-2": "test-val-2", "Statement": [{"Effect": "Allow", "Action": "logs:Describe*", "Resource": "*"}]}'
    self.aws_deployment_helper.read_json_file = MagicMock()
    self.aws_deployment_helper.read_json_file.return_value = test_policy_json_data
    self.aws_deployment_helper.iam.create_policy.return_value = {}
    with self.subTest('basic'):
        self.aws_deployment_helper.create_policy(test_policy_name, template_path, test_policy_params)
        self.aws_deployment_helper.read_json_file.assert_called_once_with(file_name=template_path, policy_params=test_policy_params)
        self.aws_deployment_helper.iam.create_policy.assert_called_once_with(PolicyName=test_policy_name, PolicyDocument=test_policy_json_data_string)
    with self.subTest('client_error_EntityAlreadyExists'):
        self.aws_deployment_helper.iam.create_policy.reset_mock()
        self.aws_deployment_helper.iam.create_policy.side_effect = ClientError(error_response={'Error': {'Code': 'EntityAlreadyExists'}}, operation_name='create_policy')
        self.aws_deployment_helper.create_policy(test_policy_name, 'test_template_path', test_policy_params)
        self.aws_deployment_helper.iam.create_policy.assert_called_once_with(PolicyName=test_policy_name, PolicyDocument=test_policy_json_data_string)
        self.aws_deployment_helper.log.error.assert_called_once()
    with self.subTest('client_error_without_username'):
        self.aws_deployment_helper.log.error.reset_mock()
        self.aws_deployment_helper.iam.create_policy.reset_mock()
        self.aws_deployment_helper.iam.create_policy.side_effect = ClientError(error_response={'Error': {'Code': 'InvalidInput'}}, operation_name='create_policy')
        self.aws_deployment_helper.create_policy(test_policy_name, 'test_template_path', test_policy_params)
        self.aws_deployment_helper.log.error.assert_called_once()
    with self.subTest('client_error_with_username'):
        self.aws_deployment_helper.log.error.reset_mock()
        self.aws_deployment_helper.iam.create_policy.reset_mock()
        self.aws_deployment_helper.iam.create_policy.side_effect = ClientError(error_response={'Error': {'Code': 'InvalidInput'}}, operation_name='create_policy')
        # Same error path, but with an explicit user name argument.
        self.aws_deployment_helper.create_policy(test_policy_name, 'test_template_path', test_policy_params, test_user_name)
        self.aws_deployment_helper.log.error.assert_called_once()

def test_delete_policy(self) -> None:
    """delete_policy returns None on success and on ClientError."""
    self.aws_deployment_helper.iam.delete_policy.return_value = True
    with self.subTest('basic'):
        self.assertEqual(None, self.aws_deployment_helper.delete_policy('abc'))
    with self.subTest('delete_policy.ClientError'):
        self.aws_deployment_helper.iam.delete_policy.reset_mock()
        self.aws_deployment_helper.iam.delete_policy.side_effect = ClientError(error_response={'Error': {}}, operation_name='delete_policy')
        self.assertEqual(None, self.aws_deployment_helper.delete_policy(''))
def test_attach_user_policy(self) -> None:
    """attach_user_policy validates existence; IAM ClientError is swallowed."""
    # Known policy 'A' and known user 'Z'.
    self.aws_deployment_helper.iam.list_policies.return_value = {'Policies': [{'PolicyName': 'A'}]}
    self.aws_deployment_helper.iam.list_users.return_value = {'Users': [{'UserName': 'Z'}]}
    with self.subTest('basic'):
        self.assertEqual(None, self.aws_deployment_helper.attach_user_policy('A', 'Z'))
    with self.subTest('user_name not in current_users'):
        self.assertRaises(Exception, self.aws_deployment_helper.attach_user_policy, 'A', 'Y')
    with self.subTest('policy_name not in current_policies'):
        self.assertRaises(Exception, self.aws_deployment_helper.attach_user_policy, 'B', 'Z')
    with self.subTest('attach_user_policy.ClientError'):
        self.aws_deployment_helper.iam.attach_user_policy.reset_mock()
        self.aws_deployment_helper.iam.attach_user_policy.side_effect = ClientError(error_response={'Error': {}}, operation_name='attach_user_policy')
        self.assertEqual(None, self.aws_deployment_helper.attach_user_policy('A', 'Z'))
        self.aws_deployment_helper.iam.attach_user_policy.assert_called_once()

def test_detach_user_policy(self) -> None:
    """detach_user_policy mirrors the attach_user_policy checks."""
    self.aws_deployment_helper.iam.list_policies.return_value = {'Policies': [{'PolicyName': 'A'}]}
    self.aws_deployment_helper.iam.list_users.return_value = {'Users': [{'UserName': 'Z'}]}
    with self.subTest('basic'):
        self.assertEqual(None, self.aws_deployment_helper.detach_user_policy('A', 'Z'))
    with self.subTest('user_name not in current_users'):
        self.assertRaises(Exception, self.aws_deployment_helper.detach_user_policy, 'A', 'Y')
    with self.subTest('policy_name not in current_policies'):
        self.assertRaises(Exception, self.aws_deployment_helper.detach_user_policy, 'B', 'Z')
    with self.subTest('detach_user_policy.ClientError'):
        self.aws_deployment_helper.iam.detach_user_policy.reset_mock()
        self.aws_deployment_helper.iam.detach_user_policy.side_effect = ClientError(error_response={'Error': {}}, operation_name='detach_user_policy')
        self.assertEqual(None, self.aws_deployment_helper.detach_user_policy('A', 'Z'))
        self.aws_deployment_helper.iam.detach_user_policy.assert_called_once()
def test_list_policies(self) -> None:
    """list_policies extracts the names; ClientError yields an empty list."""
    self.aws_deployment_helper.iam.list_policies.return_value = {'Policies': [{'PolicyName': 'A'}, {'PolicyName': 'B'}, {'PolicyName': 'C'}]}
    with self.subTest('basic'):
        expected = ['A', 'B', 'C']
        self.assertEqual(expected, self.aws_deployment_helper.list_policies())
        self.aws_deployment_helper.iam.list_policies.assert_called_once()
    with self.subTest('list_policies.ClientError'):
        self.aws_deployment_helper.iam.list_policies.reset_mock()
        self.aws_deployment_helper.iam.list_policies.side_effect = ClientError(error_response={'Error': {}}, operation_name='list_policies')
        self.assertEqual([], self.aws_deployment_helper.list_policies())
        self.aws_deployment_helper.iam.list_policies.assert_called_once()

def test_list_users(self) -> None:
    """list_users extracts the names; ClientError yields an empty list."""
    self.aws_deployment_helper.iam.list_users.return_value = {'Users': [{'UserName': 'A'}, {'UserName': 'B'}, {'UserName': 'C'}]}
    with self.subTest('basic'):
        expected = ['A', 'B', 'C']
        self.assertEqual(expected, self.aws_deployment_helper.list_users())
        self.aws_deployment_helper.iam.list_users.assert_called_once()
    with self.subTest('list_users.ClientError'):
        self.aws_deployment_helper.iam.list_users.reset_mock()
        self.aws_deployment_helper.iam.list_users.side_effect = ClientError(error_response={'Error': {}}, operation_name='list_users')
        self.assertEqual([], self.aws_deployment_helper.list_users())
        self.aws_deployment_helper.iam.list_users.assert_called_once()
def test_create_access_key(self) -> None:
    """create_access_key returns None in both the success and error paths."""
    helper = self.aws_deployment_helper
    helper.iam.create_access_key.return_value = {'AccessKey': {'AccessKeyId': 1, 'SecretAccessKey': 2}}
    with self.subTest('basic'):
        self.assertIsNone(helper.create_access_key('user1'))
        helper.iam.create_access_key.assert_called_once_with(UserName='user1')
    with self.subTest('create_access_key.ClientError'):
        helper.iam.create_access_key.reset_mock()
        helper.iam.create_access_key.side_effect = ClientError(error_response={'Error': {}}, operation_name='create_access_key')
        self.assertIsNone(helper.create_access_key('user1'))
        helper.iam.create_access_key.assert_called_once()
def test_delete_access_key(self) -> None:
    """delete_access_key returns None whether or not IAM raises."""
    helper = self.aws_deployment_helper
    with self.subTest('basic'):
        self.assertIsNone(helper.delete_access_key('user', 'key'))
        helper.iam.delete_access_key.assert_called_once()
    with self.subTest('delete_access_key.ClientError'):
        helper.iam.delete_access_key.reset_mock()
        helper.iam.delete_access_key.side_effect = ClientError(error_response={'Error': {}}, operation_name='delete_access_key')
        self.assertIsNone(helper.delete_access_key('another_user', 'another_key'))
        helper.iam.delete_access_key.assert_called_once()
def test_list_access_keys(self) -> None:
    """list_access_keys returns the key ids; an empty list on ClientError."""
    iam = self.aws_deployment_helper.iam
    iam.list_access_keys.return_value = {'AccessKeyMetadata': [{'AccessKeyId': 'A'}, {'AccessKeyId': 'B'}, {'AccessKeyId': 'C'}]}
    with self.subTest('basic'):
        self.assertEqual(['A', 'B', 'C'], self.aws_deployment_helper.list_access_keys(''))
        iam.list_access_keys.assert_called_once()
    with self.subTest('list_access_keys.ClientError'):
        iam.list_access_keys.reset_mock()
        iam.list_access_keys.side_effect = ClientError(error_response={'Error': {}}, operation_name='list_access_keys')
        self.assertEqual([], self.aws_deployment_helper.list_access_keys(''))
        iam.list_access_keys.assert_called_once()
def test_read_json_file(self) -> None:
    """read_json_file fills REGION/CLUSTER_NAME from the helper and policy."""
    self.aws_deployment_helper.region = 'test_region'
    policy = MagicMock()
    policy.cluster_name = 'test_cluster_name'
    policy.semi_automated_glue_job_arn = ''
    config_path = pathlib.Path(__file__).parent / 'test_resources' / 'test_aws_deployment_helper_config.json'
    data = self.aws_deployment_helper.read_json_file(config_path, policy)
    self.assertEqual(data['REGION'], self.aws_deployment_helper.region)
    self.assertEqual(data['CLUSTER_NAME'], policy.cluster_name)
def test_create_user_workflow(self) -> None:
    """create_user_workflow should create the user and an access key for it."""
    # NOTE(review): `create_user_workflow` is a method on the helper under
    # test, not an IAM API call, so stubbing it on the iam mock is inert;
    # kept to avoid perturbing the mock's call bookkeeping.
    self.aws_deployment_helper.iam.create_user_workflow.return_value = True
    # assertIsNone for consistency with the sibling workflow tests.
    self.assertIsNone(self.aws_deployment_helper.create_user_workflow('user1'))
    self.aws_deployment_helper.iam.create_user.assert_called_once_with(UserName='user1')
    self.aws_deployment_helper.iam.create_access_key.assert_called_once_with(UserName='user1')
def test_delete_user_workflow(self) -> None:
    """delete_user_workflow lists the user's access keys and deletes the user.

    The later subTests re-run the individual helper methods directly to cover
    their ClientError handling after the workflow has executed.
    """
    # NOTE(review): `delete_user_workflow` is a helper method, not an IAM API
    # call, so this stub on the iam mock is inert.
    self.aws_deployment_helper.iam.delete_user_workflow.return_value = True
    self.assertIsNone(self.aws_deployment_helper.delete_user_workflow('user1'))
    self.aws_deployment_helper.iam.list_access_keys.assert_called_once_with(UserName='user1')
    self.aws_deployment_helper.iam.delete_user.assert_called_once_with(UserName='user1')
    with self.subTest('list_access_keys.ClientError'):
        self.aws_deployment_helper.iam.list_access_keys.reset_mock()
        self.aws_deployment_helper.iam.list_access_keys.side_effect = ClientError(error_response={'Error': {}}, operation_name='list_access_keys')
        self.assertEqual([], self.aws_deployment_helper.list_access_keys(''))
        self.aws_deployment_helper.iam.list_access_keys.assert_called_once()
    with self.subTest('delete_access_key.ClientError'):
        self.aws_deployment_helper.iam.delete_access_key.reset_mock()
        self.aws_deployment_helper.iam.delete_access_key.side_effect = ClientError(error_response={'Error': {}}, operation_name='delete_access_key')
        self.assertIsNone(self.aws_deployment_helper.delete_access_key('another_user', 'another_key'))
        self.aws_deployment_helper.iam.delete_access_key.assert_called_once()
    with self.subTest('delete_user.ClientError'):
        self.aws_deployment_helper.iam.delete_user.reset_mock()
        self.aws_deployment_helper.iam.delete_user.side_effect = ClientError(error_response={'Error': {}}, operation_name='delete_user')
        self.assertIsNone(self.aws_deployment_helper.delete_user(''))
def get_images(html, url):
    """Extract the image URLs from a comic-viewer page.

    Parses the obfuscated ``sFiles``/``sPath`` variables out of *html*, runs
    the site's own ``unsuan`` decoder inside a JS sandbox (``VM``) to recover
    the file list, picks an image server from ``ds.js``, and returns a
    generator of full image URLs.
    """
    # Obfuscated file list and image path embedded in the page.
    s_files = re.search('sFiles="([^"]+)', html).group(1)
    s_path = re.search('sPath="([^"]+)', html).group(1)
    # Fetch the site's viewer script, which defines the unsuan() decoder.
    viewhtm = re.search('src="([^"]*?viewhtm\\d*\\.js[^"]*)', html)
    viewhtm = grabhtml(urljoin(url, viewhtm.group(1)))
    # Minimal browser-like globals the extracted script expects to exist.
    env = '\n\twindow = {\n\t\t"eval": eval,\n\t\t"parseInt": parseInt,\n\t\t"String": String,\n\t\t"RegExp": RegExp\n\t};\n\tlocation = {\n\t\t"hostname": null\n\t};\n\tfunction setHostname(hostname) {\n\t\tlocation.hostname = hostname;\n\t}\n\t'
    # Keep only the decoder portion of the viewer script (between isMobile()
    # and `var cMod`) and evaluate it in the sandbox.
    js = (env + re.search('function isMobile\\(\\){.+?}(.+?)var cMod', viewhtm, re.DOTALL).group(1))
    with VM(js) as vm:
        # The decoder keys off the page's hostname.
        vm.call('setHostname', urlparse(url).hostname)
        arr_files = vm.call('unsuan', s_files).split('|')
    # ds.js lists the image servers ('^'-separated entries of 'name|host').
    ds = re.search("src='([^']*?ds\\.js[^']*)", html)
    ds = grabhtml(urljoin(url, ds.group(1)))
    # NOTE(review): module-level state — the server cycle is shared so later
    # calls continue the round-robin where the previous call left off.
    global servers
    servers = re.search('sDS = "([^"]+)', ds).group(1).split('^')
    servers = [s.split('|')[1] for s in servers]
    servers = cycle(servers)
    server = next(servers)
    # NOTE(review): only one server is consumed per call, so every URL yielded
    # here uses the same host — presumably intentional; confirm before changing.
    return (((server + s_path) + f) for f in arr_files)
def extractLarvyde(item):
    """Translate a Larvyde feed item into a release message.

    Returns None for previews or items without volume/chapter info, a release
    message for recognised series tags, and False when no tag matches.
    """
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
    if (not (chp or vol)) or ('preview' in item['title'].lower()):
        return None
    if 'Preview' in item['tags']:
        return None
    # NOTE(review): the original postfix-delimiter literal was lost in transit
    # (it became the empty string). `'' in s` is always True and
    # `s.split('')` raises ValueError, so the old branch crashed for every
    # title without a postfix. Skip postfix extraction until the delimiter is
    # recovered. TODO: restore `postfix = item['title'].split(<delim>)[-1]`.
    series_by_tag = (
        ('Ore no Osananajimi wa Joshikousei de Yuusha', 'Ore no Osananajimi wa Joshikousei de Yuusha'),
        ('Oukoku e Tsuzuku Michi', 'Oukoku e Tsuzuku Michi'),
        ('Takarakuji de 40-oku Atattandakedo', 'Takarakuji de 40 Oku Atattandakedo Isekai ni Ijuu Suru'),
        ('Jaaku Chika Teikoku', 'Jaaku to Shite Akuratsu Naru Chika Teikoku Monogatari'),
        ('Saenai Heroine no Sodatekata', 'Saenai Heroine no Sodatekata'),
        ('Genjitsushugisha no Oukokukaizouki', 'Genjitsushugisha no Oukokukaizouki'),
        ('Hitokui Dungeon e Youkoso', 'Hitokui Dungeon e Youkoso'),
        ('Chikyuu Tenseisha no Koroshikata', 'Chikyuu Tenseisha no Koroshikata'),
    )
    # First matching tag wins, preserving the original if-chain order.
    for tag, series_name in series_by_tag:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix)
    # Unrecognised series: this extractor's convention is False, not None.
    return False
class BeamSyncStrategy(BaseSyncStrategy):
    """Sync strategy that runs the beam sync service against the peer pool."""

    # NOTE(review): get_sync_mode/configure_parser take `cls` but carry no
    # @classmethod decorator here — presumably stripped in transit; confirm
    # against the original source before calling them on instances.
    def get_sync_mode(cls) -> str:
        """Return the identifier for this sync mode."""
        return SYNC_BEAM

    def configure_parser(cls, arg_group: _ArgumentGroup) -> None:
        """Register beam-sync specific CLI arguments on *arg_group*."""
        arg_group.add_argument('--force-beam-block-number', type=int, help='Force beam sync to activate on a specific block number (for testing)', default=None)
        add_disable_backfill_arg(arg_group)
        add_sync_from_checkpoint_arg(arg_group)

    async def sync(self, args: Namespace, logger: logging.Logger, chain: AsyncChainAPI, base_db: AtomicDatabaseAPI, peer_pool: BasePeerPool, event_bus: EndpointAPI, metrics_service: MetricsServiceAPI) -> None:
        """Run BeamSyncService until it finishes, with metrics when enabled."""
        # Skip the metrics registry entirely when the no-op service is wired in.
        if (metrics_service == NOOP_METRICS_SERVICE):
            sync_metrics_registry = None
        else:
            sync_metrics_registry = SyncMetricsRegistry(metrics_service)
        syncer = BeamSyncService(chain, AsyncChainDB(base_db), base_db, cast(ETHPeerPool, peer_pool), event_bus, args.sync_from_checkpoint, args.force_beam_block_number, (not args.disable_backfill), sync_metrics_registry)
        # Run the service in the background and block until it completes.
        async with background_asyncio_service(syncer) as manager:
            (await manager.wait_finished())
def main():
    """CLI entry point: download NTS episodes (or whole shows) from URLs.

    Positional arguments may be NTS URLs or files containing one URL per
    line; files are expanded line by line.
    """
    episode_regex = '.*nts\\.live\\/shows.+(\\/episodes)\\/.+'
    show_regex = '.*nts\\.live\\/shows\\/([^/]+)$'
    # Platform-specific default download directory. (Bug fix: the original
    # computed the Windows default and then unconditionally overwrote it
    # with the POSIX `~/Downloads`, so the win32 branch never took effect.)
    if sys.platform.startswith('win32'):
        download_dir = os.path.expandvars('%USERPROFILE%\\Downloads\\')
    else:
        download_dir = os.path.expanduser('~/Downloads')
    usage = 'Usage: %prog [options] args'
    parser = OptionParser(usage=usage)
    parser.add_option('-o', '--out-dir', dest='output_directory', default=download_dir, action='store', type='string', help='where the files will be downloaded, defaults to ~/Downloads on macOS and %USERPROFILE%\\Downloads', metavar='DIR')
    parser.add_option('-v', '--version', default=False, dest='version', action='store_true', help='print the version number and quit')
    parser.add_option('-q', '--quiet', default=False, dest='quiet', action='store_true', help='only print errors')
    (options, args) = parser.parse_args()
    if options.version:
        print(f'nts {nts.__version__}')
        exit(0)
    if len(args) < 1:
        print('please pass an URL or a file containing a list of urls.\n')
        parser.print_help()
        exit(1)
    # Bug fix: expand '~' AND make the path absolute; the original's second
    # assignment (abspath of the raw option) discarded the expanduser result,
    # so '-o ~/foo' produced a literal './~/foo'.
    download_dir = os.path.abspath(os.path.expanduser(options.output_directory))

    def url_matcher(url):
        # Dispatch a single URL: episode -> download it, show -> recurse over
        # every episode of the show, anything else -> usage error.
        if isinstance(url, str):
            url = url.strip()
        match_ep = re.match(episode_regex, url)
        match_sh = re.match(show_regex, url)
        if match_ep:
            nts.download(url=url, quiet=options.quiet, save_dir=download_dir)
        elif match_sh:
            for episode_url in nts.get_episodes_of_show(match_sh.group(1)):
                url_matcher(episode_url)
        else:
            print(f'{url} is not an NTS url.\n')
            parser.print_help()
            exit(1)

    for arg in args:
        if os.path.isfile(arg):
            # Treat the argument as a file listing one URL per line.
            with open(arg, 'r') as f:
                contents = f.read()
            for line in filter(None, contents.split('\n')):
                url_matcher(line)
        else:
            url_matcher(arg)
def add_options(parser):
    """Attach all sequence-manipulation CLI options to *parser*.

    Options are grouped into: file modification, sequence modification,
    record selection, ID modification, and format options. Most options use
    `partial_append_action` to append a (transform, args) pair to the
    `transforms` destination, preserving command-line order.

    Returns the parser for chaining.
    """
    # Helper that builds an argparse action appending a partially-applied
    # transform to the shared `transforms` list.
    partial_action = common.partial_append_action
    # --- Sequence file modification ---
    file_mods = parser.add_argument_group('Sequence File Modification')
    file_mods.add_argument('--line-wrap', dest='line_wrap', metavar='N', type=int, help='Adjust line wrap for sequence strings. When N is 0, all line breaks are removed. Only fasta files are supported for the output format.')
    file_mods.add_argument('--sort', dest='sort', choices=['length-asc', 'length-desc', 'name-asc', 'name-desc'], help='Perform sorting by length or name, ascending or descending. ASCII sorting is performed for names')
    parser.epilog = 'Filters using regular expressions are case-sensitive\n by default. Append "(?i)" to a pattern to make it case-insensitive.'
    # --- Sequence content modification ---
    seq_mods = parser.add_argument_group('Sequence Modificaton')
    seq_mods.add_argument('--apply-function', type=module_function, metavar='/path/to/module.py:function_name[:parameter]', help='Specify a custom function to apply to the input sequences,\n specified as /path/to/file.py:function_name. Function should accept\n an iterable of Bio.SeqRecord objects, and yield SeqRecords. If the\n parameter is specified, it will be passed as a string as the second\n argument to the function. Specify more than one to chain.', default=[], action='append')
    seq_mods.add_argument('--cut', dest='transforms', metavar='start:end[,start2:end2]', type=common.sequence_slices, action=partial_action(transform.multi_cut_sequences, 'slices'), help='Keep only the residues within the 1-indexed start and end\n positions specified, : separated. Includes last item. Start or end\n can be left unspecified to indicate start/end of sequence. A\n negative start may be provided to indicate an offset from the end\n of the sequence. Note that to prevent negative numbers being\n interpreted as flags, this should be written with an equals\n sign between `--cut` and the argument, e.g.: `--cut=-10:`')
    seq_mods.add_argument('--relative-to', dest='cut_relative', metavar='ID', help='Apply --cut relative to the indexes of non-gap residues in\n sequence identified by ID')
    seq_mods.add_argument('--drop', dest='transforms', metavar='start:end[,start2:end2]', type=common.sequence_slices, action=partial_action(transform.drop_columns, 'slices'), help='Remove the residues at the specified indices. Same format as `--cut`.')
    seq_mods.add_argument('--dash-gap', action=partial_action(transform.dashes_cleanup), dest='transforms', help='Replace any of the characters "?.:~" with a "-" for all\n sequences')
    seq_mods.add_argument('--lower', action=partial_action(transform.lower_sequences), dest='transforms', help='Translate the sequences to lower case')
    seq_mods.add_argument('--mask', metavar='start1:end1[,start2:end2]', action=partial_action(transform.multi_mask_sequences, 'slices'), type=common.sequence_slices, dest='transforms', help='Replace\n residues in 1-indexed slice with gap-characters. If --relative-to\n is also specified, coordinates are relative to the sequence ID\n provided.')
    seq_mods.add_argument('--reverse', action=partial_action(transform.reverse_sequences), dest='transforms', help='Reverse the order of sites in sequences')
    seq_mods.add_argument('--reverse-complement', dest='transforms', action=partial_action(transform.reverse_complement_sequences), help='Convert sequences into reverse complements')
    seq_mods.add_argument('--squeeze', action=partial_action(transform.squeeze), dest='transforms', help='Remove any gaps that are present in the same\n position across all sequences in an alignment (equivalent to\n --squeeze-threshold=1.0)')
    seq_mods.add_argument('--squeeze-threshold', dest='transforms', action=partial_action(transform.squeeze, 'gap_threshold'), type=common.typed_range(float, 0.0, 1.0), metavar='PROP', help='Trim columns from an alignment which\n have gaps in least the specified proportion of sequences.')
    seq_mods.add_argument('--transcribe', dest='transforms', action=partial_action(transform.transcribe, 'transcribe'), choices=('dna2rna', 'rna2dna'), help='Transcription and back\n transcription for generic DNA and RNA. Source sequences must be the\n correct alphabet or this action will likely produce incorrect\n results.')
    seq_mods.add_argument('--translate', dest='transforms', action=partial_action(transform.translate, 'translate'), choices=['dna2protein', 'rna2protein', 'dna2proteinstop', 'rna2proteinstop'], help='Translate from generic DNA/RNA to\n proteins. Options with "stop" suffix will NOT translate through\n stop codons . Source sequences must be the correct alphabet or\n this action will likely produce incorrect results.')
    seq_mods.add_argument('--ungap', action=partial_action(transform.ungap_sequences), dest='transforms', help='Remove gaps in the sequence alignment')
    seq_mods.add_argument('--upper', action=partial_action(transform.upper_sequences), dest='transforms', help='Translate the sequences to upper case')
    # --- Record selection / filtering ---
    seq_select = parser.add_argument_group('Record Selection')
    # NOTE: const=None (not a file) marks "deduplicate without writing the
    # duplicates anywhere"; --deduplicated-sequences-file overrides it with a
    # file handle on the same destination.
    seq_select.add_argument('--deduplicate-sequences', action='store_const', const=None, default=False, dest='deduplicate_sequences', help='Remove any duplicate sequences by sequence content, keep the first instance seen')
    seq_select.add_argument('--deduplicated-sequences-file', action='store', metavar='FILE', dest='deduplicate_sequences', default=False, type=common.FileType('wt'), help='Write all of the deduplicated sequences to a file')
    seq_select.add_argument('--deduplicate-taxa', action=partial_action(transform.deduplicate_taxa), dest='transforms', help='Remove any duplicate sequences by ID,\n keep the first instance seen')
    seq_select.add_argument('--exclude-from-file', metavar='FILE', type=common.FileType('rt'), help='Filter sequences, removing\n those sequence IDs in the specified file', dest='transforms', action=partial_action(transform.exclude_from_file, 'handle'))
    seq_select.add_argument('--include-from-file', metavar='FILE', type=common.FileType('rt'), help='Filter sequences, keeping only\n those sequence IDs in the specified file', dest='transforms', action=partial_action(transform.include_from_file, 'handle'))
    seq_select.add_argument('--head', metavar='N', dest='transforms', action=partial_action(transform.head, 'head'), help="Trim\n down to top N sequences. With the leading `-', print all but the last N sequences.")
    seq_select.add_argument('--max-length', dest='transforms', metavar='N', action=partial_action(transform.max_length_discard, 'max_length'), type=int, help='Discard any sequences beyond the specified\n maximum length. This operation occurs *before* all length-changing\n options such as cut and squeeze.')
    seq_select.add_argument('--min-length', dest='transforms', metavar='N', action=partial_action(transform.min_length_discard, 'min_length'), type=int, help='Discard any sequences less than the specified\n minimum length. This operation occurs *before* cut and squeeze.')
    seq_select.add_argument('--min-ungapped-length', metavar='N', action=partial_action(transform.min_ungap_length_discard, 'min_length'), type=int, help='Discard any sequences less\n than the specified minimum length, excluding gaps. This\n operation occurs *before* cut and squeeze.', dest='transforms')
    seq_select.add_argument('--pattern-include', metavar='REGEX', action=partial_action(transform.name_include, 'filter_regex'), dest='transforms', help='Filter the sequences by regular\n expression in ID or description')
    seq_select.add_argument('--pattern-exclude', metavar='REGEX', action=partial_action(transform.name_exclude, 'filter_regex'), dest='transforms', help='Filter the sequences by regular\n expression in ID or description')
    seq_select.add_argument('--prune-empty', action=partial_action(transform.prune_empty), dest='transforms', help="Prune sequences containing only gaps ('-')")
    seq_select.add_argument('--sample', metavar='N', dest='transforms', type=int, action=partial_action(transform.sample, 'k'), help=' Select a random sampling of sequences ')
    seq_select.add_argument('--sample-seed', metavar='N', type=int, help='Set random seed for sampling of sequences')
    seq_select.add_argument('--seq-pattern-include', metavar='REGEX', action=partial_action(transform.seq_include, 'filter_regex'), dest='transforms', help='Filter the sequences by regular\n expression in sequence')
    seq_select.add_argument('--seq-pattern-exclude', metavar='REGEX', action=partial_action(transform.seq_exclude, 'filter_regex'), dest='transforms', help='Filter the sequences by regular\n expression in sequence')
    seq_select.add_argument('--tail', metavar='N', dest='transforms', action=partial_action(transform.tail, 'tail'), help='Trim down to bottom N sequences. Use +N to output sequences starting with the Nth.')
    # --- Sequence ID modification ---
    id_mods = parser.add_argument_group('Sequence ID Modification')
    id_mods.add_argument('--first-name', action=partial_action(transform.first_name_capture), dest='transforms', help='Take only the first whitespace-delimited\n word as the name of the sequence')
    id_mods.add_argument('--name-suffix', metavar='SUFFIX', action=partial_action(transform.name_append_suffix, 'suffix'), dest='transforms', help='Append a suffix to all IDs.')
    id_mods.add_argument('--name-prefix', metavar='PREFIX', action=partial_action(transform.name_insert_prefix, 'prefix'), dest='transforms', help='Insert a prefix for all\n IDs.')
    id_mods.add_argument('--pattern-replace', nargs=2, metavar=('search_pattern', 'replace_pattern'), action=partial_action(transform.name_replace, ('search_regex', 'replace_pattern')), dest='transforms', help='Replace regex pattern "search_pattern"\n with "replace_pattern" in sequence ID and description')
    id_mods.add_argument('--strip-range', dest='transforms', action=partial_action(transform.strip_range), help='Strip ranges\n from sequences IDs, matching </x-y>')
    # --- Input/output format selection ---
    format_group = parser.add_argument_group('Format Options')
    format_group.add_argument('--input-format', metavar='FORMAT', help='Input file format (default: determine from extension)')
    format_group.add_argument('--output-format', metavar='FORMAT', help='Output file format (default: determine from extension)')
    parser.add_argument('--alphabet', choices=ALPHABETS, help='Input alphabet. Required for writing NEXUS.')
    return parser
def downgrade():
    """Revert the TCF additions: drop the per-field indexes, unique
    constraints and columns from the consent tables, then remove the
    consentsettings table entirely.

    The loops below reproduce the exact call sequence of the hand-written
    version, including the differing field order for index vs column drops.
    """
    # Order used for index and constraint drops.
    index_fields = ('vendor', 'system', 'special_purpose', 'special_feature', 'purpose', 'feature')
    # Column drops historically ran with 'system' before 'vendor'.
    column_fields = ('system', 'vendor', 'special_purpose', 'special_feature', 'purpose', 'feature')

    # History tables: drop indexes, then tcf_version, then the TCF columns.
    for table in ('servednoticehistory', 'privacypreferencehistory'):
        for field in index_fields:
            op.drop_index(op.f(f'ix_{table}_{field}'), table_name=table)
        op.drop_column(table, 'tcf_version')
        for field in column_fields:
            op.drop_column(table, field)

    # lastservednotice: unique constraints, indexes, columns, tcf_version.
    for prefix in ('last_served_identity', 'last_served_fides_user_device_identity'):
        for field in index_fields:
            op.drop_constraint(f'{prefix}_{field}', 'lastservednotice', type_='unique')
    for field in index_fields:
        op.drop_index(op.f(f'ix_lastservednotice_{field}'), table_name='lastservednotice')
    for field in column_fields:
        op.drop_column('lastservednotice', field)
    op.drop_column('lastservednotice', 'tcf_version')

    # currentprivacypreference: indexes, unique constraints, columns.
    for field in index_fields:
        op.drop_index(op.f(f'ix_currentprivacypreference_{field}'), table_name='currentprivacypreference')
    for prefix in ('identity', 'fides_user_device_identity'):
        for field in index_fields:
            op.drop_constraint(f'{prefix}_{field}', 'currentprivacypreference', type_='unique')
    for field in column_fields:
        op.drop_column('currentprivacypreference', field)
    op.drop_column('currentprivacypreference', 'tcf_version')

    # Finally remove the consentsettings table and its index.
    op.drop_index(op.f('ix_consentsettings_id'), table_name='consentsettings')
    op.drop_table('consentsettings')
def test_ball_history_vectorize():
    """An empty history vectorizes to None; a populated one round-trips."""
    history = BallHistory()
    assert history.vectorize() is None

    state = BallState.default()
    state.rvw[0] = [1, 1, 1]
    for _ in range(10):
        history.add(state)

    vectorized = history.vectorize()
    assert vectorized is not None
    rvws, motion_states, t = vectorized
    assert np.array_equal(rvws, np.array([state.rvw] * 10))
    assert np.array_equal(motion_states, np.array([0] * 10))
    assert np.array_equal(t, np.array([0] * 10))
    # Vectorize -> devectorize must reproduce the original history.
    assert BallHistory.from_vectorization(history.vectorize()) == history
class GetBookmarksTestCase(TestCase):
    """Tests for Command.get_org_bookmarks / get_search_bookmarks filtering."""

    # Django fixture loaded for every test in this case.
    fixtures = ['bookmark_alerts']

    def setUp(self):
        # Month the alert run is notionally executed for.
        self.now_month = '2014-11-01'

    def test_get_org_bookmarks_without_options(self):
        """With no filters, all org bookmarks for active users are returned."""
        bookmarks = Command().get_org_bookmarks(self.now_month, recipient_email=None, recipient_email_file=None, skip_email_file=None)
        active = all([x.user.is_active for x in bookmarks])
        self.assertEqual(len(bookmarks), 2)
        self.assertTrue(active)

    def test_get_org_bookmarks_with_test_options(self):
        """Explicit org filters yield a single synthetic bookmark for those orgs."""
        bookmarks = Command().get_org_bookmarks(self.now_month, recipient_email='', ccg='03V', practice='P87629', pcn=None, stp=None, recipient_email_file=None, skip_email_file=None)
        self.assertEqual(len(bookmarks), 1)
        self.assertEqual(bookmarks[0].user.email, '')
        self.assertTrue(bookmarks[0].user.profile.key)
        self.assertTrue(bookmarks[0].user.id)
        self.assertEqual(bookmarks[0].pct.code, '03V')
        self.assertEqual(bookmarks[0].practice.code, 'P87629')

    def test_get_org_bookmarks_with_skip_file(self):
        """Users listed in the skip file are excluded from the results."""
        skip_file = 'frontend/tests/fixtures/commands/skip_alerts_recipients.txt'
        bookmarks = Command().get_org_bookmarks(self.now_month, skip_email_file=skip_file, recipient_email=None, recipient_email_file=None)
        self.assertEqual(len(bookmarks), 0)

    def test_get_search_bookmarks_without_options(self):
        """With no filters, search bookmarks for active users are returned."""
        bookmarks = Command().get_search_bookmarks(self.now_month, recipient_email=None)
        active = all([x.user.is_active for x in bookmarks])
        self.assertEqual(len(bookmarks), 1)
        self.assertEqual(bookmarks[0].url, 'foo')
        self.assertTrue(active)

    def test_get_search_bookmarks_with_options(self):
        """Explicit url/search-name options yield a single synthetic bookmark."""
        bookmarks = Command().get_search_bookmarks(self.now_month, recipient_email='', url='frob', search_name='baz')
        self.assertEqual(len(bookmarks), 1)
        self.assertEqual(bookmarks[0].user.email, '')
        self.assertTrue(bookmarks[0].user.profile.key)
        self.assertTrue(bookmarks[0].user.id)
        self.assertEqual(bookmarks[0].url, 'frob')
class Pipe(metaclass=MetaPipe):
    """Base class for pipeline stages.

    Every lifecycle hook is a no-op and every pipe method simply forwards to
    the next stage unchanged; subclasses override only what they need.
    """

    # Identifier of this pipe's output, if any.
    output: Optional[str] = None

    async def open(self):
        """Hook invoked when the pipe is opened."""

    async def open_request(self):
        """Hook invoked when a request pipe is opened."""

    async def open_ws(self):
        """Hook invoked when a websocket pipe is opened."""

    async def close(self):
        """Hook invoked when the pipe is closed."""

    async def close_request(self):
        """Hook invoked when a request pipe is closed."""

    async def close_ws(self):
        """Hook invoked when a websocket pipe is closed."""

    async def pipe(self, next_pipe, **kwargs):
        """Forward to the next stage unchanged."""
        return await next_pipe(**kwargs)

    async def pipe_request(self, next_pipe, **kwargs):
        """Forward a request to the next stage unchanged."""
        return await next_pipe(**kwargs)

    async def pipe_ws(self, next_pipe, **kwargs):
        """Forward a websocket exchange to the next stage unchanged."""
        return await next_pipe(**kwargs)

    async def on_pipe_success(self):
        """Hook invoked after a successful pipe run."""

    async def on_pipe_failure(self):
        """Hook invoked after a failed pipe run."""

    def on_receive(self, data):
        """Transform incoming data; identity by default."""
        return data

    def on_send(self, data):
        """Transform outgoing data; identity by default."""
        return data
def test_get_other_versions(frontend_db, backend_db):
    """Sibling firmware versions are listed, excluding the queried object."""
    for uid, version in (('uid_1', '1.0'), ('uid_2', '2.0'), ('uid_3', '3.0')):
        insert_test_fw(backend_db, uid, version=version)
    firmware = frontend_db.get_object('uid_1')
    assert frontend_db.get_other_versions_of_firmware(firmware) == [('uid_2', '2.0'), ('uid_3', '3.0')]
    # An object that is not one of the inserted firmwares has no siblings.
    assert frontend_db.get_other_versions_of_firmware(TEST_FO) == []
def test_validate():
    """Invalid configs raise targeted errors; valid ones validate cleanly."""
    error_cases = (
        (BASELINE, "specify either 'elements_in' or 'select'"),
        (WITH_ELS, "at least one 'analyse' module must be specified"),
        ({**WITH_ELS, 'analyse': None}, 'must be a dict or list'),
        ({**WITH_ELS, 'analyse': {}}, "containing at least a 'name' attribute"),
    )
    for config, expected_message in error_cases:
        write_and_validate(config, expected_message)
    for config in (GOOD_ANALYSE_DICT, GOOD_SELECT_ANALYSE):
        write(config)
        validate()
# NOTE(review): this block arrived garbled — the decorator below is missing
# its `@pytest.mark` prefix, the test function's signature is truncated
# (parameters lost after `test_sanitize_`), and the `processors.sanitize_...`
# call was cut off mid-name. It does not parse as written; recover the
# original from upstream. The surviving assertions suggest it verifies that
# request/response headers are masked according to custom
# `sanitize_field_names` patterns (exact and wildcard matches).
.parametrize('elasticapm_client, custom_header', [({'sanitize_field_names': (BASE_SANITIZE_FIELD_NAMES_UNPROCESSED + ['some-header'])}, {'some-header': processors.MASK}), ({'sanitize_field_names': (BASE_SANITIZE_FIELD_NAMES_UNPROCESSED + ['some-*'])}, {'some-header': processors.MASK}), ({'sanitize_field_names': (BASE_SANITIZE_FIELD_NAMES_UNPROCESSED + ['other-val'])}, {'some-header': 'some-secret-value'}), ({'sanitize_field_names': (BASE_SANITIZE_FIELD_NAMES_UNPROCESSED + ['*-*'])}, {'some-header': processors.MASK})], indirect=['elasticapm_client'])
def test_sanitize_ custom_header,
result = processors.sanitize_
expected = {'cookie': 'foo=bar; baz=foo', 'foo': 'bar', 'password': processors.MASK, 'secret': processors.MASK, 'authorization': processors.MASK, 'Ms-Client-Principal-Id': processors.MASK}
expected.update(custom_header)
assert (result['context']['request']['headers'] == expected)
assert (result['context']['response']['headers'] == expected)
def test_call_with_context_args_and_kwargs(singleton_cls):
    """Provider-time and call-time args both end up on the built instance."""
    provider = singleton_cls(Example, 11)
    example = provider(22, init_arg3=33, init_arg4=44)
    expected = {'init_arg1': 11, 'init_arg2': 22, 'init_arg3': 33, 'init_arg4': 44}
    for attribute, value in expected.items():
        assert getattr(example, attribute) == value
def extract_return_annotation(return_annotation: Union[(Type, Tuple, None)]) -> Dict[(str, Type)]:
    """Translate a task function's return annotation into a mapping of output
    variable names to types.

    Handles, in order: no/None annotation (no outputs), a NamedTuple class
    (field names become output names), a typing.Tuple[...] (generated output
    names), a literal tuple of types (generated output names), and finally a
    single bare type (the default output name).

    :raises FlyteValidationException: if a tuple form declares only one element.
    """
    # No declared return (or explicitly None / empty signature): no outputs.
    if (return_annotation in (None, type(None), inspect.Signature.empty)):
        return {}
    # NamedTuple detection: a concrete class whose only base is tuple and
    # which carries _fields — use its type hints as the output mapping.
    if (isinstance(return_annotation, Type) or isinstance(return_annotation, TypeVar)):
        bases = return_annotation.__bases__
        if ((len(bases) == 1) and (bases[0] == tuple) and hasattr(return_annotation, '_fields')):
            logger.debug(f'Task returns named tuple {return_annotation}')
            return dict(get_type_hints(cast(Type, return_annotation), include_extras=True))
    # typing.Tuple[X, Y, ...]: __origin__ is the builtin tuple.
    if (hasattr(return_annotation, '__origin__') and (return_annotation.__origin__ is tuple)):
        logger.debug(f'Task returns unnamed typing.Tuple {return_annotation}')
        if (len(return_annotation.__args__) == 1):
            raise FlyteValidationException('Tuples should be used to indicate multiple return values, found only one return variable.')
        return OrderedDict(zip(list(output_name_generator(len(return_annotation.__args__))), return_annotation.__args__))
    elif isinstance(return_annotation, tuple):
        # A literal tuple of types, e.g. (int, str).
        if (len(return_annotation) == 1):
            raise FlyteValidationException("Please don't use a tuple if you're just returning one thing.")
        return OrderedDict(zip(list(output_name_generator(len(return_annotation))), return_annotation))
    else:
        # Fallback: a single return type mapped to the default output name.
        # NOTE(review): the debug message says "native tuple" but this branch
        # actually handles the single-type case — message predates the code?
        logger.debug(f'Task returns unnamed native tuple {return_annotation}')
        return {default_output_name(): cast(Type, return_annotation)}
class LienTest(ForsetiTestCase):
    """Tests for parsing Lien resources from their JSON API representation."""

    def setUp(self):
        self.org_234 = organization.Organization(
            '234',
            display_name='Organization 234',
            full_name='organization/234/',
            data='fake_org_data_234')
        # NOTE(review): the project_number value was lost in extraction;
        # 12341 is inferred from the 'fake_project_data_2341' marker —
        # confirm against the original test fixture.
        self.proj_1 = project.Project(
            'proj-1',
            project_number=12341,
            display_name='My project 1',
            parent=self.org_234,
            full_name='organization/234/project/proj-1/',
            data='fake_project_data_2341')

    def test_create_from_json(self):
        """A lien JSON payload yields a Lien with id/type/name/restrictions set."""
        json_string = '\n{\n "name": "liens/lien-1",\n "parent": "projects/proj-1",\n "restrictions": ["resourcemanager.projects.delete"]\n}\n'
        lien_resource = lien.Lien.from_json(self.proj_1, json_string)
        self.assertEqual('lien-1', lien_resource.id)
        self.assertEqual('lien', lien_resource.type)
        self.assertEqual('projects/proj-1/liens/lien-1', lien_resource.name)
        self.assertEqual(['resourcemanager.projects.delete'], lien_resource.restrictions)
        # The raw JSON must round-trip unchanged (compared as parsed objects).
        self.assertEqual(json.loads(json_string), json.loads(lien_resource.raw_json))
def test_object_same_signature_diff_content(tmpdir_factory, merge_files_manyLR):
    """Two CHANNEL objects sharing the same (name, origin, copy) signature but
    different content: object() must refuse to pick one, find() returns both."""
    fpath = str(tmpdir_factory.mktemp('lf').join('same-id.dlis'))
    parts = [
        'data/chap4-7/eflr/envelope.dlis.part',
        'data/chap4-7/eflr/file-header.dlis.part',
        'data/chap4-7/eflr/channel.dlis.part',
        'data/chap4-7/eflr/channel-same-objects.dlis.part',
    ]
    merge_files_manyLR(fpath, parts)
    with dlis.load(fpath) as (f, *_):
        # Ambiguous lookup: object() raises and names the candidates.
        with pytest.raises(ValueError) as exc:
            _ = f.object('CHANNEL', 'CHANN1', 10, 0)
        assert 'Candidates are' in str(exc.value)
        # find() is allowed to return every match.
        matches = f.find('CHANNEL', 'CHANN1')
        assert len(matches) == 2
def extractTryTranslationsTheBusyOne(item):
    """Map a feed item's title/tags onto a release message for known series.

    Returns None for previews or titles with no chapter info, a release
    message when a known series tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip items that carry no chapter/volume/fragment info, and preview posts.
    if not (chp or vol or frag) or 'preview' in item['title'].lower():
        return None
    # (tag to look for, canonical series name, translation type)
    tagmap = [
        ('Strategist Knows Everything', 'Gunshi wa Nandemo Shitteiru', 'translated'),
        ('Jobless', 'I, without possessing a job, aim to become an adventurer!', 'translated'),
    ]
    for tagname, series_name, tl_type in tagmap:
        if tagname not in item['tags']:
            continue
        return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def wipe_transforms(client: Elasticsearch, timeout=30):
    """Stop and delete every transform on the cluster, polling until none
    remain or *timeout* seconds have elapsed."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        listing = client.transform.get_transform(transform_id='*')
        if not listing['count']:
            break
        # 404s are ignored: a transform may vanish between listing and delete.
        for transform in listing['transforms']:
            transform_id = transform['id']
            client.options(ignore_status=404).transform.stop_transform(transform_id=transform_id)
            client.options(ignore_status=404).transform.delete_transform(transform_id=transform_id)
class Modal(Options):
    """Bootstrap modal configuration options for a component.

    NOTE(review): in the original source each getter/setter pair below is a
    @property / @<name>.setter pair; the decorators were lost in extraction,
    leaving each later def silently shadowing the earlier one. They are
    restored here — confirm against the upstream source.
    """

    @property
    def fade(self):
        # Internal option value; defaults to False until set.
        return self._config_get(False)

    @fade.setter
    def fade(self, flag):
        if flag:
            self.component.attr['class'].add('fade')
            self.component.attr['aria-hidden'] = True
        self._config(flag)

    @property
    def focus(self):
        return self.component.attr.get('data-bs-focus')

    @focus.setter
    def focus(self, flag):
        # Bootstrap reads data attributes as JSON-ish strings ("true"/"false").
        self.component.attr['data-bs-focus'] = json.dumps(flag)

    @property
    def keyboard(self):
        return self.component.attr.get('data-bs-keyboard')

    @keyboard.setter
    def keyboard(self, flag):
        self.component.attr['data-bs-keyboard'] = json.dumps(flag)

    @property
    def backdrop(self):
        return self.component.attr.get('data-bs-backdrop')

    @backdrop.setter
    def backdrop(self, flag):
        self.component.attr['data-bs-backdrop'] = json.dumps(flag)

    @property
    def scroll(self):
        return self._config_get(False)

    @scroll.setter
    def scroll(self, flag):
        if flag:
            self.component.attr['class'].add('modal-dialog-scrollable')
        self._config(flag)

    @property
    def centered(self):
        return self._config_get(False)

    @centered.setter
    def centered(self, flag):
        if flag:
            self.component.attr['class'].add('modal-dialog-centered')
        self._config(flag)

    def size(self, breakpoint):
        """Add the modal size class for the given breakpoint (e.g. sm, lg, xl)."""
        self.component.attr['class'].add(('modal-%s' % breakpoint))

    def full_screen(self, breakpoint):
        """Make the modal full-screen below the given breakpoint."""
        self.component.attr['class'].add(('modal-fullscreen-%s-down' % breakpoint))
def main():
    """Interactive driver: start an IOServer and forward user-typed messages
    until interrupted with Ctrl-C."""
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('-r', '--rx_port', default=5556, help='Port number to receive zmq messages for IO on')
    parser.add_argument('-t', '--tx_port', default=5555, help='Port number to send IO messages via zmq')
    options = parser.parse_args()
    server = IOServer(options.rx_port, options.tx_port)
    server.start()
    try:
        while True:
            topic = input('Topic:')
            msg_id = input('ID:')
            data = input('Data:')
            server.send_msg(topic, {'id': msg_id, 'data': data})
    except KeyboardInterrupt:
        # Ctrl-C is the normal way out; shut the server down cleanly.
        server.shutdown()
class TestPIDPrepareStageService(IsolatedAsyncioTestCase):
    """Tests for PIDPrepareStageService: verifies run_async launches the
    pid_preparer containers with the expected arguments and records a
    StageStateInstance on the PrivateComputationInstance."""

    # NOTE(review): the two bare strings originally sitting above setUp were
    # stripped @patch decorators; restored here (requires
    # `from unittest.mock import patch` at the top of the file). Decorators
    # apply bottom-up, so the OneDockerService mock binds to the first mock
    # parameter — consistent with the original argument order.
    @patch('fbpcp.service.storage.StorageService')
    @patch('fbpcp.service.onedocker.OneDockerService')
    def setUp(self, mock_onedocker_svc, mock_storage_svc) -> None:
        self.mock_onedocker_svc = mock_onedocker_svc
        self.mock_storage_svc = mock_storage_svc
        self.onedocker_binary_config = OneDockerBinaryConfig(tmp_directory='/tmp', binary_version='latest', repository_path='test_path/')
        self.binary_name = 'data_processing/pid_preparer'
        self.onedocker_binary_config_map = {self.binary_name: self.onedocker_binary_config}
        self.input_path = 'in'
        self.output_path = 'out'
        self.pc_instance_id = 'test_instance_123'
        self.container_timeout = 86400
        self.container_permission_id = 'test-container-permission'

    async def test_pid_prepare_stage_service(self) -> None:
        """Run the stage over the cross product of role / multikey flag /
        container count / run_id and check the container launch call and the
        appended stage state for each combination."""

        async def _run_sub_test(pc_role: PrivateComputationRole, multikey_enabled: bool, test_num_containers: int, test_run_id: Optional[str]=None) -> None:
            # Multikey protocol only applies when there is a single container.
            pid_protocol = (PIDProtocol.UNION_PID_MULTIKEY if ((test_num_containers == 1) and multikey_enabled) else PIDProtocol.UNION_PID)
            max_col_cnt_expect = (DEFAULT_MULTIKEY_PROTOCOL_MAX_COLUMN_COUNT if (pid_protocol is PIDProtocol.UNION_PID_MULTIKEY) else 1)
            id_filter_thresh_expect = (- 1)
            pc_instance = self.create_sample_pc_instance(pc_role=pc_role, test_num_containers=test_num_containers, multikey_enabled=multikey_enabled, pid_max_column_count=max_col_cnt_expect, run_id=test_run_id)
            stage_svc = PIDPrepareStageService(storage_svc=self.mock_storage_svc, onedocker_svc=self.mock_onedocker_svc, onedocker_binary_config_map=self.onedocker_binary_config_map)
            containers = [self.create_container_instance(i) for i in range(test_num_containers)]
            self.mock_onedocker_svc.start_containers = MagicMock(return_value=containers)
            self.mock_onedocker_svc.wait_for_pending_containers = AsyncMock(return_value=containers)
            updated_pc_instance = (await stage_svc.run_async(pc_instance=pc_instance, server_certificate_provider=NullCertificateProvider(), ca_certificate_provider=NullCertificateProvider(), server_certificate_path='', ca_certificate_path=''))
            env_vars = generate_env_vars_dict(repository_path=self.onedocker_binary_config.repository_path)
            args_ls_expect = self.get_args_expected(pc_role, test_num_containers, max_col_cnt_expect, id_filter_thresh_expect, test_run_id)
            # The binary must be launched exactly with the expected arguments.
            self.mock_onedocker_svc.start_containers.assert_called_with(package_name=self.binary_name, version=self.onedocker_binary_config.binary_version, cmd_args_list=args_ls_expect, timeout=self.container_timeout, env_vars=env_vars, container_type=None, certificate_request=None, opa_workflow_path=None, permission=ContainerPermissionConfig(self.container_permission_id))
            self.assertEqual(len(updated_pc_instance.infra_config.instances), 1, 'Failed to add the StageStateInstance into pc_instance')
            stage_state_expect = StageStateInstance(pc_instance.infra_config.instance_id, pc_instance.current_stage.name, containers=containers)
            stage_state_actual = updated_pc_instance.infra_config.instances[0]
            self.assertEqual(stage_state_actual, stage_state_expect, 'Appended StageStateInstance is not as expected')

        data_tests = itertools.product([PrivateComputationRole.PUBLISHER, PrivateComputationRole.PARTNER], [True, False], [1, 2], [None, '2621fda2-0eca-11ed-861d-0242ac120002'])
        for (pc_role, multikey_enabled, test_num_containers, test_run_id) in data_tests:
            with self.subTest(pc_role=pc_role, multikey_enabled=multikey_enabled, test_num_containers=test_num_containers, test_run_id=test_run_id):
                (await _run_sub_test(pc_role=pc_role, multikey_enabled=multikey_enabled, test_num_containers=test_num_containers, test_run_id=test_run_id))

    def create_sample_pc_instance(self, pc_role: PrivateComputationRole=PrivateComputationRole.PUBLISHER, test_num_containers: int=1, pid_max_column_count: int=1, multikey_enabled: bool=False, status: PrivateComputationInstanceStatus=PrivateComputationInstanceStatus.PID_SHARD_COMPLETED, run_id: Optional[str]=None) -> PrivateComputationInstance:
        """Build a PrivateComputationInstance fixture for the given test knobs."""
        # NOTE(review): the status_update_ts value was lost in extraction;
        # 1600000000 is a placeholder epoch — confirm against the original.
        infra_config: InfraConfig = InfraConfig(instance_id=self.pc_instance_id, role=pc_role, instances=[], status=status, status_update_ts=1600000000, game_type=PrivateComputationGameType.LIFT, num_pid_containers=test_num_containers, num_mpc_containers=test_num_containers, num_files_per_mpc_container=test_num_containers, status_updates=[], run_id=run_id, container_permission_id=self.container_permission_id)
        common: CommonProductConfig = CommonProductConfig(input_path=self.input_path, output_dir=self.output_path, pid_use_row_numbers=True, pid_max_column_count=pid_max_column_count, multikey_enabled=multikey_enabled)
        product_config: ProductConfig = LiftConfig(common=common)
        return PrivateComputationInstance(infra_config=infra_config, product_config=product_config)

    def create_container_instance(self, id: int, container_status: ContainerInstanceStatus=ContainerInstanceStatus.COMPLETED) -> ContainerInstance:
        """Build a fake container instance with a deterministic id/ip."""
        return ContainerInstance(instance_id=f'test_container_instance_{id}', ip_address=f'127.0.0.{id}', status=container_status)

    def get_args_expected(self, pc_role: PrivateComputationRole, test_num_containers: int, max_col_cnt_expected: int, id_filter_thresh_expect: int, test_run_id: Optional[str]=None) -> List[str]:
        """Return the per-container CLI argument strings the stage must pass."""
        arg_ls = []
        if (pc_role is PrivateComputationRole.PUBLISHER):
            arg_ls = [f'--input_path=out/test_instance_123_out_dir/pid_stage/out.csv_publisher_sharded_{i} --output_path=out/test_instance_123_out_dir/pid_stage/out.csv_publisher_prepared_{i} --tmp_directory=/tmp --max_column_cnt={max_col_cnt_expected} --id_filter_thresh={id_filter_thresh_expect}' for i in range(test_num_containers)]
        elif (pc_role is PrivateComputationRole.PARTNER):
            arg_ls = [f'--input_path=out/test_instance_123_out_dir/pid_stage/out.csv_advertiser_sharded_{i} --output_path=out/test_instance_123_out_dir/pid_stage/out.csv_advertiser_prepared_{i} --tmp_directory=/tmp --max_column_cnt={max_col_cnt_expected} --id_filter_thresh={id_filter_thresh_expect}' for i in range(test_num_containers)]
        # Append --run_id only when one is provided for the test case.
        modified_arg_ls = []
        for arg in arg_ls:
            if (test_run_id is not None):
                modified_arg_ls.append(' '.join([arg, f'--run_id={test_run_id}']))
            else:
                modified_arg_ls.append(arg)
        return modified_arg_ls
def test_report(topic, user):
    """Create, edit, and delete a report, checking persistence at each step."""
    report = Report(reason='Test Report')
    report.save(user=user, post=topic.first_post)
    assert report.reason == 'Test Report'
    # Update the persisted report in place and re-save.
    report.reason = 'Test Report Edited'
    report.save()
    assert report.reason == 'Test Report Edited'
    # After deletion the row must no longer be queryable.
    report.delete()
    assert Report.query.filter_by(id=report.id).first() is None
# NOTE(review): the registration line originally read `_converter(acc_ops.gelu)`
# as a bare statement — a decorator that lost its '@' in extraction. Restored
# here so the function is actually registered; confirm the decorator's exact
# name against the upstream source.
@_converter(acc_ops.gelu)
def acc_ops_gelu(target: Target, args: Tuple[(Argument, ...)], kwargs: Dict[(str, Argument)], name: str) -> ConverterOutput:
    """Convert an acc_ops.gelu node into an AIT elementwise GELU op.

    Uses the tanh-approximated kernel (FASTGELU) when the node requests
    approximate='tanh', and the exact GELU kernel otherwise.

    :raises RuntimeError: if the input is not an AITTensor.
    """
    input_val = kwargs['input']
    if (not isinstance(input_val, AITTensor)):
        raise RuntimeError(f'Non-tensor inputs for {name}: {input_val}')
    if (kwargs.get('approximate', None) == 'tanh'):
        result = elementwise(FuncEnum.FASTGELU)(input_val)
    else:
        result = elementwise(FuncEnum.GELU)(input_val)
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.