code stringlengths 281 23.7M |
|---|
class ForwardingRule(object):
    """A GCP Compute Engine forwarding rule resource.

    Equality and hashing are based on the normalized JSON representation
    (keys sorted), so two rules built from equivalent dicts compare equal.
    """

    def __init__(self, project_id, resource_id, full_name, creation_timestamp,
                 name, description, region, ip_address, ip_protocol, port_range,
                 ports, target, self_link, load_balancing_scheme, subnetwork,
                 network, backend_service, raw_json):
        self.project_id = project_id
        self.resource_id = resource_id
        self.full_name = full_name
        self.creation_timestamp = creation_timestamp
        self.name = name
        self.description = description
        self.region = region
        self.ip_address = ip_address
        self.ip_protocol = ip_protocol
        self.port_range = port_range
        self.ports = ports
        self.target = target
        self.self_link = self_link
        self.load_balancing_scheme = load_balancing_scheme
        self.subnetwork = subnetwork
        self.network = network
        self.backend_service = backend_service
        self._json = raw_json

    # was missing @classmethod: `from_dict(cls, ...)` cannot work as a plain method
    @classmethod
    def from_dict(cls, project_id, full_name, forwarding_rule):
        """Build a ForwardingRule from the compute API's resource dict."""
        return cls(
            project_id=project_id,
            resource_id=forwarding_rule.get('id'),
            full_name=full_name,
            creation_timestamp=forwarding_rule.get('creationTimestamp', ''),
            name=forwarding_rule.get('name', ''),
            description=forwarding_rule.get('description', ''),
            region=forwarding_rule.get('region', ''),
            ip_address=forwarding_rule.get('IPAddress', ''),
            ip_protocol=forwarding_rule.get('IPProtocol', ''),
            port_range=forwarding_rule.get('portRange', ''),
            ports=forwarding_rule.get('ports', []),
            target=forwarding_rule.get('target', ''),
            self_link=forwarding_rule.get('selfLink', ''),
            load_balancing_scheme=forwarding_rule.get('loadBalancingScheme', ''),
            subnetwork=forwarding_rule.get('subnetwork', ''),
            network=forwarding_rule.get('network', ''),
            # NOTE(review): the compute API uses camelCase ('backendService');
            # 'backend_service' is kept as-is to preserve behavior — confirm.
            backend_service=forwarding_rule.get('backend_service', ''),
            raw_json=json.dumps(forwarding_rule, sort_keys=True),
        )

    # was missing @staticmethod: the original had no self/cls parameter
    @staticmethod
    def from_json(project_id, full_name, forwarding_rule_data):
        """Build a ForwardingRule from a JSON string of the resource."""
        forwarding_rule = json.loads(forwarding_rule_data)
        return ForwardingRule.from_dict(project_id, full_name, forwarding_rule)

    def __repr__(self):
        return self._json

    def __eq__(self, other):
        # Defined alongside __hash__ so equal-hash instances compare equal.
        if not isinstance(other, ForwardingRule):
            return NotImplemented
        return self._json == other._json

    def __hash__(self):
        return hash(self._json)
class TestLCh(util.ColorAssertsPyTest):
    """Round-trip parsing tests for the `--lch` color space.

    Each entry of COLORS pairs an input color string with the expected
    normalized `--lch` serialization, or None when parsing must fail.
    """

    COLORS = [
        ('red', 'color(--lch 54.291 106.84 40.858)'),
        ('orange', 'color(--lch 75.59 83.769 70.824)'),
        ('yellow', 'color(--lch 97.607 94.712 99.572)'),
        ('green', 'color(--lch 46.278 67.984 134.38)'),
        ('blue', 'color(--lch 29.568 131.2 301.36)'),
        ('indigo', 'color(--lch 19.715 71.818 310.91)'),
        ('violet', 'color(--lch 69.618 64.617 325.57)'),
        ('white', 'color(--lch 100 0 none)'),
        ('gray', 'color(--lch 53.585 0 none)'),
        ('black', 'color(--lch 0 0 none)'),
        ('lch(100 30 270)', 'color(--lch 100 30 270 / 1)'),
        ('lch(100 30 270 / 0.5)', 'color(--lch 100 30 270 / 0.5)'),
        ('lch(50% 30 270)', 'color(--lch 50 30 270 / 1)'),
        ('lch(50% 50% 270 / 50%)', 'color(--lch 50 75 270 / 0.5)'),
        ('lch(none none none / none)', 'color(--lch none none none / none)'),
        ('lch(1 30 50%)', None),
        ('lch(1, 30, 50)', None),
        ('lch(1 30)', None),
        ('lch(1deg 30 270)', None),
        ('lch(1 30 270 50%)', None),
        ('lch(75 20 180deg)', 'color(--lch 75 20 180 / 1)'),
        ('lch(75 20 0.5turn)', 'color(--lch 75 20 180 / 1)'),
        ('lch(75 20 3.14159rad)', 'color(--lch 75 20 180 / 1)'),
        ('lch(75 20 200grad)', 'color(--lch 75 20 180 / 1)'),
        ('color(--lch 100 30 270)', 'color(--lch 100 30 270)'),
        ('color(--lch 100 30 270 / 0.5)', 'color(--lch 100 30 270 / 0.5)'),
        ('color(--lch 50% 50% 50% / 50%)', 'color(--lch 50 75 180 / 0.5)'),
        ('color(--lch none none none / none)', 'color(--lch none none none / none)'),
        ('color(--lch 0% 0% 0%)', 'color(--lch 0 0 none)'),
        ('color(--lch 100% 100% 100%)', 'color(--lch 100 150 360 / 1)'),
        ('color(--lch -100% -100% -100%)', 'color(--lch -100 -150 -360 / 1)'),
    ]

    # NOTE(review): the original decorator was mangled into a bare
    # `.parametrize(...)` line (a syntax error); restored as the standard
    # pytest idiom.
    @pytest.mark.parametrize('color1,color2', COLORS)
    def test_colors(self, color1, color2):
        """Inputs either parse to the expected lch color or raise ValueError."""
        if color2 is None:
            with pytest.raises(ValueError):
                Color(color1)
        else:
            self.assertColorEqual(Color(color1).convert('lch'), Color(color2), color=True)
class Neighbor(Command):
    """'neighbor' CLI command: shows per-peer BGP state, with sub-commands."""

    help_msg = 'show neighbor information'
    command = 'neighbor'
    subcommands = {'summary': NeighborSummary, 'sent-routes': SentRoutes, 'received-routes': ReceivedRoutes}
    # Table columns: IP address, AS number, BGP FSM state.
    fmtstr = ' {0:<12s} {1:<12s} {2:<}\n'

    def action(self, params):
        """Collect peer state from the core service's peer manager view."""
        core_service = self.api.get_core_service()
        core_service_view = CoreServiceDetailView(core_service)
        peers_view = core_service_view.rel('peer_manager').rel('peers')
        peers = peers_view.encode()
        rows = [
            {'ip_addr': ip, 'as_num': str(info['remote_as']), 'bgp_state': info['stats']['bgp_state']}
            for ip, info in peers.items()
        ]
        return CommandsResponse(STATUS_OK, rows)

    # The three formatters below take `cls` but were missing @classmethod,
    # which would have bound the first positional argument incorrectly.
    @classmethod
    def cli_resp_formatter(cls, resp):
        """Render the response as a fixed-width table; errors use the base formatter."""
        if resp.status == STATUS_ERROR:
            return Command.cli_resp_formatter(resp)
        return cls._format_header() + cls._format_value(resp.value)

    @classmethod
    def _format_header(cls):
        """Column header line for the neighbor table."""
        return cls.fmtstr.format('IP Address', 'AS Number', 'BGP State')

    @classmethod
    def _format_value(cls, value):
        """One formatted line per peer row (join instead of += accumulation)."""
        return ''.join(cls.fmtstr.format(v['ip_addr'], v['as_num'], v['bgp_state']) for v in value)
class TaxService(QuickbooksBaseObject, UpdateMixin):
    """QBO TaxService entity used to create/update tax codes via the
    TaxService/Taxcode API endpoint."""

    list_dict = {'TaxRateDetails': TaxRateDetails}
    qbo_object_name = 'TaxService/Taxcode'

    def __init__(self):
        super(TaxService, self).__init__()
        self.TaxCode = None
        self.TaxCodeId = None
        self.Id = 0
        self.TaxRateDetails = []

    def __str__(self):
        return self.TaxCode

    def save(self, qb=None):
        """Create (no TaxCodeId yet) or update this tax code and sync our ids
        from the returned object."""
        client = qb if qb else QuickBooks()
        if self.TaxCodeId and self.TaxCodeId > 0:
            # Existing tax code: push an update.
            response = client.update_object(self.qbo_object_name, self.to_json())
        else:
            # New tax code: create it.
            response = client.create_object(self.qbo_object_name, self.to_json())
        saved = type(self).from_json(response)
        self.TaxCodeId = saved.Id
        self.Id = self.TaxCodeId
        return saved
class OptionPlotoptionsErrorbarSonificationContexttracksMappingHighpassResonance(Options):
    """Accessors for a sonification mapping option group.

    The original had each getter/setter pair as two plain methods with the
    same name, so the second definition silently replaced the first; restored
    as the property/setter pairs this Options pattern uses.
    """

    @property
    def mapFunction(self):
        """Mapping function used for this audio parameter."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Point property this mapping is bound to."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Maximum mapped value."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Minimum mapped value."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Scope the mapping is computed within (e.g. series or chart)."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class WafTagsResponseAllOf(ModelNormal):
    """Generated OpenAPI model holding the WAF tags response fields.

    NOTE(review): the bare ``_property`` / ``_js_args_to_python_args`` lines
    below look like stripped decorators (likely ``@cached_property`` and
    ``@convert_js_args_to_python_args`` in the generator's output) — confirm
    against the original generated file.
    """

    # No enum-restricted or range-validated attributes on this model.
    allowed_values = {}
    validations = {}

    _property
    def additional_properties_type():
        # Any JSON-compatible value may appear as an additional property.
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type)

    _nullable = False

    _property
    def openapi_types():
        # Attribute name -> (type,) tuple; lazy_import avoids circular imports.
        lazy_import()
        return {'data': ([WafTagsResponseDataItem],), 'included': ([WafRule],)}

    _property
    def discriminator():
        return None

    # Python attribute name -> JSON key (identity mapping here).
    attribute_map = {'data': 'data', 'included': 'included'}
    read_only_vars = {}
    _composed_schemas = {}

    _js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Alternate constructor used when deserializing raw API data.

        NOTE(review): takes ``cls`` but no ``@classmethod`` decorator is
        visible — presumably stripped with the other decorators; confirm.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Bypass __init__ so read-only attributes can be populated.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            # Optionally drop unknown keys when the configuration says so and
            # the model does not accept additional properties.
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
        return self

    # Attributes every instance must carry (not treated as model fields).
    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes'])

    _js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Public constructor; rejects positional args and read-only fields."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
            # NOTE(review): the read-only check runs AFTER setattr here (the
            # attribute is assigned before the error is raised) — confirm this
            # ordering matches the original generated code.
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
class Solution():
    """LeetCode 48: rotate an n x n matrix 90 degrees clockwise, in place."""

    def rotate(self, matrix: List[List[int]]) -> None:
        """Rotate *matrix* clockwise in place, ring by ring.

        Each iteration cycles four elements at a time, so no extra
        storage beyond one temporary is needed.
        """
        if not matrix:
            return
        n = len(matrix[0])
        # Walk concentric rings from the outside in.
        for first in range(n // 2):
            last = n - 1 - first
            for col in range(first, last):
                mirror = last - col + first
                saved = matrix[first][col]
                matrix[first][col] = matrix[mirror][first]
                matrix[mirror][first] = matrix[last][mirror]
                matrix[last][mirror] = matrix[col][last]
                matrix[col][last] = saved
def load_sonar_dataset(data_dir='data/'):
    """Download (if needed) and load the UCI sonar dataset as a DataFrame.

    Parameters:
        data_dir: directory where sonar.csv is cached.

    Returns:
        DataFrame with 60 band columns ('b.0'..'b.59') and a categorical
        'label' column ('metal'/'rock').

    Raises:
        ValueError: if the downloaded file does not have 61 columns.
    """
    # NOTE(review): the URL literal was truncated in the original source; this
    # is the standard UCI location for the sonar data — confirm it matches.
    url = ('https://archive.ics.uci.edu/ml/machine-learning-databases/'
           'undocumented/connectionist-bench/sonar/sonar.all-data')
    data_path = os.path.join(data_dir, 'sonar.csv')
    # exist_ok avoids the check-then-create race of the original.
    os.makedirs(data_dir, exist_ok=True)
    if not os.path.exists(data_path):
        urllib.request.urlretrieve(url, data_path)
    df = pandas.read_csv(data_path, header=None)
    # Raise instead of assert: asserts are stripped under `python -O`.
    if len(df.columns) != 61:
        raise ValueError(f'expected 61 columns in sonar data, got {len(df.columns)}')
    df.columns = [f'b.{i}' for i in range(60)] + ['label']
    df['label'] = df['label'].replace({'M': 'metal', 'R': 'rock'}).astype('category')
    return df
# NOTE(review): the four bare expression lines below look like stripped click
# decorators (command registration with an alias, two flag options, one string
# option, plus `@click.pass_context` mangled into `_context`) — confirm
# against the original source.
(cls=ClickAliasedCommand, help=cmd_help, short_help=cmd_short_help, alias='u')
('-n', '--no-interaction', help='Do not ask any interactive question.', is_flag=True)
('-p', '--pretend', help='Run but do not make any changes.', is_flag=True)
('--use-version', help='The branch, tag or commit ID to checkout after clone.', type=click.STRING)
_context
def update(ctx: click.Context, **options: Dict[(str, Any)]) -> None:
    """Update a fastapi-mvc project from its copier template.

    Requires write permission on the current directory and a valid
    fastapi-mvc project; exits with code 2 on a copier UserMessageError.
    """
    project_data = require_fastapi_mvc_project()
    ensure_permissions(os.getcwd(), w=True)
    if options['no_interaction']:
        # Non-interactive: reuse recorded answers and overwrite without asking.
        update_kwargs = {'data': project_data, 'overwrite': True}
    else:
        # Interactive: recorded answers become defaults the user can change.
        update_kwargs = {'user_defaults': project_data}
    try:
        copier.run_update(vcs_ref=(options['use_version'] or COPIER_PROJECT.vcs_ref), answers_file=ANSWERS_FILE, pretend=options['pretend'], **update_kwargs)
    except UserMessageError as ex:
        click.secho(ex, fg='yellow')
        ctx.exit(2)
# NOTE(review): the two bare lines below look like fragments of stripped
# ``@pytest.mark.parametrize`` decorators (the parameter source and one
# literal case) — confirm against the original test module.
(malformed_type_strs)
('address20')
def test_get_abi_strategy_returns_no_strategy_for_invalid_type_strs(malformed_type_str):
    """Malformed ABI type strings must be rejected by get_abi_strategy."""
    try:
        get_abi_strategy(malformed_type_str)
    except (ParseError, NoEntriesFound, ValueError):
        # Any of these exceptions means the type string was rejected.
        pass
    else:
        # NOTE(review): message omits ValueError even though it is caught above.
        raise AssertionError('Expected ParseError or NoEntriesFound')
def init(saved_lang):
    """Load the Qt translation for the saved or system locale.

    Parameters:
        saved_lang: locale name persisted in settings; overrides the
            system locale when truthy.

    Returns:
        The QTranslator. Note that QTranslator.load() may silently fail when
        the .qm file is missing; the translator is returned regardless,
        matching the original behavior.
    """
    lang = QtCore.QLocale.system().name()
    if saved_lang:
        lang = saved_lang
    i18n_path = __get_i18n_path()
    print('Loading translations:', i18n_path, 'locale:', lang)
    translator = QtCore.QTranslator()
    # f-string replaces the original chained '+' concatenation.
    translator.load(f'{i18n_path}/{lang}/opensnitch-{lang}.qm')
    return translator
class TestStringEval(unittest.TestCase):
    """Unit tests for the string usefulness scoring helpers (_add_*_score).

    Two input literals in the original file were mangled by a sanitizer
    (the email address and a URL); plausible reconstructions are marked
    with NOTE(review) below.
    """

    def test_eval_strings(self):
        input_data = ['this string is useful', 'garbage n$%Schtz', '/an/interesting/directory']
        result = eval_strings(input_data)
        assert result == ['/an/interesting/directory', 'this string is useful', 'garbage n$%Schtz']

    def test_add_mail_adress_score(self):
        # NOTE(review): the original positive literal ended at 'here is ' — the
        # address itself was stripped; any valid address restores the intent.
        input_data_true = 'here is test@example.com'
        input_data_false = 'here is nothing'
        result_true = _add_mail_adress_score(input_data_true, 0)
        result_false = _add_mail_adress_score(input_data_false, 0)
        assert result_true > 0, 'Mail false-negative'
        assert result_false == 0, 'Mail false-positive'

    def test_add_parameter_score(self):
        input_true = '-p p does something'
        input_true2 = '--help this also does something'
        input_false = 'this is not a startup option'
        result_true = _add_parameter_score(input_true, 0)
        result_true2 = _add_parameter_score(input_true2, 0)
        result_false = _add_parameter_score(input_false, 0)
        assert result_true > 0, 'Parameter not detected'
        assert result_true2 > 0, 'Parameter not detected'
        assert result_false == 0, 'Parameter wrongly detected'

    def test_add_html_score(self):
        input_true = '<body>'
        input_true2 = '</body>'
        input_false = '<head'
        result_true = _add_html_score(input_true, 0)
        result_true2 = _add_html_score(input_true2, 0)
        result_false = _add_html_score(input_false, 0)
        assert result_true > 0, 'html not detected'
        assert result_true2 > 0, 'html not detected'
        assert result_false == 0, 'html wrongly detected'

    def test_add_case_ratio_score(self):
        input_true = 'This is normal text'
        input_false = 'THIS iS WeiRD'
        result_true = _add_case_ratio_score(input_true, 0)
        result_false = _add_case_ratio_score(input_false, 0)
        assert result_true > 0, 'case ratio wrongly detected'
        assert result_false < 0, 'case ratio wrongly detected'

    def test_add_dictionary_score(self):
        input_true = 'version'
        # NOTE(review): this literal was truncated in the original (it began
        # with an unterminated quote); a URL is a plausible reconstruction,
        # assuming 'http' is in the scoring dictionary — confirm.
        input_true2 = 'http://www.example.com'
        input_false = 'wheelchair'
        result_true = _add_dictionary_score(input_true, 0)
        result_true2 = _add_dictionary_score(input_true2, 0)
        result_false = _add_dictionary_score(input_false, 0)
        assert result_true > 0, 'dict word not detected'
        assert result_true2 > 0, 'dict word not detected'
        assert result_false == 0, 'dict word wrongly detected'

    def test_add_format_string_score(self):
        input_true = 'contains %s'
        input_true2 = '%lf'
        input_false = 'nothing here'
        result_true = _add_format_string_score(input_true, 0)
        result_true2 = _add_format_string_score(input_true2, 0)
        result_false = _add_format_string_score(input_false, 0)
        assert result_true < 0, 'Parameter not detected'
        assert result_true2 < 0, 'Parameter not detected'
        assert result_false == 0, 'Parameter wrongly detected'

    def test_add_length_score(self):
        input_data = 'four'
        result = _add_length_score(input_data, 0)
        assert result == 2, 'Parameter not detected'

    def test_add_path_score(self):
        input_true = 'path: /home/user'
        input_false = 'no path'
        result_true = _add_path_score(input_true, 0)
        result_false = _add_path_score(input_false, 0)
        assert result_true > 0, 'Path not detected'
        assert result_false == 0, 'Path wrongly detected'

    def test_add_possible_version_number_score(self):
        input_true = '1.4.4'
        input_false = 'blabla5'
        result_true = _add_possible_version_number_score(input_true, 0)
        result_false = _add_possible_version_number_score(input_false, 0)
        assert result_true > 0, 'version not detected'
        assert result_false == 0, 'version wrongly detected'

    def test_add_possible_year_score(self):
        input_true = 'this year is 2017'
        input_false = '1089 is to early to be of any use'
        result_true = _add_possible_year_score(input_true, 0)
        result_false = _add_possible_year_score(input_false, 0)
        assert result_true > 0, 'year not detected'
        assert result_false == 0, 'year wrongly detected'

    def test_add_quad_characters_score(self):
        input_true = 'qqqq'
        input_false = 'www'
        result_true = _add_quad_characters_score(input_true, 0)
        result_false = _add_quad_characters_score(input_false, 0)
        assert result_true < 0, 'qcharacter not detected'
        assert result_false == 0, 'qcharacter wrongly detected'

    def test_add_rare_special_character_score(self):
        input_true = '^ is rare'
        input_false = '. is not rare'
        result_true = _add_rare_special_character_score(input_true, 0)
        result_false = _add_rare_special_character_score(input_false, 0)
        assert result_true < 0, 'rare character not detected'
        assert result_false == 0, 'rare character wrongly detected'

    def test_add_special_character_ratio_score(self):
        input_true = '$$$$ab&%!'
        input_false = 'normal text!'
        result_true = _add_special_character_ratio_score(input_true, 0)
        result_false = _add_special_character_ratio_score(input_false, 0)
        assert result_true < 0, 'special normal ratio fail'
        assert result_false > 0, 'special to normal ratio fail2'

    def test_add_underscore_or_period_at_beginning_score(self):
        input_true = '_hello'
        input_true2 = '__magic'
        input_true3 = '.gitgud'
        result_true = _add_underscore_or_period_at_beginning_score(input_true, 0)
        result_true2 = _add_underscore_or_period_at_beginning_score(input_true2, 0)
        result_true3 = _add_underscore_or_period_at_beginning_score(input_true3, 0)
        assert result_true < 0, 'underscore or period not detected'
        assert result_true2 < 0, 'underscore or period not detected'
        assert result_true3 < 0, 'underscore or period not detected'

    def test_score(self):
        input_data = 'score me pls'
        result = _score(input_data)
        assert result[1] > 0, 'score should be above 0!'
def test_check_or_select_variables():
    """Variable selection honors ignore_format: object columns only by
    default, all columns when ignore_format=True."""
    frame = pd.DataFrame({'words': ['dog', 'dig', 'cat'], 'animals': [1, 2, np.nan]})
    strict = MockClassFit(ignore_format=False)
    assert strict._check_or_select_variables(frame) == ['words']
    permissive = MockClassFit(ignore_format=True)
    assert permissive._check_or_select_variables(frame) == ['words', 'animals']
class ToMini():
    """Converts a BMGraphBuilder graph into the "mini BMG" JSON format.

    Mini nodes are plain dicts; each gets a 'sequence' number in emission
    order, and edges are expressed as lists of input sequence numbers.
    """

    # Maps each original graph node to the mini node representing it.
    _node_to_mini_node: Dict[(bn.BMGNode, MiniNode)]
    # NOTE(review): initialized/reset but never incremented in this chunk.
    _observed_constants: int
    # Number of query nodes emitted so far (used as query_index).
    _queries: int
    # Mini nodes in emission (sequence) order.
    _mini_nodes: List[MiniNode]

    def __init__(self) -> None:
        self._node_to_mini_node = {}
        self._observed_constants = 0
        self._queries = 0
        self._mini_nodes = []

    def to_json(self, bmg: BMGraphBuilder, indent=None) -> str:
        """Serialize *bmg* to a mini-BMG JSON string; resets internal state first."""
        self._observed_constants = 0
        self._queries = 0
        self._mini_nodes = []
        self._node_to_mini_node = {}
        # Ancestor order guarantees inputs are emitted before their consumers.
        for node in bmg.all_ancestor_nodes():
            self._add_node_to_mini_nodes(node)
        mini = {'comment': 'Mini BMG', 'nodes': self._mini_nodes}
        return json.dumps(mini, indent=indent)

    def _add_mini_node(self, mini: MiniNode) -> None:
        # The sequence number is simply the emission index.
        mini['sequence'] = len(self._mini_nodes)
        self._mini_nodes.append(mini)

    def _node_to_mini_seq(self, node: bn.BMGNode) -> int:
        """Return the sequence number of the mini node emitted for *node*."""
        return self._node_to_mini_node[node]['sequence']

    def _add_inputs(self, mini: MiniNode, node: bn.BMGNode) -> None:
        """Record *node*'s inputs on *mini* as sequence numbers (omitted when empty)."""
        in_nodes = [self._node_to_mini_seq(i) for i in node.inputs]
        if (len(in_nodes) > 0):
            mini['in_nodes'] = in_nodes

    def _make_query(self, node: bn.Query) -> MiniNode:
        """Emit a QUERY node carrying the next query_index."""
        mini: MiniNode = {'operator': 'QUERY', 'type': 'NONE', 'query_index': self._queries}
        self._queries += 1
        self._add_inputs(mini, node)
        return mini

    def _make_constant(self, value: Any) -> MiniNode:
        # Mini BMG models every constant as a REAL.
        return {'operator': 'CONSTANT', 'type': 'REAL', 'value': float(value)}

    def _make_distribution(self, node: bn.DistributionNode) -> MiniNode:
        op = _node_type_to_distribution[type(node)]
        mini: MiniNode = {'operator': op, 'type': 'DISTRIBUTION'}
        self._add_inputs(mini, node)
        return mini

    def _make_operator(self, node: bn.OperatorNode) -> MiniNode:
        op = _node_type_to_operator[type(node)]
        mini: Dict[(str, Any)] = {'operator': op, 'type': 'REAL'}
        self._add_inputs(mini, node)
        return mini

    def _is_observed_sample(self, node: bn.BMGNode) -> bool:
        """True when *node* is a sample with at least one Observation output."""
        return (isinstance(node, bn.SampleNode) and any((isinstance(o, bn.Observation) for o in node.outputs.items)))

    def _get_sample_observation(self, node: bn.SampleNode) -> Any:
        """Return the observed value of *node*, or None if it is unobserved."""
        for o in node.outputs.items:
            if isinstance(o, bn.Observation):
                return o.value
        return None

    def _make_observed_sample(self, node: bn.SampleNode) -> None:
        # An observed sample becomes two mini nodes: a constant for the
        # observed value and an OBSERVE node tying it to the distribution.
        ob = self._get_sample_observation(node)
        mini_const = self._make_constant(ob)
        self._add_mini_node(mini_const)
        const_seq = mini_const['sequence']
        dist_seq = self._node_to_mini_seq(node.operand)
        in_nodes = [dist_seq, const_seq]
        mini_obs = {'operator': 'OBSERVE', 'type': 'NONE', 'in_nodes': in_nodes}
        self._add_mini_node(mini_obs)
        # Downstream consumers of the sample see the observed constant.
        self._node_to_mini_node[node] = mini_const

    def _add_node_to_mini_nodes(self, node: bn.BMGNode) -> None:
        """Emit the mini node(s) for *node*, dispatching on its kind."""
        mini: Optional[MiniNode] = None
        if self._is_observed_sample(node):
            assert isinstance(node, bn.SampleNode)
            self._make_observed_sample(node)
        elif isinstance(node, bn.Observation):
            # Observations are folded into their sample; nothing to emit.
            pass
        elif isinstance(node, bn.Query):
            mini = self._make_query(node)
        elif isinstance(node, bn.ConstantNode):
            mini = self._make_constant(node.value)
        elif isinstance(node, bn.DistributionNode):
            mini = self._make_distribution(node)
        elif isinstance(node, bn.OperatorNode):
            mini = self._make_operator(node)
        else:
            raise ValueError(f'{type(node)} is not supported by miniBMG')
        if (mini is not None):
            self._add_mini_node(mini)
            self._node_to_mini_node[node] = mini
def extractMxnhwuWordpressCom(item):
    """Parse a release message for items from this wordpress feed.

    Returns None for previews or titles with no volume/chapter, a release
    message for recognized tags, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tag, series_name, tl_type in tagmap:
        if tag in item['tags']:
            return buildReleaseMessageWithType(
                item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class AgencyBase(APIView):
    """Base view for agency endpoints: TinyShield parameter validation plus
    toptier-agency resolution helpers.

    NOTE(review): each bare ``_property`` line below looks like a stripped
    ``@property``/``@cached_property`` decorator; ``toptier_code`` is used
    attribute-style in f-strings further down, so it was presumably a
    property too — confirm against the original source.
    """

    # Param keys the subclass wants validated.
    params_to_validate: List[str]
    # Extra/override TinyShield model dicts, merged into the defaults by 'key'.
    additional_models: dict

    def _validate_params(self, param_values, params_to_validate=None):
        """Validate *param_values* with TinyShield models for the chosen keys.

        Also verifies the requested fiscal year/period is within revealed
        submissions. Returns the validated/coerced param dict.
        """
        params_to_validate = (params_to_validate or getattr(self, 'params_to_validate', []))
        additional_models = getattr(self, 'additional_models', [])
        award_type_codes = sorted(award_type_mapping.keys())
        all_models = [{'key': 'fiscal_year', 'name': 'fiscal_year', 'type': 'integer', 'min': fy(settings.API_SEARCH_MIN_DATE), 'max': current_fiscal_year()}, {'key': 'fiscal_period', 'name': 'fiscal_period', 'type': 'integer', 'min': 2, 'max': 12}, {'key': 'filter', 'name': 'filter', 'type': 'text', 'text_type': 'search'}, {'key': 'agency_type', 'name': 'agency_type', 'type': 'enum', 'enum_values': ['awarding', 'funding'], 'optional': True, 'default': 'awarding'}, {'name': 'award_type_codes', 'key': 'award_type_codes', 'type': 'array', 'array_type': 'enum', 'enum_values': (award_type_codes + ['no intersection']), 'optional': True}]
        all_models = update_list_of_dictionaries(all_models, additional_models, 'key')
        # Drop empty-string params so TinyShield does not reject them.
        for val in params_to_validate:
            if (param_values.get(val) == ''):
                param_values.pop(val)
        chosen_models = [model for model in all_models if (model['key'] in params_to_validate)]
        param_values = TinyShield(chosen_models).block(param_values)
        if param_values.get('fiscal_year'):
            validate_request_within_revealed_submissions(fiscal_year=param_values['fiscal_year'], fiscal_period=param_values.get('fiscal_period'))
        return param_values

    _property
    def _query_params(self):
        """Validated copy of the query params (award codes split into a list)."""
        query_params = self.request.query_params.copy()
        if (query_params.get('award_type_codes') is not None):
            query_params['award_type_codes'] = query_params['award_type_codes'].strip('[]').split(',')
        return self._validate_params(query_params)

    _property
    def validated_url_params(self):
        """URL kwargs validated against all of their own keys."""
        return self._validate_params(self.kwargs, list(self.kwargs))

    def toptier_code(self):
        return self.kwargs['toptier_code']

    _property
    def agency_id(self):
        """Primary key of the toptier Agency row; raises NotFound when absent."""
        agency = Agency.objects.filter(toptier_flag=True, toptier_agency=self.toptier_agency).values('id')
        if (not agency):
            raise NotFound(f"Cannot find Agency for toptier code of '{self.toptier_code}'")
        return agency[0]['id']

    _property
    def toptier_agency(self):
        """The ToptierAgency matching the URL's toptier code; NotFound otherwise."""
        toptier_agency = ToptierAgency.objects.filter(toptieragencypublisheddabsview__toptier_code=self.toptier_code).first()
        if (not toptier_agency):
            raise NotFound(f"Agency with a toptier code of '{self.toptier_code}' does not exist")
        return toptier_agency

    _property
    def fiscal_year(self):
        # Requested year, defaulting to the current fiscal year.
        return (self._query_params.get('fiscal_year') or current_fiscal_year())

    _property
    def fiscal_period(self):
        # Requested period; otherwise the final period of the last completed
        # quarter, falling back to 3.
        return (self._query_params.get('fiscal_period') or get_final_period_of_quarter(calculate_last_completed_fiscal_quarter(self.fiscal_year)) or 3)

    def filter(self):
        return self._query_params.get('filter')

    def agency_type(self):
        return self._query_params.get('agency_type')

    def award_type_codes(self):
        return self._query_params.get('award_type_codes')

    def standard_response_messages(self):
        # Data before FY2017 carries a standard caveat message.
        return ([get_account_data_time_period_message()] if (self.fiscal_year < 2017) else [])
def create_assurance_statement_url(result):
    """Build the URL of an agency's assurance-statement ("Agency Comments") file.

    Returns None (after logging) when *result* is missing any required field.
    """
    try:
        name_parts = result['agency_name'].split(' ')
        wrapped_abbrev = f"({result['abbreviation']})"
        toptier_code = result['toptier_code']
        fiscal_year = result['fiscal_year']
        # Quarterly submissions are labeled Q<n>; monthly ones P<nn>.
        if result['submission_is_quarter']:
            fiscal_period = f"Q{int(result['fiscal_period'] / 3)}"
        else:
            fiscal_period = f"P{str(result['fiscal_period']).zfill(2)}"
    except Exception:
        logger.error("Missing fields in result. Can't create assurance statement url.")
        logger.error(f'Result object includes: {json.dumps(result)}')
        return None
    host = settings.FILES_SERVER_BASE_URL
    file_name = (
        f'{fiscal_year}-{fiscal_period}-{toptier_code}_'
        + '%20'.join([*name_parts, wrapped_abbrev])
        + '-Agency_Comments.txt'
    )
    return f'{host}/agency_submissions/{file_name}'
class OptionSeriesErrorbarSonificationDefaultinstrumentoptionsMappingVolume(Options):
    """Accessors for a sonification mapping option group.

    The original had each getter/setter pair as two plain methods with the
    same name, so the second definition silently replaced the first; restored
    as the property/setter pairs this Options pattern uses.
    """

    @property
    def mapFunction(self):
        """Mapping function used for this audio parameter."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Point property this mapping is bound to."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Maximum mapped value."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Minimum mapped value."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Scope the mapping is computed within (e.g. series or chart)."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class ColourList():
    """Parameter type describing a YAML colour array mapped to List[str]."""

    values = None
    python_type = 'List[str]'
    yaml_type = 'ColourList'
    json_schema = {'type': 'array', 'items': {'$ref': 'definitions.json#/definitions/colour'}}

    def __init__(self, param):
        self._param = param
        # This type only supports the stringarray() default.
        declared_default = param._defs.get('default')
        assert declared_default == 'stringarray()', declared_default
        self.yaml_default = []
        self.python_default = self.yaml_default
class ModuleList(Module):
    """Holds submodules in a list, registering each so it is visible to
    Module machinery (parameters(), to(), state_dict(), ...). Supports
    list-style indexing (including negative indices and slices).
    """

    # Backing store: stringified index -> module, kept contiguous "0".."n-1".
    _modules: Dict[(str, Module)]

    def __init__(self, modules: Optional[Iterable[Module]]=None) -> None:
        super(ModuleList, self).__init__()
        if (modules is not None):
            self += modules

    def _get_abs_string_index(self, idx):
        """Normalize a possibly-negative index to its string key, with bounds check."""
        idx = operator.index(idx)
        if (not ((- len(self)) <= idx < len(self))):
            raise IndexError(f'index {idx} is out of range ')
        if (idx < 0):
            idx += len(self)
        return str(idx)

    def __getitem__(self, idx: int) -> Union[(Module, 'ModuleList')]:
        # Slicing returns a new ModuleList over the selected modules.
        if isinstance(idx, slice):
            return self.__class__(list(self._modules.values())[idx])
        else:
            return self._modules[self._get_abs_string_index(idx)]

    def __setitem__(self, idx: int, module: Module) -> None:
        idx = self._get_abs_string_index(idx)
        # setattr routes through Module.__setattr__, re-registering the module.
        return setattr(self, str(idx), module)

    def __delitem__(self, idx: Union[(int, slice)]) -> None:
        if isinstance(idx, slice):
            for k in range(len(self._modules))[idx]:
                delattr(self, str(k))
        else:
            delattr(self, self._get_abs_string_index(idx))
        # Re-key the remaining modules so indices stay contiguous from 0.
        str_indices = [str(i) for i in range(len(self._modules))]
        self._modules = OrderedDict(zip(str_indices, self._modules.values()))

    def __len__(self) -> int:
        return len(self._modules)

    def __iter__(self) -> Iterator[Module]:
        return iter(self._modules.values())

    def __iadd__(self, modules: Iterable[Module]) -> 'ModuleList':
        return self.extend(modules)

    def __add__(self, other: Iterable[Module]) -> 'ModuleList':
        combined = ModuleList()
        for (i, module) in enumerate(chain(self, other)):
            combined.add_module(str(i), module)
        return combined

    def __dir__(self):
        # Hide the numeric attribute names from dir() output.
        keys = super(ModuleList, self).__dir__()
        keys = [key for key in keys if (not key.isdigit())]
        return keys

    def insert(self, index: int, module: Module) -> None:
        """Insert *module* before *index*, shifting later entries up by one."""
        for i in range(len(self._modules), index, (- 1)):
            self._modules[str(i)] = self._modules[str((i - 1))]
        self._modules[str(index)] = module

    def append(self, module: Module) -> 'ModuleList':
        """Append *module*; returns self to allow chaining."""
        self.add_module(str(len(self)), module)
        return self

    def pop(self, key: Union[(int, slice)]) -> Module:
        """Remove and return the module at *key*."""
        v = self[key]
        del self[key]
        return v

    def extend(self, modules: Iterable[Module]) -> 'ModuleList':
        """Append all of *modules*; returns self to allow chaining."""
        if (not isinstance(modules, container_abcs.Iterable)):
            raise TypeError(('ModuleList.extend should be called with an iterable, but got ' + type(modules).__name__))
        offset = len(self)
        for (i, module) in enumerate(modules):
            self.add_module(str((offset + i)), module)
        return self
class IsolateLogPrinter():
    """Pretty-prints isolate log records (objects or dicts) through the
    configured structlog renderer; sub-INFO records are skipped unless
    debug mode is on."""

    debug: bool

    def __init__(self, debug: bool=False) -> None:
        self.debug = debug

    def print(self, log: Log):
        """Render a Log object to stdout."""
        if log.level < LogLevel.INFO and not self.debug:
            return
        level = str(log.level)
        # Fall back to "now" when the record carries no timestamp.
        timestamp = log.timestamp if hasattr(log, 'timestamp') else datetime.now(timezone.utc)
        event: EventDict = {
            'event': log.message,
            'level': level,
            # Truncate microseconds down to milliseconds.
            'timestamp': timestamp.strftime('%Y-%m-%d %H:%M:%S.%f')[:(- 3)],
        }
        if self.debug and log.bound_env and log.bound_env.key != 'global':
            event['bound_env'] = log.bound_env.key
        rendered = _renderer.__call__(logger={}, name=level, event_dict=event)
        print(rendered)

    def print_dict(self, log: dict):
        """Render a dict-shaped log record to stdout."""
        level = LogLevel[log['level']]
        if level < LogLevel.INFO and not self.debug:
            return
        if 'timestamp' in log:
            timestamp = log['timestamp']
        else:
            timestamp = datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S.%f')
        event: EventDict = {
            'event': log['message'],
            'level': log['level'],
            # assumes the timestamp string ends in microseconds; trims to ms
            'timestamp': timestamp[:(- 3)],
        }
        rendered = _renderer.__call__(logger={}, name=log['level'], event_dict=event)
        print(rendered)
class CodeNodeSerializer(AbstractSyntaxTreeNodeSerializer):
    """(De)serializes CodeNode AST nodes, including their instruction lists."""

    def serialize(self, node: CodeNode) -> Dict:
        """Serialize *node*, extending the base payload with its instructions."""
        payload = super().serialize(node)
        payload.update({'instructions': [self._pseudo.serialize(instr) for instr in node.instructions]})
        return payload

    def deserialize(self, data: dict) -> CodeNode:
        """Rebuild a CodeNode from *data* produced by serialize()."""
        instructions = [self._pseudo.deserialize(instr) for instr in data['instructions']]
        condition = LogicCondition.deserialize(data['rc'], self._group.new_context)
        return CodeNode(stmts=instructions, reaching_condition=condition)
def get_compare_questions(a_questions, b_questions):
    """Yield A/B comparison questions for ids present in both answer sets.

    Pairs with identical outputs are skipped. Option order is randomized
    (50/50) so graders cannot infer which model produced which answer;
    each yielded option dict is tagged in place with its model id.
    """
    by_id_a = {entry['id']: entry for entry in a_questions}
    by_id_b = {entry['id']: entry for entry in b_questions}
    for qid, a_entry in by_id_a.items():
        b_entry = by_id_b.get(qid)
        if b_entry is None:
            continue
        question = {**a_entry['input'], 'id': qid, 'A': a_entry['output'], 'B': b_entry['output']}
        if question['A'] == question['B']:
            continue
        order = ['B', 'A'] if random.random() >= 0.5 else ['A', 'B']
        question['options'] = []
        for model_id in order:
            option = question[model_id]
            option['id'] = model_id
            question['options'].append(option)
        yield question
class ExperimentConfig():
    """Bundles everything needed to configure an actor-learner experiment.

    NOTE(review): annotation-only fields followed by defaulted fields suggest
    this was a ``@dataclasses.dataclass`` whose decorator was stripped —
    confirm against the original source.
    """

    # Builder wiring actors/learners together.
    builder: builders.ActorLearnerBuilder
    # Constructs agent networks from an environment spec.
    network_factory: NetworkFactory
    # Constructs the training environment.
    environment_factory: types.EnvironmentFactory
    # Upper bound on total actor steps for the run.
    max_num_actor_steps: int
    seed: int
    policy_network_factory: Optional[DeprecatedPolicyFactory] = None
    evaluator_factories: Optional[Sequence[EvaluatorFactory]] = None
    eval_policy_network_factory: Optional[DeprecatedPolicyFactory] = None
    environment_spec: Optional[specs.EnvironmentSpec] = None
    observers: Sequence[observers_lib.EnvLoopObserver] = ()
    logger_factory: loggers.LoggerFactory = experiment_utils.make_experiment_logger

    def get_evaluator_factories(self):
        """Return explicit evaluator factories, or build a default one that
        derives an evaluation policy from the builder/legacy factory."""
        if (self.evaluator_factories is not None):
            return self.evaluator_factories

        def eval_policy_factory(networks: AgentNetwork, environment_spec: specs.EnvironmentSpec, evaluation: EvaluationFlag) -> PolicyNetwork:
            del evaluation
            # Prefer the legacy eval factory when provided; otherwise ask the
            # builder for an evaluation-mode policy.
            if (self.eval_policy_network_factory is not None):
                return self.eval_policy_network_factory(networks)
            else:
                return self.builder.make_policy(networks=networks, environment_spec=environment_spec, evaluation=True)
        return [default_evaluator_factory(environment_factory=self.environment_factory, network_factory=self.network_factory, policy_factory=eval_policy_factory, logger_factory=self.logger_factory, observers=self.observers)]
class BaseFatetell(GenericAction):
    """A "fatetell" (judgement) action: reveal the top deck card and test it
    against a predicate."""

    # Event name emitted when the judgement resolves.
    type: str

    def __init__(self, target, cond):
        # A fatetell is self-directed: source and target are the same player.
        self.source = target
        self.target = target
        self.cond = cond
        self.initiator = self.game.hybrid_stack[(- 1)]
        self.card_manipulator = self

    def apply_action(self):
        g = self.game
        (card,) = g.deck.getcards(1)
        # The judgement card is revealed to all players.
        g.players.reveal(card)
        self.card = card
        detach_cards([card])
        g.emit_event(self.type, self)
        # NOTE(review): this returns the bound method object (always truthy)
        # rather than calling it — possibly `succeeded` was a property in the
        # original, or the call parentheses were lost; confirm.
        return self.succeeded

    def set_card(self, card, card_manipulator):
        """Replace the judgement card (e.g. via a card-swap ability)."""
        self.card = card
        self.card_manipulator = card_manipulator

    def succeeded(self):
        """Whether the revealed card satisfies the judgement predicate."""
        return self.cond(self.card)
def parse_oozie_default_args():
    """Return the default CLI namespace used by the oozie conversion tests.

    The defaults mirror what the real argument parser would produce when
    invoked with no overrides.
    """
    defaults = {
        'cluster_base_name': None,
        'cluster_depends_on_past': False,
        'cluster_name_suffix': None,
        'cluster_num_workers': 128,
        'cluster_project_id': 'my-project',
        'cluster_region': 'us-central1',
        'cluster_wait_for_downstream': False,
        'dag_concurrency': 16,
        'dag_disable_catchup': False,
        'dag_max_active_runs': 1,
        'dag_name': None,
        'dag_schedule_interval': '',
        'only_nodes': None,
        'prune_nodes': None,
        'with_external_task_sensors': [],
        'workflow_name': 'test-workflow',
    }
    return Namespace(**defaults)
class StatusPayloadFactory(factory.Factory):
    """factory_boy factory producing LES Status payloads preloaded with
    mainnet genesis defaults."""

    class Meta():
        model = StatusPayload
    version = LESProtocolV2.version
    network_id = MAINNET_NETWORK_ID
    head_td = GENESIS_DIFFICULTY
    head_hash = MAINNET_GENESIS_HASH
    head_number = GENESIS_BLOCK_NUMBER
    genesis_hash = MAINNET_GENESIS_HASH
    serve_headers = True
    serve_chain_since = 0
    serve_state_since = None
    serve_recent_state = None
    serve_recent_chain = None
    tx_relay = False
    flow_control_bl = None
    flow_control_mcr = None
    flow_control_mrr = None
    # announce_type only exists from LES/2 onwards, hence the version gate.
    announce_type = factory.LazyAttribute((lambda o: (o.version if (o.version >= LESProtocolV2.version) else None)))
class ReadFontInfoVersion1TestCase(unittest.TestCase):
    """Round-trip tests for reading format-version-1 fontinfo.plist data
    through UFOReader (values are surfaced under their version-2 names)."""

    def setUp(self):
        # Fix: tempfile.mkdtemp() atomically creates the directory.  The
        # previous tempfile.mktemp() + os.mkdir() pair was racy, and mktemp()
        # is deprecated for exactly that reason.
        self.dstDir = tempfile.mkdtemp()
        metaInfo = {'creator': 'test', 'formatVersion': 1}
        path = os.path.join(self.dstDir, 'metainfo.plist')
        with open(path, 'wb') as f:
            plistlib.dump(metaInfo, f)

    def tearDown(self):
        shutil.rmtree(self.dstDir)

    def _writeInfoToPlist(self, info):
        """Write *info* as fontinfo.plist inside the temporary UFO dir."""
        path = os.path.join(self.dstDir, 'fontinfo.plist')
        with open(path, 'wb') as f:
            plistlib.dump(info, f)

    def testRead(self):
        # Reading v1 data should produce the v2 attribute names/values.
        originalData = dict(fontInfoVersion1)
        self._writeInfoToPlist(originalData)
        infoObject = TestInfoObject()
        reader = UFOReader(self.dstDir, validate=True)
        reader.readInfo(infoObject)
        for attr in dir(infoObject):
            if (attr not in fontInfoVersion2):
                continue
            originalValue = fontInfoVersion2[attr]
            readValue = getattr(infoObject, attr)
            self.assertEqual(originalValue, readValue)

    def testFontStyleConversion(self):
        # v1 numeric fontStyle values map onto v2 styleMapStyleName strings.
        fontStyle1To2 = {64: 'regular', 1: 'italic', 32: 'bold', 33: 'bold italic'}
        for (old, new) in list(fontStyle1To2.items()):
            info = dict(fontInfoVersion1)
            info['fontStyle'] = old
            self._writeInfoToPlist(info)
            reader = UFOReader(self.dstDir, validate=True)
            infoObject = TestInfoObject()
            reader.readInfo(infoObject)
            self.assertEqual(new, infoObject.styleMapStyleName)

    def testWidthNameConversion(self):
        # v1 width-name strings map onto v2 OS/2 width-class integers.
        widthName1To2 = {'Ultra-condensed': 1, 'Extra-condensed': 2, 'Condensed': 3, 'Semi-condensed': 4, 'Medium (normal)': 5, 'Semi-expanded': 6, 'Expanded': 7, 'Extra-expanded': 8, 'Ultra-expanded': 9}
        for (old, new) in list(widthName1To2.items()):
            info = dict(fontInfoVersion1)
            info['widthName'] = old
            self._writeInfoToPlist(info)
            reader = UFOReader(self.dstDir, validate=True)
            infoObject = TestInfoObject()
            reader.readInfo(infoObject)
            self.assertEqual(new, infoObject.openTypeOS2WidthClass)
def test_new_file_dir():
    """new_dir/new_file must join paths with exactly one separator, whether
    or not the base path carries a trailing slash."""
    fd = FlyteDirectory(path='s3://my-bucket')
    assert (fd.sep == '/')
    inner_dir = fd.new_dir('test')
    assert (inner_dir.path == 's3://my-bucket/test')
    # A trailing slash on the base path must not produce a double slash.
    fd = FlyteDirectory(path='s3://my-bucket/')
    inner_dir = fd.new_dir('test')
    assert (inner_dir.path == 's3://my-bucket/test')
    f = inner_dir.new_file('test')
    assert isinstance(f, FlyteFile)
    assert (f.path == 's3://my-bucket/test/test')
.parametrize('name, content_var', FILES.items())
# NOTE(review): the line above is missing its '@pytest.mark' prefix -- it
# looks like decorator text was stripped during extraction; confirm upstream.
def test_python_virtualenv(tmpdir, name, content_var):
    """install_activate should (over)write each activation script, with the
    prompt disable/enable wrappers applied where defined for that script."""
    if nodeenv.is_WIN:
        bin_dir = tmpdir.join('Scripts')
    else:
        bin_dir = tmpdir.join('bin')
    bin_dir.mkdir()
    # Pre-seed each expected file so install_activate must overwrite them.
    for n in FILES:
        bin_dir.join(n).write(n)
    with mock.patch.object(sys, 'argv', ['nodeenv', '-p']):
        opts = nodeenv.parse_args()
    nodeenv.install_activate(str(tmpdir), opts)
    content = getattr(nodeenv, content_var)
    disable_prompt = nodeenv.DISABLE_PROMPT.get(name)
    if disable_prompt:
        # Scripts with prompt handling are wrapped: name + disable + body + enable.
        enable_prompt = nodeenv.ENABLE_PROMPT.get(name, '')
        content = (((name + disable_prompt) + content) + enable_prompt)
    assert (bin_dir.join(name).read() == fix_content(content, tmpdir))
(IPythonShell)
# NOTE(review): the bare `(IPythonShell)` expression above looks like the
# residue of a stripped `@provides(IPythonShell)` decorator -- confirm.
class PythonShell(MPythonShell, LayoutWidget):
    """wx PyShell-backed interactive Python shell widget."""

    # Fired after a command has been executed in the shell.
    command_executed = Event()
    # Fired on every key press in the shell control.
    key_pressed = Event(KeyPressedEvent)

    def __init__(self, parent=None, **traits):
        create = traits.pop('create', None)
        super().__init__(parent=parent, **traits)
        if create:
            self.create()
            warnings.warn('automatic widget creation is deprecated and will be removed in a future Pyface version, code should not pass the create parameter and should instead call create() explicitly', DeprecationWarning, stacklevel=2)
        elif (create is not None):
            warnings.warn('setting create=False is no longer required', DeprecationWarning, stacklevel=2)

    def interpreter(self):
        """Return the wx PyShell's interpreter instance."""
        return self.control.interp

    def execute_command(self, command, hidden=True):
        """Execute *command*; a hidden push produces no echo in the shell."""
        if hidden:
            self.control.hidden_push(command)
        else:
            self.control.Execute(command)

    def execute_file(self, path, hidden=True):
        """Execute a Python file in a fresh __main__, mimicking `python path`."""
        filename = os.path.basename(path)
        main_mod = types.ModuleType('__main__')
        prog_ns = main_mod.__dict__
        prog_ns['__file__'] = filename
        # Keeps the temporary module object always truthy.
        prog_ns['__nonzero__'] = (lambda : True)
        save_argv = sys.argv
        sys.argv = [filename]
        save_main = sys.modules['__main__']
        sys.modules['__main__'] = main_mod
        old_stdin = sys.stdin
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        if hidden:
            sys.stdin = sys.stdout = sys.stderr = _NullIO()
        else:
            sys.stdin = sys.stdout = sys.stderr = self.control
        try:
            if (not hidden):
                self.control.clearCommand()
                self.control.write(('# Executing "%s"\n' % path))
            exec(open(path).read(), prog_ns, prog_ns)
            if (not hidden):
                self.control.prompt()
        finally:
            # Restore interpreter state no matter what the script did.
            sys.argv = save_argv
            sys.modules['__main__'] = save_main
            sys.stdin = old_stdin
            sys.stdout = old_stdout
            sys.stderr = old_stderr
            del prog_ns['__name__']
            del prog_ns['__file__']
            del prog_ns['__nonzero__']
            self.interpreter().locals.update(prog_ns)

    def get_history(self):
        """Return (history list, current history index)."""
        return (self.control.history, self.control.historyIndex)

    def set_history(self, history, history_index):
        # Clamp an out-of-range index to the end of the history.
        if (not (0 <= history_index <= len(history))):
            history_index = len(history)
        self.control.history = list(history)
        self.control.historyIndex = history_index

    def _create_control(self, parent):
        shell = PyShell(parent, (- 1))
        shell.Bind(wx.EVT_CHAR, self._wx_on_char)
        shell.SetDropTarget(PythonDropTarget(self))
        shell.handlers.append(self._on_command_executed)
        return shell

    def on_drop(self, x, y, obj, default_drag_result):
        """Inject a dropped object into the interpreter namespace and run it."""
        name = 'dragged'
        if (hasattr(obj, 'name') and isinstance(obj.name, str) and (len(obj.name) > 0)):
            py_name = python_name(obj.name)
            try:
                # Only use the derived name when it is a clean identifier that
                # evaluates truthily; otherwise keep the 'dragged' fallback.
                if eval(py_name, {py_name: True}):
                    name = py_name
            except Exception:
                # Fix: was a bare `except:`, which also swallowed SystemExit
                # and KeyboardInterrupt; narrowed to Exception (covers the
                # expected SyntaxError/NameError from eval).
                pass
        self.control.interp.locals[name] = obj
        self.control.run(name)
        self.control.SetFocus()
        return wx.DragCopy

    def on_drag_over(self, x, y, obj, default_drag_result):
        return wx.DragCopy

    def _wx_on_char(self, event):
        # Alt + key codes 317/319 drive zoom in/out, clamped to [-10, 20].
        if (event.AltDown() and (event.GetKeyCode() == 317)):
            # Fix: this class has no `shell` attribute; reading the zoom must
            # go through self.control, matching the SetZoom calls below
            # (previously `self.shell.control.GetZoom()` -> AttributeError).
            zoom = self.control.GetZoom()
            if (zoom != 20):
                self.control.SetZoom((zoom + 1))
        elif (event.AltDown() and (event.GetKeyCode() == 319)):
            zoom = self.control.GetZoom()
            if (zoom != (- 10)):
                self.control.SetZoom((zoom - 1))
        self.key_pressed = KeyPressedEvent(alt_down=(event.AltDown() == 1), control_down=(event.ControlDown() == 1), shift_down=(event.ShiftDown() == 1), key_code=event.GetKeyCode(), event=event)
        event.Skip()
class OptionPlotoptionsPyramidSonificationDefaultinstrumentoptionsMappingTime(Options):
    """Generated Highcharts option wrapper for the
    plotOptions.pyramid.sonification.defaultInstrumentOptions.mapping.time
    sub-options.

    NOTE(review): every name below is defined twice (getter followed by a
    setter of the same name).  As plain defs the second shadows the first;
    this only works if @property / @<name>.setter decorators were stripped
    during extraction -- confirm against the generated original.
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class ViewAction():
    """Admin mixin adding an inline 'View' action that redirects to the
    object's admin change page."""

    # Action names rendered inline in the changelist row.
    inline_actions: Optional[List[Union[(str, Callable)]]] = ['view_action']

    def view_action(self, request, obj, parent_obj=None):
        # Resolve the admin change URL for this object's concrete model.
        url = reverse('admin:{}_{}_change'.format(obj._meta.app_label, obj._meta.model_name), args=(obj.pk,))
        return redirect(url)
    view_action.short_description = _('View')
class WebhookLogRepository(BaseRepository[WebhookLog], UUIDRepositoryMixin[WebhookLog]):
    """Repository for WebhookLog rows."""

    model = WebhookLog

    async def get_by_id_and_webhook(self, id: uuid.UUID, webhook: uuid.UUID) -> (WebhookLog | None):
        """Fetch a log by primary key, scoped to the owning webhook; None if
        no row matches both conditions."""
        statement = select(WebhookLog).where((WebhookLog.id == id), (WebhookLog.webhook_id == webhook))
        return (await self.get_one_or_none(statement))
class Migration(migrations.Migration):
    # Auto-generated Django migration: tightens help_text/validators on the
    # ObjectDB db_sessid and db_typeclass_path fields.  Do not hand-edit the
    # operations list.
    dependencies = [('objects', '0010_auto__1820')]
    operations = [migrations.AlterField(model_name='objectdb', name='db_sessid', field=models.CharField(help_text='csv list of session ids of connected Account, if any.', max_length=32, null=True, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')], verbose_name='session id')), migrations.AlterField(model_name='objectdb', name='db_typeclass_path', field=models.CharField(db_index=True, help_text="this defines what 'type' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass.", max_length=255, null=True, verbose_name='typeclass'))]
class TestAxInvalidCastChoice():
    """Config values that cannot be cast to the declared hyper-parameter
    type must raise TypeError."""

    def test_invalid_cast_choice(self, monkeypatch):
        with monkeypatch.context() as m:
            # Point the CLI at a config whose values have the wrong type.
            m.setattr(sys, 'argv', ['', '--config', './tests/conf/yaml/test_hp_cast.yaml'])
            ax_config = AxTunerConfig(name='Basic Test', minimize=False, objective_name='None', verbose_logging=False)
            with pytest.raises(TypeError):
                # Assignment is unused; constructing the builder triggers the cast.
                config = ConfigArgBuilder(HPOne, HPTwo).tuner(ax_config)
class TransferModel(Base):
    """SQLAlchemy model of an on-chain token transfer."""

    __tablename__ = 'transfers'
    block_number = Column(Numeric, nullable=False)
    # NOTE(review): transaction_hash alone is the primary key, so only one
    # transfer per transaction can be stored even though trace_address is
    # recorded -- confirm this is intended.
    transaction_hash = Column(String, primary_key=True)
    trace_address = Column(ARRAY(Integer), nullable=False)
    protocol = Column(String, nullable=True)
    from_address = Column(String, nullable=False)
    to_address = Column(String, nullable=False)
    token_address = Column(String, nullable=False)
    amount = Column(Numeric, nullable=False)
    # Populated when the underlying trace reverted/errored.
    error = Column(String, nullable=True)
def test():
    """Drive a 20-node toy network simulation, periodically printing each
    node's seen value set, then report the set most honest nodes agree on."""
    net = Network(LATENCY * 2)
    nodes = [Node(net, idx, (idx % 4) == 0) for idx in range(20)]
    # Inject two copies of each of 30 values at staggered times.
    for value_idx in range(30):
        for _ in range(2):
            recipient = random.randrange(20)
            net.send_to([1000 + value_idx], recipient, at=5 + value_idx * 2)
    # Run the simulation, dumping a snapshot every 10 ticks.
    for tick_no in range(21 * LATENCY):
        net.tick()
        if tick_no % 10 == 0:
            print('Value sets', [sorted(node.seen.keys()) for node in nodes])
    # Tally identical value-sets among honest nodes, tracking the leader
    # (first set to reach a strictly greater count wins ties).
    tallies = {}
    leader = ''
    for node in nodes:
        if not node.honest:
            continue
        key = str(sorted(node.seen.keys()))
        tallies[key] = tallies.get(key, 0) + 1
        if tallies[key] > tallies.get(leader, 0):
            leader = key
    print('Most popular: %s' % leader, 'with %d agreeing' % tallies[leader])
class TaskCollection(object):
    """A name-keyed, iterable collection of tasks supporting optional
    dependency ordering and filtering by task type."""

    # Maps the serialized 'type' attribute to the concrete task class.
    task_type_to_cls = {'manual_fetch': ManualFetchTask, 'manual_fetch_skip_for_now': ManualFetchTask, 'auto_fetch': AutoFetchTask, 'convert': ConvertTask, 'import': ImportTask, 'post_process': PostProcessTask}

    def __init__(self, task_data=None, ordered=False, task_type=None):
        self._tasks = {}
        if isinstance(task_data, dict):
            # Raw config mapping: instantiate each task from its attributes.
            for name, attrs in task_data.items():
                self.add(self.task_type_to_cls[attrs['type']](name, attrs))
        elif isinstance(task_data, list):
            # Already-built task objects.
            for existing in task_data:
                self.add(existing)
        self._ordered = ordered
        self._type = task_type

    def add(self, task):
        """Register *task* under its name, replacing any same-named task."""
        self._tasks[task.name] = task

    def __getitem__(self, name):
        return self._tasks[name]

    def __iter__(self):
        """Yield tasks (dependency-ordered when requested), filtered to the
        collection's task type if one is set."""
        if self._ordered:
            dag = nx.DiGraph()
            for task in self._tasks.values():
                dag.add_node(task)
                for dep in task.dependencies:
                    dag.add_node(dep)
                    # Edge points dependency -> dependent task.
                    dag.add_edge(dep, task)
            candidates = nx.topological_sort(dag)
        else:
            # Deterministic fallback: alphabetical by task name.
            candidates = [entry for _, entry in sorted(self._tasks.items())]
        for task in candidates:
            if self._type is None or self._type == task.task_type:
                yield task

    def __bool__(self):
        if not self._type:
            return bool(self._tasks)
        return any((task for task in self if (task.task_type == self._type)))

    def by_type(self, task_type):
        """Return a new collection restricted to *task_type*."""
        return TaskCollection(list(self), ordered=self._ordered, task_type=task_type)

    def ordered(self):
        """Return a new dependency-ordered copy of this collection."""
        return TaskCollection(list(self), ordered=True, task_type=self._type)
class StepStatsRenderer():
    """Renders per-step event statistics of an episode as a matplotlib plot."""

    # NOTE(review): `render_stats` takes no `self`/`cls` parameter -- this
    # reads like a @staticmethod decorator stripped during extraction; confirm.
    def render_stats(episode_event_log: EpisodeEventLog, event_name: str='BaseEnvEvents.reward', group_by: Optional[str]=None, aggregation_func: Optional[Callable]=None, metric: str='value', cumulative: bool=True):
        """Collect `metric` values of `event_name` per step (optionally grouped
        by an event attribute and aggregated) and plot one line per group."""
        # (step_id, group_by_value) -> list of raw metric values
        events = defaultdict(list)
        group_by_values = set()
        for (step_id, step_event_log) in enumerate(episode_event_log.step_event_logs):
            for event_record in step_event_log.events:
                if (event_name != event_record.interface_method.__qualname__):
                    continue
                assert ((group_by is None) or (group_by in event_record.attributes.keys())), 'Group by must be present in event attributes'
                assert (metric in event_record.attributes.keys()), 'Metric by must be present in event attributes'
                group_by_value = (event_record.attributes[group_by] if (group_by is not None) else None)
                events[(step_id, group_by_value)].append(event_record.attributes[metric])
                group_by_values.add(group_by_value)
        # Reduce each (step, group) bucket to a single number.
        aggregated = {}
        for (key, values) in events.items():
            if (aggregation_func is None):
                assert (len(values) == 1), 'Aggregation function is None but multiple values encountered'
                aggregated[key] = values[0]
            else:
                aggregated[key] = aggregation_func(values)
        # One series per group value; steps without events default to 0.
        lines = {}
        step_ids = list(range(len(episode_event_log.step_event_logs)))
        for group_by_value in group_by_values:
            lines[group_by_value] = list(map((lambda step_id: aggregated.get((step_id, group_by_value), 0)), step_ids))
        plt.figure()
        for (group_by_value, series) in lines.items():
            if cumulative:
                series = np.cumsum(series)
            plt.plot(step_ids, series, label=group_by_value)
        if (group_by is not None):
            plt.legend(title=group_by)
        plt.title(event_name)
        plt.xlabel('Step ID')
        metric_label = metric
        if aggregation_func:
            metric_label = ((aggregation_func.__name__ + ': ') + metric_label)
        if cumulative:
            metric_label += ' (cumulative)'
        plt.ylabel(metric_label)
        plt.show()
class XCalExporter():
    """Exports an event's accepted session schedule as xCal
    (iCalendar-in-XML)."""

    def __init__(self):
        pass

    # NOTE(review): `export` has no `self`/`cls` parameter -- it reads like a
    # @staticmethod decorator stripped during extraction; confirm upstream.
    def export(event_id):
        """Return the serialized xCal document (bytes) for all accepted,
        scheduled, non-deleted sessions of the event *event_id*."""
        event = Event.query.get(event_id)
        i_calendar_node = Element('iCalendar')
        i_calendar_node.set('xmlns:xCal', 'urn:ietf:params:xml:ns:xcal')
        v_calendar_node = SubElement(i_calendar_node, 'vcalendar')
        version_node = SubElement(v_calendar_node, 'version')
        version_node.text = '2.0'
        prod_id_node = SubElement(v_calendar_node, 'prodid')
        prod_id_node.text = '-//fossasia//open-event//EN'
        cal_desc_node = SubElement(v_calendar_node, 'x-wr-caldesc')
        cal_desc_node.text = ('Schedule for sessions at ' + event.name)
        cal_name_node = SubElement(v_calendar_node, 'x-wr-calname')
        cal_name_node.text = event.name
        # Only accepted, non-deleted sessions, in chronological order.
        sessions = Session.query.filter_by(event_id=event_id).filter_by(state='accepted').filter(Session.deleted_at.is_(None)).order_by(asc(Session.starts_at)).all()
        for session in sessions:
            # Skip sessions without a concrete start/end time.
            if (session and session.starts_at and session.ends_at):
                v_event_node = SubElement(v_calendar_node, 'vevent')
                method_node = SubElement(v_event_node, 'method')
                method_node.text = 'PUBLISH'
                uid_node = SubElement(v_event_node, 'uid')
                uid_node.text = ((str(session.id) + '-') + event.identifier)
                dtstart_node = SubElement(v_event_node, 'dtstart')
                dtstart_node.text = session.starts_at.isoformat()
                dtend_node = SubElement(v_event_node, 'dtend')
                dtend_node.text = session.ends_at.isoformat()
                duration_node = SubElement(v_event_node, 'duration')
                # NOTE(review): str(timedelta) + '00:00' yields strings like
                # '1:00:0000:00', which is not an obvious DURATION format --
                # confirm the intended format upstream.
                duration_node.text = (str((session.ends_at - session.starts_at)) + '00:00')
                summary_node = SubElement(v_event_node, 'summary')
                summary_node.text = session.title
                description_node = SubElement(v_event_node, 'description')
                description_node.text = (session.short_abstract or 'N/A')
                class_node = SubElement(v_event_node, 'class')
                class_node.text = 'PUBLIC'
                status_node = SubElement(v_event_node, 'status')
                status_node.text = 'CONFIRMED'
                categories_node = SubElement(v_event_node, 'categories')
                categories_node.text = (session.session_type.name if session.session_type else '')
                url_node = SubElement(v_event_node, 'url')
                url_node.text = url_for('v1.event_list', identifier=event.identifier, _external=True)
                location_node = SubElement(v_event_node, 'location')
                location_node.text = (session.microlocation.name if session.microlocation else 'Not decided yet')
                for speaker in session.speakers:
                    attendee_node = SubElement(v_event_node, 'attendee')
                    attendee_node.text = speaker.name
        return tostring(i_calendar_node)
class TestURLFuzzer(unittest.TestCase):
    """URLFuzzer tests; Host/URLFuzzer are patched so no log files are
    written to disk."""

    def setUp(self):
        self.TestHost = Host
        # Stub out filesystem logging side effects on the class itself.
        self.TestHost.create_host_dir_and_set_file_logger = (lambda _: None)
        self.TestFuzzer = URLFuzzer
        self.TestFuzzer.get_log_file_path = (lambda _, __: SystemOutLogger())
        self.loop = asyncio.get_event_loop()

    def test_bad_wordlist(self):
        """A nonexistent wordlist path must raise FuzzerException."""
        host = self.TestHost('127.0.0.1', ())
        with self.assertRaises(FuzzerException):
            fuzzer = self.TestFuzzer(host, (), path_to_wordlist='no/such/path', num_threads=1)
def latest_review_time():
    """Return the epoch time (seconds) of the most recent review in the
    collection's revlog, falling back to the current time when no reviews
    exist."""
    if ah.user_settings['keep_log']:
        ah.log.debug('Begin function')
    # revlog ids are epoch milliseconds; dividing by 1000 yields seconds.
    latest = mw.col.db.scalar('select max(id/1000) from revlog')
    if latest is None:
        # Empty revlog: treat "now" as the latest review time.
        latest = intTime()
    if ah.user_settings['keep_log']:
        ah.log.debug('End function returning: %s (%s)' % (latest, prettyTime(latest)))
    return latest
def search_view(request):
    """dm+d search page: render the form, run the search, and redirect
    straight to the object page when there is exactly one hit."""
    if ('q' in request.GET):
        form = SearchForm(request.GET)
        if form.is_valid():
            search_params = form.cleaned_data
            # max_results_per_obj_type is a form field, not a search() kwarg,
            # so pop it before the ** expansion below.
            max_results_per_obj_type = (search_params.pop('max_results_per_obj_type') or 10)
            results = search(**search_params)
            _annotate_search_results(results, search_params, max_results_per_obj_type)
            if (len(results) == 1):
                if (len(results[0]['objs']) == 1):
                    # Single unambiguous hit: jump directly to its page.
                    obj = results[0]['objs'][0]
                    link = reverse('dmd_obj', args=[obj.obj_type, obj.id])
                    return redirect(link)
        else:
            results = None
    else:
        # No query submitted: show an empty form.
        form = SearchForm()
        results = None
    ctx = {'form': form, 'results': results}
    ctx.update(_release_metadata())
    return render(request, 'dmd/search.html', ctx)
class OptionSeriesHistogramSonificationContexttracksMappingHighpass(Options):
    """Generated Highcharts option wrapper for the
    series.histogram.sonification.contextTracks.mapping.highpass sub-options.

    NOTE(review): these look like stripped-@property getters (no setters in
    view) -- confirm against the generated original.
    """

    def frequency(self) -> 'OptionSeriesHistogramSonificationContexttracksMappingHighpassFrequency':
        return self._config_sub_data('frequency', OptionSeriesHistogramSonificationContexttracksMappingHighpassFrequency)

    def resonance(self) -> 'OptionSeriesHistogramSonificationContexttracksMappingHighpassResonance':
        return self._config_sub_data('resonance', OptionSeriesHistogramSonificationContexttracksMappingHighpassResonance)
def extractEvelantranslationsWordpressCom(item):
    """Feed-item parser for 'Evelan Translations' (wordpress.com).

    Returns a release message for recognized series tags, None for items that
    are not chapter posts (or are previews), and False when nothing matches.
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    is_preview = 'preview' in title.lower()
    if is_preview or not (chp or vol):
        return None
    # (feed tag, canonical series name, translation type)
    releases = (('God of Thunder', 'God of Thunder', 'translated'),)
    for tagname, series_name, tl_type in releases:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class TlsBulkCertificatesResponse(ModelComposed):
    """Generated OpenAPI composed model: Pagination +
    TlsBulkCertificatesResponseAllOf.

    NOTE(review): the bare `_property` and `_js_args_to_python_args`
    expression lines below look like decorators whose '@' prefix (and, for
    `_from_openapi_data`, an accompanying @classmethod) was stripped during
    extraction -- confirm against the generated original before editing.
    Generated code: prefer regenerating over hand-editing.
    """

    allowed_values = {}
    validations = {}
    _property
    def additional_properties_type():
        # Lazily import referenced models to avoid import cycles.
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type)
    _nullable = False
    _property
    def openapi_types():
        lazy_import()
        return {'links': (PaginationLinks,), 'meta': (PaginationMeta,), 'data': ([TlsBulkCertificateResponseData],)}
    _property
    def discriminator():
        return None
    attribute_map = {'links': 'links', 'meta': 'meta', 'data': 'data'}
    read_only_vars = {}
    _js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        # Deserialization entry point used when building from API payloads;
        # allows setting read-only attributes, unlike __init__.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        constant_args = {'_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes}
        # Distribute kwargs across the composed (allOf) schema instances.
        composed_info = validate_get_composed_info(constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for (var_name, var_value) in kwargs.items():
            # Optionally drop unknown keys when the configuration says so.
            if ((var_name in discarded_args) and (self._configuration is not None) and self._configuration.discard_unknown_keys and self._additional_properties_model_instances):
                continue
            setattr(self, var_name, var_value)
        return self
    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', '_composed_instances', '_var_name_to_model_instances', '_additional_properties_model_instances'])
    _js_args_to_python_args
    def __init__(self, *args, **kwargs):
        # Same flow as _from_openapi_data, but rejects read-only attributes.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        constant_args = {'_check_type': _check_type, '_path_to_item': _path_to_item, '_spec_property_naming': _spec_property_naming, '_configuration': _configuration, '_visited_composed_classes': self._visited_composed_classes}
        composed_info = validate_get_composed_info(constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for (var_name, var_value) in kwargs.items():
            if ((var_name in discarded_args) and (self._configuration is not None) and self._configuration.discard_unknown_keys and self._additional_properties_model_instances):
                continue
            setattr(self, var_name, var_value)
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
    _property
    def _composed_schemas():
        lazy_import()
        return {'anyOf': [], 'allOf': [Pagination, TlsBulkCertificatesResponseAllOf], 'oneOf': []}
class LinuxFirewall(Firewall):
    """iptables-backed firewall driver.

    Installs a private testing chain, jumped to from INPUT and OUTPUT, and
    adds per-IP DROP rules into it.  All iptables calls run with -w (wait on
    the xtables lock) and as root via the connector helper.
    """

    def __init__(self, device, config):
        super().__init__(device, config)
        self._connector_helper = ConnectorHelper(self._device)
        # Random suffix so concurrent test runs never collide on chain names.
        self._testing_chain_name = ('xv_leak_testing_' + ''.join((random.choice(string.ascii_uppercase) for _ in range(8))))
        self._create_testing_chain()

    def __del__(self):
        self._delete_testing_chain()

    def _block_ip_args_rules(self, ip):
        """Rule argument lists that drop traffic both from and to *ip*."""
        return [[self._testing_chain_name, '-s', ip, '-j', 'DROP'], [self._testing_chain_name, '-d', ip, '-j', 'DROP']]

    def _chain_exists(self, chain):
        # `iptables --list <chain>` exits non-zero for an unknown chain.
        (ret, _, _) = self._connector_helper.execute_command(['iptables', '-w', '--list', chain], root=True)
        return (ret == 0)

    def _create_chain(self, chain):
        L.debug('Creating iptables chain {}'.format(chain))
        if self._chain_exists(chain):
            L.debug('iptables chain {} exists'.format(chain))
            return
        self._connector_helper.check_command(['iptables', '-w', '-N', chain], root=True)

    def _delete_chain(self, chain):
        L.debug('Deleting iptables chain {}'.format(chain))
        if (not self._chain_exists(chain)):
            L.debug("iptables chain {} doesn't exist".format(chain))
            # Bug fix: previously fell through and ran `iptables -X` on a
            # nonexistent chain, making check_command fail (mirrors the guard
            # already present in _delete_rule).
            return
        self._connector_helper.check_command(['iptables', '-w', '-X', chain], root=True)

    def _rule_exists(self, rule_args):
        # `iptables -C` checks for an exact rule match.
        (ret, _, _) = self._connector_helper.execute_command((['iptables', '-w', '-C'] + rule_args), root=True)
        return (ret == 0)

    def _create_rule(self, rule_args):
        L.debug('Creating iptables rule {}'.format(rule_args))
        if self._rule_exists(rule_args):
            L.debug('iptables rule {} already exists'.format(rule_args))
            # Bug fix: previously fell through and appended a duplicate rule
            # (mirrors the guard already present in _create_chain).
            return
        self._connector_helper.check_command((['iptables', '-w', '-A'] + rule_args), root=True)

    def _delete_rule(self, rule_args):
        L.debug('Deleting iptables rule {}'.format(rule_args))
        if (not self._rule_exists(rule_args)):
            L.debug("iptables rule {} doesn't exist".format(rule_args))
            return
        self._connector_helper.check_command((['iptables', '-w', '-D'] + rule_args), root=True)

    def _jump_rule_args(self, chain):
        """Args for the rule that jumps *chain* into the testing chain."""
        return [chain, '-j', self._testing_chain_name]

    def _delete_testing_chain(self):
        # Flush rules first: a non-empty chain cannot be deleted.
        self._connector_helper.check_command(['iptables', '-w', '-F', self._testing_chain_name], root=True)
        L.debug('Cleaning up iptables testing chain')
        for source_chain in ['INPUT', 'OUTPUT']:
            self._delete_rule(self._jump_rule_args(source_chain))
        self._delete_chain(self._testing_chain_name)

    def _create_testing_chain(self):
        self._create_chain(self._testing_chain_name)
        for source_chain in ['INPUT', 'OUTPUT']:
            self._create_rule(self._jump_rule_args(source_chain))

    def block_ip(self, ip):
        """Drop all traffic to and from *ip*."""
        rules = self._block_ip_args_rules(ip)
        for rule in rules:
            self._create_rule(rule)

    def unblock_ip(self, ip):
        """Remove the DROP rules previously installed for *ip*."""
        rules = self._block_ip_args_rules(ip)
        for rule in rules:
            self._delete_rule(rule)
class TestDataMapperRegistryPopulation(unittest.TestCase, BaseRegistryPopulationTests):
    """Checks that importing d2go.data.dataset_mappers populates the data
    mapper registry (hooks consumed by BaseRegistryPopulationTests)."""

    def get_registered_items(self):
        from d2go.data.dataset_mappers import D2GO_DATA_MAPPER_REGISTRY
        # Registry iteration yields (name, class) pairs; keep only the names.
        return [k for (k, v) in D2GO_DATA_MAPPER_REGISTRY]

    def import_all_modules(self):
        import d2go.data.dataset_mappers
        # Importing submodules triggers their registration side effects.
        import_submodules(d2go.data.dataset_mappers)
def get_control_tabs(config):
    """Build one dash Tab per vehicle, with status cards and (optionally)
    remote-control buttons/switches."""
    tabs = []
    for car in config.myp.vehicles_list:
        # Fall back to the VIN when the user gave the car no label.
        if (car.label is None):
            label = car.vin
        else:
            label = car.label
        myp: PSAClient = config.myp
        el = []
        buttons_row = []
        if (car.status is not None):
            cards = OrderedDict({'Battery SOC': {'text': [card_value_div('battery_value', '%', value=convert_value_to_str(car.status.get_energy('Electric').level))], 'src': 'assets/images/battery-charge.svg'}, 'Mileage': {'text': [card_value_div('mileage_value', 'km', value=convert_value_to_str(car.status.timed_odometer.mileage))], 'src': 'assets/images/mileage.svg'}})
            soh = Database.get_last_soh_by_vin(car.vin)
            if soh:
                # Insert the SOH card between SOC and Mileage.
                cards['Battery SOH'] = {'text': [card_value_div('battery_soh_value', '%', value=convert_value_to_str(soh))], 'src': 'assets/images/battery-soh.svg'}
                cards.move_to_end('Mileage')
            el.append(dbc.Container(dbc.Row(children=create_card(cards)), fluid=True))
        if config.remote_control:
            try:
                preconditionning_state = (car.status.preconditionning.air_conditioning.status != 'Disabled')
                charging_state = (car.status.get_energy('Electric').charging.status == 'InProgress')
                refresh_date = car.status.get_energy('Electric').updated_at.astimezone().strftime('%X %x')
                buttons_row.extend([Button(REFRESH_SWITCH, car.vin, html.Div([html.Img(src='assets/images/sync.svg', width='50px'), refresh_date]), myp.remote_client.wakeup).get_html(), Switch(CHARGE_SWITCH, car.vin, 'Charge', myp.remote_client.charge_now, charging_state).get_html(), Switch(PRECONDITIONING_SWITCH, car.vin, 'Preconditioning', myp.remote_client.preconditioning, preconditionning_state).get_html()])
            except (AttributeError, TypeError):
                # Partial or missing status payload: skip the remote controls.
                logger.exception('get_control_tabs:')
        if (not config.offline):
            buttons_row.append(Switch(ABRP_SWITCH, car.vin, 'Send data to ABRP', myp.abrp.enable_abrp, (car.vin in config.myp.abrp.abrp_enable_vin)).get_html())
        tabs.append(dbc.Tab(label=label, id=('tab-' + car.vin), children=[dbc.Row(buttons_row), *el]))
    return dbc.Tabs(id='control-tabs', children=tabs)
class AsyncCursorProxy(wrapt.ObjectProxy):
    """APM tracing proxy around an async DB-API cursor: records
    callproc/execute/executemany as database spans."""

    # Database flavour used as the span subtype; concrete subclasses set it.
    provider_name = None
    # Statements whose signature starts with these report rows_affected.
    DML_QUERIES = ('INSERT', 'DELETE', 'UPDATE')

    def __init__(self, wrapped, destination_info=None):
        super(AsyncCursorProxy, self).__init__(wrapped)
        # The `_self_` prefix keeps the attribute on the proxy itself rather
        # than on the wrapped cursor.
        self._self_destination_info = (destination_info or {})

    async def callproc(self, procname, params=None):
        return (await self._trace_sql(self.__wrapped__.callproc, procname, params, action=EXEC_ACTION))

    async def execute(self, sql, params=None):
        return (await self._trace_sql(self.__wrapped__.execute, sql, params))

    async def executemany(self, sql, param_list):
        return (await self._trace_sql(self.__wrapped__.executemany, sql, param_list))

    def _bake_sql(self, sql):
        # Hook for subclasses to normalize/compose the SQL before tracing.
        return sql

    async def _trace_sql(self, method, sql, params, action=QUERY_ACTION):
        """Run *method* inside a db span; the span signature is the procedure
        name for EXEC actions, otherwise extracted from the SQL."""
        sql_string = self._bake_sql(sql)
        if (action == EXEC_ACTION):
            signature = (sql_string + '()')
        else:
            signature = self.extract_signature(sql_string)
        # Cap the statement size recorded on the span.
        sql_string = shorten(sql_string, string_length=10000)
        async with async_capture_span(signature, span_type='db', span_subtype=self.provider_name, span_action=action, extra={'db': {'type': 'sql', 'statement': sql_string, 'instance': getattr(self, '_self_database', None)}, 'destination': self._self_destination_info}, skip_frames=1, leaf=True) as span:
            if (params is None):
                result = (await method(sql))
            else:
                result = (await method(sql, params))
            # Only report rowcount for DML with a meaningful count.
            if (span and (self.rowcount not in ((- 1), None)) and signature.startswith(self.DML_QUERIES)):
                span.update_context('db', {'rows_affected': self.rowcount})
            return result

    def extract_signature(self, sql):
        # Dialect-specific; must be provided by subclasses.
        raise NotImplementedError()
class BaseProxyPeer(Service):
    """Base for peers that are addressed through the event bus instead of a
    direct transport connection."""

    def __init__(self, session: SessionAPI, event_bus: EndpointAPI) -> None:
        self.logger = get_logger('trinity.protocol.common.BaseProxyPeer')
        self.event_bus = event_bus
        self.session = session

    def __str__(self) -> str:
        return f'{self.__class__.__name__} {self.session}'

    async def run(self) -> None:
        self.logger.debug('Starting Proxy Peer %s', self)
        # Nothing to do actively; stay alive until externally cancelled.
        (await self.manager.wait_finished())

    async def disconnect(self, reason: DisconnectReason) -> None:
        """Ask the actual peer (via the event bus) to disconnect, then stop
        this proxy service."""
        self.logger.debug('Forwarding `disconnect()` call from proxy to actual peer: %s', self)
        (await self.event_bus.broadcast(DisconnectPeerEvent(self.session, reason), TO_NETWORKING_BROADCAST_CONFIG))
        (await self.manager.stop())
def validate_rlp_equal(obj_a: BaseBlock, obj_b: BaseBlock, obj_a_name: str=None, obj_b_name: str=None) -> None:
    """Raise ValidationError enumerating every field on which the two RLP
    objects differ; no-op when they compare equal.

    Raises TypeError when the objects compare unequal but the diff is empty
    (inconsistent equality/diff implementations).
    """
    if (obj_a == obj_b):
        return
    # Default display names: "<ClassName>_a" / "<ClassName>_b".
    if (obj_a_name is None):
        obj_a_name = (obj_a.__class__.__name__ + '_a')
    if (obj_b_name is None):
        obj_b_name = (obj_b.__class__.__name__ + '_b')
    diff = diff_rlp_object(obj_a, obj_b)
    if (len(diff) == 0):
        raise TypeError(f'{obj_a_name} ({obj_a!r}) != {obj_b_name} ({obj_b!r}) but got an empty diff')
    err_fields = '\n - '.join(_humanized_diff_elements(diff, obj_a_name, obj_b_name))
    error_message = f'''Mismatch between {obj_a_name} and {obj_b_name} on {len(diff)} fields:
    - {err_fields}'''
    raise ValidationError(error_message)
(schema=bodhi.server.schemas.SaveReleaseSchema(), permission='admin', renderer='json', error_handler=bodhi.server.services.errors.json_handler, validators=(colander_body_validator, validate_tags, validate_enums, validate_eol_date))
# NOTE(review): the parenthesized line above is missing its decorator name
# (it reads like a cornice service decorator stripped during extraction);
# confirm against the original source.
def save_release(request):
    """Create a new Release or edit an existing one from validated request
    data.  Archiving a release obsoletes its pending updates; freezing one
    leaves a comment on stable-bound updates."""
    data = request.validated
    edited = data.pop('edited', None)
    # CSRF token is validation-only; never persisted on the model.
    data.pop('csrf_token', None)
    try:
        if (edited is None):
            log.info(('Creating a new release: %s' % data['name']))
            r = Release(**data)
        else:
            log.info(('Editing release: %s' % edited))
            r = request.db.query(Release).filter((Release.name == edited)).one()
            for (k, v) in data.items():
                # Transition to archived: obsolete every update that is not
                # already in a terminal status.
                if ((k == 'state') and (v == ReleaseState.archived) and (r.state != ReleaseState.archived)):
                    updates = request.db.query(Update).filter((Update.release_id == r.id)).filter(Update.status.notin_([UpdateStatus.obsolete, UpdateStatus.stable, UpdateStatus.unpushed])).all()
                    for u in updates:
                        u.status = UpdateStatus.obsolete
                        u.request = None
                        u.comment(request.db, 'This update is marked obsolete because the {} release is archived.'.format(u.release.name), author='bodhi')
                # Transition to frozen: annotate unlocked stable-bound updates.
                if ((k == 'state') and (v == ReleaseState.frozen) and (r.state != ReleaseState.frozen)):
                    updates = request.db.query(Update).filter((Update.release_id == r.id)).filter((Update.request == UpdateRequest.stable)).filter(Update.locked.is_(False)).all()
                    for u in updates:
                        u.comment(request.db, 'There is an ongoing freeze; this will be pushed to stable after the freeze is over.', author='bodhi')
                setattr(r, k, v)
    except Exception as e:
        log.exception(e)
        request.errors.add('body', 'release', ('Unable to create/edit release: %s' % e))
        return
    request.db.add(r)
    request.db.commit()
    # Cached release lookups are now stale.
    Release.all_releases.cache_clear()
    Release.get_tags.cache_clear()
    return r
class FaucetUntaggedNSLoopTest(FaucetUntaggedTest):
    """Untagged VLAN test where ports only admit IPv6 neighbour solicitation.

    Every port carries the 'nsonly' ACL (allow ICMPv6 type 135, drop all
    else); the test then asserts broadcasts never loop back to the sender.
    """
    # ACL/vlan YAML; %u is filled with the IPv6 ethertype constant.
    CONFIG_GLOBAL = ('\nacls:\n nsonly:\n - rule:\n dl_type: %u\n ip_proto: 58\n icmpv6_type: 135\n actions:\n allow: 1\n - rule:\n actions:\n allow: 0\nvlans:\n 100:\n description: "untagged"\n' % IPV6_ETH)
    # All four ports: native VLAN 100 with the nsonly ACL applied inbound.
    CONFIG = '\n interfaces:\n %(port_1)d:\n native_vlan: 100\n acl_in: nsonly\n %(port_2)d:\n native_vlan: 100\n acl_in: nsonly\n %(port_3)d:\n native_vlan: 100\n acl_in: nsonly\n %(port_4)d:\n native_vlan: 100\n acl_in: nsonly\n '
    def test_untagged(self):
        """Broadcast traffic must not be reflected back to its source port."""
        self.verify_no_bcast_to_self()
class SparkChiSquare(SparkStatTestImpl):
    """Chi-square two-sample drift test over Spark DataFrames.

    Compares per-category counts in the current data against reference
    counts rescaled to the current sample size; drift is flagged when the
    p-value drops below the threshold.
    """
    base_stat_test = chi_stat_test

    def __call__(self, data: SpartStatTestData, feature_type: ColumnType, threshold: float) -> StatTestFuncReturns:
        from scipy.stats import chisquare
        current = data.current_data
        reference = data.reference_data
        column = data.column_name
        # Rescale reference counts so both samples have comparable totals.
        scale = current.count() / reference.count()
        scaled_reference = reference.groupby(column).count().withColumn('count', col('count') * scale)
        reference_counts = {row[column]: row['count'] for row in scaled_reference.collect()}
        current_counts = {row[column]: row['count'] for row in current.groupby(column).count().collect()}
        # Union of categories seen in either sample; missing ones count as 0.
        categories = current_counts.keys() | reference_counts.keys()
        observed = [current_counts.get(category, 0) for category in categories]
        expected = [reference_counts.get(category, 0) for category in categories]
        p_value = chisquare(observed, expected)[1]
        return (p_value, p_value < threshold)
def extractBruinTranslation(item):
    """Map a Bruin Translation feed item to a release message.

    Returns None for previews / items without chapter info, a release message
    for recognised 'Otherworldly Evil Monarch' posts, and False otherwise.
    """
    title = item['title']
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(title)
    if 'preview' in title.lower() or not (chp or vol or frag):
        return None
    if not title:
        return False
    # Uncategorized posts titled "Volume ..." belong to this series.
    if item['tags'] == ['Uncategorized'] and title.startswith('Volume'):
        return buildReleaseMessageWithType(item, 'Otherworldly Evil Monarch', vol, chp, frag=frag, postfix=postfix)
    return False
.usefixtures('use_tmpdir')
def test_that_spaces_in_forward_model_args_are_dropped():
    """Whitespace around FORWARD_MODEL argument values must be stripped."""
    config_path = 'test.ert'
    # "<VERSION>=smersion ," carries a trailing space on purpose.
    config_text = dedent('\n NUM_REALIZATIONS 1\n FORWARD_MODEL ECLIPSE100(<VERSION>=smersion , <NUM_CPU>=42)\n ')
    with open(config_path, 'w', encoding='utf-8') as stream:
        stream.write(config_text)
    config = ErtConfig.from_file(config_path)
    assert len(config.forward_model_list) == 1
    first_job = config.forward_model_list[0]
    assert first_job.private_args.get('<VERSION>') == 'smersion'
()
('--reloader/--no-reloader', default=None)
('--debug/--no-debug', default=None)
('--host', default=None)
('--port', default=None)
_app
def runserver(app=None, reloader=None, debug=None, host=None, port=None):
    """Start the application's development server.

    Each option falls back to the app config (DEBUG, RELOADER, HOST, PORT)
    when not given on the command line, then to a built-in default.
    """
    settings = {
        'use_reloader': reloader or app.config.get('RELOADER', False),
        'debug': debug or app.config.get('DEBUG', False),
        'host': host or app.config.get('HOST', '127.0.0.1'),
        'port': port or app.config.get('PORT', 5000),
    }
    app.run(**settings)
class TestDisabledLinenums(util.MdCase):
    """Verify that a global 'linenums: False' wins over per-fence requests."""
    extension = ['pymdownx.highlight', 'pymdownx.superfences']
    extension_configs = {'pymdownx.highlight': {'linenums': False}}
    def test_global_disable(self):
        """A fence asking for linenums="1" must still render without line numbers."""
        self.check_markdown('\n ```python linenums="1"\n import test\n test.test()\n ```\n ', '\n <div class="highlight"><pre><span></span><code><span class="kn">import</span> <span class="nn">test</span>\n <span class="n">test</span><span class="o">.</span><span class="n">test</span><span class="p">()</span>\n </code></pre></div>\n ', True)
class IntegratedHingeBox(Boxes):
    """Box with a lid attached by integrated chest hinges.

    Edge letters: F/f finger joints, e plain, O/o and P/p the two chest-hinge
    edge pairs, q the hinge pin edge (from ChestHingeSettings).
    """
    ui_group = 'Box'

    def __init__(self) -> None:
        Boxes.__init__(self)
        self.addSettingsArgs(edges.FingerJointSettings)
        self.addSettingsArgs(edges.ChestHingeSettings)
        self.buildArgParser('x', 'y', 'h', 'outside')
        self.argparser.add_argument('--lidheight', action='store', type=float, default=20.0, help='height of lid in mm')

    def render(self):
        """Draw all walls of the box and its hinged lid."""
        (x, y, h, hl) = (self.x, self.y, self.h, self.lidheight)
        # With --outside, the given dimensions are outer; shrink by material.
        if self.outside:
            x = self.adjustSize(x)
            y = self.adjustSize(y)
            h = self.adjustSize(h)
        t = self.thickness
        # Heights consumed by the two hinge edge types.
        hy = self.edges['O'].startwidth()
        hy2 = self.edges['P'].startwidth()
        # Compound edges for the back wall: finger joint below, plain above
        # where the hinge sits (and the mirrored variant).
        e1 = edges.CompoundEdge(self, 'Fe', ((h - hy), hy))
        e2 = edges.CompoundEdge(self, 'eF', (hy, (h - hy)))
        e_back = ('F', e1, 'e', e2)
        # Side walls of body and lid, left and right hinge variants.
        self.rectangularWall(y, (h - hy), 'FfOf', ignore_widths=[2], move='up')
        self.rectangularWall(y, (hl - hy2), 'pfFf', ignore_widths=[1], move='up')
        self.rectangularWall(y, (h - hy), 'Ffof', ignore_widths=[5], move='up')
        self.rectangularWall(y, (hl - hy2), 'PfFf', ignore_widths=[6], move='up')
        # Front and back walls of the body, then of the lid.
        self.rectangularWall(x, h, 'FFeF', move='up')
        self.rectangularWall(x, h, e_back, move='up')
        self.rectangularWall(x, hl, 'FFeF', move='up')
        self.rectangularWall(x, (hl - hy2), 'FFqF', move='up')
        # Bottom and top panels.
        self.rectangularWall(y, x, 'ffff', move='up')
        self.rectangularWall(y, x, 'ffff')
def choice(values, weights=None, _randomstate_lambda=None):
    """Decorator factory: call the wrapped method with a randomly chosen value.

    values: sequence to sample from.
    weights: optional probabilities forwarded as ``p`` to the random state's
        choice().
    _randomstate_lambda: optional override for how the random state is
        resolved from the instance.
    """
    def decorator(func):
        # NOTE(review): the two bare statements below look like stripped
        # decorator remnants (e.g. wraps(func) / a synthetic marker); left
        # untouched pending confirmation against the original source.
        (func)
        _synthetic()
        def decorator_inner(self, *args, **kw):
            # Resolve the RNG bound to this instance (or via the override).
            randomstate = __get_random_state(self, _randomstate_lambda)
            value = randomstate.choice(values, p=weights)
            # The sampled value replaces any caller-supplied arguments.
            return func(self, value)
        return decorator_inner
    return decorator
def _load_config(project_path: Path) -> Dict:
    """Load the project's configuration file as a dict.

    YAML files go through yaml.safe_load; anything else is treated as JSON
    with ``//`` line comments stripped first.  Returns {} when the project
    has no configuration file.
    """
    config_path = _get_project_config_path(project_path)
    if config_path is None:
        return {}
    with config_path.open() as stream:
        if config_path.suffix in ('.yaml', '.yml'):
            return yaml.safe_load(stream) or {}
        # Strip //-comments (that contain no quote) before JSON parsing.
        raw_text = stream.read()
        cleaned = re.sub('\\/\\/[^"]*?(?=\\n|$)', '', raw_text)
        return json.loads(cleaned)
('pyscf')
.parametrize('precon_update, ref_cycles', [(1, 14), (5, 15), (10, 19)])
def test_precon_update(precon_update, ref_cycles):
    """Preconditioner update interval changes cycle count, not convergence."""
    geometry = geom_loader('lib:h2o_shaken.xyz')
    geometry.set_calculator(PySCF(basis='sto3g'))
    optimizer = PreconSteepestDescent(
        geometry,
        thresh='gau_tight',
        max_cycles=100,
        precon_update=precon_update,
    )
    optimizer.run()
    assert optimizer.is_converged
    assert optimizer.cur_cycle == ref_cycles
    assert geometry.energy == pytest.approx(-74.0)
class Brightness(Filter):
    """Filter that scales a color's channels through the linear transfer curve."""
    NAME = 'brightness'
    ALLOWED_SPACES = ('srgb-linear', 'srgb')

    def filter(self, color: Color, amount: (float | None), **kwargs: Any) -> None:
        """Apply brightness *amount* (default 1, clamped to >= 0) in place,
        leaving the last (alpha) channel untouched."""
        factor = alg.clamp(amount if amount is not None else 1, 0)
        for index, channel in enumerate(color[:-1]):
            color[index] = linear_transfer(channel, factor)
def writeSave():
    """Persist the full game state to 'chess.save'.

    The first line is the start-data marker ('True'); the remaining sections
    are written by the dedicated writers (board, globals, piece arrays,
    rotated piece arrays, pawn moves, first-move pawns).
    """
    startData = 'True'
    # Context manager guarantees the file is closed even if a writer raises
    # (the original left the handle open on error).
    with open('chess.save', 'w') as save:
        save.write(startData)
        save.write('\n')
        writeBoard(save)
        writeGlobal(save)
        writePiecesArrays(save)
        write_r_PiecesArrays(save)
        write_p_moves(save)
        write_firstPawns(save)
        save.write('\n')
.parametrize('calc_cls, calc_kwargs, ref_energy, ref_charges', [pytest.param(Gaussian16, {'route': 'BP86 def2SVP'}, (- 40.), ((- 0.195353), 0.048838, 0.048838, 0.048838, 0.048838), marks=using('gaussian16'))])
def test_parse_charges(calc_cls, calc_kwargs, ref_energy, ref_charges):
    """Energy and parsed atomic charges must match the reference values."""
    geometry = geom_loader('lib:methane_bp86_def2svp_opt.xyz')
    calculator = calc_cls(**calc_kwargs)
    geometry.set_calculator(calculator)
    force_results = calculator.get_forces(geometry.atoms, geometry.coords)
    assert force_results['energy'] == pytest.approx(ref_energy)
    parsed = calculator.parse_charges()
    np.testing.assert_allclose(parsed, ref_charges, atol=1e-05)
def test_livefootballscores_deprecated_font_colour(caplog):
    """`font_colour` still works: it warns about deprecation and maps to `foreground`."""
    widget = qtile_extras.widget.LiveFootballScores(font_colour='ffffff')
    # First log record must be the deprecation warning with the exact text.
    assert (caplog.record_tuples[0] == ('libqtile', logging.WARNING, 'The use of `font_colour` is deprecated. Please update your config to use `foreground` instead.'))
    assert (widget.foreground == 'ffffff')
def _register_types(TYPE_REGISTER, component, cnames, func, space_only=True):
    """Register the component -> cnames mapping for *func*'s class.

    Validates that every cname is a Space (when space_only), wraps *func*
    unchanged, and asserts that a re-registration uses identical cnames
    unless the EAGERX_RELOAD environment variable enables reloading.
    Returns the wrapping function.
    """
    name_split = func.__qualname__.split('.')
    cls_name = name_split[0]
    fn_name = name_split[1]
    # Registry keys: "<module>/<Class>" identifies the entity.
    entity_id = ((func.__module__ + '/') + cls_name)
    entry = ((func.__module__ + '/') + func.__qualname__)
    if space_only:
        for (key, space) in cnames.items():
            if (space is None):
                continue
            flag = isinstance(space, Space)
            # Bug fix: original message read '"{key}"instead.' (missing space).
            assert flag, f'TYPE REGISTRATION ERROR: [{cls_name}][{fn_name}][{component}]: "{space}" is an invalid space. Please provide a valid space for "{key}" instead.'
    log.logdebug(f'[{cls_name}][{fn_name}]: {component}={cnames}, entry={entry}')
    # NOTE(review): the bare statement below looks like a stripped decorator
    # remnant (e.g. wraps(func)); left untouched.
    (func)
    def registered_fn(*args, **kwargs):
        # Thin pass-through wrapper; registration happens at decoration time.
        return func(*args, **kwargs)
    if (entity_id not in TYPE_REGISTER):
        TYPE_REGISTER[entity_id] = dict()
    if (component in TYPE_REGISTER[entity_id]):
        log.logdebug(f'[{entity_id}][{component}]: {component}={cnames}, entry={entry}')
        # NOTE(review): eval() of an environment variable is risky; kept for
        # compatibility — only '0'/'1'-style values are expected here.
        flag = ((cnames == TYPE_REGISTER[entity_id][component]) or bool(eval(os.environ.get('EAGERX_RELOAD', '0'))))
        assert flag, f'There is already a [{entity_id}][{component}] registered with cnames "{TYPE_REGISTER[entity_id][component]}", and they do not match the cnames of this function: "{cnames}".'
    TYPE_REGISTER[entity_id][component] = cnames
    return registered_fn
class OptionPlotoptionsAreaDatasorting(Options):
    """Accessor wrapper for the ``plotOptions.area.dataSorting`` option group.

    NOTE(review): every name below is defined twice (getter-style then
    setter-style). The original almost certainly used @property /
    @<name>.setter decorators that were lost — as written, the second def
    shadows the first; confirm against the upstream source.
    """
    def enabled(self):
        # Getter: no stored default for this flag.
        return self._config_get(None)
    def enabled(self, flag: bool):
        # Setter: store as a plain (non-JS) value.
        self._config(flag, js_type=False)
    def matchByName(self):
        return self._config_get(None)
    def matchByName(self, flag: bool):
        self._config(flag, js_type=False)
    def sortKey(self):
        # Getter: sorting defaults to the 'y' value.
        return self._config_get('y')
    def sortKey(self, text: str):
        self._config(text, js_type=False)
class King(Piece):
    """King chess piece: drag-moves at most one square in any direction."""
    def __init__(self, x, y, c):
        super().__init__(x, y, c)
        self.set_letter('K')
    def drag(self, new_p, pieces):
        """Follow the cursor while grabbed, sliding along the best-matching axis.

        The candidate direction vectors cover both diagonals, horizontal,
        and vertical movement; the chosen path is normalized and each
        component clamped to [-1, 1] so the king never moves more than one
        square per axis.
        """
        if self.grabbed:
            (path, dist) = self.select_path((self.start_x, self.start_y), [[1, 1], [(- 1), 1], [1, 0], [0, 1]], new_p)
            path_len = math.sqrt(((path[0] ** 2) + (path[1] ** 2)))
            self.slide(clamp(((path[0] * dist) / path_len), (- 1), 1), clamp(((path[1] * dist) / path_len), (- 1), 1), pieces)
_os(*metadata.platforms)
def main():
    """Emulate payload decoding via a masqueraded openssl binary (macOS RTA)."""
    masquerade = '/tmp/openssl'
    common.create_macos_masquerade(masquerade)
    common.log('Launching fake openssl commands to decode payload')
    # Run the fake binary with openssl-style decrypt flags, then clean up.
    common.execute([masquerade, '-out', '/tmp/test', 'enc', '-d'], timeout=10, kill=True)
    common.remove_file(masquerade)
def get_queue_stats(dp, waiters, port_no=None, queue_id=None, to_user=True):
    """Collect OFPQueueStats from datapath *dp*, wrapped by dpid.

    port_no/queue_id default to the "any"/"all" wildcards; when to_user is
    set, both are translated from user representation and property type
    codes are mapped to user-facing names ('UNKNOWN' when unmapped).
    """
    if port_no is None:
        port_no = dp.ofproto.OFPP_ANY
    else:
        port_no = UTIL.ofp_port_from_user(port_no)
    if queue_id is None:
        queue_id = dp.ofproto.OFPQ_ALL
    else:
        queue_id = UTIL.ofp_queue_from_user(queue_id)
    request = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, port_no, queue_id)
    replies = []
    ofctl_utils.send_stats_request(dp, request, waiters, replies, LOG)
    results = []
    for reply in replies:
        for stat in reply.body:
            entry = stat.to_jsondict()[stat.__class__.__name__]
            prop_dicts = []
            for prop in stat.properties:
                prop_dict = prop.to_jsondict()[prop.__class__.__name__]
                if to_user:
                    translated = UTIL.ofp_queue_stats_prop_type_to_user(prop.type)
                    if translated != prop_dict['type']:
                        prop_dict['type'] = translated
                    else:
                        prop_dict['type'] = 'UNKNOWN'
                prop_dicts.append(prop_dict)
            entry['properties'] = prop_dicts
            results.append(entry)
    return wrap_dpid_dict(dp, results, to_user)
def extractWwwFringecapybaraCom(item):
    """Map www.fringecapybara.com feed items to release messages by tag.

    Returns None for previews / items without chapter info, a release
    message when a known tag matches, and False otherwise.
    """
    title = item['title']
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(title)
    if 'preview' in title.lower() or not (chp or vol):
        return None
    tag_to_series = {
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }
    for tag, (series_name, tl_type) in tag_to_series.items():
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
('prefix,override_type', [param('', OverrideType.CHANGE, id='change'), param('+', OverrideType.ADD, id='add'), param('++', OverrideType.FORCE_ADD, id='force_add'), param('~', OverrideType.DEL, id='del')])
('value,expected_key,expected_value,expected_value_type', [param('key=value', 'key', 'value', ValueType.ELEMENT, id='simple_value'), param("key='1,2,3'", 'key', QuotedString(text='1,2,3', quote=Quote.single), ValueType.ELEMENT, id='simple_value'), param("key=''", 'key', QuotedString(text='', quote=Quote.single), ValueType.ELEMENT, id='unicode'), param('key=value-123', 'key', 'value-123', ValueType.ELEMENT, id='id-int'), param('key=value-1.0', 'key', 'value-1.0', ValueType.ELEMENT, id='id-float'), param('key=value-true', 'key', 'value-true', ValueType.ELEMENT, id='id-bool'), param('key=', 'key', '', ValueType.ELEMENT, id='empty_value'), param("key='foo,bar'", 'key', QuotedString(text='foo,bar', quote=Quote.single), ValueType.ELEMENT, id='quoted_value'), param("key='foo , bar'", 'key', QuotedString(text='foo , bar', quote=Quote.single), ValueType.ELEMENT, id='quoted_value'), param('key=1,2,3', 'key', ChoiceSweep(list=[1, 2, 3], simple_form=True), ValueType.SIMPLE_CHOICE_SWEEP, id='choice'), param('key=choice(1)', 'key', ChoiceSweep(list=[1]), ValueType.CHOICE_SWEEP, id='choice_1_element'), param('key=choice(1,2,3)', 'key', ChoiceSweep(list=[1, 2, 3]), ValueType.CHOICE_SWEEP, id='choice_sweep'), param('key=[1,2],[3,4]', 'key', ChoiceSweep(list=[[1, 2], [3, 4]], simple_form=True), ValueType.SIMPLE_CHOICE_SWEEP, id='choice'), param('key=choice([1,2],[3,4])', 'key', ChoiceSweep(list=[[1, 2], [3, 4]]), ValueType.CHOICE_SWEEP, id='choice'), param('key=range(0,2)', 'key', RangeSweep(start=0, stop=2), ValueType.RANGE_SWEEP, id='range'), param('key=range(1,5,2)', 'key', RangeSweep(start=1, stop=5, step=2), ValueType.RANGE_SWEEP, id='range'), param('key=range(10.0, 11.0)', 'key', RangeSweep(start=10.0, stop=11.0, step=1.0), ValueType.RANGE_SWEEP, id='range'), param('key=interval(0,1)', 'key', IntervalSweep(start=0.0, end=1.0), ValueType.INTERVAL_SWEEP, id='interval'), param('key=tag(a,b,choice([1,2],[3,4]))', 'key', ChoiceSweep(list=[[1, 2], [3, 4]], tags={'a', 'b'}), 
ValueType.CHOICE_SWEEP, id='choice:tags'), param('key=tag(a,b,interval(0,1))', 'key', IntervalSweep(tags={'a', 'b'}, start=0.0, end=1.0), ValueType.INTERVAL_SWEEP, id='interval:tags'), param('key=str([1,2])', 'key', ['1', '2'], ValueType.ELEMENT, id='cast_list'), param('choice=reverse', 'choice', 'reverse', ValueType.ELEMENT, id='using_function_name_in_key')])
def test_override(prefix: str, value: str, override_type: OverrideType, expected_key: str, expected_value: Any, expected_value_type: ValueType) -> None:
    """Parsing a full override line yields the expected Override structure."""
    input_line = prefix + value
    parsed = parse_rule(input_line, 'override')
    assert parsed == Override(
        input_line=input_line,
        type=override_type,
        key_or_group=expected_key,
        _value=expected_value,
        value_type=expected_value_type,
    )
def setup_vertigo(camera):
    """Rig *camera* for a vertigo (dolly-zoom) effect in Maya.

    Creates a world-space locator pinned to the current vertigo locator
    position and an expression that rescales focalLength as the locator's
    tz changes, keeping the subject framing constant.
    """
    global vertigo_attr_name
    global vertigo_global_attr_name
    # The vertigo locator is the node feeding the camera's vertigo attribute.
    vertigo_loc = camera.attr(vertigo_attr_name).inputs()[0]
    # Baseline depth and focal length for the expression ratio below.
    z1 = vertigo_loc.tz.get()
    f1 = camera.focalLength.get()
    world_loc = pm.spaceLocator(n=(camera.name() + 'vertigo_space_loc#'))
    if (not camera.hasAttr(vertigo_global_attr_name)):
        pm.addAttr(camera, ln=vertigo_global_attr_name, at='message')
    # Track the world locator on the camera via a message connection.
    (world_loc.message >> camera.attr(vertigo_global_attr_name))
    # Snap the world locator onto the vertigo locator, then free it in world space.
    pm.parent(world_loc, vertigo_loc)
    world_loc.t.set(0, 0, 0)
    world_loc.r.set(0, 0, 0)
    pm.parent(world_loc, w=True)
    vertigo_loc.tx.unlock()
    vertigo_loc.ty.unlock()
    pm.pointConstraint(world_loc, vertigo_loc)
    # focalLength follows depth proportionally: f = (tz / z1) * f1.
    expr_str = '{camera_name}.focalLength = ({vertigo_loc_name}.tz / {z1}) * {f1};'.format(camera_name=camera.name(), vertigo_loc_name=vertigo_loc.name(), z1=str(z1), f1=str(f1))
    expr = pm.expression(s=expr_str)
def run_reproducible_rollout(env: MazeEnv, pick_action: Callable, n_steps: int=100) -> List[ObservationType]:
    """Run a deterministic rollout of *n_steps* and collect all observations.

    Seeds the env and every policy's action space with a fixed seed so that
    repeated calls produce identical trajectories.
    """
    env.seed(1234)
    # One independently seeded action space per policy.
    seeded_spaces = dict()
    for policy_id, action_conversion in env.action_conversion_dict.items():
        space = action_conversion.space()
        space.seed(1234)
        seeded_spaces[policy_id] = space
    collected: List[ObservationType] = list()
    env.reset()
    for _ in range(n_steps):
        policy_id, _actor = env.actor_id()
        maze_state = env.core_env.get_maze_state()
        observation = env.observation_conversion.maze_to_space(maze_state)
        action = pick_action(observation=observation, action_space=seeded_spaces[policy_id])
        collected.append(env.step(action)[0])
    return collected
class RMSPropGrafting(AdagradGrafting):
    """Adagrad grafting configured to behave like RMSProp.

    Reuses AdagradGrafting with bias correction and gradient normalization
    disabled, forwarding all distribution-related options unchanged.
    """

    def __init__(self, param, beta2: float=0.99, epsilon: float=1e-08, group: Optional[dist.ProcessGroup]=None, group_source_rank: int=0, dist_buffer: Optional[Tensor]=None, use_dtensor: bool=True, communication_dtype: CommunicationDType=CommunicationDType.DEFAULT):
        super().__init__(
            param=param,
            beta2=beta2,
            epsilon=epsilon,
            use_bias_correction=False,
            normalize_gradient=False,
            group=group,
            group_source_rank=group_source_rank,
            dist_buffer=dist_buffer,
            use_dtensor=use_dtensor,
            communication_dtype=communication_dtype,
        )
def test_issue_26_v0_4_0():
    """Hex-string arrays: signed Fxp wraps to negative, unsigned keeps raw value."""
    hex_values = np.array(['0xff864d8f', '0xff86b76d', '0xff880f87'])
    signed = Fxp(hex_values)
    for index, expected in enumerate([-7975537, -7948435, -7860345]):
        assert signed[index] == expected
    unsigned = Fxp(hex_values, signed=False)
    for index in range(3):
        assert unsigned[index] == int(hex_values[index], 16)
def suppress(signal):
    """Decorator factory: run the wrapped method with *signal* emission suppressed.

    Connects a one-shot handler that calls stop_emission_by_name(signal) on
    the sender, invokes the wrapped function, and always disconnects the
    handler afterwards — even when the function raises (the original leaked
    the handler on exceptions).  The wrapped function's return value is now
    propagated instead of being discarded.
    """
    import functools

    def wrapper(function):
        @functools.wraps(function)
        def wrapped_function(self, *args, **kwargs):
            def on_event(sender, *event_args):
                sender.stop_emission_by_name(signal)
                return True
            handler_id = self.connect(signal, on_event)
            try:
                return function(self, *args, **kwargs)
            finally:
                # Disconnect even on exception so the handler never leaks.
                self.disconnect(handler_id)
        return wrapped_function
    return wrapper
class TestHostsResolver(tests.LimitedTestCase):
    """Tests for greendns.HostsResolver: parsing of a hosts file and
    DNS-style query answers (A, AAAA, CNAME, aliases) built from it."""
    def test_default_fname(self):
        """Default constructor points at an existing hosts file."""
        hr = greendns.HostsResolver()
        assert os.path.exists(hr.fname)
    def test_readlines_lines(self):
        """_readlines strips whitespace, blank lines, and comments; re-reads on stat change."""
        hr = _make_host_resolver()
        hr.hosts.write(b'line0\n')
        hr.hosts.flush()
        assert (list(hr._readlines()) == ['line0'])
        # Resetting _last_stat forces the file to be re-read.
        hr._last_stat = 0
        hr.hosts.write(b'line1\n')
        hr.hosts.flush()
        assert (list(hr._readlines()) == ['line0', 'line1'])
        hr._last_stat = 0
        hr.hosts.seek(0)
        hr.hosts.truncate()
        hr.hosts.write(b'\naa\r\nbb\r cc \n\n\tdd ee')
        hr.hosts.flush()
        assert (list(hr._readlines()) == ['aa', 'bb', 'cc', 'dd ee'])
        hr._last_stat = 0
        hr.hosts.seek(0)
        hr.hosts.truncate()
        hr.hosts.write(b'# First couple lines\n# are comments.\nline1\n#comment\nline2 # inline comment\n')
        hr.hosts.flush()
        assert (list(hr._readlines()) == ['line1', 'line2'])
    def test_readlines_missing_file(self):
        """A missing hosts file yields no lines rather than raising."""
        hr = _make_host_resolver()
        hr.hosts.close()
        hr._last_stat = 0
        assert (list(hr._readlines()) == [])
    def test_load_no_contents(self):
        """Loading an empty hosts file leaves all tables empty."""
        hr = _make_host_resolver()
        hr._load()
        assert (not hr._v4)
        assert (not hr._v6)
        assert (not hr._aliases)
    def test_load_v4_v6_cname_aliases(self):
        """IPv4/IPv6 entries populate _v4/_v6; extra names become aliases."""
        hr = _make_host_resolver()
        hr.hosts.write(b'1.2.3.4 v4.example.com v4\ndead:beef::1 v6.example.com v6\n')
        hr.hosts.flush()
        hr._load()
        assert (hr._v4 == {'v4.example.com': '1.2.3.4', 'v4': '1.2.3.4'})
        assert (hr._v6 == {'v6.example.com': 'dead:beef::1', 'v6': 'dead:beef::1'})
        assert (hr._aliases == {'v4': 'v4.example.com', 'v6': 'v6.example.com'})
    def test_load_v6_link_local(self):
        """Link-local (fe80::/10) IPv6 entries are ignored."""
        hr = _make_host_resolver()
        hr.hosts.write(b'fe80:: foo\nfe80:dead:beef::1 bar\n')
        hr.hosts.flush()
        hr._load()
        assert (not hr._v4)
        assert (not hr._v6)
    def test_query_A(self):
        """A-record lookups work repeatedly and accept bytes names."""
        hr = _make_host_resolver()
        hr._v4 = {'v4.example.com': '1.2.3.4'}
        ans = hr.query('v4.example.com')
        assert (ans[0].address == '1.2.3.4')
        ans = hr.query('v4.example.com')
        assert (ans[0].address == '1.2.3.4')
        ans = hr.query(b'v4.example.com')
        assert (ans[0].address == '1.2.3.4')
    def test_query_ans_types(self):
        """The synthetic Answer mimics dnspython's types, TTL, and rdata."""
        hr = _make_host_resolver()
        hr._v4 = {'v4.example.com': '1.2.3.4'}
        hr._last_stat = time.time()
        ans = hr.query('v4.example.com')
        assert isinstance(ans, greendns.dns.resolver.Answer)
        assert (ans.response is None)
        assert (ans.qname == dns.name.from_text('v4.example.com'))
        assert (ans.rdtype == dns.rdatatype.A)
        assert (ans.rdclass == dns.rdataclass.IN)
        assert (ans.canonical_name == dns.name.from_text('v4.example.com'))
        assert ans.expiration
        assert isinstance(ans.rrset, dns.rrset.RRset)
        assert (ans.rrset.rdtype == dns.rdatatype.A)
        assert (ans.rrset.rdclass == dns.rdataclass.IN)
        ttl = greendns.HOSTS_TTL
        # Allow a second of slack either side for timing jitter.
        assert ((ttl - 1) <= ans.rrset.ttl <= (ttl + 1))
        rr = ans.rrset[0]
        assert isinstance(rr, greendns.dns.rdtypes.IN.A.A)
        assert (rr.rdtype == dns.rdatatype.A)
        assert (rr.rdclass == dns.rdataclass.IN)
        assert (rr.address == '1.2.3.4')
    def test_query_AAAA(self):
        """AAAA lookups read from the _v6 table."""
        hr = _make_host_resolver()
        hr._v6 = {'v6.example.com': 'dead:beef::1'}
        ans = hr.query('v6.example.com', dns.rdatatype.AAAA)
        assert (ans[0].address == 'dead:beef::1')
    def test_query_unknown_raises(self):
        """Unknown names raise NoAnswer by default."""
        hr = _make_host_resolver()
        with tests.assert_raises(greendns.dns.resolver.NoAnswer):
            hr.query('example.com')
    def test_query_unknown_no_raise(self):
        """raise_on_no_answer=False returns an empty, well-formed Answer."""
        hr = _make_host_resolver()
        ans = hr.query('example.com', raise_on_no_answer=False)
        assert isinstance(ans, greendns.dns.resolver.Answer)
        assert (ans.response is None)
        assert (ans.qname == dns.name.from_text('example.com'))
        assert (ans.rdtype == dns.rdatatype.A)
        assert (ans.rdclass == dns.rdataclass.IN)
        assert (ans.canonical_name == dns.name.from_text('example.com'))
        assert ans.expiration
        assert isinstance(ans.rrset, greendns.dns.rrset.RRset)
        assert (ans.rrset.rdtype == dns.rdatatype.A)
        assert (ans.rrset.rdclass == dns.rdataclass.IN)
        assert (len(ans.rrset) == 0)
    def test_query_CNAME(self):
        """CNAME lookups resolve through the alias table."""
        hr = _make_host_resolver()
        hr._aliases = {'host': 'host.example.com'}
        ans = hr.query('host', dns.rdatatype.CNAME)
        assert (ans[0].target == dns.name.from_text('host.example.com'))
        assert (str(ans[0].target) == 'host.example.com.')
    def test_query_unknown_type(self):
        """Unsupported record types (e.g. MX) raise NoAnswer."""
        hr = _make_host_resolver()
        with tests.assert_raises(greendns.dns.resolver.NoAnswer):
            hr.query('example.com', dns.rdatatype.MX)
    def test_getaliases(self):
        """getaliases returns the canonical name plus sibling aliases."""
        hr = _make_host_resolver()
        hr._aliases = {'host': 'host.example.com', 'localhost': 'host.example.com'}
        res = set(hr.getaliases('host'))
        assert (res == {'host.example.com', 'localhost'})
    def test_getaliases_unknown(self):
        """Unknown names have no aliases."""
        hr = _make_host_resolver()
        assert (hr.getaliases('host.example.com') == [])
    def test_getaliases_fqdn(self):
        """Looking up the canonical name returns the alias pointing at it."""
        hr = _make_host_resolver()
        hr._aliases = {'host': 'host.example.com'}
        res = set(hr.getaliases('host.example.com'))
        assert (res == {'host'})
    def test_hosts_case_insensitive(self):
        """Hostnames in the hosts file match case-insensitively."""
        name = 'example.com'
        hr = _make_host_resolver()
        hr.hosts.write(b'1.2.3.4 ExAmPlE.CoM\n')
        hr.hosts.flush()
        hr._load()
        ans = hr.query(name)
        rr = ans.rrset[0]
        assert isinstance(rr, greendns.dns.rdtypes.IN.A.A)
        assert (rr.rdtype == dns.rdatatype.A)
        assert (rr.rdclass == dns.rdataclass.IN)
        assert (rr.address == '1.2.3.4')
class CmdWriteAsm(Cmd):
    """Interactive command: assemble ARM thumb code and write it to memory.

    Code can be given inline or edited in $EDITOR via --file; --dry only
    assembles without writing.
    """
    keywords = ['writeasm', 'asm']
    description = 'Writes assembler instructions to a specified memory address.'
    parser = argparse.ArgumentParser(prog=keywords[0], description=description, epilog=('Aliases: ' + ', '.join(keywords)))
    parser.add_argument('--dry', '-d', action='store_true', help="Only pass code to the assembler but don't write to memory")
    parser.add_argument('--file', '-f', help='Open file in text editor, then read assembly from this file.')
    parser.add_argument('address', type=auto_int, help='Destination address')
    parser.add_argument('code', nargs='*', help='Assembler code as string')

    def work(self):
        """Parse args, assemble the source, and (unless --dry) write to RAM.

        Returns True on success/dry-run, False on any failure.
        """
        args = self.getArgs()
        # Fixed: identity comparisons with None instead of '==' / '!='.
        if args is None:
            return True
        if args.file is not None:
            if not os.path.exists(args.file):
                # Seed a template so the user has instructions to start from;
                # context manager replaces the unclosed-on-error open/close.
                with open(args.file, 'w') as f:
                    f.write('/* Write arm thumb code here.\n')
                    f.write(" Use '' or '//' for single line comments or C-like block comments. */\n")
                    f.write(('\n// 0x%08x:\n\n' % args.address))
            editor = os.environ.get('EDITOR', 'vim')
            subprocess.call([editor, args.file])
            code = read(args.file)
        elif len(args.code) > 0:
            code = ' '.join(args.code)
        else:
            self.parser.print_usage()
            print('Either code or --file is required!')
            return False
        try:
            data = asm(code, vma=args.address)
        except PwnlibException:
            return False
        if len(data) > 0:
            log.info(('Assembler was successful. Machine code (len = %d bytes) is:' % len(data)))
            log.hexdump(data, begin=args.address)
        else:
            log.info("Assembler didn't produce any machine code.")
            return False
        if args.dry:
            log.info('This was a dry run. No data written to memory!')
            return True
        # Writing outside RAM is allowed only after explicit confirmation.
        if not self.isAddressInSections(args.address, len(data), sectiontype='RAM'):
            answer = yesno(('Warning: Address 0x%08x (len=0x%x) is not inside a RAM section. Continue?' % (args.address, len(data))))
            if not answer:
                return False
        self.progress_log = log.progress('Writing Memory')
        if self.writeMem(args.address, data, self.progress_log, bytes_done=0, bytes_total=len(data)):
            self.progress_log.success(('Written %d bytes to 0x%08x.' % (len(data), args.address)))
            return True
        else:
            self.progress_log.failure('Write failed!')
            return False
def convert_input_media_array(array):
    """Serialize a list of InputMedia objects for a media-group request.

    Returns (json_string, files) where files maps each attach:// key to the
    media object that must be uploaded with the request.  Non-InputMedia
    entries are skipped.
    """
    media_dicts = []
    attachments = {}
    for entry in array:
        if not isinstance(entry, types.InputMedia):
            continue
        as_dict = entry.to_dict()
        if as_dict['media'].startswith('attach://'):
            attach_key = as_dict['media'].replace('attach://', '')
            attachments[attach_key] = entry.media
        media_dicts.append(as_dict)
    return (json.dumps(media_dicts), attachments)
class version1(StdOutputParams, ExecutorTopicContinuum, CreateMakeDependencies):
    """Output module that writes the VCS id of the newest topic set to a file."""
    def __init__(self, oconfig):
        """Initialize output params and make-dependency bookkeeping."""
        tracer.debug('Called.')
        StdOutputParams.__init__(self, oconfig)
        CreateMakeDependencies.__init__(self)
        # Commit id picked in topic_continuum_sort(); written by topic_set_pre().
        self.__used_vcs_id = None
    def cmad_topic_continuum_pre(self, _):
        """Record the make dependency from the requirements to the output file."""
        tracer.debug('Called.')
        CreateMakeDependencies.write_reqs_dep(self._cmad_file, self._output_filename)
    def topic_continuum_sort(self, vcs_commit_ids, topic_sets):
        """Keep only the topic set belonging to the most recent commit id."""
        self.__used_vcs_id = vcs_commit_ids[(- 1)]
        return [topic_sets[vcs_commit_ids[(- 1)].get_commit()]]
    def topic_set_pre(self, _requirement_set):
        """Write the recorded VCS commit id to the output file."""
        tracer.debug('Called')
        with open(self._output_filename, 'w') as versfd:
            versfd.write(('%s\n' % self.__used_vcs_id))
def _resolve_model(obj):
    """Return the Django model class referred to by *obj*.

    Accepts either an "app_label.ModelName" string or a model class itself;
    anything else raises ValueError.
    """
    if isinstance(obj, six.string_types):
        parts = obj.split('.')
        if len(parts) == 2:
            app_name, model_name = parts
            return apps.get_model(app_name, model_name)
    elif inspect.isclass(obj) and issubclass(obj, models.Model):
        return obj
    raise ValueError('{0} is not a Django model'.format(obj))
.django_db
def test_messages_not_nested(client, monkeypatch, elasticsearch_award_index, awards_over_different_date_ranges):
    """Unknown filters must surface in top-level 'messages', not fail the request."""
    setup_elasticsearch_test(monkeypatch, elasticsearch_award_index)
    contract_type_list = all_award_types_mappings['contracts']
    # 'not_a_real_filter' is intentionally invalid to trigger the warning message.
    request_for_2015 = {'subawards': False, 'fields': ['Award ID'], 'sort': 'Award ID', 'limit': 50, 'page': 1, 'filters': {'time_period': [{'start_date': '2015-01-01', 'end_date': '2015-12-31', 'date_type': 'date_signed'}], 'award_type_codes': contract_type_list, 'not_a_real_filter': 'abc'}}
    resp = client.post('/api/v2/search/spending_by_award/', content_type='application/json', data=json.dumps(request_for_2015))
    resp_json = resp.json()
    assert (resp.status_code == status.HTTP_200_OK)
    assert (len(resp.data['results']) == 5)
    # Only 'time_period' and 'award_type_codes' are recognized filters here.
    assert (resp_json['messages'] == get_generic_filters_message(request_for_2015['filters'].keys(), {'time_period', 'award_type_codes'}))
def hc_worker(crack=None, hash_file=None, session=None, wordlist=None, outfile=None, hash_mode=1000, attack_mode=None, mask=None, rules=None, name=None, username=False, pot_path=None, restore=None, brain=True, mask_file=False, increment=False, increment_min=None, increment_max=None, speed=True, benchmark=False, benchmark_all=False, wordlist2=None, potcheck=None):
    """Run a hashcat session and babysit it until it finishes.

    Wires up hashcat event callbacks, then polls the session state every
    10 seconds, honouring Stop/Delete/Pause requests stored in the matching
    redis job's meta.  Returns 'Exhausted' or 'Cracked' (None for
    Stop/Delete); raises ValueError on abort or an initialization hang.

    Bug fix: the two "Stale paused job caught, resuming" paths previously
    *compared* job.meta['CrackQ State'] to 'Run/Restored' instead of
    assigning it, so the saved meta never reflected the resume.
    """
    if attack_mode:
        if not isinstance(attack_mode, int):
            attack_mode = None
    if potcheck:
        try:
            pot_job(hash_file=hash_file, session=session, hash_mode=hash_mode, attack_mode=attack_mode, rules=None, pot_path=pot_path, username=username)
        except Exception as err:
            logger.error('Error running potcheck: {}'.format(err))
    hcat = runner(hash_file=hash_file, mask=mask, session=session, wordlist=wordlist, outfile=outfile, attack_mode=attack_mode, hash_mode=hash_mode, rules=rules, username=username, pot_path=pot_path, restore=restore, brain=brain, wordlist2=wordlist2, benchmark=benchmark, benchmark_all=benchmark_all)
    hcat.event_connect(callback=error_callback, signal='EVENT_LOG_ERROR')
    hcat.event_connect(callback=warning_callback, signal='EVENT_LOG_WARNING')
    if benchmark:
        hcat.event_connect(callback=bench_callback, signal='EVENT_CRACKER_FINISHED')
        hcat.event_connect(callback=finished_callback, signal='EVENT_OUTERLOOP_FINISHED')
        hcat.event_connect(callback=any_callback, signal='ANY')
    else:
        hcat.event_connect(callback=finished_callback, signal='EVENT_CRACKER_FINISHED')
        hcat.event_connect(callback=cracked_callback, signal='EVENT_CRACKER_HASH_CRACKED')
    try:
        main_counter = 0
        while True:
            hc_state = hcat.status_get_status_string()
            logger.debug('MAIN loop')
            if (hc_state == 'Exhausted') and (not mask_file):
                finished_callback(hcat)
                return 'Exhausted'
            if (hc_state == 'Exhausted') and mask_file:
                # Mask-file runs report Exhausted between masks; wait before
                # concluding the whole job is done.
                sleep(30)
                # NOTE(review): hc_state is not refreshed after the sleep, so
                # the nested checks always pass — confirm this is intended.
                if hc_state == 'Exhausted':
                    logger.info('checking mask file')
                    if hc_state == 'Exhausted':
                        finished_callback(hcat)
                        return 'Exhausted'
            elif hc_state == 'Cracked':
                cracked_callback(hcat)
                return 'Cracked'
            elif hc_state == 'Aborted':
                logger.debug('Hashcat Abort status returned')
                event_log = hcat.hashcat_status_get_log()
                raise ValueError('Aborted: {}'.format(event_log))
            elif (main_counter > 3000) and (hc_state != 'Running') and (mask_file == False):
                # ~300 iterations x 10s without reaching Running: assume hung.
                logger.debug('Reseting job, seems to be hung')
                raise ValueError('Error: Hashcat hung - Initialize timeout')
            else:
                logger.debug('HC State: {}'.format(hc_state))
                if 'Initializing' not in hc_state:
                    init_callback(hcat)
                    logger.debug('Hashcat initialized')
                job = redis_q.fetch_job(str(hcat.session))
                speed_started = rq.registry.StartedJobRegistry(queue=speed_q)
                cur_speed = speed_started.get_job_ids()
                if job:
                    if job.meta['CrackQ State'] == 'Stop':
                        logger.info('Stopping Job: {}'.format(hcat.session))
                        hcat.hashcat_session_quit()
                        return
                    elif job.meta['CrackQ State'] == 'Delete':
                        logger.info('Deleting Job: {}'.format(hcat.session))
                        speed_session = '{}_speed'.format(hcat.session)
                        speed_job = speed_q.fetch_job(speed_session)
                        if speed_job:
                            logger.debug('Deleting speed job')
                            speed_status = speed_job.get_status()
                            finished_states = ['finished', 'failed']
                            del_count = 0
                            # Busy-wait (bounded) for the speed job to settle.
                            while (speed_status not in finished_states) and (del_count < 100):
                                logger.debug('DELETE wait loop')
                                speed_status = speed_job.get_status()
                                del_count += 1
                            logger.debug('Breaking runner loop speed check job has finished')
                            speed_job.delete()
                        cq_api.del_jobid(hcat.session)
                        hcat.hashcat_session_quit()
                        hcat.reset()
                        return
                    elif job.meta['CrackQ State'] == 'Pause':
                        hcat.hashcat_session_pause()
                        pause_counter = 0
                        logger.debug('Pausing job: {}'.format(hcat.session))
                        logger.debug('PAUSE loop begin')
                        while pause_counter < 600:
                            if hcat.status_get_status_string() == 'Paused':
                                logger.debug('Job Paused: {}'.format(hcat.session))
                                break
                            elif del_check(job):
                                break
                            pause_counter += 1
                        logger.debug('PAUSE loop finished')
                        if hcat.status_get_status_string() != 'Paused':
                            logger.debug('Pause failed: {}'.format(hc_state))
                        if len(cur_speed) < 1:
                            if not del_check(job):
                                logger.debug('Stale paused job caught, resuming')
                                # Fixed: was a no-op comparison (==).
                                job.meta['CrackQ State'] = 'Run/Restored'
                                job.save_meta()
                                hcat.hashcat_session_resume()
                    elif hc_state == 'Bypass':
                        logger.debug('Error: Bypass not cleared')
                    else:
                        logger.debug('Haschat state: {}'.format(hc_state))
                        if len(cur_speed) < 1:
                            if not del_check(job):
                                if hcat.status_get_status_string() == 'Paused':
                                    logger.debug('Stale paused job caught, resuming')
                                    # Fixed: was a no-op comparison (==).
                                    job.meta['CrackQ State'] = 'Run/Restored'
                                    job.save_meta()
                                    hcat.hashcat_session_resume()
                else:
                    logger.error('Error finding redis job')
                sleep(10)
                main_counter += 10
    except KeyboardInterrupt:
        hcat.hashcat_session_quit()
        exit(0)
    except Exception as err:
        logger.error('MAIN loop closed: {}'.format(err))
class TestAddCliOption(object):
    """Tests for rgbgradientv2.add_cli_option: flag registration and
    validation of color / rgbgradient argument values.

    NOTE(review): `cli` looks like a stripped @pytest.fixture, and the bare
    `.parametrize(...)` lines below are stripped @pytest.mark.parametrize
    decorators — confirm against the original source.
    """
    def cli(self):
        """Build an ArgumentParser with the logo-color option registered."""
        cli = argparse.ArgumentParser()
        rgbgradientv2.add_cli_option(cli, 'color0', {'label': 'Logo LED colors and effects', 'description': 'Set the colors and the effects of the logo LED', 'cli': ['-c', '--logo-color', '--foobar'], 'report_type': 3, 'command': [91, 0, 0], 'value_type': 'rgbgradientv2', 'rgbgradientv2_header': {'color_field_length': 139, 'duration_length': 2, 'maxgradient': 14}, 'led_id': 2, 'default': 'rgbgradient(duration=1000; colors=0%: #ff0000, 33%: #00ff00, 66%: #0000ff)'})
        return cli
    def test_cli_options(self, cli):
        """All declared flags show up in the generated help text."""
        assert ('-c' in cli.format_help())
        assert ('--logo-color' in cli.format_help())
        assert ('--foobar' in cli.format_help())
    def test_cli_metavar(self, cli):
        """The metavar is derived from the option name (COLOR0)."""
        assert ('-c COLOR0' in cli.format_help())
    def test_default_value_displayed(self, cli):
        """The default rgbgradient value is included in the help text."""
        assert ('rgbgradient(' in cli.format_help())
    .parametrize('color', ['#AABBCC', '#aaBBcc', 'AAbbCC', '#ABC', 'AbC', 'red', 'rgbgradient(duration=1000; colors=0%: #ff0000, 33%: #00ff00, 66%: #0000ff)', 'rgbgradient(colors=0%: #ff0000, 33%: #00ff00, 66%: #0000ff; duration=1000;)', 'rgbgradient(colors=0%: #ff0000, 33%: #00ff00, 66%: #0000ff)', 'rgbgradient(colors=0:red,33:#0f0,66:00f)'])
    def test_passing_valid_color_arguments(self, cli, color):
        """Valid colors and gradient strings are accepted verbatim."""
        params = cli.parse_args(['--logo-color', color])
        assert (params.COLOR0 == color)
    .parametrize('color', ['hello', '#AABBCCFF', '~AABBCC', '#HHIIFF', 'fa0b', 'rgbgradient()'])
    def test_passing_invalid_color_arguments(self, cli, color):
        """Invalid values make argparse exit with status 2."""
        with pytest.raises(SystemExit) as pytest_wrapped_e:
            cli.parse_args(['--logo-color', color])
        assert (pytest_wrapped_e.type == SystemExit)
        assert (pytest_wrapped_e.value.code == 2)
def extractScarletMadness(item):
    """Map a 'Scarlet Madness' feed item to a release message.

    Returns ``None`` for previews or titles with no parseable vol/chapter/
    fragment, ``False`` when no known series tag matches, otherwise the
    built release message.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (vol or chp or frag) or 'preview' in item['title'].lower():
        return None
    # (feed tag, canonical series name, translation type)
    tagmap = [('Kanna The Godless; The Summoned Heretic Is A Scenario Breaker', 'Kanna no Kanna Itanshoukansha wa Scenario Breaker', 'translated'), ('When I was going out from my house to stop become a Hiki-NEET after 10 years I was transported to another world', 'When I was going out from my house to stop become a Hiki-NEET after 10 years I was transported to another world', 'translated'), ('A Second Time for an Otherworld Summoning', 'A Second Time for an Otherworld Summoning', 'translated'), ('Blessing From the Goddess and Transfer to Another World! ~No Thanks, I Already Have a Special Power~', 'Blessing From the Goddess and Transfer to Another World! ~No Thanks, I Already Have a Special Power~', 'translated'), ('Blessing from the goddess and transfer to another world! ~No thanks, I already have special power~', 'Blessing From the Goddess and Transfer to Another World! ~No Thanks, I Already Have a Special Power~', 'translated'), ('Blessing From the Goddess and Transfer to Another World! ~No Thanks, I Already Have Special Powers~', 'Blessing From the Goddess and Transfer to Another World! ~No Thanks, I Already Have a Special Power~', 'translated'), ("Corporate Slave Hero says He's Quitting his Job", "Corporate Slave Hero says He's Quitting his Job", 'translated')]
    for tag, series, release_type in tagmap:
        if tag not in item['tags']:
            continue
        return buildReleaseMessageWithType(
            item, series, vol, chp,
            frag=frag, postfix=postfix, tl_type=release_type)
    return False
class TestDemoImageFile(unittest.TestCase):
    """Tests for the DemoImageFile demo node."""

    def test_description_contains_file_uri(self):
        """The generated description embeds an <img> whose src is the file name."""
        with tempfile.NamedTemporaryFile() as file_obj:
            dirname, basename = os.path.split(file_obj.name)
            parent = DemoPath(name=dirname)
            node = DemoImageFile(parent=parent, name=basename)
            node.init()
            # The description is expected to be well-formed XML/XHTML.
            doc = ET.fromstring(node.description)
            img = next(doc.iter(get_html_tag('img')))
            self.assertEqual(img.attrib['src'], basename)
            self.assertEqual(node.base_url, dirname)
def send_recognized_voice(user_id, file, duration):
    """Forward a voice note to the user, transcribing it when short enough.

    Clips longer than 60 seconds are sent without transcription and with a
    generic subject line.
    """
    recognized_text = ''
    subject = 'Voice note to self'
    if duration <= 60:
        recognized_text = recognize(file.read())
        if recognized_text:
            subject = 'Voice: {}'.format(get_subject(recognized_text))
    send_file(user_id=user_id, file=file, filename='voice.oga',
              subject=subject, text=recognized_text)
class VariantTestCase(unittest.TestCase):
    """Round-trip tests for VARIANT SAFEARRAY values.

    NOTE(review): the extracted source had two bare string expressions
    before the leak tests — those were stripped ``@unittest.skip``
    decorators and are restored here so the leaking tests are skipped
    rather than silently annotated with a no-op string.
    """

    @unittest.skip('This fails with a memory leak. Figure out if false positive.')
    def test_VARIANT_array(self):
        v = VARIANT()
        v.value = ((1, 2, 3), ('foo', 'bar', None))
        self.assertEqual(v.vt, (VT_ARRAY | VT_VARIANT))
        self.assertEqual(v.value, ((1, 2, 3), ('foo', 'bar', None)))

        def func():
            VARIANT((1, 2, 3), ('foo', 'bar', None))

        # Renamed from `bytes` to avoid shadowing the builtin.
        leaked = find_memleak(func)
        self.assertFalse(leaked, ('Leaks %d bytes' % leaked))

    @unittest.skip('This fails with a memory leak. Figure out if false positive.')
    def test_double_array(self):
        a = array.array('d', (3.14, 2.78))
        v = VARIANT(a)
        self.assertEqual(v.vt, (VT_ARRAY | VT_R8))
        self.assertEqual(tuple(a.tolist()), v.value)

        def func():
            VARIANT(array.array('d', (3.14, 2.78)))

        leaked = find_memleak(func)
        self.assertFalse(leaked, ('Leaks %d bytes' % leaked))

    def test_float_array(self):
        a = array.array('f', (3.14, 2.78))
        v = VARIANT(a)
        self.assertEqual(v.vt, (VT_ARRAY | VT_R4))
        self.assertEqual(tuple(a.tolist()), v.value)

    def test_2dim_array(self):
        data = ((1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12))
        v = VARIANT(data)
        self.assertEqual(v.value, data)
def set_chatroom_name(self, chatroomUserName, name):
    """Set a new topic for a chatroom via the webwxupdatechatroom endpoint.

    Returns a ReturnValue wrapping the raw HTTP response.
    """
    url = ('%s/webwxupdatechatroom?fun=modtopic&pass_ticket=%s'
           % (self.loginInfo['url'], self.loginInfo['pass_ticket']))
    payload = {
        'BaseRequest': self.loginInfo['BaseRequest'],
        'ChatRoomName': chatroomUserName,
        'NewTopic': name,
    }
    headers = {
        'content-type': 'application/json; charset=UTF-8',
        'User-Agent': self.user_agent,
    }
    # Encode manually so non-ASCII topic names survive the request body.
    body = json.dumps(payload, ensure_ascii=False).encode('utf8', 'ignore')
    response = self.s.post(url, headers=headers, data=body)
    return ReturnValue(rawResponse=response)
class SimpleEditor(CustomEditor):
    """Editor presenting a button that opens the value's traits view in a dialog."""

    # Layout hints consumed by the enclosing editor infrastructure.
    orientation = wx.HORIZONTAL
    extra = 2

    # Reference to the opened dialog UI so it can be disposed with the editor.
    _dialog_ui = Instance('traitsui.ui.UI')

    def create_editor(self, parent, sizer):
        """Create the button control and hook it up to open the dialog."""
        button = wx.Button(parent, -1, '')
        self._button = button
        sizer.Add(button, 1, wx.EXPAND | wx.LEFT, 5)
        button.Bind(wx.EVT_BUTTON, self.edit_instance, id=button.GetId())

    def dispose(self):
        """Unbind the button and tear down any open dialog before disposal."""
        button = self._button
        if button is not None:
            button.Bind(wx.EVT_BUTTON, None, id=button.GetId())
        if self._dialog_ui is not None:
            self._dialog_ui.dispose()
            self._dialog_ui = None
        super().dispose()

    def edit_instance(self, event):
        """Open a dialog editing the current value's traits."""
        factory = self.factory
        view = self.ui.handler.trait_view_for(
            self.ui.info, factory.view, self.value, self.object_name, self.name)
        ui = self.value.edit_traits(view, self.control, factory.kind, id=factory.id)
        if ui.control is not None:
            position_window(ui.control)
        # Chain the undo history so edits participate in the parent UI's undo.
        if ui.history is None:
            ui.history = self.ui.history
        self._dialog_ui = ui

    def resynch_editor(self):
        """Refresh the button's label and enabled state from factory/value."""
        button = self._button
        if button is None:
            return
        label = self.factory.label
        if label == '':
            label = user_name_for(self.name)
        button.SetLabel(label)
        # Only HasTraits values can be edited in a traits dialog.
        button.Enable(isinstance(self.value, HasTraits))
class ColumnDriftParameter(ExcludeNoneMixin, TestParameters):
    """Parameters describing a single column's drift-test result."""

    stattest: str                       # name of the statistical test used
    score: float                        # drift score, rounded to 3 decimals
    threshold: float                    # stattest decision threshold
    detected: bool                      # whether drift was detected
    column_name: Optional[str] = None   # omitted for dataset-level results

    # NOTE(review): `from_metric` takes `cls` but had no decorator in the
    # extracted source — restored as the alternate-constructor classmethod.
    @classmethod
    def from_metric(cls, data: ColumnDataDriftMetrics, column_name: Optional[str] = None):
        """Build parameters from a drift metric result for one column."""
        return cls(
            stattest=data.stattest_name,
            score=np.round(data.drift_score, 3),
            threshold=data.stattest_threshold,
            detected=data.drift_detected,
            column_name=column_name,
        )
# NOTE(review): the extracted source left this marker as a bare
# `.parametrize(...)` statement (a syntax error); restored as the
# `@pytest.mark.parametrize` decorator it clearly was.
@pytest.mark.parametrize(
    'python_version_enum, expected_image_string',
    [
        (PythonVersion.PYTHON_3_8, 'cr.flyte.org/flyteorg/flytekit:py3.8-latest'),
        (PythonVersion.PYTHON_3_9, 'cr.flyte.org/flyteorg/flytekit:py3.9-latest'),
        (PythonVersion.PYTHON_3_10, 'cr.flyte.org/flyteorg/flytekit:py3.10-latest'),
    ],
)
def test_defaults(python_version_enum, expected_image_string):
    """Each supported Python version maps to its default flytekit image tag."""
    assert (DefaultImages.find_image_for(python_version_enum) == expected_image_string)
def send_email_ticket_sales_end_next_week(event, emails):
    """Email organizers about tickets whose sales end exactly 7 days from today.

    Sends a single message to ``emails[0]``, BCCing the rest, with reply-to
    set to the last address. No-op when ``emails`` is empty.
    """
    action = MailType.TICKET_SALES_END_NEXT_WEEK
    mail = MAILS[action]
    settings = get_settings()
    # Original spelled this as `today() - timedelta(days=-7)`; same date,
    # written without the confusing double negative.
    target_date = datetime.date.today() + datetime.timedelta(days=7)
    tickets = [ticket.name for ticket in event.tickets
               if ticket.sales_ends_at.date() == target_date]
    ticket_names = ', '.join(tickets)
    event_dashboard = settings['frontend_url'] + '/events/' + event.identifier
    if not emails:
        return
    send_email(
        to=emails[0],
        action=action,
        subject=mail['subject'].format(event_name=event.name),
        html=render_template(
            mail['template'],
            settings=settings,
            event_dashboard=event_dashboard,
            event_name=event.name,
            ticket_names=ticket_names,
        ),
        bcc=emails[1:],
        reply_to=emails[-1],
    )
class Definition(Node):
    """Base AST node for named definitions that may carry a docstring."""

    def __init__(self):
        super().__init__()
        # Attached Docstring node; stays None until set_docstring is called.
        self.n_docstring = None

    def set_docstring(self, n_docstring):
        """Attach a Docstring node and re-parent it under this definition."""
        assert isinstance(n_docstring, Docstring)
        self.n_docstring = n_docstring
        n_docstring.set_parent(self)

    def get_local_name(self):
        """Return this definition's local name; must be overridden."""
        raise ICE('get_local_name not implemented')
class ExportPanel(QWidget):
    """Form widget collecting CSV export settings (columns, time index, path)."""

    updateExportButton = Signal(str, bool)
    runExport = Signal(dict)

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.setMinimumWidth(500)
        self.setMinimumHeight(200)
        self._dynamic = False
        self.setWindowTitle('Export data')
        self.activateWindow()

        form = QFormLayout()

        # Comma-separated column selection; '*' exports everything.
        self._column_keys_input = QLineEdit()
        self._column_keys_input.setMinimumWidth(250)
        self._column_keys_input.setText('*')
        form.addRow('Columns to export:', self._column_keys_input)

        # Time index specification; 'raw' keeps the original index.
        self._time_index_input = QLineEdit()
        self._time_index_input.setMinimumWidth(250)
        self._time_index_input.setText('raw')
        form.addRow('Time index:', self._time_index_input)

        browse_button = QToolButton()
        browse_button.setText('Browse')
        browse_button.clicked.connect(self.selectFileDirectory)

        self._defaultPath = QDir.currentPath() + '/export.csv'
        # Path field is read-only; it is filled via the Browse dialog.
        self._file_name = QLineEdit()
        self._file_name.setEnabled(False)
        self._file_name.setText(self._defaultPath)
        self._file_name.setMinimumWidth(250)

        path_row = QHBoxLayout()
        path_row.addWidget(self._file_name)
        path_row.addWidget(browse_button)
        form.addRow('Select directory to save files to:', path_row)

        self.setLayout(form)

    def selectFileDirectory(self):
        """Open a directory picker and store the chosen path, if any."""
        chosen = QFileDialog(self).getExistingDirectory(
            self, 'Directory', self._file_name.text(), QFileDialog.ShowDirsOnly)
        if len(str(chosen)) > 0:
            self._file_name.setText(str(chosen))

    def export(self):
        """Collect the form values and emit them on the runExport signal."""
        payload = {
            'output_file': self._file_name.text(),
            'time_index': self._time_index_input.text(),
            'column_keys': self._column_keys_input.text(),
        }
        self.runExport.emit(payload)
class SocketCalc(Calculator):
    """Calculator that serves energy/forces/hessian requests over a TCP socket.

    `listen_for` opens a listening socket, sends a JSON request describing the
    desired quantity to each connecting client, and waits until a client
    replies with a newline-terminated JSON object containing the requested
    fields.
    """

    valid_requests = ('energy', 'forces', 'hessian')

    def __init__(self, *args, host='localhost', port=8080, **kwargs):
        super().__init__(*args, **kwargs)
        self.port = port
        self.host = host

    def listen_for(self, atoms, coords, request):
        """Block until a client delivers JSON results for `request`.

        Returns a dict with 'energy' plus the requested key; 'forces' and
        'hessian' are converted to float ndarrays.
        """
        request = request.lower()
        assert request in self.valid_requests, \
            f"Invalid request '{request}'! Valid requests are '{self.valid_requests}'."
        request_for = {'atoms': atoms, 'coords': coords.tolist(), 'request': request}
        request_for = json.dumps(request_for).encode('utf-8')
        results = dict()
        # Context manager guarantees the listening socket is closed even when
        # an exception escapes the accept loop (the original leaked it then).
        with socket.socket() as sock:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind((self.host, self.port))
            sock.listen(0)
            while True:
                client, address = sock.accept()
                with client:
                    self.log(f'Got connection from {address}')
                    client.sendall(request_for)
                    to_json = b''
                    while True:
                        data = client.recv(1024)
                        if not data:
                            # Peer closed the connection; without this check
                            # the original spun forever on empty recv() calls.
                            break
                        to_json += data
                        if b'\n' in data:
                            self.log('Found linebreaking. Stop listening.')
                            break
                    results = dict()
                    try:
                        # Decode explicitly; catch UnicodeDecodeError too,
                        # which previously escaped the JSONDecodeError handler.
                        results = json.loads(to_json.decode('utf-8'))
                    except (json.JSONDecodeError, UnicodeDecodeError):
                        self.log('JSON decode error')
                    if ('energy' in results) and (request in results):
                        self.log('All required fields are present in the received JSON. Breaking.')
                        break
                    else:
                        self.log('Could not parse received data as JSON!')
        results = {key: results[key] for key in ('energy', request)}
        if 'forces' in results:
            results['forces'] = np.array(results['forces'], dtype=float)
        if 'hessian' in results:
            results['hessian'] = np.array(results['hessian'], dtype=float).reshape(-1, 3 * len(atoms))
        return results

    def get_energy(self, atoms, coords):
        """Request only the energy."""
        return self.listen_for(atoms, coords, 'energy')

    def get_forces(self, atoms, coords):
        """Request energy and forces."""
        return self.listen_for(atoms, coords, 'forces')

    def get_hessian(self, atoms, coords):
        """Request energy and the Hessian."""
        return self.listen_for(atoms, coords, 'hessian')
def all_platforms_message():
    """Build a sample FCM message configured for Android and APNS delivery."""
    notification = messaging.Notification(
        title='$GOOG up 1.43% on the day',
        body='$GOOG gained 11.80 points to close at 835.67, up 1.43% on the day.')
    android = messaging.AndroidConfig(
        ttl=datetime.timedelta(seconds=3600),
        priority='normal',
        notification=messaging.AndroidNotification(
            icon='stock_ticker_update', color='#f45342'))
    apns = messaging.APNSConfig(
        payload=messaging.APNSPayload(aps=messaging.Aps(badge=42)))
    return messaging.Message(
        notification=notification,
        android=android,
        apns=apns,
        topic='industry-tech')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.