code
stringlengths
281
23.7M
class StakingRestClient(Staking):
    """Staking queries implemented against the Cosmos REST (LCD) API."""

    API_URL = '/cosmos/staking/v1beta1'

    def __init__(self, rest_api: RestClient) -> None:
        """Keep a handle on the low-level REST client used for all queries."""
        self._rest_api = rest_api

    def Validators(self, request: QueryValidatorsRequest) -> QueryValidatorsResponse:
        """List validators matching ``request``."""
        raw = self._rest_api.get(f'{self.API_URL}/validators', request)
        return Parse(raw, QueryValidatorsResponse())

    def Validator(self, request: QueryValidatorRequest) -> QueryValidatorResponse:
        """Fetch a single validator by address."""
        raw = self._rest_api.get(f'{self.API_URL}/validators/{request.validator_addr}')
        return Parse(raw, QueryValidatorResponse())

    def ValidatorDelegations(self, request: QueryValidatorDelegationsRequest) -> QueryValidatorDelegationsResponse:
        """List delegations to one validator."""
        raw = self._rest_api.get(
            f'{self.API_URL}/validators/{request.validator_addr}/delegations',
            request,
            ['validatorAddr'],
        )
        return Parse(raw, QueryValidatorDelegationsResponse())

    def ValidatorUnbondingDelegations(self, request: QueryValidatorUnbondingDelegationsRequest) -> QueryValidatorUnbondingDelegationsResponse:
        """List unbonding delegations from one validator."""
        raw = self._rest_api.get(
            f'{self.API_URL}/validators/{request.validator_addr}/unbonding_delegations',
            request,
            ['validatorAddr'],
        )
        return Parse(raw, QueryValidatorUnbondingDelegationsResponse())

    def Delegation(self, request: QueryDelegationRequest) -> QueryDelegationResponse:
        """Fetch one delegation of a delegator on a validator."""
        raw = self._rest_api.get(
            f'{self.API_URL}/validators/{request.validator_addr}/delegations/{request.delegator_addr}'
        )
        return Parse(raw, QueryDelegationResponse())

    def UnbondingDelegation(self, request: QueryUnbondingDelegationRequest) -> QueryUnbondingDelegationResponse:
        """Fetch the unbonding delegation of a delegator on a validator."""
        raw = self._rest_api.get(
            f'{self.API_URL}/validators/{request.validator_addr}/delegations/{request.delegator_addr}/unbonding_delegation'
        )
        return Parse(raw, QueryUnbondingDelegationResponse())

    def DelegatorDelegations(self, request: QueryDelegatorDelegationsRequest) -> QueryDelegatorDelegationsResponse:
        """List all delegations of one delegator."""
        raw = self._rest_api.get(
            f'{self.API_URL}/delegations/{request.delegator_addr}',
            request,
            ['delegatorAddr'],
        )
        return Parse(raw, QueryDelegatorDelegationsResponse())

    def DelegatorUnbondingDelegations(self, request: QueryDelegatorUnbondingDelegationsRequest) -> QueryDelegatorUnbondingDelegationsResponse:
        """List all unbonding delegations of one delegator."""
        raw = self._rest_api.get(
            f'{self.API_URL}/delegators/{request.delegator_addr}/unbonding_delegations',
            request,
            ['delegatorAddr'],
        )
        return Parse(raw, QueryDelegatorUnbondingDelegationsResponse())

    def Redelegations(self, request: QueryRedelegationsRequest) -> QueryRedelegationsResponse:
        """List redelegations of one delegator."""
        raw = self._rest_api.get(
            f'{self.API_URL}/delegators/{request.delegator_addr}/redelegations',
            request,
            ['delegatorAddr'],
        )
        return Parse(raw, QueryRedelegationsResponse())

    def DelegatorValidators(self, request: QueryDelegatorValidatorsRequest) -> QueryDelegatorValidatorsResponse:
        """List validators a delegator is bonded to."""
        raw = self._rest_api.get(
            f'{self.API_URL}/delegators/{request.delegator_addr}/validators',
            request,
            ['delegatorAddr'],
        )
        return Parse(raw, QueryDelegatorValidatorsResponse())

    def DelegatorValidator(self, request: QueryDelegatorValidatorRequest) -> QueryDelegatorValidatorResponse:
        """Fetch one validator a delegator is bonded to."""
        raw = self._rest_api.get(
            f'{self.API_URL}/delegators/{request.delegator_addr}/validators/{request.validator_addr}'
        )
        return Parse(raw, QueryDelegatorValidatorResponse())

    def HistoricalInfo(self, request: QueryHistoricalInfoRequest) -> QueryHistoricalInfoResponse:
        """Fetch historical staking info at a given height."""
        raw = self._rest_api.get(f'{self.API_URL}/historical_info/{request.height}')
        return Parse(raw, QueryHistoricalInfoResponse())

    def Pool(self, request: QueryPoolRequest) -> QueryPoolResponse:
        """Fetch the staking pool info."""
        raw = self._rest_api.get(f'{self.API_URL}/pool')
        return Parse(raw, QueryPoolResponse())

    def Params(self, request: QueryParamsRequest) -> QueryParamsResponse:
        """Fetch the staking module parameters."""
        raw = self._rest_api.get(f'{self.API_URL}/params')
        return Parse(raw, QueryParamsResponse())
def test_fill_config_overrides():
    """fill() applies dotted and nested overrides and rejects invalid ones."""
    config = {
        'cfg': {
            'one': 1,
            'two': {'three': {'': 'catsie.v1', 'evil': True, 'cute': False}},
        }
    }
    # Dotted-path override of a leaf value.
    overrides = {'cfg.two.three.evil': False}
    result = my_registry.fill(config, overrides=overrides, validate=True)
    assert result['cfg']['two']['three']['evil'] is False
    # Dotted-path override replacing a whole subtree with a scalar.
    overrides = {'cfg.two.three': 3}
    result = my_registry.fill(config, overrides=overrides, validate=True)
    assert result['cfg']['two']['three'] == 3
    # Nested-dict override; the registry default fills in the missing 'cute'.
    overrides = {'cfg': {'one': {'': 'catsie.v1', 'evil': False}, 'two': None}}
    result = my_registry.fill(config, overrides=overrides)
    assert result['cfg']['two'] is None
    assert result['cfg']['one'][''] == 'catsie.v1'
    assert result['cfg']['one']['evil'] is False
    assert result['cfg']['one']['cute'] is True
    # Wrong value type for an overridden leaf.
    with pytest.raises(ConfigValidationError):
        overrides = {'cfg.two.three.evil': 20}
        my_registry.fill(config, overrides=overrides, validate=True)
    # Missing required argument in the nested override.
    with pytest.raises(ConfigValidationError):
        overrides = {'cfg': {'one': {'': 'catsie.v1'}, 'two': None}}
        my_registry.fill(config, overrides=overrides)
    # Override paths that do not exist in the config.
    with pytest.raises(ConfigValidationError):
        overrides = {'cfg.two.three.evil': False, 'two.four': True}
        my_registry.fill(config, overrides=overrides, validate=True)
    with pytest.raises(ConfigValidationError):
        overrides = {'cfg.five': False}
        my_registry.fill(config, overrides=overrides, validate=True)
def parse_css(cspace: 'Space', string: str, start: int = 0, fullmatch: bool = True, color: bool = False) -> Optional[Tuple[Tuple[Vector, float], int]]:
    """Parse a CSS color string for ``cspace``.

    Returns ``((coords, alpha), end_index)`` on success, or ``None`` when the
    string does not match this space (or does not fully match when
    ``fullmatch`` is set).  ``color=True`` additionally permits the
    ``color(...)`` function syntax.
    """
    target = cspace.SERIALIZE
    if not target:
        target = (cspace.NAME,)

    tokens = tokenize_css(string, start=start)
    if not tokens:
        return None
    if tokens['id'] not in target:
        return None
    if fullmatch and tokens['end'] < len(string):
        return None

    end = tokens['end']
    if 'func' in tokens and tokens['func']['name'] == 'color':
        # The color() function form is only honoured when explicitly enabled.
        if color is False:
            return None
        result = parse_color(tokens, cspace)
        return None if result is None else (result, end)

    if tokens['id'] == 'srgb':
        if 'hex' in tokens:
            return parse_hex(tokens['hex']['value']), end
        if 'name' in tokens:
            values = color_names.from_name(tokens['name']['color'])
            return (values[:-1], values[-1]), end
        channels = [v['value'] for v in tokens['func']['values']]
        return parse_rgb_channels(channels, cspace.CHANNELS), end

    channels = [v['value'] for v in tokens['func']['values']]
    if tokens['id'] in ('--hsl', '--hwb'):
        return parse_channels(channels, cspace.CHANNELS, scaled=True), end
    return parse_channels(channels, cspace.CHANNELS), end
def test_revert_clears_reverted_journal_entries(journal_db):
    """Discarding a checkpoint must drop every change recorded after it."""
    journal_db.set(b'1', b'test-a')
    assert journal_db.get(b'1') == b'test-a'

    checkpoint_a = journal_db.record()
    journal_db.set(b'1', b'test-b')
    journal_db.delete(b'1')
    journal_db.set(b'1', b'test-c')
    assert journal_db.get(b'1') == b'test-c'

    checkpoint_b = journal_db.record()
    journal_db.set(b'1', b'test-d')
    journal_db.delete(b'1')
    journal_db.set(b'1', b'test-e')
    assert journal_db.get(b'1') == b'test-e'

    # Roll back to checkpoint B: everything recorded after it disappears.
    journal_db.discard(checkpoint_b)
    assert journal_db.get(b'1') == b'test-c'

    journal_db.delete(b'1')
    assert journal_db.exists(b'1') is False

    # Roll back to checkpoint A: the delete above is gone as well.
    journal_db.discard(checkpoint_a)
    assert journal_db.get(b'1') == b'test-a'
def query_to_parameters(endpoint_method):
    """Decorator converting an endpoint method's query parameters before the call.

    Fix: the original contained a bare ``(endpoint_method)`` statement — a
    ``@wraps(endpoint_method)`` decorator whose prefix was stripped during
    extraction; restored so the wrapper keeps the endpoint's metadata.
    """
    from functools import wraps

    # These parameter names never correspond to query parameters.
    params_to_not_look_for = {'self', 'args', 'kwargs'}

    @wraps(endpoint_method)
    def convert_query_parameters_of_endpoint_method(self, *args, **kwargs):
        kwargs = _convert_query_params(endpoint_method, params_to_not_look_for, **kwargs)
        return endpoint_method(self, *args, **kwargs)

    return convert_query_parameters_of_endpoint_method
def init_wordmap():
    """Return a fresh word-map dict with every lexical attribute at its default.

    All attributes default to ``None`` except ``homonym`` (0) and the flags
    ``extra_i``/``extra_e`` (False).  Fix: the original literal listed
    ``'homonym': 0`` twice; the redundant duplicate entry is removed.
    """
    wordmap = {
        'upos': None,
        'lemma': None,
        'homonym': 0,
        'new_para': None,
        'kotus_tn': None,
        'kotus_av': None,
        'plurale_tantum': None,
        'possessive': None,
        'clitics': None,
        'is_proper': None,
        'proper_noun_class': None,
        'style': None,
        'stub': None,
        'gradestem': None,
        'twolstem': None,
        'grade_dir': None,
        'harmony': None,
        'is_suffix': None,
        'is_prefix': None,
        'stem_vowel': None,
        'stem_diphthong': None,
        'sem': None,
        'particle': None,
        'pronunciation': None,
        'boundaries': None,
        'bracketstub': None,
        'origin': None,
        'extra_i': False,
        'extra_e': False,
        'real_pos': None,
        'symbol': None,
        'argument': None,
        'pronoun': None,
        'abbr': None,
        'lex': None,
        'numtype': None,
        'prontype': None,
        'adptype': None,
        'blacklist': None,
        'pos': None,
        'deletion': None,
        'suffix_regex': None,
    }
    return wordmap
class slice_reshape_scatter(Operator):
    """Fused op replacing a slice -> concatenate -> reshape -> concatenate chain.

    Fix: ``is_valid`` and ``make_op`` take no ``self``/``cls`` and are invoked
    unbound; they are marked ``@staticmethod`` (the decorators were missing).
    """

    @staticmethod
    def is_valid(cat_op: Operator, reshape_op: Operator, cat_op_2: Operator) -> bool:
        """Return True when the (cat, reshape, cat) chain can be fused."""
        assert cat_op._attrs['op'] == 'concatenate'
        assert reshape_op._attrs['op'] == 'reshape'
        assert cat_op_2._attrs['op'].startswith('concatenate')
        cat_dim = cat_op._attrs['concat_dim']
        if cat_dim != cat_op_2._attrs['concat_dim']:
            return False
        cat_output_shape = cat_op._attrs['outputs'][0]._attrs['shape']
        cat_output_rank = len(cat_output_shape)
        if cat_output_rank <= 1:
            return False
        cat_output_shape_2 = cat_op_2._attrs['outputs'][0]._attrs['shape']
        cat_output_rank_2 = len(cat_output_shape_2)
        # First concat must run along its innermost dim, and the second concat
        # must have strictly higher rank.
        if cat_dim != cat_output_rank - 1:
            return False
        if cat_output_rank >= cat_output_rank_2:
            return False
        if not all(
            d1._attrs['values'][0] == d2._attrs['values'][0]
            for d1, d2 in zip(cat_output_shape[:cat_dim], cat_output_shape_2[:cat_dim])
        ):
            return False
        reshape_to_shape = reshape_op._attrs['outputs'][0]._attrs['shape']
        if not all(isinstance(d, (IntImm, IntVar)) for d in reshape_to_shape):
            return False
        if not all(
            d1._attrs['values'][0] == d2._attrs['values'][0]
            for d1, d2 in zip(cat_output_shape[:cat_dim], reshape_to_shape[:cat_dim])
        ):
            return False
        # Every input of the first concat must come from exactly one
        # dynamic_slice op.
        return all(
            x._attrs['src_ops'] is not None
            and len(x._attrs['src_ops']) == 1
            and list(x._attrs['src_ops'])[0]._attrs['op'] == 'dynamic_slice'
            for x in cat_op._attrs['inputs']
        )

    def _update_inputs_outputs(self, cat_op, reshape_op, cat_op_2):
        """Rewire the graph so this fused op replaces the matched chain."""
        from aitemplate.compiler.transform import transform_utils

        idx = -1
        for i, input_tensor in enumerate(cat_op_2._attrs['inputs']):
            if input_tensor == reshape_op._attrs['outputs'][0]:
                idx = i
                break
        assert idx >= 0
        self._attrs['output_accessors'] = [TensorAccessor(reshape_op._attrs['outputs'][0])]
        cat_op_2.remove_input_at(idx)
        transform_utils.remove_single_tensor_op_from_sorted_graph(reshape_op)
        self._attrs['inputs'] = [op._attrs['inputs'][0] for op in self._attrs['slice_ops']]
        cat_op_2_outputs = cat_op_2._attrs['outputs']
        assert len(cat_op_2_outputs) == 1, f"""cat_op_2._attrs["name"]={cat_op_2._attrs['name']!r} may only have one output, but got more cat_op_2_outputs={cat_op_2_outputs!r}"""
        self._attrs['outputs'] = cat_op_2_outputs
        # Accumulate the concat offset of the reshape output among the second
        # concat's original inputs.
        offset = 0
        cat_dim = cat_op_2._attrs['concat_dim']
        orig_idx = -1
        for i, input_tensor in enumerate(cat_op_2._attrs['original_inputs']):
            if input_tensor == reshape_op._attrs['outputs'][0]:
                orig_idx = i
                break
            input_tensor_shape = input_tensor._attrs['shape']
            offset += input_tensor_shape[cat_dim].value()
        # Message fix: was "original_inputsof" (missing space).
        assert orig_idx >= 0, f"""could not find input_tensor._attrs["name"]={input_tensor._attrs['name']!r} in the original_inputs of cat_op_2"""
        self._attrs['output_accessors'][0].update_base_tensor(cat_op_2_outputs[0], cat_dim, offset)
        for x in self._attrs['inputs']:
            x._attrs['dst_ops'].add(self)
        for y in self._attrs['outputs']:
            y._attrs['src_ops'].add(self)
        # Detach the fused-away slice and concat tensors from the graph.
        for op in self._attrs['slice_ops']:
            op._attrs['outputs'][0]._attrs['src_ops'] = StableSet()
            op._attrs['outputs'][0]._attrs['dst_ops'] = StableSet()
        for x in cat_op._attrs['inputs']:
            x._attrs['src_ops'] = StableSet()
            x._attrs['dst_ops'] = StableSet()
        for y in cat_op._attrs['outputs']:
            y._attrs['src_ops'] = StableSet()
            y._attrs['dst_ops'] = StableSet()

    def __init__(self, scatter_dim: int, element_func: Optional[str] = None) -> None:
        """Create a fused op scattering along ``scatter_dim``."""
        super().__init__()
        self._attrs['element_func'] = element_func
        self._attrs['op'] = 'slice_reshape_scatter'
        self._attrs['has_profiler'] = False
        self._attrs['scatter_dim'] = scatter_dim

    @staticmethod
    def make_op(cat_op: Operator, reshape_op: Operator, cat_op_2: Operator) -> Operator:
        """Build the fused op for a validated (cat, reshape, cat) chain."""
        assert slice_reshape_scatter.is_valid(cat_op, reshape_op, cat_op_2)
        element_func = None
        if cat_op_2._attrs['op'] == 'concatenate_tanh':
            element_func = 'fast_tanh'
        scatter_dim = cat_op._attrs['concat_dim']
        new_op = slice_reshape_scatter(scatter_dim, element_func)
        slice_ops = []
        for x in cat_op._attrs['inputs']:
            src_ops = x.src_ops()
            assert len(src_ops) == 1
            slice_ops.append(list(src_ops)[0])
        new_op._attrs['slice_ops'] = slice_ops
        new_op._update_inputs_outputs(cat_op, reshape_op, cat_op_2)
        new_op._set_depth()
        return new_op

    def __call__(self):
        """This op is built by the fusion pass; it is never called directly."""
        raise RuntimeError('op {} cannot be called directly'.format(self._attrs['op']))

    def _get_func(self, fmt_str):
        # Look up the backend codegen function registered for the current target.
        target = backend.target.Target.current()
        func_key = fmt_str.format(target=target.name(), op=self._attrs['op'])
        return registry.get(func_key)

    def gen_function(self) -> str:
        """Generate the backend source code for this op."""
        func = self._get_func('{target}.{op}.gen_function')
        return func(self._attrs, self._attrs['element_func'])
class MeArticleListView(viewsets.ReadOnlyModelViewSet):
    """Read-only listing of the authenticated user's visible articles."""

    queryset = Article.objects.filter(is_show=True).order_by('-add_time')
    serializer_class = ArticleSerializer
    pagination_class = StandardResultsSetPagination
    filter_backends = (DjangoFilterBackend,)
    filter_class = ArticleFilter
    permission_classes = (IsAuthenticated, IsOwnerOr)
    authentication_classes = [JSONWebTokenAuthentication]

    def get_queryset(self):
        """Restrict the queryset to visible articles authored by the requester."""
        return (
            Article.objects.filter(authors_id=self.request.user.id)
            .filter(is_show=True)
            .order_by('-add_time')
        )
class ServeRequest(BaseModel):
    """Request payload describing a prompt managed by the serve app."""

    class Config:
        title = f'ServeRequest for {SERVE_APP_NAME_HUMP}'

    chat_scene: Optional[str] = Field(
        None,
        description='The chat scene, e.g. chat_with_db_execute, chat_excel, chat_with_db_qa.',
        examples=['chat_with_db_execute', 'chat_excel', 'chat_with_db_qa'],
    )
    sub_chat_scene: Optional[str] = Field(
        None,
        description='The sub chat scene.',
        examples=['sub_scene_1', 'sub_scene_2', 'sub_scene_3'],
    )
    prompt_type: Optional[str] = Field(
        None,
        description='The prompt type, either common or private.',
        examples=['common', 'private'],
    )
    prompt_name: Optional[str] = Field(
        None,
        description='The prompt name.',
        examples=['code_assistant', 'joker', 'data_analysis_expert'],
    )
    content: Optional[str] = Field(
        None,
        description='The prompt content.',
        examples=['Write a qsort function in python', 'Tell me a joke about AI', 'You are a data analysis expert.'],
    )
    prompt_desc: Optional[str] = Field(
        None,
        description='The prompt description.',
        examples=['This is a prompt for code assistant.', 'This is a prompt for joker.', 'This is a prompt for data analysis expert.'],
    )
    user_name: Optional[str] = Field(
        None,
        description='The user name.',
        examples=['zhangsan', 'lisi', 'wangwu'],
    )
    sys_code: Optional[str] = Field(
        None,
        description='The system code.',
        examples=['dbgpt', 'auth_manager', 'data_platform'],
    )
# NOTE(review): the class decorator below lost its leading characters during
# extraction ('_decorator(removed, name=...)').  Django's
# ``method_decorator(decorator, name='list')`` pattern matches the argument
# shape exactly; confirm the reconstructed name against version control.
@method_decorator(removed, name='list')
class SubmissionAttributesViewSet(FilterQuerysetMixin, CachedDetailViewSet):
    """Detail view over SubmissionAttributes with filtering and ordering."""

    serializer_class = SubmissionAttributesSerializer

    def get_queryset(self):
        """Return the eager-loaded, filtered, and ordered queryset."""
        queryset = SubmissionAttributes.objects.all()
        queryset = self.serializer_class.setup_eager_loading(queryset)
        filtered_queryset = self.filter_records(self.request, queryset=queryset)
        ordered_queryset = self.order_records(self.request, queryset=filtered_queryset)
        return ordered_queryset
def show_in_file_explorer(path: str) -> bool:
    """Reveal ``path`` in the platform file manager (Explorer or Finder).

    Returns True when a supported platform handler was invoked, False on
    unsupported platforms.  Fix: the function is annotated ``-> bool`` but
    previously fell off the end of every branch and always returned None.
    """
    if sys.platform == 'win32':
        args = []
        if not os.path.isdir(path):
            # '/select,' makes Explorer highlight the file instead of opening it.
            args.append('/select,')
        args.append(QDir.toNativeSeparators(path))
        QProcess.startDetached('explorer', args)
        return True
    elif sys.platform == 'darwin':
        args = [
            '-e', 'tell application "Finder"',
            '-e', 'activate',
            '-e', 'select POSIX file "%s"' % path,
            '-e', 'end tell',
            '-e', 'return',
        ]
        QProcess.execute('/usr/bin/osascript', args)
        return True
    return False
class Solution:
    """LeetCode 1962: remove floor(half) of the largest pile, k times."""

    def minStoneSum(self, piles: List[int], k: int) -> int:
        """Return the minimum total stones after k halving operations.

        Uses a max-heap (negated values, since heapq is a min-heap).
        """
        heap = [-p for p in piles]
        heapq.heapify(heap)
        for _ in range(k):
            largest = -heapq.heappop(heap)
            heapq.heappush(heap, -(largest - largest // 2))
        return -sum(heap)
def get_hit_plots(legend, event):
    """Return the renderers for the legend label under the event position.

    Returns [] when there is no legend, the event is outside it, no label is
    hit, or the label cannot be resolved to plots.

    Fix: the original wrapped ``get_label_at`` in a no-op ``except: raise``
    followed by dead code (``label = None`` after the re-raise).  The try
    block was removed; any exception still propagates exactly as before.
    """
    if legend is None or not legend.is_in(event.x, event.y):
        return []
    label = legend.get_label_at(event.x, event.y)
    if label is None:
        return []
    try:
        ndx = legend._cached_labels.index(label)
        label_name = legend._cached_label_names[ndx]
        renderers = legend.plots[label_name]
        return _ensure_list(renderers)
    except (ValueError, KeyError):
        # Label not present in the caches or plot dict: nothing was hit.
        return []
def find_missing_dependencies(func: Callable, env: dict) -> Iterator[tuple[str, list[str]]]:
    """Yield ``(package, referenced_names)`` pairs that ``func`` uses but the
    virtualenv's requirement list does not mention.

    Does nothing for non-virtualenv environments.
    """
    if env['kind'] != 'virtualenv':
        return

    # Map canonical package name -> names in func's scope that come from it.
    used_modules = defaultdict(list)
    scope = {
        **dill.detect.globalvars(func, recurse=True),
        **dill.detect.freevars(func),
    }
    for name, obj in scope.items():
        if isinstance(obj, IsolatedFunction):
            used_modules['fal'].append(name)
            continue
        module = inspect.getmodule(obj)
        if module is None:
            continue
        possible_package = getattr(module, '__package__', None)
        if possible_package:
            pkg_name, *_ = possible_package.split('.')
        else:
            pkg_name = module.__name__
        used_modules[canonicalize_name(pkg_name)].append(name)

    specified_requirements = set()
    for raw_requirement in env.get('requirements', []):
        try:
            requirement = Requirement(raw_requirement)
        except ValueError:
            # Skip requirement strings that PEP 508 cannot parse.
            continue
        specified_requirements.add(canonicalize_name(requirement.name))

    for module_name, used_names in used_modules.items():
        if module_name not in specified_requirements:
            yield module_name, used_names
class OFPBundleFeaturesStats(ofproto_parser.namedtuple('OFPBundleFeaturesStats', ('capabilities', 'properties'))):
    """Bundle-features reply body: a capabilities bitmap plus a property list.

    Fix: ``parser`` takes ``cls`` as its first parameter and constructs the
    class, but was missing its ``@classmethod`` decorator.
    """

    @classmethod
    def parser(cls, buf, offset):
        """Parse one OFPBundleFeaturesStats from ``buf`` starting at ``offset``."""
        (capabilities,) = struct.unpack_from(ofproto.OFP_BUNDLE_FEATURES_PACK_STR, buf, offset)
        properties = []
        length = ofproto.OFP_BUNDLE_FEATURES_SIZE
        # Properties are packed back-to-back after the fixed-size header.
        rest = buf[offset + length:]
        while rest:
            p, rest = OFPBundleFeaturesProp.parse(rest)
            properties.append(p)
            length += p.length
        bndl = cls(capabilities, properties)
        bndl.length = length
        return bndl
class TestUsage:
    """Tests for ``initializedb.usage``.

    Fix: the two ``@mock.patch`` decorators had been stripped to bare tuples
    during extraction; restored (the bottom-most patch supplies the first
    mock argument, so the order matches ``(self, stdout, exit)``).
    """

    # Local imports keep this edit self-contained; harmless if the module
    # already imports them.
    from unittest import mock
    from io import StringIO

    @mock.patch('bodhi.server.scripts.initializedb.sys.exit')
    @mock.patch('sys.stdout', new_callable=StringIO)
    def test_usage(self, stdout, exit):
        """usage() prints the usage string and exits with status 1."""
        initializedb.usage(['initializedb'])
        assert stdout.getvalue() == 'usage: initializedb <config_uri>\n(example: "initializedb development.ini")\n'
        exit.assert_called_once_with(1)
@pytest.mark.requires_eclipse
@pytest.mark.usefixtures('use_tmpdir', 'init_eclrun_config')
def test_summary_block(source_root):
    """summary_block() is None before a run and a Summary afterwards.

    Fix: the two pytest marks had been stripped to bare '.requires_eclipse' /
    '.usefixtures(...)' fragments; restored with their ``@pytest.mark`` prefix.
    """
    shutil.copy(os.path.join(source_root, 'test-data/eclipse/SPE1.DATA'), 'SPE1.DATA')
    econfig = ecl_config.Ecl100Config()
    erun = ecl_run.EclRun('SPE1.DATA', None)
    ret_value = erun.summary_block()
    assert ret_value is None
    erun.runEclipse(eclrun_config=ecl_config.EclrunConfig(econfig, '2019.3'))
    ecl_sum = erun.summary_block()
    assert isinstance(ecl_sum, Summary)
# NOTE(review): both decorators lost their leading characters during
# extraction ('_blueprint.route', '_required').  Restored as
# ``@ui_blueprint.route`` / ``@login_required`` following the anitya UI
# module's conventions — confirm against version control.
@ui_blueprint.route('/flags/<flag_id>/set/<state>', methods=['POST'])
@login_required
def set_flag_state(flag_id, state):
    """Admin endpoint toggling a project flag between 'open' and 'closed'.

    Aborts with 401 for non-admins, 422 for invalid states, and 404 for
    unknown flags; always redirects back to the flag browser.
    """
    if not is_admin():
        flask.abort(401)
    if state not in ('open', 'closed'):
        flask.abort(422)
    flag = models.ProjectFlag.get(Session, flag_id)
    if not flag:
        flask.abort(404)
    form = anitya.forms.ConfirmationForm()
    if form.validate_on_submit():
        try:
            utilities.set_flag_state(
                Session, flag=flag, state=state, user_id=flask.g.user.username
            )
            flask.flash(f'Flag {flag.id} set to {state}')
        except anitya.lib.exceptions.AnityaException as err:
            flask.flash(str(err), 'errors')
    return flask.redirect(flask.url_for('anitya_ui.browse_flags'))
@pytest.mark.django_db
def test_federal_account_loans_invalid_defc(client, generic_account_data, helpers, elasticsearch_account_index, monkeypatch):
    """An unknown DEF code is rejected with HTTP 400 and a helpful message.

    Fix: the ``@pytest.mark.django_db`` mark had been stripped to '.django_db'.
    """
    setup_elasticsearch_test(monkeypatch, elasticsearch_account_index)
    resp = helpers.post_for_spending_endpoint(client, url, def_codes=['ZZ'])
    assert resp.status_code == status.HTTP_400_BAD_REQUEST
    assert resp.data['detail'] == "Field 'filter|def_codes' is outside valid values ['9', 'A', 'L', 'M', 'N', 'O', 'P']"
class TestBoolean:
    """Tests for the ``boolean`` conversion helper.

    Fix: the parametrize mark had been stripped to a bare '.parametrize(...)'
    fragment; restored as ``@pytest.mark.parametrize``.
    """

    def test_accepted_text(self):
        """Recognised true/false strings convert case-insensitively."""
        for text in ('yes', 'y', 'on', 'true', 't', '1'):
            assert boolean(text)
            assert boolean(text.upper())
        for text in ('no', 'n', 'off', 'false', 'f', '0'):
            assert not boolean(text)
            assert not boolean(text.upper())

    @pytest.mark.parametrize('text', ('a', 'b'))
    def test_unaccepted_text(self, text):
        """Unrecognised strings raise ValueError."""
        with pytest.raises(ValueError):
            boolean(text)

    def test_nonstring(self):
        """Non-string inputs fall back to ordinary truthiness."""
        for obj in (10, [1], {1: 1}):
            assert boolean(obj)
        for obj in (0, [], {}):
            assert not boolean(obj)
# NOTE(review): this block was corrupted during extraction and is NOT valid
# Python.  The call ``re.search('^ url, flags=re.IGNORECASE)`` has lost the
# body and closing quote of its pattern literal plus the ``', url`` argument
# separator (the pattern was presumably a xiainovel URL regex — recover it
# from version control).  ``wantsUrl`` also takes no ``self`` and appears to
# be missing a ``@staticmethod`` decorator.  Code left byte-identical below.
class XiAiNovelPageProcessor(HtmlProcessor.HtmlPageProcessor): wanted_mimetypes = ['text/html'] want_priority = 80 loggerPath = 'Main.Text.XiAiNovel' def wantsUrl(url): if re.search('^ url, flags=re.IGNORECASE): print(("XiAiNovel Wants url: '%s'" % url)) return True return False def preprocessBody(self, soup): badspans = soup.find_all('span', style=re.compile('color\\W?:\\W?#ffffff', re.I)) for bad in badspans: bad.decompose() return soup
class LinkedList:
    """Singly linked list with stack-style push/pop at the head."""

    def __init__(self, values=None):
        """Build a list by pushing each of ``values`` in order (LIFO)."""
        self._head = None
        self._len = 0
        for value in (values if values is not None else []):
            self.push(value)

    def __iter__(self):
        return LinkedIterator(self)

    def __len__(self):
        return self._len

    def head(self):
        """Return the head node; raise EmptyListException when empty."""
        if self._head is None:
            raise EmptyListException('The list is empty.')
        return self._head

    def push(self, value):
        """Prepend ``value`` as the new head."""
        node = Node(value)
        node._next = self._head
        self._head = node
        self._len += 1

    def pop(self):
        """Remove and return the head value; raise EmptyListException when empty."""
        if self._head is None:
            raise EmptyListException('The list is empty.')
        self._len -= 1
        popped = self._head
        self._head = popped.next()
        return popped.value()

    def reversed(self):
        """Return a new LinkedList with the elements in reverse order."""
        return LinkedList(self)
def _makeCostSavingMeasureValues(measure, practice, savings):
    """Create one MeasureValue per month, starting today, from ``savings``.

    Each entry stores the central saving plus a 10th/90th-percentile spread.
    Idiom fix: ``for i in range(len(savings))`` replaced with ``enumerate``.
    """
    for month_offset, saving in enumerate(savings):
        month = datetime.today() + relativedelta(months=month_offset)
        MeasureValue.objects.create(
            measure=measure,
            practice=practice,
            percentile=0.5,
            cost_savings={
                '10': saving * 0.1,
                '50': saving,
                # NOTE(review): '90' scales by 100 while '10' scales by 0.1 —
                # asymmetric (expected * 10?); confirm the intended fixture
                # values before relying on them.
                '90': saving * 100,
            },
            month=month,
        )
def test_secret_no_group(monkeypatch):
    """A Secret without a group is accepted when the plugin does not require one."""
    plugin_mock = Mock()
    plugin_mock.secret_requires_group.return_value = False
    monkeypatch.setattr(
        flytekit.configuration.plugin,
        '_GLOBAL_CONFIG',
        {'plugin': plugin_mock},
    )
    secret = Secret(key='key')
    assert secret.group is None
def GetReposAndCurrBranch(params, verbose=True):
    """Return a list of ``(repo, current_branch)`` pairs for all repos.

    Runs ``git rev-parse --abbrev-ref HEAD`` across the repos (forcing
    non-serial execution) and, when ``verbose``, prints which repos share
    each branch.
    """
    repos_and_curr_branch = []

    def OnOutput(output):
        branch = output.stdout.strip()
        if branch:
            repos_and_curr_branch.append((output.repo, branch))
        elif verbose:
            Print('Unable to update (could not get current branch for: %s)' % (output.repo,))

    from .action_default import Run
    from mu_repo import Params

    # Force parallel execution for the rev-parse calls, restoring afterwards.
    old_serial = params.config.serial
    params.config.serial = False
    Run(
        Params(params.config, ['rev-parse', '--abbrev-ref', 'HEAD'], params.config_file),
        on_output=OnOutput,
    )

    if verbose:
        branch_to_repos = {}
        for repo, branch in repos_and_curr_branch:
            branch_to_repos.setdefault(branch, []).append(repo)
        for branch, repos in iteritems(branch_to_repos):
            Print(
                'Will handle ${START_COLOR}origin %s${RESET_COLOR} for: %s\n'
                % (branch, ', '.join(sorted(repos)))
            )

    params.config.serial = old_serial
    return repos_and_curr_branch
def test_bool_convert():
    """DataType.boolean converts truthy/falsy inputs; junk maps to None."""
    converter = DataType.boolean.value
    assert converter.to_value(1) == True
    assert converter.to_value(0) == False
    assert converter.to_value('True') == True
    assert converter.to_value('False') == False
    # Unrecognised strings convert to None rather than raising.
    assert converter.to_value('NOT A BOOLEAN ') is None
def test_get_latencies_sample(session, request_1, endpoint):
    """The last-day latency sample contains exactly request_1's duration."""
    interval = DateInterval(datetime.utcnow() - timedelta(days=1), datetime.utcnow())
    requests_criterion = create_time_based_sample_criterion(
        interval.start_date(), interval.end_date()
    )
    data = get_latencies_sample(session, endpoint.id, requests_criterion, sample_size=500)
    assert data == [request_1.duration]
# NOTE(review): Python side of a JS<->Python IPC bridge.  It keeps a registry
# ``m`` mapping integer "ffid" handles to live Python objects (slot 0 holds
# builtin entry points exposed to the JS side), a WeakValueDictionary of
# created proxies, and answers messages by name via ``onMessage`` (each
# handler replies through ``self.q`` with a typed payload).  This block was
# flattened to single lines by extraction, so statement nesting (which
# statements sit inside which ``if``/``for``) cannot be reconstructed with
# confidence; the code is left byte-identical.  Re-indent from version
# control before modifying.  Security note: slot 0 deliberately exposes
# ``eval``/``exec``/``open`` to the remote peer — this bridge must only ever
# face a trusted counterpart.
class Bridge(): m = {0: {'python': python, 'open': open, 'fileImport': fileImport, 'eval': eval, 'exec': exec, 'setattr': setattr, 'getattr': getattr, 'Iterate': Iterate, 'tuple': tuple, 'set': set, 'enumerate': enumerate, 'repr': repr}} weakmap = WeakValueDictionary() cur_ffid = 0 def __init__(self, ipc): self.ipc = ipc self.m[0]['sendInspect'] = (lambda x: setattr(self, 'send_inspect', x)) self.send_inspect = True self.q = (lambda r, key, val, sig='': self.ipc.queue({'r': r, 'key': key, 'val': val, 'sig': sig})) self.executor = Executor(self) setattr(os, 'JSPyBridge', Proxy(self.executor, 0)) def assign_ffid(self, what): self.cur_ffid += 1 self.m[self.cur_ffid] = what return self.cur_ffid def make_class(this, name, proxy, bases, overriden): def init(self): for (base_ffid, baseArgs, baseKwargs) in bases: base = this.m[base_ffid] base.__init__(self, *baseArgs, **baseKwargs) def getAttribute(self, attr): if attr.startswith('__'): return object.__getattribute__(self, attr) if attr.startswith('~~'): return super(clas, self).__getattribute__(attr[2:]) if (attr in overriden): return getattr(proxy, attr) return super(clas, self).__getattribute__(attr) def setAttr(self, attr, val): super(clas, self).__setattr__(attr, val) object.__setattr__(self, attr, val) base_classes = [] for (base_ffid, a, kw) in bases: base = this.m[base_ffid] base_classes.append(base) claz = type(base_classes[0]) clas = type(name, tuple(base_classes), {'__init__': init, '__getattribute__': getAttribute, '__setattr__': setAttr}) inst = clas() setattr(proxy, '~class', inst) return inst def makeclass(self, r, ffid, key, params): self.cur_ffid += 1 js_ffid = self.cur_ffid proxy = Proxy(self.executor, js_ffid) self.m[js_ffid] = proxy inst = self.make_class(params['name'], proxy, params['bases'], params['overriden']) py_ffid = self.assign_ffid(inst) self.q(r, 'inst', [js_ffid, py_ffid]) def length(self, r, ffid, keys, args): v = self.m[ffid] for key in keys: if (type(v) in (dict, tuple, list)): v = v[key] 
elif hasattr(v, str(key)): v = getattr(v, str(key)) elif hasattr(v, '__getitem__'): try: v = v[key] except: raise LookupError(f"Property '{fix_key(key)}' does not exist on {repr(v)}") else: raise LookupError(f"Property '{fix_key(key)}' does not exist on {repr(v)}") l = len(v) self.q(r, 'num', l) def init(self, r, ffid, key, args): v = self.m[ffid](*args) ffid = self.assign_ffid(v) self.q(r, 'inst', ffid) def call(self, r, ffid, keys, args, kwargs, invoke=True): v = self.m[ffid] if invoke: for key in keys: t = getattr(v, str(key), None) if t: v = t elif hasattr(v, '__getitem__'): try: v = v[key] except: raise LookupError(f"Property '{fix_key(key)}' does not exist on {repr(v)}") else: raise LookupError(f"Property '{fix_key(key)}' does not exist on {repr(v)}") else: for key in keys: if (type(v) in (dict, tuple, list)): v = v[key] elif hasattr(v, str(key)): v = getattr(v, str(key)) elif hasattr(v, '__getitem__'): try: v = v[key] except: raise LookupError(f"Property '{fix_key(key)}' does not exist on {repr(v)}") else: raise LookupError(f"Property '{fix_key(key)}' does not exist on {repr(v)}") was_class = False if invoke: if inspect.isclass(v): was_class = True v = v(*args, **kwargs) typ = type(v) if (typ is str): self.q(r, 'string', v) return if ((typ is int) or (typ is float) or (v is None) or (v is True) or (v is False)): self.q(r, 'int', v) return if (inspect.isclass(v) or isinstance(v, type)): self.q(r, 'class', self.assign_ffid(v), self.make_signature(v)) return if callable(v): self.q(r, 'fn', self.assign_ffid(v), self.make_signature(v)) return if ((typ is dict) or inspect.ismodule(v) or was_class): self.q(r, 'obj', self.assign_ffid(v), self.make_signature(v)) return if (typ is list): self.q(r, 'list', self.assign_ffid(v), self.make_signature(v)) return if hasattr(v, '__class__'): self.q(r, 'class', self.assign_ffid(v), self.make_signature(v)) return self.q(r, 'void', self.cur_ffid) def get(self, r, ffid, keys, args): o = self.call(r, ffid, keys, [], {}, 
invoke=False) return o def Set(self, r, ffid, keys, args): v = self.m[ffid] (on, val) = args for key in keys: if (type(v) in (dict, tuple, list)): v = v[key] elif hasattr(v, str(key)): v = getattr(v, str(key)) else: try: v = v[key] except: raise LookupError(f"Property '{fix_key(key)}' does not exist on {repr(v)}") if (type(v) in (dict, tuple, list, set)): v[on] = val else: setattr(v, on, val) self.q(r, 'void', self.cur_ffid) def inspect(self, r, ffid, keys, args): v = self.m[ffid] for key in keys: v = (getattr(v, key, None) or v[key]) s = repr(v) self.q(r, '', s) def free(self, r, ffid, key, args): for i in args: if (i not in self.m): continue del self.m[i] def make(self, r, ffid, key, args): self.cur_ffid += 1 p = Proxy(self.executor, self.cur_ffid) self.weakmap[self.cur_ffid] = p self.m[self.cur_ffid] = p self.ipc.queue({'r': r, 'val': self.cur_ffid}) def queue_request(self, request_id, payload, timeout=None): payload['c'] = 'jsi' self.ipc.queue(payload) def queue_request_raw(self, request_id, payload, timeout=None): self.ipc.queue(payload) def make_signature(self, what): if self.send_inspect: return repr(what) return '' def read(self): data = self.ipc.readline() if (not data): exit() j = json.loads(data) return j def pcall(self, r, ffid, key, args, set_attr=False): created = {} def process(json_input, lookup_key): if isinstance(json_input, dict): for (k, v) in json_input.items(): if (isinstance(v, dict) and (lookup_key in v)): lookup = v[lookup_key] if (lookup == ''): self.cur_ffid += 1 proxy = (self.m[v['extend']] if ('extend' in v) else Proxy(self.executor, self.cur_ffid)) self.weakmap[self.cur_ffid] = proxy json_input[k] = proxy created[v['r']] = self.cur_ffid else: json_input[k] = self.m[lookup] else: process(v, lookup_key) elif isinstance(json_input, list): for (k, v) in enumerate(json_input): if (isinstance(v, dict) and (lookup_key in v)): lookup = v[lookup_key] if (lookup == ''): self.cur_ffid += 1 proxy = (self.m[v['extend']] if ('extend' in v) else 
Proxy(self.executor, self.cur_ffid)) self.weakmap[self.cur_ffid] = proxy json_input[k] = proxy created[v['r']] = self.cur_ffid else: json_input[k] = self.m[lookup] else: process(v, lookup_key) process(args, 'ffid') (pargs, kwargs) = args if len(created): self.q(r, 'pre', created) if set_attr: self.Set(r, ffid, key, pargs) else: self.call(r, ffid, key, pargs, (kwargs or {})) def setval(self, r, ffid, key, args): return self.pcall(r, ffid, key, args, set_attr=True) def value(self, r, ffid, keys, args): v = self.m[ffid] for key in keys: t = getattr(v, str(key), None) if (t is None): v = v[key] else: v = t self.q(r, 'ser', v) def onMessage(self, r, action, ffid, key, args): try: return getattr(self, action)(r, ffid, key, args) except Exception: self.q(r, 'error', '', traceback.format_exc()) pass
class InnerContainer:
    """Draggable icon that changes colour in response to drag events.

    NOTE(review): the original was flattened to one line, so whether the
    trailing ``print``/``update`` calls sit inside the ``if`` blocks is
    ambiguous; the nesting below follows the conventional flet example —
    confirm against version control.
    """

    def __init__(self, outer: OuterContainer):
        self.outer = outer
        self.inner_icon = Icon(icons.CIRCLE, color=colors.WHITE54, size=100, tooltip='drag me!')
        self.view = Draggable(
            group='inner',
            content=DragTarget(
                group='inner',
                content=self.inner_icon,
                on_accept=self.drag_accept,
                on_leave=self.drag_leave,
                on_will_accept=self.drag_will_accept,
            ),
            data=self,
        )

    def change_color(self, color: str):
        """Recolour the icon and push the change to the UI."""
        self.inner_icon.color = color
        self.view.update()

    def drag_accept(self, e):
        if e.data == 'true':
            self.change_color(colors.WHITE54)
        print('inner_drag_accept')

    def drag_will_accept(self, e):
        if e.data == 'true':
            self.change_color(colors.BLUE_GREY)
        self.view.update()

    def drag_leave(self, e):
        self.change_color(colors.WHITE54)
        self.view.update()
@pytest.mark.unit
@pytest.mark.parametrize(
    'server_version, cli_version, expected_result',
    [
        ('1.6.0+7.ge953df5', '1.6.0+7.ge953df5', True),
        ('1.6.0+7.ge953df5', '1.6.0+9.ge953df5', False),
        ('1.6.0+7.ge953df5', '1.6.0+7.ge953df5.dirty', True),
        ('1.6.0+7.ge953df5.dirty', '1.6.0+7.ge953df5', True),
    ],
)
def test_check_server_version_comparisons(server_version: str, cli_version: str, expected_result: bool) -> None:
    """compare_application_versions treats '.dirty' suffixes as equal but not
    differing commit counts.

    Fixes: the ``@pytest.mark`` decorators had been stripped to bare '.unit' /
    '.parametrize' fragments, and ``expected_result`` was mis-annotated ``str``
    although every parametrized value is a bool.
    """
    actual_result = utils.compare_application_versions(server_version, cli_version)
    assert expected_result == actual_result
def find_fmriname(settings, bold_preproc):
    """Build the derivative fMRI name for ``bold_preproc`` from its BIDS entities."""
    derivs_layout = get_derivatives_layout(settings.func_derivs_dir)
    pattern = '[ses-{session}_]task-{task}[_acq-{acquisition}][_rec-{reconstruction}][_run-{run}][_desc-{desc}]'
    fmriname = derivs_layout.build_path(bold_preproc.entities, pattern)
    # Presumably restores the zero-padding of run numbers (e.g. run-01) that
    # build_path drops — TODO confirm against the layout's behaviour.
    if '_run-0' in bold_preproc.path:
        fmriname = fmriname.replace('_run-', '_run-0')
    return fmriname
def notify_outdated_chroots_function(dry_run, email_filter, all):
    """Notify copr owners about their outdated chroots.

    ``all`` mirrors the CLI flag name and (intentionally) shadows the builtin;
    it is part of the public signature and kept unchanged.
    Idiom fix: the redundant identity comprehension
    ``[chroot for chroot in chroots]`` is replaced by ``list(chroots)``.
    """
    if not dry_run:
        dev_instance_warning(email_filter)
    notifier = DryRunNotifier() if dry_run else Notifier()
    outdated = coprs_logic.CoprChrootsLogic.filter_outdated(
        coprs_logic.CoprChrootsLogic.get_multiple()
    )
    user_chroots_map = get_user_chroots_map(outdated, email_filter).items()
    for i, (user, chroots) in enumerate(user_chroots_map, start=1):
        # Copy before filtering so the map's lists stay untouched.
        chroots = filter_chroots(list(chroots), all)
        if not chroots:
            continue
        chroots.sort(key=lambda x: x.copr.full_name)
        notifier.notify(user, chroots)
        # Commit periodically to keep transactions bounded.
        if i % 1000 == 0:
            notifier.commit()
    notifier.commit()
def init_test(port, retries):
    """Pulse the SATA PHY enable line until the status register reports ready.

    Prints one dot per failed attempt, then either
    'Success (retries: N)' or 'Failed'.  Performs no attempts when
    retries == 0 and reports failure.
    """
    bus = RemoteClient(port=port)
    bus.open()
    init_done = False
    i = 0  # fixed: previously unbound (NameError) when retries == 0
    for i in range(retries):
        # Toggle the PHY off/on and give it a moment to train.
        bus.regs.sata_phy_enable.write(0)
        bus.regs.sata_phy_enable.write(1)
        time.sleep(0.01)
        # Bit 0 of the status register signals initialization done.
        if (bus.regs.sata_phy_status.read() & 1) != 0:
            init_done = True
            break
        print('.', end='')
        sys.stdout.flush()
    print('')
    print('Success (retries: {:d})'.format(i) if init_done else 'Failed')
    bus.close()
class NotificationContentSchema(NormalSchema):
    """Serializer for notification content; enriches the dump with the target object."""

    type = fields.Str(dump_only=True)
    target_type = fields.Str(dump_only=True)
    target_id = fields.Int(dump_only=True)
    target_action = fields.Str(dump_only=True)
    actors = fields.Nested(NotificationActorSchema, many=True)

    # NOTE(review): the decorator was garbled to `_dump(pass_original=True)` in
    # the source; marshmallow's @post_dump is the hook matching this signature
    # and usage — confirm against the original file.
    @post_dump(pass_original=True)
    def add_target(self, data, obj):
        """Attach a serialized form of the notification target (and its event) to the dump."""
        if obj.target is None:
            # Target was deleted; emit an empty payload rather than a partial one.
            return {}
        event = None
        if obj.target_type == 'Order':
            data['order'] = normalize_jsonapi_data(
                OrderSchema(only=('id', 'amount', 'identifier')), obj.target)
            event = obj.target.event
        elif obj.target_type == 'Session':
            data['session'] = normalize_jsonapi_data(
                SessionSchema(only=('id', 'title')), obj.target)
            event = obj.target.event
        elif obj.target_type == 'EventInvoice':
            data['invoice'] = normalize_jsonapi_data(
                EventInvoiceSchema(only=('id', 'identifier', 'amount', 'issued_at', 'due_at', 'status')),
                obj.target)
            event = obj.target.event
        elif obj.target_type == 'RoleInvite':
            data['event_invite'] = normalize_jsonapi_data(RoleInviteSchema(), obj.target)
            data['role'] = normalize_jsonapi_data(RoleSchema(), obj.target.role)
            event = obj.target.event
        if event:
            data['event'] = normalize_jsonapi_data(
                EventSchemaPublic(only=('id', 'name', 'identifier')), obj.target.event)
        return data
class Baz(HasTraits):
    """Test fixture: relays 'cause'/'effect' trait events into a recording list."""

    bar = Instance(Bar)
    test = Any  # owning test case; must expose an `events_delivered` list

    def _bar_changed(self, obj, old, new):
        # Static notifier fired when `bar` is replaced: rewire the dynamic
        # listeners from the old Bar (and its nested foo) to the new one.
        if ((old is not None) and (old is not new)):
            old.on_trait_change(self._effect_changed, name='effect', remove=True)
            old.foo.on_trait_change(self._cause_changed, name='cause', remove=True)
        if (new is not None):
            new.foo.on_trait_change(self._cause_changed, name='cause')
            new.on_trait_change(self._effect_changed, name='effect')

    def _cause_changed(self, obj, name, old, new):
        # Record delivery; the '_caused_changed' spelling is the string the
        # surrounding tests expect, so it is preserved verbatim.
        self.test.events_delivered.append('Baz._caused_changed')

    def _effect_changed(self, obj, name, old, new):
        self.test.events_delivered.append('Baz._effect_changed')
def get_unpack_status(file_path: str, binary: bytes, extracted_files: List[Path], meta_data: Dict, config: ConfigParser):
    """Record whether `binary` looks packed/unpacked, or how much data unpacking lost."""
    meta_data['summary'] = []
    meta_data['entropy'] = avg_entropy(binary)
    nothing_unpacked = (not extracted_files) and meta_data.get('number_of_excluded_files', 0) == 0
    if nothing_unpacked:
        mime = get_file_type_from_path(file_path)['mime']
        whitelisted = mime in read_list_from_config(config, 'ExpertSettings', 'compressed_file_types')
        # Short-circuit: the entropy classifier only runs for non-whitelisted types.
        if whitelisted or not is_compressed(
            binary,
            compress_entropy_threshold=config.getfloat('ExpertSettings', 'unpack_threshold'),
            classifier=avg_entropy,
        ):
            meta_data['summary'] = ['unpacked']
        else:
            meta_data['summary'] = ['packed']
    else:
        # Something was extracted: quantify data lost during unpacking instead.
        _detect_unpack_loss(binary, extracted_files, meta_data,
                            config.getint('ExpertSettings', 'header_overhead'))
class TestGymHandler(GymTestCase):
    """Unit tests for the gym skill's GymHandler (setup, message handling, teardown)."""

    is_agent_to_agent_messages = False

    def test__init__(self):
        # No task is registered before setup() runs.
        assert (self.gym_handler._task_id is None)

    def test_setup(self):
        # setup() must enqueue the gym task and remember its id, sending nothing.
        with patch.object(self.task_manager, 'enqueue_task', return_value=self.mocked_task_id) as mocked_enqueue_task:
            with patch.object(self.logger, 'log') as mock_logger:
                assert (self.gym_handler.setup() is None)
        self.assert_quantity_in_outbox(0)
        mock_logger.assert_any_call(logging.INFO, 'Gym handler: setup method called.')
        mocked_enqueue_task.assert_any_call(self.gym_handler.task)
        assert (self.gym_handler._task_id == self.mocked_task_id)

    def test_handle_unidentified_dialogue(self):
        """A message with an unknown dialogue reference triggers an ERROR reply."""
        incorrect_dialogue_reference = ('', '')
        incoming_message = self.build_incoming_message(
            message_type=GymMessage,
            dialogue_reference=incorrect_dialogue_reference,
            performative=GymMessage.Performative.RESET,
        )
        with patch.object(self.logger, 'log') as mock_logger:
            self.gym_handler.handle(incoming_message)
        mock_logger.assert_any_call(
            logging.INFO,
            f'received invalid gym message={incoming_message}, unidentified dialogue.',
        )
        # Exactly one DefaultMessage.ERROR goes back to the sender.
        self.assert_quantity_in_outbox(1)
        message = self.get_message_from_outbox()
        (has_attributes, error_str) = self.message_has_attributes(
            actual_message=message,
            message_type=DefaultMessage,
            performative=DefaultMessage.Performative.ERROR,
            to=incoming_message.sender,
            sender=str(self.skill.skill_context.skill_id),
            error_code=DefaultMessage.ErrorCode.INVALID_DIALOGUE,
            error_msg='Invalid dialogue.',
            error_data={'gym_message': incoming_message.encode()},
        )
        assert has_attributes, error_str

    def test_handle_percept_i(self):
        """PERCEPT on the active dialogue is forwarded to the proxy env queue."""
        gym_dialogue = cast(
            GymDialogue,
            self.prepare_skill_dialogue(dialogues=self.gym_dialogues, messages=self.list_of_gym_messages[:3]),
        )
        incoming_message = cast(
            GymMessage,
            self.build_incoming_message_for_skill_dialogue(
                dialogue=gym_dialogue,
                performative=GymMessage.Performative.PERCEPT,
                step_id=self.mocked_step_id,
                observation=self.mocked_observation,
                reward=self.mocked_reward,
                done=True,
                info=self.mocked_info,
            ),
        )
        self.gym_handler.task.proxy_env._active_dialogue = gym_dialogue
        with patch.object(self.gym_handler.task.proxy_env_queue, 'put') as mocked_put:
            self.gym_handler.handle(incoming_message)
        mocked_put.assert_any_call(incoming_message)

    def test_handle_percept_ii(self):
        """PERCEPT on a non-active dialogue is dropped with a warning."""
        gym_dialogue = cast(
            GymDialogue,
            self.prepare_skill_dialogue(dialogues=self.gym_dialogues, messages=self.list_of_gym_messages[:3]),
        )
        incoming_message = cast(
            GymMessage,
            self.build_incoming_message_for_skill_dialogue(
                dialogue=gym_dialogue,
                performative=GymMessage.Performative.PERCEPT,
                step_id=self.mocked_step_id,
                observation=self.mocked_observation,
                reward=self.mocked_reward,
                done=True,
                info=self.mocked_info,
            ),
        )
        # The handler's active dialogue is a DIFFERENT dialogue.
        gym_dialogue_ii = cast(
            GymDialogue,
            self.prepare_skill_dialogue(dialogues=self.gym_dialogues, messages=self.list_of_gym_messages[:1]),
        )
        self.gym_handler.task.proxy_env._active_dialogue = gym_dialogue_ii
        with patch.object(self.logger, 'log') as mock_logger:
            self.gym_handler.handle(incoming_message)
        mock_logger.assert_any_call(logging.WARNING, 'gym dialogue not active dialogue.')

    def test_handle_status_i(self):
        """STATUS on the active dialogue is forwarded to the proxy env queue."""
        gym_dialogue = cast(
            GymDialogue,
            self.prepare_skill_dialogue(dialogues=self.gym_dialogues, messages=self.list_of_gym_messages[:1]),
        )
        incoming_message = cast(
            GymMessage,
            self.build_incoming_message_for_skill_dialogue(
                dialogue=gym_dialogue,
                performative=GymMessage.Performative.STATUS,
                content=self.mocked_status_content,
            ),
        )
        self.gym_handler.task.proxy_env._active_dialogue = gym_dialogue
        with patch.object(self.gym_handler.task.proxy_env_queue, 'put') as mocked_put:
            self.gym_handler.handle(incoming_message)
        mocked_put.assert_any_call(incoming_message)

    def test_handle_status_ii(self):
        """STATUS on a non-active dialogue is dropped with a warning."""
        gym_dialogue = cast(
            GymDialogue,
            self.prepare_skill_dialogue(dialogues=self.gym_dialogues, messages=self.list_of_gym_messages[:1]),
        )
        incoming_message = cast(
            GymMessage,
            self.build_incoming_message_for_skill_dialogue(
                dialogue=gym_dialogue,
                performative=GymMessage.Performative.STATUS,
                content={'reset': 'failure'},
            ),
        )
        gym_dialogue_ii = cast(
            GymDialogue,
            self.prepare_skill_dialogue(dialogues=self.gym_dialogues, messages=self.list_of_gym_messages[:1]),
        )
        self.gym_handler.task.proxy_env._active_dialogue = gym_dialogue_ii
        with patch.object(self.logger, 'log') as mock_logger:
            self.gym_handler.handle(incoming_message)
        mock_logger.assert_any_call(logging.WARNING, 'gym dialogue not active dialogue.')

    def test_handle_invalid(self):
        """A RESET arriving at the handler is an unsupported performative and is logged."""
        incoming_message = self.build_incoming_message(
            message_type=GymMessage, performative=GymMessage.Performative.RESET
        )
        with patch.object(self.logger, 'log') as mock_logger:
            self.gym_handler.handle(incoming_message)
        gym_dialogue = cast(GymDialogue, self.gym_dialogues.get_dialogue(incoming_message))
        mock_logger.assert_any_call(
            logging.WARNING,
            f'cannot handle gym message of performative={incoming_message.performative} in dialogue={gym_dialogue}.',
        )

    def test_teardown(self):
        """teardown() tears the task down and warns when the task result was unsuccessful."""
        self.gym_handler._task_id = self.mocked_task_id
        mock_task_result = Mock(wraps=ApplyResult)
        mock_task_result.ready.return_value = True
        mock_task_result.successful.return_value = False
        with patch.object(self.gym_handler.task, 'teardown') as mocked_gym_task_teardown:
            with patch.object(self.task_manager, 'get_task_result', return_value=mock_task_result) as mocked_get_result:
                with patch.object(self.logger, 'log') as mock_logger:
                    assert (self.gym_handler.teardown() is None)
        self.assert_quantity_in_outbox(0)
        mock_logger.assert_any_call(logging.INFO, 'Gym handler: teardown method called.')
        mocked_gym_task_teardown.assert_called_once()
        mocked_get_result.assert_any_call(self.mocked_task_id)
        mock_logger.assert_any_call(logging.WARNING, 'Task not successful!')
class TraitGridCellAdapter(GridCellEditor):
    """Adapts a traitsui editor factory into a wx.grid cell editor."""

    def __init__(self, trait_editor_factory, obj, name, description,
                 handler=None, context=None, style='simple',
                 width=(- 1.0), height=(- 1.0)):
        # width/height <= 1.0 are interpreted as fractions of the grid size in
        # Create(); values > 1.0 are absolute pixels.
        super().__init__()
        self._factory = trait_editor_factory
        self._style = style
        self._width = width
        self._height = height
        self._editor = None
        self._obj = obj
        self._name = name
        self._description = description
        self._handler = handler
        self._context = context

    def Create(self, parent, id, evtHandler):
        """Instantiate the traitsui editor and register its control with the grid."""
        from traitsui.api import UI, default_handler

        # Guard against double creation.
        if hasattr(self, '_control'):
            return
        handler = self._handler
        if (handler is None):
            handler = default_handler()
        if (self._context is None):
            ui = UI(handler=handler)
        else:
            # Re-root the context on the edited object, keeping a handle to
            # the table editor's original object.
            context = self._context.copy()
            context['table_editor_object'] = context['object']
            context['object'] = self._obj
            ui = UI(handler=handler, context=context)
        factory = self._factory
        # Share undo history with the owning UI, if any.
        if (factory._ui is not None):
            ui.history = factory._ui.history
        factory.is_grid_cell = True
        # Pick e.g. factory.simple_editor / factory.custom_editor by style.
        factory_method = getattr(factory, (self._style + '_editor'))
        self._editor = factory_method(ui, self._obj, self._name, self._description, parent)
        self._editor.prepare(parent)
        self._control = control = self._editor.control
        (grid, row, col) = getattr(self, '_grid_info', (None, None, None))
        (width, height) = control.GetBestSize()
        # Resolve the requested height: pixels if > 1.0, else a fraction of
        # the grid's client height.
        self_height = self._height
        if (self_height > 1.0):
            height = int(self_height)
        elif ((self_height >= 0.0) and (grid is not None)):
            height = int((self_height * grid.GetSize().Get()[1]))
        # Same for the width.
        self_width = self._width
        if (self_width > 1.0):
            width = int(self_width)
        elif ((self_width >= 0.0) and (grid is not None)):
            width = int((self_width * grid.GetSize().Get()[0]))
        (self._edit_width, self._edit_height) = (width, height)
        control = get_control(control)
        if (control is not None):
            self.SetControl(control)

    def SetSize(self, rect):
        """Size the editor; grow the grid row/column if the editor needs more room."""
        changed = False
        (edit_width, edit_height) = (rect.width, rect.height)
        (grid, row, col) = getattr(self, '_grid_info', (None, None, None))
        if ((grid is not None) and self._editor.scrollable):
            # Width: temporarily widen the column, remembering the original
            # size in grid._restore_width so EndEdit can put it back.
            (edit_width, cur_width) = (self._edit_width, grid.GetColSize(col))
            restore_width = getattr(grid, '_restore_width', None)
            if (restore_width is not None):
                cur_width = restore_width
            if ((edit_width > cur_width) or (restore_width is not None)):
                edit_width = max(edit_width, cur_width)
                grid._restore_width = cur_width
                # +1 pixel, plus one more for the first column's border.
                grid.SetColSize(col, ((edit_width + 1) + (col == 0)))
                changed = True
            else:
                edit_width = cur_width
            # Height: same dance with grid._restore_height.
            (edit_height, cur_height) = (self._edit_height, grid.GetRowSize(row))
            restore_height = getattr(grid, '_restore_height', None)
            if (restore_height is not None):
                cur_height = restore_height
            if ((edit_height > cur_height) or (restore_height is not None)):
                edit_height = max(edit_height, cur_height)
                grid._restore_height = cur_height
                grid.SetRowSize(row, ((edit_height + 1) + (row == 0)))
                changed = True
            else:
                edit_height = cur_height
            if changed:
                grid.ForceRefresh()
        # Offset by one pixel so the cell border stays visible.
        self._control.SetSize((rect.x + 1), (rect.y + 1), edit_width, edit_height, SIZE_ALLOW_MINUS_ONE)
        if changed:
            grid.MakeCellVisible(grid.GetGridCursorRow(), grid.GetGridCursorCol())

    def Show(self, show, attr):
        """Show/hide the editor, but only once it has actually been created."""
        if self.IsCreated():
            super().Show(show, attr)

    def PaintBackground(self, rect, attr):
        """Intentionally a no-op: the editor control covers the whole cell."""

    def BeginEdit(self, row, col, grid):
        """Sync the editor with the trait value and give it focus."""
        self._editor.update_editor()
        control = self._control
        control.Show(True)
        control.SetFocus()
        if isinstance(control, wx.TextCtrl):
            # Select all text for immediate overtyping.
            control.SetSelection((- 1), (- 1))

    def EndEdit(self, row, col, grid):
        """Hide the editor and restore any row/column sizes grown in SetSize."""
        self._control.Show(False)
        changed = False
        (grid, row, col) = self._grid_info
        if grid._no_reset_col:
            # Another editor is taking over this column; skip the reset once.
            grid._no_reset_col = False
        else:
            width = getattr(grid, '_restore_width', None)
            if (width is not None):
                del grid._restore_width
                grid.SetColSize(col, width)
                changed = True
        if grid._no_reset_row:
            grid._no_reset_row = False
        else:
            height = getattr(grid, '_restore_height', None)
            if (height is not None):
                del grid._restore_height
                grid.SetRowSize(row, height)
                changed = True
        if changed:
            grid.ForceRefresh()

    def Reset(self):
        """Intentionally a no-op: the trait itself holds the authoritative value."""

    def StartingKey(self, evt):
        """Intentionally a no-op: keystrokes are not forwarded to the editor."""

    def StartingClick(self):
        """Intentionally a no-op: clicks are not forwarded to the editor."""

    def Destroy(self):
        """Dispose the wrapped traitsui editor when the grid destroys the cell editor."""
        self._editor.dispose()

    def Clone(self):
        # NOTE(review): the clone drops handler/context/width/height —
        # presumably acceptable for wx's cloning use; confirm if cloned cells
        # misbehave.
        return TraitGridCellAdapter(self._factory, self._obj, self._name,
                                    self._description, style=self._style)

    def dispose(self):
        if (self._editor is not None):
            self._editor.dispose()
class btnHeader(QtWidgets.QPushButton):
    """Header button that drags its parent window around and persists the position."""

    def __init__(self, parent=None):
        super(btnHeader, self).__init__(parent)
        self.__parent = parent
        self.__settings = QSettings(QSettings.NativeFormat, QSettings.UserScope,
                                    ORG_NAME, APP_NAME)

    def mousePressEvent(self, e):
        # Remember the grab point inside the button.
        self.x0 = e.x()
        self.y0 = e.y()

    def mouseMoveEvent(self, e):
        # Move the parent, clamped low-then-high so it stays inside the container.
        container = self.__parent.parent()
        limit_x = container.width() - self.__parent.width()
        limit_y = container.height() - self.__parent.height()
        new_x = min(limit_x, max(0, self.__parent.pos().x() + (e.x() - self.x0)))
        new_y = min(limit_y, max(0, self.__parent.pos().y() + (e.y() - self.y0)))
        self.__parent.move(new_x, new_y)

    def mouseReleaseEvent(self, e):
        # Persist the final position for the next session.
        pos = self.__parent.pos()
        self.__settings.setValue('ProgressBox/posX', pos.x())
        self.__settings.setValue('ProgressBox/posY', pos.y())
class OptionPlotoptionsColumnrangeSonificationContexttracksMappingHighpass(Options):
    """Generated accessor class for the columnrange sonification
    contextTracks mapping `highpass` option subtree.

    NOTE(review): sibling generated classes appear to have lost @property
    decorators in transit; confirm these accessors against the generator's
    output before relying on call style.
    """

    def frequency(self) -> 'OptionPlotoptionsColumnrangeSonificationContexttracksMappingHighpassFrequency':
        # Returns (creating on first access) the nested `frequency` options.
        return self._config_sub_data('frequency', OptionPlotoptionsColumnrangeSonificationContexttracksMappingHighpassFrequency)

    def resonance(self) -> 'OptionPlotoptionsColumnrangeSonificationContexttracksMappingHighpassResonance':
        # Returns (creating on first access) the nested `resonance` options.
        return self._config_sub_data('resonance', OptionPlotoptionsColumnrangeSonificationContexttracksMappingHighpassResonance)
# NOTE(review): the decorator was garbled to bare `.usefixtures(...)`;
# restored as a pytest mark, which the fixture usage implies.
@pytest.mark.usefixtures('copy_poly_case')
def test_case_tool_init_updates_the_case_info_tab(qtbot, storage):
    """Initializing from scratch flips the case-info tab from UNDEFINED to INITIALIZED."""
    config = ErtConfig.from_file('poly.ert')
    notifier = ErtNotifier(config.config_path)
    notifier.set_storage(storage)
    ensemble = storage.create_experiment(
        parameters=config.ensemble_config.parameter_configuration
    ).create_ensemble(ensemble_size=config.model_config.num_realizations, name='default')
    notifier.set_current_case(ensemble)
    tool = CaseInitializationConfigurationPanel(config, notifier, config.model_config.num_realizations)
    html_edit = tool.findChild(QTextEdit, name='html_text')
    assert not html_edit.toPlainText()
    # Tab index 2 is the case-info view; before initialization it reports UNDEFINED.
    tool.setCurrentIndex(2)
    assert 'UNDEFINED' in html_edit.toPlainText()
    # Tab index 1 hosts the initialize-from-scratch button.
    tool.setCurrentIndex(1)
    qtbot.mouseClick(tool.findChild(QPushButton, name='initialize_from_scratch_button'), Qt.LeftButton)
    tool.setCurrentIndex(2)
    assert 'INITIALIZED' in html_edit.toPlainText()
def assert_same_xarray(x, y):
    """Assert that two xarray Datasets agree at every level of strictness."""
    # Dataset-level equality, loosest to strictest.
    assert x.broadcast_equals(y)
    assert x.equals(y)
    assert x.identical(y)
    # Same structure: variable count/names, dimensions, coordinates.
    assert len(x) == len(y)
    assert set(x.keys()) == set(y.keys())
    assert len(x.dims) == len(y.dims)
    assert len(x.coords) == len(y.coords)
    # Per-variable: identical shapes and element-wise values.
    for name in x.keys():
        xv, yv = x[name].values, y[name].values
        assert xv.shape == yv.shape
        assert np.all(xv == yv)
class OptionPlotoptionsVariwideSonificationContexttracksMappingVolume(Options):
    """Generated accessors for the variwide sonification contextTracks
    mapping `volume` options.

    NOTE(review): each getter/setter pair shared a name, so the later `def`
    silently shadowed the earlier one (the getters were unreachable).  The
    @property/@<name>.setter decorators implied by that pattern have been
    restored — confirm against the generator's output.
    """

    @property
    def mapFunction(self):
        """Mapping function for the audio parameter."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Point property to map the volume to."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Upper bound of the mapped volume range."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Lower bound of the mapped volume range."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Data extent the mapping is computed within."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class Updater(object):
    """Self-update XSSer by running `git pull` in the project checkout."""

    def __init__(self):
        # NOTE(review): both URL literals were truncated in the source; these
        # are the project's published clone URLs — confirm against upstream.
        GIT_REPOSITORY = 'https://code.03c8.net/epsylon/xsser'
        GIT_REPOSITORY2 = 'https://github.com/epsylon/xsser'
        rootDir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', ''))
        # Fixed: look for .git at the project root (rootDir was computed but
        # the check previously ran against the current working directory).
        if (not os.path.exists(os.path.join(rootDir, '.git'))):
            print('Not any .git repository found!\n')
            print(('=' * 30))
            print('\nTo have working this feature, you should clone XSSer with:\n')
            print(('$ git clone %s' % GIT_REPOSITORY))
            print('\nAlso you can try this other mirror:\n')
            print((('$ git clone %s' % GIT_REPOSITORY2) + '\n'))
        else:
            checkout = execute('git checkout . && git pull', shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]
            print(('[Info] [GitHub] Reply:\n\n' + checkout.decode('utf-8')))
            # git >= 2.15 prints 'Already up to date.' (no hyphens); accept both.
            if ((b'Already up-to-date' not in checkout) and (b'Already up to date' not in checkout)):
                print('[Info] [AI] Congratulations!! XSSer has been updated... ;-)\n')
            else:
                print("[Info] [AI] Your XSSer doesn't need to be updated... ;-)\n")
class ouraActivitySamples(Base):
    """ORM model for per-minute Oura ring activity samples."""

    __tablename__ = 'oura_activity_samples'

    # Sample timestamp in local time; primary key, indexed for range queries.
    timestamp_local = Column('timestamp_local', DateTime(), index=True, primary_key=True)
    # Date of the Oura daily summary this sample belongs to.
    summary_date = Column('summary_date', Date())
    # One-minute MET (metabolic equivalent) reading.
    met_1min = Column('met_1min', Float())
    # Five-minute activity class code and its short textual description.
    class_5min = Column('class_5min', Integer())
    class_5min_desc = Column('class_5min_desc', String(10))
class UniqueNameProvider():
    """Issues names that are unique across the provider's lifetime.

    The first request for a name returns it unchanged; every subsequent
    request for the same name returns 'name__N' with N = 2, 3, ...
    """

    def __init__(self):
        self._name_to_count: Dict[str, int] = {}

    def get_unique_name(self, name: str) -> str:
        """Return `name`, suffixed with a counter if it was issued before."""
        count = self._name_to_count.get(name, 0) + 1
        self._name_to_count[name] = count
        return name if count == 1 else f'{name}__{count}'
def extractNirvanatranslationsBlogspotCom(item):
    """Map a scraped blog post to a release message.

    Returns None for posts without chapter/volume info or previews,
    a release message for recognized tags, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # (tag to look for, series name to report, translation type)
    known_tags = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tag, series_name, tl_type in known_tags:
        if tag in item['tags']:
            return buildReleaseMessageWithType(
                item, series_name, vol, chp,
                frag=frag, postfix=postfix, tl_type=tl_type,
            )
    return False
# NOTE(review): the decorator was garbled to bare `.parametrize(...)`;
# restored as a pytest mark, which the signature implies.
@pytest.mark.parametrize(
    'test_file_high_nonce',
    [
        'ttNonce/TransactionWithHighNonce64Minus1.json',
        'ttNonce/TransactionWithHighNonce64.json',
        'ttNonce/TransactionWithHighNonce64Plus1.json',
    ],
)
def test_high_nonce(test_file_high_nonce: str) -> None:
    """Transactions with a nonce at or beyond the 64-bit bound must fail validation."""
    test = load_spurious_dragon_transaction(test_dir, test_file_high_nonce)
    tx = rlp.decode_to(Transaction, test['tx_rlp'])
    # `is False` rather than `== False` — validate_transaction returns a bool.
    assert validate_transaction(tx) is False
class TorchStepStateCritic(TorchStateCritic):
    """State critic with one value network per sub-step of a structured environment."""

    # NOTE(review): decorators were garbled to bare '(StateCritic)' /
    # '(TorchStateCritic)' in the source; restored as @override(...), the
    # interface-annotation convention this hierarchy implies — confirm
    # against the original file.
    @override(StateCritic)
    def predict_values(self, critic_input: StateCriticInput) -> StateCriticOutput:
        """Compute (attached and detached) values for every sub-step input."""
        critic_output = StateCriticOutput()
        for critic_step_input in critic_input:
            # Each sub-step key selects its own value network; take the scalar
            # value from the trailing dimension.
            value = self.networks[critic_step_input.actor_id.step_key](
                critic_step_input.tensor_dict)['value'][..., 0]
            critic_output.append(StateCriticStepOutput(
                values=value,
                detached_values=value.detach(),
                actor_id=critic_step_input.actor_id,
            ))
        return critic_output

    @override(TorchStateCritic)
    def num_critics(self) -> int:
        """Number of per-sub-step critic networks."""
        return self._num_critics

    @override(TorchStateCritic)
    def compute_structured_return(self, gamma: float, gae_lambda: float,
                                  rewards: List[torch.Tensor],
                                  values: List[torch.Tensor],
                                  dones: torch.Tensor) -> List[torch.Tensor]:
        """Compute the return independently for each sub-step's reward/value pair."""
        returns = []
        for substep_rewards, substep_values in zip(rewards, values):
            sub_step_return = self.compute_return(
                gamma=gamma, gae_lambda=gae_lambda,
                rewards=substep_rewards, values=substep_values, dones=dones)
            returns.append(sub_step_return)
        return returns
class TestLaunchEndToEnd(AEATestCaseMany):
    """End-to-end `aea launch`: one agent registers a service, another searches and finds it."""

    key = 'seller_service'
    value = None  # randomized per run so the search only matches this test's agent
    registration_agent_connection = {
        'delegate_uri': '127.0.0.1:11011',
        'entry_peers': [],
        'ledger_id': 'fetchai',
        'local_uri': '127.0.0.1:9011',
        'log_file': 'libp2p_node.log',
        'public_uri': '127.0.0.1:9011',
    }
    search_agent_connection = {
        'delegate_uri': '127.0.0.1:11012',
        'entry_peers': [],
        'ledger_id': 'fetchai',
        'local_uri': '127.0.0.1:9012',
        'log_file': 'libp2p_node.log',
        'public_uri': '127.0.0.1:9012',
    }

    # NOTE(review): decorator was garbled to bare `.integration`; restored as
    # the pytest mark this test suite convention implies.
    @pytest.mark.integration
    def test_end_to_end(self):
        """Launch both agents and assert the search agent finds the registered service."""
        # --- registration agent: fetch, configure connection/service/storage ---
        registration_agent_name = 'registration_agent'
        self.value = uuid.uuid4().hex
        self.fetch_agent('fetchai/simple_service_registration',
                         agent_name=registration_agent_name, is_local=True)
        self.run_cli_command('config', 'set', 'vendor.fetchai.connections.p2p_libp2p.config',
                             '--type', 'dict', json.dumps(self.registration_agent_connection),
                             cwd=registration_agent_name)
        self.run_cli_command('config', 'set',
                             'vendor.fetchai.skills.simple_service_registration.models.strategy.args.service_data',
                             '--type', 'dict', json.dumps({'key': self.key, 'value': self.value}),
                             cwd=registration_agent_name)
        self.run_cli_command('config', 'set', 'vendor.fetchai.connections.soef.config.token_storage_path',
                             os.path.join(self.t, registration_agent_name, 'soef_key.txt'),
                             cwd=registration_agent_name)
        storage_file_name = os.path.abspath(os.path.join(registration_agent_name, 'test.db'))
        self.run_cli_command('config', 'set', 'agent.storage_uri',
                             f'sqlite://{storage_file_name}', cwd=registration_agent_name)
        # --- search agent: fetch, point its query at the registered service ---
        search_agent_name = 'search_agent'
        self.fetch_agent('fetchai/simple_service_search', agent_name=search_agent_name, is_local=True)
        self.run_cli_command('config', 'set', 'vendor.fetchai.connections.p2p_libp2p.config',
                             '--type', 'dict', json.dumps(self.search_agent_connection),
                             cwd=search_agent_name)
        self.run_cli_command('config', 'set',
                             'vendor.fetchai.skills.simple_service_search.models.strategy.args.search_query',
                             '--type', 'dict',
                             json.dumps({'constraint_type': '==', 'search_key': self.key,
                                         'search_value': self.value}),
                             cwd=search_agent_name)
        self.run_cli_command('config', 'set',
                             'vendor.fetchai.skills.simple_service_search.behaviours.service_search.args.tick_interval',
                             '--type', 'int', '2', cwd=search_agent_name)
        self.run_cli_command('config', 'set', 'vendor.fetchai.connections.soef.config.token_storage_path',
                             os.path.join(self.t, search_agent_name, 'soef_key.txt'),
                             cwd=search_agent_name)
        self.run_cli_command('build', cwd=registration_agent_name)
        self.run_cli_command('build', cwd=search_agent_name)
        # --- keys and certificates for both agents ---
        self.set_agent_context(registration_agent_name)
        self.generate_private_key(FetchAICrypto.identifier, FETCHAI_PRIVATE_KEY_FILE_CONNECTION)
        self.add_private_key(FetchAICrypto.identifier, FETCHAI_PRIVATE_KEY_FILE_CONNECTION, connection=True)
        self.generate_private_key()
        self.add_private_key()
        self.unset_agent_context()
        self.run_cli_command('issue-certificates', cwd=registration_agent_name)
        self.set_agent_context(search_agent_name)
        self.generate_private_key(FetchAICrypto.identifier, FETCHAI_PRIVATE_KEY_FILE_CONNECTION)
        self.add_private_key(FetchAICrypto.identifier, FETCHAI_PRIVATE_KEY_FILE_CONNECTION, connection=True)
        self.generate_private_key()
        self.add_private_key()
        self.unset_agent_context()
        self.run_cli_command('issue-certificates', cwd=search_agent_name)
        # --- launch both agents together and wait for the search hit ---
        proc = PexpectWrapper(
            [sys.executable, '-m', 'aea.cli', '-v', 'DEBUG', 'launch',
             registration_agent_name, search_agent_name],
            env=os.environ, maxread=10000, encoding='utf-8', logfile=sys.stdout)
        try:
            proc.expect_all(
                [f'[{search_agent_name}] found number of agents=1, search_response'],
                timeout=30)
        finally:
            proc.control_c()
            proc.expect('Exit cli. code: 0', timeout=30)
        # --- the registration agent's sqlite storage must contain records ---
        assert os.path.exists(storage_file_name)
        con = sqlite3.connect(storage_file_name)
        try:
            cursor = con.cursor()
            tables = cursor.execute(
                "SELECT name FROM sqlite_master WHERE type='table';").fetchall()
            assert tables
            table_name = tables[0][0]
            num_of_records = cursor.execute(
                f'SELECT count(*) FROM {table_name};').fetchone()[0]
            assert num_of_records > 0
        finally:
            # Fixed: `con.close` was missing the call parentheses, so the
            # connection was never actually closed.
            con.close()
# NOTE(review): the decorator was garbled to bare `_cache`; restored as
# functools.lru_cache, the only stdlib decorator whose name ends that way.
@lru_cache
def custom_art(text):
    """Render `text` as a red badge on the default cover art; return base64 PNG bytes."""
    img_data = io.BytesIO(b64decode(DEFAULT_ART))
    art_img: Image = Image.open(img_data)
    size = art_img.size
    # Badge anchored at the bottom-right corner; width scales with text length.
    x1 = y1 = (size[0] * 0.95)
    x0 = (x1 - ((len(text) * 0.0625) * size[0]))
    y0 = (y1 - (0.11 * size[0]))
    d = ImageDraw.Draw(art_img)
    # Font fallback chain: Myriad Pro (Windows per-user) -> Gadugi bold ->
    # Arial -> FreeMono; `shift` compensates for each font's vertical metrics.
    try:
        username = os.getenv('USERNAME')
        fnt = ImageFont.truetype(
            f'C:/Users/{username}/AppData/Local/Microsoft/Windows/Fonts/MYRIADPRO-BOLD.OTF', 80)
        shift = 5
    except OSError:
        try:
            fnt = ImageFont.truetype('gadugib.ttf', 80)
            shift = (- 5)
        except OSError:
            try:
                fnt = ImageFont.truetype('arial.ttf', 80)
                shift = 0
            except OSError:
                fnt = ImageFont.truetype(
                    '/usr/share/fonts/truetype/freefont/FreeMono.ttf', 80, encoding='unic')
                shift = 0
    d.rounded_rectangle((x0, y0, x1, y1), fill='#cc1a21', radius=7)
    d.text((((x0 + x1) / 2), (((y0 + y1) / 2) + shift)), text,
           fill='#fff', font=fnt, align='center', anchor='mm')
    data = io.BytesIO()
    art_img.save(data, format='png', quality=95)
    return b64encode(data.getvalue())
def update(latest_version: str, command: str = 'py') -> bool:
    """Upgrade the save editor package to `latest_version` via pip.

    Args:
        latest_version: exact version to pin in the pip requirement.
        command: Python launcher to invoke (default Windows 'py').

    Returns:
        True on a successful upgrade, False otherwise.
    """
    helper.colored_text('Updating...', base=helper.GREEN)
    try:
        # Argument list with shell=False avoids shell interpretation of
        # `latest_version` (the original interpolated it into a shell string).
        subprocess.run(
            [command, '-m', 'pip', 'install', '--upgrade',
             f'battle-cats-save-editor=={latest_version}'],
            capture_output=True,
            check=True,
        )
        helper.colored_text('Update successful', base=helper.GREEN)
        return True
    except (subprocess.CalledProcessError, FileNotFoundError):
        # FileNotFoundError covers a missing launcher, which the old
        # shell=True call surfaced as a nonzero exit instead.
        return False
class TestCurrentPrivacyPreference():
    """Tests for CurrentPrivacyPreference.get_preference_by_type_and_fides_user_device
    across the four consent record types (notice, purpose, feature, system)."""

    def test_get_preference_by_notice_and_fides_user_device(self, db, empty_provided_identity, privacy_preference_history_us_ca_provide_for_fides_user, privacy_notice, privacy_notice_us_ca_provide, fides_user_provided_identity):
        """Lookup by privacy-notice id matches only the right identity/notice pair."""
        pref = CurrentPrivacyPreference.get_preference_by_type_and_fides_user_device(
            db=db,
            fides_user_provided_identity=fides_user_provided_identity,
            preference_type=ConsentRecordType.privacy_notice_id,
            preference_value=privacy_notice_us_ca_provide.id,
        )
        assert (pref == privacy_preference_history_us_ca_provide_for_fides_user.current_privacy_preference)
        # Wrong identity: no match.
        assert (CurrentPrivacyPreference.get_preference_by_type_and_fides_user_device(
            db=db,
            fides_user_provided_identity=empty_provided_identity,
            preference_type=ConsentRecordType.privacy_notice_id,
            preference_value=privacy_notice.id,
        ) is None)
        # Right identity but a notice no preference was saved against: no match.
        assert (CurrentPrivacyPreference.get_preference_by_type_and_fides_user_device(
            db=db,
            fides_user_provided_identity=fides_user_provided_identity,
            preference_type=ConsentRecordType.privacy_notice_id,
            preference_value=privacy_notice.id,
        ) is None)

    def test_get_preference_by_purpose_and_fides_user_device(self, db, empty_provided_identity, privacy_preference_history_for_tcf_purpose_consent, fides_user_provided_identity):
        """Lookup by TCF purpose id (8 in the fixture) behaves the same way."""
        pref = CurrentPrivacyPreference.get_preference_by_type_and_fides_user_device(
            db=db,
            fides_user_provided_identity=fides_user_provided_identity,
            preference_type=ConsentRecordType.purpose_consent,
            preference_value=8,
        )
        assert (pref == privacy_preference_history_for_tcf_purpose_consent.current_privacy_preference)
        assert (CurrentPrivacyPreference.get_preference_by_type_and_fides_user_device(
            db=db,
            fides_user_provided_identity=empty_provided_identity,
            preference_type=ConsentRecordType.purpose_consent,
            preference_value=8,
        ) is None)
        # Unknown purpose id: no match.
        assert (CurrentPrivacyPreference.get_preference_by_type_and_fides_user_device(
            db=db,
            fides_user_provided_identity=fides_user_provided_identity,
            preference_type=ConsentRecordType.purpose_consent,
            preference_value=500,
        ) is None)

    def test_get_preference_by_feature_and_fides_user_device(self, db, empty_provided_identity, privacy_preference_history_for_tcf_feature, fides_user_provided_identity):
        """Lookup by TCF feature id (2 in the fixture) behaves the same way."""
        pref = CurrentPrivacyPreference.get_preference_by_type_and_fides_user_device(
            db=db,
            fides_user_provided_identity=fides_user_provided_identity,
            preference_type=ConsentRecordType.feature,
            preference_value=2,
        )
        assert (pref == privacy_preference_history_for_tcf_feature.current_privacy_preference)
        assert (CurrentPrivacyPreference.get_preference_by_type_and_fides_user_device(
            db=db,
            fides_user_provided_identity=empty_provided_identity,
            preference_type=ConsentRecordType.feature,
            preference_value=2,
        ) is None)
        assert (CurrentPrivacyPreference.get_preference_by_type_and_fides_user_device(
            db=db,
            fides_user_provided_identity=fides_user_provided_identity,
            preference_type=ConsentRecordType.feature,
            preference_value=500,
        ) is None)

    def test_get_preference_by_system_and_fides_user_device(self, db, empty_provided_identity, privacy_preference_history_for_system, fides_user_provided_identity, system):
        """Lookup by system id behaves the same way."""
        pref = CurrentPrivacyPreference.get_preference_by_type_and_fides_user_device(
            db=db,
            fides_user_provided_identity=fides_user_provided_identity,
            preference_type=ConsentRecordType.system_consent,
            preference_value=system.id,
        )
        assert (pref == privacy_preference_history_for_system.current_privacy_preference)
        assert (CurrentPrivacyPreference.get_preference_by_type_and_fides_user_device(
            db=db,
            fides_user_provided_identity=empty_provided_identity,
            preference_type=ConsentRecordType.system_consent,
            preference_value=system.id,
        ) is None)
        assert (CurrentPrivacyPreference.get_preference_by_type_and_fides_user_device(
            db=db,
            fides_user_provided_identity=fides_user_provided_identity,
            preference_type=ConsentRecordType.system_consent,
            preference_value='another system',
        ) is None)
class TestOptions(BaseOptions):
    """Command-line options used at test/inference time; extends the shared BaseOptions."""

    def initialize(self, parser):
        """Register test-only arguments on top of the base options and mark the run as non-training."""
        parser = BaseOptions.initialize(self, parser)
        parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
        parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        # Default of inf means "process the whole test set".
        parser.add_argument('--num_test', type=int, default=float('inf'), help='how many test images to run')
        parser.add_argument('--use_eval', action='store_true', help='Call model.eval() before test')
        # Test phase never trains.
        self.isTrain = False
        return parser
class OptionPlotoptionsVariwideSonificationDefaultinstrumentoptions(Options):
    """Generated accessors for variwide sonification defaultInstrumentOptions.

    NOTE(review): each getter/setter pair shared a name, so the later `def`
    silently shadowed the earlier one (the getters were unreachable).  The
    @property/@<name>.setter decorators implied by that pattern have been
    restored; sub-object accessors are left as plain methods pending
    confirmation against the generator's output.
    """

    def activeWhen(self) -> 'OptionPlotoptionsVariwideSonificationDefaultinstrumentoptionsActivewhen':
        """Return (creating on first access) the nested `activeWhen` options."""
        return self._config_sub_data('activeWhen', OptionPlotoptionsVariwideSonificationDefaultinstrumentoptionsActivewhen)

    @property
    def instrument(self):
        """Instrument preset name (default 'piano')."""
        return self._config_get('piano')

    @instrument.setter
    def instrument(self, text: str):
        self._config(text, js_type=False)

    def mapping(self) -> 'OptionPlotoptionsVariwideSonificationDefaultinstrumentoptionsMapping':
        """Return (creating on first access) the nested `mapping` options."""
        return self._config_sub_data('mapping', OptionPlotoptionsVariwideSonificationDefaultinstrumentoptionsMapping)

    @property
    def midiName(self):
        """Name used when exporting tracks to MIDI."""
        return self._config_get(None)

    @midiName.setter
    def midiName(self, text: str):
        self._config(text, js_type=False)

    def pointGrouping(self) -> 'OptionPlotoptionsVariwideSonificationDefaultinstrumentoptionsPointgrouping':
        """Return (creating on first access) the nested `pointGrouping` options."""
        return self._config_sub_data('pointGrouping', OptionPlotoptionsVariwideSonificationDefaultinstrumentoptionsPointgrouping)

    @property
    def roundToMusicalNotes(self):
        """Whether pitches snap to musical notes (default True)."""
        return self._config_get(True)

    @roundToMusicalNotes.setter
    def roundToMusicalNotes(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def showPlayMarker(self):
        """Whether a play marker is shown while sonifying (default True)."""
        return self._config_get(True)

    @showPlayMarker.setter
    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def type(self):
        """Track type (default 'instrument')."""
        return self._config_get('instrument')

    @type.setter
    def type(self, text: str):
        self._config(text, js_type=False)
class TestGetRules:
    """Tests for listing rules under a policy (GET rule endpoint).

    Bug fix: the fixture/marker decorators were mangled in the source —
    ``(scope='function')`` and ``.usefixtures('policy_drp_action')`` were
    bare expressions, leaving the code syntactically invalid. Restored as
    ``@pytest.fixture(scope='function')`` and
    ``@pytest.mark.usefixtures('policy_drp_action')``.
    """

    @pytest.fixture(scope='function')
    def url(self, policy: Policy) -> str:
        """Rule-list URL for the test policy."""
        return V1_URL_PREFIX + RULE_CREATE_URI.format(policy_key=policy.key)

    def test_get_rules_unauthenticated(self, url, api_client):
        resp = api_client.get(url)
        assert resp.status_code == 401

    def test_get_rules_wrong_scope(self, url, api_client: TestClient, generate_auth_header):
        auth_header = generate_auth_header(scopes=[scopes.POLICY_READ])
        resp = api_client.get(url, headers=auth_header)
        assert resp.status_code == 403

    @pytest.mark.usefixtures('policy_drp_action')
    def test_get_rules(self, db, api_client: TestClient, generate_auth_header, policy: Policy, url):
        """Only the requested policy's rule is returned, with full payload."""
        all_policies = Policy.query(db=db).all()
        assert len(all_policies) > 1
        all_rules = Rule.query(db=db).all()
        assert len(all_rules) > 1
        auth_header = generate_auth_header(scopes=[scopes.RULE_READ])
        resp = api_client.get(url, headers=auth_header)
        assert resp.status_code == 200
        data = resp.json()
        assert 'items' in data
        assert data['total'] == 1
        access_rule: Rule = policy.get_rules_for_action(ActionType.access.value)[0]
        rule_data = data['items'][0]
        assert rule_data['name'] == access_rule.name
        assert rule_data['key'] == 'access_request_rule'
        assert rule_data['action_type'] == access_rule.action_type
        assert rule_data['storage_destination']['type'] == 's3'
        assert 'targets' in rule_data
        assert len(rule_data['targets']) == 1
        rule_target_data = rule_data['targets'][0]
        # NOTE(review): the original *assigns* here instead of asserting —
        # looks like a missing `assert ... ==`; preserved pending confirmation.
        rule_target_data['data_category'] = access_rule.get_target_data_categories()

    def test_pagination_ordering_rules(self, db, oauth_client, api_client: TestClient,
                                       generate_auth_header, url, policy, storage_config):
        """Listing returns rules in reverse creation order."""
        auth_header = generate_auth_header(scopes=[scopes.RULE_READ])
        rules = []
        RULE_COUNT = 50
        for _ in range(RULE_COUNT):
            key = str(uuid4()).replace('-', '')
            rules.append(Rule.create(db=db, data={
                'name': key,
                'key': key,
                'action_type': ActionType.access.value,
                'storage_destination_id': storage_config.id,
                'policy_id': policy.id,
            }))
        resp = api_client.get(url, headers=auth_header)
        assert resp.status_code == 200
        data = resp.json()
        assert 'items' in data
        assert data['total'] == RULE_COUNT + 1
        for rule in data['items']:
            # Most recently created rules come first.
            most_recent = rules.pop()
            assert rule['key'] == most_recent.key
            most_recent.delete(db=db)
@patch('jsonrpcserver.dispatcher.dispatch_request', side_effect=ValueError('foo'))
def test_dispatch_to_response_pure_notification_server_error(*_: Mock) -> None:
    """A notification whose dispatch raises still yields a server-error response.

    Bug fix: the ``@patch`` decorator had degraded into a bare tuple
    expression, so the mock was never applied and dispatch would not raise.
    """

    def foo() -> Result:
        return Success()

    assert dispatch_to_response_pure(
        deserializer=default_deserializer,
        validator=default_validator,
        post_process=identity,
        context=NOCONTEXT,
        methods={'foo': foo},
        request='{"jsonrpc": "2.0", "method": "foo"}',
    ) == Left(ErrorResponse(ERROR_SERVER_ERROR, 'Server error', 'foo', None))
class TestResolverExceptions:
    """Each misconfigured spock class must raise ``_SpockFieldHandlerError``."""

    def test_no_env(self, monkeypatch, tmp_path):
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockFieldHandlerError):
                SpockBuilder(NoEnv, desc='Test Builder').generate()

    def test_no_def_allowed(self, monkeypatch, tmp_path):
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockFieldHandlerError):
                SpockBuilder(NoDefAllowed, desc='Test Builder').generate()

    def test_multiple_defaults(self, monkeypatch, tmp_path):
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockFieldHandlerError):
                SpockBuilder(MultipleDefaults, desc='Test Builder').generate()

    def test_cast_fail(self, monkeypatch, tmp_path):
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            # Env var present but not castable to the annotated type.
            os.environ['CAST_MISS'] = 'foo'
            with pytest.raises(_SpockFieldHandlerError):
                SpockBuilder(CastRaise, desc='Test Builder').generate()

    def test_annotation_not_in_set(self, monkeypatch, tmp_path):
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockFieldHandlerError):
                SpockBuilder(AnnotationNotInSetRaise, desc='Test Builder').generate()

    def test_annotation_not_allowed(self, monkeypatch, tmp_path):
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockFieldHandlerError):
                SpockBuilder(AnnotationNotAllowedRaise, desc='Test Builder').generate()
def _execute(cmd, stdin=None, stderr_to_stdout=False):
    """Run *cmd* as a subprocess, optionally feeding it *stdin*.

    :param cmd: command (list or string) passed straight to ``Popen``.
    :param stdin: bytes piped to the child's stdin, or None.
    :param stderr_to_stdout: merge stderr into stdout when True.
    :return: dict with ``returncode``, ``stdout`` and ``stderr``.
    """
    logger.log(IMPORTANT, 'Running command (panel): %s', cmd)
    err_stream = STDOUT if stderr_to_stdout else PIPE
    proc = Popen(cmd, bufsize=0, close_fds=True,
                 stdout=PIPE, stderr=err_stream, stdin=PIPE)
    out, err = proc.communicate(input=stdin)
    return {'returncode': proc.returncode, 'stdout': out, 'stderr': err}
def upgrade():
    """Add a session ``creator_id`` column (plus its versioned copy) and
    normalize event/session state values."""
    op.add_column('sessions', sa.Column('creator_id', sa.Integer(), nullable=True))
    # Cascade-delete sessions when their creating user is removed.
    op.create_foreign_key(None, 'sessions', 'users', ['creator_id'], ['id'], ondelete='CASCADE')
    # Same column on the versioning table; autoincrement disabled there.
    op.add_column('sessions_version', sa.Column('creator_id', sa.Integer(), autoincrement=False, nullable=True))
    # Normalize to lowercase first, then collapse unrecognized states to 'draft'.
    op.execute('UPDATE events SET state = LOWER(state)')
    op.execute("UPDATE events SET state = 'draft' WHERE state != 'published'")
    op.execute('UPDATE sessions SET state = LOWER(state)')
    op.execute("UPDATE sessions SET state = 'draft' WHERE state not in\n ('accepted', 'pending', 'confirmed', 'rejected')")
def test_for_loop_variable_generation():
    """Names cycle through the candidate list, then repeat with numeric suffixes."""
    candidates = ['i', 'j', 'k', 'l', 'm', 'n']
    empty_ast = AbstractSyntaxTree(
        SeqNode(LogicCondition.initialize_true(LogicCondition.generate_new_context())), {})
    renamer = ForLoopVariableRenamer(empty_ast, candidates)
    generated = [renamer._get_variable_name() for _ in range(14)]
    expected = ['i', 'j', 'k', 'l', 'm', 'n',
                'i1', 'j1', 'k1', 'l1', 'm1', 'n1',
                'i2', 'j2']
    assert generated == expected
class OptionPlotoptionsItemSonificationDefaultinstrumentoptionsMappingVolume(Options):
    """Volume mapping options for the item series' default sonification instrument.

    Bug fix: getter/setter pairs were plain duplicate ``def``s (setter
    shadowing the getter); restored as ``@property`` / ``@<name>.setter``.
    """

    @property
    def mapFunction(self):
        """Custom mapping function; no default."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Data property the volume is mapped to; no default."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Upper bound of the mapped range; no default."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Lower bound of the mapped range; no default."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Range the mapping is computed within; no default."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class TestExplainerAccessByResource:
    """E2E checks for ``forseti explainer access_by_resource``.

    Bug fix: the marker decorators had degraded to bare ``.client .e2e
    .explainer`` fragments; restored as ``@pytest.mark.*`` decorators.
    """

    @pytest.mark.client
    @pytest.mark.e2e
    @pytest.mark.explainer
    def test_access_by_resource_for_organization(self, forseti_cli: ForsetiCli,
                                                 forseti_model_readonly,
                                                 forseti_server_service_account,
                                                 organization_id):
        """Org-level query lists the server service account the expected number of times."""
        model_name, _, _ = forseti_model_readonly
        forseti_cli.model_use(model_name=model_name)
        result = forseti_cli.explainer_access_by_resource(f'organization/{organization_id}')
        assert result.returncode == 0, f'Forseti stdout: {str(result.stdout)}'
        assert EXPECTED_COUNT_FOR_ORG == len(
            re.findall(forseti_server_service_account, str(result.stdout)))

    @pytest.mark.client
    @pytest.mark.e2e
    @pytest.mark.explainer
    def test_access_by_resource_for_project(self, forseti_cli: ForsetiCli,
                                            forseti_model_readonly,
                                            forseti_server_service_account,
                                            project_id):
        """Project-level query lists the server service account the expected number of times."""
        model_name, _, _ = forseti_model_readonly
        forseti_cli.model_use(model_name=model_name)
        result = forseti_cli.explainer_access_by_resource(f'project/{project_id}')
        assert result.returncode == 0, f'Forseti stdout: {str(result.stdout)}'
        assert EXPECTED_COUNT_FOR_PROJECT == len(
            re.findall(forseti_server_service_account, str(result.stdout)))
def writePiecesArrays(save):
    """Serialize both piece lists to *save* as comma-terminated fields.

    Each piece is written as ``color,selected,type,label,row,col,``; white
    pieces additionally get a trailing newline per piece.

    NOTE(review): the original wrote no per-piece newline for black pieces —
    that asymmetry looks unintentional but is preserved so existing save
    files keep the same format. Improvement over the original: the two
    copy-pasted loops are deduplicated into one helper and the
    ``range(len(...))`` indexing is replaced with direct iteration.
    """

    def _write_pieces(pieces, newline):
        # One piece per call line: six fields, each followed by a comma.
        for piece in pieces:
            fields = (piece.color, piece.selected, piece.type,
                      piece.label, piece.row, piece.col)
            for field in fields:
                save.write(str(field))
                save.write(',')
            if newline:
                save.write('\n')

    _write_pieces(globVar.w_pieces, newline=True)
    _write_pieces(globVar.b_pieces, newline=False)
class icmptype_ContentHandler(IO_Object_ContentHandler):
    """SAX content handler that populates an icmptype item from its XML."""

    def startElement(self, name, attrs):
        """Dispatch on the opened element and copy its attributes onto the item."""
        IO_Object_ContentHandler.startElement(self, name, attrs)
        self.item.parser_check_element_attrs(name, attrs)
        if name == 'icmptype':
            if 'name' in attrs:
                log.warning("Ignoring deprecated attribute name='%s'" % attrs['name'])
            if 'version' in attrs:
                self.item.version = attrs['version']
        elif name in ('short', 'description'):
            # Text-only elements; their content is handled elsewhere.
            pass
        elif name == 'destination':
            for family in ('ipv4', 'ipv6'):
                if family in attrs and attrs[family].lower() in ('yes', 'true'):
                    self.item.destination.append(str(family))
class Plotly3D:
    """Factory for the 3D Plotly chart components of a page."""

    def __init__(self, ui):
        self.page = ui.page
        self.chartFamily = 'Plotly'

    def scatter(self, record, y_columns=None, x_axis=None, z_axis=None, profile=None,
                options=None, width=(100, '%'), height=(500, 'px'), html_code=None):
        """3D scatter chart (markers only) built from *record*."""
        options = options or {}
        options.update({'y_columns': y_columns, 'x_column': x_axis, 'z_axis': z_axis,
                        'type': 'scatter3d', 'mode': 'markers'})
        data = self.page.data.plotly.xyz(record, y_columns, x_axis, z_axis)
        chart = graph.GraphPlotly.Scatter3D(self.page, width, height, options, html_code, profile)
        chart.colors(self.page.theme.charts)
        for series in data['datasets']:
            chart.add_trace({'x': series['x'], 'y': series['y'], 'z': series['z']})
        return chart

    def line(self, record, y_columns=None, x_axis=None, z_axis=None, profile=None,
             options=None, width=(100, '%'), height=(500, 'px'), html_code=None):
        """3D line chart built from *record*."""
        options = options or {}
        options.update({'y_columns': y_columns, 'x_column': x_axis, 'z_axis': z_axis,
                        'type': 'scatter3d', 'mode': 'lines'})
        data = self.page.data.plotly.xyz(record, y_columns, x_axis, z_axis)
        chart = graph.GraphPlotly.Scatter3D(self.page, width, height, options, html_code, profile)
        chart.colors(self.page.theme.charts)
        for series in data['datasets']:
            chart.add_trace({'x': series['x'], 'y': series['y'], 'z': series['z']})
        return chart

    def marker(self, record, y_columns=None, x_axis=None, z_axis=None, profile=None,
               options=None, width=(100, '%'), height=(500, 'px'), html_code=None):
        """3D lines+markers chart; each trace's line color follows the theme."""
        options = options or {}
        options.update({'y_columns': y_columns, 'x_column': x_axis, 'z_axis': z_axis,
                        'type': 'scatter3d', 'mode': 'lines+markers'})
        data = self.page.data.plotly.xyz(record, y_columns, x_axis, z_axis)
        chart = graph.GraphPlotly.Scatter3D(self.page, width, height, options, html_code, profile)
        for i, series in enumerate(data['datasets']):
            chart.add_trace({'x': series['x'], 'y': series['y'], 'z': series['z']})
            chart.data.line.color = self.page.theme.colors[i]
        return chart

    def ribbon(self, record, y_columns=None, x_axis=None, z_axis=None, profile=None,
               options=None, width=(100, '%'), height=(500, 'px'), html_code=None):
        """Ribbon chart built from surface traces.

        Robustness fix: caller-supplied *options* without a ``delta`` key used
        to raise ``KeyError``; the default delta is now always filled in.
        """
        options = options or {}
        options.setdefault('delta', {'x': 1, 'y': 1, 'z': 0})
        options.update({'y_columns': y_columns, 'x_column': x_axis, 'z_axis': z_axis,
                        'type': 'scatter3d', 'mode': 'lines+markers'})
        data = self.page.data.plotly.x_yz(record, y_columns, x_axis, z_axis,
                                          dy=options['delta']['y'],
                                          dx=options['delta']['x'],
                                          dz=options['delta']['z'])
        chart = graph.GraphPlotly.Surface(self.page, width, height, options, html_code, profile)
        chart.colors(self.page.theme.charts)
        for d in data['datasets']:
            chart.add_trace(d)
        chart.data.showscale = False
        return chart

    def mesh3d(self, record, intensity, x, y, z, i=None, j=None, k=None, profile=None,
               options=None, width=(100, '%'), height=(500, 'px'), html_code=None):
        """3D mesh chart from explicit column names.

        Bug fix: the optional index columns were initialised under the column
        *name* (``data[i] = []``) but appended under the literal key
        (``data['i']``), so supplying i/j/k raised ``KeyError``; both sides
        now use the literal 'i'/'j'/'k' keys.
        """
        data = {'intensity': [], 'x': [], 'y': [], 'z': []}
        if i is not None:
            data['i'] = []
        if j is not None:
            data['j'] = []
        if k is not None:
            data['k'] = []
        for rec in record:
            data['intensity'].append(rec[intensity])
            data['x'].append(rec[x])
            data['y'].append(rec[y])
            data['z'].append(rec[z])
            if i is not None:
                data['i'].append(rec[i])
            if j is not None:
                data['j'].append(rec[j])
            if k is not None:
                data['k'].append(rec[k])
        chart = graph.GraphPlotly.Mesh3d(self.page, width, height, options or {}, html_code, profile)
        chart.colors(self.page.theme.charts)
        chart.add_trace(data)
        return chart

    def surface(self, record, y_columns=None, x_axis=None, z_axis=None, profile=None,
                options=None, width=(100, '%'), height=(500, 'px'), html_code=None):
        """Surface chart built from the aggregated z matrix."""
        options = options or {}
        options.update({'type': 'surface', 'mode': ''})
        naps = self.page.data.plotly.surface(record, y_columns, x_axis, z_axis)
        chart = graph.GraphPlotly.Surface(self.page, width, height, options, html_code, profile)
        chart.colors(self.page.theme.charts)
        for d in naps['datasets']:
            chart.add_trace({'z': d})
        chart.data.showscale = False
        return chart

    def maps(self, record, profile=None, options=None, width=(100, '%'),
             height=(500, 'px'), html_code=None):
        """Surface chart where *record* already holds the z matrices."""
        options = options or {}
        options.update({'type': 'surface', 'mode': ''})
        chart = graph.GraphPlotly.Surface(self.page, width, height, options, html_code, profile)
        for d in record:
            chart.add_trace({'z': d})
        return chart
class PluginRunner():
    """Manages a pool of worker processes that execute one analysis plugin."""

    class Config(BaseModel):
        # Number of worker processes to spawn.
        process_count: int
        # Per-analysis timeout in seconds, enforced by the Worker — TODO confirm unit.
        timeout: int

    class Task(BaseModel):
        """Work item placed on the input queue for a worker."""
        virtual_file_path: typing.Dict
        path: str
        dependencies: typing.Dict
        scheduler_state: FileObject
        model_config = ConfigDict(arbitrary_types_allowed=True)

    def __init__(self, plugin: AnalysisPluginV0, config: Config, schemata: typing.Dict[(str, pydantic.BaseModel)]):
        """Create queues, shared stats buffers, and the (not yet started) workers.

        :param plugin: the analysis plugin all workers will run.
        :param config: pool sizing and timeout settings.
        :param schemata: result schema per dependency plugin name, used to
            re-validate dependency results before queueing.
        """
        self._plugin = plugin
        self._config = config
        self._schemata = schemata
        self._in_queue: mp.Queue = mp.Queue()
        self.out_queue: mp.Queue = mp.Queue()
        # Shared float array of analysis stats plus fill/cursor counters;
        # presumably used as a ring buffer by the workers — confirm in Worker.
        self.stats = mp.Array(ctypes.c_float, ANALYSIS_STATS_LIMIT)
        self.stats_count = mp.Value('i', 0)
        self._stats_idx = mp.Value('i', 0)
        self._fsorganizer = FSOrganizer()
        worker_config = Worker.Config(timeout=self._config.timeout)
        self._workers = [Worker(plugin=plugin, worker_config=worker_config, in_queue=self._in_queue, out_queue=self.out_queue, stats=self.stats, stats_count=self.stats_count, stats_idx=self._stats_idx) for _ in range(self._config.process_count)]

    def get_queue_len(self) -> int:
        """Number of queued tasks not yet picked up by a worker."""
        return self._in_queue.qsize()

    def get_active_worker_count(self) -> int:
        """Number of workers currently processing a task."""
        return sum([worker.is_working() for worker in self._workers])

    def start(self):
        """Start all worker processes."""
        for worker in self._workers:
            worker.start()

    def shutdown(self):
        """Terminate every worker that is still alive."""
        for worker in self._workers:
            if (not worker.is_alive()):
                continue
            worker.terminate()

    def queue_analysis(self, file_object: FileObject):
        """Validate the file's dependency results and enqueue an analysis task."""
        dependencies = {}
        for dependency in self._plugin.metadata.dependencies:
            # Re-validate each stored dependency result against its schema.
            Schema = self._schemata[dependency]
            result = file_object.processed_analysis[dependency]['result']
            dependencies[dependency] = Schema(**result)
        logging.debug(f'Queueing analysis for {file_object.uid}')
        self._in_queue.put(PluginRunner.Task(virtual_file_path=file_object.virtual_file_path, path=self._fsorganizer.generate_path(file_object), dependencies=dependencies, scheduler_state=file_object))
def test_config_snapshotting_span_compression_drop_exit_span(elasticapm_client):
    """A transaction keeps the config snapshot taken when it began, so a
    mid-transaction change of exit_span_min_duration does not stop the short
    exit span from being dropped."""
    elasticapm_client.config.update(version='1', exit_span_min_duration='10ms')
    elasticapm_client.begin_transaction('foo')
    # Updating the config after the transaction started must not affect it.
    elasticapm_client.config.update(version='2', exit_span_min_duration='0ms')
    span_kwargs = dict(
        leaf=True,
        span_type='a',
        span_subtype='b',
        span_action='c',
        extra={'destination': {'service': {'resource': 'x'}}},
        duration=0.005,
    )
    with elasticapm.capture_span('x', **span_kwargs):
        pass
    elasticapm_client.end_transaction()
    assert len(elasticapm_client.events[SPAN]) == 0
class OptionSeriesBulletSonificationTracks(Options):
    """Wrapper for ``series.bullet.sonification.tracks``.

    Bug fix: each getter/setter pair was emitted as two plain ``def``s with
    the same name (setter shadowing the getter); restored as
    ``@property`` / ``@<name>.setter`` pairs.
    """

    @property
    def activeWhen(self) -> 'OptionSeriesBulletSonificationTracksActivewhen':
        """Sub-options controlling when this track is active."""
        return self._config_sub_data('activeWhen', OptionSeriesBulletSonificationTracksActivewhen)

    @property
    def instrument(self):
        """Instrument name; defaults to 'piano'."""
        return self._config_get('piano')

    @instrument.setter
    def instrument(self, text: str):
        self._config(text, js_type=False)

    @property
    def mapping(self) -> 'OptionSeriesBulletSonificationTracksMapping':
        """Mapping sub-options for this track."""
        return self._config_sub_data('mapping', OptionSeriesBulletSonificationTracksMapping)

    @property
    def midiName(self):
        """MIDI name override; no default."""
        return self._config_get(None)

    @midiName.setter
    def midiName(self, text: str):
        self._config(text, js_type=False)

    @property
    def pointGrouping(self) -> 'OptionSeriesBulletSonificationTracksPointgrouping':
        """Point-grouping sub-options."""
        return self._config_sub_data('pointGrouping', OptionSeriesBulletSonificationTracksPointgrouping)

    @property
    def roundToMusicalNotes(self):
        """Whether pitches snap to musical notes; defaults to True."""
        return self._config_get(True)

    @roundToMusicalNotes.setter
    def roundToMusicalNotes(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def showPlayMarker(self):
        """Whether the play marker is shown; defaults to True."""
        return self._config_get(True)

    @showPlayMarker.setter
    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def type(self):
        """Track type; defaults to 'instrument'."""
        return self._config_get('instrument')

    @type.setter
    def type(self, text: str):
        self._config(text, js_type=False)
class OptionSeriesSankeyOnpointConnectoroptions(Options):
    """Wrapper for ``series.sankey.onPoint.connectorOptions``.

    Bug fix: getter/setter pairs were plain duplicate ``def``s (setter
    shadowing the getter); restored as ``@property`` / ``@<name>.setter``.
    """

    @property
    def dashstyle(self):
        """Connector dash style; no default."""
        return self._config_get(None)

    @dashstyle.setter
    def dashstyle(self, text: str):
        self._config(text, js_type=False)

    @property
    def stroke(self):
        """Connector stroke color; no default."""
        return self._config_get(None)

    @stroke.setter
    def stroke(self, text: str):
        self._config(text, js_type=False)

    @property
    def width(self):
        """Connector line width; defaults to 1."""
        return self._config_get(1)

    @width.setter
    def width(self, num: float):
        self._config(num, js_type=False)
def _parent_SK_to_lamport_PK(*, parent_SK: int, index: int) -> bytes:
    """Derive the compressed Lamport public key for child *index* of *parent_SK*.

    Mirrors the EIP-2333 ``parent_SK_to_lamport_PK`` procedure — TODO confirm
    against the exact spec revision this module targets.

    :param parent_SK: parent secret key as a (32-byte-range) integer.
    :param index: child index, serialized as a 4-byte big-endian salt.
    :return: 32-byte compressed Lamport public key.
    """
    # The child index serves as the salt for both Lamport secret expansions.
    salt = index.to_bytes(4, byteorder='big')
    IKM = parent_SK.to_bytes(32, byteorder='big')
    lamport_0 = _IKM_to_lamport_SK(IKM=IKM, salt=salt)
    # Second half uses the bit-flipped parent secret as input key material.
    not_IKM = _flip_bits_256(parent_SK).to_bytes(32, byteorder='big')
    lamport_1 = _IKM_to_lamport_SK(IKM=not_IKM, salt=salt)
    lamport_SKs = (lamport_0 + lamport_1)
    # Hash each Lamport secret chunk to its public leaf, then compress
    # the concatenation of all leaves into a single digest.
    lamport_PKs = [SHA256(sk) for sk in lamport_SKs]
    compressed_PK = SHA256(b''.join(lamport_PKs))
    return compressed_PK
def to_actions(dp, acts):
    """Translate a list of action/instruction dicts into OFP instruction objects.

    :param dp: datapath whose ``ofproto``/``ofproto_parser`` are used.
    :param acts: dicts that are either plain actions (handled by ``to_action``)
        or instructions ('WRITE_ACTIONS', 'CLEAR_ACTIONS', 'GOTO_TABLE',
        'WRITE_METADATA').
    :return: list of ``OFPInstruction*`` objects; plain actions are collected
        into a single trailing APPLY_ACTIONS instruction.
    """
    inst = []
    apply_actions = []
    ofp = dp.ofproto
    parser = dp.ofproto_parser
    for a in acts:
        action = to_action(dp, a)
        if action is not None:
            apply_actions.append(action)
            continue
        action_type = a.get('type')
        if action_type == 'WRITE_ACTIONS':
            write_actions = []
            # Robustness: a missing 'actions' key is treated as empty instead
            # of raising TypeError on iteration.
            for act in a.get('actions', []):
                inner = to_action(dp, act)
                if inner is not None:
                    write_actions.append(inner)
                else:
                    # Bug fix: log the unknown *inner* action's type; the old
                    # code logged the outer literal 'WRITE_ACTIONS' here.
                    LOG.error('Unknown action type: %s', act.get('type'))
            if write_actions:
                inst.append(parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS, write_actions))
        elif action_type == 'CLEAR_ACTIONS':
            inst.append(parser.OFPInstructionActions(ofp.OFPIT_CLEAR_ACTIONS, []))
        elif action_type == 'GOTO_TABLE':
            table_id = UTIL.ofp_table_from_user(a.get('table_id'))
            inst.append(parser.OFPInstructionGotoTable(table_id))
        elif action_type == 'WRITE_METADATA':
            metadata = str_to_int(a.get('metadata'))
            # Default mask marks all metadata bits as significant.
            metadata_mask = (str_to_int(a['metadata_mask'])
                             if 'metadata_mask' in a else parser.UINT64_MAX)
            inst.append(parser.OFPInstructionWriteMetadata(metadata, metadata_mask))
        else:
            LOG.error('Unknown action type: %s', action_type)
    if apply_actions:
        inst.append(parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, apply_actions))
    return inst
def perform_rollout_seeding_test(hydra_overrides_sequential: Dict[(str, str)], hydra_overrides_parallel: Dict[(str, str)]) -> None:
    """Run the same 8-episode rollout sequentially and in parallel and assert
    that every logged statistic matches between the two runs."""

    def _run_and_collect(overrides, runner_cfg):
        # Register a fresh stats writer, run the rollout, and snapshot its data.
        writer = LogStatsWriterExtract()
        register_log_stats_writer(writer)
        overrides.update(runner_cfg)
        run_maze_job(overrides, config_module='maze.conf', config_name='conf_rollout')
        return copy.deepcopy(writer.data)

    sequential_data = _run_and_collect(
        hydra_overrides_sequential,
        {'runner': 'sequential', 'runner.n_episodes': 8})
    parallel_data = _run_and_collect(
        hydra_overrides_parallel,
        {'runner': 'parallel', 'runner.n_episodes': 8, 'runner.n_processes': 2})

    for stat_key in sequential_data.keys():
        assert np.isclose(sequential_data[stat_key], parallel_data[stat_key]), \
            f'Not equal stats: {stat_key} -> {sequential_data[stat_key]} vs {parallel_data[stat_key]}'
class OptionPlotoptionsWordcloudSonificationDefaultinstrumentoptionsMappingHighpassResonance(Options):
    """Highpass-resonance mapping options for the wordcloud sonification instrument.

    Bug fix: getter/setter pairs were plain duplicate ``def``s (setter
    shadowing the getter); restored as ``@property`` / ``@<name>.setter``.
    """

    @property
    def mapFunction(self):
        """Custom mapping function; no default."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Data property the resonance is mapped to; no default."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Upper bound of the mapped range; no default."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Lower bound of the mapped range; no default."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Range the mapping is computed within; no default."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class Migration(migrations.Migration):
    """Initial migration: creates the two raw Broker staging tables.

    ``source_assistance_transaction`` mirrors FABS (financial assistance)
    records and ``source_procurement_transaction`` mirrors FPDS
    (procurement) records one-to-one.  The field lists are auto-generated
    from the Broker schema — do not hand-edit individual columns.
    """

    initial = True
    dependencies = []
    # NOTE: nearly every Broker column is stored as a nullable TextField on
    # purpose; typed fields (Numeric/Timestamp/Boolean/Array) are the
    # exceptions, and only natural/surrogate keys carry indexes.
    operations = [migrations.CreateModel(name='SourceAssistanceTransaction', fields=[('published_award_financial_assistance_id', models.IntegerField(help_text='surrogate primary key defined in Broker', primary_key=True, serialize=False)), ('afa_generated_unique', models.TextField(help_text='natural key defined in Broker', unique=True)), ('action_date', models.TextField(blank=True, null=True)), ('action_type', models.TextField(blank=True, null=True)), ('action_type_description', models.TextField(blank=True, null=True)), ('assistance_type', models.TextField(blank=True, null=True)), ('assistance_type_desc', models.TextField(blank=True, null=True)), ('award_description', models.TextField(blank=True, null=True)), ('award_modification_amendme', models.TextField(blank=True, null=True)), ('awardee_or_recipient_legal', models.TextField(blank=True, null=True)), ('awardee_or_recipient_uniqu', models.TextField(blank=True, null=True)), ('awarding_agency_code', models.TextField(blank=True, null=True)), ('awarding_agency_name', models.TextField(blank=True, null=True)), ('awarding_office_code', models.TextField(blank=True, null=True)), ('awarding_office_name', models.TextField(blank=True, null=True)), ('awarding_sub_tier_agency_c', models.TextField(blank=True, null=True)), ('awarding_sub_tier_agency_n', models.TextField(blank=True, null=True)), ('business_categories', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), default=None, null=True, size=None)), ('business_funds_ind_desc', models.TextField(blank=True, null=True)), ('business_funds_indicator', models.TextField(blank=True, null=True)), ('business_types', models.TextField(blank=True, null=True)), ('business_types_desc', models.TextField(blank=True, null=True)), ('cfda_number', models.TextField(blank=True, null=True)), ('cfda_title', models.TextField(blank=True, null=True)), ('correction_delete_ind_desc', models.TextField(blank=True, 
null=True)), ('correction_delete_indicatr', models.TextField(blank=True, null=True)), ('created_at', usaspending_api.common.custom_django_fields.NaiveTimestampField(blank=True, help_text='record creation datetime in Broker', null=True)), ('face_value_loan_guarantee', usaspending_api.common.custom_django_fields.NumericField(blank=True, null=True)), ('fain', models.TextField(blank=True, db_index=True, null=True)), ('federal_action_obligation', usaspending_api.common.custom_django_fields.NumericField(blank=True, null=True)), ('fiscal_year_and_quarter_co', models.TextField(blank=True, null=True)), ('funding_agency_code', models.TextField(blank=True, null=True)), ('funding_agency_name', models.TextField(blank=True, null=True)), ('funding_office_code', models.TextField(blank=True, null=True)), ('funding_office_name', models.TextField(blank=True, null=True)), ('funding_sub_tier_agency_co', models.TextField(blank=True, null=True)), ('funding_sub_tier_agency_na', models.TextField(blank=True, null=True)), ('high_comp_officer1_amount', models.TextField(blank=True, null=True)), ('high_comp_officer1_full_na', models.TextField(blank=True, null=True)), ('high_comp_officer2_amount', models.TextField(blank=True, null=True)), ('high_comp_officer2_full_na', models.TextField(blank=True, null=True)), ('high_comp_officer3_amount', models.TextField(blank=True, null=True)), ('high_comp_officer3_full_na', models.TextField(blank=True, null=True)), ('high_comp_officer4_amount', models.TextField(blank=True, null=True)), ('high_comp_officer4_full_na', models.TextField(blank=True, null=True)), ('high_comp_officer5_amount', models.TextField(blank=True, null=True)), ('high_comp_officer5_full_na', models.TextField(blank=True, null=True)), ('is_active', usaspending_api.common.custom_django_fields.BooleanFieldWithDefault()), ('is_historical', models.BooleanField(null=True, blank=True)), ('legal_entity_address_line1', models.TextField(blank=True, null=True)), ('legal_entity_address_line2', 
models.TextField(blank=True, null=True)), ('legal_entity_address_line3', models.TextField(blank=True, null=True)), ('legal_entity_city_code', models.TextField(blank=True, null=True)), ('legal_entity_city_name', models.TextField(blank=True, null=True)), ('legal_entity_congressional', models.TextField(blank=True, null=True)), ('legal_entity_country_code', models.TextField(blank=True, null=True)), ('legal_entity_country_name', models.TextField(blank=True, null=True)), ('legal_entity_county_code', models.TextField(blank=True, null=True)), ('legal_entity_county_name', models.TextField(blank=True, null=True)), ('legal_entity_foreign_city', models.TextField(blank=True, null=True)), ('legal_entity_foreign_descr', models.TextField(blank=True, null=True)), ('legal_entity_foreign_posta', models.TextField(blank=True, null=True)), ('legal_entity_foreign_provi', models.TextField(blank=True, null=True)), ('legal_entity_state_code', models.TextField(blank=True, null=True)), ('legal_entity_state_name', models.TextField(blank=True, null=True)), ('legal_entity_zip5', models.TextField(blank=True, null=True)), ('legal_entity_zip_last4', models.TextField(blank=True, null=True)), ('modified_at', usaspending_api.common.custom_django_fields.NaiveTimestampField(blank=True, null=True)), ('non_federal_funding_amount', usaspending_api.common.custom_django_fields.NumericField(blank=True, null=True)), ('original_loan_subsidy_cost', usaspending_api.common.custom_django_fields.NumericField(blank=True, null=True)), ('period_of_performance_curr', models.TextField(blank=True, null=True)), ('period_of_performance_star', models.TextField(blank=True, null=True)), ('place_of_perfor_state_code', models.TextField(blank=True, null=True)), ('place_of_perform_country_c', models.TextField(blank=True, null=True)), ('place_of_perform_country_n', models.TextField(blank=True, null=True)), ('place_of_perform_county_co', models.TextField(blank=True, null=True)), ('place_of_perform_county_na', 
models.TextField(blank=True, null=True)), ('place_of_perform_state_nam', models.TextField(blank=True, null=True)), ('place_of_perform_zip_last4', models.TextField(blank=True, null=True)), ('place_of_performance_city', models.TextField(blank=True, null=True)), ('place_of_performance_code', models.TextField(blank=True, null=True)), ('place_of_performance_congr', models.TextField(blank=True, null=True)), ('place_of_performance_forei', models.TextField(blank=True, null=True)), ('place_of_performance_zip4a', models.TextField(blank=True, null=True)), ('place_of_performance_zip5', models.TextField(blank=True, null=True)), ('place_of_performance_scope', models.TextField(blank=True, null=True)), ('record_type', models.IntegerField(blank=True, null=True)), ('record_type_description', models.TextField(blank=True, null=True)), ('sai_number', models.TextField(blank=True, null=True)), ('submission_id', usaspending_api.common.custom_django_fields.NumericField(blank=True, null=True)), ('total_funding_amount', models.TextField(blank=True, null=True)), ('ultimate_parent_legal_enti', models.TextField(blank=True, null=True)), ('ultimate_parent_unique_ide', models.TextField(blank=True, null=True)), ('unique_award_key', models.TextField(db_index=True, null=True)), ('updated_at', usaspending_api.common.custom_django_fields.NaiveTimestampField(blank=True, help_text='record last update datetime in Broker', null=True)), ('uri', models.TextField(blank=True, db_index=True, null=True))], options={'db_table': 'source_assistance_transaction'}), migrations.CreateModel(name='SourceProcurementTransaction', fields=[('detached_award_procurement_id', models.IntegerField(help_text='surrogate primary key defined in Broker', primary_key=True, serialize=False)), ('detached_award_proc_unique', models.TextField(help_text='natural key defined in Broker', unique=True)), ('a_76_fair_act_action', models.TextField(blank=True, null=True)), ('a_76_fair_act_action_desc', models.TextField(blank=True, null=True)), 
('action_date', models.TextField(blank=True, null=True)), ('action_type', models.TextField(blank=True, null=True)), ('action_type_description', models.TextField(blank=True, null=True)), ('additional_reporting', models.TextField(blank=True, null=True)), ('agency_id', models.TextField(blank=True, null=True)), ('airport_authority', models.TextField(blank=True, null=True)), ('alaskan_native_owned_corpo', models.TextField(blank=True, null=True)), ('alaskan_native_servicing_i', models.TextField(blank=True, null=True)), ('american_indian_owned_busi', models.TextField(blank=True, null=True)), ('annual_revenue', models.TextField(blank=True, null=True)), ('asian_pacific_american_own', models.TextField(blank=True, null=True)), ('award_description', models.TextField(blank=True, null=True)), ('award_modification_amendme', models.TextField(blank=True, null=True)), ('award_or_idv_flag', models.TextField(blank=True, null=True)), ('awardee_or_recipient_legal', models.TextField(blank=True, null=True)), ('awardee_or_recipient_uniqu', models.TextField(blank=True, null=True)), ('awarding_agency_code', models.TextField(blank=True, null=True)), ('awarding_agency_name', models.TextField(blank=True, null=True)), ('awarding_office_code', models.TextField(blank=True, null=True)), ('awarding_office_name', models.TextField(blank=True, null=True)), ('awarding_sub_tier_agency_c', models.TextField(blank=True, null=True)), ('awarding_sub_tier_agency_n', models.TextField(blank=True, null=True)), ('base_and_all_options_value', models.TextField(blank=True, null=True)), ('base_exercised_options_val', models.TextField(blank=True, null=True)), ('black_american_owned_busin', models.TextField(blank=True, null=True)), ('business_categories', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), default=None, null=True, size=None)), ('c1862_land_grant_college', models.TextField(blank=True, null=True)), ('c1890_land_grant_college', models.TextField(blank=True, null=True)), 
('c1994_land_grant_college', models.TextField(blank=True, null=True)), ('c8a_program_participant', models.TextField(blank=True, null=True)), ('cage_code', models.TextField(blank=True, null=True)), ('city_local_government', models.TextField(blank=True, null=True)), ('clinger_cohen_act_pla_desc', models.TextField(blank=True, null=True)), ('clinger_cohen_act_planning', models.TextField(blank=True, null=True)), ('commercial_item_acqui_desc', models.TextField(blank=True, null=True)), ('commercial_item_acquisitio', models.TextField(blank=True, null=True)), ('commercial_item_test_desc', models.TextField(blank=True, null=True)), ('commercial_item_test_progr', models.TextField(blank=True, null=True)), ('community_developed_corpor', models.TextField(blank=True, null=True)), ('community_development_corp', models.TextField(blank=True, null=True)), ('consolidated_contract', models.TextField(blank=True, null=True)), ('consolidated_contract_desc', models.TextField(blank=True, null=True)), ('construction_wage_rat_desc', models.TextField(blank=True, null=True)), ('construction_wage_rate_req', models.TextField(blank=True, null=True)), ('contingency_humanitar_desc', models.TextField(blank=True, null=True)), ('contingency_humanitarian_o', models.TextField(blank=True, null=True)), ('contract_award_type', models.TextField(blank=True, null=True)), ('contract_award_type_desc', models.TextField(blank=True, null=True)), ('contract_bundling', models.TextField(blank=True, null=True)), ('contract_bundling_descrip', models.TextField(blank=True, null=True)), ('contract_financing', models.TextField(blank=True, null=True)), ('contract_financing_descrip', models.TextField(blank=True, null=True)), ('contracting_officers_desc', models.TextField(blank=True, null=True)), ('contracting_officers_deter', models.TextField(blank=True, null=True)), ('contracts', models.TextField(blank=True, null=True)), ('corporate_entity_not_tax_e', models.TextField(blank=True, null=True)), ('corporate_entity_tax_exemp', 
models.TextField(blank=True, null=True)), ('cost_accounting_stand_desc', models.TextField(blank=True, null=True)), ('cost_accounting_standards', models.TextField(blank=True, null=True)), ('cost_or_pricing_data', models.TextField(blank=True, null=True)), ('cost_or_pricing_data_desc', models.TextField(blank=True, null=True)), ('council_of_governments', models.TextField(blank=True, null=True)), ('country_of_product_or_desc', models.TextField(blank=True, null=True)), ('country_of_product_or_serv', models.TextField(blank=True, null=True)), ('county_local_government', models.TextField(blank=True, null=True)), ('created_at', usaspending_api.common.custom_django_fields.NaiveTimestampField(blank=True, help_text='record creation datetime in Broker', null=True)), ('current_total_value_award', models.TextField(blank=True, null=True)), ('division_name', models.TextField(blank=True, null=True)), ('division_number_or_office', models.TextField(blank=True, null=True)), ('dod_claimant_prog_cod_desc', models.TextField(blank=True, null=True)), ('dod_claimant_program_code', models.TextField(blank=True, null=True)), ('domestic_or_foreign_e_desc', models.TextField(blank=True, null=True)), ('domestic_or_foreign_entity', models.TextField(blank=True, null=True)), ('domestic_shelter', models.TextField(blank=True, null=True)), ('dot_certified_disadvantage', models.TextField(blank=True, null=True)), ('economically_disadvantaged', models.TextField(blank=True, null=True)), ('educational_institution', models.TextField(blank=True, null=True)), ('emerging_small_business', models.TextField(blank=True, null=True)), ('epa_designated_produc_desc', models.TextField(blank=True, null=True)), ('epa_designated_product', models.TextField(blank=True, null=True)), ('evaluated_preference', models.TextField(blank=True, null=True)), ('evaluated_preference_desc', models.TextField(blank=True, null=True)), ('extent_compete_description', models.TextField(blank=True, null=True)), ('extent_competed', 
models.TextField(blank=True, null=True)), ('fair_opportunity_limi_desc', models.TextField(blank=True, null=True)), ('fair_opportunity_limited_s', models.TextField(blank=True, null=True)), ('fed_biz_opps', models.TextField(blank=True, null=True)), ('fed_biz_opps_description', models.TextField(blank=True, null=True)), ('federal_action_obligation', usaspending_api.common.custom_django_fields.NumericField(blank=True, null=True)), ('federal_agency', models.TextField(blank=True, null=True)), ('federally_funded_research', models.TextField(blank=True, null=True)), ('for_profit_organization', models.TextField(blank=True, null=True)), ('foreign_funding', models.TextField(blank=True, null=True)), ('foreign_funding_desc', models.TextField(blank=True, null=True)), ('foreign_government', models.TextField(blank=True, null=True)), ('foreign_owned_and_located', models.TextField(blank=True, null=True)), ('foundation', models.TextField(blank=True, null=True)), ('funding_agency_code', models.TextField(blank=True, null=True)), ('funding_agency_name', models.TextField(blank=True, null=True)), ('funding_office_code', models.TextField(blank=True, null=True)), ('funding_office_name', models.TextField(blank=True, null=True)), ('funding_sub_tier_agency_co', models.TextField(blank=True, null=True)), ('funding_sub_tier_agency_na', models.TextField(blank=True, null=True)), ('government_furnished_desc', models.TextField(blank=True, null=True)), ('government_furnished_prope', models.TextField(blank=True, null=True)), ('grants', models.TextField(blank=True, null=True)), ('high_comp_officer1_amount', models.TextField(blank=True, null=True)), ('high_comp_officer1_full_na', models.TextField(blank=True, null=True)), ('high_comp_officer2_amount', models.TextField(blank=True, null=True)), ('high_comp_officer2_full_na', models.TextField(blank=True, null=True)), ('high_comp_officer3_amount', models.TextField(blank=True, null=True)), ('high_comp_officer3_full_na', models.TextField(blank=True, null=True)), 
('high_comp_officer4_amount', models.TextField(blank=True, null=True)), ('high_comp_officer4_full_na', models.TextField(blank=True, null=True)), ('high_comp_officer5_amount', models.TextField(blank=True, null=True)), ('high_comp_officer5_full_na', models.TextField(blank=True, null=True)), ('hispanic_american_owned_bu', models.TextField(blank=True, null=True)), ('hispanic_servicing_institu', models.TextField(blank=True, null=True)), ('historically_black_college', models.TextField(blank=True, null=True)), ('historically_underutilized', models.TextField(blank=True, null=True)), ('hospital_flag', models.TextField(blank=True, null=True)), ('housing_authorities_public', models.TextField(blank=True, null=True)), ('idv_type', models.TextField(blank=True, null=True)), ('idv_type_description', models.TextField(blank=True, null=True)), ('indian_tribe_federally_rec', models.TextField(blank=True, null=True)), ('information_technolog_desc', models.TextField(blank=True, null=True)), ('information_technology_com', models.TextField(blank=True, null=True)), ('inherently_government_desc', models.TextField(blank=True, null=True)), ('inherently_government_func', models.TextField(blank=True, null=True)), ('initial_report_date', models.TextField(blank=True, null=True)), ('inter_municipal_local_gove', models.TextField(blank=True, null=True)), ('interagency_contract_desc', models.TextField(blank=True, null=True)), ('interagency_contracting_au', models.TextField(blank=True, null=True)), ('international_organization', models.TextField(blank=True, null=True)), ('interstate_entity', models.TextField(blank=True, null=True)), ('joint_venture_economically', models.TextField(blank=True, null=True)), ('joint_venture_women_owned', models.TextField(blank=True, null=True)), ('labor_standards', models.TextField(blank=True, null=True)), ('labor_standards_descrip', models.TextField(blank=True, null=True)), ('labor_surplus_area_firm', models.TextField(blank=True, null=True)), ('last_modified', 
models.TextField(blank=True, null=True)), ('legal_entity_address_line1', models.TextField(blank=True, null=True)), ('legal_entity_address_line2', models.TextField(blank=True, null=True)), ('legal_entity_address_line3', models.TextField(blank=True, null=True)), ('legal_entity_city_name', models.TextField(blank=True, null=True)), ('legal_entity_congressional', models.TextField(blank=True, null=True)), ('legal_entity_country_code', models.TextField(blank=True, null=True)), ('legal_entity_country_name', models.TextField(blank=True, null=True)), ('legal_entity_county_code', models.TextField(blank=True, null=True)), ('legal_entity_county_name', models.TextField(blank=True, null=True)), ('legal_entity_state_code', models.TextField(blank=True, null=True)), ('legal_entity_state_descrip', models.TextField(blank=True, null=True)), ('legal_entity_zip4', models.TextField(blank=True, null=True)), ('legal_entity_zip5', models.TextField(blank=True, null=True)), ('legal_entity_zip_last4', models.TextField(blank=True, null=True)), ('limited_liability_corporat', models.TextField(blank=True, null=True)), ('local_area_set_aside', models.TextField(blank=True, null=True)), ('local_area_set_aside_desc', models.TextField(blank=True, null=True)), ('local_government_owned', models.TextField(blank=True, null=True)), ('major_program', models.TextField(blank=True, null=True)), ('manufacturer_of_goods', models.TextField(blank=True, null=True)), ('materials_supplies_article', models.TextField(blank=True, null=True)), ('materials_supplies_descrip', models.TextField(blank=True, null=True)), ('minority_institution', models.TextField(blank=True, null=True)), ('minority_owned_business', models.TextField(blank=True, null=True)), ('multi_year_contract', models.TextField(blank=True, null=True)), ('multi_year_contract_desc', models.TextField(blank=True, null=True)), ('multiple_or_single_aw_desc', models.TextField(blank=True, null=True)), ('multiple_or_single_award_i', models.TextField(blank=True, 
null=True)), ('municipality_local_governm', models.TextField(blank=True, null=True)), ('naics', models.TextField(blank=True, null=True)), ('naics_description', models.TextField(blank=True, null=True)), ('national_interest_action', models.TextField(blank=True, null=True)), ('national_interest_desc', models.TextField(blank=True, null=True)), ('native_american_owned_busi', models.TextField(blank=True, null=True)), ('native_hawaiian_owned_busi', models.TextField(blank=True, null=True)), ('native_hawaiian_servicing', models.TextField(blank=True, null=True)), ('nonprofit_organization', models.TextField(blank=True, null=True)), ('number_of_actions', models.TextField(blank=True, null=True)), ('number_of_employees', models.TextField(blank=True, null=True)), ('number_of_offers_received', models.TextField(blank=True, null=True)), ('ordering_period_end_date', models.TextField(blank=True, null=True)), ('organizational_type', models.TextField(blank=True, null=True)), ('other_minority_owned_busin', models.TextField(blank=True, null=True)), ('other_not_for_profit_organ', models.TextField(blank=True, null=True)), ('other_statutory_authority', models.TextField(blank=True, null=True)), ('other_than_full_and_o_desc', models.TextField(blank=True, null=True)), ('other_than_full_and_open_c', models.TextField(blank=True, null=True)), ('parent_award_id', models.TextField(blank=True, null=True)), ('partnership_or_limited_lia', models.TextField(blank=True, null=True)), ('performance_based_se_desc', models.TextField(blank=True, null=True)), ('performance_based_service', models.TextField(blank=True, null=True)), ('period_of_perf_potential_e', models.TextField(blank=True, null=True)), ('period_of_performance_curr', models.TextField(blank=True, null=True)), ('period_of_performance_star', models.TextField(blank=True, null=True)), ('piid', models.TextField(blank=True, null=True)), ('place_of_manufacture', models.TextField(blank=True, null=True)), ('place_of_manufacture_desc', 
models.TextField(blank=True, null=True)), ('place_of_perf_country_desc', models.TextField(blank=True, null=True)), ('place_of_perfor_state_desc', models.TextField(blank=True, null=True)), ('place_of_perform_city_name', models.TextField(blank=True, null=True)), ('place_of_perform_country_c', models.TextField(blank=True, null=True)), ('place_of_perform_country_n', models.TextField(blank=True, null=True)), ('place_of_perform_county_co', models.TextField(blank=True, null=True)), ('place_of_perform_county_na', models.TextField(blank=True, null=True)), ('place_of_perform_state_nam', models.TextField(blank=True, null=True)), ('place_of_perform_zip_last4', models.TextField(blank=True, null=True)), ('place_of_performance_congr', models.TextField(blank=True, null=True)), ('place_of_performance_locat', models.TextField(blank=True, null=True)), ('place_of_performance_state', models.TextField(blank=True, null=True)), ('place_of_performance_zip4a', models.TextField(blank=True, null=True)), ('place_of_performance_zip5', models.TextField(blank=True, null=True)), ('planning_commission', models.TextField(blank=True, null=True)), ('port_authority', models.TextField(blank=True, null=True)), ('potential_total_value_awar', models.TextField(blank=True, null=True)), ('price_evaluation_adjustmen', models.TextField(blank=True, null=True)), ('private_university_or_coll', models.TextField(blank=True, null=True)), ('product_or_service_co_desc', models.TextField(blank=True, null=True)), ('product_or_service_code', models.TextField(blank=True, null=True)), ('program_acronym', models.TextField(blank=True, null=True)), ('program_system_or_equ_desc', models.TextField(blank=True, null=True)), ('program_system_or_equipmen', models.TextField(blank=True, null=True)), ('pulled_from', models.TextField(blank=True, null=True)), ('purchase_card_as_paym_desc', models.TextField(blank=True, null=True)), ('purchase_card_as_payment_m', models.TextField(blank=True, null=True)), ('receives_contracts_and_gra', 
models.TextField(blank=True, null=True)), ('recovered_materials_s_desc', models.TextField(blank=True, null=True)), ('recovered_materials_sustai', models.TextField(blank=True, null=True)), ('referenced_idv_agency_desc', models.TextField(blank=True, null=True)), ('referenced_idv_agency_iden', models.TextField(blank=True, null=True)), ('referenced_idv_agency_name', models.TextField(blank=True, null=True)), ('referenced_idv_modificatio', models.TextField(blank=True, null=True)), ('referenced_idv_type', models.TextField(blank=True, null=True)), ('referenced_idv_type_desc', models.TextField(blank=True, null=True)), ('referenced_mult_or_si_desc', models.TextField(blank=True, null=True)), ('referenced_mult_or_single', models.TextField(blank=True, null=True)), ('research', models.TextField(blank=True, null=True)), ('research_description', models.TextField(blank=True, null=True)), ('sam_exception', models.TextField(blank=True, null=True)), ('sam_exception_description', models.TextField(blank=True, null=True)), ('sba_certified_8_a_joint_ve', models.TextField(blank=True, null=True)), ('school_district_local_gove', models.TextField(blank=True, null=True)), ('school_of_forestry', models.TextField(blank=True, null=True)), ('sea_transportation', models.TextField(blank=True, null=True)), ('sea_transportation_desc', models.TextField(blank=True, null=True)), ('self_certified_small_disad', models.TextField(blank=True, null=True)), ('service_disabled_veteran_o', models.TextField(blank=True, null=True)), ('small_agricultural_coopera', models.TextField(blank=True, null=True)), ('small_business_competitive', models.TextField(blank=True, null=True)), ('small_disadvantaged_busine', models.TextField(blank=True, null=True)), ('sole_proprietorship', models.TextField(blank=True, null=True)), ('solicitation_date', models.TextField(blank=True, null=True)), ('solicitation_identifier', models.TextField(blank=True, null=True)), ('solicitation_procedur_desc', models.TextField(blank=True, null=True)), 
('solicitation_procedures', models.TextField(blank=True, null=True)), ('state_controlled_instituti', models.TextField(blank=True, null=True)), ('subchapter_s_corporation', models.TextField(blank=True, null=True)), ('subcontinent_asian_asian_i', models.TextField(blank=True, null=True)), ('subcontracting_plan', models.TextField(blank=True, null=True)), ('subcontracting_plan_desc', models.TextField(blank=True, null=True)), ('the_ability_one_program', models.TextField(blank=True, null=True)), ('total_obligated_amount', models.TextField(blank=True, null=True)), ('township_local_government', models.TextField(blank=True, null=True)), ('transaction_number', models.TextField(blank=True, null=True)), ('transit_authority', models.TextField(blank=True, null=True)), ('tribal_college', models.TextField(blank=True, null=True)), ('tribally_owned_business', models.TextField(blank=True, null=True)), ('type_of_contract_pric_desc', models.TextField(blank=True, null=True)), ('type_of_contract_pricing', models.TextField(blank=True, null=True)), ('type_of_idc', models.TextField(blank=True, null=True)), ('type_of_idc_description', models.TextField(blank=True, null=True)), ('type_set_aside', models.TextField(blank=True, null=True)), ('type_set_aside_description', models.TextField(blank=True, null=True)), ('ultimate_parent_legal_enti', models.TextField(blank=True, null=True)), ('ultimate_parent_unique_ide', models.TextField(blank=True, null=True)), ('undefinitized_action', models.TextField(blank=True, null=True)), ('undefinitized_action_desc', models.TextField(blank=True, null=True)), ('unique_award_key', models.TextField(blank=True, null=True)), ('updated_at', usaspending_api.common.custom_django_fields.NaiveTimestampField(blank=True, help_text='record last update datetime in Broker', null=True)), ('us_federal_government', models.TextField(blank=True, null=True)), ('us_government_entity', models.TextField(blank=True, null=True)), ('us_local_government', models.TextField(blank=True, 
null=True)), ('us_state_government', models.TextField(blank=True, null=True)), ('us_tribal_government', models.TextField(blank=True, null=True)), ('vendor_alternate_name', models.TextField(blank=True, null=True)), ('vendor_alternate_site_code', models.TextField(blank=True, null=True)), ('vendor_doing_as_business_n', models.TextField(blank=True, null=True)), ('vendor_enabled', models.TextField(blank=True, null=True)), ('vendor_fax_number', models.TextField(blank=True, null=True)), ('vendor_legal_org_name', models.TextField(blank=True, null=True)), ('vendor_location_disabled_f', models.TextField(blank=True, null=True)), ('vendor_phone_number', models.TextField(blank=True, null=True)), ('vendor_site_code', models.TextField(blank=True, null=True)), ('veteran_owned_business', models.TextField(blank=True, null=True)), ('veterinary_college', models.TextField(blank=True, null=True)), ('veterinary_hospital', models.TextField(blank=True, null=True)), ('woman_owned_business', models.TextField(blank=True, null=True)), ('women_owned_small_business', models.TextField(blank=True, null=True))], options={'db_table': 'source_procurement_transaction'})]
def upgrade():
    """Recreate each child table's ``event_id`` foreign key to ``events``
    with ``ON DELETE CASCADE`` so child rows are removed together with
    their parent event.
    """
    # Every constraint follows the default "<table>_event_id_fkey" naming
    # convention, so the 13 identical drop/create pairs collapse to a loop.
    # Order matches the original migration.
    tables = (
        'call_for_papers',
        'custom_forms',
        'eventsusers',
        'invites',
        'microlocation',
        'session',
        'session_type',
        'social_link',
        'speaker',
        'sponsor_type',
        'sponsors',
        'tracks',
        'users_events_roles',
    )
    for table in tables:
        # Drop the plain FK and re-add it with cascading deletes;
        # passing None lets Alembic generate the new constraint name.
        op.drop_constraint(table + '_event_id_fkey', table,
                           type_='foreignkey')
        op.create_foreign_key(None, table, 'events', ['event_id'], ['id'],
                              ondelete='CASCADE')
class TestLedgerApiHandler(BaseSkillTestCase):
    """Tests for the ledger_api handler of the 'fetchai/ml_data_provider' skill."""

    # Location of the skill under test within the repository.
    path_to_skill = Path(ROOT_DIR, 'packages', 'fetchai', 'skills', 'ml_data_provider')
    is_agent_to_agent_messages = False

    def setup(cls):
        # NOTE(review): takes 'cls' but carries no @classmethod decorator —
        # presumably stripped during extraction; confirm against the original.
        """Set up handler, dialogues and canned message fixtures for each test."""
        super().setup()
        cls.ledger_api_handler = cast(LedgerApiHandler, cls._skill.skill_context.handlers.ledger_api)
        cls.strategy = cast(Strategy, cls._skill.skill_context.strategy)
        cls.ml_dialogues = cast(MlTradeDialogues, cls._skill.skill_context.ml_trade_dialogues)
        cls.ledger_api_dialogues = cast(LedgerApiDialogues, cls._skill.skill_context.ledger_api_dialogues)
        cls.logger = cls._skill.skill_context.logger
        cls.ledger_id = 'FET'
        cls.transaction_digest = TransactionDigest('some_ledger_id', 'some_body')
        cls.transaction_receipt = TransactionReceipt('some_ledger_id', {'some_key': 'some_value'}, {'some_key': 'some_value'})
        # Pre-baked two-message exchange used to prepare dialogues in tests.
        cls.list_of_ledger_api_messages = (DialogueMessage(LedgerApiMessage.Performative.GET_TRANSACTION_RECEIPT, {'transaction_digest': cls.transaction_digest}), DialogueMessage(LedgerApiMessage.Performative.TRANSACTION_RECEIPT, {'transaction_receipt': cls.transaction_receipt}))

    def test_setup(self):
        """setup() is a no-op: returns None and sends nothing."""
        assert (self.ledger_api_handler.setup() is None)
        self.assert_quantity_in_outbox(0)

    def test_handle_unidentified_dialogue(self):
        """A message with an unknown dialogue reference is logged as invalid."""
        incorrect_dialogue_reference = ('', '')
        incoming_message = self.build_incoming_message(message_type=LedgerApiMessage, dialogue_reference=incorrect_dialogue_reference, performative=LedgerApiMessage.Performative.GET_BALANCE, ledger_id='some_ledger_id', address='some_address')
        with patch.object(self.ledger_api_handler.context.logger, 'log') as mock_logger:
            self.ledger_api_handler.handle(incoming_message)
        mock_logger.assert_any_call(logging.INFO, f'received invalid ledger_api message={incoming_message}, unidentified dialogue.')

    def test_handle_balance(self):
        """A BALANCE reply is logged with the ledger id and the reported balance."""
        ledger_api_dialogue = cast(LedgerApiDialogue, self.prepare_skill_dialogue(dialogues=self.ledger_api_dialogues, messages=(DialogueMessage(LedgerApiMessage.Performative.GET_BALANCE, {'ledger_id': 'some_ledger_id', 'address': 'some_address'}),)))
        incoming_message = cast(LedgerApiMessage, self.build_incoming_message_for_skill_dialogue(dialogue=ledger_api_dialogue, performative=LedgerApiMessage.Performative.BALANCE, ledger_id=self.ledger_id, balance=10))
        with patch.object(self.logger, 'log') as mock_logger:
            self.ledger_api_handler.handle(incoming_message)
        mock_logger.assert_any_call(logging.INFO, f'starting balance on {self.ledger_id} ledger={incoming_message.balance}.')

    def test_handle_error(self):
        """An ERROR reply inside a known dialogue is logged with both objects."""
        ledger_api_dialogue = self.prepare_skill_dialogue(dialogues=self.ledger_api_dialogues, messages=self.list_of_ledger_api_messages[:1])
        incoming_message = cast(LedgerApiMessage, self.build_incoming_message_for_skill_dialogue(dialogue=ledger_api_dialogue, performative=LedgerApiMessage.Performative.ERROR, code=1))
        with patch.object(self.logger, 'log') as mock_logger:
            self.ledger_api_handler.handle(incoming_message)
        mock_logger.assert_any_call(logging.INFO, f'received ledger_api error message={incoming_message} in dialogue={ledger_api_dialogue}.')

    def test_handle_invalid(self):
        """A performative this handler never expects is logged as a warning."""
        invalid_performative = LedgerApiMessage.Performative.GET_BALANCE
        incoming_message = self.build_incoming_message(message_type=LedgerApiMessage, dialogue_reference=('1', ''), performative=invalid_performative, ledger_id='some_ledger_id', address='some_address', to=str(self.skill.skill_context.skill_id))
        with patch.object(self.logger, 'log') as mock_logger:
            self.ledger_api_handler.handle(incoming_message)
        mock_logger.assert_any_call(logging.WARNING, f'cannot handle ledger_api message of performative={invalid_performative} in dialogue={self.ledger_api_dialogues.get_dialogue(incoming_message)}.')

    def test_teardown(self):
        """teardown() is a no-op: returns None and sends nothing."""
        assert (self.ledger_api_handler.teardown() is None)
        self.assert_quantity_in_outbox(0)
# NOTE(review): the '@pytest.mark' decorator prefix and the list of restart
# dates were lost in extraction (source read '.parametrize(..., [, ], ...)').
# The syntax is restored below; the date values are placeholders — restore the
# original dates before relying on this test.
@pytest.mark.parametrize(
    'date, name, fracture',
    itertools.product(
        [20170121, 20170131],  # TODO(review): original dates lost — confirm
        ['SGAS', 'SOIL', 'SWAT'],
        [True, False],
    ),
)
def test_dual_runs_restart_property_to_file(dual_runs, date, name, fracture):
    """A restart property read from a dual-porosity run is suffixed with
    'F' (fracture) or 'M' (matrix) plus the restart date."""
    prop = dual_runs.get_property_from_restart(name, date=date, fracture=fracture)
    if fracture:
        assert (prop.name == f'{name}F_{date}')
    else:
        assert (prop.name == f'{name}M_{date}')
# NOTE(review): the decorator prefix was lost in extraction (source began with
# the bare argument tuple). '@mark.parametrize(' is restored here to match the
# bare 'param'/'raises' usage — confirm against the file's pytest imports.
@mark.parametrize(
    'value,expected',
    [
        param('abc', 'abc', id='value:simple'),
        param('abc ', 'abc', id='value:simple_ws'),
        param(' abc', 'abc', id='ws_value:simple'),
        param('[1,2,3]', [1, 2, 3], id='value:list'),
        param('[1 ]', [1], id='value:list1_ws'),
        param('[1, 2, 3]', [1, 2, 3], id='value:list_ws'),
        param('1,2,3', ChoiceSweep(list=[1, 2, 3], simple_form=True), id='sweep:int'),
        param('1, 2, 3', ChoiceSweep(list=[1, 2, 3], simple_form=True), id='sweep:int_ws'),
        param('${a}, ${b}, ${c}', ChoiceSweep(list=['${a}', '${b}', '${c}'], simple_form=True), id='sweep:interpolations'),
        param('[a,b],[c,d]', ChoiceSweep(list=[['a', 'b'], ['c', 'd']], simple_form=True), id='sweep:lists'),
        param('true', True, id='value:bool'),
        param('True', True, id='value:bool'),
        param('TRUE', True, id='value:bool'),
        param('trUe', True, id='value:bool'),
        param('false', False, id='value:bool'),
        param('False', False, id='value:bool'),
        param('FALSE', False, id='value:bool'),
        param('faLse', False, id='value:bool'),
        param('int(10.0)', 10, id='int(10.0)'),
        param('str(10.0)', '10.0', id='str(10.0)'),
        param('bool(10.0)', True, id='bool(10.0)'),
        param('float(10)', 10.0, id='float(10)'),
        param('float(float(10))', 10.0, id='float(float(10))'),
        param('sort([2,3,1])', [1, 2, 3], id='sort([2,3,1])'),
        param('sort([2,3,1],reverse=true)', [3, 2, 1], id='sort([2,3,1],reverse=true)'),
        param('sort(3,2,1)', ChoiceSweep(simple_form=True, list=[1, 2, 3], tags=set()), id='sort(3,2,1)'),
        param('sort(a,c,b,reverse=true)', ChoiceSweep(simple_form=True, list=['c', 'b', 'a'], tags=set()), id='sort(a,c,b,reverse=true)'),
        param('float(sort(3,2,1))', ChoiceSweep(simple_form=True, list=[1.0, 2.0, 3.0], tags=set()), id='float(sort(3,2,1))'),
        param('sort(float(3,2,1))', ChoiceSweep(simple_form=True, list=[1.0, 2.0, 3.0], tags=set()), id='sort(float(3,2,1))'),
        param('sort(3,2,str(1))', raises(HydraException, match=re.escape("TypeError while evaluating 'sort(3,2,str(1))': '<' not supported between instances of 'str' and 'int'")), id='sort(3,2,str(1))'),
        param('shuffle(1,2,3)', ChoiceSweep(list=[1, 2, 3], shuffle=True, simple_form=True), id='shuffle(1,2,3)'),
        param('shuffle(choice(1,2,3))', ChoiceSweep(list=[1, 2, 3], shuffle=True), id='shuffle(choice(1,2,3))'),
    ],
)
def test_value(value: str, expected: Any) -> None:
    """Parse *value* with the 'value' grammar rule and compare to *expected*.

    When *expected* is a pytest.raises context the parse is expected to fail
    with the given exception instead of producing a value.
    """
    if isinstance(expected, RaisesContext):
        with expected:
            parse_rule(value, 'value')
    else:
        ret = parse_rule(value, 'value')
        assert (ret == expected)
def create_2d_list(text: str) -> list:
    """Parse *text* into rows of [label, float, float, ...].

    Non-blank lines are accumulated into groups; a new group starts whenever
    the (original) line index is a multiple of 7 and the current group is
    non-empty.  Within each finished group the first entry is kept as a
    string and every following entry is converted to float.
    """
    groups = []
    current = []
    for index, raw_line in enumerate(text.strip().split('\n')):
        stripped = raw_line.strip()
        if not stripped:
            # Blank lines contribute nothing, but still advance the index.
            continue
        if index % 7 == 0 and current:
            groups.append(current)
            current = []
        current.append(stripped)
    if current:
        groups.append(current)
    # First column stays textual; the remaining columns become floats.
    return [[group[0], *map(float, group[1:])] for group in groups]
class SkillComponentTestCase(TestCase):
    """Unit tests for the SkillComponent base class."""

    def setUp(self):
        # Minimal concrete subclass: SkillComponent is abstract, so the three
        # abstract hooks are stubbed out with no-ops.
        class TestComponent(SkillComponent):
            def parse_module(self, *args):
                pass

            def setup(self, *args):
                pass

            def teardown(self, *args):
                pass
        self.TestComponent = TestComponent

    def test_init_no_ctx(self):
        """Constructing without a skill context or without a name raises ValueError."""
        with self.assertRaises(ValueError):
            self.TestComponent(name='some_name', skill_context=None)
        with self.assertRaises(ValueError):
            self.TestComponent(name=None, skill_context='skill_context')

    def test_skill_id_positive(self):
        """skill_id is readable when the context provides one."""
        ctx = mock.Mock()
        ctx.skill_id = 'skill_id'
        component = self.TestComponent(name='name', skill_context=ctx, configuration=Mock())
        component.skill_id  # accessing the property must not raise

    def test_config_positive(self):
        """config is readable when the configuration provides args."""
        component = self.TestComponent(configuration=Mock(args={}), skill_context='ctx', name='name')
        component.config  # accessing the property must not raise

    def test_kwargs_not_empty(self):
        """Unconsumed constructor kwargs trigger a warning log."""
        kwargs = dict(foo='bar')
        component_name = 'component_name'
        skill_context = SkillContext()
        with mock.patch.object(skill_context.logger, 'warning') as mock_logger:
            self.TestComponent(component_name, skill_context, **kwargs)
        mock_logger.assert_any_call(f'The kwargs={kwargs} passed to {component_name} have not been set!')
def Oranges(range, **traits):
    """Build an 'Oranges' ColorMapper over *range*.

    NOTE(review): every channel value except red's first control point is 0 —
    the segment data looks zeroed-out compared to the standard 'Oranges'
    colormap tables; the numeric values may have been lost in extraction.
    Confirm against the original colormap definition before trusting the
    rendered colors.

    The parameter name 'range' shadows the builtin, but it is part of the
    public signature and kept as-is.
    """
    # Segment map: each channel is a list of (position, value_below, value_above)
    # control points on [0, 1].
    _data = dict(red=[(0.0, 1.0, 1.0), (0.125, 0., 0.), (0.25, 0., 0.), (0.375, 0., 0.), (0.5, 0., 0.), (0.625, 0., 0.), (0.75, 0., 0.), (0.875, 0., 0.), (1.0, 0., 0.)], green=[(0.0, 0., 0.), (0.125, 0., 0.), (0.25, 0., 0.), (0.375, 0., 0.), (0.5, 0., 0.), (0.625, 0., 0.), (0.75, 0., 0.), (0.875, 0., 0.), (1.0, 0., 0.)], blue=[(0.0, 0., 0.), (0.125, 0., 0.), (0.25, 0., 0.), (0.375, 0., 0.), (0.5, 0., 0.), (0.625, 0., 0.), (0.75, 0., 0.), (0.875, 0., 0.), (1.0, 0., 0.)])
    return ColorMapper.from_segment_map(_data, range=range, **traits)
def get_last_invocation(region, args, function_name):
    """Return the most recent invocation timestamp of a Lambda function.

    Looks up the newest CloudWatch log streams of the function's log group
    and returns the largest 'lastEventTimestamp' found (epoch milliseconds),
    or -1 when the log group is unavailable or holds no events.
    """
    client = init_boto_client('logs', region, args)
    try:
        response = client.describe_log_streams(
            logGroupName='/aws/lambda/{0}'.format(function_name),
            orderBy='LastEventTime',
            descending=True,
        )
    except ClientError:
        # Log group missing or inaccessible: report "never invoked".
        return -1
    timestamps = [stream.get('lastEventTimestamp', 0) for stream in response['logStreams']]
    return max(timestamps) if timestamps else -1
def upper_key(letter):
    """Map a single character to its 'upper key' representation.

    Returns the custom mapping when the character is listed, the uppercase
    form for cased letters, and a space for everything else (including
    multi-character input).
    """
    if (len(letter) != 1):
        return ' '
    # NOTE(review): most keys/values below are empty strings — the original
    # special characters appear to have been stripped by a bad encoding pass
    # (the literal even contains duplicate '' keys, which silently collapse).
    # Reconstruct the intended character mapping before relying on this table.
    custom_alpha = {'': '', '|': '', '<': '', '>': '', '': '', '': '', '': '', '': '', '': '', '': ' '}
    if (letter in custom_alpha):
        return custom_alpha[letter]
    if (letter.upper() != letter.lower()):
        # Cased letter: use its uppercase form.
        return letter.upper()
    return ' '
def is_plugged_in(throw_error=True):
    """Report whether the machine is on AC power.

    Only implemented for Windows (via GetSystemPowerStatus); every other
    platform is assumed to be plugged in and returns True.  When the power
    status query fails, raises RuntimeError if *throw_error* is set,
    otherwise returns False.
    """
    if platform.system() != 'Windows':
        # No query available off-Windows: assume mains power.
        return True
    if not GetSystemPowerStatus(ctypes.pointer(powerStatus)):
        if throw_error:
            raise RuntimeError('could not get power status')
        return False
    # ACLineStatus == 1 means the AC adapter is connected.
    return powerStatus.ACLineStatus == 1
class DefaultSchema(ViewInspector):
    """Descriptor that lazily swaps itself for the configured schema class.

    On attribute access, defers to the regular ViewInspector descriptor; if
    that still yields a DefaultSchema placeholder, an instance of
    DEFAULT_SCHEMA_CLASS is created, bound to the view, and returned instead.
    """

    def __get__(self, instance, owner):
        resolved = super().__get__(instance, owner)
        if not isinstance(resolved, DefaultSchema):
            # A concrete schema was already set on the view — use it.
            return resolved
        schema_cls = api_settings.DEFAULT_SCHEMA_CLASS
        assert issubclass(schema_cls, ViewInspector), 'DEFAULT_SCHEMA_CLASS must be set to a ViewInspector (usually an AutoSchema) subclass'
        schema = schema_cls()
        schema.view = instance
        return schema
class OptionSeriesOrganizationSonificationDefaultinstrumentoptionsMappingPitch(Options):
    """Generated Highcharts pitch-mapping option wrapper.

    Each option is exposed as a property: the getter reads the current value
    (falling back to the documented default), the setter writes it back via
    ``_config``.  The original block defined getter and setter as two plain
    methods with the same name and no decorators, so each getter was
    silently shadowed by its setter; the ``@property``/``@x.setter`` pairs
    restore the intended accessor API.
    """

    @property
    def mapFunction(self):
        # No default mapping function.
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        # Defaults to mapping pitch to the 'y' value.
        return self._config_get('y')

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        # Default upper pitch bound (note name).
        return self._config_get('c6')

    @max.setter
    def max(self, text: str):
        self._config(text, js_type=False)

    @property
    def min(self):
        # Default lower pitch bound (note name).
        return self._config_get('c2')

    @min.setter
    def min(self, text: str):
        self._config(text, js_type=False)

    @property
    def scale(self):
        return self._config_get(None)

    @scale.setter
    def scale(self, value: Any):
        self._config(value, js_type=False)

    @property
    def within(self):
        # Defaults to mapping within the 'yAxis' extremes.
        return self._config_get('yAxis')

    @within.setter
    def within(self, text: str):
        self._config(text, js_type=False)
class OptionPlotoptionsWaterfallSonificationDefaultspeechoptionsActivewhen(Options):
    """Generated Highcharts 'activeWhen' option wrapper.

    Each option is exposed as a property with a matching setter.  The
    original block defined getter and setter as two plain methods with the
    same name and no decorators, so each getter was silently shadowed by
    its setter; the ``@property``/``@x.setter`` pairs restore the intended
    accessor API.
    """

    @property
    def crossingDown(self):
        return self._config_get(None)

    @crossingDown.setter
    def crossingDown(self, num: float):
        self._config(num, js_type=False)

    @property
    def crossingUp(self):
        return self._config_get(None)

    @crossingUp.setter
    def crossingUp(self, num: float):
        self._config(num, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def prop(self):
        return self._config_get(None)

    @prop.setter
    def prop(self, text: str):
        self._config(text, js_type=False)
def _transform_bad_request_response_to_exception(response):
    """Translate a Foundry SQL-server HTTP 400 into a specific exception.

    Raises DatasetHasNoSchemaError, BranchNotFoundError or
    DatasetNotFoundError depending on the server-side 'errorName'; returns
    None (no exception) for any other response.

    The original re-parsed ``response.json()`` and re-checked the status
    code for every branch; both are now evaluated once.
    """
    if response.status_code != requests.codes.bad:
        return
    body = response.json()
    error_name = body.get('errorName')
    if error_name == 'FoundrySqlServer:InvalidDatasetNoSchema':
        raise DatasetHasNoSchemaError('SQL')
    if error_name == 'FoundrySqlServer:InvalidDatasetCannotAccess':
        raise BranchNotFoundError(body['parameters']['datasetRid'], _extract_branch_from_sql_error(response))
    if error_name == 'FoundrySqlServer:InvalidDatasetPathNotFound':
        raise DatasetNotFoundError(body['parameters']['path'])
# NOTE(review): the decorator syntax was garbled in extraction
# ('_admin_groups.command() _context'); '@' and 'click.pass_context' are
# restored here — confirm against the original click wiring.
@_admin_groups.command()
@click.pass_context
def execute(ctx):
    """Interactive menu: check Okta group role assignments for admin permissions.

    Option 1 loads previously harvested groups from a JSON file; option 2
    harvests all groups live; option 0 exits.  Loops until a terminal choice
    is made, logging and indexing each action.
    """
    options = '[*] Available options\n[1] Load harvested groups from json file and check their assigned roles for administrator permissions\n[2] Harvest all groups and check their assigned roles for administrator permissions\n[0] Exit this menu\n[*] Choose from the above options'
    while True:
        value = click.prompt(options, type=int)
        if (value == 1):
            file_path = Path(click.prompt('[*] Enter full path of file containing harvested Okta groups'))
            if file_path.exists():
                msg = f'Attempting to check roles for groups in file, {file_path}'
                LOGGER.info(msg)
                index_event(ctx.obj.es, module=__name__, event_type='INFO', event=msg)
                click.echo(f'[*] {msg}')
                groups = load_json_file(file_path)
                check_assigned_roles(ctx, groups)
                return
            else:
                # Invalid path: report and re-show the menu.
                msg = f'File not found, {file_path}'
                LOGGER.error(msg)
                index_event(ctx.obj.es, module=__name__, event_type='ERROR', event=msg)
                click.secho(f'[!] {msg}', fg='red')
        elif (value == 2):
            if click.confirm('[*] Do you want to attempt to harvest information for all groups?', default=True):
                msg = 'Attempting to harvest all Okta groups'
                LOGGER.info(msg)
                index_event(ctx.obj.es, module=__name__, event_type='INFO', event=msg)
                click.echo(f'[*] {msg}')
                groups = ctx.obj.okta.get_groups(ctx)
                check_assigned_roles(ctx, groups)
                return
        elif (value == 0):
            return
        else:
            click.secho('[!] Invalid option selected', fg='red')
def test():
    """Exercise checker: validate the learner's spaCy Matcher patterns.

    Relies on module-level 'pattern1', 'pattern2', 'matcher', 'doc' and
    '__msg__' provided by the exercise environment.  Attribute names are
    accepted in both lower- and upper-case spellings.
    """
    assert (len(pattern1) == 2), "The number of tokens in pattern1 doesn't match the real number of tokens in the string."
    assert (len(pattern2) == 4), "The number of tokens in pattern2 doesn't match the real number of tokens in the string."
    assert (len(pattern1[0]) == 1), 'The first token of pattern1 should include one attribute.'
    assert any(((pattern1[0].get(attr) == 'amazon') for attr in ('lower', 'LOWER'))), 'Check the attribute and value of the first token in pattern1.'
    assert (len(pattern1[1]) == 2), 'The second token of pattern1 should include two attributes.'
    assert any(((pattern1[1].get(attr) == True) for attr in ('is_title', 'IS_TITLE'))), 'Check the attributes and values of the second token in pattern1.'
    assert any(((pattern1[1].get(attr) == 'PROPN') for attr in ('pos', 'POS'))), 'Check the attributes and values of the second token in pattern1.'
    assert any(((pattern2[0].get(attr) == 'ad') for attr in ('lower', 'LOWER'))), 'Check the attribute and value of the first token in pattern2.'
    assert any(((pattern2[2].get(attr) == 'free') for attr in ('lower', 'LOWER'))), 'Check the attribute and value of the third token in pattern2.'
    assert any(((pattern2[3].get(attr) == 'NOUN') for attr in ('pos', 'POS'))), 'Check the attribute and value of the fourth token in pattern2.'
    # Both patterns together must produce exactly 6 matches on the sample doc.
    assert (len(matcher(doc)) == 6), 'Incorrect number of matches expected 6.'
    __msg__.good("Well done! For the token '-', you can match on the attribute 'TEXT', 'LOWER' or even 'SHAPE'. All of those are correct. As you can see, paying close attention to the tokenization is very important when working with the token-based 'Matcher'. Sometimes it's much easier to just match exact strings instead and use the 'PhraseMatcher', which we'll get to in the next exercise.")
def plot_curve_wrt_time(ax, records, x_wrt_sth, y_wrt_sth, xlabel, ylabel, title=None, markevery_list=None, is_smooth=True, smooth_space=100, l_subset=0.0, r_subset=1.0, reorder_record_item=None, remove_duplicate=True, legend=None, legend_loc='lower right', legend_ncol=2, bbox_to_anchor=[0, 0], ylimit_bottom=None, ylimit_top=None, use_log=False, num_cols=3):
    """Plot one curve per (record, y-metric) pair onto *ax* and style the figure.

    records: iterable of (args, info) pairs; 'args' is the run configuration
    used to build legend labels, 'info' maps metric names to value series.
    x_wrt_sth / y_wrt_sth: metric keys; y_wrt_sth may hold several keys
    joined by ';' to draw multiple curves per record.
    l_subset / r_subset: fractional window of each series to plot.

    NOTE(review): 'bbox_to_anchor=[0, 0]' is a mutable default argument —
    harmless while it is only passed through, but worth replacing upstream.
    """
    num_records = len(records)
    distinct_conf_set = set()
    if (reorder_record_item is not None):
        records = reorder_records(records, based_on=reorder_record_item)
    # 'count' indexes the style tables across ALL drawn curves (not records).
    count = 0
    for (ind, (args, info)) in enumerate(records):
        _legend = build_legend(args, legend)
        # Optionally skip records whose legend (i.e. configuration) repeats.
        if ((_legend in distinct_conf_set) and remove_duplicate):
            continue
        else:
            distinct_conf_set.add(_legend)
        # A ';'-separated y key means several curves for this record.
        if (';' in y_wrt_sth):
            has_multiple_y = True
            list_of_y_wrt_sth = y_wrt_sth.split(';')
        else:
            has_multiple_y = False
            list_of_y_wrt_sth = [y_wrt_sth]
        for _y_wrt_sth in list_of_y_wrt_sth:
            (line_style, color_style, mark_style) = determine_color_and_lines(num_rows=(num_records // num_cols), num_cols=num_cols, ind=count)
            if (markevery_list is not None):
                mark_every = markevery_list[count]
            else:
                # No marker schedule: drop markers entirely.
                mark_style = None
                mark_every = None
            count += 1
            # Derive the x series.  Step counts are synthesized from the
            # training-loss length; epochs are steps scaled by batches/epoch.
            if (('tr_step' in x_wrt_sth) or ('tr_epoch' in x_wrt_sth)):
                info['tr_step'] = list(range(1, (1 + len(info['tr_loss']))))
            if ('tr_epoch' == x_wrt_sth):
                x = info['tr_step']
                x = [((1.0 * _x) / args['num_batches_train_per_device_per_epoch']) for _x in x]
            else:
                x = info[x_wrt_sth]
            if ('time' in x_wrt_sth):
                # Convert timestamps to seconds elapsed since the first sample
                # (+1 keeps the origin strictly positive, e.g. for log axes).
                x = [((time - x[0]).seconds + 1) for time in x]
            y = info[_y_wrt_sth]
            if is_smooth:
                (x, y) = smoothing_func(x, y, smooth_space)
            # Restrict to the requested fractional window of the series.
            (_l_subset, _r_subset) = (int((len(x) * l_subset)), int((len(x) * r_subset)))
            _x = x[_l_subset:_r_subset]
            _y = y[_l_subset:_r_subset]
            if use_log:
                _y = np.log(_y)
            ax = plot_one_case(ax, x=_x, y=_y, label=(_legend if (not has_multiple_y) else (_legend + f', {_y_wrt_sth}')), line_style=line_style, color_style=color_style, mark_style=mark_style, mark_every=mark_every, remove_duplicate=remove_duplicate)
    ax.set_ylim(bottom=ylimit_bottom, top=ylimit_top)
    ax = configure_figure(ax, xlabel=xlabel, ylabel=ylabel, title=title, has_legend=(legend is not None), legend_loc=legend_loc, legend_ncol=legend_ncol, bbox_to_anchor=bbox_to_anchor)
    return ax
def _is_valid_optional(content_type: str) -> bool:
    """Check whether *content_type* is a well-formed 'pt:optional[...]' spec.

    The string must start with 'pt:optional', carry balanced brackets, and
    wrap exactly one sub-type that is itself a valid ct/pt/set/list/dict/union
    specification.
    """
    stripped = content_type.strip()
    if not stripped.startswith('pt:optional'):
        return False
    if not (_has_matched_brackets(stripped) and _has_brackets(stripped)):
        return False
    inner_types = _get_sub_types_of_compositional_types(stripped)
    if len(inner_types) != 1:
        # 'optional' wraps exactly one sub-type.
        return False
    candidate = inner_types[0]
    validators = (
        _is_valid_ct,
        _is_valid_pt,
        _is_valid_set,
        _is_valid_list,
        _is_valid_dict,
        _is_valid_union,
    )
    return any(is_valid(candidate) for is_valid in validators)
class OptionPlotoptionsPackedbubbleStates(Options):
    """Generated Highcharts 'states' option wrapper for packed-bubble series.

    Each accessor lazily creates (or returns) the corresponding sub-option
    object via ``_config_sub_data``.  NOTE(review): the sibling generated
    option classes in this file clearly lost their decorators in extraction
    (getter/setter pairs shadowing each other); these accessors are restored
    to ``@property`` to match the generated-wrapper convention — confirm
    against the upstream generator output.
    """

    @property
    def hover(self) -> 'OptionPlotoptionsPackedbubbleStatesHover':
        return self._config_sub_data('hover', OptionPlotoptionsPackedbubbleStatesHover)

    @property
    def inactive(self) -> 'OptionPlotoptionsPackedbubbleStatesInactive':
        return self._config_sub_data('inactive', OptionPlotoptionsPackedbubbleStatesInactive)

    @property
    def normal(self) -> 'OptionPlotoptionsPackedbubbleStatesNormal':
        return self._config_sub_data('normal', OptionPlotoptionsPackedbubbleStatesNormal)

    @property
    def select(self) -> 'OptionPlotoptionsPackedbubbleStatesSelect':
        return self._config_sub_data('select', OptionPlotoptionsPackedbubbleStatesSelect)
def feed():
    """Feed the pet: raise the hunger meter unless it is already nearly full.

    A sick pet accepts only half a portion and complains; feeding past the
    88 threshold is refused with an over-feeding warning.  Status text is
    pushed to the UI on a background thread via updateLabel.
    """
    # Only 'hunger' is assigned here; 'sick' is merely read, so the original
    # 'global sick' declaration was unnecessary and has been dropped.
    global hunger
    if hunger <= 88:
        if sick:  # idiomatic truth test instead of '== True' (PEP 8 E712)
            hunger += HUNGER_ADD // 2
            _thread.start_new_thread(updateLabel, (3, (TAMA_NAME + " doesn't want to eat.")))
        else:
            hunger += HUNGER_ADD
    else:
        _thread.start_new_thread(updateLabel, (3, ("Don't overfeed " + TAMA_NAME)))
class AdDynamicCreative(AbstractObject):
    """Graph-API object wrapper for an ad's dynamic creative preview."""

    def __init__(self, api=None):
        super(AdDynamicCreative, self).__init__()
        self._isAdDynamicCreative = True
        self._api = api

    class Field(AbstractObject.Field):
        # The only field exposed by this node.
        preview_url = 'preview_url'

    # Field name -> Graph-API type, used by the SDK's (de)serialization.
    _field_types = {'preview_url': 'string'}

    # Restored @classmethod: the method takes 'cls' and reads no instance
    # state; without the decorator it could not be called on the class, as
    # the SDK's field-enum machinery does.
    @classmethod
    def _get_field_enum_info(cls):
        field_enum_info = {}
        return field_enum_info
def _parse_signed_tx(signed_tx: HexBytes) -> TxParams:
    """Decode a signed raw transaction into TxParams, recovering the sender.

    The first byte distinguishes legacy RLP transactions (> 0x7f) from typed
    envelopes (EIP-2930 type 1, EIP-1559 type 2).  The v/r/s signature
    fields are dropped and replaced by the recovered 'from' address.
    """
    envelope_type = signed_tx[0]
    if envelope_type > 0x7f:
        # Legacy transaction: the whole payload is one RLP blob.
        fields = rlp.decode(signed_tx, Transaction).as_dict()
    else:
        if envelope_type == 1:
            serializer = AccessListTransaction._signed_transaction_serializer
        elif envelope_type == 2:
            serializer = DynamicFeeTransaction._signed_transaction_serializer
        else:
            raise ValueError(f'Unknown transaction type: {envelope_type}.')
        # Typed envelope: the RLP payload starts after the type byte.
        fields = rlp.decode(signed_tx[1:], serializer).as_dict()
    fields['from'] = Account.recover_transaction(signed_tx)
    return dissoc(fields, 'v', 'r', 's')
def export_hdf5_well(self, wfile, compression='lzf'):
    """Export this well to an HDF5 file via a pandas HDFStore.

    The metadata is serialized to JSON and stored as attributes alongside
    the 'Well' data table, together with provider/format markers.

    NOTE(review): only compression == 'blosc' actually enables compression;
    every other value — including the default 'lzf', which is never passed
    to HDFStore — falls into the else branch and writes with complevel=0
    (uncompressed, zlib selected but inert).  This looks unintended; confirm
    upstream before changing.
    """
    logger.debug('Export to hdf5 format...')
    self._ensure_consistency()
    self.metadata.required = self
    meta = self.metadata.get_metadata()
    jmeta = json.dumps(meta)
    complib = 'zlib'
    complevel = 5
    if (compression and (compression == 'blosc')):
        complib = 'blosc'
    else:
        complevel = 0
    with pd.HDFStore(wfile.file, 'w', complevel=complevel, complib=complib) as store:
        logger.debug('export to HDF5 %s', wfile.name)
        store.put('Well', self._wdata.data)
        # Attach metadata and format markers so the importer can validate.
        store.get_storer('Well').attrs['metadata'] = jmeta
        store.get_storer('Well').attrs['provider'] = 'xtgeo'
        store.get_storer('Well').attrs['format_idcode'] = 1401
    logger.debug('Export to hdf5 format... done!')
def dump(obj, nested_level=0, output=sys.stdout):
    """Pretty-print nested dicts/lists to *output*, one item per line.

    The original body was a broken 2to3 artifact: every write was the
    Python-2 statement mangled into ``((print >> output), (...))``, which
    raises TypeError at runtime in Python 3.  Rewritten with
    ``print(..., file=output)``.  The ``__iter__`` checks now exclude
    strings, which (unlike in Python 2) are iterable in Python 3 and would
    otherwise be formatted as nested containers.
    """
    spacing = ' '
    if isinstance(obj, dict):
        print('%s{' % (nested_level * spacing), file=output)
        for k, v in obj.items():
            if hasattr(v, '__iter__') and not isinstance(v, str):
                # Container value: print the key, then recurse one level deeper.
                print('%s%s:' % ((nested_level + 1) * spacing, k), file=output)
                dump(v, nested_level + 1, output)
            else:
                print('%s%s: %s' % ((nested_level + 1) * spacing, k, v), file=output)
        print('%s}' % (nested_level * spacing), file=output)
    elif isinstance(obj, list):
        print('%s[' % (nested_level * spacing), file=output)
        for v in obj:
            if hasattr(v, '__iter__') and not isinstance(v, str):
                dump(v, nested_level + 1, output)
            else:
                print('%s%s' % ((nested_level + 1) * spacing, v), file=output)
        print('%s]' % (nested_level * spacing), file=output)
    else:
        # Scalar leaf: print at the current indent.
        print('%s%s' % (nested_level * spacing, obj), file=output)
def test_conversion_of_ai_standard_to_red_shift_material_bump_properties(create_pymel, setup_scene):
    """Converting an aiStandard shader must carry its bump2d chain over to
    the Redshift material's bump input.

    Uses the 'create_pymel'/'setup_scene' fixtures for a live Maya/pymel
    session.
    """
    pm = create_pymel
    # Build an aiStandard shader with a file -> bump2d -> normalCamera chain.
    (ai_standard, ai_standard_sg) = pm.createSurfaceShader('aiStandard')
    file_node = pm.shadingNode('file', asTexture=1)
    bump2d_node = pm.shadingNode('bump2d', asUtility=1)
    file_node.outAlpha.connect(bump2d_node.bumpValue)
    bump2d_node.outNormal.connect(ai_standard.normalCamera)
    conversion_man = ai2rs.ConversionManager()
    rs_material = conversion_man.convert(ai_standard)
    # The same bump2d node must now feed the Redshift material's bump input.
    assert (bump2d_node in rs_material.bump_input.inputs())